Whamcloud - gitweb
Branch HEAD
[fs/lustre-release.git] / lustre / kernel_patches / patches / qsnet-suse-2.6.patch
1 Index: linux-2.6.5-7.191/arch/i386/defconfig
2 ===================================================================
3 --- linux-2.6.5-7.191.orig/arch/i386/defconfig  2005-06-28 12:24:26.000000000 -0400
4 +++ linux-2.6.5-7.191/arch/i386/defconfig       2005-07-28 14:52:52.764691504 -0400
5 @@ -139,6 +139,8 @@
6  CONFIG_EFI=y
7  CONFIG_BOOT_IOREMAP=y
8  CONFIG_REGPARM=y
9 +CONFIG_IOPROC=y
10 +CONFIG_PTRACK=y
11  
12  #
13  # Special options
14 Index: linux-2.6.5-7.191/arch/i386/Kconfig
15 ===================================================================
16 --- linux-2.6.5-7.191.orig/arch/i386/Kconfig    2005-06-28 12:24:15.000000000 -0400
17 +++ linux-2.6.5-7.191/arch/i386/Kconfig 2005-07-28 14:52:52.765691352 -0400
18 @@ -1030,6 +1030,9 @@
19           a work-around for a number of buggy BIOSes. Switch this option on if
20           your computer crashes instead of powering off properly.
21  
22 +source "mm/Kconfig"
23 +source "kernel/Kconfig"
24 +       
25  endmenu
26  
27  source "arch/i386/kernel/cpu/cpufreq/Kconfig"
28 Index: linux-2.6.5-7.191/arch/i386/mm/hugetlbpage.c
29 ===================================================================
30 --- linux-2.6.5-7.191.orig/arch/i386/mm/hugetlbpage.c   2005-06-28 12:24:09.000000000 -0400
31 +++ linux-2.6.5-7.191/arch/i386/mm/hugetlbpage.c        2005-07-28 14:52:52.766691200 -0400
32 @@ -16,6 +16,7 @@
33  #include <linux/err.h>
34  #include <linux/sysctl.h>
35  #include <linux/mempolicy.h>
36 +#include <linux/ioproc.h>
37  #include <asm/mman.h>
38  #include <asm/pgalloc.h>
39  #include <asm/tlb.h>
40 @@ -393,6 +394,7 @@
41  {
42         struct mm_struct *mm = vma->vm_mm;
43         spin_lock(&mm->page_table_lock);
44 +       ioproc_invalidate_range(vma, start, start + length);
45         unmap_hugepage_range(vma, start, start + length);
46         spin_unlock(&mm->page_table_lock);
47  }
48 Index: linux-2.6.5-7.191/arch/ia64/defconfig
49 ===================================================================
50 --- linux-2.6.5-7.191.orig/arch/ia64/defconfig  2005-06-28 12:24:26.000000000 -0400
51 +++ linux-2.6.5-7.191/arch/ia64/defconfig       2005-07-28 14:52:52.767691048 -0400
52 @@ -103,6 +103,8 @@
53  CONFIG_EFI_VARS=y
54  CONFIG_BINFMT_ELF=y
55  CONFIG_BINFMT_MISC=m
56 +CONFIG_IOPROC=y
57 +CONFIG_PTRACK=y
58  
59  #
60  # Power management and ACPI
61 Index: linux-2.6.5-7.191/arch/ia64/Kconfig
62 ===================================================================
63 --- linux-2.6.5-7.191.orig/arch/ia64/Kconfig    2005-06-28 12:24:20.000000000 -0400
64 +++ linux-2.6.5-7.191/arch/ia64/Kconfig 2005-07-28 14:52:52.768690896 -0400
65 @@ -323,6 +323,8 @@
66           To use this option, you have to check that the "/proc file system
67           support" (CONFIG_PROC_FS) is enabled, too.
68  
69 +source "mm/Kconfig"
70 +source "kernel/Kconfig"
71  source "fs/Kconfig.binfmt"
72  
73  endmenu
74 Index: linux-2.6.5-7.191/arch/ia64/mm/hugetlbpage.c
75 ===================================================================
76 --- linux-2.6.5-7.191.orig/arch/ia64/mm/hugetlbpage.c   2005-06-28 12:24:04.000000000 -0400
77 +++ linux-2.6.5-7.191/arch/ia64/mm/hugetlbpage.c        2005-07-28 14:52:52.768690896 -0400
78 @@ -19,6 +19,7 @@
79  #include <linux/slab.h>
80  #include <linux/sysctl.h>
81  #include <linux/mempolicy.h>
82 +#include <linux/ioproc.h>
83  #include <asm/mman.h>
84  #include <asm/pgalloc.h>
85  #include <asm/tlb.h>
86 @@ -378,6 +379,7 @@
87  {
88         struct mm_struct *mm = vma->vm_mm;
89         spin_lock(&mm->page_table_lock);
90 +       ioproc_invalidate_range(vma, start, start + length);
91         unmap_hugepage_range(vma, start, start + length);
92         spin_unlock(&mm->page_table_lock);
93  }
94 Index: linux-2.6.5-7.191/arch/x86_64/defconfig
95 ===================================================================
96 --- linux-2.6.5-7.191.orig/arch/x86_64/defconfig        2005-06-28 12:24:26.000000000 -0400
97 +++ linux-2.6.5-7.191/arch/x86_64/defconfig     2005-07-28 14:52:52.770690592 -0400
98 @@ -91,6 +91,8 @@
99  CONFIG_GART_IOMMU=y
100  CONFIG_SWIOTLB=y
101  CONFIG_X86_MCE=y
102 +CONFIG_IOPROC=y
103 +CONFIG_PTRACK=y
104  
105  #
106  # Power management options
107 Index: linux-2.6.5-7.191/arch/x86_64/Kconfig
108 ===================================================================
109 --- linux-2.6.5-7.191.orig/arch/x86_64/Kconfig  2005-06-28 12:24:23.000000000 -0400
110 +++ linux-2.6.5-7.191/arch/x86_64/Kconfig       2005-07-28 14:52:52.770690592 -0400
111 @@ -341,6 +341,9 @@
112  
113  source "arch/x86_64/kernel/cpufreq/Kconfig"
114  
115 +source "mm/Kconfig"
116 +source "kernel/Kconfig"
117 +
118  endmenu
119  
120  menu "Bus options (PCI etc.)"
121 Index: linux-2.6.5-7.191/Documentation/vm/ioproc.txt
122 ===================================================================
123 --- linux-2.6.5-7.191.orig/Documentation/vm/ioproc.txt  2004-02-23 16:02:56.000000000 -0500
124 +++ linux-2.6.5-7.191/Documentation/vm/ioproc.txt       2005-07-28 14:52:52.771690440 -0400
125 @@ -0,0 +1,468 @@
126 +Linux IOPROC patch overview
127 +===========================
128 +
129 +The network interface for an HPC network differs significantly from
130 +network interfaces for traditional IP networks. HPC networks tend to
131 +be used directly from user processes and perform large RDMA transfers
132 +between these processes' address spaces. They also have a requirement
133 +for low latency communication, and typically achieve this by OS bypass
134 +techniques.  This then requires a different model to traditional
135 +interconnects, in that a process may need to expose a large amount of
136 +its address space to the network RDMA.
137 +
138 +Locking down of memory has been a common mechanism for performing
139 +this, together with a pin-down cache implemented in user
140 +libraries. The disadvantage of this method is that large portions of
141 +the physical memory can be locked down for a single process, even if
142 +its working set changes over the different phases of its
143 +execution. This leads to inefficient memory utilisation - akin to the
144 +disadvantage of swapping compared to paging.
145 +
146 +This model also has problems where memory is being dynamically
147 +allocated and freed, since the pin down cache is unaware that memory
148 +may have been released by a call to munmap() and so it will still be
149 +locking down the now unused pages.
150 +
151 +Some modern HPC network interfaces implement their own MMU and are
152 +able to handle a translation fault during a network access. The
153 +Quadrics (http://www.quadrics.com) devices (Elan3 and Elan4) have done
154 +this for some time and we expect others to follow the same route in
155 +the relatively near future. These NICs are able to operate in an
156 +environment where paging occurs and do not require memory to be locked
157 +down. The advantage of this is that the user process can expose large
158 +portions of its address space without having to worry about physical
159 +memory constraints.
160 +
161 +However should the operating system decide to swap a page to disk,
162 +then the NIC must be made aware that it should no longer read/write
163 +from this memory, but should generate a translation fault instead.
164 +
165 +The ioproc patch has been developed to provide a mechanism whereby the
166 +device driver for a NIC can be aware of when a user process's address
167 +translations change, either by paging or by explicitly mapping or
168 +unmapping memory.
169 +
170 +The patch involves inserting callbacks where translations are being
171 +invalidated to notify the NIC that the memory behind those
172 +translations is no longer visible to the application (and so should
173 +not be visible to the NIC). This callback is then responsible for
174 +ensuring that the NIC will not access the physical memory that was
175 +being mapped.
176 +
177 +An ioproc invalidate callback in the kswapd code could be utilised to
178 +prevent memory from being paged out if the NIC is unable to support
179 +network page faulting.
180 +
181 +For NICs which support network page faulting, there is no requirement
182 +for a user level pin down cache, since they are able to page-in their
183 +translations on the first communication using a buffer. However this
184 +is likely to be inefficient, resulting in slow first use of the
185 +buffer. If the communication buffers were continually allocated and
186 +freed using mmap based malloc() calls then this would lead to all
187 +communications being slower than desirable.
188 +
189 +To optimise these warm-up cases the ioproc patch adds calls to
190 +ioproc_update wherever the kernel is creating translations for a user
191 +process. These then allow the device driver to preload translations
192 +so that they are already present for the first network communication
193 +from a buffer.
194 +
195 +Linux 2.6 IOPROC implementation details
196 +=======================================
197 +
198 +The Linux IOPROC patch adds hooks to the Linux VM code whenever page
199 +table entries are being created and/or invalidated. IOPROC device
200 +drivers can register their interest in being informed of such changes
201 +by registering an ioproc_ops structure which is defined as follows;
202 +
203 +extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip);
204 +extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip);
205 +
206 +typedef struct ioproc_ops {
207 +       struct ioproc_ops *next;
208 +       void *arg;
209 +
210 +       void (*release)(void *arg, struct mm_struct *mm);
211 +       void (*sync_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
212 +       void (*invalidate_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
213 +       void (*update_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
214 +
215 +       void (*change_protection)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot);
216 +
217 +       void (*sync_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
218 +       void (*invalidate_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
219 +       void (*update_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
220 +
221 +} ioproc_ops_t;
222 +
223 +ioproc_register_ops
224 +===================
225 +This function should be called by the IOPROC device driver to register
226 +its interest in PTE changes for the process associated with the passed
227 +in mm_struct.
228 +
229 +The ioproc registration is not inherited across fork() and should be
230 +called once for each process that IOPROC is interested in.
231 +
232 +This function must be called whilst holding the mm->page_table_lock.
233 +
234 +ioproc_unregister_ops
235 +=====================
236 +This function should be called by the IOPROC device driver when it no
237 +longer requires informing of PTE changes in the process associated
238 +with the supplied mm_struct.
239 +
240 +This function does not normally need to be called as the ioproc_ops
241 +struct is unlinked from the associated mm_struct during the
242 +ioproc_release() call.
243 +
244 +This function must be called whilst holding the mm->page_table_lock.
245 +
246 +ioproc_ops struct
247 +=================
248 +A linked list of ioproc_ops structures is hung off the user process
249 +mm_struct (linux/sched.h). At each hook point in the patched kernel
250 +the ioproc patch will call the associated ioproc_ops callback function
251 +pointer in turn for each registered structure.
252 +
253 +The intention of the callbacks is to allow the IOPROC device driver to
254 +inspect the new or modified PTE entry via the Linux kernel
255 +(e.g. find_pte_map()). These callbacks should not modify the Linux
256 +kernel VM state or PTE entries.
257 +
258 +The ioproc_ops callback function pointers are defined as follows;
259 +
260 +ioproc_release
261 +==============
262 +The release hook is called when a program exits and all its vma areas
263 +are torn down and unmapped. i.e. during exit_mmap(). Before each
264 +release hook is called the ioproc_ops structure is unlinked from the
265 +mm_struct.
266 +
267 +No locks are required as the process has the only reference to the mm
268 +at this point.
269 +
270 +ioproc_sync_[range|page]
271 +========================
272 +The sync hooks are called when a memory map is synchronised with its
273 +disk image i.e. when the msync() syscall is invoked. Any future read
274 +or write by the IOPROC device to the associated pages should cause the
275 +page to be marked as referenced or modified.
276 +
277 +Called holding the mm->page_table_lock
278 +
279 +ioproc_invalidate_[range|page]
280 +==============================
281 +The invalidate hooks are called whenever a valid PTE is unloaded
282 +e.g. when a page is unmapped by the user or paged out by the
283 +kernel. After this call the IOPROC must not access the physical memory
284 +again unless a new translation is loaded.
285 +
286 +Called holding the mm->page_table_lock
287 +
288 +ioproc_update_[range|page]
289 +==========================
290 +The update hooks are called whenever a valid PTE is loaded
291 +e.g. mmaping memory, moving the brk up, when breaking COW or faulting
292 +in an anonymous page of memory. These give the IOPROC device the
293 +opportunity to load translations speculatively, which can improve
294 +performance by avoiding device translation faults.
295 +
296 +Called holding the mm->page_table_lock
297 +
298 +ioproc_change_protection
299 +========================
300 +This hook is called when the protection on a region of memory is
301 +changed i.e. when the mprotect() syscall is invoked.
302 +
303 +The IOPROC must not be able to write to a read-only page, so if the
304 +permissions are downgraded then it must honour them. If they are
305 +upgraded it can treat this in the same way as the
306 +ioproc_update_[range|page]() calls
307 +
308 +Called holding the mm->page_table_lock
309 +
310 +
311 +Linux 2.6 IOPROC patch details
312 +==============================
313 +
314 +Here are the specific details of each ioproc hook added to the Linux
315 +2.6 VM system and the reasons for doing so;
316 +
317 +++++ FILE
318 +       mm/fremap.c
319 +
320 +==== FUNCTION
321 +       zap_pte
322 +
323 +CALLED FROM
324 +       install_page
325 +       install_file_pte
326 +
327 +PTE MODIFICATION
328 +       ptep_clear_flush
329 +
330 +ADDED HOOKS
331 +       ioproc_invalidate_page
332 +
333 +==== FUNCTION
334 +       install_page
335 +
336 +CALLED FROM
337 +       filemap_populate, shmem_populate
338 +
339 +PTE MODIFICATION
340 +       set_pte
341 +
342 +ADDED HOOKS
343 +       ioproc_update_page
344 +
345 +==== FUNCTION
346 +       install_file_pte
347 +
348 +CALLED FROM
349 +       filemap_populate, shmem_populate
350 +
351 +PTE MODIFICATION
352 +       set_pte
353 +
354 +ADDED HOOKS
355 +       ioproc_update_page
356 +
357 +
358 +++++ FILE
359 +       mm/memory.c
360 +
361 +==== FUNCTION
362 +       zap_page_range
363 +
364 +CALLED FROM
365 +       read_zero_pagealigned, madvise_dontneed, unmap_mapping_range,
366 +       unmap_mapping_range_list, do_mmap_pgoff
367 +
368 +PTE MODIFICATION
369 +       set_pte (unmap_vmas)
370 +
371 +ADDED HOOKS
372 +       ioproc_invalidate_range
373 +
374 +
375 +==== FUNCTION
376 +       zeromap_page_range
377 +
378 +CALLED FROM
379 +       read_zero_pagealigned, mmap_zero
380 +
381 +PTE MODIFICATION
382 +       set_pte (zeromap_pte_range)
383 +
384 +ADDED HOOKS
385 +       ioproc_invalidate_range
386 +       ioproc_update_range
387 +
388 +
389 +==== FUNCTION
390 +       remap_page_range
391 +
392 +CALLED FROM
393 +       many device drivers
394 +
395 +PTE MODIFICATION
396 +       set_pte (remap_pte_range)
397 +
398 +ADDED HOOKS
399 +       ioproc_invalidate_range
400 +       ioproc_update_range
401 +
402 +
403 +==== FUNCTION
404 +       break_cow
405 +
406 +CALLED FROM
407 +       do_wp_page
408 +
409 +PTE MODIFICATION
410 +       ptep_establish
411 +
412 +ADDED HOOKS
413 +       ioproc_invalidate_page
414 +       ioproc_update_page
415 +
416 +
417 +==== FUNCTION
418 +       do_wp_page
419 +
420 +CALLED FROM
421 +       do_swap_page, handle_pte_fault
422 +
423 +PTE MODIFICATION
424 +       ptep_set_access_flags
425 +
426 +ADDED HOOKS
427 +       ioproc_update_page
428 +
429 +
430 +==== FUNCTION
431 +       do_swap_page
432 +
433 +CALLED FROM
434 +       handle_pte_fault
435 +
436 +PTE MODIFICATION
437 +       set_pte
438 +
439 +ADDED HOOKS
440 +       ioproc_update_page
441 +
442 +
443 +==== FUNCTION
444 +       do_anonymous_page
445 +
446 +CALLED FROM
447 +       do_no_page
448 +
449 +PTE MODIFICATION
450 +       set_pte
451 +
452 +ADDED HOOKS
453 +       ioproc_update_page
454 +
455 +
456 +==== FUNCTION
457 +       do_no_page
458 +
459 +CALLED FROM
460 +       do_file_page, handle_pte_fault
461 +
462 +PTE MODIFICATION
463 +       set_pte
464 +
465 +ADDED HOOKS
466 +       ioproc_update_page
467 +
468 +
469 +++++ FILE
470 +       mm/mmap.c
471 +
472 +==== FUNCTION
473 +       unmap_region
474 +
475 +CALLED FROM
476 +       do_munmap
477 +
478 +PTE MODIFICATION
479 +       set_pte (unmap_vmas)
480 +
481 +ADDED HOOKS
482 +       ioproc_invalidate_range
483 +
484 +
485 +==== FUNCTION
486 +       exit_mmap
487 +
488 +CALLED FROM
489 +       mmput
490 +
491 +PTE MODIFICATION
492 +       set_pte (unmap_vmas)
493 +
494 +ADDED HOOKS
495 +       ioproc_release
496 +
497 +
498 +++++ FILE
499 +       mm/mprotect.c
500 +
501 +==== FUNCTION
502 +       change_protection
503 +
504 +CALLED FROM
505 +       mprotect_fixup
506 +
507 +PTE MODIFICATION
508 +       set_pte (change_pte_range)
509 +
510 +ADDED HOOKS
511 +       ioproc_change_protection
512 +
513 +
514 +++++ FILE
515 +       mm/mremap.c
516 +
517 +==== FUNCTION
518 +       move_page_tables
519 +
520 +CALLED FROM
521 +       move_vma
522 +
523 +PTE MODIFICATION
524 +       ptep_clear_flush (move_one_page)
525 +
526 +ADDED HOOKS
527 +       ioproc_invalidate_range
528 +       ioproc_invalidate_range
529 +
530 +
531 +++++ FILE
532 +       mm/rmap.c
533 +
534 +==== FUNCTION
535 +       try_to_unmap_one
536 +
537 +CALLED FROM
538 +       try_to_unmap_anon, try_to_unmap_file
539 +
540 +PTE MODIFICATION
541 +       ptep_clear_flush
542 +
543 +ADDED HOOKS
544 +       ioproc_invalidate_page
545 +
546 +
547 +==== FUNCTION
548 +       try_to_unmap_cluster
549 +
550 +CALLED FROM
551 +       try_to_unmap_file
552 +
553 +PTE MODIFICATION
554 +       ptep_clear_flush
555 +
556 +ADDED HOOKS
557 +       ioproc_invalidate_page
558 +
559 +
560 +
561 +++++ FILE 
562 +       mm/msync.c
563 +
564 +==== FUNCTION
565 +       filemap_sync
566 +
567 +CALLED FROM
568 +       msync_interval
569 +
570 +PTE MODIFICATION
571 +       ptep_clear_flush_dirty (filemap_sync_pte)
572 +
573 +ADDED HOOKS
574 +       ioproc_sync_range
575 +
576 +
577 +++++ FILE
578 +       mm/hugetlb.c
579 +
580 +==== FUNCTION
581 +       zap_hugepage_range
582 +
583 +CALLED FROM
584 +       hugetlb_vmtruncate_list
585 +
586 +PTE MODIFICATION
587 +       ptep_get_and_clear (unmap_hugepage_range)
588 +
589 +ADDED HOOK
590 +       ioproc_invalidate_range
591 +
592 +
593 +-- Last update DavidAddison - 17 Aug 2004
594 Index: linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_linux.c
595 ===================================================================
596 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/eip/eip_linux.c    2004-02-23 16:02:56.000000000 -0500
597 +++ linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_linux.c 2005-07-28 14:52:52.774689984 -0400
598 @@ -0,0 +1,1576 @@
599 +/*
600 + *    Copyright (c) 2003 by Quadrics Ltd.
601 + * 
602 + *    For licensing information please see the supplied COPYING file
603 + *
604 + */
605 +
606 +#ident "@(#)$Id: eip_linux.c,v 1.89.2.4 2005/02/04 14:30:35 mike Exp $"
607 +
608 +#include <qsnet/kernel.h>
609 +#include <qsnet/debug.h>
610 +
611 +#include <linux/module.h>
612 +
613 +#include <linux/init.h>
614 +#include <linux/list.h>
615 +#include <linux/netdevice.h>
616 +#include <linux/etherdevice.h>
617 +#include <linux/skbuff.h>
618 +#include <linux/kernel.h>
619 +#include <linux/proc_fs.h>
620 +#include <linux/time.h>
621 +#include <linux/version.h>
622 +
623 +#include <asm/uaccess.h>
624 +#include <asm/unaligned.h>
625 +
626 +#undef ASSERT
627 +#include <net/sock.h>
628 +#include <net/ip.h>
629 +
630 +
631 +
632 +#include <elan/epcomms.h>
633 +#include <elan/epsvc.h>
634 +
635 +#include "eip_linux.h"
636 +#include "eip_stats.h"
637 +
638 +#ifdef UNUSED
639 +static void eip_skb_display(struct sk_buff *);
640 +#endif
641 +static void eip_iph_display(struct iphdr *);
642 +#ifdef UNUSED
643 +static void eip_eiph_display(EIP_HEADER *);
644 +static void eip_packet_display(unsigned char *);
645 +#endif
646 +static void eip_tmd_display(EIP_TMD *);
647 +static void eip_tmd_head_display(EIP_TMD_HEAD *);
648 +static void eip_rmd_display(EIP_RMD *);
649 +static void eip_rmd_head_display(EIP_RMD_HEAD *);
650 +
651 +static void eip_rmd_reclaim(EIP_RMD *);
652 +
653 +static inline EP_NMH *eip_dma_reserve(int, int);
654 +static inline void __eip_tmd_load(EIP_TMD *, EP_RAILMASK *);
655 +static inline void __eip_tmd_unload(EIP_TMD *);
656 +static inline unsigned long eip_buff_alloc(int, int);
657 +static inline void eip_buff_free(unsigned long, int);
658 +static struct iphdr *eip_ipfrag_get(char *);
659 +static inline void eip_rmd_free(EIP_RMD *);
660 +static inline void eip_skb_load(EIP_RMD *);
661 +static inline void eip_skb_unload(EIP_RMD *);
662 +static inline void eip_rmd_requeue(EIP_RMD *);
663 +static EIP_RMD *eip_rmd_alloc(int, int);
664 +static int eip_rmd_alloc_replace(EIP_RMD *, int, int);
665 +static int eip_rmd_alloc_queue(int, int, int, int);
666 +static int eip_rmds_alloc(void);
667 +static void eip_rxhandler(EP_RXD *);
668 +static void eip_rx_tasklet(unsigned long);
669 +static inline void eip_tmd_init(EIP_TMD *, unsigned long, EIP_TMD_HEAD *, unsigned long, int);
670 +static inline EIP_TMD *eip_tmd_get(int);
671 +static inline void eip_tmd_put(EIP_TMD *);
672 +static inline void eip_tmd_load(EIP_TMD *);
673 +static inline void eip_tmd_unload(EIP_TMD *);
674 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD *, EIP_TMD_HEAD *, int);
675 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD *, int);
676 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD *, int);
677 +static int eip_tmds_alloc(void);
678 +int eip_hard_start_xmit(struct sk_buff *, struct net_device *);
679 +static inline int eip_do_xmit(EIP_TMD *, EP_NMD *i, EP_PAYLOAD *);
680 +static void eip_txhandler(EP_TXD *, void *, EP_STATUS);
681 +static void eip_tx_tasklet(unsigned long);
682 +void eip_stop_queue(void);
683 +void eip_start_queue(void);
684 +static int eip_open(struct net_device *);
685 +static int eip_close(struct net_device *);
686 +static struct net_device_stats *eip_get_stats(struct net_device *);
687 +static int eip_change_mtu(struct net_device *, int);
688 +
689 +static int eip_rx_dropping = 0;
690 +static int eip_rx_tasklet_locked = 1;
691 +
692 +/* Global */
693 +struct timer_list eip_rx_tasklet_timer;
694 +       
695 +EIP_RX *eip_rx = NULL;
696 +EIP_TX *eip_tx = NULL;
697 +int  eip_checksum_state=CHECKSUM_NONE;
698 +
699 +int tmd_max = EIP_TMD_MAX_NR;
700 +int rmd_max = EIP_RMD_MAX_NR;
701 +int rx_envelope_nr = EIP_RX_ENVELOPE_NR;
702 +int rx_granularity = EIP_RX_GRANULARITY;
703 +int tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
704 +EP_RAILMASK tx_railmask = EP_RAILMASK_ALL;
705 +int eipdebug = 0;
706 +
707 +#ifdef UNUSED
708 +static void eip_skb_display(struct sk_buff *skb)
709 +{
710 +       if (skb) {
711 +               __EIP_DBG_PRINTF("SKB [%p] : len %d truesize %d  proto %x pkt type %x cloned %d users %d summed %d\n", 
712 +                       skb, skb->len, skb->truesize, skb->protocol, skb->pkt_type, skb->cloned, atomic_read(&skb->users), skb->ip_summed);
713 +               __EIP_DBG_PRINTF("SKB [%p] : skb_shinfo dataref %d nr_frags %d frag_list[%p] (device %p)\n", skb,
714 +                        atomic_read(&skb_shinfo(skb)->dataref), skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->frag_list, skb->dev);
715 +               __EIP_DBG_PRINTF("SKB [%p] : head[%p] data[%p] tail [%p] end [%p] data_len [%d]\n", skb, skb->head, skb->data, 
716 +                               skb->tail, skb->end, skb->data_len);
717 +               __EIP_DBG_PRINTF("SKB [%p] : Transport Layer h.(th, uh, icmph, raw)[%p]\n", skb, skb->h.th);
718 +               __EIP_DBG_PRINTF("SKB [%p] : Network Layer      nh.(iph, arph, raw)[%p]\n", skb, skb->nh.iph);
719 +               __EIP_DBG_PRINTF("SKB [%p] : Link Layer         mac.(ethernet, raw)[%p]\n", skb, skb->mac.ethernet);
720 +               return;
721 +       }
722 +       EIP_ERR_PRINTF("SKB IS NULL - NO SKB TO DISPLAY\n");
723 +}
724 +#endif
725 +static void eip_iph_display(struct iphdr *iph)
726 +{
727 +       if (iph) {
728 +               __EIP_DBG_PRINTF("IPH [%p] : version %d header len %d TOS 0x%x Total len %d\n", 
729 +                       iph, iph->version, iph->ihl, htons(iph->tos), htons(iph->tot_len));
730 +               __EIP_DBG_PRINTF("IPH [%p] : id %d frag flags 0x%x offset %d\n",
731 +                               iph, htons(iph->id), (iph->frag_off & htons(IP_CE | IP_DF | IP_MF)) >> 4, 
732 +                               (htons(iph->frag_off) << 3) & IP_OFFSET);
733 +               __EIP_DBG_PRINTF("IPH [%p] : TTL %d proto %d header checksum 0x%x\n", iph, iph->ttl, iph->protocol, iph->check);
734 +               __EIP_DBG_PRINTF("IPH [%p] : IP src %u.%u.%u.%u dest %u.%u.%u.%u\n", iph, 
735 +                                ((unsigned char *)&(iph->saddr))[0],((unsigned char *)&(iph->saddr))[1], ((unsigned char *)&(iph->saddr))[2],((unsigned char *)&(iph->saddr))[3],
736 +                                ((unsigned char *)&(iph->daddr))[0],((unsigned char *)&(iph->daddr))[1], ((unsigned char *)&(iph->daddr))[2],((unsigned char *)&(iph->daddr))[3]);
737 +               return;
738 +       }
739 +       EIP_ERR_PRINTF("IPH IS NULL - NO IPH TO DISPLAY\n");
740 +}
741 +#ifdef UNUSED
742 +static void eip_eiph_display(EIP_HEADER * eiph)
743 +{
744 +       if (eiph) {
745 +               __EIP_DBG_PRINTF("EIPH [%p] : dhost %04x.%04x.%04x sap %x\n", eiph, eiph->h_dhost.ip_bcast, eiph->h_dhost.ip_inst, 
746 +                               eiph->h_dhost.ip_addr, eiph->h_sap);
747 +               __EIP_DBG_PRINTF("EIPH [%p] : shost %04x.%04x.%04x \n", eiph, eiph->h_shost.ip_bcast, eiph->h_shost.ip_inst,
748 +                                eiph->h_shost.ip_addr);
749 +               return;
750 +       }
751 +       EIP_ERR_PRINTF("EIPH IS NULL - NO EIPH TO DISPLAY\n");
752 +}
753 +static void eip_packet_display(unsigned char *data)
754 +{
755 +       eip_eiph_display((EIP_HEADER *) data);
756 +       eip_iph_display((struct iphdr *) (data + EIP_HEADER_PAD + ETH_HLEN));
757 +}
758 +#endif
759 +static void eip_tmd_display(EIP_TMD * tmd)
760 +{
761 +       if (tmd) {
762 +               __EIP_DBG_PRINTF("\t\tTMD [%p] : next[%p] skb[%p] DVMA[%d]\n", tmd, tmd->chain.next, tmd->skb, tmd->dvma_idx);
763 +               if (tmd->dma_base)
764 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] *data 0x%lx\n", tmd, tmd->head, *((unsigned long *) tmd->dma_base));
765 +               else
766 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] NO DATA !!!\n", tmd, tmd->head);
767 +               __EIP_DBG_PRINTF("TMD [%p] : DMA(%lx,%d,%d) ebase[%x]\n",tmd,  tmd->dma_base, tmd->dma_len, tmd->nmd.nmd_len,
768 +                                tmd->nmd.nmd_addr);
769 +               return;
770 +       }
771 +       EIP_ERR_PRINTF("TMD IS NULL - NO TMD TO DISPLAY\n");
772 +       
773 +}
774 +static void eip_ipf_display(EIP_IPFRAG * ipf)
775 +{
776 +       if (ipf) {
777 +               __EIP_DBG_PRINTF("IPF[%p] : datagram len %d dma correction %d uts %lx frag_nr %d\n", ipf, ipf->datagram_len,
778 +                               ipf->dma_correction, ipf->timestamp.tv_usec, ipf->frag_nr);
779 +               eip_tmd_display((EIP_TMD *) ipf);
780 +               return;
781 +       }
782 +       EIP_ERR_PRINTF("IPF IS NULL - NO IPF TO DISPLAY\n");
783 +}
784 +
785 +static void eip_tmd_head_display(EIP_TMD_HEAD * head)
786 +{
787 +       if (head) {
788 +               __EIP_DBG_PRINTF("TMD HEAD [%p] : handle[%p] tmds[%p] %3.3d/%3.3d/%3.3d\n", head, head->handle, head->tmd, 
789 +                       EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats),
790 +                       eip_tx->tmd_max_nr);
791 +               return;
792 +       }
793 +       EIP_ERR_PRINTF("TMD HEAD IS NULL - NO TMD HEAD TO DISPLAY\n");
794 +}
795 +static void eip_rmd_display(EIP_RMD * rmd)
796 +{
797 +       if (rmd) {
798 +               __EIP_DBG_PRINTF("RMD [%p] : next[%p] rxd[%p] DVMA[%d]\n", rmd, rmd->chain.next, rmd->rxd, rmd->dvma_idx);
799 +               __EIP_DBG_PRINTF("RMD [%p] : head[%p]\n", rmd, rmd->head); 
800 +               __EIP_DBG_PRINTF("RMD [%p] : ebase[%x]\n", rmd,  rmd->nmd.nmd_addr); 
801 +               return;
802 +       }
803 +       EIP_ERR_PRINTF("RMD IS NULL - NO RMD TO DISPLAY\n");
804 +}
805 +static void eip_rmd_head_display(EIP_RMD_HEAD * head)
806 +{
807 +       if (head) {
808 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : rcvr[%p] handle[%p] busy list[%p]\n", head, head->rcvr, head->handle, head->busy_list);
809 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : %3.3d/%3.3d/%3.3d\n", head, 
810 +                               EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats), eip_rx->rmd_max_nr);
811 +               return;
812 +       }
813 +       EIP_ERR_PRINTF("RMD HEAD IS NULL - NO RMD HEAD TO DISPLAY\n");
814 +}
815 +
816 +/* END  - DISPLAY FUNCTIONS */
817 +static inline EP_NMH *eip_dma_reserve(int pages_nr, int perm)
818 +{
819 +       EP_NMH *handle = ep_dvma_reserve(eip_tx->ep_system, pages_nr, perm);
820 +       
821 +       if (handle)
822 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HANDLE [%p] %d pages of elan address space reserved\n", 
823 +                               handle, pages_nr);
824 +       else
825 +               EIP_ERR_PRINTF("cannot reserve %d page(s) of elan address space\n", pages_nr);
826 +
827 +       return handle;
828 +}
829 +
830 +static inline void __eip_tmd_load(EIP_TMD * tmd, EP_RAILMASK *rmask)
831 +{
832 +       EIP_ASSERT(tmd->nmd.nmd_len > 0);
833 +       
834 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) tmd->dma_base, tmd->nmd.nmd_len, tmd->head->handle,
835 +                       tmd->dvma_idx, rmask, &tmd->nmd);
836 +}
837 +
838 +static inline void __eip_tmd_unload(EIP_TMD * tmd)
839 +{
840 +       EIP_ASSERT(tmd->nmd.nmd_addr && tmd->head->handle);
841 +       
842 +       ep_dvma_unload(eip_tx->ep_system, tmd->head->handle, &tmd->nmd);
843 +       tmd->nmd.nmd_addr = 0;
844 +}
845 +static inline unsigned long eip_buff_alloc(int buff_len, int gfp)
846 +{
847 +       unsigned long buff_base = (buff_len < PAGE_SIZE) ? 
848 +                               (unsigned long) kmalloc(buff_len, gfp) :
849 +                               __get_dma_pages(gfp, get_order(buff_len));
850 +       
851 +       if (likely(buff_base))
852 +               return buff_base;
853 +
854 +       EIP_ERR_PRINTF("cannot allocate %db of memory\n", buff_len);
855 +       return 0;
856 +}
857 +static inline void eip_buff_free(unsigned long buff_base, int buff_len)
858 +{
859 +       (buff_len < PAGE_SIZE) ?  kfree((void *) buff_base) :
860 +               free_pages(buff_base, get_order(buff_len));
861 +}
862 +static struct iphdr *eip_ipfrag_get(char *data)
863 +{
864 +       struct ethhdr *eh = (struct ethhdr *) (data);
865 +       struct iphdr *iph;
866 +
867 +       if (eh->h_proto == htons(ETH_P_IP)) {
868 +               iph = (struct iphdr *) ((char *) eh + ETH_HLEN);
869 +
870 +               /* EIP_DBG(eip_iph_display(iph)); */
871 +
872 +               if ((iph->frag_off & htons(IP_MF | IP_OFFSET)))
873 +                       return iph;
874 +       }
875 +       return NULL;
876 +}
877 +
878 +static inline void eip_rmd_free(EIP_RMD * rmd)
879 +{
880 +       EIP_ASSERT2(rmd->nmd.nmd_addr == 0, eip_rmd_display, rmd);
881 +       
882 +       if ( rmd->skb != NULL) 
883 +               kfree_skb (rmd->skb);
884 +       
885 +       kfree(rmd);
886 +
887 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "RMD [%p] : FREED\n", rmd);
888 +}
889 +static inline void eip_skb_load(EIP_RMD * rmd)
890 +{
891 +       EP_RAILMASK rmask = rmd->rxd ? ep_rxd_railmask (rmd->rxd) : 0;
892 +
893 +       EIP_ASSERT(skb_tailroom(rmd->skb) > 0);
894 +
895 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) rmd->skb->data, skb_tailroom(rmd->skb), rmd->head->handle,
896 +                    rmd->dvma_idx, &rmask, &rmd->nmd);
897 +       
898 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : LOADED\n", rmd);
899 +}
900 +static inline void eip_skb_unload(EIP_RMD * rmd)
901 +{
902 +       EIP_ASSERT(rmd->nmd.nmd_addr && rmd->head->handle);
903 +       
904 +       ep_dvma_unload(eip_tx->ep_system, rmd->head->handle, &rmd->nmd);
905 +       rmd->nmd.nmd_addr = 0;
906 +       
907 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : UNLOADED\n", rmd);
908 +}
909 +static inline void eip_rmd_requeue(EIP_RMD * rmd)
910 +{
911 +       EIP_ASSERT(rmd->rxd);
912 +
913 +       rmd->chain.next    = NULL;
914 +
915 +       ep_requeue_receive(rmd->rxd, eip_rxhandler, rmd, &rmd->nmd, EP_NO_ALLOC|EP_NO_SLEEP );
916 +
917 +       atomic_inc(&rmd->head->stats);
918 +       
919 +       EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : REQUEUED\n", rmd);
920 +}
921 +static EIP_RMD * eip_rmd_alloc(int svc, int gfp)
922 +{
923 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
924 +       EIP_RMD *rmd;
925 +       struct sk_buff *skb;
926 +
927 +       if (!(skb = alloc_skb((buff_len -  EIP_EXTRA), gfp)))
928 +               return NULL;
929 +       
930 +       skb_reserve(skb, 2);
931 +
932 +       if (!(rmd = (EIP_RMD *) kmalloc(buff_len, gfp))) {
933 +               kfree_skb(skb);
934 +               return NULL;
935 +       }
936 +
937 +       rmd->skb = skb;
938 +
939 +       rmd->chain.next = NULL;
940 +       rmd->rxd = NULL;
941 +       rmd->head = &eip_rx->head[svc];
942 +
943 +       return rmd;
944 +}
945 +
946 +static int eip_rmd_alloc_replace(EIP_RMD *rmd, int svc, int gfp) 
947 +{
948 +       struct sk_buff *skb,*old;
949 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
950 +
951 +       if (!(skb = alloc_skb(buff_len, gfp)))
952 +               return 1;
953 +       
954 +       skb_reserve(skb, 2);
955 +
956 +       eip_skb_unload(rmd);
957 +
958 +       old      = rmd->skb;
959 +       rmd->skb = skb;
960 +
961 +       eip_skb_load(rmd);
962 +
963 +       eip_rmd_requeue(rmd);
964 +
965 +       kfree_skb(old);
966 +
967 +       return 0;
968 +}
969 +
970 +static int eip_rmd_alloc_queue(int svc, int dvma_idx, int gfp, int attr)
971 +{
972 +       EIP_RMD * rmd = eip_rmd_alloc(svc, gfp);
973 +
974 +       if (!rmd)
975 +               return 1;
976 +
977 +       EIP_STAT_ALLOC_ADD(&rmd->head->stats, 1);
978 +
979 +       rmd->dvma_idx = dvma_idx;
980 +       eip_skb_load(rmd);
981 +
982 +       EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "RMD [%p] : ALLOCATED for SVC 0x%x\n", rmd, svc);
983 +
984 +       if (ep_queue_receive(rmd->head->rcvr, eip_rxhandler, (void *) rmd, &rmd->nmd, attr) == ESUCCESS) {
985 +               atomic_inc(&rmd->head->stats);
986 +               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : QUEUED on SVC 0x%x\n", rmd, svc);
987 +               return 0;
988 +       }
989 +       
990 +       EIP_ERR_PRINTF("RMD [%p] : couldn't be QUEUED on SVC 0x%x\n", rmd, svc);
991 +
992 +       EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
993 +
994 +       eip_skb_unload(rmd);
995 +       eip_rmd_free(rmd);
996 +
997 +       return 1;
998 +}
999 +
1000 +static int eip_rmds_alloc(void)
1001 +{
1002 +       int idx, svc;
1003 +
1004 +       eip_rx->irq_list = NULL;
1005 +       eip_rx->irq_list_nr = 0;
1006 +
1007 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
1008 +               eip_rx->head[svc].rcvr = ep_alloc_rcvr(eip_tx->ep_system, EIP_SVC_EP(svc), rx_envelope_nr);
1009 +               if (!eip_rx->head[svc].rcvr) {
1010 +                       EIP_ERR_PRINTF("Cannot install receiver for SVC 0x%x - maybe cable is disconnected\n", svc);
1011 +                       return -EAGAIN;
1012 +               }
1013 +
1014 +               eip_rx->head[svc].handle =
1015 +                   eip_dma_reserve(EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)) * eip_rx->rmd_max_nr,
1016 +                                   EP_PERM_WRITE);
1017 +               if (!eip_rx->head[svc].handle)
1018 +                       return -ENOMEM;
1019 +               
1020 +               EIP_DBG(EIP_DBG_RMD_HEAD, eip_rmd_head_display, &eip_rx->head[svc]);
1021 +
1022 +               for (idx = 0; idx < EIP_RMD_NR; idx++) {
1023 +                       if (eip_rmd_alloc_queue(svc, idx * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
1024 +                                               GFP_KERNEL, EP_NO_SLEEP))
1025 +                               return -ENOMEM;
1026 +               }
1027 +       }
1028 +       return 0;
1029 +}
1030 +static void eip_rmds_free(void)
1031 +{
1032 +       unsigned long flags;
1033 +       EIP_RMD *rmd;
1034 +       int svc; 
1035 +       
1036 +       spin_lock_irqsave(&eip_rx->lock, flags);
1037 +       rmd = eip_rx->irq_list;
1038 +       eip_rx->irq_list = NULL;
1039 +       eip_rx->irq_list_nr = 0;
1040 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
1041 +
1042 +       eip_rmd_reclaim(rmd);
1043 +       
1044 +       for (svc = 0; svc < EIP_SVC_NR ; svc++) {
1045 +               
1046 +               while ((rmd = eip_rx->head[svc].busy_list)) {
1047 +                       eip_rx->head[svc].busy_list = NULL;
1048 +                       eip_rmd_reclaim(rmd);
1049 +                       if (eip_rx->head[svc].busy_list) {
1050 +                               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "Still RMD [%p] on BUSY list SVC 0x%d - Scheduling\n", rmd, svc);     
1051 +                               schedule();
1052 +                       }
1053 +               }
1054 +
1055 +               EIP_ASSERT(EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats) == EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats));
1056 +               
1057 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "HEAD[%p] : FREEING RCVR [%p]\n", &eip_rx->head[svc],
1058 +                               eip_rx->head[svc].rcvr);
1059 +               
1060 +               ep_free_rcvr(eip_rx->head[svc].rcvr);
1061 +
1062 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HEAD[%p] : RELEASING DVMA [%p]\n", &eip_rx->head[svc], 
1063 +                               eip_rx->head[svc].handle);
1064 +
1065 +               ep_dvma_release(eip_tx->ep_system, eip_rx->head[svc].handle);
1066 +       }
1067 +
1068 +}
1069 +static int eip_rx_queues_low (void) {
1070 +       int svc;
1071 +       for (svc = 0; svc < EIP_SVC_NR; svc++) 
1072 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats)  < EIP_RMD_ALLOC_THRESH) 
1073 +                       return (1);
1074 +       return (0);
1075 +}
1076 +static void eip_rxhandler(EP_RXD * rxd)
1077 +{
1078 +       EIP_RMD *rmd            = (EIP_RMD *) ep_rxd_arg(rxd);
1079 +       EP_STATUS ret           = ep_rxd_status(rxd);
1080 +       EP_PAYLOAD * payload    = ep_rxd_payload(rxd);
1081 +       unsigned long data      = (unsigned long) rmd->skb->data; 
1082 +       int frag_nr             = 0;
1083 +       int len;
1084 +
1085 +       struct sk_buff *skb;
1086 +       static char count = 0;
1087 +
1088 +       atomic_dec(&rmd->head->stats);
1089 +       rmd->rxd = rxd;
1090 +
1091 +       if (likely(ret == EP_SUCCESS)) {
1092 +
1093 +               rmd->head->dma++;
1094 +
1095 +               if ( eip_rx_dropping) {
1096 +                   eip_rmd_requeue(rmd);
1097 +                   return;
1098 +               }
1099 +
1100 +               len = (payload) ? payload->Data[frag_nr++] : ep_rxd_len(rxd);
1101 +
1102 +               EIP_DBG(EIP_DBG_RMD, eip_rmd_display, rmd);
1103 +
1104 +again:
1105 +               if ( (skb = skb_clone(rmd->skb, GFP_ATOMIC)) ) {
1106 +                       unsigned int off = (data - (unsigned long) rmd->skb->data);
1107 +
1108 +                       /* have to set the length before calling
1109 +                        * skb pull as it will not allow you to
1110 +                        * pull past the end */
1111 +
1112 +                       skb_put (skb, off + len);
1113 +                       skb_pull (skb, off);
1114 +
1115 +                       skb->protocol = eth_type_trans(skb, eip_rx->net_device);
1116 +                       skb->ip_summed = eip_checksum_state;
1117 +                       skb->dev = eip_rx->net_device;
1118 +
1119 +                       /* Fabien/David/Mike this is a hack/fix to allow aggregation of packets to work.
1120 +                        * The problem is ip_frag looks at the truesize to see if it is caching too much space.
1121 +                        * As we are reusing a large skb (cloned) for a number of small fragments, they appear to take up a lot of space,
1122 +                        * so ip_frag dropped them after 4 frags (not good). So we lie and set the truesize to just bigger than the data.
1123 +                        */
1124 +                       if (payload) 
1125 +                               skb->truesize = SKB_DATA_ALIGN(skb->len + EIP_HEADER_PAD) +sizeof(struct sk_buff);
1126 +
1127 +               }
1128 +               if ( (skb) && 
1129 +                    (netif_rx(skb) != NET_RX_DROP)){
1130 +
1131 +                       eip_rx->bytes += len;
1132 +                       
1133 +                       if (payload && payload->Data[frag_nr] ) {
1134 +                               data += EIP_IP_ALIGN(len);
1135 +                               len   = payload->Data[frag_nr++];
1136 +                               goto again;
1137 +                       }
1138 +                       eip_rx->packets += ++frag_nr;
1139 +               } else if ( (eip_rx->dropped++ % 20) == 0)
1140 +                               __EIP_DBG_PRINTK("Packet dropped by the TCP/IP stack - increase /proc/sys/net/core/netdev_max_backlog\n");
1141 +       } else if (ret == EP_SHUTDOWN ) {
1142 +               EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "ABORTING\n");
1143 +                ep_complete_receive(rxd);
1144 +                eip_skb_unload(rmd);
1145 +               EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
1146 +                eip_rmd_free(rmd);
1147 +               return;
1148 +       } else {
1149 +               EP_ENVELOPE *env = ep_rxd_envelope(rxd);
1150 +               EP_NMD *nmd ;
1151 +               
1152 +               EIP_ERR_PRINTF("RMD[%p] : RECEIVE ret = %d\n", rmd, ret);
1153 +
1154 +               for (len = 0 ; len < env->nFrags ; len++) {
1155 +                       nmd = &env->Frags[len];
1156 +                       EIP_ERR_PRINTF("RMD[%p] : ep_frag #%d nmd_addr [%x] nmd_len %d\n", rmd, len, 
1157 +                                       (unsigned int) nmd->nmd_addr, nmd->nmd_len);
1158 +               }
1159 +               eip_rx->errors++;
1160 +               EIP_ASSERT2(atomic_read(&skb_shinfo(rmd->skb)->dataref) == 1, eip_rmd_display, rmd);
1161 +       }
1162 +
1163 +       /* data is used to store the irq flags */
1164 +       spin_lock_irqsave(&eip_rx->lock, data);
1165 +       rmd->chain.next = eip_rx->irq_list;
1166 +       eip_rx->irq_list = rmd;
1167 +       eip_rx->irq_list_nr++;
1168 +       spin_unlock_irqrestore(&eip_rx->lock, data);
1169 +
1170 +       if (((count++ % eip_rx->sysctl_granularity) == 0) /* and either we have passed up a number of them */
1171 +           || eip_rx_queues_low())                       /* or we are low                                 */
1172 +               tasklet_schedule(&eip_rx->tasklet);
1173 +       else
1174 +       {
1175 +               if ( !timer_pending (&eip_rx_tasklet_timer)  )    /* the timer not already set  */
1176 +                       mod_timer (&eip_rx_tasklet_timer, lbolt);
1177 +       }
1178 +}
1179 +
1180 +/* dest: if the buffer still has a reference on it, move the rmd to the dest list */
1181 +static void eip_rmd_reclaim(EIP_RMD *rmd) 
1182 +{
1183 +       EIP_RMD *rmd_next = rmd;
1184 +       int dataref;
1185 +
1186 +       while (rmd_next) {
1187 +               rmd = rmd_next;
1188 +               rmd_next = rmd_next->chain.next;
1189 +
1190 +               dataref = atomic_read(&skb_shinfo(rmd->skb)->dataref);
1191 +               EIP_ASSERT(dataref > 0);
1192 +               
1193 +               if (dataref == 1) {
1194 +                       eip_rmd_requeue(rmd);
1195 +               } else {
1196 +                       rmd->chain.next = rmd->head->busy_list;
1197 +                       rmd->head->busy_list = rmd;
1198 +               }
1199 +       }
1200 +}
1201 +static void eip_rx_tasklet(unsigned long arg)
1202 +{
1203 +       EIP_RMD *rmd, *rmd_next;
1204 +       unsigned long flags;
1205 +       short svc, queued;
1206 +       int   needs_reschedule;
1207 +
1208 +       if (eip_rx_tasklet_locked) /* we don't want the tasklet to do anything when we are finishing */
1209 +           return;
1210 +
1211 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
1212 +               rmd = eip_rx->head[svc].busy_list;
1213 +               eip_rx->head[svc].busy_list = NULL;
1214 +               eip_rmd_reclaim(rmd);
1215 +       }
1216 +
1217 +       spin_lock_irqsave(&eip_rx->lock, flags);
1218 +       rmd = eip_rx->irq_list;
1219 +       eip_rx->irq_list = NULL;
1220 +       eip_rx->irq_list_nr = 0;
1221 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
1222 +       
1223 +       eip_rmd_reclaim(rmd);
1224 +
1225 +       needs_reschedule = 0;
1226 +
1227 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
1228 +               /* the plan is: allocate some more if possible, or steal some dvma space from those on the EIP_BUSY_LIST */
1229 +               queued = EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats);
1230 +
1231 +               EIP_ASSERT(queued >= 0 && queued <= EIP_RMD_MAX_NR);    
1232 +               
1233 +               if (queued < EIP_RMD_ALLOC_THRESH) {
1234 +                       short allocated = EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats);
1235 +                       short how_many; 
1236 +
1237 +                       EIP_ASSERT(allocated >= 0 && allocated <= EIP_RMD_MAX_NR);
1238 +                       
1239 +                       if (likely(allocated < eip_rx->rmd_max_nr)) {
1240 +
1241 +                               how_many = (((allocated / EIP_RMD_ALLOC_STEP) + 1) * EIP_RMD_ALLOC_STEP);
1242 +                               if (how_many > eip_rx->rmd_max_nr)
1243 +                                       how_many = eip_rx->rmd_max_nr;
1244 +
1245 +                               for (; allocated < how_many &&  
1246 +                                                       (eip_rmd_alloc_queue(svc, allocated * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
1247 +                                                                             GFP_ATOMIC, EP_NO_ALLOC|EP_NO_SLEEP) == 0) ; allocated++);
1248 +                               if ( allocated != how_many ) {
1249 +                                       eip_rx->reschedule++;
1250 +                                       needs_reschedule = 1;
1251 +                               }
1252 +                       } else {
1253 +                               /* steal how_many rmds and put them on the aside list */
1254 +                               how_many = EIP_RMD_ALLOC_THRESH - queued;
1255 +
1256 +                               EIP_ASSERT(how_many >= 0 && how_many <= EIP_RMD_ALLOC_THRESH);
1257 +                               
1258 +                               rmd_next = eip_rx->head[svc].busy_list;
1259 +                               eip_rx->head[svc].busy_list = NULL;
1260 +
1261 +                               while (how_many-- && rmd_next) {
1262 +                                       rmd = rmd_next;
1263 +                                       rmd_next = rmd_next->chain.next;
1264 +
1265 +                                       if (eip_rmd_alloc_replace(rmd, svc, GFP_ATOMIC)) {
1266 +                                               rmd_next = rmd;
1267 +                                               break;
1268 +                                       }
1269 +                               }
1270 +                               eip_rx->head[svc].busy_list = rmd_next;
1271 +                               if ( how_many )
1272 +                                       needs_reschedule = 1;
1273 +                       }
1274 +               }
1275 +       }
1276 +       
1277 +       if (needs_reschedule) 
1278 +       {
1279 +               if ( !timer_pending (&eip_rx_tasklet_timer)) 
1280 +                       mod_timer (&eip_rx_tasklet_timer, lbolt);
1281 +       }
1282 +}
1283 +static void eip_rx_tasklet_resched(unsigned long arg)
1284 +{
1285 +       tasklet_schedule(&eip_rx->tasklet);     
1286 +}
1287 +
1288 +static inline void eip_tmd_init(EIP_TMD * tmd, unsigned long buff_base, EIP_TMD_HEAD * head, unsigned long buff_len,
1289 +                               int dvma_idx)
1290 +{
1291 +       tmd->dvma_idx = dvma_idx;
1292 +       tmd->dma_base = buff_base;
1293 +       tmd->dma_len = -1;
1294 +       tmd->skb = NULL;
1295 +       tmd->head = head;
1296 +       tmd->chain.next = NULL;
1297 +
1298 +       if (tmd->head != &eip_tx->head[EIP_TMD_STD]) {
1299 +               tmd->nmd.nmd_len = buff_len;
1300 +               eip_tmd_load(tmd);
1301 +       } else  {
1302 +               tmd->nmd.nmd_len  = -1;
1303 +               tmd->nmd.nmd_addr = 0;
1304 +       }
1305 +}
1306 +
1307 +static inline EIP_TMD *eip_tmd_get(int id)
1308 +{
1309 +       unsigned long flags;
1310 +       EIP_TMD *tmd = NULL;
1311 +       spin_lock_irqsave(&eip_tx->lock, flags);
1312 +       while ((tmd = eip_tx->head[id].tmd) == NULL) {
1313 +               spin_unlock_irqrestore(&eip_tx->lock, flags);
1314 +               if (ep_enable_txcallbacks(eip_tx->xmtr) == 0) {
1315 +
1316 +                       spin_lock_irqsave (&eip_tx->lock, flags);
1317 +                       if (eip_tx->head[id].tmd == NULL) {
1318 +                               __EIP_DBG_PRINTF("Cannot get a TMD on head %d ... stopping queue\n", id);
1319 +                               
1320 +                               eip_stop_queue ();
1321 +                               
1322 +                               spin_unlock_irqrestore (&eip_tx->lock, flags);
1323 +
1324 +                               return NULL;
1325 +                       }
1326 +                       spin_unlock_irqrestore (&eip_tx->lock, flags);
1327 +               }
1328 +
1329 +               ep_disable_txcallbacks(eip_tx->xmtr);
1330 +               spin_lock_irqsave(&eip_tx->lock, flags);
1331 +       }
1332 +       eip_tx->head[id].tmd = tmd->chain.next;
1333 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
1334 +       atomic_dec(&tmd->head->stats);
1335 +       return tmd;
1336 +}
1337 +
1338 +static inline void eip_tmd_put(EIP_TMD * tmd)
1339 +{
1340 +       unsigned long flags;
1341 +
1342 +       tmd->skb = NULL;
1343 +
1344 +       spin_lock_irqsave(&eip_tx->lock, flags);
1345 +       tmd->chain.next = tmd->head->tmd;
1346 +       tmd->head->tmd = tmd;
1347 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
1348 +       atomic_inc(&tmd->head->stats);
1349 +
1350 +       eip_start_queue();
1351 +
1352 +       EIP_DBG_PRINTF(EIP_DBG_TMD_QUEUE, "TMD [%p] : REQUEUED\n", tmd);
1353 +}
1354 +static inline void eip_tmd_load(EIP_TMD * tmd)
1355 +{
1356 +       EP_RAILMASK rmask = tx_railmask;
1357 +
1358 +       __eip_tmd_load(tmd, &rmask);
1359 +       
1360 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : LOADED\n", tmd);
1361 +}
1362 +static inline void eip_tmd_unload(EIP_TMD * tmd)
1363 +{
1364 +       __eip_tmd_unload(tmd);
1365 +       
1366 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : UNLOADED\n", tmd);
1367 +}
1368 +static inline void eip_tmd_free(EIP_TMD * tmd)
1369 +{
1370 +       eip_buff_free(tmd->dma_base, tmd->nmd.nmd_len);
1371 +       
1372 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "TMD [%p] : FREED\n", tmd);
1373 +       
1374 +       EIP_STAT_ALLOC_SUB(&tmd->head->stats, 1);
1375 +}
1376 +
1377 +/* tmd on a separate block */
1378 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD * tmd, EIP_TMD_HEAD * head, int dvma_idx)
1379 +{
1380 +       eip_tmd_init(tmd, 0, head, -1, dvma_idx);
1381 +
1382 +       eip_tmd_put(tmd);
1383 +
1384 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1385 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1386 +       return tmd;
1387 +}
1388 +/* tmd on the buffer */
1389 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD * head, int dvma_idx)
1390 +{
1391 +       EIP_TMD *tmd;
1392 +       unsigned long buff_base;
1393 +
1394 +       if (!(buff_base = eip_buff_alloc(tx_copybreak_max + sizeof(EIP_TMD), GFP_KERNEL)))
1395 +               return NULL;
1396 +
1397 +       tmd = (EIP_TMD *) (buff_base + tx_copybreak_max);
1398 +       eip_tmd_init(tmd, buff_base, head, tx_copybreak_max, dvma_idx);
1399 +
1400 +       eip_tmd_put(tmd);
1401 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1402 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1403 +       return tmd;
1404 +}
1405 +
1406 +/* ipf are on the buffer */
1407 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD * head, int dvma_idx)
1408 +{
1409 +       EIP_TMD *tmd;
1410 +       unsigned long buff_base;
1411 +
1412 +       if (!(buff_base = eip_buff_alloc(EIP_SVC_BIGGEST_LEN, GFP_KERNEL)))
1413 +               return NULL;
1414 +
1415 +       tmd = (EIP_TMD *) (buff_base + EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG));
1416 +       eip_tmd_init(tmd, buff_base, head, EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG), dvma_idx);
1417 +
1418 +       eip_tmd_put(tmd);
1419 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1420 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1421 +       return tmd;
1422 +}
1423 +
1424 +static int eip_tmds_alloc()
1425 +{
1426 +       int i;
1427 +       int page_nr;
1428 +       EIP_TMD *tmd;
1429 +
1430 +       page_nr = EIP_DVMA_PAGES(tx_copybreak_max);
1431 +
1432 +       eip_tx->head[EIP_TMD_COPYBREAK].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1433 +       
1434 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_COPYBREAK]);
1435 +
1436 +       for (i = 0; i < EIP_TMD_NR; i++) {
1437 +               if (!eip_tmd_alloc_queue_copybreak(&eip_tx->head[EIP_TMD_COPYBREAK], i * page_nr))
1438 +                       return -ENOMEM;
1439 +       }
1440 +
1441 +       eip_tx->head[EIP_TMD_STD].handle =
1442 +           eip_dma_reserve(EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN) * eip_tx->tmd_max_nr, EP_PERM_READ);
1443 +       
1444 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_STD]);
1445 +
1446 +       tmd = kmalloc(sizeof(EIP_TMD) * EIP_TMD_NR, GFP_KERNEL);
1447 +       if (!tmd) {
1448 +               EIP_ERR_PRINTF("Cannot ALLOCATE %d of tmds\n", (int) sizeof(EIP_TMD) * EIP_TMD_NR);
1449 +               return -ENOMEM;
1450 +       }
1451 +       
1452 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1453 +       
1454 +       for (i = 0; i < EIP_TMD_NR; i++, tmd++) {
1455 +               if (!eip_tmd_alloc_queue(tmd, &eip_tx->head[EIP_TMD_STD], i * page_nr))
1456 +                       return -ENOMEM;
1457 +       }
1458 +
1459 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1460 +
1461 +       eip_tx->head[EIP_TMD_AGGREG].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1462 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_AGGREG]);
1463 +
1464 +       for (i = 0; i < EIP_TMD_NR; i++) {
1465 +               if (!eip_tmd_alloc_queue_aggreg(&eip_tx->head[EIP_TMD_AGGREG], i * page_nr))
1466 +                       return -ENOMEM;
1467 +       }
1468 +       return 0;
1469 +}
1470 +
1471 +static void eip_tmds_free(void) 
1472 +{
1473 +       EIP_TMD *tmd;
1474 +       EIP_TMD *tmd_next;
1475 +       int i;
1476 +       
1477 +       ep_poll_transmits(eip_tx->xmtr);
1478 +
1479 +       for (i = 0 ; i < 3 ; i++) {
1480 +again:
1481 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats) < EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats)) {
1482 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "Polling XMTR [%p]\n", eip_tx->xmtr);       
1483 +                       ep_poll_transmits(eip_tx->xmtr);
1484 +                       goto again;
1485 +               }
1486 +       }
1487 +       /* everything should be queued */
1488 +        if ((tmd = eip_tx->head[EIP_TMD_COPYBREAK].tmd)) {
1489 +            do {
1490 +                       tmd_next = tmd->chain.next;
1491 +                        eip_tmd_unload(tmd);
1492 +                       
1493 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1494 +                       
1495 +                        eip_tmd_free(tmd);
1496 +            } while (tmd_next && (tmd = tmd_next));
1497 +        }
1498 +       
1499 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_COPYBREAK] release DVMA [%p]\n",
1500 +                       eip_tx->head[EIP_TMD_COPYBREAK].handle);        
1501 +       
1502 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_COPYBREAK].handle);
1503 +       
1504 +       /* these ones have been allocated as a block */
1505 +       if ((tmd = eip_tx->head[EIP_TMD_STD].tmd)) {
1506 +               do {
1507 +                       if (tmd->dvma_idx == 0 ) {
1508 +                               kfree(tmd);
1509 +                               /* eip_tmd_free(tmd); */
1510 +                               EIP_STAT_ALLOC_SUB(&tmd->head->stats, EIP_TMD_NR);
1511 +                               tmd_next = NULL;
1512 +                               EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_STD] BLOCK FREED\n", tmd); 
1513 +                       } else 
1514 +                               tmd_next = tmd->chain.next;
1515 +               } while (tmd_next && (tmd = tmd_next));
1516 +       }
1517 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_STD] release DVMA [%p]\n", 
1518 +                       eip_tx->head[EIP_TMD_STD].handle);      
1519 +       
1520 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_STD].handle);
1521 +       
1522 +       if ((tmd = eip_tx->head[EIP_TMD_AGGREG].tmd)) {
1523 +               do {
1524 +                       tmd_next = tmd->chain.next;
1525 +
1526 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1527 +                       
1528 +                       eip_tmd_unload(tmd);
1529 +                       eip_tmd_free(tmd);
1530 +               } while (tmd_next && (tmd = tmd_next));
1531 +       }
1532 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_AGGREG] release DVMA\n", 
1533 +                       eip_tx->head[EIP_TMD_AGGREG].handle);   
1534 +       
1535 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_AGGREG].handle);
1536 +
1537 +       ep_free_xmtr(eip_tx->xmtr);
1538 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "XMTR[%p] : FREED\n", eip_tx->xmtr);
1539 +}
1540 +
1541 +static inline void eip_ipf_skb_add(EIP_IPFRAG * ipf, struct sk_buff *skb)
1542 +{
1543 +       int align = EIP_IP_ALIGN(skb->len);
1544 +       
1545 +       
1546 +       if (ipf->dma_len == -1) {       /* like a virgin; touched for the very first time */
1547 +               do_gettimeofday(&ipf->timestamp);
1548 +               /* FIXME: put that in the release tmd code */
1549 +               ipf->frag_nr            = 0;
1550 +               ipf->dma_len            = 0;
1551 +               ipf->datagram_len       = -1;
1552 +               ipf->dma_correction     = 0;
1553 +       }
1554 +       
1555 +       memcpy((void *) (ipf->dma_base + ipf->dma_len), skb->data, skb->len);
1556 +       
1557 +       if (ipf->datagram_len == -1) {
1558 +               struct iphdr * iph = skb->nh.iph;
1559 +               int offset = ntohs(iph->frag_off);
1560 +
1561 +               /* last one ? ;  offset & ~IP_OFFSET = IP fragment flags */
1562 +               if (((offset & ~IP_OFFSET) & IP_MF) == 0) {
1563 +                       offset &= IP_OFFSET;
1564 +                       offset <<= 3;    
1565 +                       ipf->datagram_len = offset + htons(iph->tot_len) - sizeof(struct iphdr);
1566 +               }
1567 +       }
1568 +
1569 +       skb->next                       = ipf->skb;
1570 +       ipf->skb                        = skb;
1571 +       ipf->payload.Data[ipf->frag_nr] = skb->len;
1572 +       ipf->dma_len                   += align;
1573 +       ipf->dma_correction            += align - skb->len  + ETH_HLEN + sizeof(struct iphdr);
1574 +       /* FIXME: count goes wrong if the IP header has options */
1575 +
1576 +       ipf->frag_nr++;
1577 +
1578 +       EIP_DBG2(EIP_DBG_TMD, eip_ipf_display, ipf, "ADDED skb[%p] len %db ALIGNED(%db)\n", skb, skb->len, EIP_IP_ALIGN(skb->len));
1579 +}
1580 +
1581 +#define eip_ipf_hasroom(ipf, skb) ((ipf->dma_len + EIP_IP_ALIGN(skb->len) < eip_tx->sysctl_ipfrag_copybreak))
1582 +int eip_hard_start_xmit(struct sk_buff *skb, struct net_device *devnet) 
1583 +{
1584 +
1585 +       EIP_TMD *tmd;
1586 +       EP_NMD nmd;
1587 +       struct iphdr *iph;
1588 +       int j;
1589 +
1590 +       if (skb->destructor){
1591 +               atomic_inc(&eip_tx->destructor);
1592 +               tasklet_schedule(&eip_tx->tasklet);
1593 +       } 
1594 +
1595 +       if (!(iph = eip_ipfrag_get(skb->data)) || (eip_tx->sysctl_aggregation == 0)) { /* not ip fragment */
1596 +no_aggreg:
1597 +               j = (skb->len < eip_tx->sysctl_copybreak) ? EIP_TMD_COPYBREAK : EIP_TMD_STD; /* j = head id */
1598 +               
1599 +               if (!(tmd = eip_tmd_get(j))) {
1600 +                       if (skb->destructor)
1601 +                               atomic_dec(&eip_tx->destructor);
1602 +                       return 1;
1603 +               }
1604 +               
1605 +               tmd->dma_len    = skb->len;
1606 +               tmd->skb        = skb;
1607 +               tmd->skb->next  = NULL;
1608 +               tmd->chain.next = NULL;
1609 +               
1610 +               if (j == EIP_TMD_COPYBREAK) {
1611 +                       memcpy((void *) tmd->dma_base, skb->data, skb->len);
1612 +                       
1613 +                       ep_nmd_subset(&nmd, &tmd->nmd, 0, skb->len);
1614 +#ifdef EIP_MORE_STATS
1615 +                       eip_tx->sent_copybreak++;
1616 +#endif
1617 +                       return eip_do_xmit(tmd, &nmd, NULL);
1618 +               }
1619 +               tmd->dma_base           = (unsigned long) skb->data;
1620 +               tmd->nmd.nmd_len        = skb->len;
1621 +               eip_tmd_load(tmd);
1622 +
1623 +#ifdef EIP_MORE_STATS
1624 +               eip_tx->sent_std++;
1625 +#endif
1626 +               return eip_do_xmit(tmd, &tmd->nmd, NULL);
1627 +       } else if ( skb->len > EIP_SVC_BIGGEST_LEN/2 ) { 
1628 +               /* don't aggregate when we have a full mtu of data */
1629 +               /* or more than 32k ; in this case it is cheaper   */
1630 +               /* to just map the buffer and send it              */
1631 +               goto no_aggreg;
1632 +       } else {
1633 +               EIP_IPFRAG *ipf = NULL;
1634 +               unsigned long flags;
1635 +               struct list_head *l;
1636 +               struct iphdr *iph2;
1637 +               int i;
1638 +               __u16 id = iph->id;
1639 +               __u32 saddr = iph->saddr;
1640 +               __u32 daddr = iph->daddr;
1641 +               __u8 protocol = iph->protocol;
1642 +
1643 +                       EIP_DBG(EIP_DBG_IPH, eip_iph_display, iph);
1644 +
1645 +               j = 0;
1646 +
1647 +               /* at this point we cannot have a full-MTU-sized aggregated packet */
1648 +               EIP_ASSERT_RET(skb->len < eip_tx->sysctl_ipfrag_copybreak, 0);
1649 +
1650 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1651 +               list_for_each(l, &eip_tx->ipfrag) {
1652 +                       ipf = list_entry(l, EIP_IPFRAG, list);
1653 +                       iph2 = eip_ipfrag_get((char *) ipf->dma_base);
1654 +                       
1655 +                        EIP_ASSERT(iph2);
1656 +                       
1657 +                       if ((iph2->id == id) && 
1658 +                                       (get_unaligned(&iph2->saddr) == saddr) && 
1659 +                                       (get_unaligned(&iph2->daddr) == daddr) && 
1660 +                                       (iph2->protocol == protocol)) {
1661 +                               /* || timeout */
1662 +                               if (eip_ipf_hasroom(ipf, skb)) {
1663 +                                       
1664 +                                       eip_ipf_skb_add(ipf, skb);
1665 +                                       
1666 +                                       if ((ipf->datagram_len != -1) && 
1667 +                                                       (ipf->dma_len == (ipf->datagram_len + ipf->dma_correction) || 
1668 +                                                        ipf->frag_nr == (128 / sizeof(uint32_t)))) {
1669 +send_aggreg:
1670 +                                               ipf->payload.Data[ipf->frag_nr] = 0;
1671 +                                               list_del(&ipf->list);
1672 +                                               eip_tx->ipfrag_count--;
1673 +                                               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1674 +                                       
1675 +                                               ep_nmd_subset(&nmd, &ipf->nmd, 0, ipf->dma_len);
1676 +                                               
1677 +#ifdef EIP_MORE_STATS
1678 +                                               eip_tx->sent_aggreg++;
1679 +#endif
1680 +                                               if ((i = eip_do_xmit((EIP_TMD *) ipf, &nmd, &ipf->payload)) != EP_SUCCESS)
1681 +                                                       return i;
1682 +                                               if (j)
1683 +                                                       goto new;
1684 +                                               return 0;
1685 +                                       }
1686 +                                       
1687 +                                       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1688 +                                       tasklet_schedule(&eip_tx->tasklet);
1689 +                                       return 0;
1690 +                               } else {
1691 +                                       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF[%p] : FULL %db full - sending it\n", ipf, ipf->dma_len);
1692 +                                       j = 1;
1693 +                                       goto send_aggreg;
1694 +                               }
1695 +                       }
1696 +               }
1697 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1698 +new:
1699 +               if (!(ipf = (EIP_IPFRAG *) eip_tmd_get(EIP_TMD_AGGREG)))
1700 +                       goto no_aggreg;
1701 +
1702 +               eip_ipf_skb_add(ipf, skb);
1703 +               
1704 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1705 +               list_add_tail(&ipf->list, &eip_tx->ipfrag);
1706 +               eip_tx->ipfrag_count++;
1707 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1708 +               tasklet_schedule(&eip_tx->tasklet);
1709 +       }
1710 +       return 0;
1711 +}
1712 +static int eip_do_xmit(EIP_TMD * tmd, EP_NMD *nmd, EP_PAYLOAD *payload)
1713 +{
1714 +       EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
1715 +       int         attr = EP_SET_DATA((EP_NO_SLEEP | EP_NO_INTERRUPT | EP_NO_FAILOVER), EP_TYPE_SVC_INDICATOR, EP_SVC_EIP);
1716 +       unsigned long flags;
1717 +       int svc, rnum;
1718 +
1719 +       SIZE_TO_SVC(nmd->nmd_len, svc);
1720 +
1721 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1722 +       /* EIP_DBG(eip_eiph_display(eiph)); */
1723 +       
1724 +       if (unlikely (eiph->h_dhost.ip_bcast))
1725 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_availrails(eip_tx->xmtr));
1726 +       else
1727 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_noderails(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr)));
1728 +
1729 +       if (rnum >= 0)
1730 +               attr = EP_SET_PREFRAIL(attr, rnum);
1731 +
1732 +       /* add to inuse list  */
1733 +       spin_lock_irqsave (&eip_tx->lock, flags);
1734 +       list_add_tail (&tmd->chain.link, &eip_tx->inuse);
1735 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1736 +
1737 +       /* ENOMEM EINVAL ECONNREFUSED ESUCCESS */
1738 +       svc = (unlikely(eiph->h_dhost.ip_bcast)) ? 
1739 +               ep_multicast_message(eip_tx->xmtr, -1, -1, NULL, EIP_SVC_EP(svc), attr | EP_NOT_MYSELF, eip_txhandler, tmd, payload, nmd, 1) :
1740 +
1741 +               ep_transmit_message(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr), EIP_SVC_EP(svc),  attr, eip_txhandler, tmd, payload, nmd, 1);
1742 +               
1743 +       if (likely(svc == EP_SUCCESS))
1744 +               return 0;
1745 +       else if (svc == ENOMEM) {
1746 +               EIP_ERR_PRINTF("%s", "Memory allocation error ...\n");
1747 +               eip_tx->errors++;
1748 +       }
1749 +       else
1750 +       {
1751 +               /* EP_EINVAL occurs when the svc has a bad value or the iovec has too many frag; */
1752 +               /* we don't use the latter option here                                        */
1753 +               __EIP_DBG_PRINTF("TMD [%p] : DROPPED skb[%p] status = %d from ep_?_message\n", tmd, tmd->skb, svc);
1754 +
1755 +               eip_tx->dropped++;
1756 +       }
1757 +
1758 +       eip_txhandler(NULL, tmd, -99);
1759 +
1760 +       /* Quadrics GNAT sw-elan/4397 - since we will "never" be able to send this packet to the */
1761 +       /* destination node, we drop it and feign success - this has the same behaviour as an    */
1762 +       /* ethernet where it sticks the packet on the wire, but no-one receives it.              */
1763 +       return 0;
1764 +}
1765 +
1766 +static void eip_txhandler(EP_TXD * txd, void *arg, EP_STATUS status)
1767 +{
1768 +       EIP_TMD *tmd = (EIP_TMD *) arg;
1769 +       struct sk_buff *skb_next;
1770 +       unsigned long flags;
1771 +       int svc = 0;
1772 +       
1773 +       if (likely(status == EP_SUCCESS)) {
1774 +               SIZE_TO_SVC(tmd->dma_len, svc);
1775 +               eip_tx->dma[svc]++;
1776 +               eip_tx->bytes += tmd->dma_len;
1777 +               
1778 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1779 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1780 +                       eip_tx->packets += ipf->frag_nr;
1781 +               } else
1782 +                       eip_tx->packets++;
1783 +       } else {
1784 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1785 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1786 +                       eip_tx->dropped += ipf->frag_nr;
1787 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler aggreg packet dropped status = %d\n", status);
1788 +               } else  {
1789 +                       eip_tx->dropped++;
1790 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler packet dropped status = %d\n", status);
1791 +               }
1792 +       }
1793 +
1794 +       if (tmd->head == &eip_tx->head[EIP_TMD_STD]) {
1795 +               eip_tmd_unload(tmd);
1796 +               tmd->dma_base = 0;
1797 +               tmd->nmd.nmd_len = -1;
1798 +       }
1799 +               
1800 +       tmd->dma_len = -1;
1801 +       
1802 +       svc = 0;
1803 +       while (tmd->skb) {
1804 +               svc++;
1805 +               
1806 +               if (tmd->skb->destructor)
1807 +                       atomic_dec(&eip_tx->destructor);
1808 +
1809 +               skb_next = tmd->skb->next;
1810 +               dev_kfree_skb_any(tmd->skb);
1811 +               tmd->skb = skb_next;
1812 +       }
1813 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF/TMD [%p] : %d skb RELEASE/FREED\n", tmd, svc);
1814 +
1815 +       /* remove from inuse list  */
1816 +       spin_lock_irqsave (&eip_tx->lock, flags);
1817 +       list_del (&tmd->chain.link);
1818 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1819 +
1820 +       eip_tmd_put(tmd);
1821 +}
1822 +
1823 +static void eip_tx_tasklet(unsigned long arg)
1824 +{
1825 +       struct timeval now;
1826 +       unsigned long flags;
1827 +       EIP_IPFRAG *ipf, *ipfq = NULL;
1828 +       EP_NMD nmd;
1829 +       struct list_head *list;
1830 +       struct list_head *tmp;
1831 +       char resched = 0;
1832 +       char poll = 1;
1833 +       
1834 +       do_gettimeofday(&now);
1835 +       
1836 +       spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1837 +       if (eip_tx->ipfrag_count) {
1838 +               list_for_each_safe(list, tmp, &eip_tx->ipfrag) {
1839 +                       ipf = list_entry(list, EIP_IPFRAG, list);
1840 +                       /* delta = (((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - ipf->timestamp.tv_usec; */
1841 +                       if (((((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - 
1842 +                                       ipf->timestamp.tv_usec) >= (1000UL * eip_tx->sysctl_ipfrag_to)) {
1843 +                               list_del(&ipf->list);
1844 +                               eip_tx->ipfrag_count--;
1845 +                               ipf->chain.next = (EIP_TMD *) ipfq;
1846 +                               ipfq = ipf;
1847 +                       }
1848 +               }
1849 +       }
1850 +       if (eip_tx->ipfrag_count)
1851 +               resched = 1;
1852 +       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1853 +
1854 +       while (ipfq) {
1855 +               poll = 0;
1856 +
1857 +               ep_nmd_subset(&nmd, &ipfq->nmd, 0, ipfq->dma_len);
1858 +               
1859 +               ipfq->payload.Data[ipfq->frag_nr] = 0;
1860 +               
1861 +#ifdef EIP_MORE_STATS
1862 +               eip_tx->sent_aggreg++;
1863 +#endif
1864 +               ipf = (EIP_IPFRAG *) ipfq->chain.next;
1865 +               eip_do_xmit((EIP_TMD *) ipfq, &nmd, &ipfq->payload);
1866 +               ipfq = ipf;
1867 +       }
1868 +       
1869 +       if (poll)
1870 +               ep_poll_transmits(eip_tx->xmtr);
1871 +
1872 +       if (atomic_read(&eip_tx->destructor) || resched )
1873 +               tasklet_schedule(&eip_tx->tasklet);
1874 +}
1875 +void eip_start_queue()
1876 +{
1877 +       if (netif_queue_stopped(eip_tx->net_device)) {
1878 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Waking up %s queue\n", eip_tx->net_device->name);
1879 +               netif_wake_queue(eip_tx->net_device);
1880 +       }
1881 +}
1882 +void eip_stop_queue()
1883 +{
1884 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Stopping %s queue\n", eip_tx->net_device->name);
1885 +       netif_stop_queue(eip_tx->net_device);
1886 +}
1887 +
1888 +static int eip_open(struct net_device *devnet)
1889 +{
1890 +       if (devnet->flags & IFF_PROMISC)
1891 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s entering in promiscuous mode\n", devnet->name);
1892 +
1893 +       netif_start_queue(devnet);
1894 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x up\n",
1895 +                       devnet->name, (devnet->dev_addr[0]) & 0xff,
1896 +                       (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1897 +                       (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1898 +       return 0;
1899 +}
1900 +
1901 +static int eip_close(struct net_device *devnet)
1902 +{
1903 +       if (devnet->flags & IFF_PROMISC)
1904 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s leaving promiscuous mode\n", devnet->name);
1905 +
1906 +       netif_stop_queue(devnet);
1907 +
1908 +       eip_rx_tasklet(0);
1909 +
1910 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x down\n", 
1911 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
1912 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1913 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1914 +       return 0;
1915 +}
1916 +
1917 +static struct net_device_stats *eip_get_stats(struct net_device *devnet)
1918 +{
1919 +       static struct net_device_stats stats;
1920 +
1921 +       stats.rx_packets = eip_rx->packets;
1922 +       stats.rx_bytes = eip_rx->bytes;
1923 +       stats.rx_errors = eip_rx->errors;
1924 +       stats.rx_dropped = eip_rx->dropped;
1925 +
1926 +       stats.tx_packets = eip_tx->packets;
1927 +       stats.tx_bytes = eip_tx->bytes;
1928 +       stats.tx_errors = eip_tx->errors;
1929 +       stats.tx_dropped = eip_tx->dropped;
1930 +       return &stats;
1931 +}
1932 +
1933 +static int eip_change_mtu(struct net_device *devnet, int mtu)
1934 +{
1935 +       if (mtu <= EIP_MTU_MAX) {
1936 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "MTU size changed from %d to %d\n", devnet->mtu, mtu);
1937 +               devnet->mtu = mtu;
1938 +       }
1939 +       return 0;
1940 +}
1941 +
1942 +#ifdef MODULE
1943 +int eip_init(void)
1944 +{
1945 +       struct net_device *devnet;
1946 +       int errno = 0;
1947 +
1948 +       eip_rx_dropping = 0; 
1949 +       eip_rx_tasklet_locked = 1;
1950 +
1951 +       /* timer up but not started */
1952 +       init_timer (&eip_rx_tasklet_timer);
1953 +       eip_rx_tasklet_timer.function = eip_rx_tasklet_resched;
1954 +       eip_rx_tasklet_timer.data     = (unsigned long) 0;
1955 +       eip_rx_tasklet_timer.expires  = lbolt + hz;
1956 +
1957 +       devnet = alloc_etherdev(sizeof(EIP_RX) + sizeof(EIP_TX));
1958 +       if (!devnet) {
1959 +               EIP_ERR_PRINTF("Unable to ALLOCATE etherdev structure\n");
1960 +               return -ENOMEM;
1961 +       }
1962 +       strcpy (devnet->name, "eip0");
1963 +
1964 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Enabling aggregation code\n");
1965 +       devnet->change_mtu = eip_change_mtu;
1966 +       devnet->mtu = EIP_MTU_MAX;
1967 +       devnet->open = eip_open;
1968 +       devnet->stop = eip_close;
1969 +       devnet->hard_start_xmit = eip_hard_start_xmit;
1970 +       devnet->get_stats = eip_get_stats;
1971 +
1972 +        /* devnet->features |= (NETIF_F_DYNALLOC); */
1973 +        /* devnet->features = (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA); */
1974 +        /* devnet->features |= (NETIF_F_SG|NETIF_F_FRAGLIST|NETIF_F_HIGHDMA|NETIF_F_HW_CSUM); */
1975 +
1976 +       eip_rx = (EIP_RX *) devnet->priv;
1977 +       eip_tx = (EIP_TX *) (eip_rx + 1);
1978 +
1979 +       /* instance 0 */
1980 +       eip_tx->ep_system = ep_system();
1981 +       if (eip_tx->ep_system == NULL) {
1982 +               EIP_ERR_PRINTF("kernel comms for iface %s does not exist\n", devnet->name);
1983 +               errno = -ENXIO;
1984 +               goto out;
1985 +       }
1986 +       if (ep_waitfor_nodeid(eip_tx->ep_system) == ELAN_INVALID_NODE) {
1987 +               EIP_ERR_PRINTF("network position not found\n");
1988 +               errno = -EAGAIN;
1989 +               goto out;
1990 +       }
1991 +       eip_tx->xmtr = ep_alloc_xmtr(eip_tx->ep_system);
1992 +       if (!eip_tx->xmtr) {
1993 +               EIP_ERR_PRINTF("Cannot create allocated transmitter - maybe cable is disconnected\n");
1994 +               errno = -EAGAIN;
1995 +               goto out;
1996 +       }
1997 +       /* assign MAC address */
1998 +       *((int *) &devnet->dev_addr[4]) = htons(ep_nodeid(eip_tx->ep_system));
1999 +       eip_rx->net_device = devnet;
2000 +       eip_tx->net_device = devnet;
2001 +
2002 +       atomic_set(&eip_tx->destructor, 0);
2003 +
2004 +       if ((tmd_max >= EIP_TMD_MIN_NR) && (tmd_max <= EIP_TMD_MAX_NR)) {
2005 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tmd_max_nr to %d\n", tmd_max);
2006 +               eip_tx->tmd_max_nr = tmd_max;
2007 +       } else {
2008 +               EIP_ERR_PRINTF("parameter error : %d <= tmd_max(%d) <= %d using default %d\n", 
2009 +                               EIP_TMD_MIN_NR, tmd_max, EIP_TMD_MAX_NR, EIP_TMD_MAX_NR);
2010 +               eip_tx->tmd_max_nr = EIP_TMD_MAX_NR;
2011 +       }
2012 +
2013 +       if ((rmd_max >= EIP_RMD_MIN_NR) && (rmd_max <= EIP_RMD_MAX_NR)) {
2014 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting rmd_max_nr to %d\n", rmd_max);
2015 +               eip_rx->rmd_max_nr = rmd_max;
2016 +       } else {
2017 +               EIP_ERR_PRINTF("parameter error : %d <= rmd_max(%d) <= %d using default %d\n", EIP_RMD_MIN_NR,
2018 +                          rmd_max, EIP_RMD_MAX_NR, EIP_RMD_MAX_NR);
2019 +               eip_rx->rmd_max_nr = EIP_RMD_MAX_NR;
2020 +       }
2021 +
2022 +       if ((rx_envelope_nr > 0) && (rx_envelope_nr <= 1024)) { /* > 1024 don't be silly */
2023 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting rx_envelope_nr to %d\n", rx_envelope_nr);
2024 +       } else {
2025 +               EIP_ERR_PRINTF("parameter error : 0 < rx_envelope_nr(%d) <= 1024 using default %d\n",
2026 +                          rx_envelope_nr, EIP_RX_ENVELOPE_NR);
2027 +               rx_envelope_nr = EIP_RX_ENVELOPE_NR;
2028 +       }
2029 +
2030 +       if (tx_copybreak_max <= EIP_TX_COPYBREAK_MAX) {
2031 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tx_copybreak_max to %d\n", tx_copybreak_max);
2032 +       } else {
2033 +               EIP_ERR_PRINTF("parameter error : tx_copybreak_max > %d using default %d\n",
2034 +                          EIP_TX_COPYBREAK_MAX, EIP_TX_COPYBREAK_MAX);
2035 +               tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
2036 +       }
2037 +#ifdef EIP_MORE_STATS
2038 +       eip_tx->sent_copybreak = 0;
2039 +       eip_tx->sent_std = 0;
2040 +       eip_tx->sent_aggreg = 0;
2041 +#endif
2042 +
2043 +       eip_tx->ipfrag_count = 0;
2044 +       eip_aggregation_set(1);
2045 +       eip_rx_granularity_set(rx_granularity);
2046 +       eip_tx_copybreak_set(EIP_TX_COPYBREAK);
2047 +       eip_ipfrag_to_set(EIP_IPFRAG_TO);
2048 +       eip_ipfrag_copybreak_set(EIP_IPFRAG_COPYBREAK);
2049 +
2050 +       spin_lock_init(&eip_tx->lock);
2051 +       spin_lock_init(&eip_tx->ipfraglock);
2052 +       spin_lock_init(&eip_rx->lock);
2053 +       tasklet_init(&eip_rx->tasklet, eip_rx_tasklet, 0);
2054 +       tasklet_init(&eip_tx->tasklet, eip_tx_tasklet, 0);
2055 +       INIT_LIST_HEAD(&eip_tx->ipfrag);
2056 +       INIT_LIST_HEAD(&eip_tx->inuse);
2057 +
2058 +       /* if we fail here we cannot do much yet; waiting for rcvr remove code in ep. */
2059 +       errno = eip_tmds_alloc();
2060 +       if (errno)
2061 +               goto out;
2062 +
2063 +       errno = eip_rmds_alloc();
2064 +       if (errno)
2065 +               goto out;
2066 +
2067 +       errno = eip_stats_init();
2068 +       if (errno)
2069 +               goto out;
2070 +
2071 +       if (ep_svc_indicator_set(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
2072 +               EIP_ERR_PRINTF("Cannot set the service indicator\n");
2073 +               errno = -EINVAL;
2074 +               goto out;
2075 +       }
2076 +
2077 +       eip_rx_tasklet_locked = 0;
2078 +       tasklet_schedule(&eip_rx->tasklet);
2079 +
2080 +       SET_MODULE_OWNER(eip_tx->net_device);
2081 +
2082 +       if (register_netdev(devnet)) {
2083 +               printk("eip: failed to register netdev\n");
2084 +               goto out;
2085 +       }
2086 +
2087 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x ready\n", 
2088 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
2089 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
2090 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
2091 +
2092 +       return 0;
2093 +      out:
2094 +       unregister_netdev(devnet);
2095 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 25)
2096 +       kfree(devnet);
2097 +#else
2098 +       free_netdev(devnet);
2099 +#endif
2100 +
2101 +       return errno;
2102 +}
2103 +void eip_exit(void)
2104 +{
2105 +       int i;
2106 +
2107 +       eip_rx_dropping = 1;                /* means that new messages won't be sent to the TCP stack */
2108 +       eip_rx_tasklet_locked = 1;
2109 +
2110 +       netif_stop_queue(eip_tx->net_device);
2111 +
2112 +       if (ep_svc_indicator_clear(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
2113 +               EIP_ERR_PRINTF("Cannot unset the service indicator\n");
2114 +       }
2115 +
2116 +       schedule_timeout(10);
2117 +       
2118 +       del_timer_sync (&eip_rx_tasklet_timer);
2119 +
2120 +       tasklet_disable(&eip_rx->tasklet);
2121 +       tasklet_disable(&eip_tx->tasklet);
2122 +
2123 +       tasklet_kill(&eip_tx->tasklet);
2124 +       tasklet_kill(&eip_rx->tasklet);
2125 +
2126 +        eip_rmds_free();
2127 +        eip_tmds_free();
2128 +
2129 +       /* verify that all RMDs/TMDs were actually freed */
2130 +       for (i = 0 ; i < EIP_SVC_NR ; i++) {
2131 +               if ( EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats) != 0 )
2132 +                       EIP_ERR_PRINTF("%d RMDs not FREED on SVC[%d]\n", EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), i);
2133 +       }
2134 +       for (i = 0 ; i < 3 ; i++) {
2135 +               if ( EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats) != 0 )
2136 +                       EIP_ERR_PRINTF("%d TMDs not freed on TX HEAD[%d]\n", EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), i);
2137 +               
2138 +       }
2139 +       unregister_netdev(eip_tx->net_device);
2140 +       kfree(eip_tx->net_device);
2141 +       
2142 +       eip_stats_cleanup();
2143 +}
2144 +
2145 +module_init(eip_init);
2146 +module_exit(eip_exit);
2147 +
2148 +MODULE_PARM(eipdebug, "i");
2149 +MODULE_PARM_DESC(eipdebug, "Set debug flags");
2150 +
2151 +MODULE_PARM(rx_envelope_nr, "i");
2152 +MODULE_PARM_DESC(rx_enveloppe_nr, "Number of allocated enveloppe on the rx side");
2153 +
2154 +MODULE_PARM(tx_copybreak_max, "i");
2155 +MODULE_PARM_DESC(tx_copybreak_max, "Maximum size of the tx copybreak limit (default 512)");
2156 +
2157 +MODULE_PARM(tmd_max, "i");
2158 +MODULE_PARM(rmd_max, "i");
2159 +MODULE_PARM_DESC(tmd_max, "Maximun number of transmit buffers (default 64)");
2160 +MODULE_PARM_DESC(rmd_max, "Maximun number of receive buffers (default 64)");
2161 +
2162 +MODULE_PARM(tx_railmask, "i");
2163 +MODULE_PARM_DESC(tx_railmask, "Mask of which rails transmits can be queued on");
2164 +
2165 +MODULE_AUTHOR("Quadrics Ltd.");
2166 +MODULE_DESCRIPTION("Elan IP driver");
2167 +MODULE_LICENSE("GPL");
2168 +#endif /* MODULE */
2169 +
2170 +/*
2171 + * Local variables:
2172 + * c-file-style: "linux"
2173 + * End:
2174 + */
2175 Index: linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_linux.h
2176 ===================================================================
2177 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/eip/eip_linux.h    2004-02-23 16:02:56.000000000 -0500
2178 +++ linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_linux.h 2005-07-28 14:52:52.775689832 -0400
2179 @@ -0,0 +1,399 @@
2180 +/*
2181 + *    Copyright (c) 2003 by Quadrics Ltd.
2182 + * 
2183 + *    For licensing information please see the supplied COPYING file
2184 + *
2185 + */
2186 +
2187 +#ident "$Id: eip_linux.h,v 1.46.2.1 2004/10/01 10:49:38 mike Exp $"
2188 +
2189 +#ifndef __EIP_LINUX_H
2190 +#define __EIP_LINUX_H
2191 +
2192 +#define EIP_WATERMARK                  (0xfab1e)
2193 +
2194 +#define EIP_PAGES(s)                   (((s - 1) >> PAGE_SHIFT) + 1)
2195 +#define EIP_DVMA_PAGES(s)              ((s < PAGE_SIZE) ? EIP_PAGES(s) + 1 : EIP_PAGES(s))
2196 +
2197 +#define EIP_SVC_SMALLEST_LEN           (1 << 9)        /* 512 */
2198 +#define EIP_SVC_BIGGEST_LEN            (1 << 16)       /* 64k */
2199 +
2200 +#define EIP_SVC_SMALLEST               (0)
2201 +#define EIP_SVC_BIGGEST                        (7)
2202 +
2203 +#define EIP_SVC_NR                     (8)
2204 +#define EIP_SVC_EP(s)                  (s + EP_MSG_SVC_EIP512)
2205 +
2206 +#define EIP_STAT_ALLOC_SHIFT           (8)
2207 +#define EIP_STAT_ALLOC_GET(atomicp)    ((int) atomic_read(atomicp) >> EIP_STAT_ALLOC_SHIFT)
2208 +#define EIP_STAT_ALLOC_ADD(atomicp, v) (atomic_add((v << EIP_STAT_ALLOC_SHIFT), atomicp))
2209 +#define EIP_STAT_ALLOC_SUB(atomicp, v) (atomic_sub((v << EIP_STAT_ALLOC_SHIFT), atomicp))
2210 +
2211 +#define EIP_STAT_QUEUED_MASK           (0xff)
2212 +#define EIP_STAT_QUEUED_GET(atomicp)   ((int) atomic_read(atomicp) & EIP_STAT_QUEUED_MASK)
2213 +
2214 +#define EIP_RMD_NR                     (8)
2215 +#define EIP_RMD_MIN_NR                 (8)
2216 +#define EIP_RMD_MAX_NR                 (64)    /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */
2217 +
2218 +#define EIP_RMD_ALLOC_STEP             (8)
2219 +#define EIP_RMD_ALLOC_THRESH           (16)
2220 +
2221 +#define EIP_RMD_ALLOC                  (1)
2222 +#define EIP_RMD_REPLACE                        (0)
2223 +
2224 +#define EIP_TMD_NR                     (64)
2225 +#define EIP_TMD_MIN_NR                 (16)
2226 +#define EIP_TMD_MAX_NR                 (64)    /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */
2227 +
2228 +#define EIP_TMD_TYPE_NR                        (3)
2229 +#define EIP_TMD_COPYBREAK              (0x0)
2230 +#define EIP_TMD_STD                    (0x1)
2231 +#define EIP_TMD_AGGREG                 (0x2)
2232 +
2233 +#define EIP_TX_COPYBREAK               (512)
2234 +#define EIP_TX_COPYBREAK_MAX           (1024)
2235 +
2236 +#define EIP_IPFRAG_TO                  (50)    /* time out before a frag is sent in msec */
2237 +#define EIP_IPFRAG_COPYBREAK           (EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG) - EIP_HEADER_PAD)
2238 +
2239 +#define EIP_RX_ENVELOPE_NR             ((EIP_RMD_MAX_NR*EIP_SVC_NR)/2)
2240 +#define EIP_RX_GRANULARITY             (1)
2241 +
2242 +#define EIP_IP_ALIGN(X)                        (((X) + (15)) & ~(15))
2243 +#define EIP_EXTRA                      roundup (sizeof(EIP_RMD), 256)
2244 +#define EIP_RCV_DMA_LEN(s)                     (s - EIP_EXTRA - EIP_HEADER_PAD)
2245 +#define EIP_MTU_MAX                    (EIP_RCV_DMA_LEN(EIP_SVC_BIGGEST_LEN) - (ETH_HLEN))
2246 +
2247 +#define SIZE_TO_SVC(s, svc)                                                                    \
2248 +       do {                                                                                    \
2249 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 9)))  {svc = 0;break;}   \
2250 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 10))) {svc = 1;break;}   \
2251 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 11))) {svc = 2;break;}   \
2252 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 12))) {svc = 3;break;}   \
2253 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 13))) {svc = 4;break;}   \
2254 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 14))) {svc = 5;break;}   \
2255 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 15))) {svc = 6;break;}   \
2256 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 16))) {svc = 7;break;}   \
2257 +                                       svc = -666;                                             \
2258 +                                       EIP_ASSERT(1 == 0);                                     \
2259 +       } while (0)
2260 +
2261 +extern int eipdebug;
2262 +#define EIP_ASSERT_ON 
2263 +/* #define NO_DEBUG */
2264 +
2265 +
2266 +/* ######################## */
2267 +#ifdef NO_DEBUG
2268 +#define __EIP_DBG_PRINTF(fmt, args...)
2269 +#define EIP_DBG_PRINTF(flag, fmt, args...)
2270 +#else
2271 +
2272 +#define EIP_DBG_RMD            0x1
2273 +#define EIP_DBG_TMD            0x2
2274 +#define EIP_DBG_RMD_HEAD       0x4
2275 +#define EIP_DBG_TMD_HEAD       0x8
2276 +#define EIP_DBG_EIPH           0x10
2277 +#define EIP_DBG_IPH            0x20
2278 +#define EIP_DBG_RMD_EP_DVMA    0x40
2279 +#define EIP_DBG_TMD_EP_DVMA    0x80
2280 +#define EIP_DBG_EP_DVMA                (EIP_DBG_RMD_EP_DVMA|EIP_DBG_TMD_EP_DVMA)
2281 +#define EIP_DBG_MEMALLOC       0x100
2282 +#define EIP_DBG_MEMFREE                0x200
2283 +#define EIP_DBG_RMD_QUEUE      0x400
2284 +#define EIP_DBG_TMD_QUEUE      0x800
2285 +#define EIP_DBG_GEN            0x1000
2286 +#define EIP_DBG_DEBUG          0x2000
2287 +       
2288 +#define __EIP_DBG_PRINTF(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUFFER, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
2289 +#define EIP_DBG_PRINTF(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTF(fmt, ## args):(void)0)
2290 +
2291 +#define __EIP_DBG_PRINTK(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUF_CON, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
2292 +#define EIP_DBG_PRINTK(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTF(fmt, ## args):(void)0)
2293 +           
2294 +#define EIP_ERR_PRINTF(fmt, args...)   __EIP_DBG_PRINTK("!!! ERROR !!! - " fmt, ## args)
2295 +
2296 +       
2297 +#define EIP_DBG2(flag, fn, fn_arg, fmt, args...)                                                               \
2298 +    if (unlikely(eipdebug & flag)) {                                                                           \
2299 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
2300 +            (void)(fn)(fn_arg);                                                                                \
2301 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
2302 +    }
2303 +
2304 +
2305 +#define EIP_DBG(flag, fn, args...)                                                             \
2306 +    if (unlikely(eipdebug & flag)) {                                                           \
2307 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s\n", smp_processor_id(), __func__);   \
2308 +            (void)(fn)(args);                                                                  \
2309 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s :\n", smp_processor_id(), __func__); \
2310 +    }
2311 +#endif /* NO_DEBUG */
2312 +
2313 +
2314 +#ifdef EIP_ASSERT_ON
2315 +
2316 +#define __EIP_ASSERT_PRINT(exp)                                \
2317 +               eipdebug = 0xffff;                              \
2318 +               EIP_ERR_PRINTF("ASSERT : %s, %s::%d\n",         \
2319 +                      #exp, __BASE_FILE__, __LINE__);          
2320 +
2321 +#define EIP_ASSERT(exp)                                                        \
2322 +               if (!(exp)) {                                           \
2323 +                       __EIP_ASSERT_PRINT(exp);                        \
2324 +                       netif_stop_queue(eip_tx->net_device);           \
2325 +               }
2326 +
2327 +#define EIP_ASSERT2(exp, f, arg)                                       \
2328 +       do {                                                            \
2329 +               if (!(exp)) {                                           \
2330 +                       __EIP_ASSERT_PRINT(exp);                        \
2331 +                       f(arg);                                         \
2332 +               }                                                       \
2333 +       } while (0)
2334 +
2335 +#define EIP_ASSERT_BUG(exp)                                            \
2336 +       do {                                                            \
2337 +               if (!(exp)) {                                           \
2338 +                       __EIP_ASSERT_PRINT(exp);                        \
2339 +                       BUG();                                          \
2340 +               }                                                       \
2341 +       } while (0)
2342 +
2343 +#define EIP_ASSERT_GOTO(exp, label, f, arg)                            \
2344 +       do {                                                            \
2345 +               if (!(exp)) {                                           \
2346 +                       __EIP_ASSERT_PRINT(exp);                        \
2347 +                       f(arg);                                         \
2348 +                       goto label;                                     \
2349 +               }                                                       \
2350 +       } while (0)
2351 +
2352 +#define EIP_ASSERT_RET(exp, ret)                                       \
2353 +       do {                                                            \
2354 +               if (!(exp)) {                                           \
2355 +                       __EIP_ASSERT_PRINT(exp);                        \
2356 +                       return ret;                                     \
2357 +               }                                                       \
2358 +       } while (0)
2359 +
2360 +#define EIP_ASSERT_RETURN(exp, f, arg)                                 \
2361 +       do {                                                            \
2362 +               if (!(exp)) {                                           \
2363 +                       __EIP_ASSERT_PRINT(exp);                        \
2364 +                       f(arg);                                         \
2365 +                       return;                                         \
2366 +               }                                                       \
2367 +       } while (0)
2368 +
2369 +#define EIP_ASSERT_RETNULL(exp, f, arg)                                        \
2370 +       do {                                                            \
2371 +               if (!(exp)) {                                           \
2372 +                       __EIP_ASSERT_PRINT(exp);                        \
2373 +                       f(arg);                                         \
2374 +                       return NULL;                                    \
2375 +               }                                                       \
2376 +       } while (0)
2377 +
2378 +#else
2379 +
2380 +#define EIP_ASSERT(exp)                do {} while(0)
2381 +#define EIP_ASSERT_OUT(exp)            do {} while(0)
2382 +#define EIP_ASSERT_RETURN(exp)                 do {} while(0)
2383 +#define EIP_ASSERT_RETNULL(exp)                do {} while(0)
2384 +#define EIP_ASSERT_BUG(exp)            do {} while(0)
2385 +
2386 +#endif /* EIP_ASSERT */
2387 +
2388 +
2389 +
2390 +typedef struct {
2391 +       u_short ip_bcast;
2392 +       u_short ip_inst;
2393 +       u_short ip_addr;
2394 +} EIP_ADDRESS;
2395 +
2396 +typedef struct {
2397 +       EIP_ADDRESS h_dhost;
2398 +       EIP_ADDRESS h_shost;
2399 +       u_short h_sap;
2400 +} EIP_HEADER;
2401 +#define EIP_HEADER_PAD                 (2)
2402 +
2403 +typedef struct eip_proc_fs {
2404 +       const char *name;
2405 +       struct proc_dir_entry **parent;
2406 +       read_proc_t *read;
2407 +       write_proc_t *write;
2408 +       unsigned char allocated;
2409 +       struct proc_dir_entry *entry;
2410 +} EIP_PROC_FS;
2411 +
2412 +#define EIP_PROC_ROOT_DIR              "eip"
2413 +
2414 +#define EIP_PROC_DEBUG_DIR             "debug"
2415 +#define EIP_PROC_DEBUG_RX_FLUSH                "rx_flush"
2416 +#define EIP_PROC_DEBUG_TX_FLUSH                "tx_flush"
2417 +
2418 +#define EIP_PROC_AGGREG_DIR            "aggregation"
2419 +#define EIP_PROC_AGGREG_ONOFF          "enable"
2420 +#define EIP_PROC_AGGREG_TO             "timeout"
2421 +#define EIP_PROC_AGGREG_COPYBREAK      "copybreak"
2422 +
2423 +#define EIP_PROC_TX_COPYBREAK          "tx_copybreak"
2424 +#define EIP_PROC_STATS                 "stats"
2425 +#define EIP_PROC_RX_GRAN               "rx_granularity"
2426 +#define EIP_PROC_TX_RAILMASK           "tx_railmask"
2427 +#define EIP_PROC_TMD_INUSE             "tmd_inuse"
2428 +#define EIP_PROC_EIPDEBUG              "eipdebug"
2429 +#define EIP_PROC_CHECKSUM               "checksum"
2430 +
2431 +/* RX */
2432 +/* dma_len is used to keep the len of a received packet */
2433 +/* nmd.nmd_len is the max dma that can be received      */
2434 +/*                                                      */
2435 +struct eip_rmd {
2436 +       struct sk_buff *skb;
2437 +
2438 +       EP_NMD nmd;
2439 +       u16 dvma_idx;
2440 +
2441 +       EP_RXD *rxd;
2442 +       struct eip_rmd_head *head;
2443 +       union {
2444 +               struct list_head link;                          /* when on "busy" list */
2445 +               struct eip_rmd  *next;                          /* all other lists */
2446 +       } chain;
2447 +};
2448 +typedef struct eip_rmd EIP_RMD;
2449 +struct eip_rmd_head {
2450 +       EP_NMH *handle;
2451 +
2452 +       EP_RCVR *rcvr;
2453 +       EIP_RMD *busy_list;
2454 +
2455 +       /* stats */
2456 +       atomic_t stats;
2457 +       unsigned long dma;
2458 +};
2459 +
2460 +typedef struct eip_rmd_head EIP_RMD_HEAD;
2461 +typedef struct eip_rx {
2462 +       struct eip_rmd_head head[EIP_SVC_NR];
2463 +
2464 +       EIP_RMD *irq_list;
2465 +       short    irq_list_nr;   
2466 +
2467 +       /* stats */
2468 +       unsigned long packets;
2469 +       unsigned long bytes;
2470 +       unsigned long errors;
2471 +       unsigned long dropped;
2472 +       unsigned long reschedule;
2473 +
2474 +       spinlock_t lock;
2475 +       struct tasklet_struct tasklet;
2476 +       unsigned char rmd_max_nr;
2477 +       unsigned char sysctl_granularity;
2478 +       struct net_device *net_device;
2479 +} EIP_RX;
2480 +
2481 +/* TX */
2482 +/* dma_len_max is the maximum len for a given DMA                      */
2483 +/* where nmd.nmd_len is the len of the packet to send ~> than skb->len */
2484 +typedef struct eip_ipfrag_handle {
2485 +       /* common with tmd */
2486 +       unsigned long dma_base;
2487 +       int dma_len;
2488 +       EP_NMD nmd;
2489 +       u16 dvma_idx;
2490 +
2491 +       struct sk_buff *skb;
2492 +       struct eip_tmd_head *head;
2493 +       union {
2494 +               struct list_head link;                          /* when on "busy" list */
2495 +               struct eip_tmd  *next;                          /* all other lists */
2496 +       } chain;
2497 +
2498 +       /* private */
2499 +       struct list_head list;
2500 +       struct timeval timestamp;
2501 +       unsigned int frag_nr;
2502 +       int datagram_len; /* Ip data */
2503 +       int dma_correction;
2504 +       EP_PAYLOAD payload;
2505 +} EIP_IPFRAG;
2506 +
2507 +struct eip_tmd {
2508 +       unsigned long dma_base;
2509 +       int dma_len;
2510 +       EP_NMD nmd;
2511 +       u16 dvma_idx;
2512 +
2513 +       struct sk_buff *skb;
2514 +       struct eip_tmd_head *head;
2515 +       union {
2516 +               struct list_head link;                          /* when on "busy" list */
2517 +               struct eip_tmd  *next;                          /* all other lists */
2518 +       } chain;
2519 +};
2520 +
2521 +struct eip_tmd_head {
2522 +       EP_NMH *handle;
2523 +
2524 +       struct eip_tmd *tmd;
2525 +       atomic_t stats;
2526 +};
2527 +
2528 +typedef struct eip_tmd EIP_TMD;
2529 +typedef struct eip_tmd_head EIP_TMD_HEAD;
2530 +
2531 +/* #define EIP_MORE_STATS */
2532 +
2533 +typedef struct eip_tx {
2534 +       struct net_device *net_device;
2535 +       EP_XMTR *xmtr;
2536 +       EP_SYS *ep_system;
2537 +
2538 +       struct eip_tmd_head head[EIP_TMD_TYPE_NR];
2539 +       struct list_head inuse;
2540 +       atomic_t destructor;
2541 +
2542 +       /* stats */
2543 +       unsigned long packets;
2544 +       unsigned long bytes;
2545 +       unsigned long errors;
2546 +       unsigned long dropped;
2547 +       unsigned long dma[EIP_SVC_NR];
2548 +       
2549 +#ifdef EIP_MORE_STATS
2550 +       unsigned long sent_copybreak;
2551 +       unsigned long sent_std;
2552 +       unsigned long sent_aggreg;
2553 +#endif
2554 +
2555 +       unsigned char tmd_max_nr;
2556 +
2557 +       unsigned short sysctl_copybreak;
2558 +       unsigned short sysctl_ipfrag_to;
2559 +       unsigned short sysctl_ipfrag_copybreak;
2560 +       unsigned short sysctl_aggregation;
2561 +
2562 +       unsigned short ipfrag_count;
2563 +       struct list_head ipfrag;
2564 +       spinlock_t ipfraglock;
2565 +
2566 +       spinlock_t lock;
2567 +       struct tasklet_struct tasklet;
2568 +} EIP_TX;
2569 +
2570 +/* =============================================== */
2571 +    /* unsigned long   multicast; */
2572 +#endif                         /* __EIP_LINUX_H */
2573 +
2574 +/*
2575 + * Local variables:
2576 + * c-file-style: "linux"
2577 + * End:
2578 + */
2579 Index: linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_stats.c
2580 ===================================================================
2581 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/eip/eip_stats.c    2004-02-23 16:02:56.000000000 -0500
2582 +++ linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_stats.c 2005-07-28 14:52:52.776689680 -0400
2583 @@ -0,0 +1,375 @@
2584 +/*
2585 + *    Copyright (c) 2003 by Quadrics Ltd.
2586 + * 
2587 + *    For licensing information please see the supplied COPYING file
2588 + *
2589 + */
2590 +
2591 +/*
2592 + * $Id: eip_stats.c,v 1.34.2.2 2005/03/20 12:01:22 david Exp $
2593 + * $Source: /cvs/master/quadrics/eipmod/eip_stats.c,v $
2594 + */
2595 +
2596 +#include <qsnet/kernel.h>
2597 +#include <linux/module.h>
2598 +
2599 +#include <elan/epcomms.h>
2600 +
2601 +#include <linux/netdevice.h>
2602 +
2603 +#include <linux/kernel.h>
2604 +#include <linux/proc_fs.h>
2605 +
2606 +#include <asm/atomic.h>
2607 +
2608 +#include <qsnet/procfs_linux.h>
2609 +
2610 +#include "eip_linux.h"
2611 +#include "eip_stats.h"
2612 +
2613 +extern EIP_RX *eip_rx;
2614 +extern EIP_TX *eip_tx;
2615 +extern int tx_copybreak_max;
2616 +extern EP_RAILMASK tx_railmask;
2617 +extern int  eip_checksum_state;
2618 +extern void eip_stop_queue(void);
2619 +extern void eip_start_queue(void);
2620 +
2621 +static int eip_stats_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2622 +{
2623 +       int i, outlen = 0;
2624 +
2625 +       *buf = '\0';
2626 +       strcat(buf, "\n");
2627 +       strcat(buf, "--------------------------------------------+------------+-----------------+\n");
2628 +       strcat(buf, "    SKB/DMA    |               | Rx         | Tx         |  TMD TYPE       |\n");
2629 +       strcat(buf, "--------------------------------------------+------------|-----------------+\n");
2630 +
2631 +       i = 0;
2632 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #1[%3.3d/%3.3d/%3.3d] |\n",
2633 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2634 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2635 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2636 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2637 +                eip_tx->tmd_max_nr);
2638 +
2639 +       i++;
2640 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #2[%3.3d/%3.3d/%3.3d] |\n",
2641 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2642 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2643 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2644 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2645 +               eip_tx->tmd_max_nr);
2646 +
2647 +       i++;
2648 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #3[%3.3d/%3.3d/%3.3d] |\n",
2649 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2650 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2651 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2652 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2653 +               eip_tx->tmd_max_nr);
2654 +
2655 +       i++;
2656 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld +-----------------+\n",
2657 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2658 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2659 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2660 +
2661 +       i++;
2662 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2663 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2664 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2665 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2666 +
2667 +       i++;
2668 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2669 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2670 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2671 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2672 +
2673 +       i++;
2674 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2675 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2676 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2677 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2678 +
2679 +       i++;
2680 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2681 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2682 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2683 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2684 +
2685 +       strcat(buf, "--------------------------------------------+------------+\n");
2686 +       sprintf(buf + strlen(buf), " RMD IRQ %4.4d                    %10lu | %10lu |\n",
2687 +               eip_rx->irq_list_nr, 
2688 +               eip_rx->packets, eip_tx->packets);
2689 +       strcat(buf, "--------------------------------------------+------------+\n");
2690 +
2691 +#ifdef EIP_MORE_STATS
2692 +       strcat(buf, "\n");
2693 +       sprintf(buf + strlen(buf), " Copybreak %10ld Std %10ld Aggreg %10ld\n",
2694 +                       eip_tx->sent_copybreak, eip_tx->sent_std, eip_tx->sent_aggreg);
2695 +#endif
2696 +
2697 +
2698 +       strcat(buf, "\n");
2699 +       sprintf(buf + strlen(buf), "Rx bytes: %lu (%lu Mb) errors: %lu dropped: %lu reschedule: %lu\n",
2700 +               eip_rx->bytes, eip_rx->bytes / (1024 * 1024), eip_rx->errors, eip_rx->dropped, eip_rx->reschedule);
2701 +       sprintf(buf + strlen(buf), "Tx bytes: %lu (%lu Mb) errors: %lu dropped: %lu\n",
2702 +               eip_tx->bytes, eip_tx->bytes / (1024 * 1024), eip_tx->errors, eip_tx->dropped);
2703 +       strcat(buf, "\n");
2704 +
2705 +       outlen = strlen(buf);
2706 +       ASSERT(outlen < PAGE_SIZE);
2707 +       *eof = 1;
2708 +       return outlen;
2709 +}
2710 +
2711 +void eip_stats_dump(void)
2712 +{
2713 +    int eof;
2714 +
2715 +    char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2716 +
2717 +    if (buf == NULL)
2718 +    {
2719 +       printk("no memory to produce eip_stats\n");
2720 +       return;
2721 +    }
2722 +
2723 +    eip_stats_read(buf, NULL, 0, 0, &eof, NULL);
2724 +
2725 +    printk(buf);
2726 +
2727 +    kfree(buf);
2728 +}
2729 +
2730 +static int eip_stats_write(struct file *file, const char *buf, unsigned long count, void *data)
2731 +{
2732 +       int i;
2733 +       unsigned long flags;
2734 +
2735 +       spin_lock_irqsave(&eip_rx->lock, flags);
2736 +       eip_rx->packets = 0;
2737 +       eip_rx->bytes = 0;
2738 +       eip_rx->errors = 0;
2739 +       eip_rx->dropped = 0;
2740 +       eip_rx->reschedule = 0;
2741 +       for (i = 0; i < EIP_SVC_NR; eip_rx->head[i].dma = 0, i++);
2742 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
2743 +
2744 +       spin_lock_irqsave(&eip_tx->lock, flags);
2745 +       eip_tx->packets = 0;
2746 +       eip_tx->bytes = 0;
2747 +       eip_tx->errors = 0;
2748 +       eip_tx->dropped = 0;
2749 +#ifdef EIP_MORE_STATS
2750 +       eip_tx->sent_copybreak = 0;
2751 +       eip_tx->sent_std = 0;
2752 +       eip_tx->sent_aggreg = 0;
2753 +#endif
2754 +       for (i = 0; i < EIP_SVC_NR; eip_tx->dma[i] = 0, i++);
2755 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
2756 +
2757 +       return count;
2758 +}
2759 +
2760 +#define                eip_stats_var_write(name)                                                                       \
2761 +static int eip_stats_##name##_write(struct file *file, const char *buf, unsigned long count, void *data)       \
2762 +{                                                                                                              \
2763 +       char * b = (char *) buf;                                                                                \
2764 +       *(b + count) = '\0';                                                                                    \
2765 +       eip_##name##_set((int) simple_strtoul(b, NULL, 10));                                                    \
2766 +       return count;                                                                                           \
2767 +}
2768 +
2769 +#define        eip_stats_var_read(name, var)                                                                   \
2770 +static int eip_stats_##name##_read(char *buf, char **start, off_t off, int count, int *eof, void *data)                \
2771 +{                                                                                                              \
2772 +       sprintf(buf, "%d\n", var);                                                                              \
2773 +       *eof = 1;                                                                                               \
2774 +       return strlen(buf);                                                                                     \
2775 +}
2776 +
2777 +
2778 +#define                eip_stats_var_set(name, min, max, default, var)                                                                 \
2779 +void eip_##name##_set(int i)                                                                                                   \
2780 +{                                                                                                                              \
2781 +       if ( (i >= min) && (i <= max)) {                                                                                        \
2782 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting " #name " to %d\n", i);                                                    \
2783 +               var =(unsigned short) i;                                                                                        \
2784 +       }                                                                                                                       \
2785 +       else {                                                                                                                  \
2786 +               EIP_ERR_PRINTF("parameter error : %d <= " #name "(%d) <= %d using default %d\n", min, i, (int) max, (int) default);     \
2787 +       }                                                                                                                       \
2788 +}
2789 +
2790 +eip_stats_var_set(tx_copybreak, 0, tx_copybreak_max, EIP_TX_COPYBREAK, eip_tx->sysctl_copybreak);
2791 +eip_stats_var_set(rx_granularity, 1, EIP_RMD_MIN_NR, EIP_RX_GRANULARITY, eip_rx->sysctl_granularity);
2792 +eip_stats_var_set(tx_railmask, 0, EP_RAILMASK_ALL, EP_RAILMASK_ALL, tx_railmask);
2793 +eip_stats_var_set(ipfrag_to, 0, (1 << 16), EIP_IPFRAG_TO, eip_tx->sysctl_ipfrag_to);
2794 +eip_stats_var_set(aggregation, 0, 1, 1, eip_tx->sysctl_aggregation);
2795 +eip_stats_var_set(ipfrag_copybreak, 0, EIP_IPFRAG_COPYBREAK, EIP_IPFRAG_COPYBREAK, eip_tx->sysctl_ipfrag_copybreak);
2796 +/* eip_stats_var_set(eipdebug, 0, , 0, eipdebug); */
2797 +
2798 +eip_stats_var_read(aggregation, eip_tx->sysctl_aggregation);
2799 +eip_stats_var_read(ipfrag_count, eip_tx->ipfrag_count);
2800 +eip_stats_var_read(ipfrag_to, eip_tx->sysctl_ipfrag_to);
2801 +eip_stats_var_read(ipfrag_copybreak, eip_tx->sysctl_ipfrag_copybreak);
2802 +eip_stats_var_read(tx_copybreak, eip_tx->sysctl_copybreak);
2803 +eip_stats_var_read(rx_granularity, eip_rx->sysctl_granularity);
2804 +eip_stats_var_read(tx_railmask, tx_railmask);
2805 +
2806 +eip_stats_var_write(aggregation);
2807 +eip_stats_var_write(ipfrag_to);
2808 +eip_stats_var_write(ipfrag_copybreak);
2809 +eip_stats_var_write(tx_copybreak);
2810 +eip_stats_var_write(rx_granularity);
2811 +eip_stats_var_write(tx_railmask);
2812 +
2813 +
2814 +static int eip_checksum_write(struct file *file, const char *buf, unsigned long count, void *data)
2815 +{
2816 +       char * b = (char *) buf;
2817 +       int    value;
2818 +
2819 +       *(b + count) = '\0';
2820 +
2821 +       value = (int) simple_strtoul(b, NULL, 10);
2822 +       if  ((value >= CHECKSUM_NONE) && (value <= CHECKSUM_UNNECESSARY)) 
2823 +               eip_checksum_state = value;
2824 +       else 
2825 +               EIP_ERR_PRINTF("%d <= checksum(%d) <= %d using old value %d\n", CHECKSUM_NONE, value, CHECKSUM_UNNECESSARY, eip_checksum_state);
2826 +
2827 +       return count;
2828 +}
2829 +
2830 +static int eip_checksum_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2831 +{
2832 +       switch ( eip_checksum_state ) 
2833 +       {
2834 +       case 0  : sprintf(buf, "0 CHECKSUM_NONE\n");                      break;
2835 +       case 1  : sprintf(buf, "1 CHECKSUM_HW\n");                        break;
2836 +       case 2  : sprintf(buf, "2 CHECKSUM_UNNECESSARY\n");               break;
2837 +       default : sprintf(buf, "%d INVALID VALUE\n", eip_checksum_state); break;
2838 +       }
2839 +       *eof = 1;
2840 +       return strlen(buf);
2841 +}
2842 +
2843 +static int eip_stats_eipdebug_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2844 +{
2845 +       *buf = '\0';
2846 +       sprintf(buf + strlen(buf), "0x%x\n", eipdebug);
2847 +       *eof = 1;
2848 +       return strlen(buf);
2849 +}
2850 +static int eip_stats_eipdebug_write(struct file *file, const char *buf, unsigned long count, void *data)
2851 +{
2852 +       char * p = (char *) buf;
2853 +       *(p + count - 1) = '\0';
2854 +       eipdebug = simple_strtoul(p, NULL, 0);
2855 +       __EIP_DBG_PRINTK("Setting eipdebug to 0x%x\n", eipdebug);
2856 +       return count;
2857 +}
2858 +
2859 +static int eip_stats_tmd_inuse_read(char *page, char **start, off_t off, int count, int *eof, void *data)
2860 +{
2861 +       struct list_head *lp;
2862 +       unsigned long flags;
2863 +       unsigned int len = 0;
2864 +
2865 +       spin_lock_irqsave(&eip_tx->lock, flags);
2866 +       list_for_each (lp, &eip_tx->inuse) {
2867 +               EIP_TMD *tmd = list_entry (lp, EIP_TMD, chain.link);
2868 +               EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
2869 +               
2870 +                len += sprintf(page+len, "tmd=%p id=%d len=%d\n",
2871 +                              tmd, eiph ? ntohs(eiph->h_dhost.ip_addr) : -1,
2872 +                              tmd->dma_len);
2873 +
2874 +                if (len + 40 >= count)
2875 +                        break;
2876 +        }
2877 +        spin_unlock_irqrestore(&eip_tx->lock, flags);
2878 +
2879 +       return qsnet_proc_calc_metrics (page, start, off, count, eof, len);
2880 +}
2881 +
2882 +static int eip_stats_debug_rx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2883 +{
2884 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing rx ...\n");
2885 +       tasklet_schedule(&eip_rx->tasklet);
2886 +       return count;
2887 +}
2888 +static int eip_stats_debug_tx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2889 +{
2890 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing tx ... %d tmds reclaimed\n", ep_enable_txcallbacks(eip_tx->xmtr));
2891 +       ep_disable_txcallbacks(eip_tx->xmtr);
2892 +       tasklet_schedule(&eip_tx->tasklet);
2893 +       return count;
2894 +}
2895 +
2896 +#define EIP_PROC_PARENT_NR     (3)
2897 +/* NOTE : the parents should be declared before the children */
2898 +static EIP_PROC_FS eip_procs[] = {
2899 +       /* {name, parent, read fn, write fn, allocated, entry}, */
2900 +       {EIP_PROC_ROOT_DIR, &qsnet_procfs_root, NULL, NULL, 0, NULL},
2901 +       {EIP_PROC_DEBUG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},
2902 +       {EIP_PROC_AGGREG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},        /* end of parents */
2903 +       {EIP_PROC_STATS, &eip_procs[0].entry, eip_stats_read, eip_stats_write, 0, NULL},
2904 +       {EIP_PROC_TX_COPYBREAK, &eip_procs[0].entry, eip_stats_tx_copybreak_read, eip_stats_tx_copybreak_write, 0, NULL},
2905 +       {EIP_PROC_RX_GRAN, &eip_procs[0].entry, eip_stats_rx_granularity_read, eip_stats_rx_granularity_write, 0, NULL},
2906 +       {EIP_PROC_TX_RAILMASK, &eip_procs[0].entry, eip_stats_tx_railmask_read, eip_stats_tx_railmask_write, 0, NULL},
2907 +       {EIP_PROC_TMD_INUSE, &eip_procs[0].entry, eip_stats_tmd_inuse_read, NULL, 0, NULL},
2908 +       {EIP_PROC_EIPDEBUG, &eip_procs[0].entry, eip_stats_eipdebug_read, eip_stats_eipdebug_write, 0, NULL},
2909 +       {EIP_PROC_CHECKSUM, &eip_procs[0].entry, eip_checksum_read, eip_checksum_write, 0, NULL},
2910 +       {EIP_PROC_DEBUG_RX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_rx_flush, 0, NULL},
2911 +       {EIP_PROC_DEBUG_TX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_tx_flush, 0, NULL},
2912 +       {"ipfrag_count", &eip_procs[2].entry, eip_stats_ipfrag_count_read, NULL, 0, NULL},
2913 +       {EIP_PROC_AGGREG_TO, &eip_procs[2].entry, eip_stats_ipfrag_to_read, eip_stats_ipfrag_to_write, 0, NULL},
2914 +       {EIP_PROC_AGGREG_ONOFF, &eip_procs[2].entry, eip_stats_aggregation_read, eip_stats_aggregation_write, 0, NULL},
2915 +       {EIP_PROC_AGGREG_COPYBREAK, &eip_procs[2].entry, eip_stats_ipfrag_copybreak_read, eip_stats_ipfrag_copybreak_write, 0, NULL},
2916 +       {NULL, NULL, NULL, NULL, 1, NULL},
2917 +};
2918 +
2919 +int eip_stats_init(void)
2920 +{
2921 +       int p;
2922 +
2923 +       for (p = 0; !eip_procs[p].allocated; p++) {
2924 +               if (p < EIP_PROC_PARENT_NR)
2925 +                       eip_procs[p].entry = proc_mkdir(eip_procs[p].name, *eip_procs[p].parent);
2926 +               else
2927 +                       eip_procs[p].entry = create_proc_entry(eip_procs[p].name, 0, *eip_procs[p].parent);
2928 +
2929 +               if (!eip_procs[p].entry) {
2930 +                       EIP_ERR_PRINTF("%s\n", "Cannot allocate proc entry");
2931 +                       eip_stats_cleanup();
2932 +                       return -ENOMEM;
2933 +               }
2934 +
2935 +               eip_procs[p].entry->owner = THIS_MODULE;
2936 +               eip_procs[p].entry->write_proc = eip_procs[p].write;
2937 +               eip_procs[p].entry->read_proc = eip_procs[p].read;
2938 +               eip_procs[p].allocated = 1;
2939 +       }
2940 +       eip_procs[p].allocated = 0;
2941 +       return 0;
2942 +}
2943 +
2944 +void eip_stats_cleanup(void)
2945 +{
2946 +       int p;
2947 +       for (p = (sizeof (eip_procs)/sizeof (eip_procs[0]))-1; p >= 0; p--)
2948 +               if (eip_procs[p].allocated) {
2949 +                       EIP_DBG_PRINTF(EIP_DBG_GEN, "Removing %s from proc\n", eip_procs[p].name);
2950 +                       remove_proc_entry(eip_procs[p].name, *eip_procs[p].parent);
2951 +               }
2952 +}
2953 +
2954 +/*
2955 + * Local variables:
2956 + * c-file-style: "linux"
2957 + * End:
2958 + */
2959 Index: linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_stats.h
2960 ===================================================================
2961 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/eip/eip_stats.h    2004-02-23 16:02:56.000000000 -0500
2962 +++ linux-2.6.5-7.191/drivers/net/qsnet/eip/eip_stats.h 2005-07-28 14:52:52.776689680 -0400
2963 @@ -0,0 +1,22 @@
2964 +/*
2965 + *    Copyright (c) 2003 by Quadrics Ltd.
2966 + * 
2967 + *    For licensing information please see the supplied COPYING file
2968 + *
2969 + */
2970 +
2971 +#ident "$Id: eip_stats.h,v 1.14 2004/05/10 14:47:47 daniel Exp $"
2972 +
2973 +#ifndef __EIP_STATS_H
2974 +#define        __EIP_STATS_H
2975 +
2976 +int eip_stats_init(void);
2977 +void eip_stats_cleanup(void);
2978 +void eip_rx_granularity_set(int);
2979 +void eip_tx_copybreak_set(int);
2980 +void eip_ipfrag_to_set(int);
2981 +void eip_aggregation_set(int);
2982 +void eip_ipfrag_copybreak_set(int);
2983 +void eip_stats_dump(void);
2984 +
2985 +#endif                         /* __EIP_STATS_H */
2986 Index: linux-2.6.5-7.191/drivers/net/qsnet/eip/Makefile
2987 ===================================================================
2988 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/eip/Makefile       2004-02-23 16:02:56.000000000 -0500
2989 +++ linux-2.6.5-7.191/drivers/net/qsnet/eip/Makefile    2005-07-28 14:52:52.776689680 -0400
2990 @@ -0,0 +1,15 @@
2991 +#
2992 +# Makefile for Quadrics QsNet
2993 +#
2994 +# Copyright (c) 2002-2004 Quadrics Ltd
2995 +#
2996 +# File: drivers/net/qsnet/eip/Makefile
2997 +#
2998 +
2999 +
3000 +#
3001 +
3002 +obj-$(CONFIG_EIP)      += eip.o
3003 +eip-objs       := eip_linux.o eip_stats.o
3004 +
3005 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
3006 Index: linux-2.6.5-7.191/drivers/net/qsnet/eip/Makefile.conf
3007 ===================================================================
3008 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/eip/Makefile.conf  2004-02-23 16:02:56.000000000 -0500
3009 +++ linux-2.6.5-7.191/drivers/net/qsnet/eip/Makefile.conf       2005-07-28 14:52:52.777689528 -0400
3010 @@ -0,0 +1,10 @@
3011 +# Flags for generating QsNet Linux Kernel Makefiles
3012 +MODNAME                =       eip.o
3013 +MODULENAME     =       eip
3014 +KOBJFILES      =       eip_linux.o eip_stats.o
3015 +EXPORT_KOBJS   =       
3016 +CONFIG_NAME    =       CONFIG_EIP
3017 +SGALFC         =       
3018 +# EXTRALINES START
3019 +
3020 +# EXTRALINES END
3021 Index: linux-2.6.5-7.191/drivers/net/qsnet/eip/quadrics_version.h
3022 ===================================================================
3023 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/eip/quadrics_version.h     2004-02-23 16:02:56.000000000 -0500
3024 +++ linux-2.6.5-7.191/drivers/net/qsnet/eip/quadrics_version.h  2005-07-28 14:52:52.777689528 -0400
3025 @@ -0,0 +1 @@
3026 +#define QUADRICS_VERSION "4.31qsnet"
3027 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/bitmap.c
3028 ===================================================================
3029 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/bitmap.c      2004-02-23 16:02:56.000000000 -0500
3030 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/bitmap.c   2005-07-28 14:52:52.777689528 -0400
3031 @@ -0,0 +1,287 @@
3032 +/*
3033 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
3034 + *
3035 + *    For licensing information please see the supplied COPYING file
3036 + *
3037 + */
3038 +
3039 +#ident "@(#)$Id: bitmap.c,v 1.5 2004/01/20 17:32:17 david Exp $"
3040 +/*      $Source: /cvs/master/quadrics/elanmod/shared/bitmap.c,v $*/
3041 +
3042 +#if defined(__KERNEL__)
3043 +#include <qsnet/kernel.h>
3044 +#endif
3045 +#include <qsnet/config.h>
3046 +#include <elan/bitmap.h>
3047 +
3048 +/*
3049 + * Return the index of the first available bit in the 
3050 + * bitmap , or -1 for failure
3051 + */
3052 +int
3053 +bt_freebit (bitmap_t *bitmap, int nbits)
3054 +{
3055 +    int last = (--nbits) >> BT_ULSHIFT;
3056 +    int maxbit;
3057 +    int        i, j;
3058 +
3059 +    /* look for a word with a bit off */
3060 +    for (i = 0; i <= last; i++)
3061 +       if (bitmap[i] != ~((bitmap_t) 0))
3062 +           break;
3063 +
3064 +    if (i <= last)
3065 +    {
3066 +       /* found a word with a bit off,  now see which bit it is */
3067 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3068 +       for (j = 0; j <= maxbit; j++)
3069 +           if ((bitmap[i] & (1 << j)) == 0)
3070 +               return ((i << BT_ULSHIFT) | j);
3071 +    }
3072 +    return (-1);
3073 +    
3074 +}
3075 +
3076 +/*
3077 + * bt_lowbit:
3078 + *     Return the index of the lowest set bit in the
3079 + *     bitmap, or -1 for failure.
3080 + */
3081 +int
3082 +bt_lowbit (bitmap_t *bitmap, int nbits)
3083 +{
3084 +    int last = (--nbits) >> BT_ULSHIFT;
3085 +    int maxbit;
3086 +    int i, j;
3087 +    
3088 +    /* look for a word with a bit on */
3089 +    for (i = 0; i <= last; i++)
3090 +       if (bitmap[i] != 0)
3091 +           break;
3092 +    if (i <= last)
3093 +    {
3094 +       /* found a word with a bit on, now see which bit it is */
3095 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3096 +       for (j = 0; j <= maxbit; j++)
3097 +           if (bitmap[i] & (1 << j))
3098 +               return ((i << BT_ULSHIFT) | j);
3099 +    }
3100 +
3101 +    return (-1);
3102 +}
3103 +
3104 +/*
3105 + * Return the index of the next bit after 'last' in the
3106 + * bitmap whose state matches 'isset', or -1 for failure
3107 + */
3108 +int
3109 +bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset)
3110 +{
3111 +    int first = ((last+1) + BT_NBIPUL-1) >> BT_ULSHIFT;
3112 +    int end   = (--nbits) >> BT_ULSHIFT;
3113 +    int maxbit;
3114 +    int        i, j;
3115 +
3116 +    /* look for bits before the first whole word */
3117 +    if (((last+1) & BT_ULMASK) != 0)
3118 +    {
3119 +       maxbit = ((first-1) == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3120 +       for (j = ((last+1) & BT_ULMASK); j <= maxbit; j++)
3121 +           if ((bitmap[first-1] & (1 << j)) == (isset << j))
3122 +               return (((first-1) << BT_ULSHIFT) | j);
3123 +    }
3124 +
3125 +    /* look for a word with a bit off */
3126 +    for (i = first; i <= end; i++)
3127 +       if (bitmap[i] != (isset ? 0 : ~((bitmap_t) 0)))
3128 +           break;
3129 +
3130 +    if (i <= end)
3131 +    {
3132 +       /* found a word with a matching bit,  now see which bit it is */
3133 +       maxbit = (i == end) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3134 +       for (j = 0; j <= maxbit; j++)
3135 +           if ((bitmap[i] & (1 << j)) == (isset << j))
3136 +               return ((i << BT_ULSHIFT) | j);
3137 +    }
3138 +    return (-1);
3139 +}
3140 +
3141 +void
3142 +bt_copy (bitmap_t *a, bitmap_t *b, int nbits)
3143 +{
3144 +    int i;
3145 +
3146 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3147 +       b[i] = a[i];
3148 +
3149 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3150 +       if (BT_TEST(a, i))
3151 +           BT_SET(b,i);
3152 +       else
3153 +           BT_CLEAR(b,i);
3154 +}
3155 +
3156 +void
3157 +bt_zero (bitmap_t *bitmap, int nbits)
3158 +{
3159 +    int i;
3160 +
3161 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3162 +       bitmap[i] = 0;
3163 +
3164 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3165 +       BT_CLEAR(bitmap,i);
3166 +}
3167 +
3168 +void
3169 +bt_fill (bitmap_t *bitmap, int nbits)
3170 +{
3171 +    int i;
3172 +
3173 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3174 +       bitmap[i] = ~((bitmap_t) 0);
3175 +
3176 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3177 +       BT_SET(bitmap,i);
3178 +}
3179 +
3180 +int
3181 +bt_cmp (bitmap_t *a, bitmap_t *b, int nbits)
3182 +{
3183 +    int i;
3184 +
3185 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3186 +       if (a[i] != b[i])
3187 +           return (1);
3188 +
3189 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3190 +       if (BT_TEST (a, i) != BT_TEST(b, i))
3191 +           return (1);
3192 +    return (0);
3193 +}
3194 +
3195 +void
3196 +bt_intersect (bitmap_t *a, bitmap_t *b, int nbits)
3197 +{
3198 +    int i;
3199 +    
3200 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3201 +       a[i] &= b[i];
3202 +
3203 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3204 +       if (BT_TEST (a, i) && BT_TEST (b, i))
3205 +           BT_SET (a, i);
3206 +       else
3207 +           BT_CLEAR (a, i);
3208 +}
3209 +
3210 +void
3211 +bt_remove (bitmap_t *a, bitmap_t *b, int nbits)
3212 +{
3213 +    int i;
3214 +
3215 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3216 +       a[i] &= ~b[i];
3217 +
3218 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3219 +       if (BT_TEST (b, i))
3220 +           BT_CLEAR (a, i);
3221 +}
3222 +
3223 +void
3224 +bt_add (bitmap_t *a, bitmap_t *b, int nbits)
3225 +{
3226 +    int i;
3227 +
3228 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3229 +       a[i] |= b[i];
3230 +
3231 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3232 +       if (BT_TEST(b, i))
3233 +           BT_SET (a, i);
3234 +}
3235 +
3236 +/*
3237 + * bt_spans : partition a spans partition b
3238 + *    == all bits set in 'b' are set in 'a'
3239 + */
3240 +int
3241 +bt_spans (bitmap_t *a, bitmap_t *b, int nbits)
3242 +{
3243 +    int i;
3244 +    
3245 +    for (i = 0; i < nbits; i++)
3246 +       if (BT_TEST (b, i) && !BT_TEST (a, i))
3247 +           return (0);
3248 +    return (1);
3249 +}
3250 +
3251 +/*
3252 + * bt_subset: copy [base,base+nbits-1] from 'a' to 'b'
3253 + */
3254 +void
3255 +bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits)
3256 +{
3257 +    int i;
3258 +
3259 +    for (i = 0; i < nbits; i++)
3260 +    {
3261 +       if (BT_TEST (a, base+i))
3262 +           BT_SET(b,i);
3263 +       else
3264 +           BT_CLEAR (b,i);
3265 +    }
3266 +}
3267 +
3268 +void 
3269 +bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
3270 +{
3271 +    int i;
3272 +    
3273 +    for (i = 0; i < nbits; i++)
3274 +    {
3275 +       if (!BT_TEST (a, i) && BT_TEST (b, i))
3276 +       {
3277 +           BT_SET (c, i);
3278 +        }
3279 +       else
3280 +       {
3281 +           BT_CLEAR (c, i);
3282 +        }
3283 +    }
3284 +}
3285 +
3286 +void 
3287 +bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
3288 +{
3289 +    int i;
3290 +    
3291 +    for (i = 0; i < nbits; i++)
3292 +    {
3293 +       if (BT_TEST (a, i) && !BT_TEST (b, i))
3294 +       {
3295 +           BT_SET (c, i);
3296 +        }
3297 +       else
3298 +       {
3299 +           BT_CLEAR (c, i);
3300 +        }
3301 +    }
3302 +}
3303 +
3304 +int
3305 +bt_nbits (bitmap_t *a, int nbits)
3306 +{
3307 +    int i, c;
3308 +    for (i = 0, c = 0; i < nbits; i++)
3309 +       if (BT_TEST (a, i))
3310 +           c++;
3311 +    return (c);
3312 +}
3313 +
3314 +/*
3315 + * Local variables:
3316 + * c-file-style: "stroustrup"
3317 + * End:
3318 + */
3319 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/capability.c
3320 ===================================================================
3321 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/capability.c  2004-02-23 16:02:56.000000000 -0500
3322 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/capability.c       2005-07-28 14:52:52.779689224 -0400
3323 @@ -0,0 +1,628 @@
3324 +/*
3325 + *    Copyright (c) 2003 by Quadrics Ltd.
3326 + * 
3327 + *    For licensing information please see the supplied COPYING file
3328 + *
3329 + */
3330 +
3331 +#ident "@(#)$Id: capability.c,v 1.13 2004/07/20 10:15:33 david Exp $"
3332 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/capability.c,v $ */
3333 +
3334 +
3335 +#include <qsnet/kernel.h>
3336 +#include <elan/elanmod.h>
3337 +
3338 +static LIST_HEAD(elan_cap_list); 
3339 +
3340 +typedef struct elan_vp_struct
3341 +{
3342 +       struct list_head list;
3343 +       ELAN_CAPABILITY  vp;
3344 +} ELAN_VP_NODE_STRUCT;
3345 +
3346 +
3347 +typedef struct elan_attached_struct
3348 +{
3349 +       void               *cb_args;
3350 +       ELAN_DESTROY_CB  cb_func;
3351 +} ELAN_ATTACHED_STRUCT;
3352 +
3353 +typedef struct elan_cap_node_struct
3354 +{
3355 +       struct list_head list;
3356 +       ELAN_CAP_STRUCT     node;
3357 +       ELAN_ATTACHED_STRUCT *attached[ELAN_MAX_RAILS];
3358 +       struct list_head vp_list;
3359 +} ELAN_CAP_NODE_STRUCT;
3360 +
3361 +
3362 +ELAN_CAP_NODE_STRUCT *
3363 +find_cap_node(ELAN_CAPABILITY *cap)
3364 +{
3365 +       struct list_head        *tmp;
3366 +       ELAN_CAP_NODE_STRUCT *ptr=NULL;
3367 +
3368 +       list_for_each(tmp, &elan_cap_list) {
3369 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3370 +               /* is it an exact match */
3371 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
3372 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)) {
3373 +                       return ptr;
3374 +               }
3375 +       }
3376 +       return ptr;
3377 +};
3378 +
3379 +ELAN_VP_NODE_STRUCT *
3380 +find_vp_node( ELAN_CAP_NODE_STRUCT *cap_node,ELAN_CAPABILITY *map)
3381 +{
3382 +       struct list_head       * tmp;
3383 +       ELAN_VP_NODE_STRUCT * ptr = NULL;
3384 +
3385 +       list_for_each(tmp, &cap_node->vp_list) {
3386 +               ptr = list_entry(tmp, ELAN_VP_NODE_STRUCT , list);
3387 +               /* is it an exact match */
3388 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->vp,map) 
3389 +                    && ELAN_CAP_GEOM_MATCH(&ptr->vp,map)){
3390 +                       return ptr;
3391 +               }
3392 +       }
3393 +       return ptr;
3394 +}
3395 +
3396 +int 
3397 +elan_validate_cap(ELAN_CAPABILITY *cap)
3398 +{
3399 +       char                      space[127];
3400 +
3401 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap %s\n",elan_capability_string(cap,space));
3402 +
3403 +       /* check versions */
3404 +       if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
3405 +       {
3406 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
3407 +               return (EINVAL);
3408 +       }
3409 +
3410 +       /* check its not HWTEST */
3411 +       if ( cap->cap_type & ELAN_CAP_TYPE_HWTEST )
3412 +       {
3413 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_HWTEST \n");   
3414 +               return (EINVAL);
3415 +       }
3416 +       
3417 +       /* check its type */
3418 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
3419 +       {
3420 +       case ELAN_CAP_TYPE_KERNEL :     
3421 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_KERNEL \n");   
3422 +               return (EINVAL);
3423 +
3424 +               /* check it has a valid type */
3425 +       case ELAN_CAP_TYPE_BLOCK:
3426 +       case ELAN_CAP_TYPE_CYCLIC:
3427 +               break;
3428 +
3429 +               /* all others are failed as well */
3430 +       default:
3431 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed unknown type = %x \n", (cap->cap_type & ELAN_CAP_TYPE_MASK));       
3432 +               return (EINVAL);
3433 +       }
3434 +       
3435 +       if ((cap->cap_lowcontext == ELAN_CAP_UNINITIALISED) || (cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
3436 +           || (cap->cap_lownode == ELAN_CAP_UNINITIALISED) || (cap->cap_highnode    == ELAN_CAP_UNINITIALISED))
3437 +       {
3438 +               
3439 +               ELAN_DEBUG4 (ELAN_DBG_VP,"elan_validate_cap: ELAN_CAP_UNINITIALISED   LowNode %d   HighNode %d   LowContext %d   highContext %d\n",
3440 +                            cap->cap_lownode , cap->cap_highnode,
3441 +                            cap->cap_lowcontext , cap->cap_highcontext);
3442 +               return (EINVAL);
3443 +       }       
3444 +
3445 +       if (cap->cap_lowcontext > cap->cap_highcontext)
3446 +       {
3447 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
3448 +               return (EINVAL);
3449 +       }
3450 +       
3451 +       if (cap->cap_lownode > cap->cap_highnode)
3452 +       {
3453 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lownode > cap->cap_highnode) %d %d\n",cap->cap_lownode, cap->cap_highnode);
3454 +               return (EINVAL);
3455 +       }
3456 +
3457 +       if (cap->cap_mycontext != ELAN_CAP_UNINITIALISED) 
3458 +       {
3459 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed cap->cap_mycontext is set %d  \n", cap->cap_mycontext);
3460 +               return (EINVAL);
3461 +       }
3462 +
3463 +
3464 +       if ((ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)) > ELAN_MAX_VPS)
3465 +       {
3466 +               ELAN_DEBUG6 (ELAN_DBG_VP,"elan_validate_cap: too many vps  LowNode %d   HighNode %d   LowContext %d   highContext %d,  %d >% d\n",
3467 +                            cap->cap_lownode , cap->cap_highnode,
3468 +                            cap->cap_lowcontext , cap->cap_highcontext,
3469 +                            (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)),
3470 +                            ELAN_MAX_VPS);
3471 +               
3472 +               return (EINVAL);
3473 +       }
3474 +
3475 +       return (ESUCCESS);
3476 +}
3477 +
3478 +int
3479 +elan_validate_map(ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3480 +{
3481 +       ELAN_CAP_NODE_STRUCT * ptr  = NULL;
3482 +       ELAN_VP_NODE_STRUCT  * vptr = NULL;
3483 +       char space[256];
3484 +
3485 +       kmutex_lock(&elan_mutex);
3486 +
3487 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map \n");
3488 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map cap = %s \n",elan_capability_string(cap,space));
3489 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map map = %s \n",elan_capability_string(map,space));
3490 +
3491 +       /* does cap exist    */
3492 +       ptr = find_cap_node(cap);
3493 +       if ( ptr == NULL ) 
3494 +       {
3495 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not found \n");
3496 +               kmutex_unlock(&elan_mutex);
3497 +               return EINVAL;
3498 +       }
3499 +       /* is it active */
3500 +       if ( ! ptr->node.active ) 
3501 +       {
3502 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not active \n");
3503 +               kmutex_unlock(&elan_mutex);
3504 +               return EINVAL;
3505 +       }
3506 +
3507 +       /* are they the same */
3508 +       if ( ELAN_CAP_TYPE_MATCH(cap,map) 
3509 +            && ELAN_CAP_GEOM_MATCH(cap,map)) 
3510 +       {
3511 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap == map  passed\n");
3512 +               kmutex_unlock(&elan_mutex);
3513 +               return ESUCCESS;
3514 +       }
3515 +
3516 +       /* is map in map list */
3517 +       vptr = find_vp_node(ptr, map);
3518 +       if ( vptr == NULL ) 
3519 +       {
3520 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map not found\n");
3521 +               kmutex_unlock(&elan_mutex);
3522 +               return EINVAL;
3523 +       }
3524 +       
3525 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map passed\n");
3526 +       kmutex_unlock(&elan_mutex);
3527 +       return ESUCCESS;
3528 +}
3529 +
3530 +int
3531 +elan_create_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
3532 +{
3533 +       char                      space[127];
3534 +       struct list_head        * tmp;
3535 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3536 +       int                       i, rail;
3537 +
3538 +       kmutex_lock(&elan_mutex);
3539 +
3540 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_create_cap %s\n",elan_capability_string(cap,space));     
3541 +
3542 +       /* need to check that the cap does not overlap another one 
3543 +          or is an exact match with only the userkey changing */
3544 +       list_for_each(tmp, &elan_cap_list) {
3545 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3546 +
3547 +               /* is it an exact match */
3548 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
3549 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)
3550 +                    && (&ptr->node.owner == owner)) {
3551 +                       if ( ptr->node.active ) {
3552 +                               /* don't increment attached count as it's like a create */
3553 +                               ptr->node.cap.cap_userkey = cap->cap_userkey;
3554 +                               kmutex_unlock(&elan_mutex);
3555 +                               return ESUCCESS;
3556 +                       }
3557 +                       else
3558 +                       {
3559 +                               kmutex_unlock(&elan_mutex);
3560 +                               return EINVAL;
3561 +                       }
3562 +               }
3563 +               
3564 +               /* does it overlap, even with ones being destroyed */
3565 +               if (elan_cap_overlap(&ptr->node.cap,cap))
3566 +               {
3567 +                       kmutex_unlock(&elan_mutex);
3568 +                       return  EACCES;
3569 +               }
3570 +       }
3571 +
3572 +       /* create it */
3573 +       KMEM_ALLOC(ptr, ELAN_CAP_NODE_STRUCT *, sizeof(ELAN_CAP_NODE_STRUCT), 1);
3574 +       if (ptr == NULL)
3575 +       {
3576 +               kmutex_unlock(&elan_mutex);
3577 +               return  ENOMEM;
3578 +       }
3579 +
3580 +       /* create space for the attached array */
3581 +       for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3582 +       {
3583 +               ptr->attached[rail]=NULL;
3584 +               if ( ELAN_CAP_IS_RAIL_SET(cap,rail) ) 
3585 +               {
3586 +                       KMEM_ALLOC(ptr->attached[rail], ELAN_ATTACHED_STRUCT *, sizeof(ELAN_ATTACHED_STRUCT) *  ELAN_CAP_NUM_CONTEXTS(cap), 1);
3587 +                       if (ptr->attached[rail] == NULL) 
3588 +                       {
3589 +                               for(;rail>=0;rail--)
3590 +                                       if ( ptr->attached[rail] )
3591 +                                               KMEM_FREE(ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) *  ELAN_CAP_NUM_CONTEXTS(cap));
3592 +
3593 +                               KMEM_FREE(ptr, sizeof(ELAN_CAP_NODE_STRUCT));
3594 +                               kmutex_unlock(&elan_mutex);
3595 +                               return  ENOMEM;
3596 +                       }
3597 +                       /* blank the attached array */
3598 +                       for(i=0;i<ELAN_CAP_NUM_CONTEXTS(cap);i++)
3599 +                               ptr->attached[rail][i].cb_func = NULL;
3600 +               }
3601 +       }       
3602 +       
3603 +       ptr->node.owner     = owner;
3604 +       ptr->node.cap       = *cap;
3605 +       ptr->node.attached  = 1;    /* creator counts as attached */
3606 +       ptr->node.active    = 1;
3607 +       ptr->vp_list.next   = &(ptr->vp_list);
3608 +       ptr->vp_list.prev   = &(ptr->vp_list);
3609 +
3610 +       list_add_tail(&ptr->list, &elan_cap_list);      
3611 +
3612 +       kmutex_unlock(&elan_mutex);
3613 +       return  ESUCCESS;
3614 +}
3615 +
3616 +void
3617 +elan_destroy_cap_test(ELAN_CAP_NODE_STRUCT *cap_ptr)
3618 +{
3619 +       /* called by someone holding the mutex   */
3620 +       struct list_head       * vp_tmp;
3621 +       ELAN_VP_NODE_STRUCT * vp_ptr = NULL;
3622 +       int                      rail;
3623 +
3624 +       /* check to see if it can be deleted now */
3625 +       if ( cap_ptr->node.attached == 0 ) {
3626 +               
3627 +               ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_cap_test: attached == 0\n");     
3628 +               
3629 +               /* delete the vp list */
3630 +               list_for_each(vp_tmp, &(cap_ptr->vp_list)) {
3631 +                       vp_ptr = list_entry(vp_tmp, ELAN_VP_NODE_STRUCT , list);
3632 +                       list_del(&vp_ptr->list);
3633 +                       KMEM_FREE( vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
3634 +               }
3635 +               
3636 +               list_del(&cap_ptr->list);
3637 +
3638 +               /* delete space for the attached array */
3639 +               for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3640 +                       if (cap_ptr->attached[rail]) 
3641 +                               KMEM_FREE(cap_ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) * ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap)));
3642 +                       
3643 +               KMEM_FREE(cap_ptr, sizeof(ELAN_CAP_NODE_STRUCT));               
3644 +       }
3645 +}
3646 +
3647 +int
3648 +elan_destroy_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
3649 +{
3650 +       char                      space[127];
3651 +       struct list_head        * el;
3652 +       struct list_head        * nel;
3653 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3654 +       int                       i, rail;
3655 +       int                       found = 0;
3656 +
3657 +       kmutex_lock(&elan_mutex);
3658 +
3659 +       ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_destroy_cap %s\n",elan_capability_string(cap,space));   
3660 +
3661 +       list_for_each_safe (el, nel, &elan_cap_list) {
3662 +               ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
3663 +               
3664 +               /* is it an exact match */
3665 +               if ( (ptr->node.owner == owner )
3666 +                    && (  (cap == NULL) 
3667 +                          || (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)))) {
3668 +
3669 +                       if ( ptr->node.active ) {
3670 +
3671 +                               /* mark as inactive and decrement attached count */
3672 +                               ptr->node.active = 0;
3673 +                               ptr->node.attached--;
3674 +                               ptr->node.owner  = 0; /* no one owns it now */
3675 +                               
3676 +                               /* need to tell anyone who was attached that this has been destroyed */
3677 +                               for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3678 +                                       if (ELAN_CAP_IS_RAIL_SET( &(ptr->node.cap), rail)) {
3679 +                                               for(i=0;i< ELAN_CAP_NUM_CONTEXTS(&(ptr->node.cap));i++)
3680 +                                                       if ( ptr->attached[rail][i].cb_func != NULL) 
3681 +                                                               ptr->attached[rail][i].cb_func(ptr->attached[rail][i].cb_args, cap, NULL);
3682 +                                       }
3683 +                               
3684 +                               /* now try to destroy it */
3685 +                               elan_destroy_cap_test(ptr);
3686 +                               
3687 +                               /* found it */
3688 +                               found = 1;
3689 +                       }
3690 +               }
3691 +       }
3692 +       
3693 +       if ( found )
3694 +       {
3695 +               kmutex_unlock(&elan_mutex);
3696 +               return ESUCCESS;
3697 +       }
3698 +
3699 +       /* failed */
3700 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_cap: didnt find it \n"); 
3701 +
3702 +       kmutex_unlock(&elan_mutex);
3703 +       return EINVAL;
3704 +}
3705 +
3706 +int 
3707 +elan_get_caps(uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps)
3708 +{
3709 +       uint                      results = 0;
3710 +       struct list_head        * tmp;
3711 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3712 +       
3713 +
3714 +       kmutex_lock(&elan_mutex);
3715 +
3716 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_get_caps\n");    
3717 +
3718 +       list_for_each(tmp, &elan_cap_list) {
3719 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3720 +               
3721 +               copyout(&ptr->node, &caps[results], sizeof (ELAN_CAP_STRUCT));
3722 +               
3723 +               results++;
3724 +               
3725 +               if ( results >= array_size )
3726 +               {
3727 +                       copyout(&results, number_of_results, sizeof(uint));     
3728 +                       kmutex_unlock(&elan_mutex);
3729 +                       return ESUCCESS;
3730 +               }
3731 +       }
3732 +
3733 +       copyout(&results, number_of_results, sizeof(uint));     
3734 +
3735 +       kmutex_unlock(&elan_mutex);
3736 +       return ESUCCESS;
3737 +}
3738 +
3739 +int
3740 +elan_create_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3741 +{
3742 +       ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
3743 +       ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
3744 +       
3745 +       kmutex_lock(&elan_mutex);
3746 +
3747 +
3748 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_create_vp\n");
3749 +
3750 +       /* the railmasks must match */
3751 +       if ( cap->cap_railmask != map->cap_railmask)
3752 +       {
3753 +               kmutex_unlock(&elan_mutex);
3754 +               return  EINVAL;
3755 +       }
3756 +
3757 +       /* does the cap exist */
3758 +       cap_ptr = find_cap_node(cap);
3759 +       if ((cap_ptr == NULL) || ( cap_ptr->node.owner != owner ) || (! cap_ptr->node.active) )
3760 +       {
3761 +               kmutex_unlock(&elan_mutex);
3762 +               return  EINVAL;
3763 +       }
3764 +       
3765 +       /* is there already a mapping */
3766 +       vp_ptr = find_vp_node(cap_ptr,map);
3767 +       if ( vp_ptr != NULL) 
3768 +       {
3769 +               kmutex_unlock(&elan_mutex);
3770 +               return  EINVAL;
3771 +       }
3772 +
3773 +       /* create space for mapping */
3774 +       KMEM_ALLOC(vp_ptr, ELAN_VP_NODE_STRUCT *, sizeof(ELAN_VP_NODE_STRUCT), 1);
3775 +       if (vp_ptr == NULL)
3776 +       {
3777 +               kmutex_unlock(&elan_mutex);
3778 +               return  ENOMEM;
3779 +       }
3780 +                       
3781 +       /* copy map */
3782 +       vp_ptr->vp = *map;
3783 +       list_add_tail(&vp_ptr->list, &(cap_ptr->vp_list));      
3784 +       kmutex_unlock(&elan_mutex);
3785 +       return  ESUCCESS;
3786 +}
3787 +
3788 +int
3789 +elan_destroy_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3790 +{
3791 +       ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
3792 +       ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
3793 +       int                       i, rail;
3794 +
3795 +       kmutex_lock(&elan_mutex);
3796 +
3797 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_vp\n");  
3798 +
3799 +       cap_ptr = find_cap_node(cap);
3800 +       if ((cap_ptr!=NULL) && (cap_ptr->node.owner == owner) && ( cap_ptr->node.active))
3801 +       {               
3802 +               vp_ptr = find_vp_node( cap_ptr, map );
3803 +               if ( vp_ptr != NULL ) 
3804 +               {
3805 +                       list_del(&vp_ptr->list);
3806 +                       KMEM_FREE(vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
3807 +             
3808 +                       /* need to tell those who are attached that map is no longer in use */
3809 +                       for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3810 +                               if (ELAN_CAP_IS_RAIL_SET(cap, rail))
3811 +                               {
3812 +                                       for(i=0;i< ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap));i++)
3813 +                                               if ( cap_ptr->attached[rail][i].cb_func != NULL) 
3814 +                                                       cap_ptr->attached[rail][i].cb_func( cap_ptr->attached[rail][i].cb_args, cap, map);
3815 +                               }
3816 +
3817 +                       kmutex_unlock(&elan_mutex);
3818 +                       return  ESUCCESS;
3819 +               }
3820 +       }       
3821 +       
3822 +       /* didn't find it */
3823 +       kmutex_unlock(&elan_mutex);
3824 +       return  EINVAL;
3825 +}
3826 +
3827 +int 
3828 +elan_attach_cap(ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB func)
3829 +{
3830 +       char                  space[127];
3831 +       struct list_head     *el;
3832 +
3833 +       ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_attach_cap %s\n",elan_capability_string(cap,space));
3834 +
3835 +       /* currently must provide a call back, as null mean something */
3836 +       if ( func == NULL)
3837 +               return (EINVAL);
3838 +
3839 +       /* mycontext must be set and correct */
3840 +       if ( ! ELAN_CAP_VALID_MYCONTEXT(cap))
3841 +               return (EINVAL);
3842 +
3843 +       /* rail must be one of the rails in railmask */
3844 +       if (((1 << rail) & cap->cap_railmask) == 0)
3845 +               return (EINVAL);
3846 +       
3847 +       kmutex_lock(&elan_mutex);
3848 +
3849 +       list_for_each(el, &elan_cap_list) {
3850 +               ELAN_CAP_NODE_STRUCT *cap_ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
3851 +               
3852 +               /* is it an exact match */
3853 +               if (ELAN_CAP_MATCH(&cap_ptr->node.cap,cap) && cap_ptr->node.active) {
3854 +                       unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
3855 +                       
3856 +                       if ( cap_ptr->attached[rail][attached_index].cb_func != NULL ) /* only one per ctx per rail */
3857 +                       {
3858 +                               kmutex_unlock(&elan_mutex);
3859 +                               return   EINVAL;
3860 +                       }
3861 +
3862 +                       /* keep track of who attached as we might need to tell them when */
3863 +                       /* cap or maps get destroyed                                     */
3864 +                       cap_ptr->attached[rail][ attached_index ].cb_func = func;
3865 +                       cap_ptr->attached[rail][ attached_index ].cb_args = args;
3866 +                       cap_ptr->node.attached++;
3867 +
3868 +                       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: passed\n");
3869 +                       kmutex_unlock(&elan_mutex);
3870 +                       return ESUCCESS;
3871 +               }
3872 +       }
3873 +       
3874 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: failed to find \n");
3875 +
3876 +       /* didn't find one */
3877 +       kmutex_unlock(&elan_mutex);
3878 +       return EINVAL;
3879 +}
3880 +
3881 +int 
3882 +elan_detach_cap(ELAN_CAPABILITY *cap, unsigned int rail)
3883 +{
3884 +       struct list_head *el, *nel;
3885 +       char              space[256];
3886 +
3887 +       kmutex_lock(&elan_mutex);
3888 +
3889 +       ELAN_DEBUG1(ELAN_DBG_CAP,"elan_detach_cap %s\n",elan_capability_string(cap,space));
3890 +       list_for_each_safe (el, nel, &elan_cap_list) {
3891 +               ELAN_CAP_NODE_STRUCT *ptr = list_entry (el, ELAN_CAP_NODE_STRUCT, list);
3892 +
3893 +               /* match on type and geometry, with a railmask covering cap's rails */
3894 +               if (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) &&
3895 +                   ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap) &&
3896 +                   (ptr->node.cap.cap_railmask & cap->cap_railmask) == cap->cap_railmask) {
3897 +               
3898 +                       unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
3899 +
3900 +                       if ( ptr->attached[rail][ attached_index ].cb_func == NULL ) 
3901 +                               ELAN_DEBUG0(ELAN_DBG_CAP,"elanmod_detach_cap already removed \n");
3902 +
3903 +                       ptr->attached[rail][ attached_index ].cb_func = NULL;
3904 +                       ptr->attached[rail][ attached_index ].cb_args = (void *)0;
3905 +
3906 +                       ptr->node.attached--;
3907 +
3908 +                       ELAN_DEBUG1(ELAN_DBG_CAP,"elanmod_detach_cap new attach count%d \n", ptr->node.attached);
3909 +
3910 +                       elan_destroy_cap_test(ptr);
3911 +
3912 +                       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: success\n"); 
3913 +
3914 +                       kmutex_unlock(&elan_mutex);
3915 +                       return  ESUCCESS;
3916 +               }
3917 +       }
3918 +
3919 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: failed to find\n");
3920 +       kmutex_unlock(&elan_mutex);
3921 +       return  EINVAL;
3922 +}
3923 +
3924 +int
3925 +elan_cap_dump()
3926 +{
3927 +       struct list_head        * tmp;
3928 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3929 +       
3930 +       kmutex_lock(&elan_mutex);       
3931 +       
3932 +       list_for_each(tmp, &elan_cap_list) {
3933 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3934 +
3935 +               ELAN_DEBUG2 (ELAN_DBG_ALL, "cap dump: owner %p type %x\n", ptr->node.owner, ptr->node.cap.cap_type);
3936 +                       
3937 +               ELAN_DEBUG5 (ELAN_DBG_ALL, "cap dump: LowNode %d   HighNode %d   LowContext %d   mycontext %d   highContext %d\n",
3938 +                            ptr->node.cap.cap_lownode , ptr->node.cap.cap_highnode,
3939 +                            ptr->node.cap.cap_lowcontext , ptr->node.cap.cap_mycontext, ptr->node.cap.cap_highcontext);
3940 +
3941 +       }
3942 +
3943 +       kmutex_unlock(&elan_mutex);
3944 +       return  ESUCCESS;
3945 +}
3946 +
3947 +/*
3948 + * Local variables:
3949 + * c-file-style: "linux"
3950 + * End:
3951 + */
3952 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/capability_general.c
3953 ===================================================================
3954 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/capability_general.c  2004-02-23 16:02:56.000000000 -0500
3955 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/capability_general.c       2005-07-28 14:52:52.779689224 -0400
3956 @@ -0,0 +1,446 @@
3957 +/*
3958 + *    Copyright (c) 2003 by Quadrics Ltd.
3959 + * 
3960 + *    For licensing information please see the supplied COPYING file
3961 + *
3962 + */
3963 +
3964 +#ident "@(#)$Id: capability_general.c,v 1.10 2004/02/25 13:47:59 daniel Exp $"
3965 +/*      $Source: /cvs/master/quadrics/elanmod/shared/capability_general.c,v $ */
3966 +
3967 +#if defined(__KERNEL__)
3968 +
3969 +#include <qsnet/kernel.h>
3970 +
3971 +#else
3972 +
3973 +#include <stdlib.h>
3974 +#include <stdio.h>
3975 +#include <sys/param.h>
3976 +
3977 +#endif
3978 +
3979 +#include <elan/elanmod.h>
3980 +
3981 +
3982 +void
3983 +elan_nullcap (ELAN_CAPABILITY *cap)
3984 +{
3985 +       register int i;
3986 +
3987 +       for (i = 0; i < sizeof (cap->cap_userkey)/sizeof(cap->cap_userkey.key_values[0]); i++)
3988 +               cap->cap_userkey.key_values[i] = ELAN_CAP_UNINITIALISED;
3989 +    
3990 +       cap->cap_lowcontext  = ELAN_CAP_UNINITIALISED;
3991 +       cap->cap_highcontext = ELAN_CAP_UNINITIALISED;
3992 +       cap->cap_mycontext   = ELAN_CAP_UNINITIALISED;
3993 +       cap->cap_lownode     = ELAN_CAP_UNINITIALISED;
3994 +       cap->cap_highnode    = ELAN_CAP_UNINITIALISED;
3995 +       cap->cap_railmask    = ELAN_CAP_UNINITIALISED;
3996 +       cap->cap_type        = ELAN_CAP_UNINITIALISED;
3997 +       cap->cap_spare       = 0;
3998 +       cap->cap_version     = ELAN_CAP_VERSION_NUMBER;
3999 +       
4000 +       for (i = 0; i < sizeof (cap->cap_bitmap)/sizeof (cap->cap_bitmap[0]); i++)
4001 +               cap->cap_bitmap[i] = 0;
4002 +}
4003 +
4004 +char *
4005 +elan_capability_string (ELAN_CAPABILITY *cap, char *str)
4006 +{
4007 +       if (cap == NULL) 
4008 +               sprintf (str, "[-.-.-.-] cap = NULL\n");
4009 +       else
4010 +               sprintf (str, "[%x.%x.%x.%x] Version %x Type %x \n"
4011 +                        "Context %x.%x.%x Node %x.%x\n",
4012 +                        cap->cap_userkey.key_values[0], cap->cap_userkey.key_values[1],
4013 +                        cap->cap_userkey.key_values[2], cap->cap_userkey.key_values[3],
4014 +                        cap->cap_version, cap->cap_type, 
4015 +                        cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext,
4016 +                        cap->cap_lownode, cap->cap_highnode);
4017 +       
4018 +       return (str);
4019 +}
4020 +
4021 +ELAN_LOCATION
4022 +elan_vp2location (u_int process, ELAN_CAPABILITY *cap)
4023 +{
4024 +       ELAN_LOCATION location;
4025 +       int i, vp, node, context, nnodes, nctxs;
4026 +
4027 +       vp = 0;
4028 +
4029 +       location.loc_node    = ELAN_INVALID_NODE;
4030 +       location.loc_context = -1;
4031 +       
4032 +       nnodes = cap->cap_highnode - cap->cap_lownode + 1;
4033 +       nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
4034 +       
4035 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
4036 +       {
4037 +       case ELAN_CAP_TYPE_BLOCK:
4038 +               for (node = 0, i = 0; node < nnodes; node++)
4039 +               {
4040 +                       for (context = 0; context < nctxs; context++)
4041 +                       {
4042 +                               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
4043 +                               {
4044 +                                       if (vp == process)
4045 +                                       {
4046 +                                               /* Return relative indices within the capability box */
4047 +                                               location.loc_node    = node;
4048 +                                               location.loc_context = context;
4049 +
4050 +                                               return (location);
4051 +                                       }
4052 +                      
4053 +                                       vp++;
4054 +                               }
4055 +                       }
4056 +               }
4057 +               break;
4058 +       
4059 +       case ELAN_CAP_TYPE_CYCLIC:
4060 +               for (context = 0, i = 0; context < nctxs; context++)
4061 +               {
4062 +                       for (node = 0; node < nnodes; node++)
4063 +                       {
4064 +                               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
4065 +                               {
4066 +                                       if (vp == process)
4067 +                                       {
4068 +                                               location.loc_node    = node;
4069 +                                               location.loc_context = context;
4070 +
4071 +                                               return (location);
4072 +                                       }
4073 +                   
4074 +                                       vp++;
4075 +                               }
4076 +                       }
4077 +               }
4078 +               break;
4079 +       }
4080 +    
4081 +       return( location );
4082 +}
4083 +
4084 +int
4085 +elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap)
4086 +{
4087 +    int  vp, node, context, nnodes, nctxs;
4088 +
4089 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
4090 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
4091 +
4092 +    vp = 0;
4093 +    
4094 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
4095 +    {
4096 +    case ELAN_CAP_TYPE_BLOCK:
4097 +       for (node = 0 ; node < nnodes ; node++)
4098 +       {
4099 +           for (context = 0; context < nctxs; context++)
4100 +           {
4101 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
4102 +               {
4103 +                   if ((location.loc_node == node) && (location.loc_context == context))
4104 +                   {
4105 +                       /* Found it ! */
4106 +                       return( vp );
4107 +                   }
4108 +                   
4109 +                   vp++;
4110 +               }
4111 +           }
4112 +       }
4113 +       break;
4114 +       
4115 +    case ELAN_CAP_TYPE_CYCLIC:
4116 +       for (context = 0; context < nctxs; context++)
4117 +       {
4118 +           for (node = 0; node < nnodes; node++)
4119 +           {
4120 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
4121 +               {
4122 +                   if ((location.loc_node == node) && (location.loc_context == context))
4123 +                   {
4124 +                       /* Found it ! */
4125 +                       return( vp );
4126 +                   }
4127 +                   
4128 +                   vp++;
4129 +               }
4130 +           }
4131 +       }
4132 +       break;
4133 +    }
4134 +    
4135 +    /* Failed to find it */
4136 +    return( -1 );
4137 +}
4138 +
4139 +/* Return the number of processes as described by a capability */
4140 +int
4141 +elan_nvps (ELAN_CAPABILITY *cap)
4142 +{
4143 +       int i, c, nbits = ELAN_CAP_BITMAPSIZE(cap);
4144 +
4145 +       if (cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)
4146 +               return (nbits);
4147 +
4148 +       for (i = 0, c = 0; i < nbits; i++)
4149 +               if (BT_TEST (cap->cap_bitmap, i))
4150 +                       c++;
4151 +
4152 +       return (c);
4153 +}
4154 +
4155 +/* Return the number of local processes on a given node as described by a capability */
4156 +int
4157 +elan_nlocal (int node, ELAN_CAPABILITY *cap)
4158 +{
4159 +       int vp;
4160 +       ELAN_LOCATION loc;
4161 +       int nLocal = 0;
4162 +
4163 +       for (vp = 0; vp < elan_nvps(cap); vp++)
4164 +       {
4165 +               loc = elan_vp2location(vp, cap);
4166 +               if (loc.loc_node == node)
4167 +                       nLocal++;
4168 +       }
4169 +
4170 +       return (nLocal);
4171 +}
4172 +
4173 +/* Return the maximum number of local processes on any node as described by a capability */
4174 +int
4175 +elan_maxlocal (ELAN_CAPABILITY *cap)
4176 +{
4177 +       return(cap->cap_highcontext - cap->cap_lowcontext + 1);
4178 +}
4179 +
4180 +/* Return the vps of the local processes on a given node as described by a capability */
4181 +int
4182 +elan_localvps (int node, ELAN_CAPABILITY *cap, int *vps, int size)
4183 +{
4184 +       int context;
4185 +       ELAN_LOCATION loc;
4186 +       int nLocal = 0;
4187 +    
4188 +       loc.loc_node = node;
4189 +
4190 +       for (context = 0; context < MIN(size, elan_maxlocal(cap)); context++)
4191 +       {
4192 +               loc.loc_context = context;
4193 +       
4194 +               /* Should return -1 if none found */
4195 +               if ( (vps[context] = elan_location2vp( loc, cap )) != -1)
4196 +                       nLocal++;
4197 +       }
4198 +
4199 +       return (nLocal);
4200 +}
4201 +
4202 +/* Return the number of rails that this capability utilises */
4203 +int
4204 +elan_nrails (ELAN_CAPABILITY *cap)
4205 +{
4206 +       int nrails = 0;
4207 +       unsigned int railmask;
4208 +
4209 +       /* Test for a multi-rail capability */
4210 +       if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
4211 +       {
4212 +               /* Grab rail bitmask from capability */
4213 +               railmask = cap->cap_railmask;
4214 +       
4215 +               while (railmask)
4216 +               {
4217 +                       if (railmask & 1)
4218 +                               nrails++;
4219 +           
4220 +                       railmask >>= 1;
4221 +               }
4222 +       }
4223 +       else 
4224 +               /* Default to just one rail */
4225 +               nrails = 1;
4226 +       
4227 +       return (nrails);
4228 +}
4229 +
4230 +/* Fill out an array giving the physical rail numbers utilised by a capability */
4231 +int
4232 +elan_rails (ELAN_CAPABILITY *cap, int *rails)
4233 +{
4234 +       int nrails, rail;
4235 +       unsigned int railmask;
4236 +
4237 +       /* Test for a multi-rail capability */
4238 +       if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
4239 +       {
4240 +               /* Grab rail bitmask from capability */
4241 +               railmask = cap->cap_railmask;
4242 +       
4243 +               nrails = rail = 0;
4244 +               while (railmask)
4245 +               {
4246 +                       if (railmask & 1)
4247 +                               rails[nrails++] = rail;
4248 +           
4249 +                       rail++;
4250 +                       railmask >>= 1;
4251 +               }
4252 +       }
4253 +       else
4254 +       {
4255 +               /* Default to just one rail */
4256 +               rails[0] = 0;
4257 +               nrails = 1;
4258 +       }
4259 +
4260 +       return( nrails );
4261 +}
4262 +
4263 +int 
4264 +elan_cap_overlap(ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2)
4265 +{
4266 +       /* by context */
4267 +       if ( cap1->cap_highcontext < cap2->cap_lowcontext ) return (0);
4268 +       if ( cap1->cap_lowcontext  > cap2->cap_highcontext) return (0);
4269 +       
4270 +       /* by node */
4271 +       if ( cap1->cap_highnode < cap2->cap_lownode ) return (0);
4272 +       if ( cap1->cap_lownode  > cap2->cap_highnode) return (0);
4273 +
4274 +       /* by rail */
4275 +       /* they overlap if they have a rail in common */
4276 +       return (cap1->cap_railmask & cap2->cap_railmask);
4277 +}
4278 +
4279 +#if !defined(__KERNEL__)
4280 +
4281 +/* Fill out an array that hints at the best use of the rails on a
4282 + * per process basis. The library user can then decide whether or not
4283 + * to take this into account (e.g. TPORTs)
4284 + * All processes calling this fn will be returned the same information.
4285 + */
4286 +int
4287 +elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp)
4288 +{
4289 +       int i;
4290 +       int nrails = elan_nrails(cap);
4291 +       int maxlocal = elan_maxlocal(cap);
4292 +
4293 +       /* Test for a multi-rail capability */
4294 +       if (! (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL))
4295 +       {
4296 +               /* Default to just one rail */
4297 +               for (i = 0; i < nvp; i++)
4298 +                       pref[i] = 0;
4299 +
4300 +               return( 0 );
4301 +       }
4302 +
4303 +       /*
4304 +        * We allocate rails on a per node basis sharing out the rails
4305 +        * equally amongst the local processes. However, if there is only
4306 +        * one process per node and multiple rails, then we use a different
4307 +        * algorithm where rails are allocated across all the processes in 
4308 +        * a round-robin fashion
4309 +        */
4310 +    
4311 +       if (maxlocal == 1)
4312 +       {
4313 +               /* Allocate rails in a round-robin manner */
4314 +               for (i = 0; i < nvp; i++)
4315 +                       *pref++ = i % nrails;
4316 +       }
4317 +       else
4318 +       {
4319 +               int node;
4320 +               int *vps;
4321 +               int nnodes = cap->cap_highnode - cap->cap_lownode + 1;
4322 +
4323 +               vps = (int *) malloc(sizeof(int)*maxlocal);
4324 +
4325 +               /* Grab the local process info for each node and allocate
4326 +                * rails to those vps on an equal basis
4327 +                */
4328 +               for (node = 0; node < nnodes; node++)
4329 +               {
4330 +                       int nlocal;
4331 +                       int pprail;
4332 +
4333 +                       /* Grab an array of local vps */
4334 +                       nlocal = elan_localvps(node, cap, vps, maxlocal);
4335 +           
4336 +                       /* Calculate the number of processes per rail */
4337 +                       if ((pprail = nlocal/nrails) == 0)
4338 +                               pprail = 1;
4339 +
4340 +                       /* Allocate processes to rails */
4341 +                       for (i = 0; i < nlocal; i++)
4342 +                       {
4343 +                               pref[vps[i]] = (i / pprail) % nrails;
4344 +                       }
4345 +               }
4346 +       
4347 +               free(vps);
4348 +       }
4349 +
4350 +       return( 0 );
4351 +}
4352 +
4353 +void 
4354 +elan_get_random_key(ELAN_USERKEY *key)
4355 +{
4356 +    int i;
4357 +    for (i = 0; i < sizeof(key->key_values) / sizeof(key->key_values[0]); i++)
4358 +       key->key_values[i] = lrand48();
4359 +}
4360 +
4361 +int elan_lowcontext(ELAN_CAPABILITY *cap)
4362 +{
4363 +    return(cap->cap_lowcontext);
4364 +}
4365 +
4366 +int elan_mycontext(ELAN_CAPABILITY *cap)
4367 +{
4368 +    return(cap->cap_mycontext);
4369 +}
4370 +
4371 +int elan_highcontext(ELAN_CAPABILITY *cap)
4372 +{
4373 +    return(cap->cap_highcontext);
4374 +}
4375 +
4376 +int elan_lownode(ELAN_CAPABILITY *cap)
4377 +{
4378 +    return(cap->cap_lownode);
4379 +}
4380 +
4381 +int elan_highnode(ELAN_CAPABILITY *cap)
4382 +{
4383 +    return(cap->cap_highnode);
4384 +}
4385 +
4386 +int elan_captype(ELAN_CAPABILITY *cap)
4387 +{
4388 +    return(cap->cap_type);
4389 +}
4390 +
4391 +int elan_railmask(ELAN_CAPABILITY *cap)
4392 +{
4393 +    return(cap->cap_railmask);
4394 +}
4395 +
4396 +#endif
4397 +
4398 +/*
4399 + * Local variables:
4400 + * c-file-style: "linux"
4401 + * End:
4402 + */
4403 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/device.c
4404 ===================================================================
4405 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/device.c      2004-02-23 16:02:56.000000000 -0500
4406 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/device.c   2005-07-28 14:52:52.780689072 -0400
4407 @@ -0,0 +1,147 @@
4408 +/*
4409 + *    Copyright (c) 2003 by Quadrics Ltd.
4410 + * 
4411 + *    For licensing information please see the supplied COPYING file
4412 + *
4413 + */
4414 +
4415 +#ident "@(#)$Id: device.c,v 1.5 2003/09/24 13:55:37 david Exp $"
4416 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/device.c,v $*/
4417 +
4418 +#include <qsnet/kernel.h>
4419 +#include <elan/elanmod.h>
4420 +
4421 +static LIST_HEAD(elan_dev_list);
4422 +
4423 +ELAN_DEV_STRUCT *
4424 +elan_dev_find (ELAN_DEV_IDX devidx)
4425 +{
4426 +       struct list_head   *tmp;
4427 +       ELAN_DEV_STRUCT *ptr=NULL;
4428 +
4429 +       list_for_each(tmp, &elan_dev_list) {
4430 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4431 +               if (ptr->devidx == devidx) 
4432 +                       return ptr;
4433 +               if (ptr->devidx > devidx)
4434 +                       return ERR_PTR(-ENXIO);
4435 +       }
4436 +       
4437 +       return ERR_PTR(-EINVAL);
4438 +}
4439 +
4440 +ELAN_DEV_STRUCT *
4441 +elan_dev_find_byrail (unsigned short deviceid, unsigned rail)
4442 +{
4443 +       struct list_head   *tmp;
4444 +       ELAN_DEV_STRUCT *ptr=NULL;
4445 +
4446 +       list_for_each(tmp, &elan_dev_list) {
4447 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4448 +
4449 +               ELAN_DEBUG5 (ELAN_DBG_ALL,"elan_dev_find_byrail devidx %d - %04x %04x,  %d %d \n", ptr->devidx, 
4450 +                            ptr->devinfo->dev_device_id, deviceid, ptr->devinfo->dev_rail, rail);
4451 +
4452 +               if (ptr->devinfo->dev_device_id == deviceid && ptr->devinfo->dev_rail == rail)
4453 +                       return ptr;
4454 +       }
4455 +       
4456 +       return NULL;
4457 +}
4458 +
4459 +ELAN_DEV_IDX
4460 +elan_dev_register (ELAN_DEVINFO *devinfo, ELAN_DEV_OPS *ops, void * user_data)
4461 +{
4462 +       ELAN_DEV_STRUCT *ptr;
4463 +       ELAN_DEV_IDX        devidx = 0;
4464 +       struct list_head   *tmp;
4465 +
4466 +        kmutex_lock(&elan_mutex);
4467 +
4468 +       /* is it already registered */
4469 +       if ((ptr = elan_dev_find_byrail(devinfo->dev_device_id, devinfo->dev_rail)) != NULL) 
4470 +       {
4471 +               kmutex_unlock(&elan_mutex);
4472 +               return EINVAL;
4473 +       }
4474 +
4475 +       /* find a free device idx */
4476 +       list_for_each (tmp, &elan_dev_list) {
4477 +               if (list_entry (tmp, ELAN_DEV_STRUCT, node)->devidx != devidx)
4478 +                       break;
4479 +               devidx++;
4480 +       }
4481 +
4482 +       /* create it and add */
4483 +       KMEM_ALLOC(ptr, ELAN_DEV_STRUCT *, sizeof(ELAN_DEV_STRUCT), 1);
4484 +       if (ptr == NULL)
4485 +       {
4486 +               kmutex_unlock(&elan_mutex);
4487 +               return ENOMEM;
4488 +       }
4489 +
4490 +       ptr->devidx    = devidx;
4491 +       ptr->ops       = ops;
4492 +       ptr->devinfo   = devinfo;
4493 +       ptr->user_data = user_data;
4494 +
4495 +       /* insert this entry *before* the last entry we've found */
4496 +       list_add_tail(&ptr->node, tmp);
4497 +
4498 +       kmutex_unlock(&elan_mutex);
4499 +       return  ESUCCESS;
4500 +}
4501 +
4502 +int
4503 +elan_dev_deregister (ELAN_DEVINFO *devinfo)
4504 +{
4505 +       ELAN_DEV_STRUCT *target;
4506 +
4507 +       kmutex_lock(&elan_mutex);
4508 +
4509 +       if ((target = elan_dev_find_byrail (devinfo->dev_device_id, devinfo->dev_rail)) == NULL)
4510 +       {
4511 +               kmutex_unlock(&elan_mutex);
4512 +               return  EINVAL;
4513 +       }
4514 +
4515 +       list_del(&target->node);
4516 +
4517 +       /* delete target entry */
4518 +       KMEM_FREE(target, sizeof(ELAN_DEV_STRUCT));
4519 +
4520 +       kmutex_unlock(&elan_mutex);
4521 +       return  ESUCCESS;
4522 +}
4523 +
4524 +int
4525 +elan_dev_dump ()
4526 +{
4527 +       struct list_head   *tmp;
4528 +       ELAN_DEV_STRUCT *ptr=NULL;
4529 +
4530 +       kmutex_lock(&elan_mutex);       
4531 +
4532 +       list_for_each(tmp, &elan_dev_list) {
4533 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4534 +
4535 +               ELAN_DEBUG3 (ELAN_DBG_ALL,"dev dump: index %u rail %u elan%c\n", 
4536 +                            ptr->devidx, ptr->devinfo->dev_rail, '3' + ptr->devinfo->dev_device_id);
4537 +               ELAN_DEBUG5 (ELAN_DBG_ALL,"dev dump: Vid %x   Did %x  Rid %x  DR %d  DVal %x\n",
4538 +                            ptr->devinfo->dev_vendor_id,
4539 +                            ptr->devinfo->dev_device_id,
4540 +                            ptr->devinfo->dev_revision_id,
4541 +                            ptr->devinfo->dev_driver_version,
4542 +                            ptr->devinfo->dev_num_down_links_value);
4543 +
4544 +       }
4545 +
4546 +       kmutex_unlock(&elan_mutex);
4547 +       return ESUCCESS;
4548 +}
4549 +
4550 +/*
4551 + * Local variables:
4552 + * c-file-style: "linux"
4553 + * End:
4554 + */
4555 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/devinfo.c
4556 ===================================================================
4557 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/devinfo.c     2004-02-23 16:02:56.000000000 -0500
4558 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/devinfo.c  2005-07-28 14:52:52.780689072 -0400
4559 @@ -0,0 +1,78 @@
4560 +/*
4561 + *    Copyright (c) 2003 by Quadrics Ltd.
4562 + * 
4563 + *    For licensing information please see the supplied COPYING file
4564 + *
4565 + */
4566 +
4567 +#ident "@(#)$Id: devinfo.c,v 1.5 2003/09/24 13:55:37 david Exp $"
4568 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.c,v $*/
4569 +
4570 +#include <qsnet/kernel.h>
4571 +#include <elan/elanmod.h>
4572 +
4573 +int 
4574 +elan_get_devinfo(ELAN_DEV_IDX devidx, ELAN_DEVINFO *devinfo)
4575 +{
4576 +       ELAN_DEV_STRUCT *target;
4577 +       int                 res;
4578 +
4579 +       kmutex_lock(&elan_mutex);
4580 +
4581 +       target = elan_dev_find (devidx);
4582 +
4583 +       if (IS_ERR (target))
4584 +               res = PTR_ERR(target);
4585 +       else
4586 +       {
4587 +               copyout(target->devinfo, devinfo, sizeof(ELAN_DEVINFO));
4588 +               res = ESUCCESS;
4589 +       }
4590 +       
4591 +       kmutex_unlock(&elan_mutex);
4592 +       return res;
4593 +}
4594 +
4595 +int 
4596 +elan_get_position(ELAN_DEV_IDX devidx, ELAN_POSITION *position)
4597 +{
4598 +       ELAN_DEV_STRUCT *target;
4599 +       int                 res;
4600 +
4601 +       kmutex_lock(&elan_mutex);
4602 +
4603 +       target = elan_dev_find(devidx);
4604 +
4605 +       if (IS_ERR (target))
4606 +               res = PTR_ERR(target);
4607 +       else
4608 +               res = target->ops->get_position(target->user_data, position);
4609 +       
4610 +       kmutex_unlock(&elan_mutex);
4611 +       return res;
4612 +}
4613 +
4614 +int 
4615 +elan_set_position(ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes)
4616 +{
4617 +       ELAN_DEV_STRUCT *target;
4618 +       int                 res;
4619 +
4620 +       kmutex_lock(&elan_mutex);
4621 +
4622 +       target = elan_dev_find(devidx);
4623 +
4624 +       if (IS_ERR (target))
4625 +               res = PTR_ERR (target);
4626 +       else
4627 +               res = target->ops->set_position(target->user_data, nodeId, numNodes);
4628 +       
4629 +       kmutex_unlock(&elan_mutex);
4630 +       return res;
4631 +}
4632 +
4633 +/*
4634 + * Local variables:
4635 + * c-file-style: "linux"
4636 + * End:
4637 + */
4638 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/elanmod.c
4639 ===================================================================
4640 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/elanmod.c     2004-02-23 16:02:56.000000000 -0500
4641 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/elanmod.c  2005-07-28 14:52:52.781688920 -0400
4642 @@ -0,0 +1,149 @@
4643 +/*
4644 + *    Copyright (c) 2003 by Quadrics Ltd.
4645 + * 
4646 + *    For licensing information please see the supplied COPYING file
4647 + *
4648 + */
4649 +#ident "@(#)$Id: elanmod.c,v 1.11 2004/06/18 09:28:16 mike Exp $"
4650 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.c,v $*/
4651 +
4652 +#include <qsnet/kernel.h>
4653 +#include <elan/elanmod.h>
4654 +
4655 +kmutex_t  elan_mutex;
4656 +
4657 +int 
4658 +elan_init()
4659 +{
4660 +       kmutex_init(&elan_mutex);
4661 +       return (ESUCCESS);
4662 +}
4663 +
4664 +int 
4665 +elan_fini()
4666 +{
4667 +       kmutex_destroy(&elan_mutex);
4668 +       return (ESUCCESS);
4669 +}
4670 +
4671 +int 
4672 +elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use)
4673 +{
4674 +       if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
4675 +       {
4676 +               ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
4677 +               return (-EINVAL);
4678 +       }
4679 +       
4680 +       if (cap->cap_lowcontext == ELAN_CAP_UNINITIALISED || cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
4681 +       {
4682 +               ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: LowContext %d    HighContext %d MyContext %d\n",
4683 +                            cap->cap_lowcontext , cap->cap_highcontext, cap->cap_mycontext);
4684 +               return (-EINVAL);
4685 +       }
4686 +       
4687 +       if (cap->cap_lowcontext > cap->cap_highcontext)
4688 +       {
4689 +               ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
4690 +               return (-EINVAL);
4691 +       }
4692 +       
4693 +       
4694 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
4695 +       {
4696 +       case ELAN_CAP_TYPE_BLOCK:
4697 +       case ELAN_CAP_TYPE_CYCLIC:
4698 +               if (position->pos_mode == ELAN_POS_UNKNOWN)
4699 +               {
4700 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: Position Unknown \n");
4701 +                       return (-EAGAIN);
4702 +               }
4703 +               
4704 +               if ( ! ( ELAN_USER_CONTEXT(cap->cap_lowcontext) && ELAN_USER_CONTEXT(cap->cap_highcontext)))
4705 +               {
4706 +                       ELAN_DEBUG4 (ELAN_DBG_VP, "elanmod_classify_cap:  USER_BASE_CONTEXT %d %d %d %d \n" ,  ELAN_USER_BASE_CONTEXT_NUM,cap->cap_lowcontext, cap->cap_highcontext ,ELAN_USER_TOP_CONTEXT_NUM);
4707 +                       return (-EINVAL);
4708 +               }
4709 +               if (cap->cap_lownode == ELAN_CAP_UNINITIALISED)
4710 +                       cap->cap_lownode = position->pos_nodeid;
4711 +               if (cap->cap_highnode == ELAN_CAP_UNINITIALISED)
4712 +                       cap->cap_highnode = position->pos_nodeid;
4713 +               
4714 +               if (cap->cap_lownode < 0 || cap->cap_highnode >= position->pos_nodes || cap->cap_lownode > cap->cap_highnode)
4715 +               {
4716 +                       ELAN_DEBUG3 ( ELAN_DBG_VP,"elanmod_classify_cap: low %d high %d pos %d \n" , cap->cap_lownode  ,cap->cap_highnode, position->pos_nodes);
4717 +                       
4718 +                       return (-EINVAL);
4719 +               }
4720 +               
4721 +               if ((cap->cap_highnode < position->pos_nodeid) || (cap->cap_lownode > position->pos_nodeid))
4722 +               {
4723 +                       ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: node not i range low %d high %d this %d\n",
4724 +                                    cap->cap_lownode, cap->cap_highnode, position->pos_nodeid);
4725 +                       return (-EINVAL);
4726 +               }
4727 +
4728 +               break;
4729 +       default:
4730 +               ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: cant decode type %x \n", cap->cap_type & ELAN_CAP_TYPE_MASK);
4731 +               return (-EINVAL);
4732 +
4733 +       }
4734 +
4735 +       switch (use)
4736 +       {
4737 +       case ELAN_USER_ATTACH:
4738 +       case ELAN_USER_DETACH:
4739 +               if (cap->cap_mycontext == ELAN_CAP_UNINITIALISED)
4740 +               {
4741 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext == ELAN_CAP_UNINITIALISED");
4742 +                       return (-EINVAL);
4743 +               }
4744 +       
4745 +               if ((cap->cap_mycontext != ELAN_CAP_UNINITIALISED) && 
4746 +                   (cap->cap_mycontext < cap->cap_lowcontext || cap->cap_mycontext > cap->cap_highcontext))
4747 +               {
4748 +                       ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext out of range %d %d %d \n", cap->cap_lowcontext,cap->cap_mycontext,cap->cap_highcontext);
4749 +                       return (-EINVAL);
4750 +               }   
4751 +               break;
4752 +
4753 +       case ELAN_USER_P2P:
4754 +               break;
4755 +
4756 +       case ELAN_USER_BROADCAST:
4757 +               if (! (cap->cap_type & ELAN_CAP_TYPE_BROADCASTABLE)) {
4758 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: use ELAN_USER_BROADCAST but cap not ELAN_CAP_TYPE_BROADCASTABLE\n");
4759 +                       return (-EINVAL);
4760 +               }
4761 +               break;
4762 +
4763 +       default:
4764 +               ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: unknown use (%d)\n",use);
4765 +               return (-EINVAL);
4766 +       }
4767 +
4768 +
4769 +
4770 +       /* is any ctxt an rms one ?? */
4771 +       if (ELAN_RMS_CONTEXT(cap->cap_lowcontext) || ELAN_RMS_CONTEXT(cap->cap_highcontext))
4772 +       {
4773 +               /* so both low and high must be */
4774 +               if (!(ELAN_RMS_CONTEXT(cap->cap_lowcontext) && ELAN_RMS_CONTEXT(cap->cap_highcontext))) 
4775 +               {
4776 +                       ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: not rms ctxt %x %x\n",cap->cap_lowcontext,cap->cap_highcontext );
4777 +                       return (-EINVAL);
4778 +               }
4779 +               ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_RMS\n");
4780 +               return (ELAN_CAP_RMS);
4781 +       }
4782 +
4783 +       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_OK\n");
4784 +       return (ELAN_CAP_OK);
4785 +}
4786 +
4787 +/*
4788 + * Local variables:
4789 + * c-file-style: "linux"
4790 + * End:
4791 + */
4792 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/elanmod_linux.c
4793 ===================================================================
4794 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/elanmod_linux.c       2004-02-23 16:02:56.000000000 -0500
4795 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/elanmod_linux.c    2005-07-28 14:52:52.781688920 -0400
4796 @@ -0,0 +1,410 @@
4797 +/*
4798 + *    Copyright (c) 2003 by Quadrics Ltd.
4799 + * 
4800 + *    For licensing information please see the supplied COPYING file
4801 + *
4802 + */
4803 +
4804 +#ident "@(#)$Id: elanmod_linux.c,v 1.16 2004/06/14 15:45:37 mike Exp $"
4805 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.c,v $*/
4806 +
4807 +#include <qsnet/kernel.h>
4808 +
4809 +#include <elan/elanmod.h>
4810 +#include <elan/elanmod_linux.h>
4811 +
4812 +#include <linux/module.h>
4813 +
4814 +#include <linux/sysctl.h>
4815 +#include <linux/init.h>
4816 +
4817 +#include <qsnet/procfs_linux.h>
4818 +
4819 +MODULE_AUTHOR("Quadrics Ltd.");
4820 +MODULE_DESCRIPTION("Elan support module");
4821 +
4822 +MODULE_LICENSE("GPL");
4823 +
4824 +/* elanmod.c */
4825 +EXPORT_SYMBOL(elanmod_classify_cap);
4826 +
4827 +/* bitmap.c */
4828 +#include <elan/bitmap.h>
4829 +
4830 +EXPORT_SYMBOL(bt_freebit);
4831 +EXPORT_SYMBOL(bt_lowbit); 
4832 +EXPORT_SYMBOL(bt_nextbit);
4833 +EXPORT_SYMBOL(bt_copy);
4834 +EXPORT_SYMBOL(bt_zero); 
4835 +EXPORT_SYMBOL(bt_fill); 
4836 +EXPORT_SYMBOL(bt_cmp); 
4837 +EXPORT_SYMBOL(bt_intersect);
4838 +EXPORT_SYMBOL(bt_remove); 
4839 +EXPORT_SYMBOL(bt_add); 
4840 +EXPORT_SYMBOL(bt_spans);
4841 +EXPORT_SYMBOL(bt_subset);  
4842 +EXPORT_SYMBOL(bt_up);
4843 +EXPORT_SYMBOL(bt_down);
4844 +EXPORT_SYMBOL(bt_nbits);
4845 +
4846 +/* capability.c */
4847 +EXPORT_SYMBOL(elan_nullcap);
4848 +EXPORT_SYMBOL(elan_detach_cap);
4849 +EXPORT_SYMBOL(elan_attach_cap);
4850 +EXPORT_SYMBOL(elan_validate_map);
4851 +
4852 +/* stats.c */
4853 +EXPORT_SYMBOL(elan_stats_register);
4854 +EXPORT_SYMBOL(elan_stats_deregister);
4855 +
4856 +/* device.c */
4857 +EXPORT_SYMBOL(elan_dev_deregister);
4858 +EXPORT_SYMBOL(elan_dev_register);
4859 +
4860 +/* debug */
4861 +int  elan_debug_mode = QSNET_DEBUG_BUFFER; 
4862 +int  elan_debug_mask;
4863 +
4864 +static struct proc_dir_entry *elan_procfs_root;
4865 +
4866 +extern void elan_procfs_init(void);
4867 +extern void elan_procfs_fini(void);
4868 +
4869 +static int elan_open    (struct inode *ino, struct file *fp);
4870 +static int elan_release (struct inode *ino, struct file *fp);
4871 +static int elan_ioctl   (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
4872 +
4873 +static struct file_operations elan_fops = 
4874 +{
4875 +       ioctl:   elan_ioctl,
4876 +       open:    elan_open,
4877 +       release: elan_release,
4878 +};
4879 +
4880 +static int __init elan_start(void)
4881 +{
4882 +       int res;
4883 +
4884 +       elan_procfs_init(); 
4885 +
4886 +       if ((res = elan_init()) != ESUCCESS)
4887 +       {
4888 +               elan_procfs_fini();
4889 +               return (-res);
4890 +       }
4891 +
4892 +       return (0);
4893 +}
4894 +
4895 +static void __exit elan_exit(void)
4896 +{
4897 +       elan_fini();
4898 +       elan_procfs_fini();
4899 +}
4900 +
4901 +
4902 +/* Declare the module init and exit functions */
4903 +void
4904 +elan_procfs_init()
4905 +{
4906 +       struct proc_dir_entry  *p;
4907 +       
4908 +       elan_procfs_root = proc_mkdir("elan",   qsnet_procfs_root);
4909 +       
4910 +       qsnet_proc_register_hex(elan_procfs_root, "debug_mask", &elan_debug_mask, 0);
4911 +       qsnet_proc_register_hex(elan_procfs_root, "debug_mode", &elan_debug_mode, 0);
4912 +
4913 +       if ((p = create_proc_entry ("ioctl", 0, elan_procfs_root)) != NULL)
4914 +       {
4915 +               p->proc_fops = &elan_fops;
4916 +               p->data      = 0;
4917 +               p->owner     = THIS_MODULE;
4918 +       }   
4919 +}
4920 +
4921 +void
4922 +elan_procfs_fini()
4923 +{
4924 +       remove_proc_entry ("debug_mask", elan_procfs_root);
4925 +       remove_proc_entry ("debug_mode", elan_procfs_root);
4926 +       
4927 +       remove_proc_entry ("ioctl",   elan_procfs_root); 
4928 +       remove_proc_entry ("version", elan_procfs_root);  
4929 +       
4930 +       remove_proc_entry ("elan",   qsnet_procfs_root);
4931 +}
4932 +
4933 +module_init(elan_start);
4934 +module_exit(elan_exit);
4935 +
4936 +static int
4937 +elan_open (struct inode *inode, struct file *fp)
4938 +{
4939 +       MOD_INC_USE_COUNT;
4940 +       fp->private_data = NULL;
4941 +       return (0);
4942 +}
4943 +
4944 +static int
4945 +elan_release (struct inode *inode, struct file *fp)
4946 +{
4947 +       /* mark all caps owned by fp to be destroyed */
4948 +       elan_destroy_cap(fp,NULL);
4949 +
4950 +       MOD_DEC_USE_COUNT;
4951 +       return (0);
4952 +}
4953 +
4954 +static int 
4955 +elan_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
4956 +{
4957 +       int rep = 0;
4958 +
4959 +       switch (cmd) 
4960 +       {
4961 +       case ELANCTRL_STATS_GET_NEXT :
4962 +       {
4963 +               ELANCTRL_STATS_GET_NEXT_STRUCT args;
4964 +
4965 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_NEXT_STRUCT)))
4966 +                       return (-EFAULT); 
4967 +
4968 +               /* uses copyin/copyout */
4969 +               if (elan_stats_get_next_index(args.statidx, args.next_statidx) != 0 ) 
4970 +                       return (-EINVAL);       
4971 +
4972 +               break;
4973 +       }
4974 +       case ELANCTRL_STATS_FIND_INDEX :
4975 +       {
4976 +               ELANCTRL_STATS_FIND_INDEX_STRUCT args;
4977 +
4978 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_FIND_INDEX_STRUCT)))
4979 +                       return (-EFAULT); 
4980 +
4981 +               /* uses copyin/copyout */
4982 +               if (elan_stats_find_index(args.block_name, args.statidx, args.num_entries) != 0 ) 
4983 +                       return (-EINVAL);       
4984 +
4985 +               break;
4986 +       }
4987 +       case ELANCTRL_STATS_GET_BLOCK_INFO :
4988 +       {
4989 +               ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT args;
4990 +               
4991 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT)))
4992 +                       return (-EFAULT);
4993 +
4994 +               /* uses copyin/copyout */
4995 +               if (elan_stats_get_block_info(args.statidx, args.block_name, args.num_entries) != 0 ) 
4996 +                       return (-EINVAL);
4997 +               break;          
4998 +       }
4999 +       case ELANCTRL_STATS_GET_INDEX_NAME :
5000 +       {
5001 +               ELANCTRL_STATS_GET_INDEX_NAME_STRUCT args;
5002 +               
5003 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_INDEX_NAME_STRUCT)))
5004 +                       return (-EFAULT);
5005 +
5006 +               /* uses copyin/copyout */
5007 +               if (elan_stats_get_index_name(args.statidx, args.index, args.name) != 0 )
5008 +                       return (-EINVAL);
5009 +               break;
5010 +       }
5011 +       case ELANCTRL_STATS_CLEAR_BLOCK :
5012 +       {
5013 +               ELANCTRL_STATS_CLEAR_BLOCK_STRUCT args;
5014 +               
5015 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_CLEAR_BLOCK_STRUCT)))
5016 +                       return (-EFAULT);
5017 +
5018 +               /* statidx is not a pointer */
5019 +               if (elan_stats_clear_block(args.statidx) != 0 )
5020 +                       return (-EINVAL);
5021 +               break;
5022 +       }
5023 +       case ELANCTRL_STATS_GET_BLOCK :
5024 +       {
5025 +               ELANCTRL_STATS_GET_BLOCK_STRUCT args;
5026 +               
5027 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_STRUCT)))
5028 +                       return (-EFAULT);
5029 +
5030 +               /* uses copyin/copyout */
5031 +               if (elan_stats_get_block(args.statidx, args.entries, args.values) != 0 )
5032 +                       return (-EINVAL);
5033 +               break;
5034 +       }
5035 +       case ELANCTRL_GET_DEVINFO :
5036 +       {
5037 +               ELANCTRL_GET_DEVINFO_STRUCT args;
5038 +               
5039 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_DEVINFO_STRUCT)))
5040 +                       return (-EFAULT);
5041 +
5042 +               /* uses copyin/copyout */
5043 +               if (elan_get_devinfo(args.devidx, args.devinfo) != 0 )
5044 +                       return (-EINVAL);
5045 +               break;          
5046 +       }
5047 +       case ELANCTRL_GET_POSITION :
5048 +       {
5049 +               ELANCTRL_GET_POSITION_STRUCT args;
5050 +               
5051 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_POSITION_STRUCT)))
5052 +                       return (-EFAULT); 
5053 +
5054 +               /* uses copyin/copyout */
5055 +               if (elan_get_position(args.devidx, args.position) != 0 )
5056 +                       return (-EINVAL);
5057 +               break;          
5058 +       }
5059 +       case ELANCTRL_SET_POSITION :
5060 +       {
5061 +               ELANCTRL_SET_POSITION_STRUCT args;
5062 +               
5063 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_SET_POSITION_STRUCT)))
5064 +                       return (-EFAULT);
5065 +
5066 +               /* uses copyin/copyout */
5067 +               if (elan_set_position(args.devidx, args.nodeId, args.numNodes) != 0 )
5068 +                       return (-EINVAL);       
5069 +               break;          
5070 +       }
5071 +       case ELANCTRL_CREATE_CAP  :
5072 +       {
5073 +               ELANCTRL_CREATE_CAP_STRUCT *args;
5074 +
5075 +               /* get space for args */
5076 +               KMEM_ALLOC(args, ELANCTRL_CREATE_CAP_STRUCT *, sizeof(ELANCTRL_CREATE_CAP_STRUCT), 1);
5077 +               if (args == NULL)
5078 +                       return(-ENOMEM);        
5079 +
5080 +               /* copy them */
5081 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_CAP_STRUCT)))
5082 +                       return (-EFAULT);
5083 +               else 
5084 +               {
5085 +                       if ((elan_validate_cap(&args->cap) != 0) || (elan_create_cap(fp,&args->cap) != 0 )) 
5086 +                               rep = (-EINVAL);
5087 +               }
5088 +
5089 +               /* free the space */
5090 +               KMEM_FREE(args, sizeof(ELANCTRL_CREATE_CAP_STRUCT));
5091 +
5092 +               break;          
5093 +       }
5094 +       case ELANCTRL_DESTROY_CAP  :
5095 +       {
5096 +               ELANCTRL_DESTROY_CAP_STRUCT *args;
5097 +
5098 +               /* get space for args */
5099 +               KMEM_ALLOC(args, ELANCTRL_DESTROY_CAP_STRUCT *, sizeof(ELANCTRL_DESTROY_CAP_STRUCT), 1);
5100 +               if (args == NULL)
5101 +                       return(-ENOMEM);        
5102 +
5103 +               /* copy them */
5104 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_CAP_STRUCT)))
5105 +                       rep = (-EFAULT);
5106 +               else 
5107 +               {
5108 +                       if (elan_destroy_cap(fp, &args->cap) != 0 )
5109 +                               rep = (-EINVAL);
5110 +               }
5111 +
5112 +               /* free the space */
5113 +               KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_CAP_STRUCT));
5114 +
5115 +               break;          
5116 +       }
5117 +       case ELANCTRL_CREATE_VP  :
5118 +       {
5119 +               ELANCTRL_CREATE_VP_STRUCT *args;
5120 +
5121 +               /* get space for args */
5122 +               KMEM_ALLOC(args, ELANCTRL_CREATE_VP_STRUCT *, sizeof(ELANCTRL_CREATE_VP_STRUCT), 1);
5123 +               if (args == NULL)
5124 +                       return(-ENOMEM);        
5125 +
5126 +               /* copy them */
5127 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_VP_STRUCT)))
5128 +                       return (-EFAULT);
5129 +               else
5130 +               {
5131 +                       if ((elan_validate_cap( &args->map) != 0) || (elan_create_vp(fp, &args->cap, &args->map) != 0 ))
5132 +                               rep = (-EINVAL);        
5133 +               }
5134 +
5135 +               KMEM_FREE(args, sizeof(ELANCTRL_CREATE_VP_STRUCT ));
5136 +
5137 +               break;          
5138 +       }
5139 +       case ELANCTRL_DESTROY_VP  :
5140 +       {
5141 +               ELANCTRL_DESTROY_VP_STRUCT *args;
5142 +
5143 +               /* get space for args */
5144 +               KMEM_ALLOC(args, ELANCTRL_DESTROY_VP_STRUCT *, sizeof(ELANCTRL_DESTROY_VP_STRUCT), 1);
5145 +               if (args == NULL)
5146 +                       return(-ENOMEM);        
5147 +               
5148 +               /* copy them */
5149 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_VP_STRUCT)))
5150 +                       rep = (-EFAULT);
5151 +               else 
5152 +               {
5153 +                       if (elan_destroy_vp(fp, &args->cap, &args->map) != 0 )
5154 +                               rep = (-EINVAL);        
5155 +               }
5156 +
5157 +               KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_VP_STRUCT ));
5158 +
5159 +               break;          
5160 +       }
5161 +
5162 +       case ELANCTRL_GET_CAPS  :
5163 +       {
5164 +               ELANCTRL_GET_CAPS_STRUCT args;
5165 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_CAPS_STRUCT)))
5166 +                       return (-EFAULT);
5167 +
5168 +               /* uses copyin/copyout */
5169 +               if (elan_get_caps(args.number_of_results, args.array_size, args.caps) != 0 )
5170 +                       return (-EINVAL);
5171 +               break;          
5172 +       }
5173 +       case ELANCTRL_DEBUG_DUMP :
5174 +       {
5175 +               elan_cap_dump();
5176 +               elan_dev_dump();
5177 +
5178 +               break;
5179 +       }
5180 +       case ELANCTRL_DEBUG_BUFFER :
5181 +       {
5182 +               ELANCTRL_DEBUG_BUFFER_STRUCT args;
5183 +
5184 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
5185 +                       return (-EFAULT);
5186 +
5187 +               /* uses copyin/copyout */
5188 +               if ((args.size = qsnet_debug_buffer (args.buffer, args.size)) != -1 &&
5189 +                   copy_to_user ((void *) arg, &args, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
5190 +                       return (-EFAULT);
5191 +               break;
5192 +       }
5193 +       default:
5194 +               return (-EINVAL);
5195 +               break;
5196 +       }
5197 +
5198 +       return (rep);
5199 +}
5200 +
5201 +
5202 +/*
5203 + * Local variables:
5204 + * c-file-style: "linux"
5205 + * End:
5206 + */
5207 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/Makefile
5208 ===================================================================
5209 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/Makefile      2004-02-23 16:02:56.000000000 -0500
5210 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/Makefile   2005-07-28 14:52:52.782688768 -0400
5211 @@ -0,0 +1,15 @@
5212 +#
5213 +# Makefile for Quadrics QsNet
5214 +#
5215 +# Copyright (c) 2002-2004 Quadrics Ltd
5216 +#
5217 +# File: drivers/net/qsnet/elan/Makefile
5218 +#
5219 +
5220 +
5221 +#
5222 +
5223 +obj-$(CONFIG_QSNET)    += elan.o
5224 +elan-objs      := elanmod.o device.o stats.o devinfo.o capability.o elanmod_linux.o capability_general.o bitmap.o
5225 +
5226 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
5227 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/Makefile.conf
5228 ===================================================================
5229 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/Makefile.conf 2004-02-23 16:02:56.000000000 -0500
5230 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/Makefile.conf      2005-07-28 14:52:52.782688768 -0400
5231 @@ -0,0 +1,10 @@
5232 +# Flags for generating QsNet Linux Kernel Makefiles
5233 +MODNAME                =       elan.o
5234 +MODULENAME     =       elan
5235 +KOBJFILES      =       elanmod.o device.o stats.o devinfo.o capability.o elanmod_linux.o capability_general.o bitmap.o
5236 +EXPORT_KOBJS   =       elanmod_linux.o 
5237 +CONFIG_NAME    =       CONFIG_QSNET
5238 +SGALFC         =       
5239 +# EXTRALINES START
5240 +
5241 +# EXTRALINES END
5242 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/quadrics_version.h
5243 ===================================================================
5244 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/quadrics_version.h    2004-02-23 16:02:56.000000000 -0500
5245 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/quadrics_version.h 2005-07-28 14:52:52.782688768 -0400
5246 @@ -0,0 +1 @@
5247 +#define QUADRICS_VERSION "4.31qsnet"
5248 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan/stats.c
5249 ===================================================================
5250 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan/stats.c       2004-02-23 16:02:56.000000000 -0500
5251 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan/stats.c    2005-07-28 14:52:52.783688616 -0400
5252 @@ -0,0 +1,277 @@
5253 +/*
5254 + *    Copyright (c) 2003 by Quadrics Ltd.
5255 + * 
5256 + *    For licensing information please see the supplied COPYING file
5257 + *
5258 + */
5259 +
5260 +#ident "@(#)$Id: stats.c,v 1.6 2003/09/24 13:55:37 david Exp $"
5261 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/stats.c,v $*/
5262 +
5263 +#include <qsnet/kernel.h>
5264 +#include <elan/elanmod.h>
5265 +
5266 +static LIST_HEAD(elan_stats_list);
5267 +static ELAN_STATS_IDX elan_next_statidx=0;
5268 +
5269 +ELAN_STATS_STRUCT *
5270 +elan_stats_find(ELAN_STATS_IDX statidx)
5271 +{
5272 +       struct list_head     *tmp;
5273 +       ELAN_STATS_STRUCT *ptr=NULL;
5274 +
5275 +       list_for_each(tmp, &elan_stats_list) {
5276 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
5277 +               if ( ptr->statidx == statidx ) 
5278 +                       return ptr;
5279 +       }
5280 +
5281 +       ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find failed %d\n", statidx);    
5282 +       return NULL;
5283 +}
5284 +
5285 +ELAN_STATS_STRUCT *
5286 +elan_stats_find_by_name(caddr_t block_name)
5287 +{
5288 +       struct list_head     *tmp;
5289 +       ELAN_STATS_STRUCT *ptr=NULL;
5290 +
5291 +       list_for_each(tmp, &elan_stats_list)    {
5292 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
5293 +               if (!strcmp(ptr->block_name, block_name)) 
5294 +               {
5295 +                       ELAN_DEBUG3 (ELAN_DBG_CTRL, "elan_stats_find_by_name found %s (%d,%d)\n", block_name, ptr->statidx, ptr->num_entries);  
5296 +                       return ptr;
5297 +               }
5298 +       }
5299 +
5300 +       ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find_by_name failed %s\n", block_name);
5301 +       return NULL;
5302 +}
5303 +
5304 +ELAN_STATS_STRUCT *
5305 +elan_stats_find_next(ELAN_STATS_IDX statidx)
5306 +{
5307 +       struct list_head     *tmp;
5308 +       ELAN_STATS_STRUCT *ptr=NULL;
5309 +
5310 +       list_for_each(tmp, &elan_stats_list) {
5311 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
5312 +         
5313 +               if ( ptr->statidx > statidx ) 
5314 +                       return ptr;       
5315 +       }       
5316 +
5317 +       return NULL;
5318 +}
5319 +
5320 +int 
5321 +elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_block)
5322 +{
5323 +       ELAN_STATS_STRUCT *target;
5324 +       ELAN_STATS_IDX        next = 0;
5325 +
5326 +       kmutex_lock(&elan_mutex);
5327 +
5328 +       if ((target = elan_stats_find_next(statidx)) != NULL)
5329 +               next = target->statidx;
5330 +
5331 +       copyout(&next, next_block, sizeof(ELAN_STATS_IDX) );
5332 +
5333 +       kmutex_unlock(&elan_mutex);
5334 +       return 0;
5335 +}
5336 +
5337 +int 
5338 +elan_stats_find_index  (caddr_t  block_name, ELAN_STATS_IDX *statidx,  uint *num_entries)
5339 +
5340 +{
5341 +       ELAN_STATS_STRUCT *target;
5342 +       ELAN_STATS_IDX        index   = 0;
5343 +       uint                  entries = 0;
5344 +
5345 +       kmutex_lock(&elan_mutex);
5346 +
5347 +       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_find_index %s \n", block_name);
5348 +
5349 +       if ((target = elan_stats_find_by_name(block_name)) != NULL)
5350 +       {
5351 +               index   = target->statidx;
5352 +               entries = target->num_entries;
5353 +       }
5354 +
5355 +       ELAN_DEBUG3(ELAN_DBG_CTRL, "elan_stats_find_index found %d %d (target=%p)\n", index, entries, target);
5356 +
5357 +       copyout(&index,   statidx,     sizeof(ELAN_STATS_IDX));
5358 +       copyout(&entries, num_entries, sizeof(uint));
5359 +
5360 +       kmutex_unlock(&elan_mutex);
5361 +       return  ESUCCESS;
5362 +}
5363 +
5364 +int 
5365 +elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t  block_name, uint *num_entries)
5366 +{
5367 +       ELAN_STATS_STRUCT *target;
5368 +       int                   res=EINVAL;
5369 +
5370 +       kmutex_lock(&elan_mutex);
5371 +
5372 +       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_block_info statidx %d\n",statidx);
5373 +
5374 +       if ((target = elan_stats_find(statidx)) != NULL)
5375 +       {
5376 +               ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_block_info name %s entries %d\n",block_name, *num_entries);
5377 +               
5378 +               copyout( target->block_name,  block_name,  ELAN_STATS_NAME_MAX_LEN);
5379 +               copyout(&target->num_entries, num_entries, sizeof(uint));
5380 +
5381 +               res = ESUCCESS;
5382 +       }
5383 +
5384 +       kmutex_unlock(&elan_mutex);
5385 +       return res;
5386 +}
5387 +
5388 +int 
5389 +elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name)
5390 +{
5391 +       ELAN_STATS_STRUCT *target;
5392 +       int                   res=EINVAL;
5393 +
5394 +       kmutex_lock(&elan_mutex);
5395 +
5396 +       ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_index_name statidx %d index %d\n",statidx, index);
5397 +
5398 +       if ((target = elan_stats_find(statidx)) != NULL)
5399 +       {
5400 +               if ( target->ops->elan_stats_get_name== NULL) 
5401 +               {
5402 +                       ELAN_DEBUG0(ELAN_DBG_CTRL, "elan_stats_get_index_name no callback\n");  
5403 +                       kmutex_unlock(&elan_mutex);
5404 +                       return  res;
5405 +               }
5406 +
5407 +               if ((res = target->ops->elan_stats_get_name(target->arg, index, name)) == 0)
5408 +                       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_index_name name %s\n",name); 
5409 +
5410 +       }
5411 +       kmutex_unlock(&elan_mutex);
5412 +       return  res;
5413 +}
5414 +
5415 +int 
5416 +elan_stats_get_block (ELAN_STATS_IDX statidx, uint entries, ulong *values)
5417 +{
5418 +       ELAN_STATS_STRUCT *target;
5419 +       int                   res=EINVAL;
5420 +
5421 +       kmutex_lock(&elan_mutex);
5422 +
5423 +       
5424 +       if ((target = elan_stats_find(statidx)) != NULL)
5425 +       {
5426 +               if ( target->ops->elan_stats_get_block == NULL) 
5427 +               {
5428 +                       kmutex_unlock(&elan_mutex);
5429 +                       return  res;
5430 +               }
5431 +
5432 +               res = target->ops->elan_stats_get_block(target->arg, entries, values);
5433 +       }
5434 +
5435 +       kmutex_unlock(&elan_mutex);
5436 +       return  res;
5437 +}
5438 +
5439 +int 
5440 +elan_stats_clear_block (ELAN_STATS_IDX statidx)
5441 +{
5442 +       ELAN_STATS_STRUCT *target;
5443 +       int                   res=EINVAL;
5444 +
5445 +       kmutex_lock(&elan_mutex);
5446 +
5447 +       if ((target = elan_stats_find(statidx)) != NULL)
5448 +       {
5449 +               if ( target->ops->elan_stats_clear_block == NULL) 
5450 +               {
5451 +                       kmutex_unlock(&elan_mutex);
5452 +                       return  res;
5453 +               }
5454 +       
5455 +               res = target->ops->elan_stats_clear_block(target->arg);
5456 +       }
5457 +       kmutex_unlock(&elan_mutex);
5458 +       return  res;
5459 +}
5460 +
5461 +void
5462 +elan_stats_next_statidx(void)
5463 +{
5464 +       /* XXXXX need to put not in use check here incase we loop MRH */
5465 +       /* tho its a bigish loop :)                                   */
5466 +       elan_next_statidx++;
5467 +       if (!elan_next_statidx)
5468 +               elan_next_statidx++;
5469 +}
5470 +
5471 +int 
5472 +elan_stats_register (ELAN_STATS_IDX    *statidx, 
5473 +                       char              *block_name, 
5474 +                       uint               num_entries,
5475 +                       ELAN_STATS_OPS *ops,
5476 +                       void              *arg)
5477 +{
5478 +       ELAN_STATS_STRUCT *target;
5479 +
5480 +       kmutex_lock(&elan_mutex);
5481 +
5482 +       /* create it and add */
5483 +       KMEM_ALLOC(target, ELAN_STATS_STRUCT *, sizeof(ELAN_STATS_STRUCT), 1);
5484 +       if (target == NULL)
5485 +       {
5486 +               kmutex_unlock(&elan_mutex);
5487 +               return  ENOMEM;
5488 +       }
5489 +
5490 +       elan_stats_next_statidx();
5491 +
5492 +       *statidx = elan_next_statidx;
5493 +
5494 +       target->statidx     = elan_next_statidx;
5495 +       target->num_entries = num_entries;
5496 +       target->ops         = ops;
5497 +       target->arg         = arg;
5498 +       strcpy(target->block_name, block_name);
5499 +       
5500 +       list_add_tail(&target->node, &elan_stats_list);
5501 +
5502 +       kmutex_unlock(&elan_mutex);
5503 +       return  0;
5504 +}
5505 +
5506 +int
5507 +elan_stats_deregister (ELAN_STATS_IDX statidx)
5508 +{
5509 +       ELAN_STATS_STRUCT *target;
5510 +
5511 +       kmutex_lock(&elan_mutex);
5512 +       if ((target = elan_stats_find(statidx)) != NULL)
5513 +       {
5514 +
5515 +               list_del(&target->node);
5516 +               
5517 +               /* delete target entry */
5518 +               KMEM_FREE(target, sizeof(ELAN_STATS_STRUCT));
5519 +       }
5520 +       kmutex_unlock(&elan_mutex);
5521 +
5522 +       return  target == NULL ? EINVAL : 0;
5523 +}
5524 +
5525 +/*
5526 + * Local variables:
5527 + * c-file-style: "linux"
5528 + * End:
5529 + */
5530 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/context.c
5531 ===================================================================
5532 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/context.c    2004-02-23 16:02:56.000000000 -0500
5533 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/context.c 2005-07-28 14:52:52.786688160 -0400
5534 @@ -0,0 +1,2101 @@
5535 +/*
5536 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
5537 + * 
5538 + *    For licensing information please see the supplied COPYING file
5539 + *
5540 + */
5541 +
5542 +#ident "@(#)$Id: context.c,v 1.116.2.1 2004/11/12 14:24:18 mike Exp $"
5543 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/context.c,v $ */
5544 +
5545 +#include <qsnet/kernel.h>
5546 +#include <qsnet/autoconf.h>
5547 +#include <elan/elanmod.h>
5548 +#include <elan3/elanregs.h>
5549 +#include <elan3/elandev.h>
5550 +#include <elan3/elanvp.h>
5551 +#include <elan3/elan3mmu.h>
5552 +#include <elan3/elanctxt.h>
5553 +#include <elan3/elan3mmu.h>
5554 +#include <elan3/elandebug.h>
5555 +#include <elan3/urom_addrs.h>
5556 +#include <elan3/thread.h>
5557 +#include <elan3/vmseg.h>
5558 +#include <elan3/elan3ops.h>
5559 +#include <elan3/elansyscall.h>
5560 +/*
5561 + * Global variables configurable from /etc/system file
5562 + *     (OR /etc/sysconfigtab on Digital UNIX)
5563 + */
5564 +int ntrapped_threads   = 64;
5565 +int ntrapped_dmas      = 64;
5566 +int ntrapped_events    = E3_NonSysCntxQueueSize + 128;
5567 +int ntrapped_commands  = 64;
5568 +int noverflow_commands = 1024;
5569 +int nswapped_threads   = 64;
5570 +int nswapped_dmas      = 64;
5571 +
5572 +#define NUM_HALTOPS    8
5573 +
5574 +void *SwapListsLockInfo;
5575 +void *CmdLockInfo;
5576 +
5577 +static void HaltSwapContext (ELAN3_DEV *dev, void *arg);
5578 +
5579 +static char *OthersStateStrings[]  = {"others_running", "others_halting", "others_swapping", 
5580 +                                     "others_halting_more", "others_swapping_more", "others_swapped"};
5581 +
5582 +ELAN3_CTXT *
5583 +elan3_alloc (ELAN3_DEV *dev, int  kernel)
5584 +{
5585 +    ELAN3_CTXT    *ctxt;
5586 +    int           i;
5587 +    unsigned long flags;
5588 +
5589 +    PRINTF1 (DBG_DEVICE, DBG_FN, "elan3_alloc: %s\n", kernel ? "kernel" : "user");
5590 +
5591 +    KMEM_ZALLOC (ctxt, ELAN3_CTXT *, sizeof (ELAN3_CTXT), TRUE);
5592 +    
5593 +    if (ctxt == NULL)
5594 +       return (NULL);
5595 +
5596 +    elan_nullcap (&ctxt->Capability);
5597 +
5598 +    ctxt->Device      = dev;
5599 +    ctxt->OthersState = CTXT_OTHERS_SWAPPED;
5600 +    ctxt->RefCnt      = 1;
5601 +    ctxt->Position    = dev->Position;
5602 +
5603 +    if (kernel)
5604 +       ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_KERNEL;
5605 +    else
5606 +       ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_NO_LWPS;
5607 +
5608 +    ctxt->Elan3mmu = elan3mmu_alloc (ctxt);
5609 +
5610 +    kcondvar_init (&ctxt->Wait);
5611 +    kcondvar_init (&ctxt->CommandPortWait);
5612 +    kcondvar_init (&ctxt->LwpWait);
5613 +    kcondvar_init (&ctxt->HaltWait);
5614 +
5615 +    spin_lock_init (&ctxt->InputFaultLock);
5616 +
5617 +    kmutex_init (&ctxt->SwapListsLock);
5618 +    kmutex_init (&ctxt->CmdPortLock);
5619 +    kmutex_init (&ctxt->NetworkErrorLock);
5620 +    kmutex_init (&ctxt->CmdLock);
5621 +
5622 +    krwlock_init (&ctxt->VpLock);
5623 +
5624 +    KMEM_GETPAGES (ctxt->FlagPage, ELAN3_FLAGSTATS *, 1, TRUE);
5625 +    if (!ctxt->FlagPage)
5626 +       goto error;
5627 +    bzero ((char *) ctxt->FlagPage, PAGESIZE);
5628 +
5629 +    KMEM_ZALLOC (ctxt->CommandTraps, COMMAND_TRAP *,    sizeof (COMMAND_TRAP)    * ntrapped_commands, TRUE);
5630 +    if (!ctxt->CommandTraps)
5631 +       goto error;
5632 +
5633 +    KMEM_ZALLOC (ctxt->ThreadTraps,  THREAD_TRAP *,     sizeof (THREAD_TRAP)     * ntrapped_threads,  TRUE);
5634 +    if (!ctxt->ThreadTraps)
5635 +       goto error;
5636 +
5637 +    KMEM_ZALLOC (ctxt->DmaTraps,     DMA_TRAP *,        sizeof (DMA_TRAP)        * ntrapped_dmas,     TRUE);
5638 +    if (!ctxt->DmaTraps)
5639 +       goto error;
5640 +
5641 +    KMEM_ZALLOC (ctxt->EventCookies, EVENT_COOKIE *,    sizeof (EVENT_COOKIE)    * ntrapped_events,   TRUE);
5642 +    if (!ctxt->EventCookies)
5643 +       goto error;
5644 +
5645 +    KMEM_ZALLOC (ctxt->Commands,     CProcTrapBuf_BE *, sizeof (CProcTrapBuf_BE) * noverflow_commands,TRUE);
5646 +    if (!ctxt->Commands)
5647 +       goto error;
5648 +
5649 +    KMEM_ZALLOC (ctxt->SwapThreads,  E3_Addr *,         sizeof (E3_Addr)         * nswapped_threads,  TRUE);
5650 +    if (!ctxt->SwapThreads)
5651 +       goto error;
5652 +
5653 +    KMEM_ZALLOC (ctxt->SwapDmas,     E3_DMA_BE *,       sizeof (E3_DMA_BE)       * nswapped_dmas,     TRUE);
5654 +    if (!ctxt->SwapDmas)
5655 +       goto error;
5656 +
5657 +    /*
5658 +     * "slop" is defined as follows :
5659 +     *     number of entries REQUIRED to be left spare to consume all other traps
5660 +     *     up until the time that the context can be swapped out.
5661 +     *  
5662 +     * CommandTrapQ : 1 command issued by main + 1 issued by the thread processor per elan
5663 +     * ThreadTrapQ  : 2 from command + 2 input
5664 +     * DmaTrapQ     : 2 from command + 2 input
5665 +     * EventTrapQ   : 2 from command + 1 thread + 1 dma + 2 input + E3_NonSysCntxQueueSize
5666 +     */
5667 +    spin_lock_irqsave (&dev->IntrLock, flags);
5668 +    ELAN3_QUEUE_INIT (ctxt->CommandTrapQ, ntrapped_commands,  2);
5669 +    ELAN3_QUEUE_INIT (ctxt->ThreadTrapQ,  ntrapped_threads,   4);
5670 +    ELAN3_QUEUE_INIT (ctxt->DmaTrapQ,     ntrapped_dmas,      4);
5671 +    ELAN3_QUEUE_INIT (ctxt->EventCookieQ, ntrapped_events,    MIN(E3_NonSysCntxQueueSize + 6, ntrapped_events - 6));
5672 +    ELAN3_QUEUE_INIT (ctxt->CommandQ,     noverflow_commands, 0);
5673 +    ELAN3_QUEUE_INIT (ctxt->SwapThreadQ,  nswapped_threads,   0);
5674 +    ELAN3_QUEUE_INIT (ctxt->SwapDmaQ,     nswapped_dmas,      0);
5675 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5676 +
5677 +#if defined(DIGITAL_UNIX)
5678 +    /* Allocate the segelan for the command port */
5679 +    if (! kernel && elan3_segelan3_create (ctxt) == NULL)
5680 +    {
5681 +       elan3_detach(ctxt);
5682 +       elan3_free (ctxt);
5683 +       return ((ELAN3_CTXT *) NULL);
5684 +    }
5685 +#endif
5686 +
5687 +    /*
5688 +     * Initialise the Input Fault list 
5689 +     */
5690 +    spin_lock (&ctxt->InputFaultLock);
5691 +    for (i = 0; i < NUM_INPUT_FAULT_SAVE; i++)
5692 +       ctxt->InputFaults[i].Next = (i == (NUM_INPUT_FAULT_SAVE-1)) ? NULL : &ctxt->InputFaults[i+1];
5693 +    ctxt->InputFaultList = &ctxt->InputFaults[0];
5694 +    spin_unlock (&ctxt->InputFaultLock);
5695 +
5696 +    ReserveHaltOperations (dev, NUM_HALTOPS, TRUE);
5697 +    
5698 +    if ((ctxt->RouteTable = AllocateRouteTable (ctxt->Device, ELAN3_MAX_VPS)) == NULL)
5699 +    {
5700 +       PRINTF0 (DBG_DEVICE, DBG_FN, "elan3_alloc: cannot map route table\n");
5701 +       elan3_detach(ctxt);
5702 +       elan3_free (ctxt);
5703 +       return ((ELAN3_CTXT *) NULL);
5704 +    }  
5705 +
5706 +    return (ctxt);
5707 +
5708 +
5709 + error:
5710 +
5711 +    elan3_detach(ctxt);
5712 +    elan3_free (ctxt);
5713 +    if (ctxt->FlagPage)
5714 +       KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1);
5715 +    if (ctxt->CommandTraps)
5716 +       KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP)    * ntrapped_commands);
5717 +    if (ctxt->ThreadTraps)
5718 +       KMEM_FREE ((void *) ctxt->ThreadTraps,  sizeof (THREAD_TRAP)     * ntrapped_threads);
5719 +    if (ctxt->DmaTraps)
5720 +       KMEM_FREE ((void *) ctxt->DmaTraps,     sizeof (DMA_TRAP)        * ntrapped_dmas);
5721 +    if (ctxt->EventCookies)
5722 +       KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE)    * ntrapped_events);
5723 +    if (ctxt->Commands)
5724 +       KMEM_FREE ((void *) ctxt->Commands,     sizeof (CProcTrapBuf_BE) * noverflow_commands);
5725 +    if (ctxt->SwapThreads)
5726 +       KMEM_FREE ((void *) ctxt->SwapThreads,  sizeof (E3_Addr)         * nswapped_threads);
5727 +    if (ctxt->SwapDmas)
5728 +       KMEM_FREE ((void *) ctxt->SwapDmas,     sizeof (E3_DMA_BE)       * nswapped_dmas);
5729 +
5730 +    kcondvar_destroy (&ctxt->Wait);
5731 +    kcondvar_destroy (&ctxt->CommandPortWait);
5732 +    kcondvar_destroy (&ctxt->LwpWait);
5733 +    kcondvar_destroy (&ctxt->HaltWait);
5734 +
5735 +    kmutex_destroy (&ctxt->SwapListsLock);
5736 +    kmutex_destroy (&ctxt->CmdLock);
5737 +    kmutex_destroy (&ctxt->NetworkErrorLock);
5738 +    spin_lock_destroy  (&ctxt->InputFaultLock);
5739 +
5740 +    krwlock_destroy (&ctxt->VpLock);
5741 +
5742 +    KMEM_FREE (ctxt, sizeof (ELAN3_CTXT));
5743 +
5744 +    return (NULL);
5745 +}
5746 +
5747 +void
5748 +elan3_free (ELAN3_CTXT *ctxt)
5749 +{
5750 +    ELAN3_DEV     *dev = ctxt->Device;
5751 +    NETERR_FIXUP *nef;
5752 +    
5753 +    PRINTF1 (ctxt, DBG_FN, "elan3_free: %p \n", ctxt);
5754 +   
5755 +    elan3_removevp (ctxt, ELAN3_INVALID_PROCESS);                      /* Remove any virtual process mappings */
5756 +
5757 +#if defined(DIGITAL_UNIX)
5758 +    WaitForContext (ctxt);                                     /* wait for all references to this context to go away */
5759 +#endif
5760 +
5761 +    if (ctxt->RouteTable)
5762 +       FreeRouteTable (dev, ctxt->RouteTable);
5763 +    ctxt->RouteTable = NULL;
5764 +
5765 +    elan3mmu_free (ctxt->Elan3mmu);                            /* free of our Elan3mmu  */
5766 +
5767 +    if (ctxt->Private)                                         /* Call back to "user" to free off  */
5768 +       ELAN3_OP_FREE_PRIVATE (ctxt);                           /* private data */
5769 +
5770 +#if defined(DIGITAL_UNIX)
5771 +    if (! CTXT_IS_KERNEL(ctxt))
5772 +       elan3_segelan3_destroy (ctxt);                          /* Unmap the command port from the users address space. */
5773 +#endif
5774 +   
5775 +    ReleaseHaltOperations (dev, NUM_HALTOPS);
5776 +
5777 +    if (ctxt->Input0Resolver)
5778 +       CancelNetworkErrorResolver (ctxt->Input0Resolver);
5779 +
5780 +    if (ctxt->Input1Resolver)
5781 +       CancelNetworkErrorResolver (ctxt->Input1Resolver);
5782 +
5783 +    while ((nef = ctxt->NetworkErrorFixups) != NULL)
5784 +    {
5785 +       ctxt->NetworkErrorFixups = nef->Next;
5786 +
5787 +       CompleteNetworkErrorFixup (ctxt, nef, ESRCH);
5788 +    }
5789 +
5790 +    KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1);
5791 +
5792 +    KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP)    * ntrapped_commands);
5793 +    KMEM_FREE ((void *) ctxt->ThreadTraps,  sizeof (THREAD_TRAP)     * ntrapped_threads);
5794 +    KMEM_FREE ((void *) ctxt->DmaTraps,     sizeof (DMA_TRAP)        * ntrapped_dmas);
5795 +    KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE)    * ntrapped_events);
5796 +    KMEM_FREE ((void *) ctxt->Commands,     sizeof (CProcTrapBuf_BE) * noverflow_commands);
5797 +    KMEM_FREE ((void *) ctxt->SwapThreads,  sizeof (E3_Addr)         * nswapped_threads);
5798 +    KMEM_FREE ((void *) ctxt->SwapDmas,     sizeof (E3_DMA_BE)       * nswapped_dmas);
5799 +
5800 +    kcondvar_destroy (&ctxt->Wait);
5801 +    kcondvar_destroy (&ctxt->CommandPortWait);
5802 +    kcondvar_destroy (&ctxt->LwpWait);
5803 +    kcondvar_destroy (&ctxt->HaltWait);
5804 +
5805 +    kmutex_destroy (&ctxt->SwapListsLock);
5806 +    kmutex_destroy (&ctxt->CmdLock);
5807 +    kmutex_destroy (&ctxt->NetworkErrorLock);
5808 +    spin_lock_destroy  (&ctxt->InputFaultLock);
5809 +
5810 +    krwlock_destroy (&ctxt->VpLock);
5811 +
5812 +    KMEM_FREE (ctxt, sizeof (ELAN3_CTXT));
5813 +}
5814 +
5815 +int 
5816 +elan3_doattach(ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
5817 +{
5818 +    unsigned long pgnum = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) / PAGE_SIZE;
5819 +    unsigned long pgoff = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) & (PAGE_SIZE-1);
5820 +    ELAN3_DEV     *dev   = ctxt->Device;
5821 +    int           res   = ESUCCESS;
5822 +    unsigned long flags;
5823 +
5824 +    /* Map in the command port for this context */
5825 +    if (MapDeviceRegister (dev, ELAN3_BAR_COMMAND_PORT, &ctxt->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ctxt->CommandPageHandle) != ESUCCESS)
5826 +    {
5827 +       PRINTF0 (ctxt, DBG_FN, "elan3_doattach: MapDeviceRegister failed");
5828 +       return (EINVAL);
5829 +    }
5830 +
5831 +    ctxt->CommandPort = ctxt->CommandPage + pgoff;
5832 +
5833 +    spin_lock_irqsave (&dev->IntrLock, flags);
5834 +
5835 +    res = 0;
5836 +    if (ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) != NULL)
5837 +       res = EBUSY;
5838 +    else
5839 +    {
5840 +       if ((res = elan3mmu_attach (ctxt->Device, cap->cap_mycontext, ctxt->Elan3mmu, 
5841 +                                   ctxt->RouteTable->Table, ctxt->RouteTable->Size-1)) == 0)
5842 +       {
5843 +           ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) = ctxt;
5844 +           ctxt->Capability                            = *cap;
5845 +       }
5846 +    }
5847 +
5848 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5849 +
5850 +    if (res == ESUCCESS)
5851 +       elan3_swapin (ctxt, CTXT_DETACHED);
5852 +    else 
5853 +    {
5854 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
5855 +       ctxt->CommandPage = (ioaddr_t) 0; 
5856 +       ctxt->CommandPort = (ioaddr_t) 0;
5857 +    }
5858 +
5859 +    return (res);
5860 +}
5861 +
5862 +void
5863 +elan3_destroy_callback( void * args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
5864 +{
5865 +    if (map == NULL) 
5866 +    {
5867 +       /* the cap is being destroyed */
5868 +       PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the cap is being destroyed \n");
5869 +    }
5870 +    else
5871 +    {
5872 +       /* the map is being destroyed */
5873 +       PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the map is being destroyed \n");
5874 +    }
5875 +}
5876 +
5877 +int
5878 +elan3_attach (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
5879 +{
5880 +    ELAN3_DEV *dev = ctxt->Device;
5881 +    int type;
5882 +    int res;
5883 +
5884 +    switch (type = elan3_validate_cap (dev, cap, ELAN_USER_ATTACH))
5885 +    {
5886 +    case ELAN_CAP_OK:
5887 +       /* nothing */
5888 +       break;
5889 +
5890 +    case ELAN_CAP_RMS:
5891 +       if ((res = elan_attach_cap(cap, dev->Devinfo.dev_rail, ctxt, elan3_destroy_callback)) != 0)
5892 +           return res;
5893 +       break;
5894 +
5895 +    default:
5896 +       return (EINVAL);
5897 +    }
5898 +
5899 +    if (((res = elan3_doattach(ctxt,cap)) != ESUCCESS) && (type == ELAN_CAP_RMS))
5900 +       elan_detach_cap(cap, dev->Devinfo.dev_rail);
5901 +
5902 +    return res;
5903 +}
5904 +
5905 +void
5906 +elan3_detach ( ELAN3_CTXT *ctxt )
5907 +{
5908 +    ELAN3_DEV   *dev                 = ctxt->Device;
5909 +    int need_to_call_elanmod_detach = 0;
5910 +    unsigned long flags;
5911 +
5912 +    PRINTF1 (ctxt, DBG_FN, "elan3_detach: %p \n", ctxt );
5913 +    
5914 +    if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
5915 +    {
5916 +       PRINTF0 (ctxt, DBG_FN, "elan3_detach: context not attached \n");
5917 +       return ;
5918 +    }
5919 +
5920 +    /* must you be in the ctx_table ?? */
5921 +    
5922 +    switch (ctxt->Capability.cap_type & ELAN_CAP_TYPE_MASK)
5923 +    {
5924 +    case ELAN_CAP_TYPE_BLOCK:
5925 +    case ELAN_CAP_TYPE_CYCLIC:
5926 +    {
5927 +       if (ELAN3_SYSTEM_CONTEXT (ctxt->Capability.cap_mycontext))
5928 +           return ;
5929 +
5930 +       if (! (ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST))
5931 +           need_to_call_elanmod_detach = 1;
5932 +
5933 +       break;
5934 +    }  
5935 +    default:
5936 +       return ;
5937 +    }
5938 +
5939 +    elan3_swapout (ctxt, CTXT_DETACHED);
5940 +
5941 +    spin_lock_irqsave (&dev->IntrLock, flags);
5942 +
5943 +    elan3mmu_detach (dev, ctxt->Capability.cap_mycontext);
5944 +    ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL;
5945 +
5946 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5947 +
5948 +    if (ctxt->CommandPage)
5949 +    {
5950 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
5951 +       ctxt->CommandPage = (ioaddr_t) 0;
5952 +    }
5953 +    
5954 +    if (need_to_call_elanmod_detach) 
5955 +       elan_detach_cap(&ctxt->Capability, dev->Devinfo.dev_rail);
5956 +
5957 +    elan_nullcap (&ctxt->Capability);
5958 +
5959 +}
5960 +
5961 +void
5962 +elan3_dodetach ( ELAN3_CTXT *ctxt )
5963 +{
5964 +    ELAN3_DEV     *dev = ctxt->Device;
5965 +    unsigned long flags;
5966 +
5967 +    PRINTF1 (ctxt, DBG_FN, "elan3_dodetach: %p \n", ctxt );
5968 +    
5969 +    if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
5970 +    {
5971 +       PRINTF0 (ctxt, DBG_FN, "elan3_dodetach: context not attached \n");
5972 +       return ;
5973 +    }
5974 +
5975 +    elan3_swapout (ctxt, CTXT_DETACHED);
5976 +
5977 +    spin_lock_irqsave (&dev->IntrLock, flags);
5978 +
5979 +    elan3mmu_detach (dev, ctxt->Capability.cap_mycontext);
5980 +    ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL;
5981 +
5982 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
5983 +
5984 +    if (ctxt->CommandPage)
5985 +    {
5986 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
5987 +       ctxt->CommandPage = (ioaddr_t) 0;
5988 +    }
5989 +    
5990 +    elan_nullcap (&ctxt->Capability);
5991 +}
5992 +
5993 +void
5994 +elan3_swapin (ELAN3_CTXT *ctxt, int reason)
5995 +{
5996 +    ELAN3_DEV *dev = ctxt->Device;
5997 +    unsigned long flags;
5998 +
5999 +    spin_lock_irqsave (&dev->IntrLock, flags);
6000 +
6001 +    ASSERT (ctxt->Status & CTXT_SWAPPED_REASONS);
6002 +
6003 +    PRINTF3 (ctxt, DBG_SWAP, "elan3_swapin: status %x State %s reason %x\n", 
6004 +            ctxt->Status, OthersStateStrings[ctxt->OthersState], reason);
6005 +
6006 +    while (ctxt->Status & CTXT_SWAPPING_OUT)                   /* In transition */
6007 +       kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);
6008 +
6009 +    if (reason == CTXT_NO_LWPS && ctxt->LwpCount++ != 0)       /* Added another LWP */
6010 +    {
6011 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
6012 +       return;
6013 +    }
6014 +
6015 +    if ((ctxt->Status & ~reason) & CTXT_SWAPPED_REASONS)
6016 +       ctxt->Status &= ~reason;
6017 +    else
6018 +    {
6019 +       ASSERT (ctxt->Status & CTXT_SWAPPED_OUT);
6020 +       ASSERT (ctxt->OthersState == CTXT_OTHERS_SWAPPED);
6021 +       
6022 +       /*
6023 +        * Will not be swapped out anymore, so ask the "user" to perform 
6024 +        * any swapping in he needs before letting the context run again.
6025 +        */
6026 +       
6027 +       ctxt->Status &= ~(CTXT_SWAPPED_OUT | CTXT_QUEUES_EMPTY | reason);
6028 +       ctxt->OthersState = CTXT_OTHERS_RUNNING;
6029 +
6030 +       if (ctxt->Input0Trap.State == CTXT_STATE_OK && ctxt->Input1Trap.State == CTXT_STATE_OK)
6031 +           SetInputterStateForContext (ctxt, 0, NULL);
6032 +       
6033 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
6034 +    }
6035 +
6036 +    PRINTF2 (ctxt, DBG_SWAP, "elan3_swapin: all done - status %x state %s\n",
6037 +            ctxt->Status, OthersStateStrings[ctxt->OthersState]);
6038 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6039 +}
6040 +
6041 +
6042 +void
6043 +elan3_swapout (ELAN3_CTXT *ctxt, int reason)
6044 +{
6045 +    ELAN3_DEV     *dev = ctxt->Device;
6046 +    int           cansleep;
6047 +    unsigned long flags;
6048 +
6049 +    spin_lock_irqsave (&dev->IntrLock, flags);
6050 +
6051 +    PRINTF3 (ctxt, DBG_SWAP, "elan3_swapout: status %x state %s reason %x\n", 
6052 +            ctxt->Status, OthersStateStrings[ctxt->OthersState], reason);
6053 +
6054 +    if (reason == CTXT_NO_LWPS)
6055 +    {
6056 +       if (--ctxt->LwpCount != 0)                              /* Still other LWPs running */
6057 +       {
6058 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
6059 +           return;
6060 +       }
6061 +
6062 +       kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);            /* Wakeup anyone waiting on LwpCount */
6063 +    }
6064 +    
6065 +    ctxt->Status |= reason;
6066 +    
6067 +    while (ctxt->Status & CTXT_SWAPPING_OUT)                   /* wait for someone else to finish swapping */
6068 +       kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);         /* out */
6069 +
6070 +    if (ctxt->Status & CTXT_SWAPPED_OUT)
6071 +    {
6072 +       if (reason == CTXT_NO_LWPS)                             /* Wakeup other thread waiting on LWP exit */
6073 +           kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);
6074 +       
6075 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
6076 +       return;
6077 +    }
6078 +    
6079 +    /*
6080 +     * mark the context as swapping out.
6081 +     */
6082 +    ctxt->Status |= CTXT_SWAPPING_OUT;
6083 +    
6084 +    if (reason != CTXT_FIXUP_NETERR)
6085 +    {
6086 +       /*
6087 +        * Stop all of the lwps.
6088 +        */
6089 +       while (ctxt->LwpCount)
6090 +       {
6091 +           kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);           /* Wake up any lwps */
6092 +           kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);             /* then wait for them to enter elan3_swapout */
6093 +       }
6094 +    }
6095 +    
6096 +    StartSwapoutContext (ctxt, 0, NULL);
6097 +    for (;;)
6098 +    {
6099 +       PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: HandleExceptions\n");
6100 +
6101 +       cansleep = (HandleExceptions(ctxt, &flags) == ESUCCESS);
6102 +
6103 +       PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: OthersState=%d cansleep=%d\n", ctxt->OthersState, cansleep);
6104 +
6105 +       if (ctxt->OthersState == CTXT_OTHERS_SWAPPED)
6106 +           break;
6107 +
6108 +       if (cansleep)
6109 +           kcondvar_wait (&ctxt->Wait, &dev->IntrLock, &flags);
6110 +    }
6111 +    PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: swapped out\n");
6112 +    
6113 +    ASSERT (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ));
6114 +    ASSERT (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ));
6115 +
6116 +    ctxt->Status |=  CTXT_SWAPPED_OUT;
6117 +    ctxt->Status &= ~CTXT_SWAPPING_OUT;
6118 +
6119 +    kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);
6120 +
6121 +    PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: all done - status %x state %s\n",
6122 +            ctxt->Status, OthersStateStrings[ctxt->OthersState]);
6123 +
6124 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6125 +}
6126 +
6127 +int
6128 +elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages)
6129 +{
6130 +    E3_Addr     elanAddr = FaultSave->s.FaultAddress;
6131 +    int                writeable;
6132 +    int                res;
6133 +
6134 +    PRINTF3 (ctxt, DBG_FAULT, "elan3_pagefault: elanAddr %08x FSR %08x : %s\n", elanAddr, FaultSave->s.FSR.Status,
6135 +            FaultSave->s.FSR.s.ProtFault ? "protection fault" : "pte invalid");
6136 +    
6137 +    /* Look at the FSR to determine the fault type etc */
6138 +    
6139 +    if (FaultSave->s.FSR.Status == 0)                          /* this is a target abort/parity error, so look */
6140 +    {                                                          /* at the PCI config space registers to determine  */
6141 +       ElanBusError (ctxt->Device);
6142 +       return (EFAULT);                                        
6143 +    }
6144 +    
6145 +    if (FaultSave->s.FSR.s.AlignmentErr)                       /* Alignment errors are always fatal. */
6146 +    {
6147 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Alignment error\n");
6148 +       return (EFAULT);
6149 +    }
6150 +
6151 +    if (FaultSave->s.FSR.s.WalkBadData)                                /* Memory ECC error during a walk */
6152 +    {
6153 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Memory ECC error during walk\n");
6154 +       return (EFAULT);
6155 +    }
6156 +
6157 +    if (!FaultSave->s.FSR.s.ProtFault &&                       /* DMA memory type changed */
6158 +       !FaultSave->s.FSR.s.Walking)
6159 +    {
6160 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: DMA memory type changed\n");
6161 +       return (EFAULT);
6162 +    }
6163 +
6164 +    ASSERT (FaultSave->s.FSR.s.ProtFault ?                     /* protection errors, should always have a valid pte */
6165 +           (!FaultSave->s.FSR.s.Walking || !(FaultSave->s.FSR.s.Level==3) ||  FaultSave->s.FSR.s.FaultPte == ELAN3_ET_PTE) : 
6166 +           FaultSave->s.FSR.s.FaultPte == ELAN3_ET_INVALID);   /* otherwise it must be an invalid pte */
6167 +
6168 +    /*
6169 +     * Determine whether to fault for a 'write' from the access permissions we need, and not
6170 +     * from the access type (WrAcc).
6171 +     */
6172 +    writeable = (FaultSave->s.FSR.s.AccTypePerm & (1 << FSR_WritePermBit));
6173 +
6174 +    /* Check that we have the right permissions for this access type. */
6175 +    if ((res = elan3mmu_checkperm (ctxt->Elan3mmu, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.AccTypePerm)) != 0)
6176 +    {
6177 +       PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: %s\n", (res == ENOMEM) ? "no protection mapping" : "protection error");
6178 +       
6179 +       return (res);
6180 +    }
6181 +
6182 +    res = LoadElanTranslation (ctxt, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.ProtFault, writeable);
6183 +
6184 +    if (res == ESUCCESS)
6185 +    {
6186 +       BumpStat (ctxt->Device, PageFaults);
6187 +       BumpUserStat (ctxt, PageFaults);
6188 +    }
6189 +
6190 +    PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: -> %d\n", res);
6191 +
6192 +    return (res);
6193 +}
6194 +
6195 +void
6196 +elan3_block_inputter (ELAN3_CTXT *ctxt, int block)
6197 +{
6198 +    ELAN3_DEV *dev = ctxt->Device;
6199 +    unsigned long flags;
6200 +
6201 +    spin_lock_irqsave (&dev->IntrLock, flags);
6202 +    
6203 +    if (block)
6204 +       ctxt->Status |= CTXT_USER_FILTERING;
6205 +    else
6206 +       ctxt->Status &= ~CTXT_USER_FILTERING;
6207 +
6208 +    if (ctxt->Capability.cap_mycontext != ELAN_CAP_UNINITIALISED)
6209 +       SetInputterStateForContext (ctxt, 0, NULL);
6210 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6211 +}
6212 +
6213 +int
6214 +FixupNetworkErrors (ELAN3_CTXT *ctxt, unsigned long *flags)
6215 +{
6216 +    ELAN3_DEV   *dev = ctxt->Device;
6217 +    NETERR_FIXUP *nef;
6218 +
6219 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6220 +    
6221 +    if (ctxt->NetworkErrorFixups == NULL)
6222 +       return (ESUCCESS);
6223 +
6224 +    spin_unlock_irqrestore (&dev->IntrLock, *flags);
6225 +    
6226 +    kmutex_lock (&ctxt->NetworkErrorLock);                     /* single thread while fixing up errors */
6227 +    elan3_swapout (ctxt, CTXT_FIXUP_NETERR);
6228 +
6229 +    spin_lock_irqsave (&dev->IntrLock, *flags);
6230 +    while ((nef = ctxt->NetworkErrorFixups) != NULL)
6231 +    {
6232 +       ctxt->NetworkErrorFixups = nef->Next;
6233 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6234 +
6235 +       if (ELAN3_OP_FIXUP_NETWORK_ERROR (ctxt, nef) == OP_FAILED)
6236 +           CompleteNetworkErrorFixup (ctxt, nef, EINVAL);
6237 +
6238 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6239 +    }
6240 +    spin_unlock_irqrestore (&dev->IntrLock, *flags);
6241 +
6242 +    elan3_swapin (ctxt, CTXT_FIXUP_NETERR);
6243 +
6244 +    kmutex_unlock (&ctxt->NetworkErrorLock);
6245 +    spin_lock_irqsave (&dev->IntrLock, *flags);
6246 +    return (EAGAIN);
6247 +}
6248 +
6249 +int
6250 +CompleteNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER *rvp)
6251 +{
6252 +    int state;
6253 +
6254 +    switch (rvp->Status)
6255 +    {
6256 +    case ESUCCESS:
6257 +       /*
6258 +        * the item still existed at the source - if it's a wait for EOP transaction
6259 +        * then the source will retry - otherwise the remote event will have been
6260 +        * cleared and we should execute it
6261 +        */
6262 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESUCCESS zero WaitForEopTransaction %p\n", trap->WaitForEopTransaction);
6263 +
6264 +       state = trap->WaitForEopTransaction ? CTXT_STATE_OK : CTXT_STATE_NEEDS_RESTART;
6265 +
6266 +       break;
6267 +
6268 +    case ESRCH:        
6269 +       /*
6270 +        * the item was not found at the source - we should always execute the transaction
6271 +        * since it will never be resent
6272 +        */
6273 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESRCH execute WaitForEopTransaction %p\n", trap->WaitForEopTransaction);
6274 +       state = CTXT_STATE_NEEDS_RESTART;
6275 +       break;
6276 +
6277 +    default:                                                   /* other errors */
6278 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: %d\n", rvp->Status);
6279 +       if (ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, &rvp) == OP_HANDLED)
6280 +           state = CTXT_STATE_NEEDS_RESTART;
6281 +       else
6282 +           state = CTXT_STATE_OK;
6283 +       break;
6284 +    }
6285 +
6286 +    FreeNetworkErrorResolver (rvp);
6287 +
6288 +    return (state);
6289 +}
6290 +
6291 +int
6292 +HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags)
6293 +{
6294 +    ELAN3_DEV        *dev    = ctxt->Device;
6295 +    THREAD_TRAP      tproc;
6296 +    DMA_TRAP         dproc;
6297 +    NETERR_RESOLVER *rvp;
6298 +    int                     state;
6299 +
6300 +    if (ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR)
6301 +    {
6302 +       ctxt->Status &= ~CTXT_COMMAND_OVERFLOW_ERROR;
6303 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6304 +       ElanException (ctxt, EXCEPTION_COMMAND_OVERFLOW, COMMAND_PROC, NULL);
6305 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6306 +       return (EAGAIN);
6307 +    }
6308 +    
6309 +    if (! ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ))
6310 +    {
6311 +       /* XXXX: unmap translations to the command port */
6312 +
6313 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6314 +       ResolveCProcTrap (ctxt);
6315 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6316 +       return (EAGAIN);
6317 +    }
6318 +    
6319 +    if (ctxt->Input0Trap.State == CTXT_STATE_TRAPPED)
6320 +    {
6321 +       ctxt->Input0Trap.State = CTXT_STATE_RESOLVING;
6322 +
6323 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6324 +       ResolveIProcTrap (ctxt, &ctxt->Input0Trap, &ctxt->Input0Resolver);
6325 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6326 +       return (EAGAIN);
6327 +    }
6328 +
6329 +    if (ctxt->Input1Trap.State == CTXT_STATE_TRAPPED)
6330 +    {
6331 +       ctxt->Input1Trap.State = CTXT_STATE_RESOLVING;
6332 +
6333 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6334 +       ResolveIProcTrap (ctxt, &ctxt->Input1Trap, &ctxt->Input1Resolver);
6335 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6336 +       return (EAGAIN);
6337 +    }
6338 +
6339 +    if ((rvp = ctxt->Input0Resolver) != NULL && rvp->Completed)
6340 +    {
6341 +       ASSERT (ctxt->Input0Trap.State == CTXT_STATE_NETWORK_ERROR);
6342 +
6343 +       ctxt->Input0Resolver = NULL;
6344 +       
6345 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6346 +       state = CompleteNetworkErrorResolver (ctxt, &ctxt->Input0Trap, rvp);
6347 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6348 +       ctxt->Input0Trap.State = state;
6349 +       return (EAGAIN);
6350 +    }
6351 +
6352 +    if ((rvp = ctxt->Input1Resolver) != NULL && rvp->Completed)
6353 +    {
6354 +       ASSERT (ctxt->Input1Trap.State == CTXT_STATE_NETWORK_ERROR);
6355 +
6356 +       ctxt->Input1Resolver = NULL;
6357 +       
6358 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6359 +       state = CompleteNetworkErrorResolver (ctxt,&ctxt->Input1Trap, rvp);
6360 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6361 +       ctxt->Input1Trap.State = state;
6362 +       return (EAGAIN);
6363 +    }
6364 +
6365 +    if (NextTProcTrap (ctxt, &tproc))
6366 +    {
6367 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6368 +       ResolveTProcTrap (ctxt, &tproc);
6369 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6370 +       return (EAGAIN);
6371 +    }
6372 +    ctxt->Status &= ~CTXT_THREAD_QUEUE_FULL;
6373 +
6374 +    if (NextDProcTrap (ctxt, &dproc))
6375 +    {
6376 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6377 +       ResolveDProcTrap (ctxt, &dproc);
6378 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6379 +       return (EAGAIN);
6380 +    }
6381 +    ctxt->Status &= ~CTXT_DMA_QUEUE_FULL;
6382 +
6383 +    /* Handle all event interrupts. */
6384 +    if (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ))
6385 +    {
6386 +       while (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ))
6387 +       {
6388 +           E3_uint32 cookie = *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies);
6389 +
6390 +           ELAN3_QUEUE_REMOVE (ctxt->EventCookieQ);
6391 +
6392 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6393 +           if (ELAN3_OP_EVENT (ctxt, cookie, OP_LWP) != OP_DEFER)
6394 +               spin_lock_irqsave (&dev->IntrLock, *flags);
6395 +           else
6396 +           {
6397 +               spin_lock_irqsave (&dev->IntrLock, *flags);     /* place the cookie back on the queue. */
6398 +                                                               /* note we place it on the front to ensure  */
6399 +               ELAN3_QUEUE_ADD_FRONT (ctxt->EventCookieQ);     /* event ordering. */
6400 +               *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies) = cookie;
6401 +           }
6402 +       }
6403 +       return (EAGAIN);
6404 +    }
6405 +    ctxt->Status &= ~CTXT_EVENT_QUEUE_FULL;
6406 +
6407 +    if (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ))
6408 +    {
6409 +       while (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ))
6410 +       {
6411 +           E3_DMA_BE DmaDesc = *ELAN3_QUEUE_FRONT (ctxt->SwapDmaQ, ctxt->SwapDmas);
6412 +
6413 +           ELAN3_QUEUE_REMOVE (ctxt->SwapDmaQ);
6414 +
6415 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6416 +           RestartDmaDesc (ctxt, &DmaDesc);
6417 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6418 +       }
6419 +       return (EAGAIN);
6420 +    }
6421 +    
6422 +    if (! ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ))
6423 +    {
6424 +       while (! ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ))
6425 +       {
6426 +           E3_Addr StackPointer = *ELAN3_QUEUE_FRONT (ctxt->SwapThreadQ, ctxt->SwapThreads);
6427 +
6428 +           ELAN3_QUEUE_REMOVE (ctxt->SwapThreadQ);
6429 +
6430 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6431 +           ReissueStackPointer (ctxt, StackPointer);
6432 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6433 +       }
6434 +       return (EAGAIN);
6435 +    }
6436 +    
6437 +    switch (ctxt->OthersState)
6438 +    {
6439 +    case CTXT_OTHERS_SWAPPING:
6440 +       if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6441 +           ctxt->OthersState = CTXT_OTHERS_RUNNING;
6442 +       else
6443 +           ctxt->OthersState = CTXT_OTHERS_SWAPPED;
6444 +
6445 +       PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping -> %s\n", OthersStateStrings[ctxt->OthersState]);
6446 +           
6447 +       break;
6448 +
6449 +    case CTXT_OTHERS_SWAPPING_MORE:
6450 +       ctxt->OthersState = CTXT_OTHERS_HALTING_MORE;
6451 +       QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt);
6452 +
6453 +       PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping_more -> %s\n", OthersStateStrings[ctxt->OthersState]);
6454 +       break;
6455 +    }
6456 +    return (ESUCCESS);
6457 +}
6458 +
6459 +int
6460 +RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags)
6461 +{
6462 +    ELAN3_DEV *dev = ctxt->Device;
6463 +    int       res;
6464 +
6465 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6466 +
6467 +    PRINTF1 (ctxt, DBG_LWP, "RestartContext: status %x\n", ctxt->Status);
6468 +
6469 +    if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6470 +    {
6471 +       if (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ) || ! ELAN3_QUEUE_EMPTY(ctxt->CommandQ))
6472 +       {
6473 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6474 +           RestartCProcTrap (ctxt);
6475 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6476 +           return (EAGAIN);
6477 +       }
6478 +
6479 +       if (ctxt->Input0Trap.State == CTXT_STATE_NEEDS_RESTART)
6480 +       {
6481 +           ctxt->Input0Trap.State = CTXT_STATE_EXECUTING;
6482 +
6483 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6484 +           res = RestartIProcTrap (ctxt, &ctxt->Input0Trap);
6485 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6486 +           
6487 +           if (res == ESUCCESS)
6488 +               ctxt->Input0Trap.State = CTXT_STATE_OK;
6489 +           else
6490 +               ctxt->Input0Trap.State = CTXT_STATE_NEEDS_RESTART;
6491 +           return (EAGAIN);
6492 +       }
6493 +
6494 +       if (ctxt->Input1Trap.State == CTXT_STATE_NEEDS_RESTART)
6495 +       {
6496 +           ctxt->Input1Trap.State = CTXT_STATE_EXECUTING;
6497 +
6498 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6499 +           res = RestartIProcTrap (ctxt, &ctxt->Input1Trap);
6500 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6501 +
6502 +           if (res == ESUCCESS)
6503 +               ctxt->Input1Trap.State = CTXT_STATE_OK;
6504 +           else
6505 +               ctxt->Input1Trap.State = CTXT_STATE_NEEDS_RESTART;
6506 +           return (EAGAIN);
6507 +       }
6508 +
6509 +       if (SetEventsNeedRestart (ctxt))
6510 +       {
6511 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6512 +           RestartSetEvents (ctxt);
6513 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6514 +           return (EAGAIN);
6515 +       }
6516 +
6517 +       SetInputterStateForContext (ctxt, 0, NULL);
6518 +
6519 +       if (TProcNeedsRestart (ctxt))
6520 +       {
6521 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6522 +
6523 +           LoadCommandPortTranslation (ctxt);
6524 +           RestartTProcItems (ctxt);
6525 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6526 +           return (EAGAIN);
6527 +       }
6528 +
6529 +       if (DProcNeedsRestart (ctxt))
6530 +       {
6531 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6532 +           RestartDProcItems (ctxt);
6533 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6534 +           return (EAGAIN);
6535 +       }
6536 +
6537 +       if (ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ))
6538 +       {
6539 +           PRINTF1 (ctxt, DBG_LWP, "RestartContext: setting Command Flag at %p to 0\n", &ctxt->FlagPage->CommandFlag);
6540 +
6541 +           ctxt->FlagPage->CommandFlag = 0;
6542 +
6543 +           if (ctxt->Status & CTXT_WAITING_COMMAND)
6544 +           {
6545 +               PRINTF0 (ctxt, DBG_LWP, "RestartContext: waking up threads waiting for commandport\n");
6546 +               
6547 +               ctxt->Status &= ~CTXT_WAITING_COMMAND;
6548 +               
6549 +               kcondvar_wakeupall (&ctxt->CommandPortWait, &dev->IntrLock);
6550 +           }
6551 +       }
6552 +    }
6553 +
6554 +    return (ESUCCESS);
6555 +}
6556 +
6557 +static void
6558 +HaltSwapContext (ELAN3_DEV *dev, void *arg)
6559 +{
6560 +    ELAN3_CTXT        *ctxt    = (ELAN3_CTXT *) arg;
6561 +    int                      SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
6562 +    E3_ThreadQueue_BE thread;
6563 +    E3_DMA_BE         dma;
6564 +    sdramaddr_t       FPtr, BPtr;
6565 +    sdramaddr_t              Base, Top;
6566 +    u_int           *runCount;
6567 +    unsigned long     flags;
6568 +
6569 +    spin_lock_irqsave (&dev->IntrLock, flags);
6570 +
6571 +    ASSERT (ctxt->OthersState == CTXT_OTHERS_HALTING || ctxt->OthersState == CTXT_OTHERS_HALTING_MORE);
6572 +
6573 +    PRINTF2 (ctxt, DBG_SWAP, "HaltSwapContext: status %x state %s\n", ctxt->Status, OthersStateStrings[ctxt->OthersState]);
6574 +
6575 +    if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6576 +    {
6577 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
6578 +       {
6579 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6580 +
6581 +           if (--(*runCount) == 0)
6582 +               SetSchedStatusRegister (dev, 0, NULL);
6583 +       }
6584 +       ctxt->OthersState = CTXT_OTHERS_RUNNING;
6585 +       
6586 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: no more reason to swap -> others_running\n");
6587 +
6588 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
6589 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
6590 +       return;
6591 +    }
6592 +
6593 +    /*
6594 +     * Capture all other processors since we're not being responsive to 
6595 +     * the command processor interrupt.
6596 +     */
6597 +    CAPTURE_CPUS();
6598 +
6599 +    if (SysCntx)
6600 +    {
6601 +       FPtr = read_reg32 (dev, TProc_SysCntx_FPtr);
6602 +       BPtr = read_reg32 (dev, TProc_SysCntx_BPtr);
6603 +       Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0]);
6604 +       Top  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[E3_SysCntxQueueSize-1]);
6605 +    }
6606 +    else
6607 +    {
6608 +       FPtr  = read_reg32 (dev, TProc_NonSysCntx_FPtr);
6609 +       BPtr  = read_reg32 (dev, TProc_NonSysCntx_BPtr);
6610 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0]);
6611 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[E3_NonSysCntxQueueSize-1]);
6612 +    }
6613 +
6614 +    while (FPtr != BPtr)
6615 +    {
6616 +       elan3_sdram_copyq_from_sdram (dev, FPtr, (void *) &thread, sizeof (E3_ThreadQueue_BE));
6617 +       
6618 +       if (thread.s.Context == ctxt->Capability.cap_mycontext)
6619 +       {
6620 +           if (ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
6621 +               break;
6622 +           
6623 +           *ELAN3_QUEUE_BACK(ctxt->SwapThreadQ, ctxt->SwapThreads) = thread.s.Thread;
6624 +           ELAN3_QUEUE_ADD (ctxt->SwapThreadQ);
6625 +           
6626 +           /*
6627 +            * Remove this entry from the queue by replacing it with 
6628 +            * the "magic" thread value.
6629 +            *
6630 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
6631 +            * to mark the appropriate run queue as empty.
6632 +            */
6633 +           thread.s.Context = SysCntx ? SYS_CONTEXT_BIT : 0;
6634 +           thread.s.Thread  = VanishingStackPointer;
6635 +
6636 +           elan3_sdram_copyq_to_sdram (dev, (void *) &thread, FPtr, sizeof (E3_ThreadQueue_BE));
6637 +       }
6638 +       
6639 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_ThreadQueue);
6640 +    }
6641 +
6642 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
6643 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
6644 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
6645 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
6646 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
6647 +
6648 +    if (SysCntx)
6649 +    {
6650 +       FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
6651 +       BPtr  = read_reg32 (dev, DProc_SysCntx_BPtr);
6652 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
6653 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
6654 +    }
6655 +    else
6656 +    {
6657 +       FPtr  = read_reg32 (dev, DProc_NonSysCntx_FPtr);
6658 +       BPtr  = read_reg32 (dev, DProc_NonSysCntx_BPtr);
6659 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
6660 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
6661 +    }
6662 +
6663 +    while (FPtr != BPtr)
6664 +    {
6665 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
6666 +           
6667 +       if (dma.s.dma_u.s.Context == ctxt->Capability.cap_mycontext)
6668 +       {
6669 +           if (ELAN3_QUEUE_FULL (ctxt->SwapDmaQ))
6670 +               break;
6671 +           
6672 +           *ELAN3_QUEUE_BACK (ctxt->SwapDmaQ, ctxt->SwapDmas) = dma;
6673 +           ELAN3_QUEUE_ADD (ctxt->SwapDmaQ);
6674 +
6675 +           /*
6676 +            * Remove the DMA from the queue by replacing it with one with
6677 +            * zero size and no events.
6678 +            *
6679 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
6680 +            * to mark the appropriate run queue as empty.
6681 +            */
6682 +           dma.s.dma_type            = ((SysCntx ? SYS_CONTEXT_BIT : 0) << 16);
6683 +           dma.s.dma_size            = 0;
6684 +           dma.s.dma_source          = (E3_Addr) 0;
6685 +           dma.s.dma_dest            = (E3_Addr) 0;
6686 +           dma.s.dma_destCookieVProc = (E3_Addr) 0;
6687 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
6688 +           dma.s.dma_srcCookieVProc  = (E3_Addr) 0;
6689 +
6690 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
6691 +       }
6692 +
6693 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
6694 +    }
6695 +
6696 +    /*
6697 +     * Release the other processors now before signalling the LWP.
6698 +     */
6699 +    RELEASE_CPUS();
6700 +
6701 +    if (! ELAN3_QUEUE_FULL (ctxt->SwapDmaQ) && !ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
6702 +    {
6703 +       /*
6704 +        * We've completely emptied the elan queues of items in this
6705 +        * context, so we now mark it as fully swapped out.
6706 +        */
6707 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
6708 +       {
6709 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6710 +           
6711 +           if (--(*runCount) == 0)
6712 +               SetSchedStatusRegister (dev, 0, NULL);
6713 +           
6714 +       }
6715 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues emptied -> others_swapping\n");
6716 +
6717 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING;
6718 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
6719 +    }
6720 +    else
6721 +    {
6722 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING)
6723 +       {
6724 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6725 +           
6726 +           if ((*runCount)++ == 0)
6727 +               SetSchedStatusRegister (dev, 0, NULL);
6728 +       }
6729 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues not emptied -> others_swapping_more\n");
6730 +
6731 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
6732 +       kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
6733 +    }
6734 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6735 +}
6736 +
6737 +void
6738 +UnloadCommandPageMapping (ELAN3_CTXT *ctxt)
6739 +{
6740 +    /*
6741 +     * Unload the Elan translations,  and flag the main processor to stall after 
6742 +     * issuing its next command.
6743 +     */
6744 +    if (ctxt->CommandPageMapping != NULL && (ctxt->Status & CTXT_COMMAND_MAPPED_ELAN))
6745 +    {
6746 +       ELAN3MMU_RGN *rgn = elan3mmu_rgnat_main (ctxt->Elan3mmu, ctxt->CommandPageMapping);
6747 +       
6748 +       if (rgn != NULL)
6749 +       {
6750 +           E3_Addr eaddr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase);
6751 +           
6752 +           PRINTF1 (ctxt, DBG_INTR, "UnloadCommandPageMapping: unmapping command port at addr %08x\n", eaddr);
6753 +           
6754 +           elan3mmu_unload (ctxt->Elan3mmu, eaddr, PAGESIZE, PTE_UNLOAD);
6755 +       }
6756 +       
6757 +       ctxt->Status &= ~CTXT_COMMAND_MAPPED_ELAN;
6758 +    }
6759 +}
6760 +
6761 +void
6762 +StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
6763 +{
6764 +    ELAN3_DEV   *dev     = ctxt->Device;
6765 +    int                SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
6766 +    u_int      *runCount;
6767 +
6768 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6769 +
6770 +    PRINTF2 (ctxt, DBG_SWAP, "StartSwapoutContext: Status %x OthersState %s\n",
6771 +            ctxt->Status, OthersStateStrings [ctxt->OthersState]);
6772 +    /*
6773 +     * Disable the inputters,  we should already have a reason for it.
6774 +     */
6775 +    SetInputterStateForContext (ctxt, Pend, Maskp);
6776 +
6777 +    UnloadCommandPageMapping (ctxt);
6778 +
6779 +    /* 
6780 +     * Flag main processor to stall after issueing next command
6781 +     */
6782 +    PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag);
6783 +
6784 +    ctxt->FlagPage->CommandFlag = 1;
6785 +
6786 +    PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState);
6787 +
6788 +    /*
6789 +     * And queue a haltop to stop the queues and clear it out.
6790 +     */
6791 +    switch (ctxt->OthersState)
6792 +    {
6793 +    case CTXT_OTHERS_RUNNING:
6794 +       PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_halting\n");
6795 +
6796 +       ctxt->OthersState = CTXT_OTHERS_HALTING;
6797 +
6798 +       QueueHaltOperation (dev, Pend, Maskp, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt);
6799 +       break;
6800 +       
6801 +    case CTXT_OTHERS_SWAPPING:
6802 +       PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_swapping_more\n");
6803 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
6804 +
6805 +       runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
6806 +           
6807 +       if ((*runCount)++ == 0)
6808 +           SetSchedStatusRegister (dev, Pend, Maskp);
6809 +       break;
6810 +    default:
6811 +       PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState);
6812 +       break;
6813 +    }
6814 +}
6815 +
6816 +#if defined(DIGITAL_UNIX)
6817 +/* temporary tweaks to priority bump */
6818 +int lwp_do_prio = 1;
6819 +int lwp_do_nxm = 1;
6820 +int lwp_prio = BASEPRI_USER-1;
6821 +#elif defined(LINUX)
6822 +/* This is the default nice level for the helper LWP */
6823 +int LwpNice = -1;
6824 +#endif
6825 +
6826 +int
6827 +elan3_lwp (ELAN3_CTXT *ctxt)
6828 +{
6829 +    ELAN3_DEV     *dev = ctxt->Device;
6830 +    int                  res;
6831 +    unsigned long flags;
6832 +
6833 +    PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: started, context 0x%x\n", ctxt->Capability.cap_mycontext);
6834 +
6835 +#if defined(DIGITAL_UNIX)
6836 +    {
6837 +        thread_t mythread = current_thread();
6838 +        if (lwp_do_prio && (lwp_do_nxm || !IS_NXM_TASK(mythread->task)))
6839 +        {
6840 +            mythread->priority = mythread->sched_pri = lwp_prio;
6841 +            mythread->max_priority = BASEPRI_HIGHEST;
6842 +            (void) thread_priority(mythread, lwp_prio, 0, 1);
6843 +        }
6844 +    }
6845 +#elif defined(LINUX)
6846 +    {
6847 +       /* Do the priority trick for the helper LWP so that it
6848 +        * runs in preference to the user threads which may be
6849 +        * burning CPU waiting for a trap to be fixed up
6850 +        */
6851 +#ifdef NO_O1_SCHED
6852 +       if (LwpNice >= -20 && LwpNice < 20)
6853 +           current->nice = LwpNice;
6854 +#else
6855 +       set_user_nice(current, LwpNice);
6856 +#endif
6857 +    }
6858 +#endif
6859 +
6860 +    elan3_swapin (ctxt, CTXT_NO_LWPS);
6861 +
6862 +    spin_lock_irqsave (&dev->IntrLock, flags);
6863 +
6864 +    /* If we're swapped out, and not detached (or exiting) then wait until we're swapped back in */
6865 +    /* since otherwise we could "spin" forever continually calling elan3_lwp() */
6866 +    if ((ctxt->Status & CTXT_SWAPPED_REASONS) && ! (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING)))
6867 +       kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags);
6868 +
6869 +    for (;;)
6870 +    {
6871 +#if defined(DIGITAL_UNIX)
6872 +        if (thread_should_halt(current_thread()) || 
6873 +            CURSIG_CHECK(task_to_proc(current_thread()->task), u.np_uthread))
6874 +       {
6875 +           PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: exiting on %s\n", 
6876 +                    thread_should_halt(current_thread()) ? "halt" : "signal");
6877 +            break;
6878 +       }
6879 +#endif
6880 +
6881 +       if (ctxt->Status & CTXT_SWAPPED_REASONS)
6882 +       {
6883 +           PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting on swapped reasons\n");
6884 +           break;
6885 +       }
6886 +
6887 +       if (! (ctxt->inhibit))
6888 +       {
6889 +           if (FixupNetworkErrors (ctxt, &flags) == ESUCCESS &&
6890 +               HandleExceptions (ctxt, &flags) == ESUCCESS &&
6891 +               RestartContext (ctxt, &flags) == ESUCCESS)
6892 +               {
6893 +                   if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0)
6894 +                   {
6895 +                       PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n");
6896 +                       break;
6897 +                   }
6898 +               }
6899 +       }
6900 +       else
6901 +       {
6902 +           printk("elan3_lwp :: skipping as inhibited\n");
6903 +           if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0)
6904 +           {
6905 +               PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n");
6906 +               break;
6907 +           }
6908 +       }
6909 +
6910 +    }
6911 +
6912 +    /* Return EINVAL to elan3_syscall_lwp() when we want it to exit */
6913 +    res = (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING)) ? EINVAL : 0;
6914 +
6915 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6916 +    
6917 +    elan3_swapout (ctxt, CTXT_NO_LWPS);
6918 +
6919 +    spin_lock_irqsave (&dev->IntrLock, flags);
6920 +    FixupNetworkErrors (ctxt, &flags);
6921 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6922 +
6923 +    return (res);
6924 +}
6925 +
6926 +void
6927 +SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
6928 +{
6929 +    ELAN3_DEV  *dev          = NULL;
6930 +    int        new_disabled = 0;
6931 +    int               ctxnum;
6932 +
6933 +    ASSERT (ctxt != NULL);
6934 +    dev  = ctxt->Device;
6935 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6936 +
6937 +    new_disabled = (ctxt->Input0Trap.State != CTXT_STATE_OK ||
6938 +                   ctxt->Input1Trap.State != CTXT_STATE_OK ||
6939 +                   (ctxt->Status & CTXT_INPUTTER_REASONS) != 0);
6940 +    
6941 +
6942 +    ctxnum   = ctxt->Capability.cap_mycontext;
6943 +
6944 +#ifndef __lock_lint  
6945 +    PRINTF2 (ctxt , DBG_IPROC, "SetInputterState: ctxnum %x %s attached\n", ctxnum, ctxt->Disabled ? "disabled " : "");
6946 +#endif /* __lock_lint */
6947 +        
6948 +    if (ctxt->Disabled != new_disabled)
6949 +    {
6950 +       PRINTF2 (ctxt, DBG_IPROC, "SetInputterState: ctxnum %x change %s\n", ctxnum, new_disabled ? "enabled to disabled" : "disabled to enabled");
6951 +       
6952 +       ctxt->Disabled = new_disabled;
6953 +
6954 +       /* synchronize the context filter for this context */
6955 +       elan3mmu_set_context_filter (dev, ctxnum, new_disabled, Pend, Maskp);
6956 +    }
6957 +}
6958 +
6959 +int
6960 +CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags)
6961 +{
6962 +    ELAN3_DEV *dev    = ctxt->Device;
6963 +    int       delay  = 1;
6964 +    int i, SeenComQueueEmpty;
6965 +
6966 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6967 +    ASSERT (cflags != DmaComQueueNotEmpty || dev->HaltDmaDequeueCount != 0);
6968 +
6969 +    /*
6970 +     * Flush the command processor queues and poll the queue to see if it empties.
6971 +     */
6972 +    if (dev->FlushCommandCount++ == 0)
6973 +       SetSchedStatusRegister (dev, 0, NULL);
6974 +
6975 +    /* 
6976 +     * Ensure previous writes have been flushed through the write buffers
6977 +     */
6978 +    wmb(); mmiob();
6979 +
6980 +    /*
6981 +     * If the command processor traps,  or it's taking too long to observe
6982 +     * the queue as empty,  then we need to force the interrupt handler to 
6983 +     * run for us.  So queue a halt operation for the dma processor.
6984 +     */
6985 +    SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
6986 +    for (i = 20; i > 0 || (how & ISSUE_COMMAND_CANT_WAIT); i--)
6987 +    {
6988 +       if (SeenComQueueEmpty || (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
6989 +           break;
6990 +       
6991 +       mb();
6992 +       DELAY (delay);
6993 +
6994 +       if ((delay <<= 1) == 0) delay = 1;
6995 +
6996 +       SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
6997 +    }
6998 +
6999 +    if (--dev->FlushCommandCount == 0)
7000 +       SetSchedStatusRegister (dev, 0, NULL);
7001 +
7002 +    /*
7003 +     * If we've seen the command queue that we're interested in with nothing in it
7004 +     * and the command processor has not trapped then the commands we've
7005 +     * issued have been successfully processed.
7006 +     */
7007 +    if (SeenComQueueEmpty && ! (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
7008 +    {
7009 +       PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: observed dma queue empty and command proc not trapped\n");
7010 +
7011 +       if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0)
7012 +           SetSchedStatusRegister (dev, 0, NULL);
7013 +
7014 +       return (ISSUE_COMMAND_OK);
7015 +    }
7016 +
7017 +    if ((how & ISSUE_COMMAND_CANT_WAIT) != 0)
7018 +       return (ISSUE_COMMAND_WAIT);
7019 +    
7020 +    /*
7021 +     * Halt the dma processor and wait for it to halt,  if the command we've issued has
7022 +     * trapped then the interrupt handler will have moved it to the context structure.
7023 +     */
7024 +    PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for dproc to halt\n");
7025 +    QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, WakeupLwp, ctxt);
7026 +    while (! ctxt->Halted)
7027 +    {
7028 +       PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for Halted - %d\n", ctxt->Halted);
7029 +
7030 +       kcondvar_wait (&ctxt->HaltWait, &dev->IntrLock, flags);
7031 +
7032 +       PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: woken for Halted - %d\n", ctxt->Halted);
7033 +    }
7034 +    ctxt->Halted = 0;
7035 +    
7036 +    PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: dproc halted, checking for trap\n");
7037 +    
7038 +    if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0)
7039 +       SetSchedStatusRegister (dev, 0, NULL);
7040 +
7041 +    return (ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ) ? ISSUE_COMMAND_OK : ISSUE_COMMAND_TRAPPED);
7042 +}
7043 +
7044 +int
7045 +WaitForCommandPort (ELAN3_CTXT *ctxt)
7046 +{
7047 +    ELAN3_DEV     *dev = ctxt->Device;
7048 +    int                  res;
7049 +    unsigned long flags;
7050 +
7051 +    spin_lock_irqsave (&dev->IntrLock, flags);
7052 +
7053 +    if (ctxt->Status & CTXT_DETACHED)
7054 +       res = EINVAL;
7055 +    else 
7056 +    {
7057 +       if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
7058 +       {
7059 +           ctxt->Status |= CTXT_WAITING_COMMAND;
7060 +           if (CTXT_IS_KERNEL(ctxt))
7061 +               kcondvar_wait (&ctxt->CommandPortWait, &dev->IntrLock, &flags);
7062 +           else 
7063 +               kcondvar_waitsig (&ctxt->CommandPortWait, &dev->IntrLock, &flags);
7064 +       }
7065 +       
7066 +       res = (!ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS)) ? EAGAIN : 0;
7067 +    }
7068 +       
7069 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7070 +
7071 +    return (res);
7072 +}
7073 +
7074 +static char *
7075 +CommandName (int offset)
7076 +{
7077 +    switch (offset)
7078 +    {
7079 +    case offsetof (E3_CommandPort, PutDma):    return ("PutDma");
7080 +    case offsetof (E3_CommandPort, GetDma):    return ("GetDma");
7081 +    case offsetof (E3_CommandPort, RunThread): return ("RunThread");
7082 +    case offsetof (E3_CommandPort, WaitEvent0):        return ("WaitEvent0");
7083 +    case offsetof (E3_CommandPort, WaitEvent1):        return ("WaitEvent1");
7084 +    case offsetof (E3_CommandPort, SetEvent):  return ("SetEvent");
7085 +    default:                                   return ("Bad Command");
7086 +    }
7087 +}
7088 +
7089 +int
7090 +IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int cflags)
7091 +{
7092 +    ELAN3_DEV     *dev = ctxt->Device;
7093 +    int                  res;
7094 +    unsigned long flags;
7095 +
7096 +    spin_lock_irqsave (&dev->IntrLock, flags);
7097 +
7098 +    if ((! (cflags & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS))
7099 +    {
7100 +       /*
7101 +        * Cannot issue commands for non-cproc traps if command port is trapped, 
7102 +        * nor if the dma/thread trap queues are full, or we're swapping out
7103 +        */
7104 +       PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_RETRY\n",
7105 +                CommandName (cmdoff), value);
7106 +
7107 +       res = ISSUE_COMMAND_RETRY;
7108 +    }
7109 +    else
7110 +    {
7111 +       PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_OK\n",
7112 +                CommandName (cmdoff), value);
7113 +
7114 +       mb();                                                   /* ensure writes to main memory completed */
7115 +       writel (value, ctxt->CommandPort + cmdoff);             /* issue command */
7116 +       mmiob();                                                /* and flush through IO writes */
7117 +
7118 +       res = ISSUE_COMMAND_OK;
7119 +    }
7120 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7121 +    
7122 +    return (res);
7123 +}
7124 +
7125 +int
7126 +IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int how)
7127 +{
7128 +    ELAN3_DEV     *dev    = ctxt->Device;
7129 +    int                  res;
7130 +    unsigned long flags;
7131 +
7132 +    /*
7133 +     * Since we may be issuing a command that could trap, and we're interested in
7134 +     * the outcome, the command port trap resolving code must be locked out.
7135 +     */
7136 +    kmutex_lock (&ctxt->CmdLock);
7137 +    spin_lock_irqsave (&dev->IntrLock, flags);
7138 +
7139 +    if ((! (how & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS))
7140 +    {
7141 +       PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_RETRY\n", value, item);
7142 +
7143 +       /*
7144 +        * Cannot issue commands for non-cproc traps if command port is trapped, 
7145 +        * nor if the dma/thread trap queues are full, or we're swapping out
7146 +        */
7147 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
7148 +       kmutex_unlock (&ctxt->CmdLock);
7149 +       return (ISSUE_COMMAND_RETRY);
7150 +    }
7151 +    
7152 +    ASSERT (item == NULL || ctxt->CommandPortItem == NULL);
7153 +
7154 +    /*
7155 +     * Stop the DMA processor from removing entries from the 
7156 +     * command port, and force the command processor to do this.
7157 +     * This means that if a trap occurs then it will be the command
7158 +     * processor that traps.
7159 +     */
7160 +    if (dev->HaltDmaDequeueCount++ == 0)
7161 +       SetSchedStatusRegister (dev, 0, NULL);
7162 +
7163 +    PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p]\n", value, item);
7164 +
7165 +    /*
7166 +     * Always issue the DMA to the 'write' command,  since we've asserted HaltDmaDequeue
7167 +     * the command processor will read the descriptor and transfer it to the run queue. 
7168 +     * The command processor looks at the dma_direction field to determine whether it is
7169 +     * a read or a write and whether to alter the dma_source of the descriptor on the run 
7170 +     * queue
7171 +     */
7172 +    mb();                                                      /* ensure writes to main memory completed */
7173 +    writel (value, ctxt->CommandPort + offsetof (E3_CommandPort, PutDma));
7174 +    mmiob();                                                   /* and flush through IO writes */
7175 +    
7176 +    res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
7177 +
7178 +    if (res == ISSUE_COMMAND_TRAPPED)
7179 +    {
7180 +       PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_TRAPPED\n", value, item);
7181 +       /*
7182 +        * Remember the item we're issuing so that if the command port traps the item will not
7183 +        * get freed off until the descriptor has been read after the command trap has been fixed
7184 +        * up.
7185 +        */
7186 +       if (item != NULL)
7187 +           ctxt->CommandPortItem = item;
7188 +    }
7189 +
7190 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7191 +    kmutex_unlock (&ctxt->CmdLock);
7192 +
7193 +    return (res);
7194 +}
7195 +
7196 +int
7197 +WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int how)
7198 +{
7199 +    ELAN3_DEV     *dev = ctxt->Device;
7200 +    int           res;
7201 +    unsigned long flags;
7202 +
7203 +    spin_lock_irqsave (&dev->IntrLock, flags);
7204 +
7205 +    res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
7206 +
7207 +    if (res == ISSUE_COMMAND_TRAPPED && item != NULL)
7208 +       ctxt->CommandPortItem = item;
7209 +
7210 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7211 +    
7212 +    return (res);
7213 +}
7214 +
7215 +void
7216 +FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, E3_FaultSave_BE *FaultSaveArea, int flags)
7217 +{
7218 +    ASSERT (! CTXT_IS_KERNEL (ctxt));
7219 +
7220 +    /*
7221 +     * This code re-issues the part of the set event that trapped.
7222 +     */
7223 +    switch (TrapType)
7224 +    {
7225 +    case MI_ChainedEventError:
7226 +       ElanException (ctxt, EXCEPTION_CHAINED_EVENT, proc, trap, FaultSaveArea->s.EventAddress);
7227 +       break;
7228 +       
7229 +
7230 +    case MI_SetEventReadWait:
7231 +       /*
7232 +        * Fault occurred on the read for the event location. Just re-issue
7233 +        * setevent using EventAddress in E3_FaultSave
7234 +        */
7235 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_SetEventReadWait: re-issuing setevent %08x\n", 
7236 +                FaultSaveArea->s.EventAddress);
7237 +       
7238 +       ReissueEvent (ctxt, (E3_Addr) FaultSaveArea->s.EventAddress, flags);
7239 +       break;
7240 +
7241 +    case MI_DoSetEvent:
7242 +    {
7243 +       /*
7244 +        * Fault occurred because the block write of a block copy event trapped.
7245 +        * Must grab the event type, source and dest then simulate the block copy and then
7246 +        * perform the set. Once the block copy is started the event location cannot be read
7247 +        * again.
7248 +        */
7249 +       E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7250 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7251 +       
7252 +       /*
7253 +        * Check that the event has the block copy bit
7254 +        * set in it,  since we couldn't trap here if it
7255 +        * didn't
7256 +        */
7257 +       if ((EventType & EV_TYPE_BCOPY) != EV_TYPE_BCOPY)
7258 +       {
7259 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: Unexpected type=%x\n", EventType);
7260 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7261 +           break;
7262 +       }
7263 +       
7264 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: RunEventType %x\n", EventType);
7265 +
7266 +       if (RunEventType (ctxt, FaultSaveArea, EventType))
7267 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7268 +
7269 +       break;
7270 +    }
7271 +    
7272 +    case MI_ThreadUpdateNonSysCntxBack:
7273 +    case MI_ThreadUpdateSysCntxBack:
7274 +    {
7275 +       /*
7276 +        * Fault occurred because the block write of a block copy event trapped.
7277 +        * Must grab the event type, source and dest then simulate the block copy and then
7278 +        * run the thread. Once the block copy is started the event location cannot be read
7279 +        * again.
7280 +        */
7281 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7282 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7283 +
7284 +       /*
7285 +        * Check for the correct EventPtr type
7286 +        */
7287 +       if ((EventType & (EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_THREAD))
7288 +       {
7289 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: Unexpected type=%x for setevent trap. Should be thread\n", EventType);
7290 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7291 +           break;
7292 +       }
7293 +       
7294 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: RunEventType %x\n", EventType);
7295 +       if (RunEventType (ctxt, FaultSaveArea, EventType))
7296 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7297 +       break;
7298 +    }
7299 +    
7300 +    case MI_EventIntUpdateBPtr:
7301 +    {
7302 +       /*
7303 +        * Fault occurred because the block write of a block copy event trapped.
7304 +        * Must grab the event type, source and dest then simulate the block copy and then
7305 +        * run the dma. Once the block copy is started the event location cannot be read
7306 +        * again.
7307 +        */
7308 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7309 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7310 +
7311 +       /*
7312 +        * Check for the correct EventPtr type
7313 +        */
7314 +       if ((EventType & (EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_EVIRQ))
7315 +       {
7316 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: Unexpected type=%x\n", EventType);
7317 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7318 +           break;
7319 +       }
7320 +
7321 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: RunEventType %x\n", EventType);
7322 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
7323 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7324 +       break;
7325 +    }
7326 +    
7327 +    case MI_RunDmaDesc:
7328 +    {
7329 +       /*
7330 +        * Fault occurred because the block write of a block copy event trapped.
7331 +        * Must grab the event type, source and dest then simulate the block copy and then
7332 +        * run the dma. Once the block copy is started the event location cannot be read
7333 +        * again.
7334 +        */
7335 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7336 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7337 +
7338 +       /*
7339 +        * Check for the correct EventPtr type
7340 +        */
7341 +       if ((EventType & (EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_DMA))
7342 +       {
7343 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: Unexpected type=%x\n", EventType);
7344 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7345 +           break;
7346 +       }
7347 +
7348 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: RunEventType %x\n", EventType);
7349 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
7350 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7351 +       break;
7352 +    }
7353 +    
7354 +    case MI_WaitForCntxDmaDescRead:
7355 +    case MI_WaitForNonCntxDmaDescRead:
7356 +       /*
7357 +        * Fault occurred on the read of the dma descriptor. Run dma using the
7358 +        * Fault Address in FaultSave.
7359 +        */
7360 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", FaultSaveArea->s.FaultAddress);
7361 +       
7362 +       RestartDmaPtr (ctxt, FaultSaveArea->s.FaultAddress);
7363 +       break;
7364 +    
7365 +    case MI_FinishedSetEvent:
7366 +       /*
7367 +        * Fault occurred because the block write of a block copy event trapped.
7368 +        * Simulate the block copy.
7369 +        */
7370 +       if (SimulateBlockCopy (ctxt, FaultSaveArea->s.EventAddress))
7371 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7372 +       break;
7373 +       
7374 +    case MI_BlockCopyEvent:
7375 +    case MI_BlockCopyWaitForReadData:
7376 +    {
7377 +       /*
7378 +        * Fault occurred on the read or write of the data for a block copy
7379 +        * event. Simulate the block copy using EventAddress in E3_FaultSave. Must also sample
7380 +        * the event type and then perform a run.
7381 +        */
7382 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7383 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7384 +
7385 +       PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: MI_BlockCopyWaitForReadData: BCopy read fault in BCopy event. Simulating BCopy.\n");
7386 +       
7387 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
7388 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7389 +       break;
7390 +    }
7391 +    
7392 +    case MI_EventQueueOverflow:
7393 +    case MI_ThreadQueueOverflow:
7394 +    case MI_DmaQueueOverflow:
7395 +       /* XXXX: should handle queue overflow */
7396 +       PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: Queue overflow\n");
7397 +
7398 +       ElanException (ctxt, EXCEPTION_QUEUE_OVERFLOW, proc, trap, FaultSaveArea, TrapType);
7399 +       break;
7400 +
7401 +    default:
7402 +       ElanException (ctxt, EXCEPTION_BUS_ERROR, proc, trap, FaultSaveArea, TrapType);
7403 +       break;
7404 +    }
7405 +}
7406 +
7407 +int
7408 +SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress)
7409 +{
7410 +    E3_Addr  SourcePtrElan;
7411 +    E3_Addr  DestPtrElan;
7412 +    unsigned DataType;
7413 +    int      i;
7414 +
7415 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
7416 +    {
7417 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
7418 +
7419 +       ElanException (ctxt, EXCEPTION_FAULTED, EVENT_PROC, NULL, EventAddress);
7420 +       return (TRUE);
7421 +    }
7422 +
7423 +    SourcePtrElan = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Source));
7424 +    DestPtrElan   = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Dest));
7425 +    DataType      = DestPtrElan & EV_BCOPY_DTYPE_MASK;
7426 +    DestPtrElan  &= ~EV_BCOPY_DTYPE_MASK;
7427 +
7428 +
7429 +    PRINTF3 (ctxt, DBG_EVENT, "SimulateBlockCopy: Event %08x SourcePtr %08x DestPtr %08x\n",
7430 +            EventAddress, SourcePtrElan, DestPtrElan);
7431 +
7432 +    if (SourcePtrElan & EV_WCOPY)
7433 +       ELAN3_OP_STORE32 (ctxt, DestPtrElan, SourcePtrElan);
7434 +    else
7435 +    {
7436 +       /*
7437 +        * NOTE: since the block copy could be to sdram, we issue the writes backwards,
7438 +        *       except we MUST ensure that the last item in the block is written last.
7439 +        */
7440 +#if defined(__LITTLE_ENDIAN__)
7441 +       /*
7442 +        * For little endian cpu's we don't need to worry about the data type.
7443 +        */
7444 +       for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
7445 +           ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7446 +
7447 +       i = E3_BLK_SIZE - sizeof (E3_uint64);
7448 +       ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7449 +#else
7450 +       switch (DataType)
7451 +       {
7452 +       case EV_TYPE_BCOPY_BYTE:
7453 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8))
7454 +               ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i));
7455 +           
7456 +           i = E3_BLK_SIZE - sizeof (E3_uint8);
7457 +           ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i));
7458 +           break;
7459 +
7460 +       case EV_TYPE_BCOPY_HWORD: 
7461 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16))
7462 +               ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i));
7463 +           
7464 +           i = E3_BLK_SIZE - sizeof (E3_uint16);
7465 +           ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i));
7466 +           break;
7467 +           
7468 +       case EV_TYPE_BCOPY_WORD:  
7469 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32))
7470 +               ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i));
7471 +           
7472 +           i = E3_BLK_SIZE - sizeof (E3_uint32);
7473 +           ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i));
7474 +           break;
7475 +           
7476 +       case EV_TYPE_BCOPY_DWORD: 
7477 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
7478 +               ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7479 +           
7480 +           i = E3_BLK_SIZE - sizeof (E3_uint64);
7481 +           ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7482 +           break;
7483 +       }
7484 +#endif
7485 +    }
7486 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
7487 +
7488 +    return (FALSE);
7489 +}
7490 +
7491 +void
7492 +ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr, int flags)
7493 +{
7494 +    PRINTF1 (ctxt, DBG_CMD, "ReissueEvent : Event=%08x\n", addr);
7495 +
7496 +    if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), addr, flags) == ISSUE_COMMAND_RETRY)
7497 +    {
7498 +       PRINTF1 (ctxt, DBG_CMD, "ReissueEvent: queue event %08x\n", addr);
7499 +
7500 +       kmutex_lock (&ctxt->SwapListsLock);
7501 +       ctxt->ItemCount[LIST_SETEVENT]++;
7502 +       ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_SETEVENT, addr);
7503 +       kmutex_unlock (&ctxt->SwapListsLock);
7504 +    }
7505 +}
7506 +
7507 +int
7508 +SetEventsNeedRestart (ELAN3_CTXT *ctxt)
7509 +{
7510 +    return (ctxt->ItemCount[LIST_SETEVENT] != 0);
7511 +}
7512 +
7513 +void
7514 +RestartSetEvents (ELAN3_CTXT *ctxt)
7515 +{
7516 +    void     *item;
7517 +    E3_uint32 EventPointer;
7518 +
7519 +    kmutex_lock (&ctxt->SwapListsLock);
7520 +    
7521 +    while (ctxt->ItemCount[LIST_SETEVENT])
7522 +    {
7523 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_SETEVENT, &item, &EventPointer))
7524 +           ctxt->ItemCount[LIST_SETEVENT] = 0;
7525 +       else
7526 +       {
7527 +           if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), EventPointer, FALSE) == ISSUE_COMMAND_RETRY)
7528 +           {
7529 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_SETEVENT, item);
7530 +               kmutex_unlock (&ctxt->SwapListsLock);
7531 +               return;
7532 +           }
7533 +           
7534 +           ctxt->ItemCount[LIST_SETEVENT]--;
7535 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
7536 +       }
7537 +    }
7538 +    kmutex_unlock (&ctxt->SwapListsLock);
7539 +}
7540 +
7541 +int
7542 +RunEventType(ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType)
7543 +{
7544 +    int failed = FALSE;
7545 +
7546 +    if ((EventType & EV_TYPE_BCOPY) != 0)
7547 +       failed = SimulateBlockCopy(ctxt, FaultSaveArea->s.EventAddress);
7548 +    
7549 +    if ((EventType & EV_TYPE_MASK) == EV_TYPE_THREAD)
7550 +       ReissueStackPointer (ctxt, EventType & ~(EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY));
7551 +    else if ((EventType & EV_TYPE_MASK) == EV_TYPE_DMA)
7552 +       RestartDmaPtr (ctxt, EventType & ~(EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY));
7553 +    else if ((EventType & EV_TYPE_EVIRQ) != 0)
7554 +       QueueEventInterrupt (ctxt, EventType & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY));
7555 +    else /* Chained event */
7556 +    {
7557 +       if ((EventType & ~EV_TYPE_BCOPY) != 0) /* not null setevent */
7558 +           ReissueEvent (ctxt, EventType & ~(EV_TYPE_MASK_CHAIN|EV_TYPE_MASK_BCOPY), FALSE);
7559 +    }
7560 +
7561 +    return (failed);
7562 +}
7563 +
7564 +void
7565 +WakeupLwp (ELAN3_DEV *dev, void *arg)
7566 +{
7567 +    ELAN3_CTXT    *ctxt = (ELAN3_CTXT *) arg;
7568 +    unsigned long flags;
7569 +
7570 +    PRINTF1 (ctxt, DBG_INTR, "WakeupLwp: %d\n", SPINLOCK_HELD (&dev->IntrLock));
7571 +
7572 +    spin_lock_irqsave (&dev->IntrLock, flags);
7573 +    ctxt->Halted = 1;
7574 +    kcondvar_wakeupone (&ctxt->HaltWait, &dev->IntrLock);
7575 +
7576 +    PRINTF0 (ctxt, DBG_INTR, "WakeupLwp: woken up context\n");
7577 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7578 +}
7579 +
7580 +void
7581 +QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie)
7582 +{
7583 +    ELAN3_DEV     *dev = ctxt->Device;
7584 +    unsigned long flags;
7585 +
7586 +    PRINTF1 (ctxt, DBG_EVENT, "QueueEventInterrupt: cookie %08x\n", cookie);
7587 +
7588 +    if (ELAN3_OP_EVENT (ctxt, cookie, OP_INTR) == OP_DEFER)
7589 +    {
7590 +       spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
7591 +
7592 +       if (ELAN3_QUEUE_REALLY_FULL (ctxt->EventCookieQ))
7593 +       {
7594 +           ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
7595 +           StartSwapoutContext (ctxt, 0, NULL);
7596 +       }
7597 +       else
7598 +       {
7599 +           *(ELAN3_QUEUE_BACK (ctxt->EventCookieQ, ctxt->EventCookies)) = cookie;
7600 +           
7601 +           ELAN3_QUEUE_ADD (ctxt->EventCookieQ);
7602 +           kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
7603 +           if (ELAN3_QUEUE_FULL (ctxt->EventCookieQ))
7604 +           {
7605 +               ctxt->Status |= CTXT_EVENT_QUEUE_FULL;
7606 +               StartSwapoutContext (ctxt, 0, NULL);
7607 +           }
7608 +       }
7609 +       spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
7610 +    }
7611 +}
7612 +
7613 +int
7614 +ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...)
7615 +{
7616 +    int     res;
7617 +    va_list ap;
7618 +
7619 +    va_start (ap, trap);
7620 +
7621 +    PRINTF2 (ctxt, DBG_FN, "ElanException: proc %d type %d\n", proc, type);
7622 +
7623 +    res = ELAN3_OP_EXCEPTION (ctxt, type, proc, trap, ap);
7624 +
7625 +    va_end (ap);
7626 +    
7627 +    return (res);
7628 +}
7629 +
7630 +
7631 +/*
7632 + * Local variables:
7633 + * c-file-style: "stroustrup"
7634 + * End:
7635 + */
7636 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/context_linux.c
7637 ===================================================================
7638 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/context_linux.c      2004-02-23 16:02:56.000000000 -0500
7639 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/context_linux.c   2005-07-28 14:52:52.787688008 -0400
7640 @@ -0,0 +1,229 @@
7641 +/*
7642 + *    Copyright (c) 2003 by Quadrics Limited.
7643 + * 
7644 + *    For licensing information please see the supplied COPYING file
7645 + *
7646 + */
7647 +
7648 +#ident "@(#)$Id: context_linux.c,v 1.28.2.3 2005/03/02 13:45:27 david Exp $"
7649 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/context_linux.c,v $*/
7650 +
7651 +#include <qsnet/kernel.h>
7652 +#include <qsnet/kpte.h>
7653 +
7654 +#include <elan3/elanregs.h>
7655 +#include <elan3/elandev.h>
7656 +#include <elan3/elanvp.h>
7657 +#include <elan3/elan3mmu.h>
7658 +#include <elan3/elanctxt.h>
7659 +#include <elan3/elandebug.h>
7660 +#include <elan3/urom_addrs.h>
7661 +#include <elan3/thread.h>
7662 +
7663 +int
7664 +LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr addr, int len, int protFault, int writeable)
7665 +{
7666 +    ELAN3MMU           *elan3mmu = ctxt->Elan3mmu;
7667 +    ELAN3MMU_RGN       *rgn;
7668 +    caddr_t            mainAddr;
7669 +    int                        perm;
7670 +    unsigned int        off;
7671 +    unsigned long       flags;
7672 +
7673 +    ASSERT (PAGE_ALIGNED (addr) && PAGE_ALIGNED (len));
7674 +
7675 +    PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr %08x len %08x%s%s\n", 
7676 +        addr, len, protFault ? " prot fault" : "", writeable ? " writeable" : "");
7677 +
7678 +    /* Ensure there's enough elan mmu tables for us to use */
7679 +    elan3mmu_expand (elan3mmu, addr, len, PTBL_LEVEL_3, 0);
7680 +
7681 +    while (len > 0) 
7682 +    {
7683 +       /*
7684 +        * Retrieve permission region and calculate main address
7685 +        */
7686 +       spin_lock (&elan3mmu->elan3mmu_lock);
7687 +
7688 +       rgn = elan3mmu_rgnat_elan (elan3mmu, addr);
7689 +       if (rgn == NULL) {
7690 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: no permission region at %lx %p\n", 
7691 +               (u_long) addr, rgn);
7692 +           spin_unlock (&elan3mmu->elan3mmu_lock);
7693 +           return (EFAULT);
7694 +       }
7695 +       mainAddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
7696 +
7697 +       ASSERT (PAGE_ALIGNED ((unsigned long)mainAddr));
7698 +
7699 +       spin_unlock (&elan3mmu->elan3mmu_lock);
7700 +
7701 +       /*
7702 +        * If we're trying to load a translation to the elan command port, 
7703 +        * then don't do it now, but mark the context to have it reloaded
7704 +        * just before we restart any threads. We do this because we don't
7705 +        * want to call into the segment driver since we could then block
7706 +        * waiting for the command port to become available.
7707 +        */
7708 +       if (mainAddr == ctxt->CommandPageMapping)
7709 +       {
7710 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr=%08x maps command port\n", addr);
7711 +
7712 +           spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
7713 +           UnloadCommandPageMapping (ctxt);
7714 +           spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
7715 +       }
7716 +       else 
7717 +       {
7718 +           struct vm_area_struct *area;
7719 +           struct mm_struct *mm = current->mm;
7720 +           pte_t *ptep_ptr;
7721 +           pte_t  ptep_value;
7722 +
7723 +           down_read (&current->mm->mmap_sem);
7724 +
7725 +           if ((area = find_vma_intersection(mm, (unsigned long)mainAddr, (unsigned long)mainAddr + PAGESIZE)) == NULL)
7726 +           {
7727 +               PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p no vma\n", mainAddr);
7728 +               up_read (&current->mm->mmap_sem);
7729 +               return EFAULT;
7730 +           }
7731 +
7732 +           if (writeable && !(area->vm_flags & VM_WRITE)) 
7733 +           {
7734 +               PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p not writeable\n", mainAddr);
7735 +               up_read (&current->mm->mmap_sem);
7736 +               return EFAULT;
7737 +           }
7738 +           
7739 +           spin_lock (&mm->page_table_lock);
7740 +
7741 +           /* don't dereference the pointer after the unmap */
7742 +           ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);  
7743 +           if (ptep_ptr) {
7744 +               ptep_value = *ptep_ptr;
7745 +               pte_unmap(ptep_ptr);
7746 +           }
7747 +
7748 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p %s %s\n", 
7749 +                   mainAddr, writeable ? "writeable" : "readonly", 
7750 +                   !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " : 
7751 +                   writeable && !pte_write(ptep_value) ? "COW" : "OK");
7752 +           
7753 +           if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value))) 
7754 +           {  
7755 +               spin_unlock (&mm->page_table_lock);
7756 +
7757 +               get_user_pages (current, current->mm, (unsigned long) mainAddr, PAGE_SIZE, 
7758 +                               (area->vm_flags & VM_WRITE), 0, NULL, NULL);
7759 +
7760 +               spin_lock (&mm->page_table_lock);
7761 +
7762 +               /* don't dereference the pointer after the unmap */
7763 +               ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);  
7764 +               if (ptep_ptr) {
7765 +                   ptep_value = *ptep_ptr;
7766 +                   pte_unmap(ptep_ptr);
7767 +               }
7768 +
7769 +               if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value))) 
7770 +               {
7771 +                   spin_unlock (&mm->page_table_lock);
7772 +                   up_read (&current->mm->mmap_sem);
7773 +                   return EFAULT;
7774 +               }
7775 +           } 
7776 +
7777 +           /* don't allow user write access to kernel pages if not kernel */
7778 +           if (!pte_read(ptep_value))
7779 +           {
7780 +               spin_unlock (&mm->page_table_lock);
7781 +               up_read (&current->mm->mmap_sem);
7782 +               return EFAULT;
7783 +           }
7784 +
7785 +           if (writeable)
7786 +               pte_mkdirty(ptep_value);
7787 +           pte_mkyoung (ptep_value);
7788 +
7789 +           /* now load the elan pte */
7790 +           if (writeable)
7791 +               perm  = rgn->rgn_perm;
7792 +           else
7793 +               perm = ELAN3_PERM_READONLY(rgn->rgn_perm & ELAN3_PTE_PERM_MASK) | (rgn->rgn_perm & ~ELAN3_PTE_PERM_MASK);
7794 +
7795 +           for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE)
7796 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, pte_phys(ptep_value) + off, perm, PTE_LOAD | PTE_NO_SLEEP);
7797 +
7798 +           spin_unlock (&mm->page_table_lock);
7799 +           up_read (&current->mm->mmap_sem);
7800 +       }
7801 +
7802 +       len -= PAGESIZE;
7803 +       addr += PAGESIZE;
7804 +    }
7805 +    return (ESUCCESS);
7806 +}
7807 +
7808 +
7809 +/*
7810 + * LoadCommandPortTranslation:
7811 + *    explicitly load an elan translation to the command port.
7812 + *    but only do it if the command port is accessible.
7813 + *
7814 + *    we call this function just after we have restarted
7815 + *    and trapped commands,  since when a command traps
7816 + *    the elan translation to the command port is unloaded.
7817 + */
7818 +void
7819 +LoadCommandPortTranslation (ELAN3_CTXT *ctxt)
7820 +{
7821 +    ELAN3MMU     *elan3mmu = ctxt->Elan3mmu;
7822 +    ELAN3MMU_RGN *rgn;
7823 +    E3_Addr       addr;
7824 +    int                  perm;
7825 +    physaddr_t    phys;
7826 +    unsigned int  off;
7827 +    unsigned long flags;
7828 +
7829 +    PRINTF (ctxt, DBG_FAULT, "LoadCommandPortTranslation: SegAddr=%p Status=%x\n", ctxt->CommandPageMapping, ctxt->Status);
7830 +
7831 +    if (ctxt->CommandPageMapping != NULL  && !(ctxt->Status & CTXT_COMMAND_MAPPED_ELAN))
7832 +    {
7833 +       spin_lock (&elan3mmu->elan3mmu_lock);
7834 +       
7835 +       rgn = elan3mmu_rgnat_main (elan3mmu, ctxt->CommandPageMapping);
7836 +       if (rgn == (ELAN3MMU_RGN *) NULL) 
7837 +       {
7838 +           PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: no permission for command port\n");
7839 +           spin_unlock (&elan3mmu->elan3mmu_lock);
7840 +           return;
7841 +       }
7842 +       
7843 +       addr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase);
7844 +       perm = rgn->rgn_perm;
7845 +       phys = kmem_to_phys((caddr_t) ctxt->CommandPage);
7846 +
7847 +       spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
7848 +       if (ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) && !(ctxt->Status & CTXT_OTHERS_REASONS))
7849 +       {
7850 +           PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: load xlation addr=%08x phys=%llx perm=%d\n", 
7851 +                  addr, (unsigned long long)phys, perm);
7852 +
7853 +           ctxt->Status |= CTXT_COMMAND_MAPPED_ELAN;
7854 +
7855 +           for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
7856 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, phys + off, perm, PTE_LOAD | PTE_NO_SLEEP);
7857 +       }
7858 +       spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
7859 +       
7860 +       spin_unlock (&elan3mmu->elan3mmu_lock);
7861 +    }
7862 +}
7863 +
7864 +
7865 +/*
7866 + * Local variables:
7867 + * c-file-style: "stroustrup"
7868 + * End:
7869 + */
7870 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/cproc.c
7871 ===================================================================
7872 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/cproc.c      2004-02-23 16:02:56.000000000 -0500
7873 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/cproc.c   2005-07-28 14:52:52.788687856 -0400
7874 @@ -0,0 +1,539 @@
7875 +/*
7876 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
7877 + * 
7878 + *    For licensing information please see the supplied COPYING file
7879 + *
7880 + */
7881 +
7882 +#ident "@(#)$Id: cproc.c,v 1.46 2004/02/10 15:05:10 david Exp $"
7883 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/cproc.c,v $ */
7884 +
7885 +
7886 +#include <qsnet/kernel.h>
7887 +
7888 +#include <elan3/elanregs.h>
7889 +#include <elan3/elandev.h>
7890 +#include <elan3/elanvp.h>
7891 +#include <elan3/elan3mmu.h>
7892 +#include <elan3/elanctxt.h>
7893 +#include <elan3/elandebug.h>
7894 +#include <elan3/urom_addrs.h>
7895 +#include <elan3/vmseg.h>
7896 +
7897 +void
7898 +HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Maskp)
7899 +{
7900 +    E3_FaultSave_BE     FaultSave;
7901 +    CProcTrapBuf_BE    TrapBuf;
7902 +    COMMAND_TRAP       *trap;
7903 +    ELAN3_CTXT        *ctxt;
7904 +    sdramaddr_t         CurrTrap;
7905 +    sdramaddr_t         LastTrapAddr;
7906 +    int                NTrapEntries;
7907 +    int                        NewPend;
7908 +    unsigned long       flags;
7909 +
7910 +    /* 
7911 +     * Temporarily mask out the command processor interrupt, since
7912 +     * we may cause it be re-asserted when we re-issue the commands
7913 +     * from the overflow queue area.
7914 +     */
7915 +    DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
7916 +
7917 +    NewPend = read_reg32 (dev, Exts.InterruptReg);
7918 +
7919 +    do {
7920 +       if (NewPend & INT_ComQueue)
7921 +       {
7922 +           if ((read_reg32 (dev, ComQueueStatus) & ComQueueError) != 0)
7923 +           {
7924 +               printk ("elan%d: InterruptReg=%x ComQueueStatus=%x\n", dev->Instance,
7925 +                       read_reg32 (dev, Exts.InterruptReg), read_reg32 (dev, ComQueueStatus));
7926 +               panic ("elan: command queue has overflowed !!");
7927 +               /* NOTREACHED */
7928 +           }
7929 +
7930 +           BumpStat (dev, ComQueueHalfFull);
7931 +
7932 +           /*
7933 +            * Capture the other cpus and stop the threads processor then
7934 +            * allow the command processor to eagerly flush the command queue.
7935 +            */
7936 +           dev->FlushCommandCount++; dev->HaltThreadCount++;
7937 +           SetSchedStatusRegister (dev, Pend, Maskp);
7938 +
7939 +           CAPTURE_CPUS();
7940 +
7941 +           while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
7942 +               mb();
7943 +           
7944 +           /*
7945 +            * Let the threads processor run again, and release the cross call.
7946 +            */
7947 +           RELEASE_CPUS();
7948 +
7949 +           dev->FlushCommandCount--; dev->HaltThreadCount--;
7950 +           SetSchedStatusRegister (dev, Pend, Maskp);
7951 +
7952 +           /*
7953 +            * Re-sample the interrupt register to see if the command processor
7954 +            * has trapped while flushing the queue.  Preserve the INT_ComQueue
7955 +            * bit, so we can clear the ComQueueStatus register later.
7956 +            */
7957 +           NewPend = (read_reg32 (dev, Exts.InterruptReg) | INT_ComQueue);
7958 +       }
7959 +       
7960 +       CurrTrap = dev->CommandPortTraps[dev->CurrentCommandPortTrap];
7961 +       
7962 +       if (NewPend & INT_CProc)
7963 +       {
7964 +           BumpStat (dev, CProcTraps);
7965 +
7966 +           /*
7967 +            * Copy the MMU Fault Save area and zero it out for future traps.
7968 +            */
7969 +           elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), &FaultSave, sizeof (E3_FaultSave));
7970 +           elan3_sdram_zeroq_sdram      (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), sizeof (E3_FaultSave));
7971 +
7972 +           /*
7973 +            * First entry in the cproc trap save area is the value of Areg and Breg for the
7974 +            * uWord before the address fault.
7975 +            */
7976 +           TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf.Align64);
7977 +
7978 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.r.Breg >> 16));
7979 +           if (ctxt == NULL)
7980 +           {
7981 +               PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context invalid [%08x.%08x]\n", TrapBuf.r.Areg, TrapBuf.r.Breg);
7982 +               BumpStat (dev, InvalidContext);
7983 +           }
7984 +           else
7985 +           {
7986 +               if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ))
7987 +               {
7988 +                   if ((ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR) == 0)
7989 +                   {
7990 +                       ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
7991 +                       StartSwapoutContext (ctxt, Pend, Maskp);
7992 +                   }
7993 +               }
7994 +               else
7995 +               {
7996 +                   trap = ELAN3_QUEUE_BACK (ctxt->CommandTrapQ, ctxt->CommandTraps);
7997 +                   
7998 +                   trap->FaultSave     = FaultSave;
7999 +                   trap->Status.Status = read_reg32 (dev, Exts.CProcStatus.Status);
8000 +                   trap->TrapBuf       = TrapBuf;
8001 +                   
8002 +                   /*
8003 +                    * The command processor does not stop after it has trapped. It will continue
8004 +                    * to save commands for other contexts into the commands port save area.
8005 +                    * The valid context for the trap is held in FaultSave. As some of this
8006 +                    * trap code uses the context in the status register the local copy must be
8007 +                    * updated with the trap context.
8008 +                    */
8009 +                   trap->Status.s.Context = (TrapBuf.r.Breg >> 16);
8010 +                   
8011 +                   PRINTF4 (ctxt, DBG_INTR, "HandleCProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
8012 +                            trap->Status.s.WakeupFunction, trap->Status.s.Context,
8013 +                            trap->Status.s.SuspendAddr, MiToName(trap->Status.s.TrapType));
8014 +                   PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: Areg=%08x Breg=%08x\n", 
8015 +                            trap->TrapBuf.r.Areg, trap->TrapBuf.r.Breg);
8016 +                   
8017 +                   if (ELAN3_OP_CPROC_TRAP (ctxt, trap) == OP_DEFER)
8018 +                   {
8019 +                       ELAN3_QUEUE_ADD (ctxt->CommandTrapQ);
8020 +                       
8021 +                       PRINTF1 (ctxt, DBG_INTR, "HandleCProcTrap: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag);
8022 +                       
8023 +                       ctxt->FlagPage->CommandFlag = 1;
8024 +                       
8025 +                       kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
8026 +                   }
8027 +               }
8028 +
8029 +               UnloadCommandPageMapping (ctxt);
8030 +           }
8031 +       }
8032 +       
8033 +       /*
8034 +        * Now change the CommandPortTrap queue.
8035 +        * Must stop the command processor, wait for it to stop, find the final
8036 +        * entry in the current cproc trap save area, reset the comm port
8037 +        * trap save address to the other queue, clear the command port interrupt and
8038 +        * set it running normally again, and then let it go again. This is not very
8039 +        * time critical but it would be a good idea to prevent a higher priority
8040 +        * interrupt from slowing down the process to prevent the fifos from filling.
8041 +        */
8042 +       spin_lock_irqsave (&dev->CProcLock, flags);
8043 +
8044 +       SET_SCHED_STATUS (dev, CProcStop);
8045 +
8046 +       while ((read_reg32 (dev, Exts.SchCntReg) & CProcStopped) == 0)
8047 +       {
8048 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for command processor to stop\n");
8049 +           mb();
8050 +       }
8051 +       
8052 +       /*
8053 +        * Remember how many entries are in the saved command queue,  and 
8054 +        * re-initialise it, before restarting the command processor.
8055 +        */
8056 +       NTrapEntries = (read_reg32 (dev, CProc_TrapSave_Addr) - dev->CommandPortTraps[dev->CurrentCommandPortTrap])/sizeof (E3_uint64);
8057 +       LastTrapAddr = dev->CommandPortTraps[dev->CurrentCommandPortTrap] + NTrapEntries*sizeof (TrapBuf);
8058 +
8059 +       dev->CurrentCommandPortTrap ^= 1;
8060 +       write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
8061 +
8062 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: command trap queue has %d entries\n", NTrapEntries);
8063 +
8064 +       if (NTrapEntries > ELAN3_COMMAND_TRAP_SIZE/sizeof (E3_uint64))
8065 +           panic ("HandleCProcTrap: command trap queue has overflowed\n");
8066 +       
8067 +       if (NewPend & INT_CProc)
8068 +       {
8069 +           /*
8070 +            * Clear the CProc interrupt and set it running normally again. Nothing should
8071 +            * be running now that could issue commands apart from this trap handler.
8072 +            */
8073 +           PULSE_SCHED_STATUS (dev, RestartCProc);
8074 +       }
8075 +       
8076 +       if (NewPend & INT_ComQueue)
8077 +       {
8078 +           /*
8079 +            * Write any value here to clear out the half full and error bits of the command
8080 +            * overflow queues. This will also remove the overflow interrupt.
8081 +            */
8082 +           write_reg32 (dev, ComQueueStatus, 0);
8083 +       }
8084 +       
8085 +       /*
8086 +        * And let the command processor start again
8087 +        */
8088 +       CLEAR_SCHED_STATUS (dev, CProcStop);
8089 +       
8090 +       /*
8091 +        * Now re-issue all the commands that were issued after the command port trapped.
8092 +        * Should halt the dma processor and force commands to be put onto the run queues
8093 +        * to ensure that a remote re-issued command is handled correctly. NOTE it is
8094 +        * not necessary to wait for the dma processor to stop and this will reduce the
8095 +        * performance impact. As CProcHalt is asserted all commands will be flushed
8096 +        * to the queues.
8097 +        */
8098 +       dev->HaltDmaDequeueCount++; dev->FlushCommandCount++;
8099 +       SetSchedStatusRegister (dev, Pend, Maskp);
8100 +       
8101 +       /*
8102 +        * XXXX: should we do a capture/release if the trap overflow
8103 +        *       area has a "large" number of commands in it,  since
8104 +        *       we will just stuff them all back in, together with 
8105 +        *       all those issued by the other cpus/thread processors.
8106 +        */
8107 +       while (CurrTrap != LastTrapAddr)
8108 +       {
8109 +           /* Read the next saved (but not trapped) command */
8110 +           TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf);
8111 +           
8112 +
8113 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.s.ContextType >> 16));
8114 +           
8115 +           if (ctxt == NULL)
8116 +           {
8117 +               PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context %x invalid\n", TrapBuf.s.ContextType >> 16);
8118 +               BumpStat (dev, InvalidContext);
8119 +           }
8120 +           else
8121 +           {
8122 +               if (!ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
8123 +               {
8124 +                   PRINTF3 (ctxt, DBG_INTR, "HandleCProcTrap: save command %x context %x - %08x\n",
8125 +                            (TrapBuf.s.ContextType>>3) & 0x3ff, TrapBuf.s.ContextType >> 17, TrapBuf.s.Addr);
8126 +                   
8127 +                   if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandQ))
8128 +                   {
8129 +                       ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8130 +                       StartSwapoutContext (ctxt, Pend, Maskp);
8131 +                   }
8132 +                   else
8133 +                   {
8134 +                       *ELAN3_QUEUE_BACK(ctxt->CommandQ, ctxt->Commands) = TrapBuf;
8135 +
8136 +                       ELAN3_QUEUE_ADD (ctxt->CommandQ);
8137 +                   }
8138 +                   continue;
8139 +               }
8140 +               
8141 +               /* Reissue the command to the command port for this context */
8142 +               PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: re-issue command %x - %08x\n",
8143 +                        (TrapBuf.s.ContextType>>5) & 0xff, TrapBuf.s.Addr);
8144 +               
8145 +               mb();
8146 +               if (ELAN3_OP_CPROC_REISSUE(ctxt, &TrapBuf) != OP_HANDLED)
8147 +                   ((E3_uint32 *) ctxt->CommandPort)[(TrapBuf.s.ContextType>>5) & 0xff] = TrapBuf.s.Addr;
8148 +               mmiob();
8149 +           }
8150 +       }
8151 +       
8152 +       while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
8153 +       {
8154 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for queues to empty after reissueing commands\n");
8155 +           mb();
8156 +       }
8157 +       
8158 +       dev->HaltDmaDequeueCount--; dev->FlushCommandCount--;
8159 +       SetSchedStatusRegister (dev, Pend, Maskp);
8160 +       
8161 +       spin_unlock_irqrestore (&dev->CProcLock, flags);
8162 +
8163 +       /*
8164 +        * Re-read the interrupt register and see if we've got another command
8165 +        * port interrupt
8166 +        */
8167 +       NewPend = read_reg32 (dev, Exts.InterruptReg);
8168 +    } while ((NewPend & (INT_CProc | INT_ComQueue)) != 0);
8169 +
8170 +
8171 +    /*
8172 +     * Re-enable the command processor interrupt as we've finished 
8173 +     * polling it.
8174 +     */
8175 +    ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
8176 +}
8177 +
8178 +void
8179 +ResolveCProcTrap (ELAN3_CTXT *ctxt)
8180 +{
8181 +    ELAN3_DEV     *dev = ctxt->Device;
8182 +    COMMAND_TRAP *trap;
8183 +    int                  res;
8184 +    unsigned long flags;
8185 +
8186 +    kmutex_lock (&ctxt->CmdLock);
8187 +    spin_lock_irqsave (&dev->IntrLock, flags);
8188 +
8189 +    while (! ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ))
8190 +    {
8191 +       trap = ELAN3_QUEUE_MIDDLE(ctxt->CommandTrapQ, ctxt->CommandTraps);
8192 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
8193 +
8194 +       switch (trap->Status.s.TrapType)
8195 +       {
8196 +       case MI_EventIntUpdateBPtr:
8197 +       case MI_ChainedEventError:
8198 +       case MI_EventQueueOverflow:
8199 +       case MI_ThreadQueueOverflow:
8200 +       case MI_DmaQueueOverflow:
8201 +           PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: %s\n", MiToName (trap->Status.s.TrapType));
8202 +           break;
8203 +           
8204 +       default:
8205 +           /* All other traps are MMU related, we should have a fault address and FSR */
8206 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
8207 +           {
8208 +               PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: elan3_pagefault failed for address %08x\n", 
8209 +                        trap->FaultSave.s.FaultAddress);
8210 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, COMMAND_PROC, trap, &trap->FaultSave, res);
8211 +               
8212 +               /* Set the trap type to 0 so the command does not get re-issued */
8213 +               trap->Status.s.TrapType = 0;
8214 +           }
8215 +           break;
8216 +       }
8217 +       
8218 +       spin_lock_irqsave (&dev->IntrLock, flags);
8219 +
8220 +       ELAN3_QUEUE_CONSUME (ctxt->CommandTrapQ);
8221 +    }
8222 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
8223 +    kmutex_unlock (&ctxt->CmdLock);
8224 +}
8225 +
8226 +int
8227 +RestartCProcTrap (ELAN3_CTXT *ctxt)
8228 +{
8229 +    ELAN3_DEV     *dev      = ctxt->Device;
8230 +    COMMAND_TRAP  trap;
8231 +    void        *item;
8232 +    int                  res;
8233 +    unsigned long flags;
8234 +
8235 +    spin_lock_irqsave (&dev->IntrLock, flags);
8236 +
8237 +    while (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ))
8238 +    {
8239 +       trap = (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps));
8240 +       ELAN3_QUEUE_REMOVE (ctxt->CommandTrapQ);
8241 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
8242 +       
8243 +       BumpUserStat (ctxt, CProcTraps);
8244 +
8245 +       switch (trap.Status.s.TrapType)
8246 +       {
8247 +       case 0:
8248 +           res = ISSUE_COMMAND_OK;
8249 +           break;
8250 +           
8251 +       case MI_WaitForWaitEventDesc:
8252 +           /*
8253 +            * Fault occurred on the read of wait event descriptor for wait event type 0.
8254 +            * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr
8255 +            * is in the Areg save value.
8256 +            */
8257 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 desc read fault %08x\n", 
8258 +                    trap.TrapBuf.r.Areg);
8259 +           
8260 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
8261 +           break;
8262 +
8263 +       case MI_WaitForEventReadTy0:
8264 +           /*
8265 +            * Fault occurred on the read of event location for wait event type 0.
8266 +            * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr
8267 +            * is in the Areg save value.
8268 +            */
8269 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 event loc fault %08x\n",
8270 +                    trap.TrapBuf.r.Areg);
8271 +           
8272 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
8273 +           break;
8274 +           
8275 +       case MI_WaitForEventReadTy1:
8276 +           /*
8277 +            * Fault occurred on the read of the event location for wait event type 1.
8278 +            * Areg has the original ptr and count.
8279 +            * Fault already fixed. Just re-issue the wait command using Areg and context.
8280 +            */
8281 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type1 event location read fault %08x\n",
8282 +                    trap.TrapBuf.r.Areg);
8283 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent1), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
8284 +           break;
8285 +           
8286 +       case MI_WaitForCntxDmaDescRead:
8287 +       case MI_WaitForNonCntxDmaDescRead:
8288 +           /*
8289 +            * Fault occurred on the read of the dma descriptor. Run dma using the
8290 +            * Fault Address in FaultSave.
8291 +            */
8292 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", 
8293 +                    trap.FaultSave.s.FaultAddress);
8294 +           
8295 +           res = IssueDmaCommand (ctxt, trap.FaultSave.s.FaultAddress, NULL, ISSUE_COMMAND_FOR_CPROC);
8296 +           break;
8297 +           
8298 +       default:
8299 +           /*
8300 +            * Assume the fault will be fixed by FixupEventTrap.
8301 +            */
8302 +           FixupEventTrap (ctxt, COMMAND_PROC, &trap, trap.Status.s.TrapType, &trap.FaultSave, ISSUE_COMMAND_FOR_CPROC);
8303 +
8304 +           res = ISSUE_COMMAND_OK;
8305 +           break;
8306 +       }
8307 +
8308 +       switch (res)
8309 +       {
8310 +       case ISSUE_COMMAND_OK:                                  /* command re-issued ok*/
8311 +           break;
8312 +
8313 +       case ISSUE_COMMAND_TRAPPED:                             /* command trapped,  it will have been copied */
8314 +           return (EAGAIN);                                    /* to the back of the trap queue */
8315 +
8316 +       case ISSUE_COMMAND_RETRY:                               /* didn't issue command, so place back at front for */
8317 +           spin_lock_irqsave (&dev->IntrLock, flags);          /* later (after resolving other traps */
8318 +
8319 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ))
8320 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8321 +           else
8322 +           {
8323 +               ELAN3_QUEUE_ADD_FRONT(ctxt->CommandTrapQ);
8324 +               (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps)) = trap;
8325 +           }
8326 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
8327 +           return (EAGAIN);
8328 +
8329 +       default:
8330 +           return (EINVAL);
8331 +       }
8332 +       spin_lock_irqsave (&dev->IntrLock, flags);
8333 +    }  
8334 +
8335 +    /*
8336 +     * GNAT 5409 - if CommandPortItem was not NULL, but other reasons were set,
8337 +     *             then we'd not free the CommandPortItem even though we'd re-
8338 +     *             issued all trapped and overflowed commands.  Hence only return
8339 +     *             without clearing CommandPortItem if we will be called again as
8340 +     *             either CommandTrapQ or CommandQ is not empty.
8341 +     */
8342 +
8343 +    /* Now run the overflowed commands for this context */
8344 +    if (! ELAN3_QUEUE_EMPTY (ctxt->CommandQ))
8345 +    {
8346 +       if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
8347 +       {
8348 +           PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: cannot issue overflowed commands\n");
8349 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
8350 +           return (EAGAIN);
8351 +       }
8352 +
8353 +       /*
8354 +        * Just re-issue the commands,  if one traps then the remainder will 
8355 +        * just get placed in the overflow queue again and the interrupt handler
8356 +        * will copy them back in here.
8357 +        *
8358 +        * Stop the dma processor from taking commands,  since one of the commands
8359 +        * could be a re-issued remote dma, which must be processed by the command
8360 +        * processor.
8361 +        */
8362 +       
8363 +       if (dev->HaltDmaDequeueCount++ == 0)
8364 +           SetSchedStatusRegister (dev, 0, NULL);
8365 +       
8366 +       while (! ELAN3_QUEUE_EMPTY (ctxt->CommandQ))
8367 +       {
8368 +           CProcTrapBuf_BE *TrapBuf = ELAN3_QUEUE_FRONT (ctxt->CommandQ, ctxt->Commands);
8369 +           
8370 +           PRINTF2 (ctxt, DBG_CPROC, "RestartCProcTrap: re-issue command %x - %08x\n",
8371 +                    (TrapBuf->s.ContextType>>5) & 0xff, TrapBuf->s.Addr);
8372 +           mb();                                                       /* ensure writes to main memory completed */
8373 +           ((E3_uint32 *) ctxt->CommandPort)[(TrapBuf->s.ContextType>>5) & 0xff] = TrapBuf->s.Addr;
8374 +           mmiob();                                            /* and flush through IO writes */
8375 +           
8376 +           ELAN3_QUEUE_REMOVE (ctxt->CommandQ);
8377 +       }
8378 +       
8379 +       /* observe the command processor having halted */
8380 +       res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, 0, &flags);
8381 +       
8382 +       if (res != ISSUE_COMMAND_OK)
8383 +       {
8384 +           PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: trapped after issueing overflowed commands\n");
8385 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
8386 +           return (EAGAIN);
8387 +       }
8388 +    }
8389 +
8390 +    /* remove the command port item, while holding the lock */
8391 +    item = ctxt->CommandPortItem;
8392 +    ctxt->CommandPortItem = NULL;
8393 +
8394 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
8395 +       
8396 +    if (item != NULL)                                          /* Free of any item that may have been stored */
8397 +    {                                                          /* because of the commandport trap */
8398 +       PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: commandPortItem %p\n", item);
8399 +
8400 +       kmutex_lock (&ctxt->SwapListsLock);
8401 +       ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item);
8402 +       kmutex_unlock (&ctxt->SwapListsLock);
8403 +    }
8404 +
8405 +    return (ESUCCESS);
8406 +}
8407 +
8408 +
8409 +/*
8410 + * Local variables:
8411 + * c-file-style: "stroustrup"
8412 + * End:
8413 + */
8414 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/dproc.c
8415 ===================================================================
8416 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/dproc.c      2004-02-23 16:02:56.000000000 -0500
8417 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/dproc.c   2005-07-28 14:52:52.789687704 -0400
8418 @@ -0,0 +1,553 @@
8419 +/*
8420 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
8421 + * 
8422 + *    For licensing information please see the supplied COPYING file
8423 + *
8424 + */
8425 +
8426 +#ident "@(#)$Id: dproc.c,v 1.52 2003/09/24 13:57:25 david Exp $"
8427 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/dproc.c,v $ */
8428 +
8429 +#include <qsnet/kernel.h>
8430 +
8431 +#include <elan3/elanregs.h>
8432 +#include <elan3/elandev.h>
8433 +#include <elan3/elanvp.h>
8434 +#include <elan3/elan3mmu.h>
8435 +#include <elan3/elanctxt.h>
8436 +#include <elan3/elandebug.h>
8437 +#include <elan3/urom_addrs.h>
8438 +#include <elan3/intrinsics.h>
8439 +#include <elan3/dma.h>
8440 +#include <elan3/vmseg.h>
8441 +
8442 +#define DMA_RETRY_FAIL_COUNT   8
8443 +
8444 +static void PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr);
8445 +
8446 +int
8447 +HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits)
8448 +{
8449 +    DMA_TRAP    *trap   = dev->DmaTrap;
8450 +
8451 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
8452 +
8453 +    /* Scoop out the trap information, before restarting the Elan */
8454 +    trap->Status.Status = read_reg32 (dev, Exts.DProcStatus.Status);
8455 +    
8456 +    ASSERT(trap->Status.s.WakeupFunction == WakeupNever);
8457 +
8458 +    /* copy the normal dma access fault type */
8459 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), &trap->FaultSave, sizeof (E3_FaultSave_BE));
8460 +    
8461 +    /* copy all 4 of the dma data fault type */
8462 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), &trap->Data0, 4*sizeof (E3_FaultSave_BE));
8463 +    
8464 +    /* Copy the DMA descriptor */
8465 +    copy_dma_regs (dev, &trap->Desc);
8466 +    
8467 +    /* Copy the packet info */
8468 +    trap->PacketInfo.Value = read_reg32 (dev, Exts.Dmas.DmaRds.DMA_PacketInfo.Value);
8469 +
8470 +    /* update device statistics */
8471 +    BumpStat (dev, DProcTraps);
8472 +    switch (trap->Status.s.TrapType)
8473 +    {
8474 +    case MI_DmaPacketTimedOutOrPacketError:
8475 +       if (trap->PacketInfo.s.PacketTimeout)
8476 +           BumpStat (dev, DmaOutputTimeouts);
8477 +       else if (trap->PacketInfo.s.PacketAckValue == C_ACK_ERROR)
8478 +           BumpStat (dev, DmaPacketAckErrors);
8479 +       break;
8480 +       
8481 +    case MI_DmaFailCountError:
8482 +       BumpStat (dev, DmaRetries);
8483 +       break;
8484 +    }
8485 +
8486 +    /* Must now zero all the FSRs so that a subsequent fault can be seen */
8487 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), sizeof (E3_FaultSave));
8488 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 4*sizeof (E3_FaultSave));
8489 +           
8490 +    *RestartBits |= RestartDProc;
8491 +    return (TRUE);
8492 +}
8493 +
8494 +void
8495 +DeliverDProcTrap (ELAN3_DEV *dev, DMA_TRAP *dmaTrap, E3_uint32 Pend)
8496 +{
8497 +    ELAN3_CTXT     *ctxt;
8498 +    E3_FaultSave_BE *FaultArea;
8499 +    DMA_TRAP       *trap;
8500 +    register int     i;
8501 +
8502 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
8503 +
8504 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, dmaTrap->Status.s.Context);
8505 +
8506 +    if (ctxt == NULL)
8507 +    {
8508 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverDProcTrap: context %x invalid\n", dmaTrap->Status.s.Context);
8509 +       BumpStat (dev, InvalidContext);
8510 +    }
8511 +    else
8512 +    {
8513 +       if (ELAN3_OP_DPROC_TRAP (ctxt, dmaTrap) == OP_DEFER)
8514 +       {
8515 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->DmaTrapQ))
8516 +           {
8517 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8518 +               StartSwapoutContext (ctxt, Pend, NULL);
8519 +           }
8520 +           else
8521 +           {
8522 +               trap = ELAN3_QUEUE_BACK (ctxt->DmaTrapQ, ctxt->DmaTraps);
8523 +               
8524 +               bcopy (dmaTrap, trap, sizeof (DMA_TRAP));
8525 +               
8526 +               PRINTF5 (ctxt, DBG_INTR, "DeliverDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x PacketInfo=%x TrapType=%s\n",
8527 +                        trap->Status.s.WakeupFunction, trap->Status.s.Context, 
8528 +                        trap->Status.s.SuspendAddr, trap->PacketInfo.Value, MiToName (trap->Status.s.TrapType));
8529 +               PRINTF3 (ctxt, DBG_INTR, "                    FaultAddr=%x EventAddr=%x FSR=%x\n",
8530 +                        trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
8531 +                        trap->FaultSave.s.FSR.Status);
8532 +               for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
8533 +                   PRINTF4 (ctxt, DBG_INTR, "                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
8534 +                            FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
8535 +               
8536 +               PRINTF4 (ctxt, DBG_INTR, "                 type %08x size %08x source %08x dest %08x\n",
8537 +                        trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
8538 +               PRINTF2 (ctxt, DBG_INTR, "                 Dest event %08x cookie/proc %08x\n",
8539 +                        trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
8540 +               PRINTF2 (ctxt, DBG_INTR, "                 Source event %08x cookie/proc %08x\n",
8541 +                        trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
8542 +               ELAN3_QUEUE_ADD (ctxt->DmaTrapQ);
8543 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
8544 +               
8545 +               if (ELAN3_QUEUE_FULL (ctxt->DmaTrapQ))
8546 +               {
8547 +                   PRINTF0 (ctxt, DBG_INTR, "DeliverDProcTrap: dma queue full, must swap out\n");
8548 +                   ctxt->Status |= CTXT_DMA_QUEUE_FULL;
8549 +                   
8550 +                   StartSwapoutContext (ctxt, Pend, NULL);
8551 +               }
8552 +           }
8553 +       }
8554 +    }
8555 +}
8556 +
8557 +int
8558 +NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
8559 +{
8560 +    ELAN3_DEV *dev = ctxt->Device;
8561 +
8562 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
8563 +    
8564 +    if (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ))
8565 +       return (0);
8566 +
8567 +    *trap = *ELAN3_QUEUE_FRONT (ctxt->DmaTrapQ, ctxt->DmaTraps);
8568 +    ELAN3_QUEUE_REMOVE (ctxt->DmaTrapQ);
8569 +    
8570 +    return (1);
8571 +}
8572 +
8573 +void
8574 +ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
8575 +{
8576 +    E3_FaultSave_BE *FaultArea;
8577 +    int                     FaultHandled = 0;
8578 +    int                     res;
8579 +    register int     i;
8580 +    
8581 +    PRINTF4 (ctxt, DBG_DPROC, "ResolveDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
8582 +            trap->Status.s.WakeupFunction, trap->Status.s.Context, 
8583 +            trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
8584 +    PRINTF3 (ctxt, DBG_DPROC, "                    FaultAddr=%x EventAddr=%x FSR=%x\n",
8585 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
8586 +            trap->FaultSave.s.FSR.Status);
8587 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
8588 +       PRINTF4 (ctxt, DBG_DPROC, "                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
8589 +                FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
8590 +
8591 +    PRINTF4 (ctxt, DBG_DPROC, "                  type %08x size %08x source %08x dest %08x\n",
8592 +            trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
8593 +    PRINTF2 (ctxt, DBG_DPROC, "                  Dest event %08x cookie/proc %08x\n",
8594 +            trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
8595 +    PRINTF2 (ctxt, DBG_DPROC, "                  Source event %08x cookie/proc %08x\n",
8596 +            trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
8597 +    
8598 +    BumpUserStat (ctxt, DProcTraps);
8599 +
8600 +    switch (trap->Status.s.TrapType)
8601 +    {
8602 +    case MI_DmaPacketTimedOutOrPacketError:
8603 +       /*
8604 +        * Faulted due to packet timeout or a PAckError.
8605 +        * Reset fail count and reissue the same desc.
8606 +        */
8607 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: got a PAckError or the output timed out. Rescheduling dma.\n");
8608 +       if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, DMA_PROC, trap) == OP_IGNORE)
8609 +       {
8610 +           BumpUserStat (ctxt, DmaRetries);
8611 +
8612 +           trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT;
8613 +
8614 +           RestartDmaTrap (ctxt, trap);
8615 +       }
8616 +       return;
8617 +
8618 +    case MI_DmaFailCountError:
8619 +       /*
8620 +        * Faulted due to dma fail count.
8621 +        * Reset fail count and reissue the same desc.
8622 +        */
8623 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: Reset dma fail count to %d\n", DMA_RETRY_FAIL_COUNT);
8624 +       
8625 +       if (ElanException (ctxt, EXCEPTION_DMA_RETRY_FAIL, DMA_PROC, trap) == OP_IGNORE)
8626 +       {
8627 +           BumpUserStat (ctxt, DmaRetries);
8628 +
8629 +           trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT;
8630 +
8631 +           RestartDmaTrap (ctxt, trap);
8632 +       }
8633 +       return;
8634 +
8635 +    case MI_TimesliceDmaQueueOverflow:
8636 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: dma timeslice queue overflow\n");
8637 +       RestartDmaTrap (ctxt, trap);
8638 +       return;
8639 +       
8640 +    case MI_UnimplementedError:
8641 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: unimplemented dma trap\n");
8642 +       if (ElanException (ctxt, EXCEPTION_UNIMPLEMENTED, DMA_PROC, trap) == OP_IGNORE)
8643 +           RestartDmaTrap (ctxt, trap);
8644 +       return;
8645 +
8646 +    case MI_EventQueueOverflow:
8647 +    case MI_ThreadQueueOverflow:
8648 +    case MI_DmaQueueOverflow:
8649 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n");
8650 +       FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0);
8651 +       return;
8652 +
8653 +    case MI_RemoteDmaCommand:
8654 +    case MI_RunDmaCommand:
8655 +    case MI_DequeueNonSysCntxDma:
8656 +    case MI_DequeueSysCntxDma:
8657 +       /*
8658 +        * The DMA processor has trapped due to outstanding prefetches from the previous 
8659 +        * dma.  The "current" dma has not been consumed, so we just ignore the trap
8660 +        */
8661 +       return;
8662 +
8663 +    case MI_WaitForRemoteDescRead2:
8664 +    case MI_ExecuteDmaDescriptorForRun:
8665 +       /*
8666 +        * The DMA processor has trapped while fetching the dma descriptor, so
8667 +        * zero it out to not confuse the user on an error
8668 +        */
8669 +       bzero (&trap->Desc, sizeof (trap->Desc));
8670 +       break;
8671 +    }
8672 +
8673 +    /*
8674 +     * All other uWords will have updated one of the fault areas,  so fix
8675 +     * any faults found in them.  If there were no faults found then it 
8676 +     * must have been a bus error
8677 +     */
8678 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
8679 +    {
8680 +       if (FaultArea->s.FSR.Status != 0)
8681 +       {
8682 +           FaultHandled++;
8683 +
8684 +           ASSERT ((FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block64 ||
8685 +                   (FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block32);
8686 +           
8687 +           ASSERT (FaultArea->s.FaultContext == trap->Status.s.Context);
8688 +           
8689 +           if (((trap->Desc.s.dma_source & PAGEOFFSET) >= (PAGESIZE-E3_BLK_SIZE)) &&
8690 +               ((trap->Desc.s.dma_source & PAGEMASK) != ((trap->Desc.s.dma_source + trap->Desc.s.dma_size-1) & PAGEMASK)))
8691 +           {
8692 +               /* XXXX: dma started within last 64 bytes of the page
8693 +                *       terminate the process if it has pagefaulted */
8694 +               if (FaultArea->s.FaultAddress == (trap->Desc.s.dma_source & ~(E3_BLK_SIZE-1)))
8695 +               {
8696 +                   printk ("elan%d: invalid dma - context=%x source=%x\n", ctxt->Device->Instance, 
8697 +                           ctxt->Capability.cap_mycontext, trap->Desc.s.dma_source);
8698 +                   
8699 +                   if (ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0) != OP_IGNORE)
8700 +                       return;
8701 +               }
8702 +           }
8703 +
8704 +           if (trap->Desc.s.dma_size != 0 && (res = elan3_pagefault (ctxt, FaultArea, 1)) != ESUCCESS)
8705 +           {
8706 +               /* XXXX: Rev B Elans can prefetch data past the end of the dma descriptor */
8707 +               /*       if the fault relates to this, then just ignore it */
8708 +               if (FaultArea->s.FaultAddress < (trap->Desc.s.dma_source+trap->Desc.s.dma_size) ||
8709 +                   FaultArea->s.FaultAddress > (trap->Desc.s.dma_source+trap->Desc.s.dma_size+E3_BLK_SIZE*2))
8710 +               {
8711 +                   PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n",
8712 +                            FaultArea->s.FaultAddress);
8713 +                   
8714 +                   if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, FaultArea, res) != OP_IGNORE)
8715 +                       return;
8716 +               }
8717 +           }
8718 +       }
8719 +    }
8720 +    
8721 +    if (trap->FaultSave.s.FSR.Status != 0)
8722 +    {
8723 +       FaultHandled++;
8724 +
8725 +       ASSERT (trap->FaultSave.s.FaultContext == trap->Status.s.Context);
8726 +
8727 +       if ((trap->FaultSave.s.FSR.Status & FSR_SizeMask) == FSR_RouteFetch)
8728 +       {
8729 +           res = ResolveVirtualProcess (ctxt, trap->FaultSave.s.FaultAddress & 0xffff); /* mask out cookie */
8730 +
8731 +           switch (res)
8732 +           {
8733 +           default:
8734 +               if (ElanException (ctxt, EXCEPTION_INVALID_PROCESS, DMA_PROC, trap, trap->FaultSave.s.FaultAddress, res) != OP_IGNORE)
8735 +                   return;
8736 +               
8737 +           case EAGAIN:
8738 +               /* XXXX; wait on trail blazing code */
8739 +
8740 +           case 0:
8741 +               break;
8742 +           }
8743 +       }
8744 +       else
8745 +       {
8746 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
8747 +           {
8748 +               PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n",
8749 +                        trap->FaultSave.s.FaultAddress);
8750 +
8751 +               if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, res) != OP_IGNORE)
8752 +                   return;
8753 +           }
8754 +       }
8755 +    }
8756 +
8757 +    if (! FaultHandled)
8758 +    {
8759 +       ElanBusError (ctxt->Device);
8760 +
8761 +       if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, EFAULT) != OP_IGNORE)
8762 +           return;
8763 +    }
8764 +
8765 +    switch (trap->Status.s.TrapType)
8766 +    {
8767 +    case MI_WaitForRemoteDescRead2:
8768 +       /*
8769 +        * Faulted while trying to read the dma descriptor for a read dma.
8770 +        * Fix fault and re-issue using FaultAddress.
8771 +        */
8772 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a remote dma descriptor at %x.\n",
8773 +                trap->FaultSave.s.FaultAddress);
8774 +       
8775 +       RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress);
8776 +       break;
8777 +       
8778 +    case MI_ExecuteDmaDescriptorForRun:
8779 +       /*
8780 +        * Faulted while trying to read the dma descriptor for a write dma.
8781 +        * Fix fault and re-issue using FaultAddress.
8782 +        */
8783 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a write dma descriptor at %x.\n", 
8784 +                trap->FaultSave.s.FaultAddress);
8785 +       
8786 +       RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress);
8787 +       break;
8788 +       
8789 +    case MI_WaitForRemoteRoutes1:
8790 +    case MI_WaitForRemoteRoutes2:
8791 +    case MI_SendRemoteDmaDesc:
8792 +    case MI_SendDmaIdentify:
8793 +    case MI_SendRemoteDmaRoutes2:
8794 +    case MI_WaitForDmaRoutes1:
8795 +    case MI_DmaLoop:
8796 +    case MI_ExitDmaLoop:
8797 +    case MI_GetDestEventValue:
8798 +    case MI_SendFinalUnlockTrans:
8799 +    case MI_SendNullSetEvent:
8800 +    case MI_SendFinalSetEvent:
8801 +    case MI_SendDmaEOP:
8802 +       /*
8803 +        * Faulted either fetching routes or fetching dma data.
8804 +        * Fix fault and re-issue using FaultAddress.
8805 +        */
8806 +
8807 +    case MI_SendEOPforRemoteDma:
8808 +    case MI_LookAtRemoteAck:
8809 +    case MI_FailedAckIfCCis0:
8810 +       /*
8811 +        * Possible fault when reading the remote desc into the dma data buffers
8812 +        */
8813 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap:  trapped reading a dma data or fetching a route\n");
8814 +       RestartDmaTrap (ctxt, trap);
8815 +       break;
8816 +       
8817 +    case MI_DequeueSysCntxDma:
8818 +    case MI_DequeueNonSysCntxDma:
8819 +    case MI_RemoteDmaCommand:
8820 +    case MI_RunDmaCommand:
8821 +       /*
8822 +        * It is possible that a dma can get back onto the queue while outstanding dma
8823 +        * have not finished trapping. In this case the trap can be ignored as the dma
8824 +        * state has been saved. It might trap again the next time it comes to the front
8825 +        * of the queue and be fixed then.
8826 +        */
8827 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trap after dma has finished. ignored\n");
8828 +       break;
8829 +       
8830 +    default:
8831 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n");
8832 +       FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0);
8833 +       break;
8834 +    }
8835 +}
8836 +
8837 +int
8838 +DProcNeedsRestart (ELAN3_CTXT *ctxt)
8839 +{
8840 +    return (ctxt->ItemCount[LIST_DMA_PTR] != 0 ||
8841 +           ctxt->ItemCount[LIST_DMA_DESC] != 0);
8842 +}
8843 +
8844 +void
8845 +RestartDProcItems (ELAN3_CTXT *ctxt)
8846 +{
8847 +    void      *item;
8848 +    E3_Addr    value;
8849 +    int               res;
8850 +    
8851 +    kmutex_lock (&ctxt->SwapListsLock);
8852 +    while (ctxt->ItemCount[LIST_DMA_PTR])
8853 +    {
8854 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_DMA_PTR, &item, &value))
8855 +           ctxt->ItemCount[LIST_DMA_PTR] = 0;
8856 +       else
8857 +       {
8858 +           PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue write dma at %x\n", value);
8859 +           PrintUserDma (ctxt, value);
8860 +
8861 +           res = IssueDmaCommand (ctxt, value, NULL, 0);
8862 +           
8863 +           if (res == ISSUE_COMMAND_RETRY)
8864 +           {
8865 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_PTR, item);
8866 +               kmutex_unlock (&ctxt->SwapListsLock);
8867 +               return;
8868 +           }
8869 +           
8870 +           ctxt->ItemCount[LIST_DMA_PTR]--;
8871 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
8872 +       }
8873 +    }
8874 +    
8875 +    while (ctxt->ItemCount[LIST_DMA_DESC])
8876 +    {
8877 +       if (! ELAN3_OP_GET_BLOCK_ITEM (ctxt, LIST_DMA_DESC, &item, &value))
8878 +           ctxt->ItemCount[LIST_DMA_DESC] = 0;
8879 +       else
8880 +       {
8881 +           PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue dma desc at %x\n", value);
8882 +           PrintUserDma (ctxt, value);
8883 +
8884 +           res = IssueDmaCommand (ctxt, value, item, 0);
8885 +
8886 +           switch (res)
8887 +           {
8888 +           case ISSUE_COMMAND_OK:
8889 +               ctxt->ItemCount[LIST_DMA_DESC]--;
8890 +               ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item);
8891 +               break;
8892 +               
8893 +           case ISSUE_COMMAND_RETRY:
8894 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_DESC, item);
8895 +               kmutex_unlock (&ctxt->SwapListsLock);
8896 +               return;
8897 +               
8898 +           case ISSUE_COMMAND_TRAPPED:
8899 +               ctxt->ItemCount[LIST_DMA_DESC]--;
8900 +               /* The item will be freed off when the command port trap */
8901 +               /* fixed up and the command successfully re-issued */
8902 +               break;
8903 +           }
8904 +       }
8905 +    }
8906 +
8907 +    kmutex_unlock (&ctxt->SwapListsLock);
8908 +}
8909 +
8910 +void
8911 +RestartDmaDesc(ELAN3_CTXT *ctxt, E3_DMA_BE *desc)
8912 +{
8913 +    kmutex_lock (&ctxt->SwapListsLock);
8914 +    if (desc->s.dma_direction != DMA_WRITE)
8915 +       desc->s.dma_direction = (desc->s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
8916 +
8917 +    ELAN3_OP_PUT_BLOCK_ITEM (ctxt, LIST_DMA_DESC, (E3_uint32 *) desc);
8918 +    ctxt->ItemCount[LIST_DMA_DESC]++;
8919 +
8920 +    kmutex_unlock (&ctxt->SwapListsLock);
8921 +}
8922 +
8923 +void
8924 +RestartDmaTrap(ELAN3_CTXT *ctxt, DMA_TRAP *trap)
8925 +{
8926 +    /* Negative length DMAs are illegal, since they hang up the dma processor,
8927 +     * if they got generated then they will have been spotted by PollForDmahungup,
8928 +     * and delivered to us with a Dequeue  suspend address,
8929 +     *
8930 +     * GNAT sw-elan3/3908: Moved this check into this new function to avoid
8931 +     * it sampling old or invalid register state
8932 +     */
8933 +    if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
8934 +       ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0);
8935 +    else
8936 +       RestartDmaDesc (ctxt, &trap->Desc);
8937 +}
8938 +
8939 +void
8940 +RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr)
8941 +{
8942 +    kmutex_lock (&ctxt->SwapListsLock);
8943 +    ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_DMA_PTR, ptr);
8944 +    ctxt->ItemCount[LIST_DMA_PTR]++;
8945 +    kmutex_unlock (&ctxt->SwapListsLock);
8946 +}
8947 +
8948 +static void
8949 +PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr)
8950 +{
8951 +    E3_DMA *dma;
8952 +
8953 +    /* Dont call a function which takes locks unless we need to */
8954 +    if (!(elan3_debug & DBG_DPROC))
8955 +        return;
8956 +
8957 +    dma = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
8958 +
8959 +    PRINTF4 (ctxt, DBG_DPROC, "DMA: type %08x size %08x source %08x dest %08x\n",
8960 +            fuword ((int *) &dma->dma_type), fuword ((int *) &dma->dma_size), 
8961 +            fuword ((int *) &dma->dma_source), fuword ((int *) &dma->dma_dest));
8962 +    PRINTF4 (ctxt, DBG_DPROC, "DMA: Dest %08x %08x  Local %08x %08x\n",
8963 +            fuword ((int *) &dma->dma_destEvent), fuword ((int *) &dma->dma_destCookieProc), 
8964 +            fuword ((int *) &dma->dma_srcEvent), fuword ((int *) &dma->dma_srcCookieProc));
8965 +}
8966 +
8967 +/*
8968 + * Local variables:
8969 + * c-file-style: "stroustrup"
8970 + * End:
8971 + */
8972 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/elan3mmu_generic.c
8973 ===================================================================
8974 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/elan3mmu_generic.c   2004-02-23 16:02:56.000000000 -0500
8975 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/elan3mmu_generic.c        2005-07-28 14:52:52.795686792 -0400
8976 @@ -0,0 +1,3255 @@
8977 +/*
8978 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
8979 + *
8980 + *    For licensing information please see the supplied COPYING file
8981 + *
8982 + */
8983 +
8984 +#ident "@(#)$Id: elan3mmu_generic.c,v 1.75.2.1 2004/12/14 10:19:51 mike Exp $"
8985 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_generic.c,v $*/
8986 +
8987 +#include <qsnet/kernel.h>
8988 +
8989 +#include <elan3/elanregs.h>
8990 +#include <elan3/elandev.h>
8991 +#include <elan3/elanvp.h>
8992 +#include <elan3/elan3mmu.h>
8993 +#include <elan3/elanctxt.h>
8994 +#include <elan3/elandebug.h>
8995 +#include <elan3/urom_addrs.h>
8996 +#include <elan3/thread.h>
8997 +
8998 +#ifdef CONFIG_MPSAS
8999 +#  define zero_all_ptbls
9000 +#endif
9001 +
9002 +/*
9003 + * Debugging
9004 + */
9005 +int    elan3mmu_debug = 0;
9006 +
9007 +#define        N_L3PTBL_MTX    (0x20)
9008 +#define        N_L2PTBL_MTX    (0x40)
9009 +#define        N_L1PTBL_MTX    (0x20)
9010 +
9011 +#define        L3PTBL_MTX_HASH(p) \
9012 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L3PTBL_MTX - 1))
9013 +static spinlock_t l3ptbl_lock[N_L3PTBL_MTX];
9014 +
9015 +#define        L2PTBL_MTX_HASH(p)   \
9016 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L2PTBL_MTX - 1))
9017 +static spinlock_t l2ptbl_lock[N_L2PTBL_MTX];
9018 +
9019 +#define        L1PTBL_MTX_HASH(p)   \
9020 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L1PTBL_MTX - 1))
9021 +static spinlock_t l1ptbl_lock[N_L1PTBL_MTX];
9022 +
9023 +
9024 +#define        BASE2VA(p)      ((E3_Addr)((p)->ptbl_base << 16))
9025 +#define        VA2BASE(v)      ((u_short)(((uintptr_t)(v)) >> 16))
9026 +
9027 +ELAN3MMU_GLOBAL_STATS  elan3mmu_global_stats;
9028 +
9029 +static void          elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *);
9030 +static void          elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags);
9031 +
9032 +static ELAN3_PTBL    *elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep);
9033 +static ELAN3_PTBL    *elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp);
9034 +
9035 +static ELAN3_PTBL    *elan3mmu_alloc_pte    (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx);
9036 +void                 elan3mmu_free_lXptbl  (ELAN3_DEV *dev, ELAN3_PTBL *ptbl);
9037 +
9038 +void                 elan3mmu_free_pte  (ELAN3_DEV *dev,  ELAN3MMU *elan3mmu,  ELAN3_PTBL *ptbl_ptr, int idx);
9039 +
9040 +static ELAN3_PTBL    *elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu);
9041 +static ELAN3_PTBL    *elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu,
9042 +                                           E3_Addr base, spinlock_t **plock, unsigned long *flags);
9043 +static ELAN3_PTBL    *elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu,
9044 +                                           E3_Addr base, spinlock_t **plock, unsigned long *flags);
9045 +
9046 +static int          elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl);
9047 +static ELAN3_PTBL    *elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr);
9048 +
9049 +static spinlock_t   *elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl);
9050 +
9051 +/*
9052 + * Encoding of MMU permissions against access type,
9053 + * to allow quick permission checking against access 
9054 + * type.
9055 + */
9056 +u_char elan3mmu_permissionTable[] =
9057 +{
9058 +    0xcc,      /* 11001100 ELAN3_PERM_NULL        */
9059 +    0x01,      /* 00000001 ELAN3_PERM_LOCALREAD   */
9060 +    0x05,      /* 00000101 ELAN3_PERM_READ        */
9061 +    0x33,      /* 00110011 ELAN3_PERM_NOREMOTE    */
9062 +    0x37,      /* 00110111 ELAN3_PERM_REMOTEREAD  */
9063 +    0x3f,      /* 00111111 ELAN3_PERM_REMOTEWRITE */
9064 +    0xf7,      /* 11110111 ELAN3_PERM_REMOTEEVENT */
9065 +    0xff,      /* 11111111 ELAN3_PERM_REMOTEALL          */
9066 +} ;
9067 +
9068 +void
9069 +elan3mmu_init()
9070 +{
9071 +    register int i;
9072 +
9073 +    HAT_PRINTF0 (1, "elan3mmu_init: initialising elan mmu\n");
9074 +
9075 +    for (i = 0; i < N_L1PTBL_MTX; i++)
9076 +       spin_lock_init (&l1ptbl_lock[i]);
9077 +
9078 +    for (i = 0; i < N_L2PTBL_MTX; i++)
9079 +       spin_lock_init (&l2ptbl_lock[i]);
9080 +
9081 +    for (i = 0; i < N_L3PTBL_MTX; i++)
9082 +       spin_lock_init (&l3ptbl_lock[i]);
9083 +
9084 +    elan3mmu_global_stats.version = ELAN3MMU_STATS_VERSION;
9085 +
9086 +    elan3mmu_init_osdep();
9087 +}
9088 +
9089 +void
9090 +elan3mmu_fini()
9091 +{
9092 +    register int i;
9093 +
9094 +    HAT_PRINTF0 (1, "elan3mmu_fini: finalising elan mmu\n");
9095 +
9096 +    for (i = 0; i < N_L1PTBL_MTX; i++)
9097 +       spin_lock_destroy (&l1ptbl_lock[i]);
9098 +
9099 +    for (i = 0; i < N_L2PTBL_MTX; i++)
9100 +       spin_lock_destroy (&l2ptbl_lock[i]);
9101 +
9102 +    for (i = 0; i < N_L3PTBL_MTX; i++)
9103 +       spin_lock_destroy (&l3ptbl_lock[i]);
9104 +
9105 +    elan3mmu_fini_osdep();
9106 +}
9107 +
9108 +ELAN3MMU *
9109 +elan3mmu_alloc (ELAN3_CTXT *ctxt)
9110 +{
9111 +    ELAN3MMU  *elan3mmu;
9112 +    ELAN3_PTBL *l1ptbl;
9113 +
9114 +    ALLOC_ELAN3MMU (elan3mmu, TRUE);
9115 +    
9116 +    spin_lock_init (&elan3mmu->elan3mmu_lock);
9117 +
9118 +    spin_lock (&elan3mmu->elan3mmu_lock);                      /* lock_lint */
9119 +
9120 +    elan3mmu->elan3mmu_ergns    = NULL;
9121 +    elan3mmu->elan3mmu_etail    = NULL;
9122 +    elan3mmu->elan3mmu_ergnlast = NULL;
9123 +    elan3mmu->elan3mmu_mrgns    = NULL;
9124 +    elan3mmu->elan3mmu_mtail    = NULL;
9125 +    elan3mmu->elan3mmu_mrgnlast = NULL;
9126 +    elan3mmu->elan3mmu_ctxt     = ctxt;
9127 +
9128 +    spin_lock_init (&elan3mmu->elan3mmu_lXptbl_lock);
9129 +    elan3mmu->elan3mmu_lXptbl   = NULL;
9130 +
9131 +    spin_unlock (&elan3mmu->elan3mmu_lock);                    /* lock_lint */
9132 +
9133 +    l1ptbl = elan3mmu_alloc_l1ptbl(ctxt->Device, 0, elan3mmu);
9134 +
9135 +    elan3mmu->elan3mmu_ctp      = (sdramaddr_t) 0;
9136 +    elan3mmu->elan3mmu_dev      = ctxt->Device;
9137 +    elan3mmu->elan3mmu_l1ptbl   = l1ptbl;
9138 +
9139 +    /* Ensure that there are at least some level 3 page tables,  since if a level 2 and */
9140 +    /* a level 3 table are allocated together, then the level 3 is allocated with the NO_ALLOC */
9141 +    /* flag,  thus there MUST be at least one that can be stolen or on the free list */
9142 +    if (elan3mmu->elan3mmu_dev->Level[PTBL_LEVEL_3].PtblFreeList == NULL)
9143 +       elan3mmu_create_ptbls (elan3mmu->elan3mmu_dev, PTBL_LEVEL_3, 0, 0);
9144 +
9145 +    HAT_PRINTF1 (1, "elan3mmu_alloc: elan3mmu %p\n", elan3mmu);
9146 +
9147 +    elan3mmu_alloc_osdep (elan3mmu);
9148 +
9149 +    return (elan3mmu);
9150 +}
9151 +
9152 +void 
9153 +elan3mmu_free (ELAN3MMU *elan3mmu)
9154 +{
9155 +    ELAN3MMU_RGN   *rgn;
9156 +    ELAN3_PTBL    *l1ptbl;
9157 +    spinlock_t    *l1lock;
9158 +    unsigned long   l1flags;
9159 +    unsigned long   flags;
9160 +
9161 +    HAT_PRINTF1 (1, "elan3mmu_free : elan3mmu %p\n", elan3mmu);
9162 +    
9163 +    /*
9164 +     * Invalidate the level1 page table,  since it's already removed
9165 +     * from the context table, there is no need to flush the tlb.
9166 +     */
9167 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
9168 +    elan3mmu->elan3mmu_l1ptbl = NULL;
9169 +    
9170 +    if (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, PTBL_LEVEL_1, &l1lock, &l1flags) == LK_PTBL_OK)
9171 +    {
9172 +       elan3mmu_l1inval (elan3mmu, l1ptbl, PTE_UNLOAD_NOFLUSH);
9173 +       elan3mmu_free_l1ptbl (elan3mmu->elan3mmu_dev, l1ptbl, l1lock, l1flags);
9174 +    }
9175 +
9176 +    /*
9177 +     * Free of any permission regions.
9178 +     */
9179 +    spin_lock (&elan3mmu->elan3mmu_lock);                                      /* lock_lint */
9180 +    while ((rgn = elan3mmu->elan3mmu_mrgns) != NULL)
9181 +    {
9182 +       spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);           /* lock_lint */
9183 +       elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase);
9184 +       elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase);
9185 +       spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);      /* lock_lint */
9186 +       
9187 +       FREE_ELAN3MMU_RGN (rgn);
9188 +    }
9189 +    elan3mmu->elan3mmu_mrgnlast = NULL;
9190 +    elan3mmu->elan3mmu_ergnlast = NULL;
9191 +
9192 +    /* 
9193 +     * Free the lXptbl list
9194 +     */
9195 +    ASSERT (elan3mmu->elan3mmu_lXptbl == NULL); /* XXXX MRH need to add list removal */
9196 +
9197 +    elan3mmu->elan3mmu_lXptbl = NULL;
9198 +    spin_lock_destroy (&elan3mmu->elan3mmu_lXptbl_lock);
9199 +
9200 +
9201 +    spin_unlock (&elan3mmu->elan3mmu_lock);                                    /* lock_lint */
9202 +
9203 +    spin_lock_destroy (&elan3mmu->elan3mmu_lock);
9204 +
9205 +    FREE_ELAN3MMU (elan3mmu);
9206 +}
9207 +
9208 +/*================================================================================*/
9209 +/* Interface routines to device driver */
9210 +static void
9211 +elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *arg)
9212 +{
9213 +    unsigned long flags;
9214 +
9215 +    spin_lock_irqsave (&dev->IntrLock, flags);
9216 +    ASSERT ((read_reg32 (dev, Exts.InterruptReg) & (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)) == 
9217 +           (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx));
9218 +
9219 +    dev->FilterHaltQueued = 0;
9220 +
9221 +    write_reg32 (dev, Input_Context_Fil_Flush, 0);
9222 +
9223 +    HAT_PRINTF0 (1, "elan3mmu_flush_context_filter completed\n");
9224 +
9225 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
9226 +}
9227 +
9228 +void
9229 +elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp)
9230 +{
9231 +    int         mctx = ctx & MAX_ROOT_CONTEXT_MASK;
9232 +    sdramaddr_t ctp  = dev->ContextTable + mctx * sizeof (E3_ContextControlBlock);
9233 +
9234 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
9235 +
9236 +    ASSERT ((mctx < 32 || mctx >= ELAN3_KCOMM_BASE_CONTEXT_NUM) ? (ctx & SYS_CONTEXT_BIT) : ! (ctx & SYS_CONTEXT_BIT));
9237 +
9238 +    elan3_sdram_writel (dev, ctp + offsetof (E3_ContextControlBlock, filter), 
9239 +                 ((ctx & SYS_CONTEXT_BIT) ? E3_CCB_CNTX0 : 0) | (disabled ? E3_CCB_DISCARD_ALL : 0));
9240 +
9241 +    HAT_PRINTF4 (1, "elan3mmu_set_context_filter: ctx %x [%lx] -> %s (%x)\n", ctx, ctp,
9242 +                disabled ? "up" : "down", elan3_sdram_readl (dev, ctp + offsetof (E3_ContextControlBlock, filter)));
9243 +
9244 +    /* queue a halt operation to flush the context filter while the inputter is halted */
9245 +    if (dev->FilterHaltQueued == 0)
9246 +    {
9247 +       dev->FilterHaltQueued = 1;
9248 +       QueueHaltOperation (dev, Pend, Maskp, INT_DiscardingSysCntx | INT_DiscardingNonSysCntx, 
9249 +                           elan3mmu_flush_context_filter, NULL);
9250 +    }
9251 +}
9252 +
9253 +int
9254 +elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask)
9255 +{
9256 +    sdramaddr_t ctp;
9257 +    ELAN3_PTP    trootptp;
9258 +
9259 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
9260 +
9261 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
9262 +    
9263 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
9264 +       return (EINVAL);
9265 +
9266 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
9267 +    
9268 +    trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP));
9269 +    
9270 +    if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID)
9271 +       return (EBUSY);
9272 +
9273 +    elan3mmu->elan3mmu_ctp = ctp;
9274 +    
9275 +    trootptp = PTBL_TO_PTADDR (elan3mmu->elan3mmu_l1ptbl) | ELAN3_ET_PTP;
9276 +    
9277 +    HAT_PRINTF4 (1, "elan3mmu_attach: ctp at %08lx : trootptp=%08x VPT_ptr=%08lx VPT_mask=%08x\n",
9278 +                ctp, trootptp, routeTable, routeMask);
9279 +    
9280 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), trootptp);
9281 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), routeTable);
9282 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), routeMask);
9283 +    
9284 +    return (ESUCCESS);
9285 +}
9286 +
9287 +void
9288 +elan3mmu_detach (ELAN3_DEV *dev, int ctx)
9289 +{
9290 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
9291 +    sdramaddr_t ctp;
9292 +    
9293 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
9294 +    
9295 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
9296 +       return;
9297 +    
9298 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
9299 +    
9300 +    HAT_PRINTF1 (1, "elan3mmu_detach: clearing ptp at %lx\n", ctp);
9301 +    
9302 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), invalidptp);
9303 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), 0);
9304 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), 0);
9305 +    
9306 +    ElanFlushTlb (dev);
9307 +}
9308 +
9309 +int
9310 +elan3mmu_reference (ELAN3MMU *elan3mmu, int ctx)
9311 +{
9312 +    ELAN3_DEV              *dev = elan3mmu->elan3mmu_dev;
9313 +    sdramaddr_t            ctp;
9314 +    E3_ContextControlBlock ccb;
9315 +    ELAN3_PTP               trootptp;
9316 +
9317 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
9318 +    
9319 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
9320 +       return (EINVAL);
9321 +
9322 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
9323 +
9324 +    trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP));
9325 +    
9326 +    if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID)
9327 +       return (EBUSY);
9328 +    
9329 +    elan3_sdram_copyl_from_sdram (dev, elan3mmu->elan3mmu_ctp, &ccb, sizeof (E3_ContextControlBlock));
9330 +    elan3_sdram_copyl_to_sdram (dev, &ccb, ctp, sizeof (E3_ContextControlBlock));
9331 +    
9332 +    return (ESUCCESS);
9333 +    
9334 +}
9335 +/*================================================================================*/
9336 +/* Elan permission regions. */
9337 +
9338 +/* elan address region management */
9339 +ELAN3MMU_RGN *
9340 +elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu,
9341 +                      E3_Addr addr, int tail)
9342 +{
9343 +    ELAN3MMU_RGN *next = NULL;
9344 +    ELAN3MMU_RGN *rgn;
9345 +    ELAN3MMU_RGN *hirgn;
9346 +    ELAN3MMU_RGN *lorgn;
9347 +    E3_Addr       base;
9348 +    E3_Addr       lastaddr;
9349 +    int                  forward;
9350 +
9351 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9352 +
9353 +    if (elan3mmu->elan3mmu_ergns == NULL)
9354 +       return (NULL);
9355 +
9356 +    rgn = elan3mmu->elan3mmu_ergnlast;
9357 +    if (rgn == NULL)
9358 +       rgn = elan3mmu->elan3mmu_ergns;
9359 +
9360 +    forward = 0;
9361 +    if ((u_long) (base = rgn->rgn_ebase) < (u_long)addr)
9362 +    {
9363 +       if ((u_long)addr <= ((u_long) base + rgn->rgn_len - 1))
9364 +           return (rgn);                                       /* ergnlast contained addr */
9365 +
9366 +       hirgn = elan3mmu->elan3mmu_etail;
9367 +
9368 +       if ((u_long) (lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < (u_long) addr)
9369 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
9370 +       
9371 +       if ((u_long) (addr - base) > (u_long) (lastaddr - addr))
9372 +           rgn = hirgn;
9373 +       else
9374 +       {
9375 +           rgn = rgn->rgn_enext;
9376 +           forward++;
9377 +       }
9378 +    }
9379 +    else
9380 +    {
9381 +       lorgn = elan3mmu->elan3mmu_ergns;
9382 +
9383 +       if ((u_long)lorgn->rgn_ebase > (u_long) addr)
9384 +           return (lorgn);                                     /* lowest regions is higher than addr */
9385 +       if ((u_long)(addr - lorgn->rgn_ebase) < (u_long) (base - addr))
9386 +       {
9387 +           rgn = lorgn;                                        /* search forward from head */
9388 +           forward++;
9389 +       }
9390 +    }
9391 +    if (forward)
9392 +    {
9393 +       while ((u_long)(rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr)
9394 +           rgn = rgn->rgn_enext;
9395 +
9396 +       if ((u_long)rgn->rgn_ebase <= (u_long)addr)
9397 +           elan3mmu->elan3mmu_ergnlast = rgn;
9398 +       return (rgn);
9399 +    }
9400 +    else
9401 +    {
9402 +       while ((u_long)rgn->rgn_ebase > (u_long)addr)
9403 +       {
9404 +           next = rgn;
9405 +           rgn = rgn->rgn_eprev;
9406 +       }
9407 +
9408 +       if ((u_long) (rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr)
9409 +           return (next);
9410 +       else
9411 +       {
9412 +           elan3mmu->elan3mmu_ergnlast = rgn;
9413 +           return (rgn);
9414 +       }
9415 +    }
9416 +}
9417 +
9418 +int
9419 +elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn)
9420 +{
9421 +    ELAN3MMU_RGN *rgn   = elan3mmu_findrgn_elan (elan3mmu, nrgn->rgn_ebase, 1);
9422 +    E3_Addr       nbase = nrgn->rgn_ebase;
9423 +    E3_Addr      ntop  = nbase + nrgn->rgn_len - 1; /* avoid wrap */
9424 +    E3_Addr      base;
9425 +
9426 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9427 +
9428 +    if (rgn == NULL)
9429 +    {
9430 +       elan3mmu->elan3mmu_ergns = elan3mmu->elan3mmu_etail = nrgn;
9431 +       nrgn->rgn_enext = nrgn->rgn_eprev = NULL;
9432 +    }
9433 +    else
9434 +    {
9435 +       base = rgn->rgn_ebase;
9436 +
9437 +       if ((u_long)(base + rgn->rgn_len - 1) < (u_long)nbase)  /* top of region below requested address */
9438 +       {                                                       /* so insert after region (and hence at end */
9439 +           nrgn->rgn_eprev = rgn;                              /* of list */
9440 +           nrgn->rgn_enext = NULL;
9441 +           rgn->rgn_enext = elan3mmu->elan3mmu_etail = nrgn;
9442 +       }
9443 +       else
9444 +       {
9445 +           if ((u_long)nbase >= (u_long)base || (u_long)ntop >= (u_long)base)
9446 +               return (-1);                                    /* overlapping region */
9447 +
9448 +           nrgn->rgn_enext = rgn;                              /* insert before region */
9449 +           nrgn->rgn_eprev = rgn->rgn_eprev;
9450 +           rgn->rgn_eprev  = nrgn;
9451 +           if (elan3mmu->elan3mmu_ergns == rgn)
9452 +               elan3mmu->elan3mmu_ergns = nrgn;
9453 +           else
9454 +               nrgn->rgn_eprev->rgn_enext = nrgn;
9455 +       }
9456 +    }
9457 +    elan3mmu->elan3mmu_ergnlast = nrgn;
9458 +    
9459 +    return (0);
9460 +}
9461 +
9462 +ELAN3MMU_RGN *
9463 +elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr)
9464 +{
9465 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0);
9466 +    
9467 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9468 +
9469 +    if (rgn == NULL || rgn->rgn_ebase != addr)
9470 +       return (NULL);
9471 +    
9472 +    elan3mmu->elan3mmu_ergnlast = rgn->rgn_enext;
9473 +    if (rgn == elan3mmu->elan3mmu_etail)
9474 +       elan3mmu->elan3mmu_etail = rgn->rgn_eprev;
9475 +    else
9476 +       rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev;
9477 +    
9478 +    if (rgn == elan3mmu->elan3mmu_ergns)
9479 +       elan3mmu->elan3mmu_ergns = rgn->rgn_enext;
9480 +    else
9481 +       rgn->rgn_eprev->rgn_enext = rgn->rgn_enext;
9482 +
9483 +    return (rgn);
9484 +}
9485 +
9486 +ELAN3MMU_RGN *
9487 +elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr)
9488 +{
9489 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0);
9490 +    E3_Addr       base;
9491 +
9492 +    if (rgn != NULL && (u_long)(base = rgn->rgn_ebase) <= (u_long)addr && (u_long)addr <= (u_long)(base + rgn->rgn_len - 1))
9493 +       return (rgn);
9494 +    return (NULL);
9495 +}
9496 +
9497 +/* main address region management */
9498 +ELAN3MMU_RGN *
9499 +elan3mmu_findrgn_main (ELAN3MMU *elan3mmu,
9500 +                      caddr_t addr, int tail)
9501 +{
9502 +    ELAN3MMU_RGN *next = NULL;
9503 +    ELAN3MMU_RGN *rgn;
9504 +    ELAN3MMU_RGN *hirgn;
9505 +    ELAN3MMU_RGN *lorgn;
9506 +    caddr_t       lastaddr;
9507 +    caddr_t       base;
9508 +    int                  forward;
9509 +
9510 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9511 +
9512 +    if (elan3mmu->elan3mmu_mrgns == NULL)
9513 +       return (NULL);
9514 +
9515 +    rgn = elan3mmu->elan3mmu_mrgnlast;
9516 +    if (rgn == NULL)
9517 +       rgn = elan3mmu->elan3mmu_mrgns;
9518 +
9519 +    forward = 0;
9520 +    if ((base = rgn->rgn_mbase) < addr)
9521 +    {
9522 +       if (addr <= (base + rgn->rgn_len - 1))
9523 +           return (rgn);                                       /* ergnlast contained addr */
9524 +
9525 +       hirgn = elan3mmu->elan3mmu_mtail;
9526 +       if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr)
9527 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
9528 +       
9529 +       if ((addr - base) > (lastaddr - addr))
9530 +           rgn = hirgn;
9531 +       else
9532 +       {
9533 +           rgn = rgn->rgn_mnext;
9534 +           forward++;
9535 +       }
9536 +    }
9537 +    else
9538 +    {
9539 +       lorgn = elan3mmu->elan3mmu_mrgns;
9540 +       if (lorgn->rgn_mbase > addr)
9541 +           return (lorgn);                                     /* lowest regions is higher than addr */
9542 +       if ((addr - lorgn->rgn_mbase) < (base - addr))
9543 +       {
9544 +           rgn = lorgn;                                        /* search forward from head */
9545 +           forward++;
9546 +       }
9547 +    }
9548 +    if (forward)
9549 +    {
9550 +       while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
9551 +           rgn = rgn->rgn_mnext;
9552 +
9553 +       if (rgn->rgn_mbase <= addr)
9554 +           elan3mmu->elan3mmu_mrgnlast = rgn;
9555 +       return (rgn);
9556 +    }
9557 +    else
9558 +    {
9559 +       while (rgn->rgn_mbase > addr)
9560 +       {
9561 +           next = rgn;
9562 +           rgn = rgn->rgn_mprev;
9563 +       }
9564 +       if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
9565 +           return (next);
9566 +       else
9567 +       {
9568 +           elan3mmu->elan3mmu_mrgnlast = rgn;
9569 +           return (rgn);
9570 +       }
9571 +    }
9572 +}
9573 +
9574 +int
9575 +elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn)
9576 +{
9577 +    ELAN3MMU_RGN *rgn   = elan3mmu_findrgn_main (elan3mmu, nrgn->rgn_mbase, 1);
9578 +    caddr_t       nbase = nrgn->rgn_mbase;
9579 +    caddr_t      ntop  = nbase + nrgn->rgn_len - 1;
9580 +    caddr_t      base;
9581 +
9582 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9583 +
9584 +    if (rgn == NULL)
9585 +    {
9586 +       elan3mmu->elan3mmu_mrgns = elan3mmu->elan3mmu_mtail = nrgn;
9587 +       nrgn->rgn_mnext = nrgn->rgn_mprev = NULL;
9588 +    }
9589 +    else
9590 +    {
9591 +       base = rgn->rgn_mbase;
9592 +
9593 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
9594 +       {                                                       /* so insert after region (and hence at end */
9595 +           nrgn->rgn_mprev = rgn;                              /* of list */
9596 +           nrgn->rgn_mnext = NULL;
9597 +           rgn->rgn_mnext = elan3mmu->elan3mmu_mtail = nrgn;
9598 +       }
9599 +       else
9600 +       {
9601 +           if (nbase >= base || ntop >= base)
9602 +               return (-1);                                    /* overlapping region */
9603 +
9604 +           nrgn->rgn_mnext = rgn;                              /* insert before region */
9605 +           nrgn->rgn_mprev = rgn->rgn_mprev;
9606 +           rgn->rgn_mprev  = nrgn;
9607 +           if (elan3mmu->elan3mmu_mrgns == rgn)
9608 +               elan3mmu->elan3mmu_mrgns = nrgn;
9609 +           else
9610 +               nrgn->rgn_mprev->rgn_mnext = nrgn;
9611 +       }
9612 +    }
9613 +    elan3mmu->elan3mmu_mrgnlast = nrgn;
9614 +    
9615 +    return (0);
9616 +}
9617 +
9618 +ELAN3MMU_RGN *
9619 +elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr)
9620 +{
9621 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
9622 +    
9623 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9624 +
9625 +    if (rgn == NULL || rgn->rgn_mbase != addr)
9626 +       return (NULL);
9627 +    
9628 +    elan3mmu->elan3mmu_mrgnlast = rgn->rgn_mnext;
9629 +    if (rgn == elan3mmu->elan3mmu_mtail)
9630 +       elan3mmu->elan3mmu_mtail = rgn->rgn_mprev;
9631 +    else
9632 +       rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev;
9633 +    
9634 +    if (rgn == elan3mmu->elan3mmu_mrgns)
9635 +       elan3mmu->elan3mmu_mrgns = rgn->rgn_mnext;
9636 +    else
9637 +       rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext;
9638 +
9639 +    return (rgn);
9640 +}
9641 +
9642 +ELAN3MMU_RGN *
9643 +elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr)
9644 +{
9645 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
9646 +    caddr_t       base;
9647 +
9648 +    if (rgn != NULL && (base = rgn->rgn_mbase) <= addr && addr <= (base + rgn->rgn_len - 1))
9649 +       return (rgn);
9650 +    return (NULL);
9651 +}
9652 +
9653 +int
9654 +elan3mmu_setperm (ELAN3MMU *elan3mmu,
9655 +                 caddr_t   maddr,
9656 +                 E3_Addr   eaddr,
9657 +                 u_int     len,
9658 +                 u_int     perm)
9659 +{
9660 +    ELAN3_DEV     *dev = elan3mmu->elan3mmu_dev;
9661 +    ELAN3MMU_RGN *nrgn;
9662 +    unsigned long  flags;
9663 +
9664 +    HAT_PRINTF4 (1, "elan3mmu_setperm: user %p elan %08x len %x perm %x\n", maddr, eaddr, len, perm);
9665 +
9666 +    if ((((uintptr_t) maddr) & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET)) 
9667 +    {
9668 +        HAT_PRINTF0 (1, "elan3mmu_setperm:  alignment failure\n");
9669 +       return (EINVAL);
9670 +    }
9671 +
9672 +    if (((uintptr_t) maddr + len - 1) < (uintptr_t) maddr || ((u_long)eaddr + len - 1) < (u_long)eaddr) 
9673 +    {
9674 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  range failure\n");
9675 +       return (EINVAL);
9676 +    }
9677 +
9678 +    ALLOC_ELAN3MMU_RGN(nrgn, TRUE);
9679 +    
9680 +    spin_lock (&elan3mmu->elan3mmu_lock);
9681 +    nrgn->rgn_mbase = maddr;
9682 +    nrgn->rgn_ebase = eaddr;
9683 +    nrgn->rgn_len   = len;
9684 +    nrgn->rgn_perm  = perm;
9685 +
9686 +    spin_lock_irqsave (&dev->IntrLock, flags);
9687 +    if (elan3mmu_addrgn_elan (elan3mmu, nrgn) < 0)
9688 +    {
9689 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  elan address exists\n");
9690 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
9691 +       spin_unlock (&elan3mmu->elan3mmu_lock);
9692 +
9693 +       FREE_ELAN3MMU_RGN (nrgn);
9694 +       return (EINVAL);
9695 +    }
9696 +    
9697 +    if (elan3mmu_addrgn_main (elan3mmu, nrgn) < 0)
9698 +    {
9699 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  main address exists\n");
9700 +       elan3mmu_removergn_elan (elan3mmu, eaddr);
9701 +
9702 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
9703 +       spin_unlock (&elan3mmu->elan3mmu_lock);
9704 +
9705 +       FREE_ELAN3MMU_RGN (nrgn);
9706 +       return (EINVAL);
9707 +    }
9708 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
9709 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9710 +
9711 +    return (ESUCCESS);
9712 +}
9713 +
9714 +void
9715 +elan3mmu_clrperm (ELAN3MMU *elan3mmu,
9716 +                 E3_Addr   addr,
9717 +                 u_int     len)
9718 +{
9719 +    E3_Addr       raddr;
9720 +    E3_Addr       rtop;
9721 +    ELAN3MMU_RGN *nrgn;
9722 +    ELAN3MMU_RGN *rgn;
9723 +    ELAN3MMU_RGN *rgn_next;
9724 +    u_int        ssize;
9725 +    unsigned long flags;
9726 +    int                  res;
9727 +
9728 +    HAT_PRINTF2 (1, "elan3mmu_clrperm: elan %08x len %x\n", addr, len);
9729 +
9730 +    raddr = (addr & PAGEMASK);
9731 +    rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET;
9732 +
9733 +    ALLOC_ELAN3MMU_RGN (nrgn, TRUE);
9734 +
9735 +    spin_lock (&elan3mmu->elan3mmu_lock);
9736 +    
9737 +    for (rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0); rgn != NULL; rgn = rgn_next)
9738 +    {
9739 +       if (rtop < rgn->rgn_ebase)                              /* rtop was in a gap */
9740 +           break;
9741 +       
9742 +       rgn_next = rgn->rgn_enext;                              /* Save next region pointer */
9743 +       
9744 +       if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1)) 
9745 +       {
9746 +           /* whole region is cleared */
9747 +           elan3mmu_unload (elan3mmu, rgn->rgn_ebase, rgn->rgn_len, PTE_UNLOAD);
9748 +           
9749 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9750 +           elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase);
9751 +           elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase);
9752 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9753 +
9754 +           FREE_ELAN3MMU_RGN (rgn);
9755 +       }
9756 +       else if (raddr <= rgn->rgn_ebase)
9757 +       {
9758 +           /* clearing at beginning, so shrink size and increment base ptrs */
9759 +           ssize = rtop - rgn->rgn_ebase + 1;
9760 +
9761 +           elan3mmu_unload (elan3mmu, rgn->rgn_ebase, ssize, PTE_UNLOAD);
9762 +           
9763 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9764 +           rgn->rgn_mbase += ssize;
9765 +           rgn->rgn_ebase += ssize;
9766 +           rgn->rgn_len   -= ssize;
9767 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9768 +           
9769 +       }
9770 +       else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
9771 +       {
9772 +           /* clearing at end, so just shrink length of region */
9773 +           ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
9774 +
9775 +           elan3mmu_unload (elan3mmu, raddr, ssize, PTE_UNLOAD);
9776 +
9777 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9778 +           rgn->rgn_len -= ssize;
9779 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9780 +       }
9781 +       else
9782 +       {
9783 +           /* the section to go is in the middle,  so need to  */
9784 +           /* split it into two regions */
9785 +           elan3mmu_unload (elan3mmu, raddr, rtop - raddr + 1, PTE_UNLOAD);
9786 +
9787 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9788 +
9789 +           ASSERT (nrgn != NULL);
9790 +
9791 +           nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1);;
9792 +           nrgn->rgn_ebase = rtop + 1;
9793 +           nrgn->rgn_len   = ((rgn->rgn_ebase + rgn->rgn_len - 1) - rtop);
9794 +           nrgn->rgn_perm  = rgn->rgn_perm;
9795 +
9796 +           rgn->rgn_len = (raddr - rgn->rgn_ebase);            /* shrink original region */
9797 +
9798 +           res = elan3mmu_addrgn_elan (elan3mmu, nrgn);        /* insert new region */
9799 +           ASSERT (res == 0);                                  /* which cannot fail */
9800 +
9801 +           res = elan3mmu_addrgn_main (elan3mmu, nrgn);        
9802 +           ASSERT (res == 0);
9803 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
9804 +
9805 +           nrgn = NULL;
9806 +       }
9807 +    }
9808 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9809 +
9810 +    if (nrgn != NULL)
9811 +       FREE_ELAN3MMU_RGN (nrgn);
9812 +}
9813 +
9814 +int
9815 +elan3mmu_checkperm (ELAN3MMU *elan3mmu,
9816 +                   E3_Addr   addr,
9817 +                   u_int     len,
9818 +                   u_int     access)
9819 +{
9820 +    E3_Addr     raddr = (((E3_Addr) addr) & PAGEMASK);
9821 +    u_int        rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET;
9822 +    u_int       rsize = rtop - raddr + 1;
9823 +    ELAN3MMU_RGN *rgn;
9824 +
9825 +    HAT_PRINTF3 (1, "elan3mmu_checkperm: user %08x len %x access %x\n", addr, len, access);
9826 +    
9827 +    
9828 +    if ((raddr + rsize - 1) < raddr)
9829 +       return (ENOMEM);
9830 +    
9831 +    spin_lock (&elan3mmu->elan3mmu_lock);
9832 +    if ((rgn = elan3mmu_rgnat_elan (elan3mmu, raddr)) == (ELAN3MMU_RGN *) NULL)
9833 +    {
9834 +       spin_unlock (&elan3mmu->elan3mmu_lock);
9835 +       return (ENOMEM);
9836 +    }
9837 +    else
9838 +    {
9839 +       register int ssize;
9840 +       
9841 +       for (; rsize != 0; rsize -= ssize, raddr += ssize)
9842 +       {
9843 +           if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1))
9844 +           {
9845 +               rgn  = rgn->rgn_enext;
9846 +               
9847 +               if (rgn == NULL || raddr != rgn->rgn_ebase)
9848 +               {
9849 +                   spin_unlock (&elan3mmu->elan3mmu_lock);
9850 +                   return (ENOMEM);
9851 +               }
9852 +           }
9853 +           if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1))
9854 +               ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
9855 +           else
9856 +               ssize = rsize;
9857 +           
9858 +           HAT_PRINTF4 (1, "elan3mmu_checkperm : rgn %x -> %x perm %x access %x\n",
9859 +                        rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len, rgn->rgn_perm, access);
9860 +
9861 +           if (ELAN3_INCOMPAT_ACCESS (rgn->rgn_perm, access))
9862 +           {
9863 +               spin_unlock (&elan3mmu->elan3mmu_lock);
9864 +               return (EACCES);
9865 +           }
9866 +       }
9867 +    }
9868 +    
9869 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9870 +    
9871 +    return (ESUCCESS);
9872 +}
9873 +
9874 +caddr_t
9875 +elan3mmu_mainaddr (ELAN3MMU *elan3mmu, E3_Addr addr)
9876 +{
9877 +    ELAN3MMU_RGN *rgn;
9878 +    caddr_t      raddr;
9879 +    
9880 +    spin_lock (&elan3mmu->elan3mmu_lock);
9881 +    if ((rgn = elan3mmu_rgnat_elan (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL)
9882 +       raddr = NULL;
9883 +    else
9884 +       raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
9885 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9886 +
9887 +    return (raddr);
9888 +}
9889 +
9890 +E3_Addr
9891 +elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr)
9892 +{
9893 +    ELAN3MMU_RGN *rgn;
9894 +    E3_Addr       raddr;
9895 +
9896 +    spin_lock (&elan3mmu->elan3mmu_lock);
9897 +    if ((rgn = elan3mmu_rgnat_main (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL)
9898 +       raddr = (E3_Addr) 0;
9899 +    else
9900 +       raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
9901 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9902 +
9903 +    return (raddr);
9904 +}
9905 +
9906 +void
9907 +elan3mmu_displayrgns(ELAN3MMU *elan3mmu)
9908 +{
9909 +    ELAN3MMU_RGN *rgn;
9910 +
9911 +    spin_lock (&elan3mmu->elan3mmu_lock);
9912 +    HAT_PRINTF0 (1, "elan3mmu_displayrgns: main regions\n");
9913 +    for (rgn = elan3mmu->elan3mmu_mrgns; rgn; rgn = (rgn->rgn_mnext == elan3mmu->elan3mmu_mrgns) ? NULL : rgn->rgn_mnext)
9914 +       HAT_PRINTF5 (1, "    RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm);
9915 +    HAT_PRINTF0 (1, "elan3mmu_displayrgns: elan regions\n");
9916 +    for (rgn = elan3mmu->elan3mmu_ergns; rgn; rgn = (rgn->rgn_enext == elan3mmu->elan3mmu_ergns) ? NULL : rgn->rgn_enext)
9917 +       HAT_PRINTF5 (1, "    RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm);
9918 +
9919 +    spin_unlock (&elan3mmu->elan3mmu_lock);
9920 +}
9921 +
9922 +/*============================================================================*/
9923 +/* Private functions */
9924 +#define ELAN3_PTE_IS_VALID(ptbl, pte)  \
9925 +          ((ptbl->ptbl_flags & PTBL_KERNEL) ? \
9926 +          (pte&(~ELAN3_PTE_REF)) != elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu) : \
9927 +          ELAN3_PTE_VALID(pte))
9928 +
9929 +void
9930 +elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr)
9931 +{
9932 +    ELAN3_PTBL          *ptbl;
9933 +    sdramaddr_t                pte;
9934 +    spinlock_t        *lock;
9935 +    u_int              span;
9936 +    unsigned long       flags;
9937 +
9938 +    HAT_PRINTF3 (1, "elan3mmu_expand: elan3mmu %p %08x to %08x\n", elan3mmu, 
9939 +                addr, addr + len);
9940 +
9941 +    for ( ; len != 0; addr += span, len -= span)
9942 +    {
9943 +       /* as we asked for level 3 we know its a pte */
9944 +       pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
9945 +
9946 +       switch (level)
9947 +       {
9948 +       case PTBL_LEVEL_3:
9949 +           span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
9950 +           break;
9951 +       case PTBL_LEVEL_2:
9952 +           span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET));
9953 +           break;
9954 +       default:
9955 +           span = len;
9956 +           break;
9957 +       }
9958 +       
9959 +       if (pte != (sdramaddr_t) 0)
9960 +           elan3mmu_unlock_ptbl (ptbl, lock, flags);
9961 +    }
9962 +}
9963 +
9964 +void
9965 +elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes)
9966 +{
9967 +    ELAN3_PTBL          *ptbl;
9968 +    sdramaddr_t                pte;
9969 +    spinlock_t        *lock;
9970 +    u_int              span;
9971 +    int                        len;
9972 +    int                        i;
9973 +    unsigned long       flags;
9974 +
9975 +    HAT_PRINTF3 (1, "elan3mmu_reserve: elan3mmu %p %08x to %08x\n", elan3mmu, 
9976 +                addr, addr + (npages << ELAN3_PAGE_SHIFT));
9977 +
9978 +    for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span)
9979 +    {
9980 +       /* as we asked for level 3 we know its a pte */
9981 +       pte = elan3mmu_ptealloc (elan3mmu, addr, 3, &ptbl, &lock, 0, &flags);
9982 +
9983 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
9984 +       
9985 +       if (ptes != NULL)
9986 +       {
9987 +           for (i = 0; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE)
9988 +               *ptes++ = pte;
9989 +           ptbl->ptbl_valid += (span >> ELAN3_PAGE_SHIFT);
9990 +
9991 +           HAT_PRINTF4 (2, "elan3mmu_reserve: inc valid for level %d ptbl %p to %d   (%d)\n", 
9992 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid, (span >> ELAN3_PAGE_SHIFT));
9993 +
9994 +       }
9995 +
9996 +       elan3mmu_unlock_ptbl (ptbl, lock, flags);
9997 +    }
9998 +}
9999 +
10000 +void
10001 +elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes)
10002 +{
10003 +    ELAN3_DEV           *dev = elan3mmu->elan3mmu_dev;
10004 +    ELAN3_PTBL          *ptbl;
10005 +    sdramaddr_t                pte;
10006 +    ELAN3_PTE          tpte;
10007 +    spinlock_t        *lock;
10008 +    u_int              span;
10009 +    int                        len;
10010 +    int                        i;
10011 +    int                        level;
10012 +    unsigned long       flags;
10013 +    
10014 +    HAT_PRINTF3 (1, "elan3mmu_release: elan3mmu %p %08x to %08x\n", elan3mmu, 
10015 +                addr, addr + (npages << ELAN3_PAGE_SHIFT));
10016 +
10017 +    if (ptes == NULL)
10018 +       return;
10019 +
10020 +    tpte = elan3mmu_kernel_invalid_pte (elan3mmu);
10021 +
10022 +    for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span)
10023 +    {
10024 +       /* as we asked for level 3 we know its a pte */
10025 +       pte = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags);
10026 +       ASSERT (level == PTBL_LEVEL_3);
10027 +
10028 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10029 +
10030 +
10031 +       for (i = 0 ; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE)
10032 +           elan3_writepte (dev, pte, tpte);
10033 +       ptbl->ptbl_valid -= (span >> ELAN3_PAGE_SHIFT);
10034 +
10035 +       HAT_PRINTF3 (2, "elan3mmu_release: inc valid for level %d ptbl %p to %d\n", 
10036 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
10037 +
10038 +       elan3mmu_unlock_ptbl (ptbl, lock, flags);
10039 +    }
10040 +    ElanFlushTlb (elan3mmu->elan3mmu_dev);
10041 +}
10042 +
10043 +void
10044 +elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr)
10045 +    
10046 +{
10047 +    ELAN3_DEV     *dev;
10048 +    ELAN3_PTBL    *ptbl;
10049 +    spinlock_t   *lock;
10050 +    unsigned long flags;
10051 +    ELAN3_PTE      newpte;
10052 +    ELAN3_PTE      oldpte;
10053 +    sdramaddr_t   pte;
10054 +
10055 +    ASSERT((level == PTBL_LEVEL_2) || (level == PTBL_LEVEL_3));
10056 +
10057 +    /* Generate the new pte which we're going to load */
10058 +    dev = elan3mmu->elan3mmu_dev;
10059 +
10060 +    newpte = elan3mmu_phys_to_pte (dev, paddr, perm);
10061 +    
10062 +    if (attr & PTE_LOAD_BIG_ENDIAN)
10063 +       newpte |= ELAN3_PTE_BIG_ENDIAN;
10064 +
10065 +    HAT_PRINTF4 (1, "elan3mmu_pteload: elan3mmu %p level %d addr %x pte %llx\n", elan3mmu, level, addr, (long long) newpte);
10066 +    HAT_PRINTF5 (1, "elan3mmu_pteload:%s%s%s perm=%d phys=%llx\n",
10067 +                (newpte & ELAN3_PTE_LOCAL)  ? " local" : "",
10068 +                (newpte & ELAN3_PTE_64_BIT)     ? " 64 bit" : "",
10069 +                (newpte & ELAN3_PTE_BIG_ENDIAN) ? " big-endian" : " little-endian",
10070 +                (u_int) (newpte & ELAN3_PTE_PERM_MASK) >> ELAN3_PTE_PERM_SHIFT,
10071 +                (unsigned long long) (newpte & ELAN3_PTE_PFN_MASK));
10072 +                 
10073 +    if (level == PTBL_LEVEL_3)
10074 +       pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
10075 +    else
10076 +    {
10077 +       sdramaddr_t ptp = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
10078 +
10079 +       pte = elan3mmu_ptp2pte (elan3mmu, ptp, level);
10080 +
10081 +       HAT_PRINTF3 (2, "elan3mmu_pteload: level %d ptp at %lx => pte at %lx\n", level, ptp, pte);
10082 +    }
10083 +
10084 +    if (pte == (sdramaddr_t) 0)
10085 +    {
10086 +       ASSERT (level == PTBL_LEVEL_3 && (attr & (PTE_NO_SLEEP | PTE_NO_STEAL)) == (PTE_NO_SLEEP | PTE_NO_STEAL));
10087 +       return;
10088 +    }
10089 +
10090 +    ASSERT (ptbl->ptbl_elan3mmu == elan3mmu);
10091 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == level);
10092 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
10093 +    
10094 +    oldpte = elan3_readpte (dev, pte);
10095 +
10096 +    HAT_PRINTF3 (2, "elan3mmu_pteload: modify pte at %lx from %llx to %llx\n", pte, (long long) oldpte, (long long) newpte);
10097 +
10098 +    if (ELAN3_PTE_IS_VALID(ptbl, oldpte))
10099 +    {
10100 +       ELAN3MMU_STAT(ptereload);
10101 +
10102 +       ASSERT ((newpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK)) == (oldpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK)));
10103 +       
10104 +       if ((newpte & ~ELAN3_RM_MASK) != (oldpte & ~ELAN3_RM_MASK))
10105 +       {
10106 +           /* We're modifying a valid translation, it must be mapping the same page */
10107 +           /* so we use elan3_modifypte to not affect the referenced and modified bits */
10108 +           elan3_modifypte (dev, pte, newpte);
10109 +
10110 +
10111 +           ElanFlushTlb (elan3mmu->elan3mmu_dev);
10112 +       }
10113 +    }
10114 +    else
10115 +    {
10116 +       ELAN3MMU_STAT(pteload);
10117 +
10118 +       ptbl->ptbl_valid++;
10119 +
10120 +       HAT_PRINTF3 (2, "elan3mmu_pteload: inc valid for level %d ptbl %p to %d\n", 
10121 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
10122 +
10123 +       HAT_PRINTF2 (2, "elan3mmu_pteload: write pte %lx to %llx\n", pte, (long long) newpte);
10124 +
10125 +       elan3_writepte (dev, pte, newpte);
10126 +
10127 +       if (ptbl->ptbl_flags & PTBL_KERNEL)
10128 +           ElanFlushTlb (elan3mmu->elan3mmu_dev);
10129 +
10130 +    }
10131 +
10132 +    elan3mmu_unlock_ptbl (ptbl, lock, flags);
10133 +}
10134 +
10135 +void
10136 +elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int attr)
10137 +{
10138 +    ELAN3_PTBL          *ptbl;
10139 +    sdramaddr_t         ptp;
10140 +    spinlock_t        *lock;
10141 +    int                        level;
10142 +    u_int              span;
10143 +    unsigned long      flags;
10144 +
10145 +    HAT_PRINTF3(1, "elan3mmu_unload (elan3mmu %p addr %x -> %x)\n", elan3mmu, addr, addr+len-1);
10146 +
10147 +    for (; len != 0; addr += span, len -= span)
10148 +    {
10149 +       ptp  = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags);
10150 +
10151 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10152 +
10153 +       if (ptp != (sdramaddr_t) 0)
10154 +       {
10155 +           HAT_PRINTF2 (2, "elan3mmu_unload: unload [%x,%x]\n", addr, addr + span);
10156 +           
10157 +           if ( level ==  PTBL_LEVEL_3 ) 
10158 +               elan3mmu_unload_loop (elan3mmu, ptbl, ptp - PTBL_TO_PTADDR(ptbl), span >> ELAN3_PAGE_SHIFT, attr);
10159 +           else
10160 +           {
10161 +               ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
10162 +               ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
10163 +               ELAN3_PTBL  *lXptbl;
10164 +               ELAN3_PTP    tptp;
10165 +               int         idx;
10166 +
10167 +               tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp);
10168 +
10169 +               ASSERT (ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE);
10170 +
10171 +               lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tptp);
10172 +               idx    = (PTP_TO_PT_PADDR(tptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;
10173 +
10174 +               if ( level == PTBL_LEVEL_1) 
10175 +                   span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET));
10176 +               else
10177 +                   span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10178 +
10179 +               /* invalidate the ptp. */
10180 +               elan3_writeptp (dev, ptp, invalidptp);
10181 +               if (! (attr & PTE_UNLOAD_NOFLUSH))
10182 +                   ElanFlushTlb (dev);     
10183 +    
10184 +               elan3mmu_free_pte ( dev, elan3mmu, lXptbl, idx); 
10185 +
10186 +               ptbl->ptbl_valid--;
10187 +
10188 +               HAT_PRINTF3 (2, "elan3mmu_unload: dec valid for level %d ptbl %p to %d\n", 
10189 +                            PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);     
10190 +
10191 +           }
10192 +           elan3mmu_unlock_ptbl (ptbl, lock, flags);
10193 +       }
10194 +    }
10195 +}
10196 +
10197 +static void
10198 +elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags)
10199 +{
10200 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
10201 +    sdramaddr_t pte;
10202 +    ELAN3_PTE    tpte;
10203 +    int         last_valid = first_valid + nptes;
10204 +    int                i;
10205 +    
10206 +    HAT_PRINTF3 (1, "elan3mmu_unloadloop: ptbl %p entries [%d->%d]\n", ptbl, first_valid, last_valid);
10207 +
10208 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
10209 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3);
10210 +    
10211 +    pte = PTBL_TO_PTADDR(ptbl) + first_valid;
10212 +    
10213 +    for (i = first_valid; i < last_valid; i++, pte += ELAN3_PTE_SIZE)
10214 +    {
10215 +       if (ptbl->ptbl_valid == 0)
10216 +           break;
10217 +
10218 +       tpte = elan3_readpte (dev, pte);
10219 +       if (! ELAN3_PTE_IS_VALID(ptbl, tpte))
10220 +           continue;
10221 +       
10222 +       elan3mmu_pteunload (ptbl, pte, flags, NO_MLIST_LOCK);
10223 +    }
10224 +}
10225 +
10226 +void
10227 +elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock)
10228 +{
10229 +    ELAN3_DEV   *dev = ptbl->ptbl_elan3mmu->elan3mmu_dev;
10230 +    ELAN3_PTE    tpte;
10231 +
10232 +    ASSERT (PTBL_LEVEL (ptbl->ptbl_flags) == PTBL_LEVEL_3);
10233 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
10234 +
10235 +    HAT_PRINTF2 (1, "elan3mmu_pteunload: ptbl %p pte %lx\n", ptbl, pte);
10236 +
10237 +    ELAN3MMU_STAT (pteunload);
10238 +
10239 +    elan3_invalidatepte (dev, pte);
10240 +
10241 +    if (! (flags & PTE_UNLOAD_NOFLUSH))
10242 +       ElanFlushTlb (dev);
10243 +    
10244 +    tpte = ELAN3_INVALID_PTE;
10245 +    elan3_writepte (dev, pte, tpte);
10246 +    
10247 +    if (ptbl->ptbl_flags & PTBL_KERNEL)
10248 +    {
10249 +       tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu);
10250 +
10251 +       elan3_writepte (dev, pte, tpte);
10252 +    }
10253 +
10254 +    ptbl->ptbl_valid--;
10255 +
10256 +    HAT_PRINTF3 (2, "elan3mmu_pteunload: dec valid for level %d ptbl %p to %d\n", 
10257 +                PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
10258 +
10259 +}
10260 +
10261 +void
10262 +elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock)
10263 +{
10264 +
10265 +}
10266 +
10267 +/*
10268 + * Create more page tables at a given level for this Elan.
10269 + */
10270 +static ELAN3_PTBL *
10271 +elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep)
10272 +{
10273 +    sdramaddr_t          pts;
10274 +    ELAN3_PTBL    *ptbl;
10275 +    ELAN3_PTBL    *first;
10276 +    ELAN3_PTBL    *last;
10277 +    ELAN3_PTBL_GR *ptg;
10278 +    register int  i;
10279 +    register int  inc;
10280 +    
10281 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: create level %d ptbls\n", level);
10282 +
10283 +    pts = elan3_sdram_alloc (dev, PTBL_GROUP_SIZE);
10284 +    if (pts == (sdramaddr_t) 0)
10285 +    {
10286 +       HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot map elan pages\n");
10287 +
10288 +       ELAN3MMU_STAT (create_ptbl_failed);
10289 +       return (NULL);
10290 +    }
10291 +    
10292 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: pts at %lx\n", pts);
10293 +    
10294 +    ALLOC_PTBL_GR (ptg, !(attr & PTE_NO_SLEEP));               /* Allocate the group of page tables */
10295 +    if (ptg == NULL)                                           /* for this page */
10296 +    {
10297 +       HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot allocate page table group\n");
10298 +
10299 +       elan3_sdram_free (dev, pts, PTBL_GROUP_SIZE);
10300 +
10301 +       ELAN3MMU_STAT (create_ptbl_failed);
10302 +       return (NULL);
10303 +    }
10304 +
10305 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: ptg is %p\n", ptg);
10306 +    
10307 +    ElanSetPtblGr (dev, pts, ptg);
10308 +    
10309 +    HAT_PRINTF4 (2, "elan3mmu_create_ptbls: zeroing %d bytes at %lx, %d bytes at %p\n",
10310 +                PTBL_GROUP_SIZE, pts, (int) sizeof (ELAN3_PTBL_GR), ptg);
10311 +
10312 +#ifndef zero_all_ptbls
10313 +    elan3_sdram_zeroq_sdram (dev, pts, PTBL_GROUP_SIZE);               /* Ensure that all PTEs/PTPs are invalid */
10314 +#endif
10315 +    bzero ((caddr_t) ptg, sizeof (ELAN3_PTBL_GR));
10316 +    
10317 +    ptg->pg_addr  = pts;
10318 +    ptg->pg_level = level;
10319 +
10320 +    ptbl = ptg->pg_ptbls;                                      /* Initialise the index in all page tables */
10321 +    for (i = 0; i < PTBLS_PER_GROUP_MAX; i++)
10322 +    {
10323 +       ptbl->ptbl_index = (u_char) i;
10324 +       ptbl->ptbl_next  = (ELAN3_PTBL *) 0xdeaddead;
10325 +       ptbl++;
10326 +    }
10327 +    
10328 +    switch (level)                                             /* Determine the number of ptbls we can  */
10329 +    {                                                          /* allocate from this page, by jumping  */
10330 +    case PTBL_LEVEL_X: inc = PTBLS_PER_PTBL_LX; break;         /* multiples of the smallest. */
10331 +    case PTBL_LEVEL_1: inc = PTBLS_PER_PTBL_L1; break;
10332 +    case PTBL_LEVEL_2: inc = PTBLS_PER_PTBL_L2; break;
10333 +    case PTBL_LEVEL_3: inc = PTBLS_PER_PTBL_L3; break;
10334 +    default:           inc = PTBLS_PER_PTBL_L3; break;
10335 +    }
10336 +
10337 +    ptbl = ptg->pg_ptbls;                                      /* Chain them together */
10338 +    for (i = 0; i < PTBLS_PER_GROUP_MAX; i += inc, ptbl += inc)
10339 +       ptbl->ptbl_next = ptbl + inc;
10340 +
10341 +    first = ptg->pg_ptbls;                                     /* Determine list of */
10342 +    last  = first + PTBLS_PER_GROUP_MAX - inc;                 /* ptbls to add to free list */
10343 +    if (! keep)
10344 +       ptbl = NULL;
10345 +    else
10346 +    {
10347 +       ptbl  = first;
10348 +       first = first->ptbl_next;
10349 +    }
10350 +    
10351 +    spin_lock (&dev->Level[level].PtblLock);
10352 +    dev->Level[level].PtblTotal     += PTBLS_PER_GROUP_MAX/inc;                /* Increment the counts */
10353 +    dev->Level[level].PtblFreeCount += PTBLS_PER_GROUP_MAX/inc;
10354 +
10355 +    ELAN3MMU_SET_STAT (num_ptbl_level[level], dev->Level[level].PtblTotal);
10356 +
10357 +    if (keep)
10358 +       dev->Level[level].PtblFreeCount--;
10359 +    
10360 +    last->ptbl_next = dev->Level[level].PtblFreeList;                  /* And add to free list */
10361 +    dev->Level[level].PtblFreeList = first;
10362 +    spin_unlock (&dev->Level[level].PtblLock);
10363 +    
10364 +    spin_lock (&dev->PtblGroupLock);
10365 +    ptg->pg_next = dev->Level[level].PtblGroupList;
10366 +    dev->Level[level].PtblGroupList = ptg;
10367 +    spin_unlock (&dev->PtblGroupLock);
10368 +
10369 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: returning ptbl %p\n", ptbl);
10370 +    
10371 +    return (ptbl);
10372 +}
10373 +
10374 +static ELAN3_PTBL *
10375 +elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp)
10376 +{
10377 +    E3_Addr      ptpa  = PTP_TO_PT_PADDR(*ptp);
10378 +    ELAN3_PTBL_GR *pg    = ElanGetPtblGr (elan3mmu->elan3mmu_dev, (sdramaddr_t)ptpa & ~(PTBL_GROUP_SIZE-1));
10379 +    
10380 +    return (pg->pg_ptbls + ((ptpa - pg->pg_addr) >> ELAN3_PT_SHIFT));
10381 +}
10382 +
10383 +static ELAN3_PTBL *
10384 +elan3mmu_alloc_lXptbl (ELAN3_DEV *dev, int attr,  ELAN3MMU *elan3mmu)
10385 +{
10386 +    ELAN3_PTBL *ptbl = NULL;
10387 +
10388 +    spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10389 +    if (dev->Level[PTBL_LEVEL_X].PtblFreeList)
10390 +    {
10391 +       ptbl = dev->Level[PTBL_LEVEL_X].PtblFreeList;
10392 +
10393 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: found ptbl %p on free list\n", ptbl);
10394 +
10395 +       dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl->ptbl_next;
10396 +       dev->Level[PTBL_LEVEL_X].PtblFreeCount--;
10397 +    }
10398 +    spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10399 +    
10400 +    if (ptbl == NULL) 
10401 +    {
10402 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_X, attr, 1);
10403 +
10404 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: created level X ptbl %p\n", ptbl);
10405 +    }
10406 +
10407 +    if (ptbl == NULL)
10408 +    {
10409 +       if ((attr & PTE_NO_STEAL))
10410 +       {
10411 +           HAT_PRINTF0 (2, "elan3mmu_alloc_lXptbl: not allowed to steal ptbl for use at level 2\n");
10412 +           return NULL;
10413 +       }
10414 +
10415 +       ELAN3MMU_STAT(lX_alloc_l3);
10416 +
10417 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
10418 +       
10419 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: stolen level3 ptbl %p used as level 2\n", ptbl);
10420 +    }
10421 +
10422 +    ptbl->ptbl_elan3mmu = elan3mmu;
10423 +    ptbl->ptbl_base     = 0;
10424 +    ptbl->ptbl_parent   = 0;
10425 +    ptbl->ptbl_flags    = PTBL_LEVEL_X | PTBL_ALLOCED;
10426 +    
10427 +    HAT_PRINTF2 (2, "elan3mmu_alloc_lXptbl: ptbl %p dev %p\n", ptbl, dev);
10428 +
10429 +#ifdef zero_all_ptbls
10430 +    elan3_sdram_zero_sdarm (dev, PTBL_TO_PTADDR(ptbl), ELAN3_LX_ENTRIES*ELAN3_PTE_SIZE);
10431 +#endif
10432 +
10433 +    return (ptbl);
10434 +}
10435 +
10436 +static ELAN3_PTBL *
10437 +elan3mmu_alloc_pte (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx)
10438 +{
10439 +    ELAN3_PTBL   * ptbl_ptr;
10440 +    int           index;
10441 +
10442 +    /* lock whilst looking for space */
10443 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10444 +    
10445 +    /* walk the lXptbl list */
10446 +    ptbl_ptr = elan3mmu->elan3mmu_lXptbl;
10447 +    while ( ptbl_ptr != NULL ) 
10448 +    {
10449 +       /* does this ptlb have any free ones */
10450 +       if (  (index = ptbl_ptr->ptbl_valid) < ELAN3_LX_ENTRIES) 
10451 +       {
10452 +           /*  better to search  from valid count as its likly to be free */
10453 +           index = ptbl_ptr->ptbl_valid; 
10454 +           do {
10455 +               if ((ptbl_ptr->ptbl_base & (1 << index)) == 0)
10456 +                   goto found;
10457 +
10458 +               /* move index on and wrap back to start if needed */
10459 +               if ((++index) == ELAN3_LX_ENTRIES) 
10460 +                   index = 0;
10461 +           } while (index != ptbl_ptr->ptbl_valid);
10462 +
10463 +           panic ("elan3mmu_alloc_pte: has ptbl valid < 32 when but no free pte's");
10464 +       }
10465 +       ptbl_ptr = ptbl_ptr->ptbl_parent;
10466 +    }
10467 +       
10468 +    /* unlock so we can create space */
10469 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); 
10470 +
10471 +    /* if create some more */
10472 +    ptbl_ptr = elan3mmu_alloc_lXptbl(dev, 0, elan3mmu);
10473 +
10474 +    /* get the lock again */
10475 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10476 +       
10477 +    /* add to front of list as its obviously got free ones on it */
10478 +    ptbl_ptr->ptbl_parent     = elan3mmu->elan3mmu_lXptbl;
10479 +    elan3mmu->elan3mmu_lXptbl = ptbl_ptr;
10480 +
10481 +    /* grap the first one */
10482 +    index = 0;
10483 +    
10484 + found:
10485 +    ptbl_ptr->ptbl_base |= (1 << index);
10486 +    ptbl_ptr->ptbl_valid++;
10487 +
10488 +    HAT_PRINTF3 (2, "elan3mmu_alloc_pte: inc valid for level %d ptbl %p to %d\n", 
10489 +                PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid);
10490 +
10491 +    /* release the loc and return it */
10492 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); 
10493 +
10494 +    *idx = index;
10495 +    return (ptbl_ptr);
10496 +}
10497 +
10498 +static ELAN3_PTBL *
10499 +elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu)
10500 +{
10501 +    ELAN3_PTBL *ptbl = NULL;
10502 +    ELAN3_PTBL *p;
10503 +    int i,j;
10504 +    
10505 +    spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10506 +    if (dev->Level[PTBL_LEVEL_1].PtblFreeList)
10507 +    {
10508 +       ptbl = dev->Level[PTBL_LEVEL_1].PtblFreeList;
10509 +       dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl->ptbl_next;
10510 +       dev->Level[PTBL_LEVEL_1].PtblFreeCount--;
10511 +    }
10512 +    spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10513 +    
10514 +    if (ptbl == NULL)
10515 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_1, attr, 1);
10516 +    
10517 +    if (ptbl == NULL)
10518 +       panic ("elan3mmu_alloc_l1ptbl: cannot alloc ptbl");
10519 +    
10520 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L1; i++, p++)
10521 +    {
10522 +       p->ptbl_elan3mmu = elan3mmu;
10523 +       p->ptbl_base     = VA2BASE (j);
10524 +       p->ptbl_flags    = PTBL_LEVEL_1 | PTBL_GROUPED;
10525 +       p->ptbl_parent   = NULL;
10526 +       
10527 +       j += L1_VA_PER_PTBL;
10528 +    }
10529 +    
10530 +    /* Now mark the real page table as allocated */
10531 +    /* level 1 ptbls are returned unlocked */
10532 +    ptbl->ptbl_flags = PTBL_LEVEL_1 | PTBL_ALLOCED;
10533 +    
10534 +    HAT_PRINTF2 (2, "elan3mmu_alloc_l1ptbl: ptbl %p dev %p\n", ptbl, dev);
10535 +
10536 +#ifdef zero_all_ptbls
10537 +    elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L1_ENTRIES*ELAN3_PTP_SIZE);
10538 +#endif
10539 +
10540 +    return (ptbl);
10541 +}
10542 +
10543 +static ELAN3_PTBL *
10544 +elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags)
10545 +{
10546 +    ELAN3_PTBL *ptbl = NULL;
10547 +    ELAN3_PTBL *p;
10548 +    int        i;
10549 +    int        j;
10550 +    unsigned long ptbl_flags;
10551 +
10552 +    spin_lock_irqsave (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags);
10553 +    if (dev->Level[PTBL_LEVEL_2].PtblFreeList)
10554 +    {
10555 +       ptbl = dev->Level[PTBL_LEVEL_2].PtblFreeList;
10556 +
10557 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: found ptbl %p on free list\n", ptbl);
10558 +
10559 +       dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl->ptbl_next;
10560 +       dev->Level[PTBL_LEVEL_2].PtblFreeCount--;
10561 +    }
10562 +    spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags);
10563 +    
10564 +    if (ptbl == NULL) 
10565 +    {
10566 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_2, attr, 1);
10567 +
10568 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: created level 2 ptbl %p\n", ptbl);
10569 +    }
10570 +
10571 +    if (ptbl == NULL)
10572 +    {
10573 +       if ((attr & PTE_NO_STEAL))
10574 +       {
10575 +           HAT_PRINTF0 (2, "elan3mmu_alloc_l2ptbl: not allowted to steal ptbl for use at level 2\n");
10576 +           return (NULL);
10577 +       }
10578 +
10579 +       ELAN3MMU_STAT(l2_alloc_l3);
10580 +
10581 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
10582 +       
10583 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: stolen level3 ptbl %p used as level 2\n", ptbl);
10584 +    }
10585 +    
10586 +    *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_2, ptbl);
10587 +    spin_lock_irqsave (*plock, *flags);
10588 +    
10589 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L2; i++, p++)
10590 +    {
10591 +       p->ptbl_elan3mmu = elan3mmu;
10592 +       p->ptbl_base     = VA2BASE (base + j);
10593 +       p->ptbl_flags    = PTBL_LEVEL_2 | PTBL_GROUPED;
10594 +       p->ptbl_parent   = parent;
10595 +       
10596 +       j += L2_VA_PER_PTBL;
10597 +    }
10598 +    
10599 +    ptbl->ptbl_flags  = PTBL_LEVEL_2 | PTBL_ALLOCED | PTBL_LOCKED;
10600 +    
10601 +    HAT_PRINTF3 (2, "elan3mmu_alloc_l2ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base);
10602 +
10603 +#ifdef zero_all_ptbls
10604 +    elan3_sdram_zero_sdarm (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L2_ENTRIES*ELAN3_PTP_SIZE);
10605 +#endif
10606 +
10607 +    return (ptbl);
10608 +}
10609 +
10610 +static ELAN3_PTBL *
10611 +elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags)
10612 +{
10613 +    ELAN3_PTBL *ptbl = NULL;
10614 +    ELAN3_PTBL *p;
10615 +    int               i;
10616 +    int               j;
10617 +    unsigned long ptbl_flags;
10618 +
10619 +    spin_lock_irqsave (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags);
10620 +    if (dev->Level[PTBL_LEVEL_3].PtblFreeList)
10621 +    {
10622 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: found ptbl %p on free list\n", ptbl);
10623 +
10624 +       ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10625 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next;
10626 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount--;
10627 +    }
10628 +    spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags);
10629 +    
10630 +    if (ptbl == NULL)
10631 +    {
10632 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1);
10633 +
10634 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: created level 3 ptbl %p\n", ptbl);
10635 +    }
10636 +
10637 +    if (ptbl == NULL)
10638 +    {
10639 +       if ((attr & PTE_NO_STEAL))
10640 +       {
10641 +           HAT_PRINTF0 (2, "elan3mmu_alloc_l3ptbl: not allowed to steal ptbl for use at level 3\n");
10642 +           return (NULL);
10643 +       }
10644 +
10645 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
10646 +
10647 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: stolen level3 ptbl %p\n", ptbl);
10648 +    }
10649 +    
10650 +    *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_3, ptbl);
10651 +    spin_lock_irqsave (*plock,*flags);
10652 +    
10653 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L3; i++, p++)
10654 +    {
10655 +       p->ptbl_elan3mmu = elan3mmu;
10656 +       p->ptbl_base     = VA2BASE (base + j);
10657 +       p->ptbl_flags    = PTBL_LEVEL_3 | PTBL_GROUPED;
10658 +       p->ptbl_parent   = parent;
10659 +       
10660 +       j += L3_VA_PER_PTBL;
10661 +    }
10662 +    
10663 +    ptbl->ptbl_flags = PTBL_LEVEL_3 | PTBL_ALLOCED | PTBL_LOCKED;
10664 +    
10665 +    HAT_PRINTF3 (2, "elan3mmu_alloc_l3ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base);
10666 +
10667 +#ifdef zero_all_ptbls
10668 +    elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE);
10669 +#endif
10670 +
10671 +    return (ptbl);
10672 +}
10673 +
10674 +void 
10675 +elan3mmu_free_pte  (ELAN3_DEV *dev,  ELAN3MMU *elan3mmu,  ELAN3_PTBL *ptbl_ptr, int idx)
10676 +{  
10677 +    sdramaddr_t pte  = PTBL_TO_PTADDR (ptbl_ptr) | (idx * sizeof (ELAN3_PTE));
10678 +    ELAN3_PTE    tpte = ELAN3_INVALID_PTE;
10679 +    ELAN3_PTBL *prev;
10680 +
10681 +    /* ensure that the pte is invalid when free */
10682 +    elan3_writepte (dev, pte, tpte);
10683 +
10684 +    /* lock whilst removing */
10685 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10686 +
10687 +    HAT_PRINTF4 (2, "elan3mmu_free_pte idx %d   ptbl_ptr %p ptbl_base  %x  ptbl_ptr->ptbl_valid %d \n", 
10688 +                idx, ptbl_ptr, ptbl_ptr->ptbl_base, ptbl_ptr->ptbl_valid);
10689 +    /* make sure it was set */
10690 +    ASSERT ( ptbl_ptr->ptbl_base & (1 << idx) ); 
10691 +    ASSERT ( ptbl_ptr->ptbl_valid > 0  );
10692 +
10693 +    ptbl_ptr->ptbl_base &= ~(1 << idx);
10694 +    ptbl_ptr->ptbl_valid--;
10695 +
10696 +    HAT_PRINTF3 (2, "elan3mmu_free_pte: dec valid for level %d ptbl %p to %d\n", 
10697 +                PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid); 
10698
10699 +    /* was that the last one on this page */
10700 +    if ( ! ptbl_ptr->ptbl_valid ) 
10701 +    {
10702 +       /* so no bits should be set then */
10703 +       ASSERT ( ptbl_ptr->ptbl_base == 0 );
10704 +
10705 +       /* is this the first page ?? */
10706 +       if ( elan3mmu->elan3mmu_lXptbl == ptbl_ptr ) 
10707 +       {
10708 +           /* make the list start at the second element */
10709 +            elan3mmu->elan3mmu_lXptbl = ptbl_ptr->ptbl_parent;
10710 +
10711 +            /* put ptbl back on free list */
10712 +            elan3mmu_free_lXptbl(dev, ptbl_ptr);
10713 +
10714 +            /* unlock and return */
10715 +            spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
10716 +            return ;
10717 +       }
10718 +
10719 +       /* scan thro list looking for this page */
10720 +       prev = elan3mmu->elan3mmu_lXptbl;
10721 +       while ( prev->ptbl_parent != NULL ) 
10722 +       {
10723 +           if ( prev->ptbl_parent == ptbl_ptr ) /* its the next one */
10724 +           {
10725 +               /* remove element from chain */
10726 +               prev->ptbl_parent =  ptbl_ptr->ptbl_parent;
10727 +
10728 +               /* put ptbl back on free list */
10729 +               elan3mmu_free_lXptbl(dev, ptbl_ptr);
10730 +
10731 +               /* unlock and return */
10732 +               spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
10733 +               return ;
10734 +           }           
10735 +           prev = prev->ptbl_parent;
10736 +       }
10737 +       
10738 +               panic ("elan3mmu_free_pte: failed to find ptbl in chain");
10739 +       /* NOTREACHED */
10740 +    }
10741 +    
10742 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
10743 +}
10744 +
10745 +void
10746 +elan3mmu_free_lXptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl)
10747 +{
10748 +    ELAN3_PTBL_GR *ptg;
10749 +
10750 +    HAT_PRINTF2 (2, "elan3mmu_free_lXptbl: dev %p ptbl %p\n", dev, ptbl);
10751 +
10752 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10753 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10754 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_X);
10755 +    ASSERT (ptbl->ptbl_valid == 0);
10756 +   
10757 +    ptbl->ptbl_flags = 0;
10758 +
10759 +    ptg = PTBL_TO_GR(ptbl);
10760 +
10761 +    if (ptg->pg_level == PTBL_LEVEL_3)
10762 +    {
10763 +       ELAN3MMU_STAT(lX_freed_l3);
10764 +
10765 +       HAT_PRINTF1 (2, "elan3mmu_free_lXptbl: freeing stolen level 3 ptbl %p\n", ptbl);
10766 +
10767 +       /* this was really a level 3 ptbl which we had to steal */
10768 +       spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10769 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10770 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
10771 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
10772 +       spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10773 +    }
10774 +    else
10775 +    {
10776 +       spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10777 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_X].PtblFreeList;
10778 +       dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl;
10779 +       dev->Level[PTBL_LEVEL_X].PtblFreeCount++;
10780 +       spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10781 +    }
10782 +}
10783 +
10784 +void
10785 +elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
10786 +{
10787 +    HAT_PRINTF3 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p ptbl->ptbl_valid %x \n", dev, ptbl, ptbl->ptbl_valid);
10788 +
10789 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10790 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10791 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_1);
10792 +    ASSERT (ptbl->ptbl_valid == 0);
10793 +    
10794 +    HAT_PRINTF2 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p\n", dev, ptbl);
10795 +
10796 +    ptbl->ptbl_flags = 0;
10797 +    spin_unlock (lock);
10798 +    
10799 +    spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10800 +    ptbl->ptbl_next = dev->Level[PTBL_LEVEL_1].PtblFreeList;
10801 +    dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl;
10802 +    dev->Level[PTBL_LEVEL_1].PtblFreeCount++;
10803 +    spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10804 +
10805 +    local_irq_restore (flags);
10806 +}
10807 +
10808 +void
10809 +elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
10810 +{
10811 +    ELAN3_PTBL_GR *ptg;
10812 +
10813 +    HAT_PRINTF2 (2, "elan3mmu_free_l2ptbl: dev %p ptbl %p\n", dev, ptbl);
10814 +
10815 +    ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags));
10816 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10817 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10818 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_2);
10819 +    ASSERT (ptbl->ptbl_valid == 0);
10820 +   
10821 +    ptbl->ptbl_flags = 0;
10822 +    spin_unlock (lock);
10823 +
10824 +    ptg = PTBL_TO_GR(ptbl);
10825 +
10826 +    if (ptg->pg_level == PTBL_LEVEL_3)
10827 +    {
10828 +       ELAN3MMU_STAT(l2_freed_l3);
10829 +
10830 +       HAT_PRINTF1 (2, "elan3mmu_free_l2ptbl: freeing stolen level 3 ptbl %p\n", ptbl);
10831 +
10832 +       /* this was really a level 3 ptbl which we had to steal */
10833 +       spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10834 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10835 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
10836 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
10837 +       spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10838 +    }
10839 +    else
10840 +    {
10841 +       spin_lock (&dev->Level[PTBL_LEVEL_2].PtblLock);
10842 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_2].PtblFreeList;
10843 +       dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl;
10844 +       dev->Level[PTBL_LEVEL_2].PtblFreeCount++;
10845 +       spin_unlock (&dev->Level[PTBL_LEVEL_2].PtblLock);
10846 +    }  
10847 +    local_irq_restore (flags);
10848 +}
10849 +
10850 +void
10851 +elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
10852 +{
10853 +    ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags));
10854 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
10855 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
10856 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3);
10857 +    ASSERT (ptbl->ptbl_valid == 0);
10858 +    
10859 +    HAT_PRINTF2 (2, "elan3mmu_free_l3ptbl: dev %p ptbl %p\n", dev, ptbl);
10860 +
10861 +    if (ptbl->ptbl_flags & PTBL_KERNEL)                                /* if the ptbl has been used by the kernel */
10862 +    {                                                          /* then zero all the pte's, since they will */
10863 +       elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE);
10864 +    }
10865 +
10866 +    ptbl->ptbl_flags = 0;
10867 +    spin_unlock (lock);
10868 +    
10869 +    spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10870 +    ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10871 +    dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
10872 +    dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
10873 +    spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10874 +
10875 +    local_irq_restore (flags);
10876 +}
10877 +
10878 +void
10879 +elan3mmu_kernel_l3ptbl (ELAN3_PTBL *ptbl)
10880 +{
10881 +    ELAN3_DEV   *dev  = ptbl->ptbl_elan3mmu->elan3mmu_dev;
10882 +    sdramaddr_t pte  = PTBL_TO_PTADDR(ptbl);
10883 +    ELAN3_PTE    tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu);
10884 +    int                i;
10885 +
10886 +    ptbl->ptbl_flags |= PTBL_KERNEL;
10887 +    for (i = 0; i < ELAN3_L3_ENTRIES; i++, pte += ELAN3_PTE_SIZE)
10888 +    {
10889 +       elan3_writepte (dev, pte, tpte);
10890 +    }
10891 +}
10892 +       
10893 +#define PTBL_CAN_STEAL(flag)   (((flag) & (PTBL_KERNEL|PTBL_KEEP)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3))
10894 +#define PTBL_MAY_STEAL(flag)   (((flag) & (PTBL_KERNEL|PTBL_KEEP|PTBL_LOCKED)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3))
10895 +
10896 +static int
10897 +elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl)
10898 +{
10899 +    ELAN3_PTBL  *l2ptbl     = l3ptbl->ptbl_parent;
10900 +    E3_Addr     l2addr     = BASE2VA(l2ptbl);
10901 +    E3_Addr     l3addr     = BASE2VA(l3ptbl);
10902 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
10903 +    sdramaddr_t l2ptp;
10904 +    spinlock_t *l2lock;
10905 +    unsigned long l2flags;
10906 +
10907 +    HAT_PRINTF5 (1, "elan3mmu_steal_this_ptbl: l3ptbl %p (%x) l2ptbl %p (%x) l2addr %x\n",
10908 +                l3ptbl, l3ptbl->ptbl_flags, l2ptbl, l2ptbl->ptbl_flags, l2addr);
10909 +
10910 +    if (PTBL_CAN_STEAL (l3ptbl->ptbl_flags) &&
10911 +       elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_NOWAIT, l3ptbl->ptbl_elan3mmu, l2addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_OK)
10912 +    {
10913 +       ELAN3MMU_STAT(stolen_ptbls);
10914 +
10915 +       /* Locked both L3 and L2 page tables. */
10916 +       l2ptp = PTBL_TO_PTADDR (l2ptbl) + ELAN3_L2_INDEX(l3addr)*ELAN3_PTP_SIZE;
10917 +       
10918 +       /* detach the level 3 page table */
10919 +       elan3_writeptp (dev, l2ptp, invalidptp);
10920 +       ElanFlushTlb (dev);
10921 +
10922 +       l2ptbl->ptbl_valid--;
10923 +
10924 +       HAT_PRINTF3 (2, "elan3mmu_steal_this_ptbl: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
10925 +
10926 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
10927 +
10928 +       elan3mmu_unload_loop (l3ptbl->ptbl_elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, PTE_UNLOAD_NOFLUSH);
10929 +
10930 +       ASSERT (l3ptbl->ptbl_valid == 0);
10931 +
10932 +       l3ptbl->ptbl_flags = 0;
10933 +       return (1);
10934 +    }
10935 +    return (0);
10936 +}
10937 +
10938 +static ELAN3_PTBL *
10939 +elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr)
10940 +{
10941 +    ELAN3_PTBL_GR      *ptg;
10942 +    ELAN3_PTBL         *ptbl;
10943 +    spinlock_t         *lock;
10944 +    unsigned long        group_flags;
10945 +    unsigned long        ptbl_flags;
10946 +    register int        i;
10947 +
10948 +    HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: attr %x\n", attr);
10949 +
10950 +    spin_lock_irqsave (&dev->PtblGroupLock, group_flags);
10951 +
10952 +    ptg = dev->Level3PtblGroupHand;
10953 +
10954 +    if (ptg == NULL)
10955 +       ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList;
10956 +    
10957 +    for (;;)
10958 +    {
10959 +       while (ptg)
10960 +       {
10961 +           for (i = 0, ptbl = ptg->pg_ptbls; i < PTBLS_PER_GROUP_MAX; i++, ptbl++)
10962 +           {
10963 +               if (PTBL_MAY_STEAL (ptbl->ptbl_flags) &&
10964 +                   elan3mmu_lock_this_ptbl (ptbl, LK_PTBL_NOWAIT, &lock, &ptbl_flags) == LK_PTBL_OK)
10965 +               {
10966 +                   if (elan3mmu_steal_this_ptbl (dev, ptbl ))
10967 +                   {
10968 +                       HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: stolen ptbl %p\n", ptbl);
10969 +
10970 +                       elan3mmu_unlock_ptbl (ptbl, lock,ptbl_flags);
10971 +
10972 +                       dev->Level3PtblGroupHand = ptg->pg_next;
10973 +
10974 +                       spin_unlock_irqrestore (&dev->PtblGroupLock, group_flags);
10975 +
10976 +                       return (ptbl);
10977 +                   }
10978 +                   elan3mmu_unlock_ptbl (ptbl, lock, ptbl_flags);
10979 +               }
10980 +           }
10981 +           ptg = ptg->pg_next;
10982 +       }
10983 +       
10984 +       if (dev->Level[PTBL_LEVEL_3].PtblFreeList)
10985 +       {
10986 +           spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10987 +           ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList;
10988 +           if (ptbl != NULL)
10989 +           {
10990 +               dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next;
10991 +               dev->Level[PTBL_LEVEL_3].PtblFreeCount--;
10992 +           }
10993 +           spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
10994 +
10995 +           if (ptbl != NULL)
10996 +           {
10997 +               HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: found ptbl %p on free list\n", ptbl);
10998 +               break;
10999 +           }
11000 +       }
11001 +
11002 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1);
11003 +
11004 +       if (ptbl != NULL)
11005 +       {
11006 +           HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: created new ptbl %p\n", ptbl);
11007 +           break;
11008 +       }
11009 +       
11010 +       HAT_PRINTF0 (1, "elan3mmu_steal_l3ptbl: cannot find a ptbl, retrying\n");
11011 +       ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList;
11012 +    }
11013 +
11014 +    spin_unlock (&dev->PtblGroupLock);
11015 +    return (ptbl);
11016 +}
11017 +
11018 +sdramaddr_t
11019 +elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr addr, int *level, 
11020 +                 ELAN3_PTBL **pptbl, spinlock_t **plock, unsigned long *flags)
11021 +{
11022 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
11023 +    ELAN3_PTBL  *l1ptbl;
11024 +    sdramaddr_t l1ptp;
11025 +    ELAN3_PTP    tl1ptp;
11026 +    E3_Addr     l1base;
11027 +    ELAN3_PTBL  *l2ptbl;
11028 +    sdramaddr_t l2ptp;
11029 +    ELAN3_PTP    tl2ptp;
11030 +    E3_Addr     l2base;
11031 +    ELAN3_PTBL  *l3ptbl;
11032 +    sdramaddr_t l3pte;
11033 +    spinlock_t *l1lock;
11034 +    spinlock_t *l2lock;
11035 +    spinlock_t *l3lock;
11036 +    unsigned long l1flags;
11037 +    unsigned long l2flags;
11038 +    unsigned long l3flags;
11039 +
11040 +    HAT_PRINTF2 (2, "elan3mmu_ptefind: elan3mmu %p addr %x\n", elan3mmu, addr);
11041 +
11042 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
11043 +    *level = 0;
11044 +
11045 +    if (l1ptbl == NULL)
11046 +       return ((sdramaddr_t) NULL);
11047 +
11048 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11049 +    l1base = ELAN3_L1_BASE(addr);
11050 +    
11051 +retryl1:
11052 +    tl1ptp = elan3_readptp (dev, l1ptp);
11053 +    
11054 +    HAT_PRINTF4 (2, "elan3mmu_ptefind: l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp);
11055 +
11056 +    switch (ELAN3_PTP_TYPE(tl1ptp))
11057 +    {
11058 +    case ELAN3_ET_PTE:
11059 +       elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11060 +
11061 +       tl1ptp = elan3_readptp (dev, l1ptp);
11062 +       if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE)
11063 +       {
11064 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11065 +           goto retryl1;
11066 +       }
11067 +       
11068 +       *level = 1;
11069 +       *pptbl = l1ptbl;
11070 +       *plock = l1lock;
11071 +       *flags = l1flags;
11072 +       
11073 +       /* return with l1lock */
11074 +       return (l1ptp);  
11075 +
11076 +    case ELAN3_ET_INVALID:
11077 +       return ((sdramaddr_t) 0);
11078 +       
11079 +    case ELAN3_ET_PTP:
11080 +       break;
11081 +
11082 +    default:
11083 +       panic ("elan3mmu_ptefind: found bad entry in level 1 page table");
11084 +       /* NOTREACHED */
11085 +    }
11086 +    
11087 +    HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 2 ptbl from ptp %x\n", tl1ptp);
11088 +
11089 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11090 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11091 +    l2base = ELAN3_L2_BASE(addr);
11092 +    
11093 +    tl2ptp = elan3_readptp (dev, l2ptp);
11094 +    
11095 +    HAT_PRINTF4 (2, "elan3mmu_ptefind: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n", l2ptbl, l2ptp, l2base, tl2ptp);
11096 +
11097 +    switch (ELAN3_PTP_TYPE(tl2ptp))
11098 +    {
11099 +    case ELAN3_ET_PTE:
11100 +       switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11101 +       {
11102 +       case LK_PTBL_OK:
11103 +           tl2ptp = elan3_readptp (dev, l2ptp);
11104 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE)
11105 +           {
11106 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11107 +               goto retryl1;
11108 +           }
11109 +           
11110 +           *level = 2;
11111 +           *pptbl = l2ptbl;
11112 +           *plock = l2lock;
11113 +           *flags = l2flags;
11114 +           
11115 +           /* return with l2lock */
11116 +           return (l2ptp); 
11117 +           
11118 +       case LK_PTBL_MISMATCH:
11119 +           HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11120 +                        l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11121 +           
11122 +           /*
11123 +            * We've trogged down to this ptbl,  but someone has just
11124 +            * stolen it,  so try all over again.
11125 +            */
11126 +           goto retryl1;
11127 +           
11128 +       default:
11129 +           panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value");
11130 +           /* NOTREACHED */
11131 +       }
11132 +    case ELAN3_ET_INVALID:
11133 +       return ((sdramaddr_t) 0);
11134 +       
11135 +    case ELAN3_ET_PTP:
11136 +       break;
11137 +    default:
11138 +       panic ("elan3mmu_ptefind: found bad entry in level 2 page table");
11139 +       /* NOTREACHED */
11140 +    }
11141 +    
11142 +    HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 3 page table from ptp %x\n", tl2ptp);
11143 +
11144 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11145 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11146 +    
11147 +    HAT_PRINTF2 (2, "elan3mmu_ptefind: l3ptbl %p l3pte %lx\n", l3ptbl, l3pte);
11148 +                
11149 +    switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags))
11150 +    {
11151 +    case LK_PTBL_OK:
11152 +       *level = 3;
11153 +       *plock = l3lock;
11154 +       *pptbl = l3ptbl;
11155 +       *flags = l3flags;
11156 +
11157 +       return (l3pte);
11158 +       
11159 +    case LK_PTBL_FAILED:
11160 +       panic ("elan3mmu_ptefind: l3 lock failed");
11161 +       /* NOTREACHED */
11162 +
11163 +    case LK_PTBL_MISMATCH:
11164 +       HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11165 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr);
11166 +                    
11167 +       /*
11168 +        * We've trogged down to this ptbl,  but someone has just
11169 +        * stolen it,  so try all over again.
11170 +        */
11171 +       goto retryl1;
11172 +       
11173 +    default:
11174 +       panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value");
11175 +       /* NOTREACHED */
11176 +    }
11177 +    /* NOTREACHED */
11178 +    return ((sdramaddr_t) 0);
11179 +}
11180 +
11181 +sdramaddr_t 
11182 +elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level)
11183 +{
11184 +    ELAN3_PTP tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp);
11185 +
11186 +    ASSERT (level != 3 && ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE);
11187 +
11188 +    return PTP_TO_PT_PADDR(tptp);
11189 +}
11190 +
11191 +sdramaddr_t
11192 +elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr addr, int level, 
11193 +                  ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags)
11194 +{
11195 +    ELAN3_DEV   *dev     = elan3mmu->elan3mmu_dev;
11196 +    ELAN3_PTBL  *l1ptbl;
11197 +    ELAN3_PTBL  *lXptbl;
11198 +    int         idx;
11199 +    sdramaddr_t l1ptp;
11200 +    ELAN3_PTP    tl1ptp;
11201 +    E3_Addr     l1base;
11202 +    spinlock_t *l1lock;
11203 +    ELAN3_PTBL  *l2ptbl;
11204 +    sdramaddr_t l2ptp;
11205 +    ELAN3_PTP    tl2ptp;
11206 +    E3_Addr     l2base;
11207 +    spinlock_t *l2lock;
11208 +    ELAN3_PTBL  *l3ptbl;
11209 +    sdramaddr_t l3pte;
11210 +    E3_Addr     l3base;
11211 +    spinlock_t *l3lock;
11212 +
11213 +    unsigned long l1flags;
11214 +    unsigned long l2flags;
11215 +    unsigned long l3flags;
11216 +
11217 +    HAT_PRINTF2 (2, "elan3mmu_ptealloc: elan3mmu %p addr %x\n", elan3mmu, addr);
11218 +
11219 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
11220 +    if (l1ptbl == NULL)
11221 +       return ((sdramaddr_t) 0);
11222 +
11223 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11224 +    l1base = ELAN3_L1_BASE(addr);
11225 +               
11226 +retryl1:
11227 +    tl1ptp = elan3_readptp (dev, l1ptp);
11228 +
11229 +    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l1ptbl %p 1ptp %lx l1base %x (%x) : tl1ptp %x\n",
11230 +                l1ptbl, l1ptp, l1base, l1ptbl->ptbl_base, tl1ptp);
11231 +
11232 +    switch (ELAN3_PTP_TYPE(tl1ptp))
11233 +    {
11234 +    case ELAN3_ET_PTE:
11235 +       if (level == PTBL_LEVEL_1)
11236 +       {
11237 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11238 +
11239 +           tl1ptp = elan3_readptp (dev, l1ptp);
11240 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE)
11241 +           {
11242 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11243 +               goto retryl1;
11244 +           }
11245 +           
11246 +           *pptbl = l1ptbl;
11247 +           *plock = l1lock;
11248 +           *flags = l1flags;
11249 +
11250 +           /* return holding l1lock */
11251 +           return (l1ptp);
11252 +       }
11253 +       panic ("elan3mmu_ptealloc: found pte in level 1 page table");
11254 +       /* NOTREACHED */
11255 +
11256 +    case ELAN3_ET_PTP:
11257 +       if (level == PTBL_LEVEL_1)
11258 +           panic ("elan3mmu_ptealloc: found PTP when loading a level 1 PTE\n");
11259 +       break;
11260 +
11261 +    case ELAN3_ET_INVALID:
11262 +       if (level == PTBL_LEVEL_1)
11263 +       {
11264 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu,  &idx)) == NULL)
11265 +               return ((sdramaddr_t) 0);
11266 +
11267 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11268 +
11269 +           tl1ptp = elan3_readptp (dev, l1ptp);
11270 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
11271 +           {
11272 +               /* raced with someone else, who's got there first */
11273 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
11274 +
11275 +               /* drop the l1lock and retry */
11276 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11277 +               goto retryl1;
11278 +           }
11279 +           
11280 +           tl1ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
11281 +           
11282 +           elan3_writeptp (dev, l1ptp, tl1ptp);
11283 +
11284 +           *pptbl = l1ptbl;
11285 +           *plock = l1lock;
11286 +           *flags = l1flags;
11287 +
11288 +           /* return holding l1lock */
11289 +           return (l1ptp);
11290 +       }
11291 +
11292 +       if (level == PTBL_LEVEL_2)
11293 +       {
11294 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
11295 +               return ((sdramaddr_t) 0);
11296 +
11297 +           if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL)
11298 +           {
11299 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx); 
11300 +               return ((sdramaddr_t) 0);
11301 +           }
11302 +
11303 +           /* Connect l2ptbl to the new LX pte */
11304 +           l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
11305 +           tl2ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
11306 +
11307 +           elan3_writeptp (dev, l2ptp, tl2ptp);
11308 +
11309 +           /* Now need to lock the l1 ptbl */
11310 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11311 +
11312 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11313 +           elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags);
11314 +
11315 +           tl1ptp = elan3_readptp (dev, l1ptp);
11316 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
11317 +           {
11318 +               HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free l2 ptbl/lx pte\n");
11319 +               
11320 +               tl2ptp = ELAN3_INVALID_PTP;
11321 +               elan3_writeptp (dev, l2ptp, tl2ptp);
11322 +               
11323 +               HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11324 +               HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11325 +               
11326 +               elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags);
11327 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
11328 +
11329 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11330 +
11331 +               goto retryl1;
11332 +           }
11333 +           
11334 +           /* Now have L1 locked,  so install the L2 ptbl */
11335 +           l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11336 +           tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP;
11337 +           l1ptbl->ptbl_valid++;
11338 +
11339 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
11340 +                        PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11341 +           
11342 +           elan3_writeptp (dev, l1ptp, tl1ptp);
11343 +           
11344 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp);
11345 +
11346 +           /* unordered unlock - lock l1ptbl, lock l2ptbl, unlock l1ptbl */
11347 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l2flags); /* need to unlock with the l2flags to keep irq order correct */
11348 +
11349 +           *pptbl = l2ptbl;
11350 +           *plock = l2lock;
11351 +           *flags = l1flags; /* return the l1flags here as we have released the l2flags already to keep order */
11352 +
11353 +           /* return holding l2lock */
11354 +           return (l2ptp);
11355 +       }
11356 +
11357 +       HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocating level 2 and level 3 page tables\n");
11358 +
11359 +       /* Allocate a level 2 and level 3 page table and link them together */
11360 +       if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL)
11361 +           return ((sdramaddr_t) 0);
11362 +
11363 +       if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr | PTE_NO_SLEEP, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL)
11364 +       {
11365 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11366 +           return ((sdramaddr_t) 0);
11367 +       }
11368 +
11369 +       ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags));
11370 +       ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2);
11371 +       ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags));
11372 +       ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3);
11373 +
11374 +       HAT_PRINTF6 (2, "elan3mmu_ptealloc: l2ptbl %p (%x,%x) l3ptbl %p (%x,%x)\n",
11375 +                    l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_base,
11376 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_base);
11377 +
11378 +       if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt))
11379 +       {
11380 +           l2ptbl->ptbl_flags |= PTBL_KERNEL;
11381 +           elan3mmu_kernel_l3ptbl (l3ptbl);
11382 +       }
11383 +       
11384 +       /*
11385 +        * Connect L3 ptbl to the new L2 ptbl.
11386 +        */
11387 +       l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
11388 +       tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP;
11389 +
11390 +       l2ptbl->ptbl_valid = 1;
11391 +
11392 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: set valid for level %d ptbl %p to %d\n", 
11393 +                    PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11394 +
11395 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11396 +
11397 +       elan3_writeptp (dev, l2ptp, tl2ptp);
11398 +
11399 +       /* 
11400 +        * Now need to lock the l1 ptbl - to maintain lock ordering
11401 +        * we set the PTBL_KEEP bit to stop the l3 ptbl from being 
11402 +        * stolen and drop the locks in the order we acquired them
11403 +        */
11404 +       l3ptbl->ptbl_flags |= PTBL_KEEP;
11405 +
11406 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
11407 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11408 +
11409 +       elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11410 +       elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
11411 +           
11412 +       l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11413 +          
11414 +       /* Now have l1 and l3 ptbls locked,  so install the new l2 ptbl into the l1. */
11415 +       tl1ptp = elan3_readptp (dev, l1ptp);
11416 +
11417 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: l1ptp %lx is %x\n", l1ptp, tl1ptp);
11418 +
11419 +       if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
11420 +       {
11421 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free l2/l3 ptbls\n");
11422 +
11423 +           /* free off the level 3 page table */
11424 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l3 ptbl %p (%x)\n", l3ptbl, l3ptbl->ptbl_flags);
11425 +
11426 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11427 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
11428 +
11429 +           /* and unlock the level 1 ptbl */
11430 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11431 +           
11432 +           /* lock the level 2 page table, and clear out the PTP, then free it */
11433 +           (void) elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags);
11434 +
11435 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: locked l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11436 +           
11437 +           tl2ptp = ELAN3_INVALID_PTP;
11438 +           elan3_writeptp (dev, l2ptp, tl2ptp);
11439 +           l2ptbl->ptbl_valid = 0;
11440 +
11441 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: set to 0 valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
11442 +
11443 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11444 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11445 +
11446 +           elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags);
11447 +
11448 +           goto retryl1;
11449 +       }
11450 +       
11451 +       HAT_PRINTF4 (2, "elan3mmu_ptealloc: l1ptbl is %p (%x), l3ptbl is %p (%x)\n", 
11452 +                    l1ptbl, l1ptbl->ptbl_flags, l3ptbl, l3ptbl->ptbl_flags);
11453 +
11454 +       /* Now have L1 and L3 locked,  so install the L2 ptbl */
11455 +       l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11456 +       tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP;
11457 +       l1ptbl->ptbl_valid++;
11458 +
11459 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
11460 +                    PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11461 +
11462 +       elan3_writeptp (dev, l1ptp, tl1ptp);
11463 +
11464 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp);
11465 +
11466 +       /* unordered unlock - lock l1ptbl, lock l3ptbl, unlock l1ptbl */
11467 +       elan3mmu_unlock_ptbl (l1ptbl, l1lock, l3flags); /* free using l3flags to keep irq ordering */
11468 +
11469 +       l3pte = PTBL_TO_PTADDR (l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11470 +
11471 +       /* Level 3 ptbl is already locked,  so just return the pte */
11472 +       *pptbl = l3ptbl;
11473 +       *plock = l3lock;
11474 +       *flags = l1flags; /* return l1flags to keep irq ordering */
11475 +
11476 +       return (l3pte);
11477 +
11478 +    default:
11479 +       panic ("elan3mmu_ptealloc: found bad entry in level 1 page table");
11480 +       /* NOTREACHED */
11481 +    }
11482 +
11483 +    HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 2 ptbl from ptp %x\n", tl1ptp);
11484 +
11485 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11486 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11487 +    l2base = ELAN3_L2_BASE(addr);
11488 +
11489 +    tl2ptp = elan3_readptp (dev, l2ptp);
11490 +
11491 +    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l2ptbl %p l2ptp %lx l2base %x (%x) : tl2ptp %x\n",
11492 +                l2ptbl, l2ptp, l2base, l2ptbl->ptbl_base, tl2ptp);
11493 +
11494 +    switch (ELAN3_PTP_TYPE(tl2ptp))
11495 +    {
11496 +    case ELAN3_ET_PTE:
11497 +       if (level == PTBL_LEVEL_2) {
11498 +           /* this is a pointer to a pte,  we should just return it */
11499 +
11500 +           switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11501 +           {
11502 +           case LK_PTBL_OK:
11503 +               break;
11504 +       
11505 +           case LK_PTBL_FAILED:
11506 +               panic ("elan3mmu_ptealloc: l2 lock failed");
11507 +               /* NOTREACHED */
11508 +               
11509 +           case LK_PTBL_MISMATCH:
11510 +               HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11511 +                            l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11512 +               
11513 +               /*
11514 +                * We've trogged down to this ptbl,  but someone has just
11515 +                * stolen it,  so try all over again.
11516 +                */
11517 +               goto retryl1;
11518 +               
11519 +           default:
11520 +               panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
11521 +               /* NOTREACHED */
11522 +           }
11523 +
11524 +
11525 +           tl2ptp = elan3_readptp (dev, l2ptp);
11526 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE)
11527 +           {
11528 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11529 +               goto retryl1;
11530 +           }
11531 +
11532 +           *pptbl = l2ptbl;
11533 +           *plock = l2lock;
11534 +           *flags = l2flags;
11535 +
11536 +           /* return holding l2lock */
11537 +           return (l2ptp);
11538 +       }
11539 +       panic ("elan3mmu: found pte in level 2 page table");
11540 +       /* NOTREACHED */
11541 +
11542 +    case ELAN3_ET_PTP:
11543 +       break;
11544 +
11545 +    case ELAN3_ET_INVALID:
11546 +       if (level == PTBL_LEVEL_2) 
11547 +       {
11548 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
11549 +               return ((sdramaddr_t) 0);
11550 +
11551 +           switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11552 +           {
11553 +           case LK_PTBL_OK:
11554 +               break;
11555 +       
11556 +           case LK_PTBL_FAILED:
11557 +               panic ("elan3mmu_ptealloc: l2 lock failed");
11558 +               /* NOTREACHED */
11559 +               
11560 +           case LK_PTBL_MISMATCH:
11561 +               HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11562 +                            l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11563 +               
11564 +               /*
11565 +                * We've trogged down to this ptbl,  but someone has just
11566 +                * stolen it,  so try all over again.
11567 +                */
11568 +               goto retryl1;
11569 +               
11570 +           default:
11571 +               panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
11572 +               /* NOTREACHED */
11573 +           }
11574 +
11575 +           tl2ptp = elan3_readptp (dev, l2ptp);
11576 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID)
11577 +           {
11578 +               HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free lx pte\n");
11579 +
11580 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
11581 +
11582 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11583 +               goto retryl1;
11584 +           }
11585 +
11586 +           /* Connect l2ptbl to the new LX pte */
11587 +           tl2ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
11588 +                  
11589 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
11590 +                        PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11591 +           
11592 +           elan3_writeptp (dev, l2ptp, tl2ptp);
11593 +           
11594 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l2ptp %lx to %x\n", l2ptp, tl2ptp);
11595 +
11596 +           *pptbl = l2ptbl;
11597 +           *plock = l2lock;
11598 +           *flags = l2flags;
11599 +
11600 +           /* return holding l2lock */
11601 +           return (l2ptp);
11602 +       }
11603 +       HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocate level 3 page table\n");
11604 +
11605 +       if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL)
11606 +           return ((sdramaddr_t) 0);
11607 +
11608 +       if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt))
11609 +           elan3mmu_kernel_l3ptbl (l3ptbl);
11610 +
11611 +       /* 
11612 +        * Now need to lock the l2 ptbl - to maintain lock ordering
11613 +        * we set the PTBL_KEEP bit to stop the l3 ptbl from being 
11614 +        * stolen and drop the locks in the order we aquired them
11615 +        */
11616 +       l3ptbl->ptbl_flags |= PTBL_KEEP;
11617 +
11618 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
11619 +
11620 +       if (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_MISMATCH)
11621 +       {
11622 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: l2ptbl freed, free l3 ptbl and try again\n");
11623 +             
11624 +           elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
11625 +
11626 +           /* free off the level 3 page table, and try again */
11627 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11628 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
11629 +           
11630 +           goto retryl1;
11631 +       }
11632 +
11633 +       elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
11634 +
11635 +       l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11636 +
11637 +       /* Now have L2 and L3 ptbls locked, see if someone has beaten us to it. */
11638 +       tl2ptp = elan3_readptp (dev, l2ptp);
11639 +
11640 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: l2ptp at %lx is %x\n", l2ptp, tl2ptp);
11641 +
11642 +       if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID)
11643 +       {
11644 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it, free l3 ptbl and try again\n");
11645 +
11646 +           /* free off the level 3 page table, and try again */
11647 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11648 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
11649 +           
11650 +           /* Someone has allocated the ptbl before us */
11651 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11652 +           
11653 +           goto retryl1;
11654 +       }
11655 +
11656 +       ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags));
11657 +
11658 +       /* Install the L3 ptbl into the L2 one */
11659 +       l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11660 +       tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP;
11661 +       l2ptbl->ptbl_valid++;
11662 +
11663 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n",
11664 +                    PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11665 +
11666 +       elan3_writeptp (dev, l2ptp, tl2ptp);
11667 +
11668 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11669 +
11670 +       /* unordered unlock - lock l2ptbl, lock l3ptbl, unlock l2ptbl */
11671 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l3flags); /* free with the l3flags to keep irq ordering */
11672 +
11673 +       l3pte = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11674 +       
11675 +       /* Level 3 ptbl is already locked, so just return the pte */
11676 +       *pptbl = l3ptbl;
11677 +       *plock = l3lock;
11678 +       *flags = l2flags; /* return l2flags to keep irq ordering */
11679 +
11680 +       return (l3pte);
11681 +
11682 +    default:
11683 +       panic ("elan3mmu_ptealloc: found bad entry in level 2 page table");
11684 +       /* NOTREACHED */
11685 +    }
11686 +
11687 +    HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 3 page table from ptp %x\n", tl2ptp);
11688 +
11689 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11690 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11691 +    l3base = ELAN3_L3_BASE(addr);
11692 +
11693 +    HAT_PRINTF4 (2, "elan3mmu_ptealloc: l3ptbl %p 3pte %lx l3base %x (%x)\n",
11694 +                l3ptbl, l3pte, l3base, l3ptbl->ptbl_base);
11695 +                
11696 +    if (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags) == LK_PTBL_OK)
11697 +    {
11698 +       *pptbl = l3ptbl;
11699 +       *plock = l3lock;
11700 +       *flags = l3flags;
11701 +
11702 +       return (l3pte);
11703 +    }
11704 +
11705 +    /* got all the way down here,  but its been nicked before we could lock it */
11706 +    /* so try all over again */
11707 +    goto retryl1;
11708 +}
11709 +
11710 +void
11711 +elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int attr)
11712 +{
11713 +    ELAN3_DEV     *dev = elan3mmu->elan3mmu_dev;
11714 +    ELAN3_PTP      invalidptp = ELAN3_INVALID_PTP;
11715 +    ELAN3_PTP      tl1ptp;
11716 +    sdramaddr_t   l1ptp;
11717 +    E3_Addr       addr;
11718 +    spinlock_t   *l2lock;
11719 +    ELAN3_PTBL    *l2ptbl;
11720 +    ELAN3_PTBL    *lXptbl;
11721 +    int           idx;
11722 +    int                  i;
11723 +    int                  ret;
11724 +    unsigned long flags;
11725 +
11726 +    l1ptp = PTBL_TO_PTADDR(l1ptbl);
11727 +
11728 +    HAT_PRINTF2 (1, "elan3mmu_l1inval: l1ptbl %p l1ptp %lx\n", l1ptbl, l1ptp);
11729 +
11730 +    for (i = 0, addr = 0; i < ELAN3_L1_ENTRIES; i++, l1ptp += ELAN3_PTP_SIZE)
11731 +    {
11732 +       tl1ptp = elan3_readptp (dev, l1ptp);
11733 +       switch (ELAN3_PTP_TYPE(tl1ptp))
11734 +       {
11735 +       case ELAN3_ET_PTE:
11736 +           lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11737 +           idx    = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
11738 +
11739 +           HAT_PRINTF3 (2, "elan3mmu_l1inval: l1ptbl %p : lXptbl %p idx %d\n",
11740 +                        l1ptbl, lXptbl, idx);
11741 +
11742 +           /* invalidate the L1 pte. */
11743 +           elan3_writeptp (dev, l1ptp, invalidptp);
11744 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11745 +               ElanFlushTlb (dev);         
11746 +
11747 +           l1ptbl->ptbl_valid--;
11748 +           elan3mmu_free_pte ( dev, elan3mmu,  lXptbl, idx); 
11749 +
11750 +           HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n",
11751 +                    PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11752 +           
11753 +           break;
11754 +
11755 +       case ELAN3_ET_PTP:
11756 +           HAT_PRINTF5 (2, "elan3mmu_l1inval: l1ptbl %p : ptp %lx (%x) addr %x (%d)\n",
11757 +                        l1ptbl, l1ptp, tl1ptp, addr, i);
11758 +
11759 +           /* invalidate the L1 ptp. */
11760 +           elan3_writeptp (dev, l1ptp, invalidptp);
11761 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11762 +               ElanFlushTlb (dev);
11763 +
11764 +           /* invalidate the level 2 page table */
11765 +           l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11766 +           ret    = elan3mmu_l2inval (elan3mmu, l2ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l2lock, &flags);
11767 +
11768 +           ASSERT ((l2ptbl->ptbl_flags & PTBL_KEEP) == 0);
11769 +           
11770 +           if (ret == LK_PTBL_OK)
11771 +           {
11772 +               if (((l2ptbl->ptbl_flags & PTBL_KEEP) == 0) && l2ptbl->ptbl_valid == 0)
11773 +               {
11774 +                   HAT_PRINTF1 (2, "elan3mmu_l1inval: free l2ptbl %p\n", l2ptbl);
11775 +                   
11776 +                   l1ptbl->ptbl_valid--;
11777 +                   elan3mmu_free_l2ptbl (elan3mmu->elan3mmu_dev, l2ptbl, l2lock, flags);
11778 +
11779 +                   HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n", 
11780 +                                PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11781 +               }
11782 +               else
11783 +               {
11784 +                   /* need to keep this page table,  so even though its now empty, */
11785 +                   /* chain it back in */
11786 +                   HAT_PRINTF1 (2, "elan3mmu_l1inval: keep l2ptbl %p\n", l2ptbl);
11787 +
11788 +                   elan3_writeptp (dev, l1ptp, tl1ptp);
11789 +                   elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
11790 +               }    
11791 +           }
11792 +           else
11793 +           {
11794 +               l1ptbl->ptbl_valid--;
11795 +
11796 +               HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n", 
11797 +                            PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11798 +           }
11799 +           break;
11800 +           
11801 +       case ELAN3_ET_INVALID:
11802 +           break;
11803 +
11804 +       default:
11805 +           panic ("elan3mmu_l1inval: found invalid entry in level 1 page table");
11806 +           /* NOTREACHED */
11807 +       }
11808 +
11809 +       if (l1ptbl->ptbl_valid == 0)
11810 +           break;
11811 +
11812 +       addr += ELAN3_L1_SIZE;
11813 +    }
11814 +}
11815 +
11816 +int
11817 +elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int attr, E3_Addr addr, spinlock_t **pl2lock, unsigned long *flags)
11818 +{
11819 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
11820 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
11821 +    ELAN3_PTP    tl2ptp;
11822 +    sdramaddr_t l2ptp;
11823 +    spinlock_t *l3lock;
11824 +    unsigned long l3flags;
11825 +    ELAN3_PTBL  *l3ptbl;
11826 +    ELAN3_PTBL  *lXptbl;
11827 +    int         idx;
11828 +    int                i;
11829 +    int                ret;
11830 +
11831 +    HAT_PRINTF2 (1, "elan3mmu_l2inval: l2ptbl %p addr %x\n", l2ptbl, addr);
11832 +
11833 +    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2);
11834 +    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_1);
11835 +
11836 +    ret = elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, pl2lock, flags);
11837 +
11838 +    ASSERT (ret == LK_PTBL_OK);
11839 +    ASSERT (l2ptbl->ptbl_elan3mmu == elan3mmu);
11840 +    ASSERT (l2ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
11841 +
11842 +    l2ptp = PTBL_TO_PTADDR(l2ptbl);
11843 +
11844 +    for (i = 0; i < ELAN3_L2_ENTRIES; i++, l2ptp += ELAN3_PTP_SIZE)
11845 +    {
11846 +       tl2ptp = elan3_readptp (dev, l2ptp);
11847 +       switch (ELAN3_PTP_TYPE(tl2ptp))
11848 +       {
11849 +       case ELAN3_ET_PTE:
11850 +           lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11851 +           idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
11852 +
11853 +           HAT_PRINTF3 (2, "elan3mmu_l2inval: l2ptbl %p : lXptbl %p idx %d\n",
11854 +                        l2ptbl, lXptbl, idx);
11855 +
11856 +           /* invalidate the L2 pte. */
11857 +           elan3_writeptp (dev, l2ptp, invalidptp);
11858 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11859 +               ElanFlushTlb (dev);
11860 +
11861 +           l2ptbl->ptbl_valid--;
11862 +           elan3mmu_free_pte ( dev, elan3mmu, lXptbl, idx); 
11863 +
11864 +           HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
11865 +
11866 +           break;
11867 +           
11868 +       case ELAN3_ET_PTP:
11869 +           HAT_PRINTF5 (2, "elan3mmu_l2inval: l2ptbl %p : ptp %lx (%x) addr %x (%d)\n",
11870 +                        l2ptbl, l2ptp, tl2ptp, addr, i);
11871 +
11872 +           /* invalidate the L2 ptp. */
11873 +           elan3_writeptp (dev, l2ptp, invalidptp);
11874 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
11875 +               ElanFlushTlb (dev);
11876 +           
11877 +           /* unload the level 3 page table */
11878 +           l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11879 +           ret = elan3mmu_l3inval (elan3mmu, l3ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l3lock, &l3flags);
11880 +           
11881 +           if (ret == LK_PTBL_OK)
11882 +           {
11883 +               if ((l3ptbl->ptbl_flags & PTBL_KEEP) == 0 && l3ptbl->ptbl_valid == 0)
11884 +               {
11885 +                   /* decrement the valid count of the level 2 page table, and */
11886 +                   /* free off the level 3 page table */
11887 +                   HAT_PRINTF1 (2, "elan3mmu_l2inval: free l3ptbl %p\n", l3ptbl);
11888 +
11889 +                   l2ptbl->ptbl_valid--;
11890 +                   elan3mmu_free_l3ptbl (elan3mmu->elan3mmu_dev, l3ptbl, l3lock, l3flags);
11891 +
11892 +                   HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", 
11893 +                                PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11894 +               }
11895 +               else
11896 +               {
11897 +                   /* need to keep this page table,  so even though its now empty, */
11898 +                   /* chain it back in */
11899 +                   HAT_PRINTF1 (2, "elan3mmu_l2inval: keep l3ptbl %p\n", l3ptbl);
11900 +
11901 +                   elan3_writeptp (dev, l2ptp, tl2ptp);
11902 +                   elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
11903 +               }
11904 +           }
11905 +           else
11906 +           {
11907 +               l2ptbl->ptbl_valid--;
11908 +
11909 +               HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", 
11910 +                            PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11911 +           }
11912 +           break;
11913 +           
11914 +       case ELAN3_ET_INVALID:
11915 +           break;
11916 +
11917 +       default:
11918 +           panic ("elan3mmu_l2inval: found pte in level 2 page table");
11919 +           /* NOTREACHED */
11920 +       }
11921 +
11922 +       if (l2ptbl->ptbl_valid == 0)
11923 +           break;
11924 +
11925 +       addr += ELAN3_L2_SIZE;
11926 +    }
11927 +
11928 +    ASSERT (PTBL_IS_LOCKED(l2ptbl->ptbl_flags));
11929 +
11930 +    return (ret);
11931 +}
11932 +
11933 +int 
11934 +elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int attr, E3_Addr addr, spinlock_t **pl3lock, unsigned long *flags)
11935 +{
11936 +    int ret;
11937 +
11938 +    HAT_PRINTF3 (2, "elan3mmu_l3inval: l3ptbl %p parent %p addr %x\n", l3ptbl, l3ptbl->ptbl_parent, addr);
11939 +
11940 +    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_parent->ptbl_flags));
11941 +    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_2);
11942 +    ASSERT (l3ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
11943 +    ASSERT (l3ptbl->ptbl_parent->ptbl_base == VA2BASE (ELAN3_L2_BASE(addr)));
11944 +    
11945 +    ret = elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, pl3lock, flags);
11946 +
11947 +    ASSERT (ret == LK_PTBL_OK);
11948 +    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3);
11949 +
11950 +    elan3mmu_unload_loop (elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, attr);
11951 +
11952 +    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags));
11953 +
11954 +    return (ret);
11955 + }
11956 +
11957 +int
11958 +elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags)
11959 +{
11960 +    int         level = PTBL_LEVEL (ptbl->ptbl_flags);
11961 +    spinlock_t *lock  = elan3mmu_ptbl_to_lock (level, ptbl);
11962 +
11963 +    local_irq_save (*flags);
11964 +
11965 +    if ((flag & LK_PTBL_NOWAIT) == 0)
11966 +       spin_lock (lock);
11967 +    else if (! spin_trylock (lock)) {
11968 +       local_irq_restore (*flags);
11969 +       return (LK_PTBL_FAILED);
11970 +    }
11971 +    
11972 +    if (level != PTBL_LEVEL (ptbl->ptbl_flags))
11973 +    {
11974 +       spin_unlock (lock);     
11975 +       local_irq_restore (*flags);
11976 +       return (LK_PTBL_MISMATCH);
11977 +    }
11978 +
11979 +    ptbl->ptbl_flags |= PTBL_LOCKED;
11980 +    *plock = lock;
11981 +    return (LK_PTBL_OK);
11982 +}
11983 +
11984 +int
11985 +elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags)
11986 +{
11987 +    spinlock_t *lock = elan3mmu_ptbl_to_lock (level, ptbl);
11988 +    int         res  = LK_PTBL_MISMATCH;
11989 +
11990 +    local_irq_save (*flags);
11991 +    
11992 +    if ((flag & LK_PTBL_NOWAIT) == 0)
11993 +       spin_lock (lock);
11994 +    else if (spin_trylock (lock) == 0) {
11995 +       local_irq_restore(*flags);
11996 +       return (LK_PTBL_FAILED);
11997 +    }
11998 +    
11999 +    if (PTBL_LEVEL (ptbl->ptbl_flags) != level)
12000 +    {
12001 +       res = LK_PTBL_MISMATCH;
12002 +       goto mismatch;
12003 +    }
12004 +    
12005 +    /* We have the right mutex,  so check that its the ptbl we want. */
12006 +    switch (level)
12007 +    {
12008 +    case PTBL_LEVEL_1: va = ELAN3_L1_BASE(va); break;
12009 +    case PTBL_LEVEL_2: va = ELAN3_L2_BASE(va); break;
12010 +    case PTBL_LEVEL_3: va = ELAN3_L3_BASE(va); break;
12011 +    }
12012 +
12013 +    if (ptbl->ptbl_elan3mmu != elan3mmu || ptbl->ptbl_base != VA2BASE(va))
12014 +    {
12015 +       res = LK_PTBL_MISMATCH;
12016 +       goto mismatch;
12017 +    }
12018 +
12019 +    ASSERT ((ptbl->ptbl_flags & PTBL_LOCKED) == 0);
12020 +    ptbl->ptbl_flags |= PTBL_LOCKED;
12021 +
12022 +    *plock = lock;
12023 +    return (LK_PTBL_OK);
12024 +
12025 +mismatch:
12026 +    if (! (flag & LK_PTBL_FAILOK))
12027 +       panic ("elan3mmu: failed to lock ptbl\n");
12028 +       
12029 +    spin_unlock (lock);
12030 +    local_irq_restore(*flags);
12031 +    return (res);
12032 +}
12033 +
12034 +void
12035 +elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
12036 +{
12037 +    ptbl->ptbl_flags &= ~PTBL_LOCKED;
12038 +    spin_unlock_irqrestore (lock,flags);
12039 +}
12040 +
12041 +static spinlock_t *
12042 +elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl)
12043 +{
12044 +    switch (level)
12045 +    {
12046 +    case PTBL_LEVEL_3: return (&l3ptbl_lock[L3PTBL_MTX_HASH(ptbl)]);
12047 +    case PTBL_LEVEL_2: return (&l2ptbl_lock[L2PTBL_MTX_HASH(ptbl)]);
12048 +    case PTBL_LEVEL_1: return (&l1ptbl_lock[L1PTBL_MTX_HASH(ptbl)]);
12049 +    case PTBL_LEVEL_X: 
12050 +       panic ("elan3mmu: ptbl_to_lock, bad level X");
12051 +    default:
12052 +       panic ("elan3mmu: ptbl_to_lock, bad level");
12053 +       /* NOTREACHED */
12054 +    }
12055 +    return (NULL);
12056 +}
12057 +
12058 +void
12059 +elan3mmu_display (ELAN3MMU *elan3mmu, E3_Addr addr)
12060 +{
12061 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
12062 +    ELAN3_PTBL  *l1ptbl;
12063 +    sdramaddr_t l1ptp;
12064 +    spinlock_t *l1lock;
12065 +    ELAN3_PTE    tl1pte;
12066 +    ELAN3_PTP    tl1ptp;
12067 +    E3_Addr     l1base;
12068 +    ELAN3_PTBL  *l2ptbl;
12069 +    sdramaddr_t l2ptp;
12070 +    ELAN3_PTE    tl2pte;
12071 +    spinlock_t *l2lock;
12072 +    ELAN3_PTP    tl2ptp;
12073 +    E3_Addr     l2base;
12074 +    ELAN3_PTBL  *l3ptbl;
12075 +    sdramaddr_t l3pte;
12076 +    ELAN3_PTE    tl3pte;
12077 +    spinlock_t *l3lock;
12078 +    ELAN3_PTBL  *lXptbl;
12079 +    int         idx;
12080 +    unsigned long flags;
12081 +
12082 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: elan3mmu %p addr %x\n", elan3mmu, addr);
12083 +
12084 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
12085 +
12086 +    if (l1ptbl == NULL)
12087 +       return;
12088 +
12089 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
12090 +    l1base = ELAN3_L1_BASE(addr);
12091 +    
12092 +    tl1ptp = elan3_readptp (dev, l1ptp);
12093 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp);
12094 +    
12095 +    switch (ELAN3_PTP_TYPE(tl1ptp))
12096 +    {
12097 +    case ELAN3_ET_PTE:
12098 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 1 page table for pte %x\n", tl1ptp);
12099 +    
12100 +       lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
12101 +       idx    = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
12102 +       
12103 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx %d\n",lXptbl, idx);
12104 +
12105 +       tl1pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
12106 +
12107 +       switch (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &flags))
12108 +       {
12109 +       case LK_PTBL_OK:
12110 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, flags);
12111 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 1 l1pte matches value %llx\n", (long long) tl1pte);
12112 +           break;
12113 +           
12114 +       case LK_PTBL_FAILED:
12115 +           panic ("elan3mmu_display: l1 lock failed");
12116 +           /* NOTREACHED */
12117 +           
12118 +       case LK_PTBL_MISMATCH:
12119 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 1 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
12120 +                         l1ptbl, l1ptbl->ptbl_flags, l1ptbl->ptbl_elan3mmu, l1ptbl->ptbl_base, elan3mmu, addr, (long long)tl1pte);
12121 +           
12122 +           break;
12123 +       default:
12124 +           panic ("elan3mmu_display: lvl 1 elan3mmu_lock_ptbl returned bad value");
12125 +           /* NOTREACHED */
12126 +       }
12127 +       return;
12128 +       
12129 +    case ELAN3_ET_INVALID:
12130 +       return;
12131 +       
12132 +    case ELAN3_ET_PTP:
12133 +       break;
12134 +       
12135 +    default:
12136 +       panic ("elan3mmu_display: found bad entry in level 1 page table");
12137 +       /* NOTREACHED */
12138 +    }
12139 +    
12140 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 2 ptbl from ptp %x\n", tl1ptp);
12141 +    
12142 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
12143 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
12144 +    l2base = ELAN3_L2_BASE(addr);
12145 +    
12146 +    tl2ptp = elan3_readptp (dev, l2ptp);
12147 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n",
12148 +                l2ptbl, l2ptp, l2base, tl2ptp);
12149 +    
12150 +    switch (ELAN3_PTP_TYPE(tl2ptp))
12151 +    {
12152 +    case ELAN3_ET_PTE:
12153 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 2 page table for pte %x\n", tl2ptp);
12154 +    
12155 +       lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
12156 +       idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
12157 +       
12158 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx %d\n",lXptbl, idx);
12159 +
12160 +       tl2pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
12161 +
12162 +       switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &flags))
12163 +       {
12164 +       case LK_PTBL_OK:
12165 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
12166 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 2 l1pte matches value %llx\n", (long long)tl2pte);
12167 +           break;
12168 +           
12169 +       case LK_PTBL_FAILED:
12170 +           panic ("elan3mmu_display: l2 lock failed");
12171 +           /* NOTREACHED */
12172 +           
12173 +       case LK_PTBL_MISMATCH:
12174 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 2 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
12175 +                         l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr, (long long) tl2pte);
12176 +           
12177 +           break;
12178 +       default:
12179 +           panic ("elan3mmu_display: lvl 2 elan3mmu_lock_ptbl returned bad value");
12180 +           /* NOTREACHED */
12181 +       }
12182 +       return;
12183 +       
12184 +    case ELAN3_ET_INVALID:
12185 +       return;
12186 +       
12187 +    case ELAN3_ET_PTP:
12188 +       break;
12189 +
12190 +    default:
12191 +       panic ("elan3mmu_display: found bad entry in level 2 page table");
12192 +       /* NOTREACHED */
12193 +    }
12194 +    
12195 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 3 page table from ptp %x\n", tl2ptp);
12196 +    
12197 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
12198 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
12199 +    
12200 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l3ptbl %p l3pte %lx\n",l3ptbl, l3pte);
12201 +    
12202 +    tl3pte = elan3_readpte (dev, l3pte);
12203 +    switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &flags))
12204 +    {
12205 +    case LK_PTBL_OK:
12206 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, flags);
12207 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l3pte matches value %llx\n", (long long) tl3pte);
12208 +       break;
12209 +       
12210 +    case LK_PTBL_FAILED:
12211 +       panic ("elan3mmu_display: l3 lock failed");
12212 +       /* NOTREACHED */
12213 +       
12214 +    case LK_PTBL_MISMATCH:
12215 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
12216 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr, (long long) tl3pte);
12217 +       
12218 +       break;
12219 +       
12220 +    default:
12221 +       panic ("elan3mmu_display: elan3mmu_lock_ptbl returned bad value");
12222 +       /* NOTREACHED */
12223 +    }
12224 +}
12225 +
12226 +
12227 +/*
12228 + * Local variables:
12229 + * c-file-style: "stroustrup"
12230 + * End:
12231 + */
12232 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/elan3mmu_linux.c
12233 ===================================================================
12234 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/elan3mmu_linux.c     2004-02-23 16:02:56.000000000 -0500
12235 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/elan3mmu_linux.c  2005-07-28 14:52:52.795686792 -0400
12236 @@ -0,0 +1,284 @@
12237 +/*
12238 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12239 + *
12240 + *    For licensing information please see the supplied COPYING file
12241 + *
12242 + */
12243 +
12244 +#ident "@(#)$Id: elan3mmu_linux.c,v 1.50.2.3 2004/12/14 10:19:51 mike Exp $"
12245 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_linux.c,v $*/
12246 +
12247 +#include <qsnet/kernel.h>
12248 +#include <qsnet/kpte.h>
12249 +
12250 +#include <elan3/elanregs.h>
12251 +#include <elan3/elandev.h>
12252 +#include <elan3/elanvp.h>
12253 +#include <elan3/elan3mmu.h>
12254 +#include <elan3/elanctxt.h>
12255 +#include <elan3/elandebug.h>
12256 +#include <elan3/urom_addrs.h>
12257 +#include <elan3/thread.h>
12258 +
12259 +/*
12260 + * Strategy for syncing main <-> elan pte's:
12261 + * 
12262 + *   Install callbacks for linux flush_tlb_page(), flush_tlb_range(),
12263 + *   flush_tlb_all(), and flush_tlb_mm() so when a main PTE changes,
12264 + *   the elan translations, if any, are invalidated.  They can then be
12265 + *   faulted in again with the correct physical page, perms, etc., on demand. 
12266 + * 
12267 + *   Callbacks are stacked on the mm_struct, one per context.  We also stack
12268 + *   a ctxt pointer so we don't have to do lookups on every call.
12269 + *
12270 + *   Sanity check -- we clearly want to flush the elan PTEs in these 
12271 + *   situations, all of which are covered by tlb_flush_{page,range}()
12272 + *
12273 + *     1) kernel/vmscan.c::try_to_swap_out() swaps out a page
12274 + *
12275 + *     2) kernel/mremap.c::copy_one_pte() moves a page as a result of the 
12276 + *     mremap system call
12277 + * 
12278 + *     3) kernel/mprotect.c::change_pte_range() changes the permissions of a 
12279 + *     page as the result of the mprotect system call
12280 + *
12281 + * Other Notes: 
12282 + * 
12283 + *   Dirty a page in the mains page tables when it is faulted into the elan.
12284 + *   This way it will not be thrown away by the swapper.
12285 + * 
12286 + *   Pages write protected for COW are copied by elan3mmu_main_pagefault()
12287 + *   when a writeable translation is loaded into the elan.
12288 + */
12289 +
12290 +caddr_t             elan3mmu_kernel_invalid_space;
12291 +ELAN3_PTE     elan3mmu_kernel_invalid_pte_val;
12292 +
12293 +void
12294 +elan3mmu_init_osdep (void)
12295 +{
12296 +    pte_t *pte;
12297 +
12298 +    KMEM_GETPAGES (elan3mmu_kernel_invalid_space, caddr_t, 1, TRUE);
12299 +
12300 +    ASSERT(elan3mmu_kernel_invalid_space != NULL);
12301 +
12302 +    pte = find_pte_kernel ((unsigned long) elan3mmu_kernel_invalid_space);
12303 +
12304 +    elan3mmu_kernel_invalid_pte_val = ELAN3_PTE_64_BIT | (pte_phys(*pte) & ELAN3_PTE_PFN_MASK) | ELAN3_PERM_REMOTEREAD | ELAN3_ET_PTE;
12305 +
12306 +#ifdef __alpha
12307 +    /*
12308 +     * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
12309 +     *       set any of bits 63:48, then we will set them all by setting bit 48.
12310 +     */
12311 +    if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
12312 +        elan3mmu_kernel_invalid_pte_val |= (1ull << 48);
12313 +    else
12314 +       elan3mmu_kernel_invalid_pte_val |= alpha_mv.pci_dac_offset;
12315 +#endif
12316 +
12317 +    HAT_PRINTF(0x10, "elan3mmu_invalid_space at %p phys=%llx pte=%llx\n", elan3mmu_kernel_invalid_space, 
12318 +              (unsigned long long) pte_phys(*pte), (unsigned long long) elan3mmu_kernel_invalid_pte_val);
12319 +}
12320 +
12321 +void
12322 +elan3mmu_fini_osdep()
12323 +{
12324 +    KMEM_FREEPAGES (elan3mmu_kernel_invalid_space, 1);
12325 +}
12326 +
12327 +void
12328 +elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu)
12329 +{
12330 +    elan3mmu->elan3mmu_coproc_mm = current->mm;
12331 +}
12332 +
12333 +/*
12334 + * Convert physical page frame number to elan pte.
12335 + */
12336 +ELAN3_PTE
12337 +elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm)
12338 +{
12339 +    ELAN3_PTE newpte;
12340 +    
12341 +    ASSERT (paddr != 0);
12342 +    
12343 +    if ((paddr & dev->SdramPhysMask) == dev->SdramPhysBase)            /* SDRAM, turn on PTE_LOCAL bit */
12344 +    {
12345 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx SDRAM\n", (unsigned long long) paddr);
12346 +       
12347 +       newpte = ELAN3_PTE_LOCAL | (paddr & ELAN3_PTE_PFN_MASK & ~dev->SdramPhysMask) | perm | ELAN3_ET_PTE;
12348 +    }
12349 +#if defined(LINUX_ALPHA)
12350 +    else if ((paddr & dev->PciPhysMask) == dev->PciPhysBase)
12351 +    {
12352 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx PCI\n", (unsigned long long) paddr);
12353 +       newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK & ~dev->PciPhysMask) | perm | ELAN3_ET_PTE;
12354 +    }
12355 +#endif
12356 +    else                                               /* main memory, must convert to PCI view */
12357 +    {
12358 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx is main memory\n", (unsigned long long) paddr);
12359 +
12360 +       /* main memory, just set the architecture specific PTE_BYPASS bit */
12361 +       /* This requires the Tsunami chipset being programmed to support
12362 +        * the monster window option. This is in linux-2.4.5 and later kernels 
12363 +        * and is also patched into the RH 7.1/2.4.3-12 Alpha kernel
12364 +        */
12365 +       newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK) | perm | ELAN3_ET_PTE;
12366 +
12367 +#ifdef __alpha
12368 +       /*
12369 +        * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
12370 +        *       set any of bits 63:48, then we will set them all by setting bit 48.
12371 +        */
12372 +       if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
12373 +            newpte |= (1ull << 48);
12374 +        else
12375 +           newpte |= alpha_mv.pci_dac_offset;
12376 +#endif
12377 +    }
12378 +
12379 +    if ( ELAN3_PERM_WRITEABLE( perm )) 
12380 +       newpte |= ( ELAN3_PTE_MOD | ELAN3_PTE_REF );
12381 +    else
12382 +       newpte |= ( ELAN3_PTE_REF ) ;
12383 +
12384 +    return (newpte);
12385 +}
12386 +
12387 +ELAN3_PTE
12388 +elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu)
12389 +{
12390 +    if (elan3mmu->elan3mmu_dev->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVB)
12391 +       return (elan3mmu_kernel_invalid_pte_val);
12392 +    return (ELAN3_INVALID_PTE);
12393 +}
12394 +
12395 +/* 
12396 + * Invalidate a range of addresses for specified context.
12397 + */
12398 +void
12399 +elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len)
12400 +{
12401 +    E3_Addr       eaddr;
12402 +    ELAN3MMU_RGN *rgn;
12403 +    unsigned long span;
12404 +
12405 +    spin_lock (&elan3mmu->elan3mmu_lock);
12406 +
12407 +    for (; len; len -= span, addr += span)
12408 +    {
12409 +       rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
12410 +
12411 +       if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr)
12412 +           span = len;
12413 +       else if (rgn->rgn_mbase > addr)
12414 +           span = MIN(len, rgn->rgn_mbase - addr);
12415 +       else
12416 +       {
12417 +           span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr);
12418 +           eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
12419 +
12420 +            HAT_PRINTF(0x10, "  unloading eaddr %x main %p (%ld pages)\n", 
12421 +             eaddr, addr, btopr(span));
12422 +           elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD);
12423 +       }                       /* takes care of elan tlb flush also */
12424 +    }
12425 +
12426 +    spin_unlock (&elan3mmu->elan3mmu_lock);
12427 +}
12428 +
12429 +/*
12430 + *
12431 + */
12432 +void
12433 +elan3mmu_update_range (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t vaddr, E3_Addr eaddr, u_int len, u_int perm)
12434 +{
12435 +    u_int roperm = ELAN3_PERM_READONLY(perm & ELAN3_PTE_PERM_MASK) | (perm & ~ELAN3_PTE_PERM_MASK);
12436 +    u_int off;
12437 +
12438 +    HAT_PRINTF3(1, "elan3mmu_update_range (elan3mmu %p addr %p -> %p)\n", elan3mmu, vaddr, vaddr+len-1);
12439 +
12440 +    while (len > 0)
12441 +    {
12442 +       pte_t *pte_ptr;
12443 +       pte_t  pte_value;
12444 +
12445 +       pte_ptr = find_pte_map(mm, (unsigned long)vaddr);
12446 +       if (pte_ptr) {
12447 +           pte_value = *pte_ptr;
12448 +           pte_unmap(pte_ptr);
12449 +       }
12450 +
12451 +       HAT_PRINTF(0x10, "  elan3mmu_update_range %x (%p) %s\n", eaddr, vaddr, 
12452 +               !pte_ptr ? "invalid" : pte_none(pte_value) ? "none " : !pte_present(pte_value) ? "swapped " : 
12453 +               !pte_write(pte_value) ? "RO/COW" : "OK");
12454 +       
12455 +       if (pte_ptr && !pte_none(pte_value) && pte_present(pte_value))
12456 +           for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE)
12457 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, eaddr + off, pte_phys(pte_value) + off, pte_write(pte_value) ? perm : roperm, PTE_LOAD|PTE_NO_SLEEP|PTE_NO_STEAL);
12458 +       vaddr += PAGESIZE;
12459 +       eaddr += PAGESIZE;
12460 +       len   -= PAGESIZE;
12461 +    }
12462 +}
12463 +
12464 +/* 
12465 + * Update a range of addresses for specified context.
12466 + */
12467 +void
12468 +elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm,caddr_t vaddr, unsigned long len)
12469 +{
12470 +    E3_Addr       eaddr;
12471 +    ELAN3MMU_RGN *rgn;
12472 +    unsigned long span;
12473 +
12474 +    spin_lock (&elan3mmu->elan3mmu_lock);
12475 +
12476 +    for (; len; len -= span, vaddr += span)
12477 +    {
12478 +       rgn = elan3mmu_findrgn_main (elan3mmu, vaddr, 0);
12479 +
12480 +       if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < vaddr)
12481 +           span = len;
12482 +       else if (rgn->rgn_mbase > vaddr)
12483 +           span = MIN(len, rgn->rgn_mbase - vaddr);
12484 +       else
12485 +       {
12486 +           span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - vaddr);
12487 +           eaddr = rgn->rgn_ebase + (vaddr - rgn->rgn_mbase);
12488 +
12489 +            HAT_PRINTF(0x10, "  updating eaddr %u main %p (%ld pages)\n", 
12490 +             eaddr, vaddr, btopr(span));
12491 +           
12492 +           elan3mmu_update_range(elan3mmu, mm, vaddr, eaddr, span, rgn->rgn_perm);
12493 +       }                       
12494 +    }
12495 +
12496 +    spin_unlock (&elan3mmu->elan3mmu_lock);
12497 +}
12498 +
12499 +/* 
12500 + * Invalidate all ptes for the given context.
12501 + */
12502 +void
12503 +elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu)
12504 +{
12505 +    ELAN3_PTBL  *l1ptbl   = (elan3mmu ? elan3mmu->elan3mmu_l1ptbl : NULL);
12506 +    spinlock_t *l1mtx;
12507 +    unsigned long flags;
12508 +
12509 +    if (l1ptbl && elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, 1, &l1mtx, &flags) == LK_PTBL_OK)
12510 +    {
12511 +       elan3mmu_l1inval(elan3mmu, elan3mmu->elan3mmu_l1ptbl, 0);
12512 +       elan3mmu_unlock_ptbl (l1ptbl, l1mtx, flags);
12513 +    }
12514 +}
12515 +
12516 +/*
12517 + * Local variables:
12518 + * c-file-style: "stroustrup"
12519 + * End:
12520 + */
12521 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/elan3ops.c
12522 ===================================================================
12523 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/elan3ops.c   2004-02-23 16:02:56.000000000 -0500
12524 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/elan3ops.c        2005-07-28 14:52:52.796686640 -0400
12525 @@ -0,0 +1,170 @@
12526 +/*
12527 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12528 + *
12529 + *    For licensing information please see the supplied COPYING file
12530 + *
12531 + */
12532 +
12533 +#ident "@(#)$Id: elan3ops.c,v 1.4 2003/09/24 13:57:25 david Exp $"
12534 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elan3ops.c,v $*/
12535 +
12536 +#include <qsnet/kernel.h>
12537 +#include <elan/elanmod.h>
12538 +
12539 +#include <elan3/elanregs.h>
12540 +#include <elan3/elandev.h>
12541 +#include <elan3/elan3ops.h>
12542 +
12543 +extern ELAN_STATS_OPS elan3_device_stats_ops;
12544 +
12545 +ELAN_DEV_OPS elan3_dev_ops = {
12546 +
12547 +       get_position,
12548 +       set_position,   
12549 +
12550 +       ELAN_DEV_OPS_VERSION
12551 +};
12552 +
12553 +ELAN_STATS_OPS elan3_device_stats_ops = {
12554 +       ELAN_STATS_OPS_VERSION,
12555 +
12556 +       stats_get_index_name,
12557 +       stats_get_block,
12558 +       stats_clear_block
12559 +};
12560 +
12561 +static char *elan3_device_stats_names[ELAN3_NUM_STATS] = 
12562 +{
12563 +               "version field",                 /* not cleared */
12564 +               "elan interrupts",
12565 +               "tlb flushes",
12566 +               "traps with invalid context",
12567 +               "interrupts com queue half full",
12568 +               "cproc traps",
12569 +               "dproc traps",
12570 +               "tproc traps",
12571 +               "iproc traps",
12572 +               "event interrupts",
12573 +               "elan page faults",
12574 +               "EopBadAcks",
12575 +               "EopResets",
12576 +               "InputterBadLength",
12577 +               "InputterCRCDiscards",
12578 +               "InputterCRCErrors",
12579 +               "InputterCRCBad",       
12580 +               "errors in dma data",
12581 +               "errors after dma identify",
12582 +               "errors after thread identify",
12583 +               "dma retries",
12584 +               "dma output timeouts",
12585 +               "dma packet ack errors",
12586 +               "forced tproc traps",
12587 +               "too many instruction traps",
12588 +               "output timeouts",
12589 +               "packet ack errors",
12590 +               "LockError",
12591 +               "DeskewError",
12592 +               "PhaseError",
12593 +               "DataError",
12594 +               "FifoOvFlow0",
12595 +               "FifoOvFlow1",
12596 +               "link error value on data error",
12597 +               "correctable ecc errors",
12598 +               "uncorrectable ecc errors",
12599 +               "multiple ecc errors",
12600 +               "sdram bytes free",              /* not cleared */
12601 +               "longest interrupt in ticks",
12602 +               "punts of event int's to thread",
12603 +               "reschedules of event int's thread"
12604 +};
12605 +
12606 +int 
12607 +stats_get_index_name (void *arg, uint  index, caddr_t name)
12608 +{
12609 +       copyout (elan3_device_stats_names[index], name, strlen (elan3_device_stats_names[index]) + 1  /* with \0 */);
12610 +
12611 +       return (0);
12612 +}
12613 +
12614 +int
12615 +stats_get_block (void *arg, uint entries, ulong *value)
12616 +{
12617 +       ELAN3_DEV *dev = (ELAN3_DEV *) arg;
12618 +
12619 +       if ( entries >  ELAN3_NUM_STATS ) /* if space too big only send valid portion */
12620 +               entries = ELAN3_NUM_STATS;
12621 +       
12622 +       copyout(&dev->Stats, value, sizeof(ulong) * entries);
12623 +
12624 +       return (0);
12625 +}
12626 +
12627 +int 
12628 +stats_clear_block (void *arg)
12629 +{
12630 +       ELAN3_DEV *dev = (ELAN3_DEV *) arg;
12631 +       u_long   *ptr = (u_long *) &dev->Stats;
12632 +       int                n;
12633 +       
12634 +       for (n = 0; n < ELAN3_NUM_STATS; n++)
12635 +       {
12636 +               switch (n) 
12637 +               {
12638 +               case offsetof (ELAN3_STATS, Version)/sizeof(u_long):
12639 +               case offsetof (ELAN3_STATS, SdramBytesFree)/sizeof(u_long):
12640 +                       break;
12641 +               default:
12642 +                       ptr[n] = (ulong)0;
12643 +               }
12644 +       }
12645 +       return (0);
12646 +}
12647 +
12648 +int 
12649 +get_position (void *user_data, ELAN_POSITION *position)
12650 +{
12651 +       ELAN3_DEV *dev = (ELAN3_DEV *)user_data;
12652 +
12653 +       copyout(&dev->Position, position, sizeof(ELAN_POSITION));
12654 +
12655 +       return (0);     
12656 +}
12657 +
12658 +int 
12659 +set_position (void *user_data, unsigned short nodeId, unsigned short numNodes)
12660 +{
12661 +       ELAN3_DEV *dev = (ELAN3_DEV *)user_data;
12662 +
12663 +       if (ComputePosition (&dev->Position, nodeId, numNodes, dev->Devinfo.dev_num_down_links_value) != 0)
12664 +               return (EINVAL);
12665 +       
12666 +       return (0);     
12667 +}
12668 +
12669 +int
12670 +elan3_register_dev_stats(ELAN3_DEV * dev) 
12671 +{
12672 +       char name[ELAN_STATS_NAME_MAX_LEN+1];
12673 +
12674 +       sprintf (name, ELAN3_STATS_DEV_FMT, dev->Instance);
12675 +
12676 +       elan_stats_register(&dev->StatsIndex,
12677 +                              name,
12678 +                              sizeof (elan3_device_stats_names)/sizeof (elan3_device_stats_names[0]),
12679 +                              &elan3_device_stats_ops,
12680 +                              (void *)dev);
12681 +
12682 +       return (0);
12683 +}
12684 +
12685 +void
12686 +elan3_deregister_dev_stats(ELAN3_DEV * dev) 
12687 +{
12688 +       elan_stats_deregister(dev->StatsIndex);
12689 +}
12690 +
12691 +/*
12692 + * Local variables:
12693 + * c-file-style: "linux"
12694 + * End:
12695 + */
12696 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/elandebug.c
12697 ===================================================================
12698 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/elandebug.c  2004-02-23 16:02:56.000000000 -0500
12699 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/elandebug.c       2005-07-28 14:52:52.796686640 -0400
12700 @@ -0,0 +1,151 @@
12701 +/*
12702 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12703 + * 
12704 + *    For licensing information please see the supplied COPYING file
12705 + *
12706 + */
12707 +
12708 +#ident "@(#)$Id: elandebug.c,v 1.25 2003/09/24 13:57:25 david Exp $"
12709 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandebug.c,v $*/
12710 +
12711 +#include <qsnet/kernel.h>
12712 +#include <elan3/elanregs.h>
12713 +#include <elan3/elandev.h>
12714 +#include <elan3/elanvp.h>
12715 +#include <elan3/elan3mmu.h>
12716 +#include <elan3/elanctxt.h>
12717 +#include <elan3/elandebug.h>
12718 +
12719 +
12720 +void
12721 +elan3_debugf (void *p, unsigned int mode, char *fmt,...)
12722 +{
12723 +    char prefix[128];
12724 +
12725 +#if defined (DIGITAL_UNIX)
12726 +#define PREFIX_FMT     "[%lx.%08x]"
12727 +#define PREFIX_VAL     (int)CURTHREAD()
12728 +#else
12729 +#define PREFIX_FMT     "[%lx.%04d]"
12730 +#define PREFIX_VAL     (current->pid)
12731 +#endif
12732 +
12733 +    if ((unsigned long) p > DBG_NTYPES)
12734 +    {
12735 +       ELAN3_CTXT *ctxt = (ELAN3_CTXT *) p;
12736 +
12737 +        if (elan3_debug_display_ctxt && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) != elan3_debug_display_ctxt)
12738 +            return;
12739 +        if (elan3_debug_ignore_ctxt  && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) == elan3_debug_ignore_ctxt)
12740 +            return;
12741 +
12742 +       if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
12743 +           sprintf (prefix, PREFIX_FMT " (XXX) ", lbolt, PREFIX_VAL);
12744 +       else
12745 +           sprintf (prefix, PREFIX_FMT " (%03x) ", lbolt, PREFIX_VAL,
12746 +                    ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK);
12747 +    }
12748 +    else
12749 +    {
12750 +       char *what;
12751 +
12752 +       if (elan3_debug_ignore_dev & (1 << ((unsigned long) p)))
12753 +           return;
12754 +
12755 +       switch ((unsigned long) p)
12756 +       {
12757 +       case (int) DBG_DEVICE: what = "dev"; break;
12758 +       case (int) DBG_KCOMM:  what = "kcm"; break;
12759 +       case (int) DBG_ICS:    what = "ics"; break;
12760 +       case (int) DBG_USER:   what = "usr"; break;
12761 +       default:               what = NULL; break;
12762 +       }
12763 +           
12764 +       if (what)
12765 +           sprintf (prefix, PREFIX_FMT " [%s] ", lbolt,  PREFIX_VAL, what);
12766 +       else
12767 +           sprintf (prefix, PREFIX_FMT " [%3d] ", lbolt,  PREFIX_VAL, (int)(long)what);
12768 +    }
12769 +
12770 +    {
12771 +       va_list       ap;
12772 +
12773 +       va_start (ap, fmt);
12774 +       qsnet_vdebugf ((((mode & elan3_debug_buffer)?QSNET_DEBUG_BUFFER:0)|((mode & elan3_debug_console)?QSNET_DEBUG_CONSOLE:0)) , prefix, fmt, ap);
12775 +       va_end (ap);
12776 +    }
12777 +}
12778 +
12779 +
12780 +void
12781 +elan3_alloc_panicstate (ELAN3_DEV *dev, int allocsdram)
12782 +{
12783 +    register int bank;
12784 +
12785 +    if (dev->PanicState.RegPtr == NULL)
12786 +       KMEM_ZALLOC (dev->PanicState.RegPtr, E3_Regs *, sizeof (E3_Regs), 1);
12787 +
12788 +    if (allocsdram)
12789 +       for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
12790 +           if (dev->PanicState.Sdram[bank] == NULL && dev->SdramBanks[bank].Size)
12791 +               KMEM_ZALLOC (dev->PanicState.Sdram[bank], char *, dev->SdramBanks[bank].Size, 1);
12792 +}
12793 +
12794 +void
12795 +elan3_free_panicstate (ELAN3_DEV *dev)
12796 +{
12797 +    register int bank;
12798 +
12799 +    if (dev->PanicState.RegPtr != NULL)
12800 +       KMEM_FREE (dev->PanicState.RegPtr, sizeof (E3_Regs));
12801 +
12802 +    for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
12803 +       if (dev->PanicState.Sdram[bank] != NULL && dev->SdramBanks[bank].Size)
12804 +           KMEM_FREE (dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size);
12805 +
12806 +    bzero (&dev->PanicState, sizeof (dev->PanicState));
12807 +}
12808 +
12809 +void
12810 +elan3_save_panicstate (ELAN3_DEV *dev)
12811 +{
12812 +    register int bank;
12813 +    
12814 +    if (dev->PanicState.RegPtr)
12815 +    {
12816 +       printk ("elan%d: saving state on panic .....\n", dev->Devinfo.dev_instance);
12817 +
12818 +       bcopy ((void *) dev->RegPtr, (void *) dev->PanicState.RegPtr, sizeof (E3_Regs));
12819 +       
12820 +       for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
12821 +           if (dev->SdramBanks[bank].Size && dev->PanicState.Sdram[bank])
12822 +               elan3_sdram_copyq_from_sdram (dev, (bank << ELAN3_SDRAM_BANK_SHIFT), dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size);
12823 +       
12824 +    }
12825 +}
12826 +
12827 +int
12828 +elan3_assfail (ELAN3_DEV *dev, char *string, char *file, int line)
12829 +{
12830 +    if (panicstr)
12831 +       return (0);
12832 +
12833 +    printk ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
12834 +
12835 +#if defined(LINUX)
12836 +    elan3_save_panicstate (dev);
12837 +
12838 +    panic ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
12839 +#else
12840 +    cmn_err (CE_PANIC, "elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
12841 +#endif
12842 +    /*NOTREACHED*/
12843 +    return (0);
12844 +}
12845 +
12846 +
12847 +/*
12848 + * Local variables:
12849 + * c-file-style: "stroustrup"
12850 + * End:
12851 + */
12852 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/elandev_generic.c
12853 ===================================================================
12854 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/elandev_generic.c    2004-02-23 16:02:56.000000000 -0500
12855 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/elandev_generic.c 2005-07-28 14:52:52.799686184 -0400
12856 @@ -0,0 +1,1862 @@
12857 +/*
12858 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12859 + * 
12860 + *    For licensing information please see the supplied COPYING file
12861 + *
12862 + */
12863 +
12864 +#ident "@(#)$Id: elandev_generic.c,v 1.111.2.3 2004/11/15 11:12:36 mike Exp $"
12865 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_generic.c,v $*/
12866 +
12867 +#include <qsnet/kernel.h>
12868 +#include <qsnet/kthread.h>
12869 +
12870 +#include <elan3/dma.h>
12871 +#include <elan3/elanregs.h>
12872 +#include <elan3/elandev.h>
12873 +#include <elan3/elanvp.h>
12874 +#include <elan3/elan3mmu.h>
12875 +#include <elan3/elanctxt.h>
12876 +#include <elan3/elandebug.h>
12877 +#include <elan3/elansyscall.h>
12878 +#include <elan3/urom_addrs.h>
12879 +#include <elan3/elan3ops.h>
12880 +
12881 +/*
12882 + * Module globals, configurable from system file.
12883 + */
12884 +u_int  elan3_debug                  = 0;
12885 +u_int  elan3_debug_console       = 0;
12886 +u_int  elan3_debug_buffer           = -1;
12887 +u_int  elan3_debug_ignore_dev       = 0;
12888 +u_int  elan3_debug_ignore_kcomm     = 0;
12889 +u_int  elan3_debug_ignore_ctxt      = 0;
12890 +u_int  elan3_debug_display_ctxt     = 0;
12891 +
12892 +int    eventint_punt_loops;
12893 +int    eventint_punt_ticks;
12894 +int    eventint_resched_ticks;
12895 +
12896 +static void InitialiseDmaBuffers (ELAN3_DEV *dev, ioaddr_t CmdPort);
12897 +static int  ProbeSdram (ELAN3_DEV *dev);
12898 +static void InitialiseSdram (ELAN3_DEV *dev);
12899 +static void ReEnableErrorInterrupts (void *arg);
12900 +void        PollForDmaHungup (void *arg);
12901 +static void elan3_event_interrupt (ELAN3_DEV *dev);
12902 +
12903 +/*
12904 + * BaseAddr is ptr to the start of a table aligned on a power of two byte address.
12905 + * SizePower must be in the range of 6 to 12. It defines the number of valid contexts as
12906 + * shown below.
12907 + *
12908 + * SizePower   Valid Contexts  Table size in bytes.
12909 + *     6            64               1k
12910 + *     7           128               2k
12911 + *     8           256               4K
12912 + *     9           512               8k
12913 + *    10          1024              16k
12914 + *    11          2048              32k
12915 + *    12          4096              64k
12916 + */
12917 +#define GEN_CONTEXT_PTR(BaseAddr, SizePower) (((E3_uint32) BaseAddr) | \
12918 +                                             (~((1 << ((SizePower) - 6)) - 1) & 0x3f))
12919 +
12920 +int
12921 +InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort)
12922 +{
12923 +    E3_IprocTrapHeader_BE   TrapCleanup[4];
12924 +    E3_ContextControlBlock  ContextControlBlock;
12925 +    sdramaddr_t             ptr;
12926 +    int                            res;
12927 +    int                            i;
12928 +
12929 +    eventint_punt_loops    = 100;
12930 +    eventint_punt_ticks    = (hz/100);
12931 +    eventint_resched_ticks = (hz/4);
12932 +    
12933 +    dev->Stats.Version     = ELAN3_STATS_VERSION;
12934 +    dev->Position.pos_mode = ELAN_POS_UNKNOWN;
12935 +
12936 +    /*
12937 +     * The elan should have already been reset, so the interrupt mask
12938 +     * should be 0 and the schedule status register should be set to
12939 +     * its initial state
12940 +     */
12941 +    ASSERT (dev->InterruptMask == 0);
12942 +    ASSERT ((read_reg32 (dev, Exts.SchCntReg) & HaltStopAndExtTestMask) == Sched_Initial_Value);
12943 +
12944 +    /*
12945 +     * Write any value here to clear out the half full and error bits of the command
12946 +     * overflow queues.
12947 +     */
12948 +    write_reg32 (dev, ComQueueStatus, 0);
12949 +
12950 +    /* Initialise the cache tags before touching the SDRAM */
12951 +    /* we initialise them to "map" the bottom of SDRAM */
12952 +    for (i = 0; i < E3_NumCacheLines; i++)
12953 +    {
12954 +       write_cache_tag (dev, Tags[i][0].Value, 0x0000000000000000ULL);
12955 +       write_cache_tag (dev, Tags[i][1].Value, 0x0000080000000000ULL);
12956 +       write_cache_tag (dev, Tags[i][2].Value, 0x0000100000000000ULL);
12957 +       write_cache_tag (dev, Tags[i][3].Value, 0x0000180000000000ULL);
12958 +    }
12959 +
12960 +#ifndef CONFIG_MPSAS
12961 +    for (i = 0; i < E3_NumCacheLines*(E3_CACHELINE_SIZE/sizeof(E3_uint64)); i++)
12962 +    {
12963 +       write_cache_set (dev, Set0[i], 0xcac1ecac1ecac1e0ULL);
12964 +       write_cache_set (dev, Set1[i], 0xcac1ecac1ecac1e1ULL);
12965 +       write_cache_set (dev, Set2[i], 0xcac1ecac1ecac1e2ULL);
12966 +       write_cache_set (dev, Set3[i], 0xcac1ecac1ecac1e3ULL);
12967 +    }
12968 +#endif
12969 +
12970 +    if ((res = ProbeSdram(dev)) != ESUCCESS)
12971 +       return (res);
12972 +
12973 +    /* Enable all cache sets before initialising the sdram allocators */
12974 +    write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg |= CONT_EN_ALL_SETS));
12975 +
12976 +    InitialiseSdram (dev);
12977 +
12978 +    dev->TAndQBase              = elan3_sdram_alloc (dev, ELAN3_TANDQ_SIZE);
12979 +    dev->ContextTable           = elan3_sdram_alloc (dev, ELAN3_CONTEXT_SIZE);
12980 +    dev->ContextTableSize       = ELAN3_NUM_CONTEXTS;
12981 +    dev->CommandPortTraps[0]    = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE);
12982 +    dev->CommandPortTraps[1]    = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE);
12983 +    dev->CurrentCommandPortTrap = 0;
12984 +
12985 +    PRINTF3 (DBG_DEVICE, DBG_CONFIG, "InitialiseElan: ContextTable %08lx TAndQ %08lx CommandPortTrap %08lx\n",
12986 +            dev->ContextTable, dev->TAndQBase, dev->CommandPortTraps[0]);
12987 +
12988 +    /* Allocate the thread amd dma trap areas */
12989 +    KMEM_ZALLOC (dev->ThreadTrap, THREAD_TRAP *, sizeof (THREAD_TRAP), TRUE);
12990 +    KMEM_ZALLOC (dev->DmaTrap, DMA_TRAP *, sizeof (DMA_TRAP), TRUE);
12991 +
12992 +    /* Allocate the ctxt table */
12993 +    KMEM_ZALLOC (dev->CtxtTable,  ELAN3_CTXT **, dev->ContextTableSize * sizeof ( ELAN3_CTXT *), TRUE);
12994 +
12995 +    /* Initialise halt queue list */
12996 +    dev->HaltOperationsTailpp   = &dev->HaltOperations;
12997 +
12998 +    /* From elan3/code/harness/elanstuff.c */
12999 +    /* Init the clock. */
13000 +    write_ureg64 (dev, Clock.NanoSecClock, 0);
13001 +    
13002 +    /* Init the instruction count reg. */
13003 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
13004 +    
13005 +    /* Init the stats control reg. Must be done before the count regs.*/
13006 +    write_ureg32 (dev, StatCont.StatsControl, 0);
13007 +    
13008 +    /* Init the stats count regs. */
13009 +    write_ureg32 (dev, StatCounts[0].s.StatsCount, 0);
13010 +    write_ureg32 (dev, StatCounts[1].s.StatsCount, 0);
13011 +    write_ureg32 (dev, StatCounts[2].s.StatsCount, 0);
13012 +    write_ureg32 (dev, StatCounts[3].s.StatsCount, 0);
13013 +    write_ureg32 (dev, StatCounts[4].s.StatsCount, 0);
13014 +    write_ureg32 (dev, StatCounts[5].s.StatsCount, 0);
13015 +    write_ureg32 (dev, StatCounts[6].s.StatsCount, 0);
13016 +    write_ureg32 (dev, StatCounts[7].s.StatsCount, 0);
13017 +    
13018 +    /*
13019 +     * Initialise the Context_Ptr and Fault_Base_Ptr
13020 +     */
13021 +    write_reg32 (dev, Fault_Base_Ptr, dev->TAndQBase + offsetof(E3_TrapAndQueue, IProcSysCntx));
13022 +    write_reg32 (dev, Context_Ptr, GEN_CONTEXT_PTR (dev->ContextTable, ELAN3_LN2_NUM_CONTEXTS));
13023 +
13024 +    /* scrub the TProc Registers */
13025 +    for (i = 0; i < 8; i++)
13026 +       write_reg32 (dev, Globals[i], 0xdeadbabe);
13027 +    for (i = 0; i < 8; i++)
13028 +       write_reg32 (dev, Outs[i], 0xdeadbabe);
13029 +    for (i = 0; i < 8; i++)
13030 +       write_reg32 (dev, Locals[i], 0xdeadbabe);
13031 +    for (i = 0; i < 8; i++)
13032 +       write_reg32 (dev, Ins[i], 0xdeadbabe);
13033 +
13034 +    /*
13035 +     * Initialise the Queue pointers.  Arrange them so that the starting positions are
13036 +     * farthest apart in one set of the cache. Thus 512 bytes apart,  but with cntx0
13037 +     * thread the same as the interrupt queue.
13038 +     */
13039 +    write_reg32 (dev, TProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0]));
13040 +    write_reg32 (dev, TProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0]));
13041 +    write_reg32 (dev, TProc_SysCntx_FPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80]));
13042 +    write_reg32 (dev, TProc_SysCntx_BPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80]));
13043 +    
13044 +    write_reg32 (dev, DProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]));
13045 +    write_reg32 (dev, DProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]));
13046 +    write_reg32 (dev, DProc_SysCntx_FPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10]));
13047 +    write_reg32 (dev, DProc_SysCntx_BPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10]));
13048 +    
13049 +    dev->Event_Int_Queue_FPtr = dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80]);
13050 +    write_reg32 (dev, Event_Int_Queue_FPtr, dev->Event_Int_Queue_FPtr);
13051 +    write_reg32 (dev, Event_Int_Queue_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80]));
13052 +    
13053 +    
13054 +    /* Initialise Input_Trap_Base to last 8 Kbytes of trap area, uCode adds the right offset */
13055 +    write_reg32 (dev, Input_Trap_Base, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0]));
13056 +    
13057 +    /* Ptr to word used to save the SP to when a thread deschedules */
13058 +    write_reg32 (dev, Thread_SP_Save_Ptr, dev->TAndQBase + offsetof (E3_TrapAndQueue, Thread_SP_Save));
13059 +    
13060 +    /* Initialise the command trap base */
13061 +    write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[0]);
13062 +    
13063 +    /* Initialise the set event tracing registers */
13064 +    write_reg32 (dev, Event_Trace_Ptr, 0);
13065 +    write_reg32 (dev, Event_Trace_Mask, 0);
13066 +    
13067 +    /* Initialise Tlb_Line_Value to zero. The TLB cannot be read while either the */
13068 +    /* uCode or thread proc might be running. Must be set to 0. */
13069 +    write_reg64 (dev, Tlb_Line_Value, 0);
13070 +
13071 +    /* Control register. Cache everything, Enable MMU, RefreshRate=3, CasLatency=1, StartSDR */
13072 +    dev->Cache_Control_Reg |= CONT_MMU_ENABLE | CONT_EN_ALL_SETS | CONT_CACHE_ALL | CONT_ENABLE_ECC;
13073 +
13074 +#if ELAN3_PAGE_SHIFT == 13
13075 +    dev->Cache_Control_Reg |= CONT_ENABLE_8K_PAGES;
13076 +#endif
13077 +
13078 +    write_reg32 (dev, Cache_Control_Reg.ContReg,  dev->Cache_Control_Reg);
13079 +
13080 +    /*
13081 +     * Initialise the context table to be discard for all contexts
13082 +     */
13083 +    ContextControlBlock.rootPTP  = 0;
13084 +    ContextControlBlock.filter   = E3_CCB_DISCARD_ALL;
13085 +    ContextControlBlock.VPT_mask = 0;
13086 +    ContextControlBlock.VPT_ptr  = 0;
13087 +
13088 +    for (i = 0, ptr = dev->ContextTable; i < ELAN3_NUM_CONTEXTS; i++, ptr += sizeof (E3_ContextControlBlock))
13089 +       elan3_sdram_copyl_to_sdram (dev, &ContextControlBlock, ptr, sizeof (E3_ContextControlBlock));
13090 +
13091 +    /* From elan3/code/trap_handler/init.c */
13092 +    /*
13093 +     * Initialise the Trap And Queue area in Elan SDRAM.
13094 +     */
13095 +    TrapCleanup[0].s.TrTypeCntx.TypeContext = 0;
13096 +    TrapCleanup[0].s.TrAddr                = 0;
13097 +    TrapCleanup[0].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13098 +    TrapCleanup[0].s.TrData0               = 0;
13099 +    TrapCleanup[1].s.TrTypeCntx.TypeContext = 0;
13100 +    TrapCleanup[1].s.TrAddr                = 0;
13101 +    TrapCleanup[1].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13102 +    TrapCleanup[1].s.TrData0               = 0;
13103 +    TrapCleanup[2].s.TrTypeCntx.TypeContext = 0;
13104 +    TrapCleanup[2].s.TrAddr                = 0;
13105 +    TrapCleanup[2].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13106 +    TrapCleanup[2].s.TrData0               = 0;
13107 +    TrapCleanup[3].s.TrTypeCntx.TypeContext = 0;
13108 +    TrapCleanup[3].s.TrAddr                = 0;
13109 +    TrapCleanup[3].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13110 +    TrapCleanup[3].s.TrData0               = 0;
13111 +
13112 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FaultContext),  0);
13113 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FSR.Status), 0);
13114 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FaultContext), 0);
13115 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FSR.Status), 0);
13116 +    
13117 +    /* Must now zero all the FSRs so that a subsequent Fault can be seen */ 
13118 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), 16);
13119 +
13120 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), 16);
13121 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 64);
13122 +    
13123 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16);
13124 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16);
13125 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16);
13126 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16);
13127 +
13128 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]), 64);
13129 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]), 64);
13130 +
13131 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]), 64);
13132 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]), 64);
13133 +
13134 +    InitialiseDmaBuffers(dev, CmdPort);
13135 +
13136 +    /* reserve a halt operation for flushing the context filter */
13137 +    ReserveHaltOperations (dev, 1, TRUE);
13138 +
13139 +    /* Allow the Thread/Dma to run */
13140 +    CLEAR_SCHED_STATUS (dev, HaltThread | HaltDmas);
13141 +
13142 +    /* Enable All Interrrupts */
13143 +    SET_INT_MASK (dev, (INT_PciMemErr | INT_SDRamInt | INT_EventInterrupt | INT_LinkError | INT_ComQueue |
13144 +                       INT_TProc | INT_CProc | INT_DProc | INT_IProcCh1NonSysCntx | 
13145 +                       INT_IProcCh1SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh0SysCntx));
13146 +
13147 +    /* Take the link out of boundary scan */
13148 +    SET_SCHED_LINK_VALUE (dev, 0, 0);
13149 +    
13150 +    /* And clear any link errors */
13151 +    PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
13152 +
13153 +    /* XXXX: clear discard context 0,  AFTER setting up the kernel comms */
13154 +    CLEAR_SCHED_STATUS (dev, DiscardSysCntxIn | DiscardNonSysCntxIn);
13155 +
13156 +    /* Start a thread to handle excessive Event Interrrupts */
13157 +    if (kernel_thread_create (elan3_event_interrupt, (caddr_t) dev) == NULL)
13158 +    {
13159 +       panic ("InitialiseElan: cannot start elan3_event_interrupt\n");
13160 +       return (EFAIL);
13161 +    }
13162 +    dev->EventInterruptThreadStarted = 1;
13163 +
13164 +    ReserveHaltOperations (dev, 1, TRUE);
13165 +
13166 +    PollForDmaHungup (dev);
13167 +
13168 +    /* register the device and stats with elanmod for RMS */
13169 +    dev->DeviceIdx = elan_dev_register(&dev->Devinfo, &elan3_dev_ops, (void *) dev);
13170 +    
13171 +    elan3_register_dev_stats(dev);
13172 +
13173 +    return (ESUCCESS);
13174 +}
13175 +
13176 +static void
13177 +InitialiseDmaBuffers(ELAN3_DEV *dev, ioaddr_t CmdPort)
13178 +{
13179 +   register int i;
13180 +
13181 +   /* GNAT sw-elan3/3908:
13182 +    * Clear down the power on state of the Dma_Desc registers to make sure we don't
13183 +    * try and interpret them when a trap happens.
13184 +    */
13185 +   write_reg32 (dev, Dma_Desc.dma_type,            0);
13186 +   write_reg32 (dev, Dma_Desc.dma_size,            0);
13187 +   write_reg32 (dev, Dma_Desc.dma_source,          0);
13188 +   write_reg32 (dev, Dma_Desc.dma_dest,            0);
13189 +   write_reg32 (dev, Dma_Desc.dma_destEvent,       0);
13190 +   write_reg32 (dev, Dma_Desc.dma_destCookieVProc, 0);
13191 +   write_reg32 (dev, Dma_Desc.dma_srcEvent,        0);
13192 +   write_reg32 (dev, Dma_Desc.dma_srcCookieVProc,  0);
13193 +   
13194 +   /*
13195 +    * The following is a sequence of writes to remove X's from the dma buffers and 
13196 +    * registers. It is only safe to write these registers after reset and before any
13197 +    * dma's have been issued. The chip will NOT function corectly if they are written at
13198 +    * any other time or in a different order.
13199 +    */
13200 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
13201 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdDmaType, 0);
13202 +   write_reg64 (dev, Exts.Dmas.DmaWrs.ResetAckNLdBytesToWr, ((u_longlong_t)0x1000) << 32);
13203 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdBytesToRd, ((u_longlong_t)0x100) << 32);
13204 +
13205 +   for (i=0;i<(4*8);i++)
13206 +       write_reg64 (dev, Dma_Alignment_Port[0], 0);
13207 +
13208 +   /*
13209 +    * This is used to clear out X's from some of the trap registers. This is required to
13210 +    * prevent the first traps from possibly writting X's into the SDram and upsetting the
13211 +    * ECC value. It requires that the trap save area registers have been set up but does
13212 +    * not require any translations to be ready.
13213 +    */
13214 +   writel (-1, CmdPort + offsetof (E3_CommandPort, SetEvent));
13215 +   while ((read_reg32 (dev, Exts.InterruptReg) & INT_CProc) == 0)
13216 +   {
13217 +       mb();
13218 +       DELAY (1);
13219 +   }
13220 +
13221 +   write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
13222 +   
13223 +   PULSE_SCHED_STATUS(dev, RestartCProc);
13224 +}
13225 +
13226 +void
13227 +FinaliseElan (ELAN3_DEV *dev)
13228 +{
13229 +    ELAN3_PTBL_GR *ptg;
13230 +    ELAN3_HALTOP  *op;
13231 +    ELAN3_HALTOP  *chain = NULL;
13232 +    int           bank;
13233 +    int                  indx;
13234 +    int                  size;
13235 +    unsigned long flags;
13236 +    int           level;
13237 +
13238 +    elan_stats_deregister (dev->StatsIndex);
13239 +    elan_dev_deregister(&dev->Devinfo);
13240 +
13241 +    /* Cancel the dma poller */
13242 +    cancel_timer_fn (&dev->DmaPollTimeoutId);
13243 +
13244 +    /* release it's halt operation */
13245 +    ReleaseHaltOperations (dev, 1);
13246 +
13247 +    /* stop all kernel threads */
13248 +    dev->ThreadsShouldStop = 1;
13249 +
13250 +    spin_lock_irqsave (&dev->IntrLock, flags);
13251 +    while (dev->EventInterruptThreadStarted && !dev->EventInterruptThreadStopped)
13252 +    {
13253 +       kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock);
13254 +       kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags);
13255 +    }
13256 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13257 +
13258 +    /* Set the interrupt mask to 0 and the schedule control register to run nothing */
13259 +    SET_INT_MASK (dev, 0);
13260 +    SET_SCHED_STATUS (dev, DiscardNonSysCntxIn | DiscardSysCntxIn | HaltThread | HaltDmas);
13261 +
13262 +    /* Cancel any link error timeout */
13263 +    if (timer_fn_queued(&dev->ErrorTimeoutId))
13264 +       cancel_timer_fn (&dev->ErrorTimeoutId);
13265 +
13266 +    /* Free of and page tables that have been allocated */
13267 +    spin_lock (&dev->PtblGroupLock);
13268 +    for(level=0; level<4; level++) 
13269 +    {
13270 +       while ((ptg = dev->Level[level].PtblGroupList) != NULL)
13271 +       {
13272 +           dev->Level[level].PtblGroupList = ptg->pg_next;
13273 +
13274 +           elan3_sdram_free (dev, ptg->pg_addr, PTBL_GROUP_SIZE);
13275 +           FREE_PTBL_GR(ptg);
13276 +       }
13277 +    }
13278 +
13279 +    spin_unlock (&dev->PtblGroupLock);
13280 +
13281 +    /* Free of all halt operations */
13282 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
13283 +    while ((op = dev->FreeHaltOperations) != NULL)
13284 +    {
13285 +       dev->FreeHaltOperations = op->Next;
13286 +
13287 +       /* Keep a list of 'freed' ops for later KMEM_FREE call */
13288 +       op->Next = chain;
13289 +       chain = op;
13290 +    }
13291 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
13292 +
13293 +    /* Have now dropped the spinlock - can call KMEM_FREE */
13294 +    while ((op = chain) != NULL)
13295 +    {
13296 +       chain = op->Next;
13297 +
13298 +       KMEM_FREE (op, sizeof (ELAN3_HALTOP));
13299 +    }
13300 +       
13301 +    /* Free of the ctxt table */
13302 +    KMEM_FREE (dev->CtxtTable,  dev->ContextTableSize * sizeof (ELAN3_CTXT *));
13303 +
13304 +    /* Free of the thread and dma atrap areas */
13305 +    KMEM_FREE (dev->ThreadTrap, sizeof (THREAD_TRAP));
13306 +    KMEM_FREE (dev->DmaTrap, sizeof (DMA_TRAP));
13307 +
13308 +    /* Free of the memsegs and pages */
13309 +    for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
13310 +    {
13311 +       if (dev->SdramBanks[bank].Size)
13312 +       {
13313 +           UnmapDeviceRegister (dev, &dev->SdramBanks[bank].Handle);
13314 +
13315 +           KMEM_FREE (dev->SdramBanks[bank].PtblGroups, sizeof (ELAN3_PTBL_GR *) * (dev->SdramBanks[bank].Size / PTBL_GROUP_SIZE));
13316 +
13317 +           for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= dev->SdramBanks[bank].Size; indx++, size <<= 1)
13318 +               KMEM_FREE (dev->SdramBanks[bank].Bitmaps[indx], sizeof (bitmap_t)*BT_BITOUL(dev->SdramBanks[bank].Size/size));
13319 +       }
13320 +    }
13321 +    elan3_sdram_fini (dev);
13322 +}
13323 +
13324 +#define INIT_PATTERN(offset)   (0xBEEC000000000011ull | ((u_longlong_t)(offset)) << 16)
13325 +#define FREE_PATTERN(offset)   (0xBEEC000000000022ull | ((u_longlong_t)(offset)) << 16)
13326 +
13327 +static int
13328 +ProbeSdram (ELAN3_DEV *dev)
13329 +{
13330 +    int                          Instance;
13331 +    u_int                Bank;
13332 +    int                          MemSpaceSize;
13333 +    int                          BankMaxSize;
13334 +    int                          BankOffset;
13335 +    int                          BankSize;
13336 +    ioaddr_t             BankBase;
13337 +    ioaddr_t             PageBase;
13338 +    ioaddr_t             PageBase1;
13339 +    ioaddr_t             PageBase2;
13340 +    DeviceMappingHandle   BankHandle;
13341 +    DeviceMappingHandle   PageHandle;
13342 +    DeviceMappingHandle   PageHandle1;
13343 +    DeviceMappingHandle   PageHandle2;
13344 +    register int          i;
13345 +    u_longlong_t         value;
13346 +    extern int            sdram_bank_limit;
13347 +
13348 +    /* NOTE: The Cache control register is set to only enable cache set 0 */
13349 +    /*       and has ECC disabled */
13350 +    Instance = dev->Instance;
13351 +    
13352 +    /* Determine the size of the SDRAM from the BAR register */
13353 +    if (DeviceRegisterSize (dev, ELAN3_BAR_SDRAM, &MemSpaceSize) != ESUCCESS)
13354 +    {
13355 +       printk ("elan%d: cannot determine SDRAM size\n", Instance);
13356 +       return (EFAIL);
13357 +    }
13358 +
13359 +    elan3_sdram_init (dev);
13360 +
13361 +    BankMaxSize = MemSpaceSize / ELAN3_SDRAM_NUM_BANKS;
13362 +
13363 +    for (Bank = 0; Bank < ELAN3_SDRAM_NUM_BANKS; Bank++)
13364 +    {
13365 +       BankOffset = Bank * BankMaxSize;
13366 +       
13367 +       PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: Probing RAM Bank %d (max size %08x)\n", Instance, Bank, BankMaxSize);
13368 +       
13369 +       /* Probe the memory bank by mapping two pages that are the size of the cache apart */
13370 +       /* this guarantees that when we store the second pattern we displace the first pattern */
13371 +       /* from the cache, also store the second pattern again the size of the cache up again */
13372 +       /* to ensure that the SDRAM wires don't stay floating at pattern1 */
13373 +
13374 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, PAGESIZE, &BankHandle) != ESUCCESS)
13375 +       {
13376 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
13377 +           continue;
13378 +       }
13379 +       
13380 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase1, BankOffset + ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle1) != ESUCCESS)
13381 +       {
13382 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
13383 +           UnmapDeviceRegister (dev, &BankHandle);
13384 +           continue;
13385 +       }
13386 +
13387 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase2, BankOffset + 2*ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle2) != ESUCCESS)
13388 +       {
13389 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
13390 +           UnmapDeviceRegister (dev, &BankHandle);
13391 +           UnmapDeviceRegister (dev, &PageHandle1);
13392 +           continue;
13393 +       }
13394 +
13395 +#define PATTERN0       (0x5555555555555555L)
13396 +#define PATTERN1       (0xAAAAAAAAAAAAAAAAL)
13397 +       writeq (PATTERN0, (u_longlong_t *) BankBase);
13398 +       writeq (PATTERN1, (u_longlong_t *) PageBase1);
13399 +       writeq (PATTERN1, (u_longlong_t *) PageBase2);
13400 +
13401 +       mmiob();
13402 +
13403 +       value = readq ((u_longlong_t *) BankBase);
13404 +
13405 +       if (value != PATTERN0)
13406 +       {
13407 +           UnmapDeviceRegister (dev, &BankHandle);
13408 +           UnmapDeviceRegister (dev, &PageHandle1);
13409 +           UnmapDeviceRegister (dev, &PageHandle2);
13410 +           continue;
13411 +       }
13412 +
13413 +       writeq (PATTERN1, (u_longlong_t *) BankBase);
13414 +       writeq (PATTERN0, (u_longlong_t *) PageBase1);
13415 +       writeq (PATTERN0, (u_longlong_t *) PageBase2);
13416 +
13417 +       mmiob();
13418 +       
13419 +       value = readq ((u_longlong_t *) BankBase);
13420 +       if (value != PATTERN1)
13421 +       {
13422 +           UnmapDeviceRegister (dev, &BankHandle);
13423 +           UnmapDeviceRegister (dev, &PageHandle1);
13424 +           UnmapDeviceRegister (dev, &PageHandle2);
13425 +           continue;
13426 +       }
13427 +       UnmapDeviceRegister (dev, &PageHandle1);
13428 +       UnmapDeviceRegister (dev, &PageHandle2);
13429 +
13430 +       /* Bank is present, so work out its size,  we store tha maximum size at the base */
13431 +       /* and then store the address at each address  on every power of two address until */
13432 +       /* we reach the minimum mappable size (PAGESIZE), we then read back the value at the */
13433 +       /* base to determine the bank size */
13434 +       writeq ((u_longlong_t) BankMaxSize, (u_longlong_t *) BankBase);
13435 +
13436 +       for (BankSize = (BankMaxSize>>1); BankSize > PAGESIZE; BankSize >>= 1)
13437 +       {
13438 +           if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase, BankOffset + BankSize, PAGESIZE, &PageHandle) == ESUCCESS)
13439 +           {
13440 +               writeq (BankSize, (u_longlong_t *) PageBase);
13441 +               UnmapDeviceRegister (dev, &PageHandle);
13442 +           }
13443 +       }
13444 +       mmiob();
13445 +
13446 +       BankSize = (u_long) readq ((u_longlong_t *) BankBase);
13447 +       
13448 +       if (sdram_bank_limit == 0 || BankSize <= (sdram_bank_limit * 1024 * 1024))
13449 +           printk ("elan%d: memory bank %d is %dK\n", Instance, Bank, BankSize / 1024);
13450 +       else
13451 +       {
13452 +           BankSize = (sdram_bank_limit * 1024 * 1024);
13453 +           printk ("elan%d: limit memory bank %d to %dK\n", Instance, Bank, BankSize / 1024);
13454 +       }
13455 +
13456 +       UnmapDeviceRegister (dev, &BankHandle);
13457 +       
13458 +       /* Now map all of this bank into the kernel */
13459 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, BankSize, &BankHandle) != ESUCCESS)
13460 +       {
13461 +           printk ("elan%d: Cannot initialise memory bank %d\n", Instance, Bank);
13462 +           continue;
13463 +       }
13464 +       
13465 +       dev->SdramBanks[Bank].Size    = BankSize;
13466 +       dev->SdramBanks[Bank].Mapping = BankBase;
13467 +       dev->SdramBanks[Bank].Handle  = BankHandle;
13468 +
13469 +#ifndef CONFIG_MPSAS
13470 +       /* Initialise it for ECC */
13471 +       preemptable_start {
13472 +           for (i = 0; i < BankSize; i += 8)
13473 +           {
13474 +               elan3_sdram_writeq (dev, (Bank << ELAN3_SDRAM_BANK_SHIFT) | i, INIT_PATTERN(BankOffset+i));
13475 +
13476 +               preemptable_check();
13477 +           }
13478 +       } preemptable_end;
13479 +#endif
13480 +    }
13481 +    
13482 +    return (ESUCCESS);
13483 +}
13484 +
13485 +static void
13486 +InitialiseSdram (ELAN3_DEV *dev)
13487 +{
13488 +    int indx, size, b;
13489 +
13490 +    for (b = 0; b < ELAN3_SDRAM_NUM_BANKS; b++)
13491 +    {
13492 +       ELAN3_SDRAM_BANK *bank = &dev->SdramBanks[b];
13493 +
13494 +       if (bank->Size == 0)
13495 +           continue;
13496 +
13497 +       /* allocate a ptbl group pointer for each possible ptbl group in this bank */
13498 +       KMEM_ZALLOC (bank->PtblGroups, ELAN3_PTBL_GR **, sizeof (ELAN3_PTBL_GR *) * bank->Size/PTBL_GROUP_SIZE, TRUE);
13499 +           
13500 +       /* allocate the buddy allocator bitmaps */
13501 +       for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->Size; indx++, size <<= 1)
13502 +           KMEM_ZALLOC (bank->Bitmaps[indx], bitmap_t *, sizeof (bitmap_t)*BT_BITOUL(bank->Size/size), TRUE);
13503 +           
13504 +       /* and add it to the sdram buddy allocator */
13505 +       elan3_sdram_add (dev, (b << ELAN3_SDRAM_BANK_SHIFT), (b << ELAN3_SDRAM_BANK_SHIFT) + bank->Size);
13506 +    }
13507 +}
13508 +
13509 +#include <elan3/vpd.h>
13510 +
13511 +int
13512 +ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency)
13513 +{
13514 +    DeviceMappingHandle RomHandle;
13515 +    unsigned char      *RomBase;
13516 +    unsigned char      *PCIDataPtr;
13517 +    unsigned char      *VPDPtr;
13518 +    unsigned char      *lim;
13519 +    int                        type;
13520 +    int                        i, len, len2;
13521 +    char               name[3] = "XX";
13522 +    char               value[256];
13523 +    int                        finished = 0;
13524 +
13525 +    
13526 +    /* default valud for CAS latency is 3 */
13527 +    (*CasLatency) = CAS_LATENCY_3;
13528 +
13529 +    if (MapDeviceRegister (dev, ELAN3_BAR_EBUS, (ioaddr_t *) &RomBase, ELAN3_EBUS_ROM_OFFSET, ELAN3_EBUS_ROM_SIZE, &RomHandle) != ESUCCESS)
13530 +    {
13531 +       printk ("elan%d: Cannot map ROM\n", dev->Instance);
13532 +       return (EFAIL);
13533 +    }
13534 +    
13535 +    /* Check the ROM signature */
13536 +    if (RomBase[0] != 0x55 || RomBase[1] != 0xAA)
13537 +    {
13538 +       printk ("elan%d: Invalid ROM signature %02x %02x\n", dev->Instance, RomBase[0], RomBase[1]);
13539 +       return (ESUCCESS);
13540 +    }
13541 +    
13542 +    PCIDataPtr = RomBase + ((RomBase[0x19] << 8) | RomBase[0x18]);
13543 +
13544 +    /* check the pci data structure */
13545 +    if (PCIDataPtr[0] != 'P' || PCIDataPtr[1] != 'C' || PCIDataPtr[2] != 'I' || PCIDataPtr[3] != 'R')
13546 +    {
13547 +       printk ("elan%d: Invalid PCI Data structure\n", dev->Instance);
13548 +       return (ESUCCESS);
13549 +    }
13550 +    
13551 +    /* Extract the VPD pointer */
13552 +    VPDPtr = RomBase + ((PCIDataPtr[9] << 8) | PCIDataPtr[8]);
13553 +
13554 +    if (VPDPtr == RomBase)
13555 +    {
13556 +       printk ("elan%d: No Vital Product Data\n", dev->Instance);
13557 +       return (ESUCCESS);
13558 +    }
13559 +
13560 +    while (! finished)
13561 +    {
13562 +       type = *VPDPtr++;
13563 +       
13564 +       if (type & LARGE_RESOURCE_BIT)
13565 +       {
13566 +           len = *(VPDPtr++);
13567 +           len += *(VPDPtr++) << 8;
13568 +
13569 +           switch (type & ~LARGE_RESOURCE_BIT)
13570 +           {
13571 +           case LARGE_RESOURCE_STRING:
13572 +               printk ("elan%d: ", dev->Instance);
13573 +               for (i = 0; i < len; i++)
13574 +                   printk ("%c", *VPDPtr++);
13575 +               printk ("\n");
13576 +               break;
13577 +               
13578 +           case LARGE_RESOURCE_VENDOR_DEFINED:
13579 +               VPDPtr += len;
13580 +               break;
13581 +               
13582 +           case LARGE_RESOURCE_VITAL_PRODUCT_DATA:
13583 +               for (lim = VPDPtr + len; VPDPtr < lim; )
13584 +               {
13585 +                   name[0] = *VPDPtr++;
13586 +                   name[1] = *VPDPtr++;
13587 +                   len2    = *VPDPtr++;
13588 +
13589 +                   for (i = 0; i < len2 && VPDPtr < lim; i++)
13590 +                       value[i] = *VPDPtr++;
13591 +                   value[i] = '\0';
13592 +
13593 +                   if (! strcmp (name, "SN"))
13594 +                       printk ("elan%d: Serial Number - %s\n", dev->Instance, value);
13595 +
13596 +                   if (! strcmp (name, "Z0"))
13597 +                       (*CasLatency) = (strcmp (value, "CAS_LATENCY_2") ? CAS_LATENCY_3 : CAS_LATENCY_2);
13598 +               }
13599 +               break;
13600 +               
13601 +           default:
13602 +               printk ("elan%d: unknown large resource %x\n", dev->Instance, type);
13603 +               finished = 1;
13604 +               break;
13605 +           }
13606 +       }
13607 +       else
13608 +       {
13609 +           len = type & 0x7;
13610 +
13611 +           switch (type >> 3)
13612 +           {
13613 +           case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID:
13614 +               VPDPtr += len;
13615 +               break;
13616 +
13617 +           case SMALL_RESOURCE_VENDOR_DEFINED:
13618 +               VPDPtr += len;
13619 +               break;
13620 +               
13621 +           case SMALL_RESOURCE_END_TAG:
13622 +               finished = 1;
13623 +               break;
13624 +               
13625 +           default:
13626 +               printk ("elan%d: unknown small resource %x\n", dev->Instance, type >> 3);
13627 +               finished = 1;
13628 +               break;
13629 +           }
13630 +       }
13631 +    }
13632 +    
13633 +    UnmapDeviceRegister (dev, &RomHandle);
13634 +    return (ESUCCESS);
13635 +}
13636 +
13637 +void
13638 +ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, ELAN3_PTBL_GR *ptg)
13639 +{
13640 +    int bank = offset >> ELAN3_SDRAM_BANK_SHIFT;
13641 +    
13642 +    dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE] = ptg;
13643 +}
13644 +
13645 +ELAN3_PTBL_GR *
13646 +ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset)
13647 +{
13648 +    int bank = offset >> ELAN3_SDRAM_BANK_SHIFT;
13649 +    
13650 +    return (dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE]);
13651 +}
13652 +
13653 +void
13654 +ElanFlushTlb (ELAN3_DEV *dev)
13655 +{
13656 +    unsigned long flags;
13657 +
13658 +    spin_lock_irqsave (&dev->TlbLock, flags);
13659 +    BumpStat (dev, TlbFlushes);
13660 +
13661 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH);
13662 +    mmiob();
13663 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
13664 +
13665 +    while (! (read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED))
13666 +       mb();
13667 +}
13668 +
13669 +void
13670 +KillNegativeDma (ELAN3_DEV *dev, void *arg)
13671 +{
13672 +    DMA_TRAP     *trap    = dev->DmaTrap;
13673 +    E3_Status_Reg status;
13674 +    sdramaddr_t   FPtr, BPtr;
13675 +    sdramaddr_t   Base, Top;
13676 +    unsigned long flags;
13677 +
13678 +    spin_lock_irqsave (&dev->IntrLock, flags);
13679 +
13680 +    ASSERT (read_reg32 (dev, Exts.InterruptReg) & INT_DProcHalted);
13681 +
13682 +    /* Initialise the trap to deliver to the offending user process */
13683 +    trap->Status.Status   = read_reg32 (dev, Exts.DProcStatus.Status);
13684 +    trap->PacketInfo.Value = 0;
13685 +    
13686 +    bzero (&trap->FaultSave, sizeof (trap->FaultSave));
13687 +    bzero (&trap->Data0, sizeof (trap->Data0));
13688 +    bzero (&trap->Data1, sizeof (trap->Data1));
13689 +    bzero (&trap->Data2, sizeof (trap->Data2));
13690 +    bzero (&trap->Data3, sizeof (trap->Data3));
13691 +
13692 +    /* run down the kernel dma run queue and panic on a -ve length dma */
13693 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
13694 +    BPtr  = read_reg32 (dev, DProc_SysCntx_BPtr);
13695 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
13696 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
13697 +    
13698 +    while (FPtr != BPtr)
13699 +    {
13700 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
13701 +       
13702 +       if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
13703 +           panic ("KillNegativeDma: -ve sized kernel dma\n");
13704 +
13705 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
13706 +    }
13707 +
13708 +    /* run down the user dma run queue and "remove" and -ve length dma's */
13709 +    FPtr  = read_reg32 (dev, DProc_NonSysCntx_FPtr);
13710 +    BPtr  = read_reg32 (dev, DProc_NonSysCntx_BPtr);
13711 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
13712 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
13713 +    
13714 +    while (FPtr != BPtr)
13715 +    {
13716 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
13717 +       
13718 +       if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
13719 +       {
13720 +           PRINTF3 (NULL, DBG_INTR, "KillNegativeDma: remove dma - context %d size %d SuspendAddr %x\n", 
13721 +                    trap->Desc.s.dma_u.s.Context, trap->Desc.s.dma_size, trap->Status.s.SuspendAddr);
13722 +
13723 +           trap->Status.s.TrapType = trap->Status.s.SuspendAddr;
13724 +           trap->Status.s.Context  = trap->Desc.s.dma_u.s.Context;
13725 +
13726 +           DeliverDProcTrap (dev, trap, 0);
13727 +
13728 +           /*
13729 +            * Remove the DMA from the queue by replacing it with one with
13730 +            * zero size and no events.
13731 +            *
13732 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
13733 +            * to mark the approriate run queue as empty.
13734 +            */
13735 +           trap->Desc.s.dma_type            = 0;
13736 +           trap->Desc.s.dma_size            = 0;
13737 +           trap->Desc.s.dma_source          = (E3_Addr) 0;
13738 +           trap->Desc.s.dma_dest            = (E3_Addr) 0;
13739 +           trap->Desc.s.dma_destCookieVProc = (E3_Addr) 0;
13740 +           trap->Desc.s.dma_srcEvent        = (E3_Addr) 0;
13741 +           trap->Desc.s.dma_srcCookieVProc  = (E3_Addr) 0;
13742 +
13743 +           elan3_sdram_copyq_to_sdram (dev, &trap->Desc, FPtr, sizeof (E3_DMA_BE));
13744 +       }
13745 +
13746 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
13747 +    }
13748 +
13749 +    status.Status = read_reg32 (dev, Exts.DProcStatus.Status);
13750 +
13751 +    if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || 
13752 +       status.s.SuspendAddr == MI_DequeueSysCntxDma ||
13753 +       status.s.SuspendAddr == MI_DmaLoop)
13754 +    {
13755 +       PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: unlock dma processor\n");
13756 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
13757 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType,   0);
13758 +       mmiob();
13759 +       
13760 +       DELAY (10);
13761 +       
13762 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
13763 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType,   0);
13764 +       mmiob();
13765 +    }
13766 +
13767 +    PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: dma processor restarted\n");
13768 +
13769 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13770 +
13771 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1);
13772 +}
13773 +
13774 +void
13775 +ForceTProcTrap (ELAN3_DEV *dev, void *arg)
13776 +{
13777 +    printk ("elan%d: forced tproc trap .....\n", dev->Instance);
13778 +
13779 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1);
13780 +}
13781 +
13782 +void
13783 +PollForDmaHungup (void *arg)
13784 +{
13785 +    ELAN3_DEV     *dev   = (ELAN3_DEV *) arg;
13786 +    unsigned long flags;
13787 +    E3_Status_Reg status;
13788 +    E3_uint32     insn1, insn3;
13789 +    register int  i;
13790 +
13791 +    if (read_reg32 (dev, Dma_Desc.dma_size) > E3_MAX_DMA_SIZE)
13792 +    {
13793 +       status.Status = read_reg32 (dev, Exts.DProcStatus);
13794 +
13795 +       PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: size %x SuspendAddr %x\n", read_reg32 (dev, Dma_Desc.dma_size), status.s.SuspendAddr);
13796 +
13797 +       if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || 
13798 +           status.s.SuspendAddr == MI_DequeueSysCntxDma ||
13799 +           status.s.SuspendAddr == MI_DmaLoop)
13800 +       {
13801 +           printk ("elan%d: PollForDmaHungup: size %x context %d SuspendAddr %x\n", 
13802 +                   dev->Instance, read_reg32 (dev, Dma_Desc.dma_size),
13803 +                   status.s.Context, status.s.SuspendAddr);
13804 +       
13805 +           PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: dma_size %x status %x\n",
13806 +                    read_reg32 (dev, Dma_Desc.dma_size), status.Status);
13807 +           
13808 +           spin_lock_irqsave (&dev->IntrLock, flags);
13809 +           QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, KillNegativeDma, NULL);
13810 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
13811 +           
13812 +           return;
13813 +       }
13814 +    }
13815 +
13816 +    status.Status = read_reg32 (dev, Exts.TProcStatus);
13817 +    if (status.s.WakeupFunction == WakeupStopped)
13818 +    {
13819 +       E3_uint32 PC = read_reg32 (dev, ExecutePC);
13820 +
13821 +       /* See if it's likely that the thread is really "stuck" on a waitevent/break 
13822 +        * instruction ......... */
13823 +       for (i = 0; i < 10; i++)
13824 +       {
13825 +           status.Status = read_reg32 (dev, Exts.TProcStatus);
13826 +           insn1         = read_reg32 (dev, IBufferReg[1]);
13827 +           insn3         = read_reg32 (dev, IBufferReg[3]);
13828 +           
13829 +           if (! (status.s.WakeupFunction == WakeupStopped && read_reg32 (dev, ExecutePC) == PC &&     /* stopping and it could be a break/waitevent */
13830 +                  (insn1 == 0x81a00000 || insn3 == 0x81a00000 ||                                       /* break instruction */
13831 +                   insn1 == 0x81b00000 || insn3 == 0x81b00000)))                                       /* waitevent instruction  */
13832 +               break;
13833 +       }
13834 +
13835 +       if (i == 10)
13836 +       {
13837 +           printk ("elan%d: forcing tproc trap from %s instruction at pc %x\n", dev->Instance, 
13838 +                   (insn1 == 0x81a00000 || insn3 == 0x81a00000) ? "break" : "waitevent", PC);
13839 +
13840 +           spin_lock_irqsave (&dev->IntrLock, flags);
13841 +           QueueHaltOperation (dev, 0, NULL, INT_TProcHalted, ForceTProcTrap, NULL);
13842 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
13843 +           return;
13844 +       }
13845 +    }
13846 +
13847 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 10);
13848 +}
13849 +
13850 +/*=======================================================================================*/
13851 +/*
13852 + * Interrupt handler.
13853 + */
13854 +static void
13855 +ReEnableErrorInterrupts (void *arg)
13856 +{
13857 +    ELAN3_DEV     *dev = (ELAN3_DEV *) arg;
13858 +    unsigned long flags;
13859 +
13860 +    spin_lock_irqsave (&dev->IntrLock, flags);
13861 +
13862 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
13863 +       ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
13864 +
13865 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "ReEnableErrorInterrupts: IntMask=%x\n", read_reg32 (dev, Exts.InterruptMask));
13866 +
13867 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13868 +}
13869 +
13870 +void
13871 +CheckForExcessiveErrorRate (ELAN3_DEV *dev)
13872 +{
13873 +    if (dev->ErrorTime == (lbolt/hz))
13874 +    {
13875 +       if (dev->ErrorsPerTick++ > 100)
13876 +       {
13877 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "CheckForExcessiveErrorRate: too many links errors, disabling interrupt\n");
13878 +
13879 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
13880 +
13881 +           schedule_timer_fn (&dev->ErrorTimeoutId, ReEnableErrorInterrupts, (void *) dev, hz);
13882 +       }
13883 +    }
13884 +    else
13885 +    {
13886 +       dev->ErrorTime     = (lbolt/hz);
13887 +       dev->ErrorsPerTick = 0;
13888 +    }
13889 +}
13890 +/*=======================================================================================*/
13891 +/*
13892 + * Interrupt handler.
13893 + */
13894 +static void
13895 +HandlePciMemErr (ELAN3_DEV *dev)
13896 +{
13897 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "HandlePciMemErr : masking out interrupt\n");
13898 +    
13899 +    ElanBusError (dev);
13900 +    panic ("elan pci memory error\n");
13901 +}
13902 +
13903 +static void
13904 +HandleSDRamInterrupt (ELAN3_DEV *dev)
13905 +{
13906 +    E3_uint32     EccStatus0 = read_reg32 (dev, ECC_STATUS0);
13907 +    E3_uint32     EccStatus1 = read_reg32 (dev, ECC_STATUS1);
13908 +    unsigned long flags;
13909 +
13910 +    PRINTF5 (DBG_DEVICE, DBG_INTR, "elan: ECC error - Addr=%x UE=%x CE=%x ME=%x Syn=%x\n",
13911 +            EccStatus0 & ECC_ADDR_MASK, EccStatus0 & ECC_UE_MASK, 
13912 +            EccStatus0 & ECC_CE_MASK, EccStatus0 & ECC_ME_MASK, 
13913 +            EccStatus1 & ECC_SYN_MASK);
13914 +
13915 +    if (EccStatus0 & (ECC_UE_MASK|ECC_CE_MASK))
13916 +    {
13917 +       printk ("elan%d: ECC memory error (Address=%08x Syndrome=%02x %s%s%s)\n",
13918 +               dev->Instance, 
13919 +               (EccStatus0 & ECC_ADDR_MASK), (EccStatus1 & ECC_SYN_MASK), 
13920 +               (EccStatus0 & ECC_UE_MASK) ? "Uncorrectable "   : "",
13921 +               (EccStatus0 & ECC_CE_MASK) ? "Correctable "     : "",
13922 +               (EccStatus0 & ECC_ME_MASK) ? "Multiple Errors " : "");
13923 +    }
13924 +
13925 +    if (EccStatus0 & ECC_UE_MASK)
13926 +       panic ("elan: Uncorrectable ECC memory error");
13927 +    if (EccStatus0 & ECC_CE_MASK)
13928 +       BumpStat (dev, CorrectableErrors);
13929 +    if (EccStatus0 & ECC_ME_MASK)
13930 +       BumpStat (dev, MultipleErrors);
13931 +
13932 +    /*
13933 +     * Clear the interrupt and reset the error flags.
13934 +     * Note. Might loose an UE or CE if it occurs between reading the status and
13935 +     *       clearing the interrupt. I don't think this matters very much as the
13936 +     *   status reg will only be used to identify a bad simm.
13937 +     */
13938 +
13939 +    spin_lock_irqsave (&dev->TlbLock, flags);
13940 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | CLEAR_SDRAM_ERROR);
13941 +    mmiob();
13942 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
13943 +
13944 +    CheckForExcessiveErrorRate (dev);
13945 +}
13946 +
13947 +static int
13948 +HandleEventInterrupt (ELAN3_DEV *dev, int nticks, unsigned long *flags)
13949 +{
13950 +    E3_uint32 Fptr  = dev->Event_Int_Queue_FPtr;
13951 +    E3_uint32 Bptr  = read_reg32 (dev, Event_Int_Queue_BPtr);                                          /* PCI read */
13952 +    long      tlim  = lbolt + nticks;
13953 +    long      count = 0;
13954 +    ELAN3_CTXT *ctxt;
13955 +
13956 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
13957 +    ASSERT ((dev->InterruptMask & INT_EventInterrupt) == 0);
13958 +           
13959 +    while (Fptr != Bptr)
13960 +    {
13961 +       while (Fptr != Bptr)
13962 +       {
13963 +           E3_EventInt_BE  EvInt;
13964 +           E3_uint32       Context;
13965 +
13966 +           /* If we're running in the interrupt handler and have seen a high
13967 +            * rate of event interrupts then punt to the thread  - however on 
13968 +            * Linux the elan interrupt handler can block the timer interrupt,
13969 +            * and so lbolt (jiffies) is not incremented, hence we punt after
13970 +            a number of loops instead */
13971 +#if defined(LINUX)
13972 +           if (in_interrupt() && ++count > eventint_punt_loops)
13973 +               return (EAGAIN);
13974 +#endif
13975 +
13976 +           if (nticks && ((int) (lbolt - tlim)) > 0)
13977 +           {
13978 +               PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x punting to thread\n", Fptr, Bptr);
13979 +               return (EAGAIN);
13980 +           }
13981 +
13982 +           elan3_sdram_copyq_from_sdram (dev, Fptr, (void *) &EvInt, 8);                               /* PCI read */
13983 +           
13984 +           /* The context number is held in the top 16 bits of the EventContext */
13985 +           Context = (EvInt.s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK;
13986 +           
13987 +           PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Context %d : Cookie %x\n", Context, EvInt.s.IntCookie);
13988 +           
13989 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, Context);
13990 +
13991 +           /* Work out new fptr, and store it in the device, since we'll be dropping the IntrLock */
13992 +           Fptr = E3_EVENT_INTQ_NEXT(Fptr);
13993 +           dev->Event_Int_Queue_FPtr = Fptr;
13994 +
13995 +           if (ctxt == NULL)
13996 +           {
13997 +               PRINTF3 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x context %d invalid\n",
13998 +                        Fptr, Bptr, Context);
13999 +               BumpStat (dev, InvalidContext);
14000 +           }
14001 +           else
14002 +           {
14003 +               BumpStat (dev, EventInterrupts);
14004 +               
14005 +               spin_unlock_irqrestore (&dev->IntrLock, *flags);
14006 +               QueueEventInterrupt (ctxt, EvInt.s.IntCookie);
14007 +               spin_lock_irqsave (&dev->IntrLock, *flags);
14008 +           }
14009 +           
14010 +           /* Re-read the FPtr, since we've dropped the IntrLock */
14011 +           Fptr = dev->Event_Int_Queue_FPtr;
14012 +           
14013 +           /* Store the new FPtr to the elan, this also clears the interrupt. */
14014 +           write_reg32 (dev, Event_Int_Queue_FPtr, Fptr);                                      /* PCI write */
14015 +
14016 +           mmiob();
14017 +       }
14018 +
14019 +       mb();
14020 +       Bptr = read_reg32 (dev, Event_Int_Queue_BPtr);                                          /* PCI read */
14021 +    }
14022 +
14023 +    return (ESUCCESS);
14024 +}
14025 +
14026 +int
14027 +SetLinkBoundaryScan (ELAN3_DEV *dev)
14028 +{
14029 +    int           res = ESUCCESS;
14030 +    unsigned long flags;
14031 +
14032 +    spin_lock_irqsave (&dev->IntrLock, flags);
14033 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
14034 +       res = EAGAIN;
14035 +    else
14036 +    {
14037 +       PRINTF0 (DBG_DEVICE, DBG_BSCAN, "SetLinkBoundaryScan: setting link into boundary scan mode\n");
14038 +
14039 +       /*
14040 +        * We're going to set the link into boundary scan mode,  so firstly
14041 +        * set the inputters to discard everything.
14042 +        */
14043 +       if (dev->DiscardAllCount++ == 0)
14044 +           SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL);
14045 +
14046 +       /*
14047 +        * Now disable the error interrupts
14048 +        */
14049 +       DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
14050 +       
14051 +       /*
14052 +        * And set the link into boundary scan mode, and drive
14053 +        * a reset token onto the link.
14054 +        */
14055 +       SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken);
14056 +    }
14057 +
14058 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14059 +
14060 +    return (res);
14061 +}
14062 +
14063 +void
14064 +ClearLinkBoundaryScan (ELAN3_DEV *dev)
14065 +{
14066 +    unsigned long flags;
14067 +
14068 +    spin_lock_irqsave (&dev->IntrLock, flags);
14069 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
14070 +    {
14071 +       PRINTF0 (DBG_DEVICE, DBG_BSCAN, "ClearLinkBoundaryScan: taking link out of boundary scan mode\n");
14072 +
14073 +       /*
14074 +        * Take the link out of boundary scan 
14075 +        */
14076 +       SET_SCHED_LINK_VALUE (dev, 0, 0);
14077 +
14078 +       /*
14079 +        * Clear any link errors.
14080 +        */
14081 +       PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
14082 +
14083 +       /*
14084 +        * Re-enable the error interrupts.
14085 +        */
14086 +       if (! timer_fn_queued(&dev->ErrorTimeoutId))
14087 +           ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
14088 +
14089 +       /*
14090 +        * And stop the inputter from discarding all packets.
14091 +        */
14092 +       if (--dev->DiscardAllCount == 0)
14093 +           SetSchedStatusRegister (dev, 0, NULL);
14094 +    }
14095 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14096 +}
14097 +
14098 +int
14099 +WriteBoundaryScanValue (ELAN3_DEV *dev, int value)
14100 +{
14101 +    int           res = 0;
14102 +    unsigned long flags;
14103 +
14104 +    spin_lock_irqsave (&dev->IntrLock, flags);
14105 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
14106 +    {
14107 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: driving value 0x%x onto link\n", value);
14108 +       SET_SCHED_LINK_VALUE (dev, 1, value);
14109 +
14110 +       res = read_reg32 (dev, Exts.LinkState);
14111 +
14112 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: return 0x%x\n", res);
14113 +    }
14114 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14115 +
14116 +    return (res);
14117 +}
14118 +
14119 +int
14120 +ReadBoundaryScanValue(ELAN3_DEV *dev, int link)
14121 +{
14122 +    int           res;
14123 +    unsigned long flags;
14124 +
14125 +    spin_lock_irqsave (&dev->IntrLock, flags);
14126 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
14127 +    {
14128 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: set linkval 0x%x\n",  link);
14129 +       SET_SCHED_LINK_VALUE (dev, 0, link);
14130 +    }
14131 +    res = read_reg32 (dev, Exts.LinkState);
14132 +    PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: return 0x%x\n", res);
14133 +
14134 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14135 +
14136 +    return (res);
14137 +}
14138 +
14139 +static int
14140 +ReadLinkVal (ELAN3_DEV *dev, int link)
14141 +{
14142 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
14143 +       SET_SCHED_LINK_VALUE (dev, 0, link);
14144 +    
14145 +    return (read_reg32 (dev, Exts.LinkState));
14146 +}
14147 +
14148 +static void
14149 +HandleLinkError (ELAN3_DEV *dev)
14150 +{
14151 +    E3_uint32 value = read_reg32 (dev, Exts.LinkErrorTypes);
14152 +
14153 +    PRINTF1 (DBG_DEVICE, DBG_LINKERR, "HandleLinkError: LinkErrorTypes %08x - clearing\n", value);
14154 +    
14155 +    if (value & LS_LockError)   BumpStat (dev, LockError);
14156 +    if (value & LS_DeskewError) BumpStat (dev, DeskewError);
14157 +    if (value & LS_PhaseError)  BumpStat (dev, PhaseError);
14158 +    if (value & LS_DataError)   BumpStat (dev, DataError);
14159 +    if (value & LS_FifoOvFlow0) BumpStat (dev, FifoOvFlow0);
14160 +    if (value & LS_FifoOvFlow1) BumpStat (dev, FifoOvFlow1);
14161 +
14162 +    if (value & LS_DataError)
14163 +       dev->Stats.LinkErrorValue = ReadLinkVal (dev, 12) | (ReadLinkVal (dev, 13) << 9);
14164 +
14165 +    PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
14166 +
14167 +    CheckForExcessiveErrorRate (dev);
14168 +}
14169 +
14170 +static void
14171 +HandleErrorInterrupt (ELAN3_DEV *dev, E3_uint32 Pend)
14172 +{
14173 +    if (Pend & INT_PciMemErr)
14174 +       HandlePciMemErr (dev);
14175 +    
14176 +    if (Pend & INT_SDRamInt)
14177 +       HandleSDRamInterrupt (dev);
14178 +    
14179 +    if (Pend & INT_LinkError)
14180 +       HandleLinkError (dev);
14181 +}
14182 +       
14183 +static void
14184 +HandleAnyIProcTraps (ELAN3_DEV *dev, E3_uint32 Pend)
14185 +{
14186 +    E3_uint32       RestartBits = 0;
14187 +    
14188 +    if (Pend & INT_IProcCh0SysCntx)
14189 +    {
14190 +       HandleIProcTrap (dev, 0, Pend,
14191 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx),
14192 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]),
14193 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrData[0]));
14194 +                        
14195 +       RestartBits |= RestartCh0SysCntx;
14196 +    }
14197 +    
14198 +    if (Pend & INT_IProcCh1SysCntx)
14199 +    {
14200 +       HandleIProcTrap (dev, 1, Pend,
14201 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx),
14202 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]),
14203 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrData[0]));
14204 +                        
14205 +       RestartBits |= RestartCh1SysCntx;
14206 +    }
14207 +
14208 +    if (Pend & INT_IProcCh0NonSysCntx)
14209 +    {
14210 +       HandleIProcTrap (dev, 0, Pend,
14211 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx),
14212 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]),
14213 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrData[0]));
14214 +
14215 +       RestartBits |= RestartCh0NonSysCntx;
14216 +    }
14217 +    
14218 +
14219 +    if (Pend & INT_IProcCh1NonSysCntx)
14220 +    {
14221 +       HandleIProcTrap (dev, 1, Pend,
14222 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx),
14223 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]),
14224 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrData[0]));
14225 +       RestartBits |= RestartCh1NonSysCntx;
14226 +    }
14227 +
14228 +    PULSE_SCHED_STATUS (dev, RestartBits);
14229 +}
14230 +
14231 +static void
14232 +elan3_event_interrupt (ELAN3_DEV *dev)
14233 +{
14234 +    unsigned long flags;
14235 +
14236 +    kernel_thread_init("elan3_event_int");
14237 +
14238 +    spin_lock_irqsave (&dev->IntrLock, flags);
14239 +    for (;;)
14240 +    {
14241 +       /* Make sure we never sleep with the EventInterrupt disabled */
14242 +       if (! (dev->InterruptMask & INT_EventInterrupt))
14243 +       {
14244 +           if (HandleEventInterrupt (dev, eventint_resched_ticks, &flags) != ESUCCESS)
14245 +               BumpStat (dev, EventRescheds);
14246 +           
14247 +           ENABLE_INT_MASK (dev, INT_EventInterrupt);
14248 +       }
14249 +
14250 +       if (dev->ThreadsShouldStop)
14251 +           break;
14252 +
14253 +       kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags);
14254 +    }
14255 +    
14256 +    dev->EventInterruptThreadStopped = 1;
14257 +    kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock);
14258 +
14259 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14260 +
14261 +    kernel_thread_exit ();
14262 +}
14263 +
14264 +int
14265 +InterruptHandler (ELAN3_DEV *dev)
14266 +{
14267 +    E3_uint32     Mask;
14268 +    E3_uint32     Pend;
14269 +    E3_uint32     RestartBits;
14270 +    int           deliverDProcTrap;
14271 +    int                  deliverTProcTrap;
14272 +    static long   lboltsave;
14273 +    int           loop_count = 0; 
14274 +    unsigned long flags;
14275 +    int  tproc_delivered;
14276 +
14277 +    spin_lock_irqsave (&dev->IntrLock, flags);
14278 +
14279 +    BumpStat (dev, Interrupts);
14280 +
14281 +    Mask = dev->InterruptMask;
14282 +    Pend = read_reg32 (dev, Exts.InterruptReg);                                                /* PCI read */
14283 +
14284 +    /* Save the lbolt so we know how long in do loop or in event handling */
14285 +    lboltsave = lbolt;
14286 +
14287 +    if ((Pend & Mask) == INT_EventInterrupt)
14288 +    {
14289 +       DISABLE_INT_MASK (dev, INT_EventInterrupt);
14290 +
14291 +       if (HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS)
14292 +           ENABLE_INT_MASK (dev, INT_EventInterrupt);
14293 +       else
14294 +       {
14295 +           BumpStat (dev, EventPunts);
14296 +
14297 +           kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock);
14298 +       }
14299 +
14300 +        if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
14301 +            dev->Stats.LongestInterrupt = (lbolt - lboltsave);
14302 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
14303 +       return (ESUCCESS);
14304 +    }
14305 +
14306 +    if ((Pend & Mask) == 0)
14307 +    {
14308 +       PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Spurious Pend %x Mask %x SchedStatus %x\n", 
14309 +                Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
14310 +
14311 +        if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
14312 +            dev->Stats.LongestInterrupt = (lbolt - lboltsave);
14313 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
14314 +       return (EFAIL);
14315 +    }
14316 +
14317 +    PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", 
14318 +            Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
14319 +
14320 +    do {
14321 +       loop_count++;
14322 +       RestartBits = 0;
14323 +
14324 +       if (Pend & Mask & (INT_CProc | INT_ComQueue))
14325 +           HandleCProcTrap (dev, Pend, &Mask);
14326 +
14327 +       tproc_delivered = 0;
14328 +
14329 +       if (Pend & Mask & INT_TProc) {
14330 +           ELAN_REG_REC(Pend);
14331 +           tproc_delivered = 1;
14332 +           deliverTProcTrap = HandleTProcTrap (dev, &RestartBits);
14333 +       }
14334 +       else
14335 +           deliverTProcTrap = 0;
14336 +
14337 +       if (Pend & Mask & INT_DProc)
14338 +           deliverDProcTrap = HandleDProcTrap (dev, &RestartBits);
14339 +       else
14340 +           deliverDProcTrap = 0;
14341 +
14342 +       ASSERT ((RestartBits & RestartDProc) == 0 || (read_reg32 (dev, Exts.DProcStatus.Status) >> 29) == 4);
14343 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR.Status))      == 0);
14344 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
14345 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
14346 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
14347 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
14348 +
14349 +       PULSE_SCHED_STATUS (dev, RestartBits);          /* Restart any processors which had trapped. */
14350 +       SET_INT_MASK (dev, Mask);                       /* And install the new interrupt mask */
14351 +
14352 +       if ((Pend & Mask & INT_TProc) && deliverTProcTrap)
14353 +           DeliverTProcTrap (dev, dev->ThreadTrap, Pend);
14354 +
14355 +       if ((Pend & Mask & INT_DProc) && deliverDProcTrap)
14356 +           DeliverDProcTrap (dev, dev->DmaTrap, Pend);
14357 +
14358 +       if (Pend & Mask & INT_Inputters)
14359 +           HandleAnyIProcTraps (dev, Pend);
14360 +       
14361 +       if (Pend & Mask & INT_EventInterrupt)
14362 +       {
14363 +           DISABLE_INT_MASK (dev, INT_EventInterrupt);
14364 +           
14365 +           if (loop_count == 1 && HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS) /* always punt to the thread if we've */
14366 +               ENABLE_INT_MASK (dev, INT_EventInterrupt);                                              /* been round the loop once */
14367 +           else
14368 +           {
14369 +               BumpStat (dev, EventPunts);
14370 +
14371 +               kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock);
14372 +           }
14373 +       }
14374 +
14375 +       if (Pend & (INT_Halted | INT_Discarding))
14376 +           ProcessHaltOperations (dev, Pend);
14377 +
14378 +       if (Pend & Mask & INT_ErrorInterrupts)
14379 +           HandleErrorInterrupt (dev, Pend);
14380 +
14381 +       Mask = dev->InterruptMask;
14382 +       Pend = read_reg32 (dev, Exts.InterruptReg);     /* PCI read */
14383 +       
14384 +       if (tproc_delivered)
14385 +           ELAN_REG_REC(Pend);
14386 +
14387 +       PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", 
14388 +                Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
14389 +    }  while ((Pend & Mask) != 0);
14390 +
14391 +    if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
14392 +        dev->Stats.LongestInterrupt = (lbolt - lboltsave);
14393 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14394 +
14395 +    PRINTF2 (DBG_DEVICE, DBG_INTR, "InterruptHandler: lbolt is %lx; start lbolt is %lx\n", 
14396 +            lbolt, lboltsave);
14397 +
14398 +    return (ESUCCESS);
14399 +}
14400 +
14401 +void
14402 +SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp)
14403 +{
14404 +    E3_uint32 HaltMask  = dev->HaltOperationsMask;
14405 +    E3_uint32 Mask      = Maskp ? *Maskp : dev->InterruptMask;
14406 +    E3_uint32 ClearBits = 0;
14407 +    E3_uint32 SetBits   = 0;
14408 +
14409 +    PRINTF5 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: HaltOperationsMask=%x HaltAll=%d HaltDmaDequeue=%d HaltThread=%d DiscardAll=%d\n",
14410 +            HaltMask, dev->HaltAllCount, dev->HaltDmaDequeueCount, dev->HaltThreadCount, dev->DiscardAllCount);
14411 +
14412 +    if (dev->FlushCommandCount)
14413 +       SetBits |= FlushCommandQueues;
14414 +    
14415 +    if ((HaltMask & INT_DProcHalted) || dev->HaltAllCount)
14416 +    {
14417 +       SetBits |= HaltDmas | HaltDmaDequeue;
14418 +       if (Pend & INT_DProcHalted)
14419 +           Mask &= ~INT_DProcHalted;
14420 +       else
14421 +           Mask |= INT_DProcHalted;
14422 +    }
14423 +
14424 +    if (dev->HaltDmaDequeueCount)
14425 +    {
14426 +       SetBits |= HaltDmaDequeue;
14427 +       if (Pend & INT_DProcHalted)
14428 +           Mask &= ~INT_DProcHalted;
14429 +       else
14430 +           Mask |= INT_DProcHalted;
14431 +    }
14432 +
14433 +    if ((HaltMask & INT_TProcHalted) || dev->HaltAllCount || dev->HaltThreadCount)
14434 +    {
14435 +       SetBits |= HaltThread;
14436 +       if (Pend & INT_TProcHalted)
14437 +           Mask &= ~INT_TProcHalted;
14438 +       else
14439 +           Mask |= INT_TProcHalted;
14440 +    }
14441 +
14442 +    if ((HaltMask & INT_DiscardingSysCntx) || dev->DiscardAllCount)
14443 +    {
14444 +       SetBits |= DiscardSysCntxIn;
14445 +       if (Pend & INT_DiscardingSysCntx)
14446 +           Mask &= ~INT_DiscardingSysCntx;
14447 +       else
14448 +           Mask |= INT_DiscardingSysCntx;
14449 +    }
14450 +
14451 +    if ((HaltMask & INT_DiscardingNonSysCntx) || dev->DiscardNonContext0Count || dev->DiscardAllCount)
14452 +    {
14453 +       SetBits |= DiscardNonSysCntxIn;
14454 +       if (Pend & INT_DiscardingNonSysCntx)
14455 +           Mask &= ~INT_DiscardingNonSysCntx;
14456 +       else
14457 +           Mask |= INT_DiscardingNonSysCntx;
14458 +    }
14459 +
14460 +    if (dev->HaltNonContext0Count)
14461 +       SetBits |= StopNonSysCntxs;
14462 +
14463 +    ClearBits = SetBits ^ (FlushCommandQueues | HaltDmas | HaltDmaDequeue | HaltThread |
14464 +                          DiscardSysCntxIn | DiscardNonSysCntxIn | StopNonSysCntxs);
14465 +
14466 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: SetBits=%x InterruptMask=%x InterruptReg=%x Mask=%x\n",
14467 +            SetBits, dev->InterruptMask, read_reg32 (dev, Exts.InterruptReg), Mask);
14468 +
14469 +    MODIFY_SCHED_STATUS (dev, SetBits, ClearBits);
14470 +
14471 +    if (Maskp)
14472 +       *Maskp = Mask;                                          /* copyback new interrupt mask */
14473 +    else
14474 +       SET_INT_MASK(dev, Mask);
14475 +}
14476 +
14477 +void
14478 +FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op)
14479 +{
14480 +    unsigned long flags;
14481 +
14482 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14483 +    op->Next = dev->FreeHaltOperations;
14484 +    dev->FreeHaltOperations = op;
14485 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14486 +}
14487 +
14488 +int
14489 +ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep)
14490 +{
14491 +    ELAN3_HALTOP   *op;
14492 +    unsigned long flags;
14493 +
14494 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14495 +    while ((dev->NumHaltOperations - dev->ReservedHaltOperations) < count)
14496 +    {
14497 +       spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14498 +
14499 +       KMEM_ZALLOC (op, ELAN3_HALTOP *, sizeof (ELAN3_HALTOP), cansleep);
14500 +
14501 +       if (op == NULL)
14502 +           return (FALSE);
14503 +
14504 +       spin_lock_irqsave (&dev->FreeHaltLock, flags);
14505 +
14506 +       dev->NumHaltOperations++;
14507 +
14508 +       op->Next = dev->FreeHaltOperations;
14509 +       dev->FreeHaltOperations = op;
14510 +    }
14511 +                   
14512 +    dev->ReservedHaltOperations += count;
14513 +    
14514 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14515 +
14516 +    return (TRUE);
14517 +}
14518 +
14519 +void
14520 +ReleaseHaltOperations (ELAN3_DEV *dev, int count)
14521 +{
14522 +    unsigned long flags;
14523 +
14524 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14525 +    dev->ReservedHaltOperations -= count;
14526 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14527 +}
14528 +
14529 +void
14530 +QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp, 
14531 +                   E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement)
14532 +{
14533 +    ELAN3_HALTOP *op;
14534 +
14535 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
14536 +    
14537 +    spin_lock (&dev->FreeHaltLock);
14538 +    op = dev->FreeHaltOperations;
14539 +
14540 +    ASSERT (op != NULL);
14541 +
14542 +    dev->FreeHaltOperations = op->Next;
14543 +    spin_unlock (&dev->FreeHaltLock);
14544 +
14545 +    op->Mask      = ReqMask;
14546 +    op->Function  = (void (*)(void *, void *))Function;
14547 +    op->Arguement = Arguement;
14548 +
14549 +    dev->HaltOperationsMask |= ReqMask;                                /* Add our bits to the global bits needed. */
14550 +    SetSchedStatusRegister (dev, Pend, Maskp);                 /* Set the control register and the interrupt mask */
14551 +
14552 +    /*
14553 +     * If the condition is already satisfied, then SetSchedStatusRegister will
14554 +     * have masked out the interrupt, so re-enable it now to take it straight
14555 +     * away
14556 +     */
14557 +    if (Maskp == NULL)
14558 +    {
14559 +       if ((read_reg32 (dev, Exts.InterruptReg) & ReqMask) == ReqMask)
14560 +           ENABLE_INT_MASK (dev, ReqMask);
14561 +    }
14562 +    else
14563 +    {
14564 +       if ((Pend & ReqMask) == ReqMask)
14565 +           *Maskp |= ReqMask;
14566 +    }
14567 +
14568 +    *dev->HaltOperationsTailpp = op;                           /* Queue at end of list, since ProcessHaltOperations */
14569 +    dev->HaltOperationsTailpp = &op->Next;                     /* drops the IntrLock while running down the list */
14570 +    op->Next = NULL;
14571 +}
14572 +                   
14573 +void
14574 +ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend)
14575 +{
14576 +    E3_uint32     Mask;
14577 +    ELAN3_HALTOP  *op;
14578 +    ELAN3_HALTOP **prevp;
14579 +    E3_uint32     haltMask;
14580 +    ELAN3_HALTOP  *next;
14581 +
14582 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: Pend %x\n", Pend);
14583 +
14584 +    for (;;)
14585 +    {
14586 +       ELAN3_HALTOP  *head = NULL;
14587 +       ELAN3_HALTOP **tailp = &head;
14588 +
14589 +       /*
14590 +        * Generate a list of halt operations which can be called now.
14591 +        */
14592 +       for (haltMask = 0, prevp = &dev->HaltOperations; (op = *prevp) != NULL; )
14593 +       {
14594 +           if ((Pend & op->Mask) != op->Mask)
14595 +           {
14596 +               haltMask |= op->Mask;
14597 +               prevp = &op->Next;
14598 +           }
14599 +           else
14600 +           {
14601 +               *prevp = op->Next;                              /* remove from list */
14602 +               if (op->Next == NULL)
14603 +                   dev->HaltOperationsTailpp = prevp;
14604 +               
14605 +               *tailp = op;                                    /* add to local list */
14606 +               op->Next = NULL;
14607 +               tailp = &op->Next;
14608 +           }
14609 +       }
14610 +
14611 +       if (head == NULL)                                       /* nothing to do, so update */
14612 +       {                                                       /* the schedule status register */
14613 +           dev->HaltOperationsMask = haltMask;                 /* and the interrupt mask */
14614 +           SetSchedStatusRegister (dev, Pend, NULL);
14615 +           return;
14616 +       }
14617 +
14618 +       /*
14619 +        * flush the command queues, before calling any operations
14620 +        */
14621 +       Mask = dev->InterruptMask;
14622 +       
14623 +       if (dev->FlushCommandCount++ == 0)
14624 +           SetSchedStatusRegister (dev, Pend, &Mask);
14625 +       
14626 +       if ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
14627 +       {
14628 +           if (dev->HaltThreadCount++ == 0)
14629 +               SetSchedStatusRegister (dev, Pend, &Mask);
14630 +
14631 +           CAPTURE_CPUS();
14632 +
14633 +           while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
14634 +               mb();
14635 +
14636 +           RELEASE_CPUS();
14637 +                   
14638 +           if (--dev->HaltThreadCount == 0)
14639 +               SetSchedStatusRegister (dev, Pend, &Mask);
14640 +       }
14641 +               
14642 +       if (read_reg32 (dev, Exts.InterruptReg) & INT_CProc)
14643 +       {
14644 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: command processor has trapped\n");
14645 +           HandleCProcTrap (dev, Pend, &Mask);
14646 +       }
14647 +       
14648 +       if (--dev->FlushCommandCount == 0)
14649 +           SetSchedStatusRegister (dev, Pend, &Mask);
14650 +       
14651 +       PRINTF2 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: interrupt mask %08x -> %08x\n", 
14652 +                dev->InterruptMask, Mask);
14653 +       
14654 +       SET_INT_MASK (dev, Mask);
14655 +       spin_unlock (&dev->IntrLock);
14656 +
14657 +       /*
14658 +        * now process the list of operations
14659 +        * we have
14660 +        */
14661 +       for (op = head; op != NULL; op = next)
14662 +       {
14663 +           next = op->Next;
14664 +
14665 +           op->Function (dev, op->Arguement);
14666 +           
14667 +           FreeHaltOperation (dev, op);
14668 +       }
14669 +
14670 +       spin_lock (&dev->IntrLock);
14671 +    }
14672 +}
14673 +
14674 +int
14675 +ComputePosition (ELAN_POSITION *pos, unsigned nodeId, unsigned numNodes, unsigned numDownLinksVal)
14676 +{
14677 +    int i, lvl, n;
14678 +    char numDownLinks[ELAN_MAX_LEVELS];
14679 +
14680 +    if (nodeId >= numNodes)
14681 +       return (EINVAL);
14682 +
14683 +    for (i = 0; i < ELAN_MAX_LEVELS; i++, numDownLinksVal >>= 4)
14684 +       numDownLinks[i] = numDownLinksVal & 7;
14685 +    
14686 +    for (lvl = 0, n = numNodes; n > ((lvl % 3) == 2 ? 8 : 4) && lvl < ELAN_MAX_LEVELS; lvl++)
14687 +    {
14688 +       if (numDownLinks[lvl] == 0)
14689 +           numDownLinks[lvl] = 4;
14690 +       
14691 +       if ((n % numDownLinks[lvl]) != 0)
14692 +           return (EINVAL);
14693 +       
14694 +       n /= numDownLinks[lvl];
14695 +    }
14696 +
14697 +    if (numDownLinks[lvl] == 0)
14698 +       numDownLinks[lvl] = n;
14699 +
14700 +    if (numDownLinks[lvl] != n)
14701 +       return (EINVAL);
14702 +
14703 +    for (i = 0; i <= lvl; i++)
14704 +       pos->pos_arity[i] = numDownLinks[lvl - i];
14705 +
14706 +    pos->pos_nodes  = numNodes;
14707 +    pos->pos_levels = lvl + 1;
14708 +    pos->pos_nodeid = nodeId;
14709 +    pos->pos_mode   = ELAN_POS_MODE_SWITCHED;
14710 +
14711 +    return (0);
14712 +}
14713 +
14714 +/*
14715 + * Local variables:
14716 + * c-file-style: "stroustrup"
14717 + * End:
14718 + */
14719 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/elandev_linux.c
14720 ===================================================================
14721 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/elandev_linux.c      2004-02-23 16:02:56.000000000 -0500
14722 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/elandev_linux.c   2005-07-28 14:52:52.803685576 -0400
14723 @@ -0,0 +1,2358 @@
14724 +/*
14725 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
14726 + *
14727 + *    For licensing information please see the supplied COPYING file
14728 + *
14729 + */
14730 +
14731 +#ident "$Id: elandev_linux.c,v 1.102.2.5 2005/03/07 16:27:44 david Exp $"
14732 +/*     $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_linux.c,v $*/
14733 +
14734 +#include <qsnet/kernel.h>
14735 +#include <qsnet/kpte.h>
14736 +
14737 +#include <linux/config.h>
14738 +#include <linux/mm.h>
14739 +#include <linux/pci.h>
14740 +#include <linux/reboot.h>
14741 +#include <linux/notifier.h>
14742 +
14743 +#include <linux/init.h>
14744 +#include <linux/module.h>
14745 +
14746 +#include <linux/pci.h>
14747 +#include <linux/ptrack.h>
14748 +
14749 +#include <asm/uaccess.h>
14750 +#include <asm/io.h>
14751 +#include <asm/pgalloc.h>
14752 +#include <asm/pgtable.h>
14753 +
14754 +#include <elan/devinfo.h>
14755 +#include <elan/elanmod.h>
14756 +
14757 +#include <elan3/elanregs.h>
14758 +#include <elan3/elandev.h>
14759 +#include <elan3/elanvp.h>
14760 +#include <elan3/elanio.h>
14761 +#include <elan3/elan3mmu.h>
14762 +#include <elan3/elanctxt.h>
14763 +#include <elan3/elandebug.h>
14764 +#include <elan3/elansyscall.h>
14765 +
14766 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)
14767 +#error please use a 2.2 series kernel or newer
14768 +#endif
14769 +
14770 +/* Minor numbers encoded as :
14771 + *   [5:0]     device number
14772 + *   [15:6]    function number
14773 + */
14774 +#define ELAN3_DEVICE_MASK          0x3F
14775 +
14776 +#define ELAN3_MINOR_CONTROL      0
14777 +#define ELAN3_MINOR_MEM          1
14778 +#define ELAN3_MINOR_USER        2
14779 +#define ELAN3_MINOR_SHIFT        6
14780 +
14781 +#define ELAN3_DEVICE(inode)    (MINOR(inode->i_rdev) & ELAN3_DEVICE_MASK)
14782 +#define ELAN3_MINOR(inode)     (MINOR(inode->i_rdev) >> ELAN3_MINOR_SHIFT)
14783 +
14784 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
14785 +#      define SetPageReserved(page)    set_bit(PG_reserved, &(page)->flags)
14786 +#      define ClearPageReserved(page)  clear_bit(PG_reserved, &(page)->flags)
14787 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
14788 +typedef void irqreturn_t;
14789 +#endif
14790 +#       define IRQ_NONE
14791 +#       define IRQ_HANDLED
14792 +#       define IRQ_RETVAL(x)
14793 +#endif
14794 +
14795 +
14796 +/*
14797 + * Function prototypes.
14798 + */
14799 +static int     elanattach(int instance, struct pci_dev *pcidev);
14800 +static int     elandetach(int instance);
14801 +
14802 +static int     elan3_open (struct inode *inode, struct file *file);
14803 +static int     elan3_ioctl (struct inode *inode, struct file *file, 
14804 +                            unsigned int cmd, unsigned long arg);
14805 +static int     elan3_mmap (struct file *file, struct vm_area_struct *vm_area);
14806 +static int     elan3_release (struct inode *inode, struct file *file);
14807 +
14808 +static int      elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer);
14809 +static int      elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer);
14810 +
14811 +static irqreturn_t InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs);
14812 +
14813 +static int     ConfigurePci(ELAN3_DEV *dev);
14814 +static int     ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr);
14815 +
14816 +static void     elan3_shutdown_devices(int panicing);
14817 +
14818 +/*
14819 + * Globals. 
14820 + */
14821 +static ELAN3_DEV       *elan3_devices[ELAN3_MAX_CONTROLLER];
14822 +static int       NodeId = ELAN3_INVALID_NODE;
14823 +static int       NumNodes;
14824 +static int       DownLinks;
14825 +static int       RandomRoutingDisabled;
14826 +int              BackToBackMaster;
14827 +int              BackToBackSlave;
14828 +int              enable_sdram_writecombining;
14829 +int             sdram_bank_limit;
14830 +extern int       LwpNice;
14831 +
14832 +char *    elan_reg_rec_file [ELAN_REG_REC_MAX];
14833 +int       elan_reg_rec_line [ELAN_REG_REC_MAX];
14834 +long      elan_reg_rec_lbolt[ELAN_REG_REC_MAX];
14835 +int       elan_reg_rec_cpu  [ELAN_REG_REC_MAX];
14836 +E3_uint32 elan_reg_rec_reg  [ELAN_REG_REC_MAX];
14837 +int       elan_reg_rec_index;
14838 +
14839 +MODULE_AUTHOR("Quadrics Ltd.");
14840 +MODULE_DESCRIPTION("Elan3 Device Driver");
14841 +
14842 +MODULE_LICENSE("GPL");
14843 +
14844 +MODULE_PARM(NodeId,"i");
14845 +MODULE_PARM(NumNodes,"i");
14846 +MODULE_PARM(RandomRoutingDisabled,"i");
14847 +MODULE_PARM(DownLinks,"i");
14848 +MODULE_PARM(BackToBackMaster,"i");
14849 +MODULE_PARM(BackToBackSlave,"i");
14850 +MODULE_PARM(LwpNice, "i");
14851 +MODULE_PARM(elan3_debug, "i");
14852 +MODULE_PARM(elan3_debug_console, "i");
14853 +MODULE_PARM(elan3_debug_buffer, "i");
14854 +MODULE_PARM(elan3mmu_debug, "i");
14855 +MODULE_PARM(sdram_bank_limit, "i");
14856 +
14857 +/* elan3/os/context.c */
14858 +EXPORT_SYMBOL(elan3_alloc);
14859 +EXPORT_SYMBOL(elan3_attach);
14860 +EXPORT_SYMBOL(elan3_doattach);
14861 +EXPORT_SYMBOL(elan3_free);
14862 +EXPORT_SYMBOL(elan3_detach);
14863 +EXPORT_SYMBOL(elan3_dodetach);
14864 +EXPORT_SYMBOL(elan3_block_inputter);
14865 +EXPORT_SYMBOL(CheckCommandQueueFlushed);
14866 +
14867 +/* elan3/os/sdram.c */
14868 +EXPORT_SYMBOL(elan3_sdram_alloc);
14869 +EXPORT_SYMBOL(elan3_sdram_free);
14870 +EXPORT_SYMBOL(elan3_sdram_to_phys);
14871 +EXPORT_SYMBOL(elan3_sdram_writeb);
14872 +EXPORT_SYMBOL(elan3_sdram_writew);
14873 +EXPORT_SYMBOL(elan3_sdram_writel);
14874 +EXPORT_SYMBOL(elan3_sdram_writeq);
14875 +EXPORT_SYMBOL(elan3_sdram_readb);
14876 +EXPORT_SYMBOL(elan3_sdram_readw);
14877 +EXPORT_SYMBOL(elan3_sdram_readl);
14878 +EXPORT_SYMBOL(elan3_sdram_readq);
14879 +EXPORT_SYMBOL(elan3_sdram_zerob_sdram);
14880 +EXPORT_SYMBOL(elan3_sdram_zerow_sdram);
14881 +EXPORT_SYMBOL(elan3_sdram_zerol_sdram);
14882 +EXPORT_SYMBOL(elan3_sdram_zeroq_sdram);
14883 +EXPORT_SYMBOL(elan3_sdram_copyb_to_sdram);
14884 +EXPORT_SYMBOL(elan3_sdram_copyw_to_sdram);
14885 +EXPORT_SYMBOL(elan3_sdram_copyl_to_sdram);
14886 +EXPORT_SYMBOL(elan3_sdram_copyq_to_sdram);
14887 +EXPORT_SYMBOL(elan3_sdram_copyb_from_sdram);
14888 +EXPORT_SYMBOL(elan3_sdram_copyw_from_sdram);
14889 +EXPORT_SYMBOL(elan3_sdram_copyl_from_sdram);
14890 +EXPORT_SYMBOL(elan3_sdram_copyq_from_sdram);
14891 +
14892 +/* elan3/os/tproc.c */
14893 +EXPORT_SYMBOL(DeliverTProcTrap);
14894 +EXPORT_SYMBOL(HandleTProcTrap);
14895 +EXPORT_SYMBOL(SaveThreadToStack);
14896 +
14897 +/* elan3/os/tprocinsts.c */
14898 +EXPORT_SYMBOL(RollThreadToClose);
14899 +
14900 +/* elan3/os/iproc.c */
14901 +EXPORT_SYMBOL(InspectIProcTrap);
14902 +EXPORT_SYMBOL(IProcTrapString);
14903 +EXPORT_SYMBOL(SimulateUnlockQueue);
14904 +
14905 +/* elan3/os/cproc.c */
14906 +EXPORT_SYMBOL(HandleCProcTrap);
14907 +
14908 +/* elan3/os/route_table.c */
14909 +EXPORT_SYMBOL(GenerateRoute);
14910 +EXPORT_SYMBOL(LoadRoute);
14911 +EXPORT_SYMBOL(InvalidateRoute);
14912 +EXPORT_SYMBOL(ValidateRoute);
14913 +EXPORT_SYMBOL(ClearRoute);
14914 +EXPORT_SYMBOL(GenerateProbeRoute);
14915 +EXPORT_SYMBOL(GenerateCheckRoute);
14916 +
14917 +/* elan3/os/elandev_generic.c */
14918 +EXPORT_SYMBOL(elan3_debug);
14919 +EXPORT_SYMBOL(QueueHaltOperation);
14920 +EXPORT_SYMBOL(ReleaseHaltOperations);
14921 +EXPORT_SYMBOL(ReserveHaltOperations);
14922 +
14923 +/* elan3/vm/elan3mmu_generic.c */
14924 +EXPORT_SYMBOL(elan3mmu_pteload);
14925 +EXPORT_SYMBOL(elan3mmu_unload);
14926 +EXPORT_SYMBOL(elan3mmu_set_context_filter);
14927 +EXPORT_SYMBOL(elan3mmu_reserve);
14928 +EXPORT_SYMBOL(elan3mmu_attach);
14929 +EXPORT_SYMBOL(elan3mmu_detach);
14930 +EXPORT_SYMBOL(elan3mmu_release);
14931 +/* elan3/vm/elan3mmu_linux.c */
14932 +EXPORT_SYMBOL(elan3mmu_phys_to_pte);
14933 +EXPORT_SYMBOL(elan3mmu_kernel_invalid_pte);
14934 +
14935 +/* elan3/os/elan3_debug.c */
14936 +EXPORT_SYMBOL(elan3_debugf);
14937 +
14938 +/* elan3/os/minames.c */
14939 +EXPORT_SYMBOL(MiToName);
14940 +
14941 +/* elan3/os/elandev_generic.c */
14942 +EXPORT_SYMBOL(MapDeviceRegister);
14943 +EXPORT_SYMBOL(UnmapDeviceRegister);
14944 +
14945 +EXPORT_SYMBOL(elan_reg_rec_lbolt);
14946 +EXPORT_SYMBOL(elan_reg_rec_file);
14947 +EXPORT_SYMBOL(elan_reg_rec_index);
14948 +EXPORT_SYMBOL(elan_reg_rec_cpu);
14949 +EXPORT_SYMBOL(elan_reg_rec_reg);
14950 +EXPORT_SYMBOL(elan_reg_rec_line);
14951 +
14952 +/*
14953 + * Standard device entry points.
14954 + */
14955 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
14956 +
14957 +#include <linux/dump.h>
14958 +
14959 +static int      elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer);
14960 +
14961 +static struct notifier_block elan3_dump_notifier = 
14962 +{
14963 +    notifier_call:     elan3_dump_event,
14964 +    priority:          0,
14965 +};
14966 +
14967 +static int
14968 +elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
14969 +{
14970 +    if ( event == DUMP_BEGIN )
14971 +       elan3_shutdown_devices (FALSE);
14972 +
14973 +    return (NOTIFY_DONE);
14974 +}
14975 +
14976 +#endif
14977 +
14978 +static struct file_operations elan3_fops = {
14979 +        ioctl:   elan3_ioctl,          /* ioctl */
14980 +        mmap:    elan3_mmap,           /* mmap */
14981 +        open:    elan3_open,           /* open */
14982 +        release: elan3_release,                /* release */
14983 +};
14984 +
14985 +static struct notifier_block elan3_reboot_notifier = 
14986 +{
14987 +    notifier_call:     elan3_reboot_event,
14988 +    priority:          0,
14989 +};
14990 +
14991 +static struct notifier_block elan3_panic_notifier = 
14992 +{
14993 +    notifier_call:     elan3_panic_event,
14994 +    priority:          0,
14995 +};
14996 +
14997 +ELAN3_DEV *
14998 +elan3_device (int instance)
14999 +{
15000 +       if (instance < 0 || instance >= ELAN3_MAX_CONTROLLER)
15001 +           return ((ELAN3_DEV *) NULL);
15002 +       return elan3_devices[instance];
15003 +}
15004 +EXPORT_SYMBOL(elan3_device);
15005 +
15006 +/*
15007 + * Called at rmmod time.  elandetach() for each card + general cleanup.
15008 + */
15009 +#ifdef MODULE
15010 +static void __exit elan3_exit(void)
15011 +{
15012 +       int i;
15013 +
15014 +       printk("elan: preparing to remove module\n");
15015 +
15016 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
15017 +       unregister_dump_notifier (&elan3_dump_notifier);
15018 +#endif
15019 +       unregister_reboot_notifier (&elan3_reboot_notifier);
15020 +       notifier_chain_unregister (&panic_notifier_list, &elan3_panic_notifier);
15021 +
15022 +       /* call elandetach() for each device configured. */
15023 +       for (i = 0; i < ELAN3_MAX_CONTROLLER; i++)
15024 +               if (elan3_devices[i] != NULL)
15025 +                       elandetach(i);
15026 +
15027 +       FinaliseNetworkErrorResolver();
15028 +       elan3mmu_fini();
15029 +
15030 +       cookie_fini();
15031 +       unregister_chrdev(ELAN3_MAJOR, ELAN3_NAME);
15032 +
15033 +       elan3_procfs_fini();
15034 +
15035 +       printk("elan: module removed\n");
15036 +}
15037 +
15038 +/*
15039 + * Called at insmod time.  First we perform general driver initialization,
15040 + * then call elanattach() for each card.
15041 + */
15042 +#ifdef MODULE
15043 +static int __init elan3_init(void)
15044 +#else
15045 +__initfunc(int elan3_init(void))
15046 +#endif
15047 +{
15048 +       int e;
15049 +       int boards;
15050 +       struct pci_dev *dev;
15051 +       char revid;
15052 +
15053 +       elan_reg_rec_index=0;
15054 +       {
15055 +           int i;
15056 +           for(i=0;i<ELAN_REG_REC_MAX;i++)
15057 +               elan_reg_rec_file[i] = NULL;
15058 +       }       
15059 +
15060 +       /* register major/minor num */
15061 +       e = register_chrdev(ELAN3_MAJOR, ELAN3_NAME, &elan3_fops);
15062 +       if (e < 0)
15063 +               return e;
15064 +
15065 +       elan3_procfs_init ();
15066 +
15067 +       cookie_init();
15068 +       elan3mmu_init();
15069 +       InitialiseNetworkErrorResolver();
15070 +
15071 +       /* call elanattach() for each device found on PCI */
15072 +       memset(elan3_devices, 0, sizeof(elan3_devices));
15073 +       boards = 0;
15074 +       for (dev = NULL; (dev = pci_find_device(PCI_VENDOR_ID_QUADRICS, PCI_DEVICE_ID_ELAN3, dev)) != NULL ;) 
15075 +       {
15076 +           pci_read_config_byte (dev, PCI_REVISION_ID, &revid);
15077 +
15078 +           if (revid == PCI_REVISION_ID_ELAN3_REVA)
15079 +               printk ("elan at pci %s - RevA device not supported\n", dev->slot_name);
15080 +           else
15081 +           {
15082 +               if (boards < ELAN3_MAX_CONTROLLER)
15083 +                       /* Count successfully attached devices */ 
15084 +                       boards += ((elanattach(boards, dev) == 0) ? 1 : 0);
15085 +               else
15086 +               {
15087 +                   printk ("elan: max controllers = %d\n", ELAN3_MAX_CONTROLLER);
15088 +                   break;
15089 +               }
15090 +           }
15091 +       }
15092 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
15093 +       register_dump_notifier (&elan3_dump_notifier);
15094 +#endif
15095 +       register_reboot_notifier (&elan3_reboot_notifier);
15096 +       notifier_chain_register (&panic_notifier_list, &elan3_panic_notifier);
15097 +
15098 +       return 0;
15099 +}
15100 +
15101 +/* Declare the module init and exit functions */
15102 +module_init(elan3_init);
15103 +module_exit(elan3_exit);
15104 +
15105 +#endif
15106 +
15107 +static void
15108 +elan3_shutdown_devices(int panicing)
15109 +{
15110 +    ELAN3_DEV *dev;
15111 +    unsigned long flags;
15112 +    register int i;
15113 +
15114 +    local_irq_save (flags);
15115 +    for (i = 0; i < ELAN3_MAX_CONTROLLER; i++)
15116 +    {
15117 +       if ((dev = elan3_devices[i]) != NULL)
15118 +       {
15119 +           if (! panicing) spin_lock (&dev->IntrLock);
15120 +           
15121 +           printk(KERN_INFO "elan%d: forcing link into reset\n", dev->Instance);
15122 +
15123 +           /*
15124 +            * We're going to set the link into boundary scan mode,  so firstly
15125 +            * set the inputters to discard everything.
15126 +            */
15127 +           if (dev->DiscardAllCount++ == 0)
15128 +               SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL);
15129 +
15130 +           dev->LinkShutdown = 1;
15131 +           
15132 +           /*
15133 +            * Now disable the error interrupts
15134 +            */
15135 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
15136 +           
15137 +           /*
15138 +            * And set the link into boundary scan mode, and drive
15139 +            * a reset token onto the link.
15140 +            */
15141 +           SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken);
15142 +
15143 +           if (! panicing) spin_unlock (&dev->IntrLock);
15144 +       }
15145 +    }
15146 +    local_irq_restore (flags);
15147 +}
15148 +
15149 +static int
15150 +elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
15151 +{
15152 +    if (! (event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
15153 +       return (NOTIFY_DONE);
15154 +
15155 +    elan3_shutdown_devices (FALSE);
15156 +
15157 +    return (NOTIFY_DONE);
15158 +}
15159 +
15160 +static int
15161 +elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
15162 +{
15163 +    elan3_shutdown_devices (TRUE);
15164 +
15165 +    return (NOTIFY_DONE);
15166 +}
15167 +
15168 +#include <elan3/elan3ops.h>
15169 +/*
15170 + * Called by init_module() for each card discovered on PCI.
15171 + */
15172 +static int
15173 +elanattach(int instance, struct pci_dev *pcidev)
15174 +{
15175 +       ELAN3_DEV *dev;
15176 +       int ramSize;
15177 +       int level;
15178 +       ioaddr_t sdramAddr, cmdPortAddr, intPalAddr;
15179 +       DeviceMappingHandle handle;
15180 +
15181 +       printk("elan%d: attach, irq=%d\n", instance, pcidev->irq);
15182 +
15183 +       /*
15184 +        * Allocate the ELAN3_DEV structure.
15185 +        */
15186 +       KMEM_ZALLOC(dev, ELAN3_DEV *, sizeof(ELAN3_DEV), TRUE);
15187 +       if (dev == NULL) {
15188 +               printk ("elan%d: KMEM_ALLOC failed\n", instance);
15189 +               return (-ENOMEM);
15190 +       }
15191 +       elan3_devices[instance] = dev;
15192 +       dev->Osdep.pci = pcidev;
15193 +
15194 +       dev->Instance = instance;
15195 +
15196 +       /* Initialise the device information */
15197 +       pci_read_config_word (pcidev, PCI_VENDOR_ID,   &dev->Devinfo.dev_vendor_id);
15198 +       pci_read_config_word (pcidev, PCI_DEVICE_ID,   &dev->Devinfo.dev_device_id);
15199 +       pci_read_config_byte (pcidev, PCI_REVISION_ID, &dev->Devinfo.dev_revision_id);
15200 +
15201 +       dev->Devinfo.dev_instance             = instance;
15202 +       dev->Devinfo.dev_rail                 = instance;
15203 +       dev->Devinfo.dev_driver_version       = 0;
15204 +       dev->Devinfo.dev_num_down_links_value = DownLinks;
15205 +
15206 +       dev->Position.pos_mode                = ELAN_POS_UNKNOWN;
15207 +       dev->Position.pos_random_disabled     = RandomRoutingDisabled;
15208 +       
15209 +       /*
15210 +        * Set up PCI config regs.
15211 +        */
15212 +       if (ConfigurePci(dev) != ESUCCESS)
15213 +           goto fail0;
15214 +
15215 +       /*
15216 +        * Determine the PFnums of the SDRAM and command port
15217 +        */
15218 +       if (MapDeviceRegister(dev, ELAN3_BAR_SDRAM, &sdramAddr, 0, PAGESIZE, &handle) != ESUCCESS)
15219 +           goto fail1;
15220 +
15221 +       DeviceRegisterSize(dev, ELAN3_BAR_SDRAM, &ramSize);
15222 +       
15223 +       dev->SdramPhysMask = ~((physaddr_t) ramSize - 1);
15224 +       dev->SdramPhysBase = kmem_to_phys((void *) sdramAddr);
15225 +
15226 +       UnmapDeviceRegister (dev, &handle);
15227 +
15228 +#if defined(LINUX_ALPHA)
15229 +       /*
15230 +        * consider a physical address to be on the same pci bus
15231 +        * as us if its physical address is "close" to our sdram
15232 +        * physical address.
15233 +        * this is almost certainly incorrect for large memory (> 2Gb)
15234 +        * i386 machines - and is only correct for alpha for 32 bit
15235 +        * base address registers.
15236 +        *
15237 +        * Modified this to match the Tru64 driver value;
15238 +        * i.e. PciPhysMask = 0xfffffffffffc0000
15239 +        */
15240 +#  define PCI_ADDR_MASK (0x7FFFFFFFl)
15241 +
15242 +       dev->PciPhysMask = ~PCI_ADDR_MASK;
15243 +       dev->PciPhysBase = dev->SdramPhysBase & dev->PciPhysMask;
15244 +#endif
15245 +       /*
15246 +        * Now reset the elan chip.
15247 +        */
15248 +       if (MapDeviceRegister(dev, ELAN3_BAR_REGISTERS, &dev->RegPtr, 0, 0, &dev->RegHandle) != ESUCCESS)
15249 +           goto fail1;
15250 +
15251 +       if (MapDeviceRegister(dev, ELAN3_BAR_EBUS, &intPalAddr, ELAN3_EBUS_INTPAL_OFFSET, PAGESIZE,
15252 +                             &handle) != ESUCCESS)
15253 +           goto fail2;
15254 +
15255 +       ResetElan(dev, intPalAddr);     
15256 +
15257 +       UnmapDeviceRegister (dev, &handle);
15258 +
15259 +       /* 
15260 +        * Initialise the device mutex's which must be accessible from the 
15261 +        * interrupt handler.  
15262 +        */
15263 +       kcondvar_init (&dev->IntrWait);
15264 +       spin_lock_init (&dev->IntrLock);
15265 +       spin_lock_init (&dev->TlbLock);
15266 +       spin_lock_init (&dev->CProcLock);
15267 +       spin_lock_init (&dev->FreeHaltLock);
15268 +       for(level=0; level<4; level++)
15269 +           spin_lock_init (&dev->Level[level].PtblLock);
15270 +       spin_lock_init (&dev->PtblGroupLock);
15271 +
15272 +       /*
15273 +        * Add the interrupt handler,  
15274 +        */
15275 +       if (request_irq(dev->Osdep.pci->irq, InterruptHandlerWrapper, 
15276 +           SA_SHIRQ, "elan3", dev) != 0) {
15277 +               printk ("elan%d: request_irq failed\n", instance);
15278 +               goto fail3;
15279 +       }
15280 +
15281 +       if (MapDeviceRegister(dev, ELAN3_BAR_COMMAND_PORT, &cmdPortAddr, 0, PAGESIZE, &handle) != ESUCCESS)
15282 +           goto fail4;
15283 +       
15284 +       if (InitialiseElan(dev, cmdPortAddr) == EFAIL) {
15285 +               printk ("elan%d: InitialiseElan failed\n", instance);
15286 +               UnmapDeviceRegister (dev, &handle);
15287 +               goto fail4;
15288 +       }
15289 +       UnmapDeviceRegister (dev, &handle);
15290 +
15291 +       /* If our nodeid is defined, then set it now */
15292 +       if (NodeId != ELAN3_INVALID_NODE && ComputePosition (&dev->Position, NodeId, NumNodes, DownLinks) == 0)
15293 +       {
15294 +           if (RandomRoutingDisabled & ((1 << (dev->Position.pos_levels-1))-1))
15295 +               printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing disabled 0x%x)\n", 
15296 +                       dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes, RandomRoutingDisabled);
15297 +           else
15298 +               printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing ok)\n",
15299 +                       dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes);
15300 +       }
15301 +
15302 +       if (BackToBackMaster || BackToBackSlave)
15303 +       {
15304 +           dev->Position.pos_mode     = ELAN_POS_MODE_BACKTOBACK;
15305 +           dev->Position.pos_nodeid   = (BackToBackMaster == 0);
15306 +           dev->Position.pos_nodes    = 2;
15307 +           dev->Position.pos_levels   = 1;
15308 +           dev->Position.pos_arity[0] = 2;
15309 +
15310 +           printk ("elan%d: back-to-back %s - elan node %d\n", dev->Instance,
15311 +                   BackToBackMaster ? "master" : "slave", dev->Position.pos_nodeid);
15312 +       }
15313 +
15314 +       elan3_procfs_device_init (dev);
15315 +       
15316 +       /* Success */
15317 +       return (0);
15318 +
15319 +fail4:
15320 +       free_irq(dev->Osdep.pci->irq, dev);
15321 +
15322 +fail3:
15323 +       kcondvar_destroy (&dev->IntrWait);
15324 +       spin_lock_destroy (&dev->IntrLock);
15325 +       spin_lock_destroy (&dev->InfoLock);
15326 +       spin_lock_destroy (&dev->TlbLock);
15327 +       spin_lock_destroy (&dev->CProcLock);
15328 +       spin_lock_destroy (&dev->FreeHaltLock);
15329 +       spin_lock_destroy (&dev->Level1PtblLock);
15330 +       spin_lock_destroy (&dev->Level2PtblLock);
15331 +       spin_lock_destroy (&dev->Level3PtblLock);
15332 +       spin_lock_destroy (&dev->PtblGroupLock);
15333 +
15334 +fail2:
15335 +       UnmapDeviceRegister (dev, &dev->RegHandle);
15336 +
15337 +fail1:
15338 +       pci_disable_device (dev->Osdep.pci);
15339 +fail0:
15340 +       KMEM_FREE(dev, sizeof(ELAN3_DEV));
15341 +
15342 +       elan3_devices[instance] = NULL;
15343 +       
15344 +       /* Failure */
15345 +       return (-ENODEV);
15346 +}
15347 +
15348 +/*
15349 + * Called by elan3_exit() for each board found on PCI.
15350 + */
15351 +static int
15352 +elandetach(int instance)
15353 +{
15354 +       ELAN3_DEV *dev = elan3_devices[instance];
15355 +
15356 +       printk("elan%d: detach\n", instance);
15357 +
15358 +       elan3_procfs_device_fini (dev);
15359 +
15360 +       FinaliseElan (dev);
15361 +
15362 +       UnmapDeviceRegister (dev, &dev->RegHandle);
15363 +
15364 +       free_irq(dev->Osdep.pci->irq, dev);
15365 +
15366 +       pci_disable_device(dev->Osdep.pci);
15367 +
15368 +       kcondvar_destroy (&dev->IntrWait);
15369 +       spin_lock_destroy (&dev->IntrLock);
15370 +       spin_lock_destroy (&dev->InfoLock);
15371 +       spin_lock_destroy (&dev->TlbLock);
15372 +       spin_lock_destroy (&dev->CProcLock);
15373 +       spin_lock_destroy (&dev->FreeHaltLock);
15374 +       spin_lock_destroy (&dev->Level1PtblLock);
15375 +       spin_lock_destroy (&dev->Level2PtblLock);
15376 +       spin_lock_destroy (&dev->Level3PtblLock);
15377 +       spin_lock_destroy (&dev->PtblGroupLock);
15378 +
15379 +       KMEM_FREE(dev, sizeof(ELAN3_DEV));
15380 +       elan3_devices[instance] = NULL; 
15381 +
15382 +       return 0;
15383 +}
15384 +
15385 +/*
15386 + * generic ioctls - available on control and user devices.
15387 + */
15388 +
15389 +static int
15390 +device_stats_ioctl (ELAN3_DEV *dev, unsigned long arg)
15391 +{
15392 +    ELAN3IO_STATS_STRUCT *args;
15393 +
15394 +    KMEM_ALLOC(args, ELAN3IO_STATS_STRUCT *, sizeof(ELAN3IO_STATS_STRUCT), TRUE);
15395 +       
15396 +    if (args == NULL)
15397 +       return (-ENOMEM);
15398 +
15399 +    if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_STATS_STRUCT)))
15400 +    {
15401 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15402 +       return (-EFAULT);
15403 +    }
15404 +
15405 +    switch (args->which)
15406 +    {
15407 +    case ELAN3_SYS_STATS_DEVICE:
15408 +       if (copy_to_user (args->ptr, &dev->Stats, sizeof (ELAN3_STATS)))
15409 +       {
15410 +           KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15411 +           return (-EFAULT);
15412 +       }
15413 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15414 +       return (0);
15415 +
15416 +    case ELAN3_SYS_STATS_MMU:
15417 +       if (copy_to_user (args->ptr, &elan3mmu_global_stats, sizeof (ELAN3MMU_GLOBAL_STATS)))
15418 +       {
15419 +           KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15420 +           return (-EFAULT);
15421 +       }
15422 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15423 +       return (0);
15424 +           
15425 +    default:
15426 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15427 +       return (-EINVAL);
15428 +    }
15429 +}
15430 +
15431 +/*
15432 + * /dev/elan3/controlX - control device
15433 + *
15434 + */
15435 +
15436 +typedef struct control_private
15437 +{
15438 +    u_int              pr_boundary_scan;
15439 +} CONTROL_PRIVATE;
15440 +
15441 +static int
15442 +control_open (struct inode *inode, struct file *file)
15443 +{
15444 +    CONTROL_PRIVATE *pr;
15445 +
15446 +    KMEM_ALLOC(pr, CONTROL_PRIVATE *, sizeof (CONTROL_PRIVATE), TRUE);
15447 +
15448 +    if (pr == NULL)
15449 +       return (-ENOMEM);
15450 +
15451 +    pr->pr_boundary_scan = 0;
15452 +    
15453 +    file->private_data = (void *) pr;
15454 +
15455 +    MOD_INC_USE_COUNT;
15456 +
15457 +    return (0);
15458 +}
15459 +
15460 +static int
15461 +control_release (struct inode *inode, struct file *file)
15462 +{
15463 +    ELAN3_DEV        *dev = elan3_devices[ELAN3_DEVICE(inode)];
15464 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
15465 +
15466 +    if (pr->pr_boundary_scan)
15467 +       ClearLinkBoundaryScan(dev);
15468 +
15469 +    KMEM_FREE (pr, sizeof(CONTROL_PRIVATE));
15470 +
15471 +    MOD_DEC_USE_COUNT;
15472 +    return (0);
15473 +}
15474 +
15475 +static int
15476 +control_ioctl (struct inode *inode, struct file *file, 
15477 +              unsigned int cmd, unsigned long arg)
15478 +{
15479 +    ELAN3_DEV        *dev = elan3_devices[ELAN3_DEVICE(inode)];
15480 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
15481 +    int                     res;
15482 +
15483 +    switch (cmd) 
15484 +    {
15485 +    case ELAN3IO_SET_BOUNDARY_SCAN:
15486 +       if (SetLinkBoundaryScan (dev) == 0)
15487 +           pr->pr_boundary_scan = 1;
15488 +       return (0);
15489 +
15490 +    case ELAN3IO_CLEAR_BOUNDARY_SCAN:
15491 +       if (pr->pr_boundary_scan == 0)
15492 +           return (-EINVAL);
15493 +
15494 +       pr->pr_boundary_scan = 0;
15495 +
15496 +       ClearLinkBoundaryScan (dev);
15497 +       return (0);
15498 +
15499 +    case ELAN3IO_READ_LINKVAL:
15500 +    {
15501 +       E3_uint32 val;
15502 +
15503 +       if (pr->pr_boundary_scan == 0)
15504 +           return (-EINVAL);
15505 +
15506 +       if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32)))
15507 +           return (-EFAULT);
15508 +
15509 +       val = ReadBoundaryScanValue (dev, val);
15510 +
15511 +       if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32)))
15512 +           return (-EFAULT);
15513 +       return (0);
15514 +    }
15515 +       
15516 +    case ELAN3IO_WRITE_LINKVAL:
15517 +    {
15518 +       E3_uint32 val;
15519 +
15520 +       if (pr->pr_boundary_scan == 0)
15521 +           return (-EINVAL);
15522 +
15523 +       if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32)))
15524 +           return (-EFAULT);
15525 +
15526 +       val = WriteBoundaryScanValue (dev, val);
15527 +
15528 +       if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32)))
15529 +           return (-EFAULT);
15530 +       
15531 +       return (0);
15532 +    }
15533 +
15534 +    case ELAN3IO_SET_POSITION:
15535 +    {
15536 +       ELAN3IO_SET_POSITION_STRUCT args;
15537 +
15538 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_POSITION_STRUCT)))
15539 +           return (-EFAULT);
15540 +       
15541 +       if (ComputePosition (&dev->Position, args.nodeId, args.numNodes, dev->Devinfo.dev_num_down_links_value) != 0)
15542 +           return (-EINVAL);
15543 +
15544 +       return (0);
15545 +    }
15546 +
15547 +    case ELAN3IO_SET_DEBUG:
15548 +    {
15549 +       ELAN3IO_SET_DEBUG_STRUCT args;
15550 +
15551 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_DEBUG_STRUCT)))
15552 +           return (-EFAULT);
15553 +
15554 +       if (! strcmp (args.what, "elan3_debug"))
15555 +           elan3_debug = args.value;
15556 +       else if (! strcmp (args.what, "elan3_debug_console"))
15557 +           elan3_debug_console = args.value;
15558 +       else if (! strcmp (args.what, "elan3_debug_buffer"))
15559 +           elan3_debug_buffer = args.value;
15560 +       else if (! strcmp (args.what, "elan3_debug_ignore_dev"))
15561 +           elan3_debug_ignore_dev = args.value;
15562 +       else if (! strcmp (args.what, "elan3_debug_ignore_ctxt"))
15563 +           elan3_debug_ignore_ctxt = args.value;
15564 +       else if (! strcmp (args.what, "elan3mmu_debug"))
15565 +           elan3mmu_debug = args.value;
15566 +       
15567 +       return (0);
15568 +    }
15569 +
15570 +    case ELAN3IO_NETERR_SERVER:
15571 +    {
15572 +       ELAN3IO_NETERR_SERVER_STRUCT args;
15573 +
15574 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_NETERR_SERVER_STRUCT)))
15575 +           return (-EFAULT);
15576 +       
15577 +       res = AddNeterrServerSyscall (args.elanid, args.addr, args.name, NULL);
15578 +       return (set_errno (res));
15579 +    }
15580 +    
15581 +    case ELAN3IO_NETERR_FIXUP:
15582 +    {
15583 +       NETERR_MSG *msg;
15584 +
15585 +       KMEM_ALLOC(msg, NETERR_MSG *, sizeof (NETERR_MSG), TRUE);
15586 +
15587 +       if (msg == NULL)
15588 +           return (set_errno (ENOMEM));
15589 +       
15590 +       if (copy_from_user (msg, (void *) arg, sizeof (NETERR_MSG)))
15591 +           res = EFAULT;
15592 +       else
15593 +           res = ExecuteNetworkErrorFixup (msg);
15594 +
15595 +       KMEM_FREE (msg, sizeof (NETERR_MSG));
15596 +       return (set_errno (res));
15597 +    }
15598 +
15599 +    case ELAN3IO_STATS:
15600 +       return (device_stats_ioctl (dev, arg));
15601 +
15602 +    case ELAN3IO_GET_DEVINFO:
15603 +    {
15604 +       if (copy_to_user ((void *) arg, &dev->Devinfo, sizeof (ELAN_DEVINFO)))
15605 +           return (-EFAULT);
15606 +       return (0);
15607 +    }
15608 +
15609 +    case ELAN3IO_GET_POSITION:
15610 +    {
15611 +       if (copy_to_user ((void *) arg, &dev->Position, sizeof (ELAN_POSITION)))
15612 +           return (-EFAULT);
15613 +       return (0);
15614 +    }
15615 +    default:
15616 +       return (-EINVAL);
15617 +    }
15618 +}
15619 +
15620 +static int
15621 +control_mmap (struct file *file, struct vm_area_struct *vma)
15622 +{
15623 +    ELAN3_DEV         *dev   = elan3_devices[ELAN3_DEVICE(file->f_dentry->d_inode)];
15624 +    int                space = OFF_TO_SPACE(vma->vm_pgoff << PAGE_SHIFT);
15625 +    int                off   = OFF_TO_OFFSET(vma->vm_pgoff << PAGE_SHIFT);
15626 +    int                size;
15627 +    ioaddr_t           addr;
15628 +    DeviceMappingHandle handle;
15629 +    physaddr_t         phys;
15630 +    
15631 +    if (space < ELAN3_BAR_SDRAM || space > ELAN3_BAR_EBUS)
15632 +       return (-EINVAL);
15633 +
15634 +    if (off < 0 || DeviceRegisterSize (dev, space, &size) != ESUCCESS || off > size)
15635 +       return (-EINVAL);
15636 +
15637 +    if (MapDeviceRegister(dev, space, &addr, off, PAGESIZE, &handle) != ESUCCESS)
15638 +       return (-EINVAL);
15639 +
15640 +    phys = kmem_to_phys((caddr_t) addr);
15641 +    UnmapDeviceRegister(dev, &handle);
15642 +
15643 +#ifdef NO_RMAP
15644 +    if (remap_page_range(vma->vm_start, phys, vma->vm_end - vma->vm_start, vma->vm_page_prot))
15645 +#else
15646 +    if (remap_page_range(vma, vma->vm_start, phys, vma->vm_end - vma->vm_start, vma->vm_page_prot))
15647 +#endif
15648 +       return (-EAGAIN);
15649 +
15650 +    return (0);
15651 +}
15652 +
15653 +/*
15654 + * /dev/elan3/sdramX - sdram access device
15655 + */
15656 +typedef struct mem_page
15657 +{
15658 +    struct mem_page *pg_next;
15659 +    sdramaddr_t      pg_addr;
15660 +    u_long          pg_pgoff;
15661 +    u_int           pg_ref;
15662 +} MEM_PAGE;
15663 +
15664 +#define MEM_HASH_SIZE  32
15665 +#define MEM_HASH(pgoff)        ((pgoff) & (MEM_HASH_SIZE-1))
15666 +
15667 +typedef struct mem_private
15668 +{
15669 +    ELAN3_DEV   *pr_dev;
15670 +    MEM_PAGE   *pr_pages[MEM_HASH_SIZE];
15671 +    spinlock_t  pr_lock;
15672 +} MEM_PRIVATE;
15673 +
15674 +static void 
15675 +mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
15676 +{
15677 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
15678 +
15679 +    elan3_sdram_free (pr->pr_dev, pg->pg_addr, PAGE_SIZE);
15680 +    KMEM_FREE (pg, sizeof(MEM_PAGE));
15681 +}
15682 +
15683 +static MEM_PAGE *
15684 +mem_getpage (MEM_PRIVATE *pr, u_long pgoff, virtaddr_t addr)
15685 +{
15686 +    int       hashval = MEM_HASH (pgoff);
15687 +    MEM_PAGE *npg = NULL;
15688 +    MEM_PAGE *pg;
15689 +
15690 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx\n", pr, pgoff, addr);
15691 +    
15692 + again:
15693 +    spin_lock (&pr->pr_lock);
15694 +    for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
15695 +       if (pg->pg_pgoff == pgoff)
15696 +           break;
15697 +    
15698 +    if (pg != NULL)
15699 +    {
15700 +       PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx -> found %p addr=%lx\n", pr, pgoff, addr, pg, pg->pg_addr);
15701 +
15702 +       pg->pg_ref++;
15703 +       spin_unlock (&pr->pr_lock);
15704 +
15705 +       if (npg != NULL)                                        /* we'd raced and someone else had created */
15706 +           mem_freepage (pr, npg);                             /* this page - so free off our new one */
15707 +       return (pg);
15708 +    }
15709 +    
15710 +    if (npg != NULL)                                           /* didn't find the page, so insert the */
15711 +    {                                                          /* new one we've just created */
15712 +       npg->pg_next = pr->pr_pages[hashval];
15713 +       pr->pr_pages[hashval] = npg;
15714 +       
15715 +       spin_unlock (&pr->pr_lock);
15716 +       return (npg);
15717 +    }
15718 +    
15719 +    spin_unlock (&pr->pr_lock);                                        /* drop spinlock before creating a new page */
15720 +    
15721 +    KMEM_ALLOC(npg, MEM_PAGE *, sizeof (MEM_PAGE), TRUE);
15722 +
15723 +    if (npg == NULL)
15724 +       return (NULL);
15725 +
15726 +    if ((npg->pg_addr = elan3_sdram_alloc (pr->pr_dev, PAGE_SIZE)) == 0)
15727 +    {
15728 +       KMEM_FREE (npg, sizeof (MEM_PAGE));
15729 +       return (NULL);
15730 +    }
15731 +
15732 +    /* zero the page before returning it to the user */
15733 +    elan3_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, PAGE_SIZE);
15734 +    
15735 +    npg->pg_pgoff = pgoff;
15736 +    npg->pg_ref   = 1;
15737 +    
15738 +    /* created a new page - so have to rescan before inserting it */
15739 +    goto again;
15740 +}
15741 +
15742 +static void
15743 +mem_droppage (MEM_PRIVATE *pr, u_long pgoff, int dontfree)
15744 +{
15745 +    MEM_PAGE **ppg;
15746 +    MEM_PAGE  *pg;
15747 +
15748 +    spin_lock (&pr->pr_lock);
15749 +    for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
15750 +       if ((*ppg)->pg_pgoff == pgoff)
15751 +           break;
15752 +
15753 +    pg = *ppg;
15754 +
15755 +    ASSERT (*ppg != NULL);
15756 +    
15757 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
15758 +
15759 +    if (--pg->pg_ref == 0 && !dontfree)
15760 +    {
15761 +       *ppg = pg->pg_next;
15762 +
15763 +       mem_freepage (pr, pg);
15764 +    }
15765 +
15766 +    spin_unlock (&pr->pr_lock);
15767 +}
15768 +
15769 +static int
15770 +mem_open (struct inode *inode, struct file *file)
15771 +{
15772 +    ELAN3_DEV    *dev = elan3_devices[ELAN3_DEVICE(inode)];
15773 +    MEM_PRIVATE *pr;
15774 +    register int i;
15775 +
15776 +    KMEM_ALLOC(pr, MEM_PRIVATE *, sizeof (MEM_PRIVATE), TRUE);
15777 +
15778 +    if (pr == NULL)
15779 +       return (-ENOMEM);
15780 +
15781 +    spin_lock_init (&pr->pr_lock);
15782 +    pr->pr_dev = dev;
15783 +    for (i = 0; i < MEM_HASH_SIZE; i++)
15784 +       pr->pr_pages[i] = NULL;
15785 +
15786 +    file->private_data = (void *) pr;
15787 +    
15788 +    MOD_INC_USE_COUNT;
15789 +    return (0);
15790 +}
15791 +
15792 +static int
15793 +mem_release (struct inode *node, struct file *file)
15794 +{
15795 +    MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data;
15796 +    MEM_PAGE    *pg, *next;
15797 +    int          i;
15798 +
15799 +    /* free off any pages that we'd allocated */
15800 +    spin_lock (&pr->pr_lock);
15801 +    for (i = 0; i < MEM_HASH_SIZE; i++)
15802 +    {
15803 +       for (pg = pr->pr_pages[i]; pg; pg = next)
15804 +       {
15805 +           next = pg->pg_next;
15806 +           mem_freepage (pr, pg);
15807 +       }
15808 +    }
15809 +    spin_unlock (&pr->pr_lock);
15810 +
15811 +    KMEM_FREE (pr, sizeof (MEM_PRIVATE));
15812 +
15813 +    MOD_DEC_USE_COUNT;
15814 +    return (0);
15815 +}
15816 +
15817 +static int
15818 +mem_ioctl (struct inode *inode, struct file *file, 
15819 +                 unsigned int cmd, unsigned long arg)
15820 +{
15821 +    return (-EINVAL);
15822 +}
15823 +
15824 +static void mem_vma_open(struct vm_area_struct *vma)
15825 +{
15826 +    MEM_PRIVATE   *pr = (MEM_PRIVATE *) vma->vm_private_data;
15827 +    unsigned long addr;
15828 +    unsigned long pgoff;
15829 +
15830 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
15831 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
15832 +
15833 +    preemptable_start {
15834 +       for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++) {
15835 +           mem_getpage (pr, pgoff, addr);
15836 +           preemptable_check();
15837 +       }
15838 +    } preemptable_end;
15839 +}
15840 +
15841 +static void mem_vma_close(struct vm_area_struct *vma)
15842 +{
15843 +    MEM_PRIVATE  *pr  = (MEM_PRIVATE *) vma->vm_private_data;
15844 +    unsigned long addr;
15845 +    unsigned long pgoff;
15846 +
15847 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
15848 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
15849 +
15850 +    /* NOTE: the call to close may not have the same vm_start/vm_end values as 
15851 +     *       were passed into mmap()/open() - since if a partial unmap had occurred
15852 +     *       then the vma could have been shrunk or even split.
15853 +     *
15854 +     *       if the vma is split then a vma_open() will be called for the top
15855 +     *       portion - thus causing the reference counts to become incorrect.
15856 +     *
15857 +     * We drop the reference to any pages we're notified about - so they get freed
15858 +     * earlier than when the device is finally released.
15859 +     */
15860 +    for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
15861 +       mem_droppage (pr, pgoff, 0);
15862 +}
15863 +
15864 +static struct vm_operations_struct mem_vm_ops = {
15865 +    open:              mem_vma_open,
15866 +    close:             mem_vma_close,
15867 +};
15868 +
15869 +static int
15870 +mem_mmap (struct file *file, struct vm_area_struct *vma)
15871 +{
15872 +    MEM_PRIVATE  *pr = (MEM_PRIVATE *) file->private_data;
15873 +    MEM_PAGE     *pg;
15874 +    unsigned long addr;
15875 +    unsigned long pgoff;
15876 +
15877 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx prot=%lx file=%p\n",
15878 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_page_prot.pgprot , file);
15879 +
15880 +    preemptable_start {
15881 +       for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
15882 +       {
15883 +           if ((pg = mem_getpage (pr, pgoff, addr)) == NULL)
15884 +               goto failed;
15885 +           
15886 +#ifdef LINUX_SPARC
15887 +           pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
15888 +           pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
15889 +#elif defined(pgprot_noncached)
15890 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
15891 +#endif
15892 +           
15893 +#if defined(__ia64__)
15894 +           if (enable_sdram_writecombining)
15895 +               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
15896 +#endif
15897 +           PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: addr %lx -> pg=%p addr=%lx phys=%llx flags=%lx prot=%lx\n",
15898 +                   addr, pg, pg->pg_addr, (long long) elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), vma->vm_flags, vma->vm_page_prot.pgprot);
15899 +           
15900 +#ifdef NO_RMAP
15901 +           if (remap_page_range (addr, elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), PAGE_SIZE, vma->vm_page_prot))
15902 +#else
15903 +           if (remap_page_range (vma, addr, elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), PAGE_SIZE, vma->vm_page_prot))
15904 +#endif
15905 +           {
15906 +               mem_droppage (pr, pgoff, 0);                    /* drop our reference to this page */
15907 +               goto failed;
15908 +           }
15909 +
15910 +           preemptable_check();
15911 +       }
15912 +    } preemptable_end;
15913 +
15914 +    /* Don't try to swap out Elan SDRAM pages.. */
15915 +    vma->vm_flags |= VM_RESERVED;
15916 +    
15917 +    /*
15918 +     * Don't dump SDRAM pages to a core file 
15919 +     * (Pity I would really like to do this but it crashes in elf_core_dump() as
15920 +     * it can only handle pages that are in the mem_map area (addy 11/01/2002))
15921 +     */
15922 +    vma->vm_flags |= VM_IO;
15923 +
15924 +    vma->vm_ops          = &mem_vm_ops;
15925 +    vma->vm_file         = file;
15926 +    vma->vm_private_data = (void *) pr;
15927 +
15928 +    return (0);
15929 +
15930 + failed:
15931 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: failed\n");
15932 +
15933 +    /* free of any pages we've already allocated/referenced */
15934 +    while ((--pgoff) >= vma->vm_pgoff)
15935 +       mem_droppage (pr, pgoff, 0);
15936 +
15937 +    return (-ENOMEM);
15938 +}
15939 +
15940 +/*
15941 + * /dev/elan3/userX - control device
15942 + *
15943 + * "user_private" can be referenced from a number of places
15944 + *   1) the "file" structure.
15945 + *   2) the "mm" ioproc ops
15946 + *   3) the "mmap" of the command port.
15947 + *
15948 + */
15949 +typedef struct user_private
15950 +{
15951 +    spinlock_t        pr_lock;
15952 +    atomic_t         pr_mappings;
15953 +    atomic_t          pr_ref;
15954 +    ELAN3_CTXT        *pr_ctxt;
15955 +    struct mm_struct *pr_mm;
15956 +    struct ioproc_ops pr_ioproc;
15957 +} USER_PRIVATE;
15958 +
15959 +static void
15960 +user_free (USER_PRIVATE *pr)
15961 +{
15962 +    /* Have to unreserve the FlagPage or else we leak memory like a sieve! */
15963 +    ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) pr->pr_ctxt->FlagPage)));
15964 +
15965 +    elan3_detach(pr->pr_ctxt);
15966 +    elan3_free (pr->pr_ctxt);
15967 +
15968 +    KMEM_FREE (pr, sizeof(USER_PRIVATE));
15969 +
15970 +    MOD_DEC_USE_COUNT;
15971 +}
15972 +
15973 +static void
15974 +user_ioproc_release (void *arg, struct mm_struct *mm)
15975 +{
15976 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15977 +
15978 +    PRINTF3 (pr->pr_ctxt, DBG_SEG, "user_ioproc_release: ctxt=%p pr=%p ref=%d\n",
15979 +            pr->pr_ctxt, pr, atomic_read (&pr->pr_ref));
15980 +
15981 +    elan3mmu_pte_ctxt_unload (pr->pr_ctxt->Elan3mmu);
15982 +
15983 +    pr->pr_mm = NULL;
15984 +
15985 +    if (atomic_dec_and_test (&pr->pr_ref))
15986 +       user_free (pr);
15987 +}
15988 +
15989 +/*
15990 + * On 2.4 kernels we get passed a mm_struct, whereas on 2.6 kernels
15991 + * we get the vma which is more usefull
15992 + */
15993 +#if defined(IOPROC_MM_STRUCT_ARG)
15994 +static void
15995 +user_ioproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
15996 +{
15997 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
15998 +
15999 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
16000 +
16001 +    ASSERT(start <= end);
16002 +
16003 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
16004 +}
16005 +
16006 +static void
16007 +user_ioproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
16008 +{
16009 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16010 +
16011 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
16012 +
16013 +    ASSERT(start <= end);
16014 +
16015 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
16016 +}
16017 +
16018 +static void
16019 +user_ioproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
16020 +{
16021 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16022 +
16023 +    ASSERT(start <= end && ((start & PAGEOFFSET) == 0) && ((end & PAGEOFFSET) == 0));
16024 +
16025 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
16026 +
16027 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu, mm,(caddr_t) start, end-start);
16028 +}
16029 +
16030 +static void
16031 +user_ioproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
16032 +{
16033 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16034 +
16035 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
16036 +
16037 +    ASSERT(start <= end);
16038 +
16039 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
16040 +}
16041 +
16042 +#else
16043 +
16044 +static void
16045 +user_ioproc_sync_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
16046 +{
16047 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16048 +
16049 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
16050 +
16051 +    ASSERT(start <= end);
16052 +
16053 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16054 +}
16055 +
16056 +static void
16057 +user_ioproc_invalidate_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
16058 +{
16059 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16060 +
16061 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
16062 +
16063 +    ASSERT(start <= end);
16064 +
16065 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16066 +}
16067 +
16068 +static void
16069 +user_ioproc_update_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
16070 +{
16071 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16072 +
16073 +    ASSERT(start <= end && ((start & PAGEOFFSET) == 0) && ((end & PAGEOFFSET) == 0));
16074 +
16075 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
16076 +
16077 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16078 +}
16079 +
16080 +static void
16081 +user_ioproc_change_protection (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
16082 +{
16083 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16084 +
16085 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
16086 +
16087 +    ASSERT(start <= end);
16088 +
16089 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16090 +}
16091 +#endif /* defined(IOPROC_NO_VMA_RANGE) */
16092 +
16093 +static void
16094 +user_ioproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
16095 +{
16096 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16097 +
16098 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_page: addr=%lx\n", addr);
16099 +
16100 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
16101 +}
16102 +
16103 +static void
16104 +user_ioproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
16105 +{
16106 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16107 +    
16108 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_page: addr=%lx\n", addr);
16109 +
16110 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
16111 +}
16112 +
16113 +static void
16114 +user_ioproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
16115 +{
16116 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16117 +
16118 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_page: addr=%lx\n", addr);
16119 +
16120 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu,vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
16121 +}
16122 +
16123 +int
16124 +user_ptrack_handler (void *arg, int phase, struct task_struct *child)
16125 +{
16126 +    USER_PRIVATE *pr   = (USER_PRIVATE *) arg;
16127 +    ELAN3_CTXT    *ctxt = pr->pr_ctxt;
16128 +
16129 +    PRINTF5 (pr->pr_ctxt, DBG_FN, "user_ptrack_handler: ctxt=%p pr=%p ref=%d phase %d mm->ref %d\n", 
16130 +            pr->pr_ctxt, pr, atomic_read (&pr->pr_ref), phase, atomic_read (&current->mm->mm_count));
16131 +
16132 +    if (phase == PTRACK_PHASE_EXIT)
16133 +    {
16134 +       /* this will force the helper thread to exit */
16135 +       elan3_swapout (ctxt, CTXT_EXITING);
16136 +       
16137 +       if (atomic_dec_and_test (&pr->pr_ref))
16138 +           user_free (pr);
16139 +    }  
16140 +    return PTRACK_FINISHED;
16141 +}
16142 +
16143 +static int
16144 +user_open (struct inode *inode, struct file *file)
16145 +{
16146 +    ELAN3_DEV     *dev = elan3_devices[ELAN3_DEVICE(inode)];
16147 +    USER_PRIVATE *pr;
16148 +    ELAN3_CTXT    *ctxt;
16149 +
16150 +    if (dev == NULL)
16151 +       return (-ENXIO);
16152 +
16153 +    KMEM_ALLOC(pr, USER_PRIVATE *, sizeof (USER_PRIVATE), TRUE);
16154 +
16155 +    if (pr == NULL)
16156 +       return (-ENOMEM);
16157 +    
16158 +    if ((ctxt = elan3_alloc (dev, 0)) == NULL)
16159 +    {
16160 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
16161 +       return (-ENOMEM);
16162 +    }
16163 +
16164 +    if (sys_init (ctxt) == NULL)
16165 +    {
16166 +       elan3_detach(ctxt);
16167 +       elan3_free (ctxt);
16168 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
16169 +       return (-ENOMEM);
16170 +    }
16171 +
16172 +    /* initialise refcnt to 3 - one for "file", one for XA handler, one for the ioproc ops */
16173 +    atomic_set (&pr->pr_ref, 3);
16174 +
16175 +    atomic_set (&pr->pr_mappings, 0);
16176 +    spin_lock_init (&pr->pr_lock);
16177 +
16178 +    pr->pr_ctxt = ctxt;
16179 +    pr->pr_mm   = current->mm;
16180 +
16181 +    /* register an ptrack handler to force the helper thread to exit when we do */
16182 +    if (ptrack_register (user_ptrack_handler, pr) < 0)
16183 +    {
16184 +       elan3_detach(ctxt);
16185 +       elan3_free (ctxt);
16186 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
16187 +       return (-ENOMEM);
16188 +    }
16189 +
16190 +    /* register a ioproc callback to notify us of translation changes */
16191 +    
16192 +    pr->pr_ioproc.arg               = (void *) pr;
16193 +    pr->pr_ioproc.release           = user_ioproc_release;
16194 +    pr->pr_ioproc.sync_range        = user_ioproc_sync_range;
16195 +    pr->pr_ioproc.invalidate_range  = user_ioproc_invalidate_range;
16196 +    pr->pr_ioproc.update_range      = user_ioproc_update_range;
16197 +    pr->pr_ioproc.change_protection = user_ioproc_change_protection;
16198 +    pr->pr_ioproc.sync_page         = user_ioproc_sync_page;
16199 +    pr->pr_ioproc.invalidate_page   = user_ioproc_invalidate_page;
16200 +    pr->pr_ioproc.update_page       = user_ioproc_update_page;
16201 +    
16202 +    spin_lock (&current->mm->page_table_lock);
16203 +    ioproc_register_ops (current->mm, &pr->pr_ioproc);
16204 +    spin_unlock (&current->mm->page_table_lock);
16205 +
16206 +    file->private_data = (void *) pr;
16207 +
16208 +    PRINTF2 (pr->pr_ctxt, DBG_FN, "user_open: done ctxt=%p pr=%p\n", ctxt, pr);
16209 +
16210 +    MOD_INC_USE_COUNT;
16211 +    return (0);
16212 +}
16213 +
16214 +static int
16215 +user_release (struct inode *inode, struct file *file)
16216 +{
16217 +    USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
16218 +    
16219 +    PRINTF3 (pr->pr_ctxt, DBG_FN, "user_release: ctxt=%p pr=%p ref=%d\n", pr->pr_ctxt, pr,
16220 +            atomic_read (&pr->pr_ref));
16221 +
16222 +    if (atomic_dec_and_test (&pr->pr_ref))
16223 +       user_free (pr);
16224 +
16225 +    return (0);
16226 +}
16227 +
16228 +static int
16229 +user_ioctl (struct inode *inode, struct file *file, 
16230 +           unsigned int cmd, unsigned long arg)
16231 +{
16232 +    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
16233 +    ELAN3_CTXT    *ctxt = pr->pr_ctxt;
16234 +    SYS_CTXT     *sctx = (SYS_CTXT *) ctxt->Private;
16235 +    int           res  = 0;
16236 +
16237 +    if (current->mm != pr->pr_mm)
16238 +       return (-EINVAL);
16239 +    
16240 +    PRINTF4 (ctxt, DBG_FN, "user_ioctl: ctxt=%p cmd=%x(%d) arg=%lx\n", ctxt, cmd, _IOC_NR(cmd), arg);
16241 +
16242 +    switch (cmd)
16243 +    {
16244 +    case ELAN3IO_FREE:
16245 +       if (atomic_read (&pr->pr_mappings) > 0)
16246 +           return (-EINVAL);
16247 +       
16248 +       spin_lock (&current->mm->page_table_lock);
16249 +       if (pr->pr_mm != current->mm)
16250 +           spin_unlock (&current->mm->page_table_lock);
16251 +       else
16252 +       {
16253 +           ioproc_unregister_ops (current->mm, &pr->pr_ioproc);
16254 +           spin_unlock (&current->mm->page_table_lock);
16255 +
16256 +           user_ioproc_release (pr, current->mm);
16257 +       }
16258 +
16259 +       if (ptrack_registered (user_ptrack_handler, pr))
16260 +       {
16261 +           ptrack_deregister (user_ptrack_handler, pr);
16262 +           user_ptrack_handler (pr, PTRACK_PHASE_EXIT, NULL);
16263 +       }
16264 +       break;
16265 +       
16266 +    case ELAN3IO_ATTACH:
16267 +    {
16268 +       ELAN_CAPABILITY *cap;
16269 +
16270 +       KMEM_ALLOC(cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE);
16271 +
16272 +       if (cap == NULL)
16273 +           return (set_errno (EFAULT));
16274 +
16275 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
16276 +           res = EFAULT;
16277 +       else
16278 +       {
16279 +           if ((res = elan3_attach (ctxt, cap)) == 0)
16280 +           {
16281 +               if (copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
16282 +               {
16283 +                   elan3_detach (ctxt);
16284 +                   res = EFAULT;
16285 +               }
16286 +           }
16287 +       }
16288 +       KMEM_FREE (cap, sizeof(ELAN_CAPABILITY));
16289 +       break;
16290 +    }
16291 +    
16292 +    case ELAN3IO_DETACH:
16293 +       spin_lock (&pr->pr_lock);
16294 +       if (atomic_read (&pr->pr_mappings) > 0)
16295 +           res = EINVAL;
16296 +       else
16297 +           elan3_detach (ctxt);
16298 +       spin_unlock (&pr->pr_lock);
16299 +       break;
16300 +
16301 +    case ELAN3IO_ADDVP:
16302 +    {
16303 +       ELAN3IO_ADDVP_STRUCT *args;
16304 +
16305 +       KMEM_ALLOC(args, ELAN3IO_ADDVP_STRUCT *, sizeof (ELAN3IO_ADDVP_STRUCT), TRUE);
16306 +
16307 +       if (args == NULL)
16308 +           return (set_errno (ENOMEM));
16309 +       
16310 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_ADDVP_STRUCT)))
16311 +           res = EFAULT;
16312 +       else
16313 +       {
16314 +           if ( (res=elan3_addvp (ctxt, args->process, &args->capability)) != 0)
16315 +               PRINTF0 (ctxt, DBG_FN, "ELAN3IO_ADDVP elan3_addvp failed \n");  
16316 +       }
16317 +
16318 +       KMEM_FREE (args, sizeof (ELAN3IO_ADDVP_STRUCT));
16319 +       break;
16320 +    }
16321 +
16322 +    case ELAN3IO_REMOVEVP:
16323 +       res = elan3_removevp (ctxt, arg);
16324 +       break;
16325 +       
16326 +    case ELAN3IO_BCASTVP:
16327 +    {
16328 +       ELAN3IO_BCASTVP_STRUCT args;
16329 +
16330 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_BCASTVP_STRUCT)))
16331 +           return (-EFAULT);
16332 +       
16333 +       res = elan3_addbcastvp (ctxt, args.process, args.lowvp, args.highvp);
16334 +       break;
16335 +    }
16336 +
16337 +    case ELAN3IO_LOAD_ROUTE:
16338 +    {
16339 +       ELAN3IO_LOAD_ROUTE_STRUCT args;
16340 +
16341 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
16342 +           return (-EFAULT);
16343 +       
16344 +       res = elan3_load_route (ctxt, args.process, args.flits);
16345 +       break;
16346 +    }
16347 +
16348 +    case ELAN3IO_CHECK_ROUTE:
16349 +    {
16350 +       ELAN3IO_CHECK_ROUTE_STRUCT args;
16351 +
16352 +       args.routeError = 0;
16353 +
16354 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
16355 +           return (-EFAULT);
16356 +       
16357 +       if ((res = elan3_check_route (ctxt, args.process, args.flits, & args.routeError)) ==  ESUCCESS)
16358 +       {
16359 +           if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
16360 +               return (-EFAULT);
16361 +       }
16362 +       break;
16363 +    }
16364 +
16365 +    case ELAN3IO_PROCESS_2_LOCATION:
16366 +    {
16367 +       ELAN3IO_PROCESS_2_LOCATION_STRUCT args;
16368 +       ELAN_LOCATION                    loc;
16369 +
16370 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT)))
16371 +           return (-EFAULT);
16372 +
16373 +       krwlock_write (&ctxt->VpLock);
16374 +       loc = ProcessToLocation (ctxt, NULL, args.process , NULL);
16375 +       krwlock_done (&ctxt->VpLock);
16376 +
16377 +       args.loc = loc;
16378 +
16379 +       if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT)))
16380 +           return (-EFAULT);
16381 +
16382 +       break;
16383 +    }
16384 +
16385 +    case ELAN3IO_GET_ROUTE:
16386 +    {
16387 +       ELAN3IO_GET_ROUTE_STRUCT args;
16388 +
16389 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_ROUTE_STRUCT)))
16390 +           return (-EFAULT);
16391 +       
16392 +       if ((res = elan3_get_route (ctxt, args.process, args.flits)) ==  ESUCCESS)
16393 +       {
16394 +           if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_GET_ROUTE_STRUCT)))
16395 +               return (-EFAULT);
16396 +       }
16397 +       break;
16398 +    }
16399 +
16400 +    case ELAN3IO_RESET_ROUTE:
16401 +    {
16402 +       ELAN3IO_RESET_ROUTE_STRUCT args;
16403 +
16404 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_RESET_ROUTE_STRUCT)))
16405 +           return (-EFAULT);
16406 +       
16407 +       res = elan3_reset_route (ctxt, args.process);
16408 +       break;
16409 +    }
16410 +
16411 +    case ELAN3IO_VP2NODEID:
16412 +    {
16413 +       ELAN3IO_VP2NODEID_STRUCT *vp2nodeId;
16414 +       ELAN_LOCATION           location;
16415 +
16416 +       KMEM_ALLOC (vp2nodeId, ELAN3IO_VP2NODEID_STRUCT *, sizeof(ELAN3IO_VP2NODEID_STRUCT), TRUE);
16417 +       if (vp2nodeId == NULL) 
16418 +           return (set_errno (ENOMEM));
16419 +
16420 +       if (copy_from_user (vp2nodeId, (void *) arg, sizeof (ELAN3IO_VP2NODEID_STRUCT))) {
16421 +           KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
16422 +           return (-EFAULT);
16423 +       }
16424 +
16425 +       krwlock_write (&ctxt->VpLock);
16426 +       location = ProcessToLocation (ctxt, NULL, vp2nodeId->process , NULL);
16427 +       krwlock_done (&ctxt->VpLock);
16428 +
16429 +       vp2nodeId->nodeId = location.loc_node;
16430 +       if (copy_to_user ( (void *) arg, vp2nodeId, sizeof (ELAN3IO_VP2NODEID_STRUCT))) {
16431 +           KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
16432 +           return (-EFAULT);
16433 +       }
16434 +
16435 +       KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
16436 +
16437 +       break;
16438 +    }
16439 +
16440 +    case ELAN3IO_PROCESS:
16441 +       return (elan3_process (ctxt));
16442 +
16443 +    case ELAN3IO_SETPERM:
16444 +    {
16445 +       ELAN3IO_SETPERM_STRUCT args;
16446 +
16447 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SETPERM_STRUCT)))
16448 +           return (-EFAULT);
16449 +
16450 +       res = elan3mmu_setperm (ctxt->Elan3mmu, args.maddr, args.eaddr, args.len, args.perm);
16451 +       break;
16452 +    }
16453 +
16454 +    case ELAN3IO_CLEARPERM:
16455 +    {
16456 +       ELAN3IO_CLEARPERM_STRUCT args;
16457 +
16458 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CLEARPERM_STRUCT)))
16459 +           return (-EFAULT);
16460 +
16461 +       elan3mmu_clrperm (ctxt->Elan3mmu, args.eaddr, args.len);
16462 +       break;
16463 +    }
16464 +
16465 +    case ELAN3IO_CHANGEPERM:
16466 +    {
16467 +       ELAN3IO_CHANGEPERM_STRUCT args;
16468 +
16469 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CHANGEPERM_STRUCT)))
16470 +           return (-EFAULT);
16471 +
16472 +       res = EINVAL;
16473 +       break;
16474 +    }
16475 +
16476 +    case ELAN3IO_HELPER_THREAD:
16477 +       res = elan3_lwp (ctxt);
16478 +       break;
16479 +       
16480 +    case ELAN3IO_WAITCOMMAND:
16481 +       res = WaitForCommandPort (ctxt);
16482 +       break;
16483 +
16484 +    case ELAN3IO_BLOCK_INPUTTER:
16485 +       elan3_block_inputter (ctxt, arg);
16486 +       break;
16487 +
16488 +    case ELAN3IO_SET_FLAGS:
16489 +       sctx->Flags = arg;
16490 +       break;
16491 +
16492 +    case ELAN3IO_SET_SIGNAL:
16493 +       sctx->signal = arg;
16494 +       break;
16495 +
16496 +    case ELAN3IO_WAITEVENT:
16497 +       res = sys_waitevent (ctxt, (E3_Event *) arg);
16498 +       break;
16499 +
16500 +    case ELAN3IO_ALLOC_EVENTCOOKIE:
16501 +       res = cookie_alloc_cookie (sctx->Table, arg);
16502 +       break;
16503 +
16504 +    case ELAN3IO_FREE_EVENTCOOKIE:
16505 +       res = cookie_free_cookie (sctx->Table, arg);
16506 +       break;
16507 +
16508 +    case ELAN3IO_ARM_EVENTCOOKIE:
16509 +       res = cookie_arm_cookie (sctx->Table, arg);
16510 +       break;
16511 +
16512 +    case ELAN3IO_WAIT_EVENTCOOKIE:
16513 +       res = cookie_wait_cookie (sctx->Table, arg);
16514 +       break;
16515 +
16516 +    case ELAN3IO_SWAPSPACE:
16517 +       if (fuword (&((SYS_SWAP_SPACE *) arg)->Magic) != SYS_SWAP_MAGIC)
16518 +           return (set_errno (EINVAL));
16519 +       
16520 +       ((SYS_CTXT *) ctxt->Private)->Swap = (SYS_SWAP_SPACE *) arg;
16521 +       break;
16522 +
16523 +    case ELAN3IO_EXCEPTION_SPACE:
16524 +       if (fuword (&((SYS_EXCEPTION_SPACE *) arg)->Magic) != SYS_EXCEPTION_MAGIC)
16525 +           return (set_errno (EINVAL));
16526 +
16527 +       ((SYS_CTXT *) ctxt->Private)->Exceptions = (SYS_EXCEPTION_SPACE *) arg;
16528 +       break;
16529 +
16530 +    case ELAN3IO_GET_EXCEPTION:
16531 +    {
16532 +       SYS_EXCEPTION *exception;
16533 +
16534 +       if (((SYS_CTXT *) ctxt->Private)->Exceptions == NULL)
16535 +           return (set_errno (EINVAL));
16536 +       
16537 +       KMEM_ALLOC(exception, SYS_EXCEPTION *, sizeof (SYS_EXCEPTION), TRUE);
16538 +
16539 +       if (exception == NULL)
16540 +           return (set_errno (ENOMEM));
16541 +
16542 +       if ((res = sys_getException (((SYS_CTXT *) ctxt->Private), exception)) == 0 &&
16543 +           copy_to_user ((void *) arg, exception, sizeof (SYS_EXCEPTION)))
16544 +           res = EFAULT;
16545 +       
16546 +       KMEM_FREE (exception, sizeof (SYS_EXCEPTION));
16547 +       break;
16548 +    }
16549 +    
16550 +    case ELAN3IO_UNLOAD:
16551 +    {
16552 +       ELAN3MMU             *elan3mmu = ctxt->Elan3mmu;
16553 +       ELAN3IO_UNLOAD_STRUCT args;
16554 +       int                   span;
16555 +       unsigned long         flags;
16556 +       E3_Addr               eaddr;
16557 +       caddr_t               addr;
16558 +       size_t                len;
16559 +
16560 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_UNLOAD_STRUCT)))
16561 +           return (-EFAULT);
16562 +
16563 +       addr = (caddr_t) args.addr;
16564 +       len  = args.len;
16565 +
16566 +       if (((unsigned long) addr & PAGEMASK) || (len & PAGEMASK) || (len < 0))
16567 +           return -EINVAL;
16568 +
16569 +       spin_lock_irqsave (&elan3mmu->elan3mmu_lock, flags);
16570 +       for (; len; len -= span, addr += span)
16571 +       {
16572 +           ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
16573 +           
16574 +           if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr)
16575 +               span = len;
16576 +           else if (rgn->rgn_mbase > addr)
16577 +               span = MIN(len, rgn->rgn_mbase - addr);
16578 +           else
16579 +           {
16580 +               span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr);
16581 +               eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
16582 +               
16583 +               elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD);
16584 +           }
16585 +       }
16586 +       spin_unlock_irqrestore (&elan3mmu->elan3mmu_lock, flags);
16587 +       
16588 +       return 0;
16589 +    }
16590 +
16591 +    case ELAN3IO_GET_DEVINFO:
16592 +    {
16593 +       ELAN3IO_GET_DEVINFO_STRUCT args;
16594 +
16595 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_DEVINFO_STRUCT)))
16596 +           return (-EFAULT);
16597 +       
16598 +       if (copy_to_user ((void *) args.devinfo, &ctxt->Device->Devinfo, sizeof (ELAN_DEVINFO))) 
16599 +           res = EFAULT;
16600 +       break;
16601 +    }
16602 +
16603 +    case ELAN3IO_GET_POSITION:
16604 +    {
16605 +       ELAN3IO_GET_POSITION_STRUCT args;
16606 +
16607 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_POSITION_STRUCT)))
16608 +           return (-EFAULT);   
16609 +
16610 +       if (copy_to_user ((void *) args.position, &ctxt->Device->Position, sizeof (ELAN_POSITION)))
16611 +           res = EFAULT;
16612 +       break;
16613 +    }
16614 +
16615 +    default:
16616 +       return (-EINVAL);
16617 +    }
16618 +
16619 +    return (res ? set_errno (res) : 0);
16620 +}
16621 +
16622 +static void user_vma_open(struct vm_area_struct *vma)
16623 +{
16624 +    USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
16625 +
16626 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
16627 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
16628 +
16629 +    if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
16630 +       if (atomic_dec_and_test (&pr->pr_mappings))
16631 +           pr->pr_ctxt->CommandPageMapping = NULL;
16632 +}
16633 +
16634 +static void user_vma_close(struct vm_area_struct *vma)
16635 +{
16636 +    USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
16637 +
16638 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
16639 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
16640 +
16641 +    if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
16642 +       atomic_inc (&pr->pr_mappings);
16643 +}
16644 +
16645 +static struct vm_operations_struct user_vm_ops = {
16646 +    open:              user_vma_open,
16647 +    close:             user_vma_close,
16648 +};
16649 +
16650 +static int
16651 +user_mmap (struct file *file, struct vm_area_struct *vma)
16652 +{
16653 +    USER_PRIVATE  *pr   = (USER_PRIVATE *) file->private_data;
16654 +    ELAN3_CTXT     *ctxt = pr->pr_ctxt; 
16655 +    ioaddr_t       ioaddr;
16656 +
16657 +    /* 
16658 +     * NOTE - since we need to maintain the reference count on
16659 +     *        the user_private we only permit single page 
16660 +     *        mmaps - this means that we will certainly see
16661 +     *        the correct number of closes to maintain the
16662 +     *        the reference count correctly.
16663 +     */
16664 +    
16665 +    if ((vma->vm_end - vma->vm_start) != PAGE_SIZE)
16666 +       return (-EINVAL);
16667 +
16668 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx flags=%lx prot=%lx file=%p\n",
16669 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags, vma->vm_page_prot.pgprot, vma->vm_file);
16670 +
16671 +    switch (vma->vm_pgoff)
16672 +    {
16673 +    default:
16674 +       return (-EINVAL);
16675 +       
16676 +    case ELAN3IO_OFF_COMMAND_PAGE:
16677 +       spin_lock (&pr->pr_lock);
16678 +       if (ctxt->CommandPage == (ioaddr_t) 0 || atomic_read (&pr->pr_mappings) != 0)
16679 +       {
16680 +           PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: command port - %s\n", ctxt->CommandPort ? "already mapped" : "not attached");
16681 +           spin_unlock (&pr->pr_lock);
16682 +           return (-EINVAL);
16683 +       }
16684 +#ifdef LINUX_SPARC
16685 +       pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
16686 +       pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
16687 +#elif defined(pgprot_noncached)
16688 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
16689 +#endif
16690 +
16691 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: commandport at %lx phys %llx prot %lx\n", 
16692 +               vma->vm_start, (unsigned long long) kmem_to_phys ((void *) ctxt->CommandPort), vma->vm_page_prot.pgprot);
16693 +
16694 +       /* Don't try to swap out physical pages.. */
16695 +       vma->vm_flags |= VM_RESERVED;
16696 +    
16697 +       /*
16698 +        * Don't dump addresses that are not real memory to a core file.
16699 +        */
16700 +       vma->vm_flags |= VM_IO;
16701 +
16702 +#ifdef NO_RMAP
16703 +       if (remap_page_range (vma->vm_start, kmem_to_phys ((void *) ctxt->CommandPage), vma->vm_end - vma->vm_start, vma->vm_page_prot))
16704 +#else 
16705 +       if (remap_page_range (vma, vma->vm_start, kmem_to_phys ((void *) ctxt->CommandPage), vma->vm_end - vma->vm_start, vma->vm_page_prot))
16706 +#endif
16707 +       {
16708 +           spin_unlock (&pr->pr_lock);
16709 +           return (-EAGAIN);
16710 +       }
16711 +       ctxt->CommandPageMapping = (void *) vma->vm_start;
16712 +       
16713 +       atomic_inc (&pr->pr_mappings);
16714 +       
16715 +       spin_unlock (&pr->pr_lock);
16716 +       break;
16717 +
16718 +    case ELAN3IO_OFF_UREG_PAGE:
16719 +#ifdef LINUX_SPARC
16720 +       pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
16721 +       pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
16722 +#elif defined(pgprot_noncached)
16723 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
16724 +#endif
16725 +       ioaddr = ctxt->Device->RegPtr + (offsetof (E3_Regs, URegs) & PAGEMASK);
16726 +
16727 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: user_regs at %lx phys %llx prot %lx\n", vma->vm_start, 
16728 +               (unsigned long long) kmem_to_phys ((void *) ioaddr), vma->vm_page_prot.pgprot);
16729 +
16730 +       /* Don't try to swap out physical pages.. */
16731 +       vma->vm_flags |= VM_RESERVED;
16732 +    
16733 +       /*
16734 +        * Don't dump addresses that are not real memory to a core file.
16735 +        */
16736 +       vma->vm_flags |= VM_IO;
16737 +
16738 +#ifdef NO_RMAP
16739 +       if (remap_page_range (vma->vm_start, kmem_to_phys ((void *) ioaddr),
16740 +#else
16741 +       if (remap_page_range (vma, vma->vm_start, kmem_to_phys ((void *) ioaddr),
16742 +#endif
16743 +                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
16744 +           return (-EAGAIN);
16745 +       break;
16746 +       
16747 +    case ELAN3IO_OFF_FLAG_PAGE:
16748 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: flag page at %lx phys %llx\n", vma->vm_start, 
16749 +               (unsigned long long) kmem_to_phys ((void *) ctxt->FlagPage));
16750 +
16751 +       /* we do not want to have this area swapped out, lock it */
16752 +       vma->vm_flags |= VM_LOCKED;
16753 +
16754 +       /* Mark the page as reserved or else the remap_page_range() doesn't remap it */
16755 +       SetPageReserved(pte_page(*find_pte_kernel((unsigned long) ctxt->FlagPage)));
16756 +       
16757 +#ifdef NO_RMAP
16758 +       if (remap_page_range (vma->vm_start, kmem_to_phys ((void *) ctxt->FlagPage),
16759 +#else
16760 +       if (remap_page_range (vma, vma->vm_start, kmem_to_phys ((void *) ctxt->FlagPage),
16761 +#endif
16762 +                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
16763 +           return (-EAGAIN);
16764 +       break;
16765 +    }
16766 +
16767 +    ASSERT (vma->vm_ops == NULL);
16768 +    
16769 +    vma->vm_ops          = &user_vm_ops;
16770 +    vma->vm_file         = file;
16771 +    vma->vm_private_data = (void *) pr;
16772 +    
16773 +    return (0);
16774 +}
16775 +
16776 +/* driver entry points */
16777 +static int
16778 +elan3_open (struct inode *inode, struct file *file)
16779 +{
16780 +    if (elan3_devices[ELAN3_DEVICE(inode)] == NULL)
16781 +       return (-ENXIO);
16782 +
16783 +    PRINTF (DBG_DEVICE, DBG_FN, "elan3_open: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file);
16784 +    
16785 +    switch (ELAN3_MINOR (inode))
16786 +    {
16787 +    case ELAN3_MINOR_CONTROL:
16788 +       return (control_open (inode, file));
16789 +    case ELAN3_MINOR_MEM:
16790 +       return (mem_open (inode, file));
16791 +    case ELAN3_MINOR_USER:
16792 +       return (user_open (inode, file));
16793 +    default:
16794 +       return (-ENXIO);
16795 +    }
16796 +}
16797 +
16798 +static int
16799 +elan3_release (struct inode *inode, struct file *file)
16800 +{
16801 +    PRINTF (DBG_DEVICE, DBG_FN, "elan3_release: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file);
16802 +    
16803 +    switch (ELAN3_MINOR (inode))
16804 +    {
16805 +    case ELAN3_MINOR_CONTROL:
16806 +       return (control_release (inode, file));
16807 +    case ELAN3_MINOR_MEM:
16808 +       return (mem_release (inode, file));
16809 +    case ELAN3_MINOR_USER:
16810 +       return (user_release (inode, file));
16811 +    default:
16812 +       return (-ENXIO);
16813 +    }
16814 +}
16815 +
16816 +static int
16817 +elan3_ioctl (struct inode *inode, struct file *file, 
16818 +            unsigned int cmd, unsigned long arg)
16819 +{
16820 +    switch (ELAN3_MINOR (inode))
16821 +    {
16822 +    case ELAN3_MINOR_CONTROL:
16823 +       return (control_ioctl (inode, file, cmd, arg));
16824 +    case ELAN3_MINOR_MEM:
16825 +       return (mem_ioctl (inode, file, cmd, arg));
16826 +    case ELAN3_MINOR_USER:
16827 +       return (user_ioctl (inode, file, cmd, arg));
16828 +    default:
16829 +       return (-ENXIO);
16830 +    }
16831 +}
16832 +
16833 +
16834 +static int
16835 +elan3_mmap (struct file *file, struct vm_area_struct *vma)
16836 +{
16837 +    PRINTF (DBG_DEVICE, DBG_SEG, "elan3_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx flags=%lx prot=%lx\n", 
16838 +           ELAN3_DEVICE (file->f_dentry->d_inode), ELAN3_MINOR (file->f_dentry->d_inode),
16839 +           vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags, vma->vm_page_prot.pgprot);
16840 +
16841 +    switch (ELAN3_MINOR (file->f_dentry->d_inode))
16842 +    {
16843 +    case ELAN3_MINOR_CONTROL:
16844 +       return (control_mmap (file, vma));
16845 +    case ELAN3_MINOR_MEM:
16846 +       return (mem_mmap (file, vma));
16847 +    case ELAN3_MINOR_USER:
16848 +       return (user_mmap (file, vma));
16849 +    default:
16850 +       return (-ENXIO);
16851 +    }
16852 +}
16853 +
16854 +static irqreturn_t
16855 +InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs)
16856 +{
16857 +    if (InterruptHandler ((ELAN3_DEV *)dev_id) == 0)
16858 +       return IRQ_HANDLED;
16859 +    else
16860 +       return IRQ_NONE;
16861 +}
16862 +
16863 +
16864 +/* 
16865 + * Elan specific PCI configuration registers.
16866 + */
16867 +
16868 +#define PCI_CONF_PARITY_PHYS_LO         0x40
16869 +#define PCI_CONF_PARITY_PHYS_HI         0x44
16870 +#define PCI_CONF_PARITY_PHASE_ADDR      0x46
16871 +#define PCI_CONF_PARITY_MASTER_TYPE     0x47
16872 +#define PCI_CONF_ELAN3_CTRL              0x48
16873 +
16874 +#define ECTRL_EXTEND_LATENCY            (1 << 0)
16875 +#define ECTRL_ENABLE_PREFETCH           (1 << 1)
16876 +#define ECTRL_SOFTWARE_INTERNAL_RESET   (1 << 2)
16877 +#define ECTRL_REDUCED_RETRY_RATE        (1 << 3)
16878 +#define ECTRL_CLOCK_DIVIDE_RATE_SHIFT   4
16879 +#define ECTRL_COMMS_DIVIDE_RATE_SHIFT   10
16880 +#define ECTRL_FORCE_COMMSCLK_LOCAL      (1 << 14)
16881 +
16882 +/*
16883 + * Configure PCI.
16884 + */
16885 +static int
16886 +ConfigurePci(ELAN3_DEV *dev)
16887 +{
16888 +       struct pci_dev *pci = dev->Osdep.pci;
16889 +       u32 rom_address;
16890 +
16891 +       if (pci_enable_device(pci))
16892 +           return (ENXIO);
16893 +
16894 +       /* disable ROM */
16895 +       pci_read_config_dword(pci, PCI_ROM_ADDRESS, &rom_address);
16896 +       rom_address &= ~PCI_ROM_ADDRESS_ENABLE;
16897 +       pci_write_config_dword(pci, PCI_ROM_ADDRESS, rom_address);
16898 +       mb();
16899 +
16900 +       /* this is in 32-bit WORDS */
16901 +       pci_write_config_byte(pci, PCI_CACHE_LINE_SIZE, (64 >> 2));
16902 +       mb();
16903 +
16904 +       /* allow 40 ticks to respond, 16 data phases */
16905 +       pci_write_config_byte(pci, PCI_LATENCY_TIMER, 255);
16906 +       mb();
16907 +
16908 +       /* don't enable PCI_COMMAND_SERR--see note in elandev_dunix.c */
16909 +       pci_write_config_word(pci, PCI_COMMAND, PCI_COMMAND_MEMORY 
16910 +           | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY);
16911 +       mb();
16912 +
16913 +       return ESUCCESS;
16914 +}
16915 +
16916 +/* 
16917 + * Reset chip to a known state.
16918 + */
16919 +static int
16920 +ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr)
16921 +{
16922 +       struct pci_dev *pci = dev->Osdep.pci;
16923 +       int instance = dev->Instance;
16924 +       u32 val;
16925 +       u8 revid;
16926 +       int CasLatency;
16927 +       int res;
16928 +
16929 +       /* determine rev of board */
16930 +       pci_read_config_byte(pci, PCI_REVISION_ID, &revid);
16931 +
16932 +       /* GNAT 2328 - don't set ECTRL_ENABLE_PREFETCH on Elan rev A */
16933 +       val = ECTRL_EXTEND_LATENCY | (39 << ECTRL_CLOCK_DIVIDE_RATE_SHIFT)
16934 +           | (6 << ECTRL_COMMS_DIVIDE_RATE_SHIFT);
16935 +       switch (revid) 
16936 +       {
16937 +               case PCI_REVISION_ID_ELAN3_REVA:
16938 +                       printk("elan%d: is an elan3 (revision a) - not supported\n", instance);
16939 +                       return (EFAIL);
16940 +
16941 +               case PCI_REVISION_ID_ELAN3_REVB:        
16942 +                       val |= ECTRL_ENABLE_PREFETCH;
16943 +                       if (BackToBackMaster)
16944 +                               val |= ECTRL_FORCE_COMMSCLK_LOCAL;
16945 +                       printk("elan%d: is an elan3 (revision b)\n", instance);
16946 +                       break;
16947 +               default:
16948 +                       printk("elan%d: unsupported elan3 revision %d\n", 
16949 +                           instance, revid);
16950 +                       return EFAIL;
16951 +       }
16952 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
16953 +       mb();
16954 +
16955 +       /*
16956 +        * GNAT: 2474
16957 +        * Hit reset on the Elan, then we MUST initialise the schedule status
16958 +        * register to drive reset on the link before the link can come out
16959 +        * of reset (15 uS). We need to keep it like this until we've 
16960 +        * initialised SDRAM
16961 +        */
16962 +       pci_read_config_dword(pci, PCI_CONF_ELAN3_CTRL, &val);
16963 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, 
16964 +           val | ECTRL_SOFTWARE_INTERNAL_RESET);
16965 +       mb();
16966 +
16967 +       /* Read the Vital Product Data to determine the cas latency */
16968 +       if ((res = ReadVitalProductData (dev, &CasLatency)) != ESUCCESS)
16969 +           return (res);
16970 +
16971 +       /*
16972 +        * Now clear the Software internal reset bit, and start the sdram
16973 +        */
16974 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
16975 +       mb();
16976 +
16977 +       /* 
16978 +        * Enable SDRAM before sizing and initalising it for ECC.
16979 +        * NOTE: don't enable all sets of the cache (yet), nor ECC 
16980 +        */
16981 +       dev->Cache_Control_Reg = (CasLatency | REFRESH_RATE_16US);
16982 +
16983 +       write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg | SETUP_SDRAM));
16984 +       mb();
16985 +
16986 +       INIT_SCHED_STATUS(dev, Sched_Initial_Value);
16987 +
16988 +       /*
16989 +        * Set the interrupt mask to 0 and enable the interrupt PAL
16990 +        * by writing any value to it.
16991 +        */
16992 +       SET_INT_MASK (dev, 0);
16993 +       writeb (0, intPalAddr);
16994 +
16995 +       return ESUCCESS;
16996 +}
16997 +
16998 +/*
16999 + * Determine the size of elan PCI address spaces.  EFAIL is returned if 
17000 + * unused or invalid BAR is specified, or if board reports I/O mapped space.
17001 + */
17002 +int
17003 +DeviceRegisterSize(ELAN3_DEV *dev, int rnumber, int *sizep)
17004 +{
17005 +       struct pci_dev *pdev = dev->Osdep.pci;
17006 +
17007 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
17008 +       *sizep = pci_resource_size(pdev, rnumber);
17009 +#else
17010 +       *sizep = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
17011 +#endif
17012 +       return ESUCCESS;
17013 +}
17014 +
17015 +/*
17016 + * Map PCI memory into kernel virtual address space.  On the alpha, 
17017 + * we just return appropriate kseg address, and Unmap is a no-op.
17018 + */
17019 +int
17020 +MapDeviceRegister(ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp,
17021 +                 int off, int len, DeviceMappingHandle *handlep)
17022 +{      
17023 +       struct pci_dev *pdev = dev->Osdep.pci;
17024 +
17025 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
17026 +       u64 base = pci_get_base_address(pdev, rnumber);
17027 +       *addrp = (ioaddr_t) pci_base_to_kseg(base + off, pdev->bus->number);
17028 +
17029 +#else
17030 +       if (len == 0)
17031 +           len = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
17032 +       
17033 +       if (len == 0)
17034 +           return (EINVAL);
17035 +
17036 +       *addrp = (ioaddr_t) ioremap_nocache (pci_resource_start(pdev, rnumber) + off, len);
17037 +#endif
17038 +
17039 +       *handlep = (void *) *addrp;
17040 +
17041 +       return (*addrp ? ESUCCESS : ENOMEM);
17042 +}
17043 +void
17044 +UnmapDeviceRegister(ELAN3_DEV *dev, DeviceMappingHandle *handlep)
17045 +{
17046 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
17047 +    iounmap (*handlep);
17048 +#endif
17049 +}
17050 +
17051 +void
17052 +ElanBusError (ELAN3_DEV *dev)
17053 +{
17054 +       struct pci_dev  *pci = dev->Osdep.pci;  
17055 +       u8  phaseaddr, type;
17056 +       u16 status, cmd, physhi;
17057 +       u32 physlo;
17058 +
17059 +       printk("elan%d: bus error occurred\n", dev->Instance);
17060 +
17061 +       pci_read_config_word (pci, PCI_STATUS,                  &status);
17062 +       pci_read_config_word (pci, PCI_COMMAND,                 &cmd);
17063 +       pci_read_config_dword(pci, PCI_CONF_PARITY_PHYS_LO,     &physlo);
17064 +       pci_read_config_word (pci, PCI_CONF_PARITY_PHYS_HI,     &physhi);
17065 +       pci_read_config_byte (pci, PCI_CONF_PARITY_PHASE_ADDR,  &phaseaddr); 
17066 +       pci_read_config_byte (pci, PCI_CONF_PARITY_MASTER_TYPE, &type);
17067 +
17068 +#define PCI_CONF_STAT_FORMAT   "\20" \
17069 +       "\6SIXTY_SIX_MHZ\7UDF\10FAST_BACK\11PARITY" \
17070 +       "\14SIG_TARGET_ABORT\15REC_TARGET_ABORT\16REC_MASTER_ABORT" \
17071 +       "\17SIG_SYSTEM_ERROR\20DETECTED_PARITY"
17072 +
17073 +       printk ("elan%d: status %x cmd %4x physaddr %04x%08x phase %x type %x\n",
17074 +               dev->Instance, status, cmd, physhi, physlo, phaseaddr, type);
17075 +}
17076 +
17077 +/*
17078 + * Local variables:
17079 + * c-file-style: "stroustrup"
17080 + * End:
17081 + */
17082 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/elansyscall.c
17083 ===================================================================
17084 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/elansyscall.c        2004-02-23 16:02:56.000000000 -0500
17085 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/elansyscall.c     2005-07-28 14:52:52.805685272 -0400
17086 @@ -0,0 +1,1230 @@
17087 +/*
17088 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
17089 + * 
17090 + *    For licensing information please see the supplied COPYING file
17091 + *
17092 + */
17093 +
17094 +#ident "@(#)$Id: elansyscall.c,v 1.99.2.1 2004/10/28 17:08:56 david Exp $"
17095 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elansyscall.c,v $*/
17096 +
17097 +#include <qsnet/kernel.h>
17098 +#include <qsnet/autoconf.h>
17099 +
17100 +#include <elan/elanmod.h>
17101 +#include <elan3/elanregs.h>
17102 +#include <elan3/elandev.h>
17103 +#include <elan3/elanvp.h>
17104 +#include <elan3/elan3mmu.h>
17105 +#include <elan3/elanctxt.h>
17106 +#include <elan3/elandebug.h>
17107 +#include <elan3/elansyscall.h>
17108 +#include <elan/devinfo.h>
17109 +
17110 +static int       sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap);
17111 +static int       sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep);
17112 +static int       sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep);
17113 +static void      sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_uint32 value);
17114 +static void      sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr);
17115 +static void      sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *item);
17116 +static void      sys_freeWordItem (ELAN3_CTXT *ctxt, void *item);
17117 +static void      sys_freeBlockItem (ELAN3_CTXT *ctxt, void *item);
17118 +static int       sys_countItems (ELAN3_CTXT *ctxt, int list);
17119 +static int       sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
17120 +static void      sys_swapin (ELAN3_CTXT *ctxt);
17121 +static void      sys_swapout (ELAN3_CTXT *ctxt);
17122 +static void      sys_freePrivate (ELAN3_CTXT *ctxt);
17123 +static int       sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef);
17124 +static int       sys_startFaultCheck (ELAN3_CTXT *ctxt);
17125 +static void      sys_endFaultCheck (ELAN3_CTXT *ctxt);
17126 +static E3_uint8  sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr);
17127 +static void      sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
17128 +static E3_uint16 sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr);
17129 +static void      sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
17130 +static E3_uint32 sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr);
17131 +static void      sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
17132 +static E3_uint64 sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr);
17133 +static void      sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
17134 +
17135 +static ELAN3_OPS elan3_sys_ops = {
17136 +    ELAN3_OPS_VERSION,         /* Version */
17137 +
17138 +    sys_exception,             /* Exception */
17139 +    sys_getWordItem,           /* GetWordItem */
17140 +    sys_getBlockItem,          /* GetBlockItem */
17141 +    sys_putWordItem,           /* PutWordItem */
17142 +    sys_putBlockItem,          /* PutBlockItem */
17143 +    sys_putbackItem,           /* PutbackItem */
17144 +    sys_freeWordItem,          /* FreeWordItem */
17145 +    sys_freeBlockItem,         /* FreeBlockItem */
17146 +    sys_countItems,            /* CountItems */
17147 +    sys_event,                 /* Event */
17148 +    sys_swapin,                        /* Swapin */
17149 +    sys_swapout,               /* Swapout */
17150 +    sys_freePrivate,           /* FreePrivate */
17151 +    sys_fixupNetworkError,     /* FixupNetworkError */
17152 +    NULL,                      /* DProcTrap */
17153 +    NULL,                      /* TProcTrap */
17154 +    NULL,                      /* IProcTrap */
17155 +    NULL,                      /* CProcTrap */
17156 +    NULL,                      /* CProcReissue */
17157 +    sys_startFaultCheck,       /* StartFaultCheck */
17158 +    sys_endFaultCheck,          /* EndFaultCheck */
17159 +    sys_load8,                 /* Load8 */
17160 +    sys_store8,                        /* Store8 */
17161 +    sys_load16,                        /* Load16 */
17162 +    sys_store16,               /* Store16 */
17163 +    sys_load32,                        /* Load32 */
17164 +    sys_store32,               /* Store32 */
17165 +    sys_load64,                        /* Load64 */
17166 +    sys_store64                        /* Store64 */
17167 +};
17168 +
17169 +va_list null_valist;
17170 +
17171 +SYS_CTXT *
17172 +sys_init (ELAN3_CTXT *ctxt)
17173 +{
17174 +    SYS_CTXT *sctx;
17175 +
17176 +    /* Allocate and initialise the context private data */
17177 +    KMEM_ZALLOC (sctx, SYS_CTXT *, sizeof  (SYS_CTXT), TRUE);
17178 +
17179 +    if (sctx == NULL)
17180 +       return ((SYS_CTXT *) NULL);
17181 +
17182 +    sctx->Swap    = NULL;
17183 +    sctx->Armed   = 0;
17184 +    sctx->Backoff = 1;
17185 +    sctx->Table   = cookie_alloc_table ((unsigned long) ELAN3_MY_TASK_HANDLE(), 0);
17186 +    sctx->signal  = SIGSEGV;
17187 +
17188 +    if (sctx->Table == NULL)
17189 +    {
17190 +       KMEM_FREE (sctx, sizeof (SYS_CTXT));
17191 +       return ((SYS_CTXT *) NULL);
17192 +    }
17193 +
17194 +    kmutex_init  (&sctx->Lock);
17195 +    spin_lock_init (&sctx->WaitLock);
17196 +    kcondvar_init (&sctx->NetworkErrorWait);
17197 +    
17198 +    /* Install my context operations and private data */
17199 +    ctxt->Operations = &elan3_sys_ops;
17200 +    ctxt->Private    = (void *) sctx;
17201 +    
17202 +    return (sctx);
17203 +}
17204 +
17205 +/* returns -ve on error or ELAN_CAP_OK or ELAN_CAP_RMS */
17206 +/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */
17207 +int 
17208 +elan3_validate_cap(ELAN3_DEV *dev, ELAN_CAPABILITY *cap ,int use)
17209 +{
17210 +     /* Don't allow a user process to attach to system context */
17211 +    if (ELAN3_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN3_SYSTEM_CONTEXT (cap->cap_highcontext)
17212 +       || cap->cap_lowcontext <= ELAN_USER_BASE_CONTEXT_NUM  || cap->cap_highcontext <= ELAN_USER_BASE_CONTEXT_NUM)
17213 +    {
17214 +       PRINTF2 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: lctx %x hctx %x \n",cap->cap_lowcontext,  cap->cap_highcontext);
17215 +       PRINTF3 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: bit %x  low %x high %x\n", ((cap->cap_lowcontext) & SYS_CONTEXT_BIT),
17216 +                E3_NUM_CONTEXT_0, ELAN3_KCOMM_BASE_CONTEXT_NUM);
17217 +
17218 +
17219 +       PRINTF0 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: user process cant attach to system cap\n");
17220 +       return (-EINVAL);
17221 +    }
17222 +
17223 +    if (cap->cap_type & ELAN_CAP_TYPE_HWTEST)
17224 +    {
17225 +       if (!(cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)) /* cant have a bit map */
17226 +       {
17227 +           PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST must have ELAN_CAP_TYPE_NO_BITMAP\n");
17228 +           return (-EINVAL);
17229 +       }
17230 +       
17231 +       if (cap->cap_lowcontext != cap->cap_highcontext) 
17232 +       {
17233 +           PRINTF2 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST (cap->cap_lowcontext != cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext) ;
17234 +           return (-EINVAL);
17235 +       }
17236 +       
17237 +       if ( ! (ELAN3_HWTEST_CONTEXT(cap->cap_lowcontext) && ELAN3_HWTEST_CONTEXT(cap->cap_highcontext)))
17238 +       {
17239 +           PRINTF3 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST HWTEST_BASE_CONTEXT %d %d %d \n" , ELAN3_HWTEST_BASE_CONTEXT_NUM,cap->cap_lowcontext ,ELAN3_HWTEST_TOP_CONTEXT_NUM);
17240 +           return (-EINVAL);
17241 +       }
17242 +       
17243 +       if (cap->cap_lownode != ELAN_CAP_UNINITIALISED || cap->cap_highnode != ELAN_CAP_UNINITIALISED)
17244 +       {
17245 +           PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST nodes != ELAN_CAP_UNINITIALISED\n");
17246 +           return (-EINVAL);
17247 +       }
17248 +
17249 +       return ELAN_CAP_OK;
17250 +    }
17251 +
17252 +    return elanmod_classify_cap(&dev->Position, cap, use);
17253 +}
17254 +
17255 +int
17256 +sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event)
17257 +{
17258 +    SYS_CTXT    *sctx = (SYS_CTXT *) ctxt->Private;
17259 +    EVENT_COOKIE cookie;
17260 +
17261 +    if (ctxt->Device->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVA)
17262 +       return (EINVAL);
17263 +
17264 +    cookie = fuword ((int *) &event->ev_Type) & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY);
17265 +
17266 +    if (cookie_alloc_cookie (sctx->Table, cookie) != ESUCCESS)
17267 +       return (EINVAL);
17268 +
17269 +    cookie_arm_cookie (sctx->Table, cookie);
17270 +
17271 +    if (fuword ((int *) &event->ev_Count) > 0)
17272 +       cookie_wait_cookie (sctx->Table, cookie);
17273 +    
17274 +    cookie_free_cookie (sctx->Table, cookie);
17275 +    
17276 +    return (ESUCCESS);
17277 +}
17278 +
17279 +static void *
17280 +sys_getItem (SYS_SWAP_SPACE *sp, int list)
17281 +{
17282 +    void *itemp = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]);
17283 +    void *next;
17284 +    
17285 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_getItem: sp=%p list=%d head=%p itemp=%p\n",
17286 +            sp, list, &sp->ItemListsHead[list], itemp);
17287 +    
17288 +    if (itemp == NULL)
17289 +       return (NULL);
17290 +
17291 +    next = (void *) fuptr_noerr ((void *) itemp);
17292 +
17293 +    suptr_noerr ((void *) &sp->ItemListsHead[list], (void *) next);
17294 +    if (next == NULL)
17295 +       suptr_noerr ((void *) &sp->ItemListsTailp[list], (void *)&sp->ItemListsHead[list]);
17296 +    return (itemp);
17297 +}
17298 +
17299 +static void
17300 +sys_putItemBack (SYS_SWAP_SPACE *sp, int list, void *itemp)
17301 +{
17302 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemBack: sp=%p list=%d itemp=%p value=%08x\n",
17303 +            sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value));
17304 +
17305 +    suptr_noerr ((void **) itemp, NULL);                                                       /* item->Next = NULL */
17306 +    suptr_noerr ((void **) fuptr_noerr ((void **) &sp->ItemListsTailp[list]), (void *)itemp);  /* *Tailp = item */
17307 +    suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp);                         /* Tailp = &item->Next */
17308 +}
17309 +
17310 +static void
17311 +sys_putItemFront (SYS_SWAP_SPACE *sp, int list, void *itemp)
17312 +{
17313 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemFront: sp=%p list=%d itemp=%p value=%08x\n",
17314 +            sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value));
17315 +
17316 +    suptr_noerr ((void **) itemp, fuptr_noerr ((void **) &sp->ItemListsHead[list]));           /* item->Next = Head */
17317 +    suptr_noerr ((void **) &sp->ItemListsHead[list], (void *) itemp);                          /* Head = item */
17318 +
17319 +    if (fuptr_noerr ((void **) &sp->ItemListsTailp[list]) == (void *) &sp->ItemListsHead[list])        /* if (Tailp == &Head) */
17320 +       suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp);                      /*    Tailp = &Item->Next */
17321 +}
17322 +
17323 +
17324 +static int
17325 +sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep)
17326 +{
17327 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17328 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17329 +    SYS_WORD_ITEM  *item;
17330 +    int                    res;
17331 +    label_t        ljb;
17332 +
17333 +    kmutex_lock (&sctx->Lock);
17334 +    
17335 +    if (on_fault (&ljb))
17336 +    {
17337 +       no_fault();
17338 +       kmutex_unlock (&sctx->Lock);
17339 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17340 +       return (0);
17341 +    }
17342 +
17343 +    item = (SYS_WORD_ITEM *) sys_getItem (sp, list);
17344 +
17345 +    if (item == NULL)
17346 +       res = 0;
17347 +    else
17348 +    {
17349 +       if (list == LIST_DMA_PTR)
17350 +           sctx->Armed = TRUE;
17351 +
17352 +       *itemp  = (void *) item;
17353 +       *valuep = (E3_Addr) fuword_noerr ((E3_int32 *) &item->Value);
17354 +
17355 +       PRINTF3 (ctxt, DBG_SYSCALL, "sys_getWordItem: list=%d -> item=%p value=%08x\n", list, *itemp, *valuep);
17356 +
17357 +       res = 1;
17358 +    }
17359 +    
17360 +    no_fault();
17361 +    kmutex_unlock (&sctx->Lock);
17362 +
17363 +    return (res);
17364 +}
17365 +
17366 +static int
17367 +sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep)
17368 +{
17369 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17370 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17371 +    SYS_BLOCK_ITEM *item;
17372 +    int                    res;
17373 +    label_t        ljb;
17374 +
17375 +    kmutex_lock (&sctx->Lock);
17376 +    
17377 +    if (on_fault (&ljb))
17378 +    {
17379 +       no_fault();
17380 +       kmutex_unlock (&sctx->Lock);
17381 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17382 +       return (0);
17383 +    }
17384 +
17385 +    item = sys_getItem (sp, list);
17386 +
17387 +    if (item == NULL)
17388 +       res = 0;
17389 +    else
17390 +    {
17391 +       E3_uint32 *dest = fuptr_noerr ((void **) &item->Pointer);
17392 +
17393 +       if (list == LIST_DMA_DESC)
17394 +           sctx->Armed = TRUE;
17395 +
17396 +       *itemp  = (void *) item;
17397 +       *valuep = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t) dest);
17398 +
17399 +       PRINTF3 (ctxt, DBG_SYSCALL, "sys_getBlockItem: list=%d -> item=%p addr=%08x\n", list, *itemp, *valuep);
17400 +       PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17401 +                fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), 
17402 +                fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3]));
17403 +       PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17404 +                fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]),
17405 +                fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7]));
17406 +
17407 +       
17408 +       res = 1;
17409 +    }
17410 +    
17411 +    no_fault();
17412 +    kmutex_unlock (&sctx->Lock);
17413 +
17414 +    return (res);
17415 +}
17416 +
17417 +static void
17418 +sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_Addr value)
17419 +{
17420 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17421 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17422 +    SYS_WORD_ITEM  *item;
17423 +    label_t        ljp;
17424 +
17425 +    kmutex_lock (&sctx->Lock);
17426 +
17427 +    PRINTF2 (ctxt,DBG_SYSCALL, "sys_putWordItem: list=%x value=%x\n", list, value);
17428 +
17429 +    if (on_fault (&ljp))
17430 +    {
17431 +       no_fault();
17432 +       kmutex_unlock (&sctx->Lock);
17433 +       
17434 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17435 +       return;
17436 +    }
17437 +
17438 +    item = sys_getItem (sp, LIST_FREE_WORD);
17439 +
17440 +    PRINTF1 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p\n", item);
17441 +
17442 +    if (item == NULL)
17443 +    {
17444 +       no_fault();
17445 +       kmutex_unlock (&sctx->Lock);
17446 +       
17447 +       sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist);
17448 +       return;
17449 +    }
17450 +    
17451 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: storing value=%08x at %p\n", value, &item->Value);
17452 +
17453 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p value=%08x\n", item, value);
17454 +
17455 +    suword_noerr ((E3_int32 *) &item->Value, value);                                           /* write "value" into item */
17456 +
17457 +    sys_putItemBack (sp, list, item);
17458 +
17459 +    no_fault();
17460 +    kmutex_unlock (&sctx->Lock);
17461 +}
17462 +
17463 +static void
17464 +sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr)
17465 +{
17466 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17467 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17468 +    SYS_BLOCK_ITEM *item;
17469 +    label_t        ljp;
17470 +    E3_uint32      *source;
17471 +    E3_uint32      *dest;
17472 +
17473 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: list=%x ptr=%p\n", list, ptr);
17474 +
17475 +    kmutex_lock (&sctx->Lock);
17476 +    
17477 +    if (on_fault (&ljp))
17478 +    {
17479 +       no_fault();
17480 +       kmutex_unlock (&sctx->Lock);
17481 +       
17482 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17483 +       return;
17484 +    }
17485 +
17486 +    item = sys_getItem (sp, LIST_FREE_BLOCK);                  /* get an item from the freelist. */
17487 +
17488 +    if (item == NULL)
17489 +    {
17490 +       no_fault();
17491 +       kmutex_unlock (&sctx->Lock);
17492 +       
17493 +       sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist);
17494 +       return;
17495 +    }
17496 +
17497 +    /*
17498 +     * The block will have been read using 64 bit reads,  since we have
17499 +     * to write it to user memory using 32 bit writes, we need to perform
17500 +     * an endian swap on the Ultrasparc.
17501 +     */
17502 +    dest   = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer);
17503 +    source = (E3_uint32 *) ptr;
17504 +
17505 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: item=%p dest=%p\n",item, dest);
17506 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17507 +           source[0^WordEndianFlip], source[1^WordEndianFlip], source[2^WordEndianFlip], source[3^WordEndianFlip]);
17508 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17509 +            source[4^WordEndianFlip], source[5^WordEndianFlip], source[6^WordEndianFlip], source[7^WordEndianFlip]);
17510 +
17511 +    suword_noerr ((E3_int32 *) &dest[7], (E3_int32) source[7^WordEndianFlip]);
17512 +    suword_noerr ((E3_int32 *) &dest[6], (E3_int32) source[6^WordEndianFlip]);
17513 +    suword_noerr ((E3_int32 *) &dest[5], (E3_int32) source[5^WordEndianFlip]);
17514 +    suword_noerr ((E3_int32 *) &dest[4], (E3_int32) source[4^WordEndianFlip]);
17515 +    suword_noerr ((E3_int32 *) &dest[3], (E3_int32) source[3^WordEndianFlip]);
17516 +    suword_noerr ((E3_int32 *) &dest[2], (E3_int32) source[2^WordEndianFlip]);
17517 +    suword_noerr ((E3_int32 *) &dest[1], (E3_int32) source[1^WordEndianFlip]);
17518 +    suword_noerr ((E3_int32 *) &dest[0], (E3_int32) source[0^WordEndianFlip]);
17519 +
17520 +    sys_putItemBack (sp, list, item);                          /* chain onto list of items. */
17521 +
17522 +    no_fault();
17523 +    kmutex_unlock (&sctx->Lock);
17524 +}
17525 +
17526 +static void
17527 +sys_freeWordItem (ELAN3_CTXT *ctxt, void *itemp)
17528 +{
17529 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17530 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17531 +    label_t        ljp;
17532 +
17533 +    kmutex_lock (&sctx->Lock);
17534 +    
17535 +    if (on_fault (&ljp))
17536 +    {
17537 +       no_fault();
17538 +       kmutex_unlock (&sctx->Lock);
17539 +       
17540 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_WORD, (void *) NULL, null_valist);
17541 +       return;
17542 +    }
17543 +
17544 +    sys_putItemBack (sp, LIST_FREE_WORD, itemp);
17545 +
17546 +    no_fault();
17547 +    kmutex_unlock (&sctx->Lock);
17548 +}
17549 +
17550 +static void
17551 +sys_freeBlockItem (ELAN3_CTXT *ctxt, void *itemp)
17552 +{
17553 +    SYS_CTXT       *sctx = (SYS_CTXT *) ctxt->Private;
17554 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17555 +    SYS_BLOCK_ITEM *item = (SYS_BLOCK_ITEM *)itemp;
17556 +    E3_uint32      *dest;
17557 +    label_t        ljp;
17558 +
17559 +    kmutex_lock (&sctx->Lock);
17560 +    
17561 +    if (on_fault (&ljp))
17562 +    {
17563 +       no_fault();
17564 +       kmutex_unlock (&sctx->Lock);
17565 +       
17566 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_BLOCK, (void *) NULL, null_valist);
17567 +       return;
17568 +    }
17569 +#ifdef DEBUG_PRINTF
17570 +    dest = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer);
17571 +
17572 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_freeBlockItem: item=%p dest=%p\n", item, dest);
17573 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17574 +            fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), 
17575 +            fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3]));
17576 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17577 +            fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]),
17578 +            fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7]));
17579 +#endif
17580 +
17581 +    sys_putItemBack (sp, LIST_FREE_BLOCK, itemp);
17582 +
17583 +    no_fault();
17584 +    kmutex_unlock (&sctx->Lock);
17585 +}
17586 +
17587 +static void
17588 +sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *itemp)
17589 +{
17590 +    SYS_CTXT       *sctx = (SYS_CTXT *) ctxt->Private;
17591 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17592 +    label_t        ljp;
17593 +
17594 +    kmutex_lock (&sctx->Lock);
17595 +    
17596 +    if (on_fault (&ljp))
17597 +    {
17598 +       no_fault();
17599 +       kmutex_unlock (&sctx->Lock);
17600 +       
17601 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17602 +       return;
17603 +    }
17604 +
17605 +    sys_putItemFront (sp, list, itemp);
17606 +
17607 +    no_fault();
17608 +    kmutex_unlock (&sctx->Lock);
17609 +}
17610 +
17611 +static int
17612 +sys_countItems (ELAN3_CTXT *ctxt, int list)
17613 +{
17614 +    SYS_CTXT      *sctx  = (SYS_CTXT *) ctxt->Private;
17615 +    SYS_SWAP_SPACE *sp    = sctx->Swap;
17616 +    int                    count = 0;
17617 +    void          *item;
17618 +    label_t        ljb;
17619 +
17620 +    kmutex_lock (&sctx->Lock);
17621 +    
17622 +    if (on_fault (&ljb))
17623 +    {
17624 +       no_fault();
17625 +       kmutex_unlock (&sctx->Lock);
17626 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17627 +       return (0);
17628 +    }
17629 +
17630 +    for (item = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]); 
17631 +        item != NULL;
17632 +        item = (void *) fuptr_noerr ((void **) item))
17633 +    {
17634 +       count++;
17635 +    }
17636 +
17637 +    no_fault();
17638 +    kmutex_unlock (&sctx->Lock);
17639 +
17640 +    return (count);
17641 +}
17642 +
17643 +
17644 +long sys_longTime;
17645 +long sys_shortTime;
17646 +int  sys_waitTicks;
17647 +int  sys_maxBackoff;
17648 +
17649 +#define SYS_LONG_TIME          MAX((hz * 5) / 1000, 1)         /* 5 ms */
17650 +#define SYS_SHORT_TIME         MAX((hz * 2) / 1000, 1)         /* 2 ms */
17651 +#define SYS_WAIT_TICKS         MAX((hz * 1) / 1000, 1)         /* 1 ms  - backoff granularity */
17652 +#define SYS_MAX_BACKOFF                MAX((hz * 5) / 1000, 1)         /* 5 ms  - max backoff for "nacked" packets*/
17653 +#define SYS_TIMEOUT_BACKOFF    MAX((hz * 10) / 1000, 1)        /* 10 ms - backoff for output timeout (point to point) */
17654 +#define SYS_BCAST_BACKOFF      MAX((hz * 50) / 1000, 1)        /* 50 ms - backoff for output timeout (broadcast) */
17655 +#define SYS_NETERR_BACKOFF     MAX((hz * 10) / 1000, 1)        /* 10 ms - delay for network error in dma data */
17656 +
17657 +static void
17658 +sys_backoffWait (ELAN3_CTXT *ctxt, int ticks)
17659 +{
17660 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
17661 +    long      t;
17662 +
17663 +    spin_lock (&sctx->WaitLock);
17664 +
17665 +    t = lbolt - sctx->Time;
17666 +
17667 +    if (sys_longTime   == 0) sys_longTime   = SYS_LONG_TIME;
17668 +    if (sys_shortTime  == 0) sys_shortTime  = SYS_SHORT_TIME;
17669 +    if (sys_waitTicks  == 0) sys_waitTicks  = SYS_WAIT_TICKS;
17670 +    if (sys_maxBackoff == 0) sys_maxBackoff = SYS_MAX_BACKOFF;
17671 +
17672 +    if (t > sys_longTime)                                      /* It's a long time since the last trap */
17673 +       sctx->Backoff = 0;                                      /* so set the backoff back down to 0 */
17674 +
17675 +    if (ticks)
17676 +    {
17677 +       PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d ticks [%lx]\n", ticks, t);
17678 +       kcondvar_timedwait (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + ticks);
17679 +    }
17680 +    else if (sctx->Armed)
17681 +    {
17682 +       if (t < sys_shortTime)                                  /* It's been a short time since the last */
17683 +       {                                                       /* trap, so increase the backoff */
17684 +           sctx->Backoff++;
17685 +           
17686 +           if (sctx->Backoff > sys_maxBackoff)
17687 +               sctx->Backoff = sys_maxBackoff;
17688 +       }
17689 +
17690 +       PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d [%lx]\n", sctx->Backoff, t);
17691 +
17692 +       if (sctx->Backoff)
17693 +           kcondvar_timedwaitsig (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + sctx->Backoff * sys_waitTicks);
17694 +
17695 +       sctx->Armed = 0;
17696 +    }
17697 +    else
17698 +    {
17699 +       PRINTF1 (ctxt, DBG_DPROC, "sys_backoffWait : Not Waiting - %d\n", sctx->Backoff);
17700 +
17701 +    }
17702 +    sctx->Time = lbolt;
17703 +
17704 +    spin_unlock (&sctx->WaitLock);
17705 +}
17706 +
17707 +static int
17708 +trapSize (int proc)
17709 +{
17710 +    switch (proc)
17711 +    {
17712 +    case DMA_PROC:     return (sizeof (DMA_TRAP));
17713 +    case THREAD_PROC:  return (sizeof (THREAD_TRAP));
17714 +    case COMMAND_PROC: return (sizeof (COMMAND_TRAP));
17715 +    case INPUT_PROC:   return (sizeof (INPUT_TRAP));
17716 +    default:           return (0);
17717 +    }
17718 +}
17719 +
17720 +static int
17721 +sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trapp, va_list ap)
17722 +{
17723 +    SYS_CTXT *sctx  = (SYS_CTXT *) ctxt->Private;
17724 +    int              res;
17725 +
17726 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_exception: type %d proc %d\n", type, proc);
17727 +
17728 +    switch (type)
17729 +    {
17730 +    case EXCEPTION_INVALID_ADDR:
17731 +    {
17732 +       E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *);
17733 +       int              res       = va_arg (ap, int);
17734 +       
17735 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, res, 0);
17736 +       break;
17737 +    }
17738 +    
17739 +    case EXCEPTION_UNIMP_INSTR:
17740 +    {
17741 +       E3_uint32 instr = va_arg (ap, E3_uint32);
17742 +       
17743 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, instr);
17744 +       break;
17745 +    }
17746 +    
17747 +    case EXCEPTION_INVALID_PROCESS:
17748 +    {
17749 +       E3_uint32 vproc = va_arg (ap, E3_uint32);
17750 +       int       res  = va_arg (ap, int);
17751 +
17752 +       switch (proc)
17753 +       {
17754 +       case DMA_PROC:
17755 +           if (sctx->Flags & ELAN3_SYS_FLAG_DMA_BADVP)
17756 +           {
17757 +               DMA_TRAP *trap = (DMA_TRAP *) trapp;
17758 +
17759 +               if (trap->Desc.s.dma_direction != DMA_WRITE)
17760 +                   trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent;
17761 +
17762 +               trap->Desc.s.dma_direction       = DMA_WRITE;
17763 +               trap->Desc.s.dma_size            = 0;
17764 +               trap->Desc.s.dma_source          = (E3_Addr) 0;
17765 +               trap->Desc.s.dma_dest            = (E3_Addr) 0;
17766 +               trap->Desc.s.dma_destEvent       = (E3_Addr) 0;
17767 +               trap->Desc.s.dma_destCookieVProc = 0;
17768 +               trap->Desc.s.dma_srcCookieVProc  = 0;
17769 +               
17770 +               return (OP_IGNORE);
17771 +           }
17772 +           break;
17773 +
17774 +       case THREAD_PROC:
17775 +           if (sctx->Flags & ELAN3_SYS_FLAG_THREAD_BADVP)
17776 +           {
17777 +               THREAD_TRAP *trap = (THREAD_TRAP *) trapp;
17778 +
17779 +               trap->TrapBits.s.PacketAckValue = E3_PAckError;
17780 +               
17781 +               return (OP_IGNORE);
17782 +           }
17783 +           break;
17784 +       }
17785 +           
17786 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, vproc);
17787 +       break;
17788 +    }
17789 +    
17790 +    case EXCEPTION_FAULTED:
17791 +    {
17792 +       E3_Addr addr = va_arg (ap, E3_Addr);
17793 +
17794 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, addr);
17795 +       break;
17796 +    }
17797 +    
17798 +    case EXCEPTION_QUEUE_OVERFLOW:
17799 +    {
17800 +       E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *);
17801 +       int              trapType  = va_arg (ap, int);
17802 +       
17803 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, 0, trapType);
17804 +       break;
17805 +    }
17806 +    
17807 +    case EXCEPTION_COMMAND_OVERFLOW:
17808 +    {
17809 +       int count = va_arg (ap, int);
17810 +       
17811 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, count);
17812 +       break;
17813 +    }
17814 +    
17815 +    case EXCEPTION_CHAINED_EVENT:
17816 +    {
17817 +       E3_Addr addr = va_arg (ap, E3_Addr);
17818 +       
17819 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, addr);
17820 +       break;
17821 +    }
17822 +    
17823 +    case EXCEPTION_DMA_RETRY_FAIL:
17824 +    case EXCEPTION_PACKET_TIMEOUT:
17825 +       if (proc != DMA_PROC)
17826 +           sys_backoffWait (ctxt, SYS_TIMEOUT_BACKOFF);
17827 +       else
17828 +       {
17829 +           DMA_TRAP *trap = (DMA_TRAP *) trapp;
17830 +           
17831 +           if (sctx->Flags & ELAN3_SYS_FLAG_DMAFAIL)
17832 +           {
17833 +               E3_BlockCopyEvent *event;
17834 +
17835 +               if (trap->Desc.s.dma_direction != DMA_WRITE)
17836 +                   trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent;
17837 +
17838 +               /* change the source word to be E3_EVENT_FAILED */
17839 +               if ((event = (E3_BlockCopyEvent *) elan3mmu_mainaddr (ctxt->Elan3mmu, trap->Desc.s.dma_srcEvent)) == NULL)
17840 +               {
17841 +                   sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
17842 +                   break;
17843 +               }
17844 +
17845 +               suword (&event->ev_Source, E3_EVENT_FAILED);
17846 +               wmb(); mmiob();
17847 +               
17848 +               trap->Desc.s.dma_direction       = DMA_WRITE;
17849 +               trap->Desc.s.dma_size            = 0;
17850 +               trap->Desc.s.dma_source          = (E3_Addr) 0;
17851 +               trap->Desc.s.dma_dest            = (E3_Addr) 0;
17852 +               trap->Desc.s.dma_destEvent       = (E3_Addr) 0;
17853 +               trap->Desc.s.dma_destCookieVProc = 0;
17854 +               trap->Desc.s.dma_srcCookieVProc  = 0;
17855 +               
17856 +               return (OP_IGNORE);
17857 +           }
17858 +
17859 +           if (type == EXCEPTION_DMA_RETRY_FAIL)
17860 +               sys_backoffWait (ctxt, 0);
17861 +           else
17862 +           {
17863 +               ELAN_LOCATION location;
17864 +
17865 +               krwlock_read (&ctxt->VpLock);
17866 +               location = ProcessToLocation (ctxt, NULL, trap->Desc.s.dma_direction == DMA_WRITE ? 
17867 +                                             trap->Desc.s.dma_destVProc : trap->Desc.s.dma_srcVProc, NULL);
17868 +               krwlock_done (&ctxt->VpLock);
17869 +               
17870 +               sys_backoffWait (ctxt, location.loc_node == ELAN3_INVALID_NODE ? SYS_BCAST_BACKOFF : SYS_TIMEOUT_BACKOFF);
17871 +           }
17872 +       }
17873 +       return (OP_IGNORE);
17874 +       
17875 +    case EXCEPTION_NETWORK_ERROR:
17876 +    {
17877 +       INPUT_TRAP       *trap  = (INPUT_TRAP *) trapp;
17878 +       NETERR_RESOLVER **rvpp  = va_arg (ap, NETERR_RESOLVER **);
17879 +
17880 +       ASSERT (trap->State == CTXT_STATE_NETWORK_ERROR);
17881 +
17882 +       if (! (sctx->Flags & ELAN3_SYS_FLAG_NETERR) && (trap->DmaIdentifyTransaction || trap->ThreadIdentifyTransaction))
17883 +       {
17884 +           if ((*rvpp) != (NETERR_RESOLVER *) NULL)
17885 +               res = (*rvpp)->Status;
17886 +           else if ((res = QueueNetworkErrorResolver (ctxt, trap, rvpp)) == ESUCCESS)
17887 +           {
17888 +               /* Successfully queued the network error resolver */
17889 +               return (OP_HANDLED);
17890 +           }
17891 +
17892 +           /* network error resolution has failed - either a bad cookie or */
17893 +           /* an rpc error has occurred */
17894 +           sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, 0);
17895 +       }
17896 +       else
17897 +       {
17898 +           /* Must be an overlapped dma packet. Must wait long enough to
17899 +            * ensure that the sending dma'er has tried to send the next
17900 +            * packet and had it discarded. In the real world this should
17901 +            * be greater than an output timeout. (About 8mSec) */
17902 +           
17903 +           sys_backoffWait (ctxt, SYS_NETERR_BACKOFF);
17904 +           
17905 +           /* set this inputter state to be ok, since we've been called 
17906 +            * by the lwp it will lower the context filter for us, so 
17907 +            * re-enabling the inputter,  note we don't need to execute
17908 +            * any of the packet since the dma process will re-transmit
17909 +            * it after receiving a nack for the next packet */
17910 +           trap->State = CTXT_STATE_OK;
17911 +           
17912 +           return (OP_HANDLED);
17913 +       }
17914 +       break;
17915 +    }
17916 +    
17917 +    default:
17918 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
17919 +       break;
17920 +    }
17921 +    
17922 +    if (type != EXCEPTION_DEBUG)
17923 +#ifdef LINUX
17924 +#ifdef NO_NPTL
17925 +       psignal (CURPROC()->p_opptr, sctx->signal);
17926 +#else
17927 +       psignal (CURPROC()->parent, sctx->signal);
17928 +#endif
17929 +#else
17930 +       psignal (CURPROC(), sctx->signal);
17931 +#endif
17932 +    return (OP_HANDLED);
17933 +}
17934 +
17935 +static int
17936 +sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)
17937 +{
17938 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
17939 +
17940 +    cookie_fire_cookie (sctx->Table, cookie);
17941 +
17942 +    return (OP_HANDLED); 
17943 +}
17944 +
17945 +static void
17946 +sys_swapin (ELAN3_CTXT *ctxt)
17947 +{
17948 +    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapin\n");
17949 +}
17950 +
17951 +static void
17952 +sys_swapout (ELAN3_CTXT *ctxt)
17953 +{
17954 +    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapout\n");
17955 +}
17956 +
17957 +static void
17958 +sys_freePrivate (ELAN3_CTXT *ctxt)
17959 +{
17960 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
17961 +
17962 +    cookie_free_table (sctx->Table);
17963 +
17964 +    kmutex_destroy (&sctx->Lock);
17965 +    spin_lock_destroy (&sctx->WaitLock);
17966 +    kcondvar_destroy (&sctx->NetworkErrorWait);
17967 +
17968 +    KMEM_FREE (sctx, sizeof (SYS_CTXT));
17969 +    ctxt->Private = NULL;
17970 +}
17971 +
17972 +static int
17973 +sys_checkThisDma (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, E3_DMA *dma)
17974 +{
17975 +    E3_DmaType type;
17976 +    E3_uint32  cookie;
17977 +    E3_uint32  cvproc;
17978 +    int               ignore;
17979 +    int               match;
17980 +
17981 +    type.type = fuword_noerr ((int *) &dma->dma_type);
17982 +
17983 +    if (type.s.direction == DMA_WRITE)
17984 +    {
17985 +       cookie = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
17986 +       cvproc = fuword_noerr ((int *) &dma->dma_destCookieVProc);
17987 +    }
17988 +    else
17989 +    {
17990 +       cookie = fuword_noerr ((int *) &dma->dma_destCookieVProc);
17991 +       cvproc = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
17992 +    }
17993 +
17994 +    PRINTF5 (ctxt, DBG_NETERR, "sys_checkThisDma: dir = %d cookie = %08x cvproc = %08x CookieVProc %08x DstProcess %04x\n",
17995 +            type.s.direction, cookie, cvproc, nef->Message.CookieVProc, nef->Message.DstProcess);
17996 +
17997 +    /* A DMA matches a network error fixup if it's going to the right place (or is a broadcast)
17998 +     * and the appropriate cookie matches, except that we ignore DMA's which don't have a destEvent
17999 +     * since they don't have any atomic behaviour (though they still send the identify) */
18000 +
18001 +    ignore = (type.s.direction == DMA_WRITE && cookie == 0 &&
18002 +             fuword_noerr ((int *) &dma->dma_destEvent) == 0);
18003 +    match  = (nef->Message.CookieVProc == cookie &&
18004 +             (nef->Message.DstProcess == (cvproc & DMA_PROCESS_MASK) || nef->Message.WaitForEop));
18005 +
18006 +    PRINTF2 (ctxt, DBG_NETERR, "  -> %s %s\n", ignore ? "ignore" : match ? "matched" : "not-matched", nef->Message.WaitForEop ? "wait for eop" : "");
18007 +
18008 +    if (match && !ignore && !nef->Message.WaitForEop)
18009 +    {
18010 +       PRINTF0 (ctxt, DBG_NETERR, "sys_checkThisDma: nuking the dma\n");
18011 +
18012 +       /* NOTE - we access the dma descriptor backwards since it could exist in sdram */
18013 +       if (type.s.direction != DMA_WRITE)
18014 +           suword_noerr ((int *) &dma->dma_srcEvent, 0);
18015 +
18016 +       suword_noerr ((int *) &dma->dma_destEvent, 0);
18017 +       suword_noerr ((int *) &dma->dma_dest,      0);
18018 +       suword_noerr ((int *) &dma->dma_source,    0);
18019 +       suword_noerr ((int *) &dma->dma_size,      0);
18020 +
18021 +       if (type.s.direction != DMA_WRITE)
18022 +           suword_noerr ((int *) &dma->dma_type, fuword_noerr ((int *) &dma->dma_type) & E3_DMA_CONTEXT_MASK);
18023 +
18024 +       wmb(); mmiob();
18025 +    }
18026 +
18027 +    return (match && !ignore);
18028 +}
18029 +
18030 +static int
18031 +sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef)
18032 +{
18033 +    SYS_CTXT       *sctx    = (SYS_CTXT *) ctxt->Private;
18034 +    SYS_SWAP_SPACE *sp      = sctx->Swap;
18035 +    int                    matched = 0;
18036 +    SYS_WORD_ITEM  *wordp;
18037 +    SYS_BLOCK_ITEM *blockp;
18038 +    label_t        ljb;
18039 +    int                    res;
18040 +
18041 +    PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError %08x %08x %08x\n", 
18042 +            nef->Message.CookieAddr, nef->Message.CookieVProc, nef->Message.NextCookie);
18043 +
18044 +    if (nef->Message.CookieAddr == (E3_Addr) 0)                        /* It's a DMA which requires fixing up */
18045 +    {
18046 +       kmutex_lock (&sctx->Lock);
18047 +
18048 +       if (on_fault (&ljb))
18049 +           res = EFAULT;
18050 +       else
18051 +       {
18052 +           /* scan the dma ptr list */
18053 +           for (wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_PTR]);
18054 +                wordp != NULL; 
18055 +                wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &wordp->Next))
18056 +           {
18057 +               E3_uint32 value = fuword_noerr ((int *) &wordp->Value);
18058 +               E3_DMA    *dma  = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, value);
18059 +
18060 +               PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Value %08x dma %p\n", wordp, value, dma);
18061 +
18062 +               matched += sys_checkThisDma (ctxt, nef, dma);
18063 +           }
18064 +       
18065 +           /* scan the dma desc list */
18066 +           for (blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_DESC]);
18067 +                blockp != NULL; 
18068 +                blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &blockp->Next))
18069 +           {
18070 +               E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &blockp->Pointer);
18071 +               
18072 +               PRINTF2 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Pointer %p\n", blockp, dma);
18073 +
18074 +               matched += sys_checkThisDma (ctxt, nef, dma);
18075 +           }
18076 +           
18077 +           /* If we've still not found it, then check the command port item */
18078 +           /* it MUST be present as a command waiting to be executed, as */
18079 +           /* otherwise it could have already happened and we will claim to */
18080 +           /* have found it, but not really */
18081 +           if (ctxt->CommandPortItem != NULL)
18082 +           {
18083 +               E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &((SYS_BLOCK_ITEM *) ctxt->CommandPortItem)->Pointer);
18084 +
18085 +               if (sys_checkThisDma (ctxt, nef, dma))
18086 +               {
18087 +                   printk ("!!! it's the command port item - need to ensure that the command exists\n");
18088 +                   matched++;
18089 +               }
18090 +           }
18091 +
18092 +           res = matched ? ESUCCESS : ESRCH;
18093 +       }
18094 +       no_fault();
18095 +       kmutex_unlock (&sctx->Lock);
18096 +
18097 +       if (matched > 1)
18098 +           ElanException (ctxt, EXCEPTION_COOKIE_ERROR, DMA_PROC, NULL, NULL, nef->Message.CookieVProc);
18099 +    }
18100 +    else                                                       /* It's a thread which requires fixing up */
18101 +    {
18102 +       E3_int32  *cookiePtr = (E3_int32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, nef->Message.CookieAddr);
18103 +       E3_uint32  curval    = fuword_noerr (cookiePtr);
18104 +
18105 +       if (curval == nef->Message.CookieVProc)         /* thread doesn't think it's been done */
18106 +       {
18107 +           if (! nef->Message.WaitForEop)
18108 +           {
18109 +               suword_noerr (cookiePtr, nef->Message.NextCookie);
18110 +               mb(); mmiob();
18111 +           }
18112 +           
18113 +           res = ESUCCESS;
18114 +       }
18115 +       else                                                    /* thread thinks that it's been executed */
18116 +       {
18117 +           res = ESRCH;
18118 +       }
18119 +    }
18120 +    
18121 +    CompleteNetworkErrorFixup (ctxt, nef, res);
18122 +
18123 +    return (OP_HANDLED);
18124 +}
18125 +
18126 +
18127 +static int
18128 +sys_startFaultCheck (ELAN3_CTXT *ctxt)
18129 +{
18130 +    return (0);
18131 +}
18132 +
18133 +static void
18134 +sys_endFaultCheck (ELAN3_CTXT *ctxt)
18135 +{
18136 +    wmb();
18137 +}
18138 +
18139 +static E3_uint8
18140 +sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr)
18141 +{
18142 +    E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18143 +
18144 +    return (fubyte_noerr (maddr));
18145 +}
18146 +
18147 +static void
18148 +sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val)
18149 +{
18150 +    E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18151 +
18152 +    subyte_noerr (maddr, val);
18153 +    wmb(); mmiob();
18154 +}
18155 +
18156 +static E3_uint16
18157 +sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr)
18158 +{
18159 +    E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18160 +
18161 +    return (fusword_noerr (maddr));
18162 +}
18163 +
18164 +static void
18165 +sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val)
18166 +{
18167 +    E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18168 +
18169 +    susword_noerr (maddr, val);
18170 +    wmb(); mmiob();
18171 +}
18172 +
18173 +static E3_uint32
18174 +sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr)
18175 +{
18176 +    E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18177 +
18178 +    return (fuword_noerr (maddr));
18179 +}
18180 +
18181 +static void
18182 +sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val)
18183 +{
18184 +    E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18185 +
18186 +    suword_noerr (maddr, val);
18187 +    wmb(); mmiob();
18188 +}
18189 +
18190 +static E3_uint64
18191 +sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr)
18192 +{
18193 +    E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18194 +
18195 +    return (fulonglong_noerr ((long long *) maddr));
18196 +}
18197 +
18198 +static void
18199 +sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val)
18200 +{
18201 +    E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18202 +
18203 +    sulonglong_noerr ((long long *) maddr, val);
18204 +    wmb(); mmiob();
18205 +}
18206 +
18207 +
18208 +void
18209 +sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t trapp, int size,
18210 +                 E3_FaultSave_BE *faultSave, u_long res, u_long value)
18211 +{
18212 +    SYS_EXCEPTION      *ex_ptr;
18213 +    int                        front;
18214 +    int                        back;
18215 +    int                        count;
18216 +    label_t            ljp;
18217 +
18218 +    PRINTF4 (DBG_DEVICE, DBG_FN, "sys_addException: type %d proc %d res %ld value %ld\n",
18219 +            type, proc, res, value);
18220 +
18221 +    KMEM_ZALLOC (ex_ptr, SYS_EXCEPTION *, sizeof  (SYS_EXCEPTION), TRUE);
18222 +
18223 +    if (ex_ptr != NULL)
18224 +    {
18225 +       bzero ((caddr_t) ex_ptr, sizeof (SYS_EXCEPTION));
18226 +
18227 +       ex_ptr->Type  = type;
18228 +       ex_ptr->Proc  = proc;
18229 +       ex_ptr->Res   = res;
18230 +       ex_ptr->Value = value;
18231 +       
18232 +       if (trapp && size)
18233 +           bcopy (trapp, (caddr_t) &ex_ptr->Union, size);
18234 +       if (faultSave)
18235 +           bcopy ((caddr_t) faultSave, (caddr_t) &ex_ptr->FaultArea, sizeof (E3_FaultSave_BE));
18236 +    }
18237 +
18238 +    kmutex_lock (&sctx->Lock);
18239 +    if (! on_fault (&ljp))
18240 +    {
18241 +       front = fuword_noerr (&sctx->Exceptions->Front);
18242 +       back  = fuword_noerr (&sctx->Exceptions->Back);
18243 +       count = fuword_noerr (&sctx->Exceptions->Count);
18244 +
18245 +       if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count)
18246 +           suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1);
18247 +       else if (((front+1) % count ) == back)
18248 +           suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1);
18249 +       else
18250 +       {
18251 +           if (ex_ptr != NULL)
18252 +               copyout_noerr ((caddr_t) ex_ptr, (caddr_t) &sctx->Exceptions->Exceptions[front], sizeof (SYS_EXCEPTION));
18253 +           else
18254 +           {
18255 +               suword_noerr (&sctx->Exceptions->Exceptions[front].Type, EXCEPTION_ENOMEM);
18256 +               suword_noerr (&sctx->Exceptions->Exceptions[front].Proc, 0);
18257 +           }
18258 +           suword_noerr (&sctx->Exceptions->Front, (front + 1) % count);
18259 +       }
18260 +
18261 +       /* always reset the magic number in case it's been overwritten */
18262 +       /* so that 'edb' can find the exception page in the core file */
18263 +       suword_noerr (&sctx->Exceptions->Magic, SYS_EXCEPTION_MAGIC);
18264 +    }
18265 +    no_fault();
18266 +    kmutex_unlock (&sctx->Lock);
18267 +    
18268 +    if (ex_ptr != NULL)
18269 +       KMEM_FREE (ex_ptr, sizeof  (SYS_EXCEPTION));
18270 +}
18271 +
18272 +int
18273 +sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex)
18274 +{
18275 +    int     front;
18276 +    int     back;
18277 +    int     count;
18278 +    int     res;
18279 +    label_t ljp;
18280 +
18281 +    if (sctx->Exceptions == NULL)
18282 +       return (EINVAL);
18283 +
18284 +    kmutex_lock (&sctx->Lock);
18285 +    if (on_fault (&ljp))
18286 +    {
18287 +       no_fault();
18288 +       kmutex_unlock (&sctx->Lock);
18289 +       return (EFAULT);
18290 +    }
18291 +    
18292 +    front = fuword_noerr (&sctx->Exceptions->Front);
18293 +    back  = fuword_noerr (&sctx->Exceptions->Back);
18294 +    count = fuword_noerr (&sctx->Exceptions->Count);
18295 +
18296 +    if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count || back == front)
18297 +       res = EINVAL;
18298 +    else
18299 +    {
18300 +       copyin_noerr ((caddr_t) &sctx->Exceptions->Exceptions[back], (caddr_t) ex, sizeof (SYS_EXCEPTION));
18301 +       suword_noerr (&sctx->Exceptions->Back, (back+1) % count);
18302 +
18303 +       res = ESUCCESS;
18304 +    }
18305 +    no_fault();
18306 +    kmutex_unlock (&sctx->Lock);
18307 +
18308 +    return (res);
18309 +}
18310 +
18311 +
18312 +/*
18313 + * Local variables:
18314 + * c-file-style: "stroustrup"
18315 + * End:
18316 + */
18317 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/eventcookie.c
18318 ===================================================================
18319 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/eventcookie.c        2004-02-23 16:02:56.000000000 -0500
18320 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/eventcookie.c     2005-07-28 14:52:52.806685120 -0400
18321 @@ -0,0 +1,324 @@
18322 +/*
18323 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
18324 + * 
18325 + *    For licensing information please see the supplied COPYING file
18326 + *
18327 + */
18328 +
18329 +#ident "@(#)$Id: eventcookie.c,v 1.7 2003/08/13 10:03:03 fabien Exp $"
18330 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/eventcookie.c,v $*/
18331 +
18332 +#include <qsnet/kernel.h>
18333 +#include <elan3/elanregs.h>
18334 +#include <elan3/elandev.h>
18335 +#include <elan3/elanvp.h>
18336 +#include <elan3/elan3mmu.h>
18337 +#include <elan3/elanctxt.h>
18338 +#include <elan3/elandebug.h>
18339 +#include <elan3/urom_addrs.h>
18340 +#include <elan3/thread.h>
18341 +#include <elan3/vmseg.h>
18342 +
18343 +static EVENT_COOKIE_TABLE *cookie_tables;
18344 +static spinlock_t         cookie_table_lock;
18345 +
18346 +/*
18347 + * cookie_drop_entry:
18348 + *   drop the reference to a cookie held 
18349 + *   by the cookie table
18350 + */
18351 +static void
18352 +cookie_drop_entry (EVENT_COOKIE_ENTRY *ent)
18353 +{
18354 +    unsigned long flags;
18355 +
18356 +    spin_lock_irqsave (&ent->ent_lock, flags);
18357 +    if (--ent->ent_ref != 0)
18358 +    {
18359 +       ent->ent_fired = ent->ent_cookie;
18360 +       kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
18361 +
18362 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18363 +    }
18364 +    else
18365 +    {
18366 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18367 +
18368 +       spin_lock_destroy (&ent->ent_lock);
18369 +       kcondvar_destroy (&ent->ent_wait);
18370 +
18371 +       KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY));
18372 +    }
18373 +}
18374 +
18375 +void
18376 +cookie_init()
18377 +{
18378 +    spin_lock_init (&cookie_table_lock);
18379 +}
18380 +
18381 +void
18382 +cookie_fini()
18383 +{
18384 +    spin_lock_destroy (&cookie_table_lock);
18385 +}
18386 +
18387 +EVENT_COOKIE_TABLE *
18388 +cookie_alloc_table (unsigned long task, unsigned long handle)
18389 +{
18390 +    EVENT_COOKIE_TABLE *tbl, *ntbl;
18391 +
18392 +    KMEM_ZALLOC (ntbl, EVENT_COOKIE_TABLE *, sizeof (EVENT_COOKIE_TABLE), TRUE);
18393 +
18394 +    if (ntbl == NULL)
18395 +       return (NULL);
18396 +
18397 +    spin_lock (&cookie_table_lock);
18398 +    
18399 +    for (tbl = cookie_tables; tbl; tbl = tbl->tbl_next)
18400 +       if (tbl->tbl_task == task && tbl->tbl_handle == handle)
18401 +           break;
18402 +    
18403 +    if (tbl != NULL)
18404 +       tbl->tbl_ref++;
18405 +    else
18406 +    {
18407 +       spin_lock_init (&ntbl->tbl_lock);
18408 +
18409 +       ntbl->tbl_task    = task;
18410 +       ntbl->tbl_handle  = handle;
18411 +       ntbl->tbl_ref     = 1;
18412 +       ntbl->tbl_entries = NULL;
18413 +
18414 +       if ((ntbl->tbl_next = cookie_tables) != NULL)
18415 +           cookie_tables->tbl_prev = ntbl;
18416 +       cookie_tables = ntbl;
18417 +       ntbl->tbl_prev = NULL;
18418 +    }
18419 +    spin_unlock (&cookie_table_lock);
18420 +
18421 +    if (tbl == NULL)
18422 +       return (ntbl);
18423 +    else
18424 +    {
18425 +       KMEM_FREE (ntbl, sizeof (EVENT_COOKIE_TABLE));
18426 +       return (tbl);
18427 +    }    
18428 +}
18429 +
18430 +void
18431 +cookie_free_table (EVENT_COOKIE_TABLE *tbl)
18432 +{
18433 +    EVENT_COOKIE_ENTRY *ent;
18434 +
18435 +    spin_lock (&cookie_table_lock);
18436 +    if (tbl->tbl_ref > 1)
18437 +    {
18438 +       tbl->tbl_ref--;
18439 +       spin_unlock (&cookie_table_lock);
18440 +       return;
18441 +    }
18442 +    
18443 +    if (tbl->tbl_prev)
18444 +       tbl->tbl_prev->tbl_next = tbl->tbl_next;
18445 +    else
18446 +       cookie_tables = tbl->tbl_next;
18447 +    if (tbl->tbl_next)
18448 +       tbl->tbl_next->tbl_prev = tbl->tbl_prev;
18449 +    
18450 +    spin_unlock (&cookie_table_lock);
18451 +    
18452 +    /* NOTE - table no longer visible to other threads
18453 +     *        no need to acquire tbl_lock */
18454 +    while ((ent = tbl->tbl_entries) != NULL)
18455 +    {
18456 +       if ((tbl->tbl_entries = ent->ent_next) != NULL)
18457 +           ent->ent_next->ent_prev = NULL;
18458 +       
18459 +       cookie_drop_entry (ent);
18460 +    }
18461 +    spin_lock_destroy (&tbl->tbl_lock);
18462 +
18463 +    KMEM_FREE (tbl, sizeof (EVENT_COOKIE_TABLE));
18464 +}
18465 +
18466 +int
18467 +cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18468 +{
18469 +    EVENT_COOKIE_ENTRY *ent, *nent;
18470 +    unsigned long flags;
18471 +
18472 +    KMEM_ZALLOC (nent, EVENT_COOKIE_ENTRY *, sizeof (EVENT_COOKIE_ENTRY), TRUE);
18473 +    
18474 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18475 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18476 +       if (ent->ent_cookie == cookie)
18477 +           break;
18478 +
18479 +    if (ent == NULL)
18480 +    {
18481 +       kcondvar_init (&nent->ent_wait);
18482 +       spin_lock_init (&nent->ent_lock);
18483 +
18484 +       nent->ent_ref    = 1;
18485 +       nent->ent_cookie = cookie;
18486 +
18487 +       if ((nent->ent_next = tbl->tbl_entries) != NULL)
18488 +           tbl->tbl_entries->ent_prev = nent;
18489 +       tbl->tbl_entries = nent;
18490 +       nent->ent_prev = NULL;
18491 +    }
18492 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18493 +
18494 +    if (ent == NULL)
18495 +       return (ESUCCESS);
18496 +    else
18497 +    {
18498 +       KMEM_FREE (nent, sizeof (EVENT_COOKIE_ENTRY));
18499 +       return (EINVAL);
18500 +    }
18501 +}
18502 +
18503 +int
18504 +cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18505 +{
18506 +    EVENT_COOKIE_ENTRY *ent;
18507 +    unsigned long flags;
18508 +
18509 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18510 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18511 +       if (ent->ent_cookie == cookie)
18512 +           break;
18513 +    
18514 +    if (ent == NULL)
18515 +    {
18516 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18517 +       return (EINVAL);
18518 +    }
18519 +
18520 +    if (ent->ent_prev == NULL)
18521 +       tbl->tbl_entries = ent->ent_next;
18522 +    else
18523 +       ent->ent_prev->ent_next = ent->ent_next;
18524 +
18525 +    if (ent->ent_next != NULL)
18526 +       ent->ent_next->ent_prev = ent->ent_prev;
18527 +    
18528 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18529 +
18530 +    cookie_drop_entry (ent);
18531 +
18532 +    return (ESUCCESS);
18533 +}
18534 +
18535 +/*
18536 + * cookie_fire_cookie:
18537 + *    fire the cookie - this is called from the event interrupt.
18538 + */
18539 +int
18540 +cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18541 +{
18542 +    EVENT_COOKIE_ENTRY *ent;
18543 +    unsigned long flags;
18544 +
18545 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18546 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18547 +       if (ent->ent_cookie == cookie)
18548 +           break;
18549 +    
18550 +    if (ent == NULL)
18551 +    {
18552 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18553 +       return (EINVAL);
18554 +    }
18555 +           
18556 +    spin_lock (&ent->ent_lock);
18557 +    ent->ent_fired = cookie;
18558 +    kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
18559 +    spin_unlock (&ent->ent_lock);
18560 +
18561 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18562 +
18563 +    return (ESUCCESS);
18564 +}    
18565 +
18566 +/*
18567 + * cookie_wait_cookie:
18568 + *    deschedule on a cookie if it has not already fired.
18569 + *    note - if the cookie is removed from the table, then
18570 + *           we free it off when we're woken up.
18571 + */
18572 +int
18573 +cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18574 +{
18575 +    EVENT_COOKIE_ENTRY *ent;
18576 +    unsigned long flags;
18577 +    
18578 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18579 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18580 +       if (ent->ent_cookie == cookie)
18581 +           break;
18582 +    
18583 +    if (ent == NULL)
18584 +    {
18585 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18586 +       return (EINVAL);
18587 +    }
18588 +
18589 +    spin_lock (&ent->ent_lock);
18590 +    spin_unlock (&tbl->tbl_lock);
18591 +
18592 +    if (ent->ent_fired != 0)
18593 +    {
18594 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18595 +       return (ESUCCESS);
18596 +    }
18597 +
18598 +    ent->ent_ref++;
18599 +    kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags);
18600 +    
18601 +    if (--ent->ent_ref > 0)
18602 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18603 +    else
18604 +    {
18605 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18606 +       
18607 +       spin_lock_destroy (&ent->ent_lock);
18608 +       kcondvar_destroy (&ent->ent_wait);
18609 +
18610 +       KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY));
18611 +    }
18612 +    return (ESUCCESS);
18613 +}
18614 +
18615 +int
18616 +cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18617 +{
18618 +    EVENT_COOKIE_ENTRY *ent;
18619 +    unsigned long flags;
18620 +
18621 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18622 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18623 +       if (ent->ent_cookie == cookie)
18624 +           break;
18625 +    
18626 +    if (ent == NULL)
18627 +    {
18628 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18629 +       return (EINVAL);
18630 +    }
18631 +           
18632 +    spin_lock (&ent->ent_lock);
18633 +    ent->ent_fired = 0;
18634 +    spin_unlock (&ent->ent_lock);
18635 +
18636 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18637 +
18638 +    return (ESUCCESS);
18639 +}
18640 +
18641 +/*
18642 + * Local variables:
18643 + * c-file-style: "stroustrup"
18644 + * End:
18645 + */
18646 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/iproc.c
18647 ===================================================================
18648 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/iproc.c      2004-02-23 16:02:56.000000000 -0500
18649 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/iproc.c   2005-07-28 14:52:52.808684816 -0400
18650 @@ -0,0 +1,925 @@
18651 +/*
18652 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
18653 + * 
18654 + *    For licensing information please see the supplied COPYING file
18655 + *
18656 + */
18657 +
18658 +#ident "@(#)$Id: iproc.c,v 1.47 2003/09/24 13:57:25 david Exp $"
18659 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/iproc.c,v $ */
18660 +
18661 +#include <qsnet/kernel.h>
18662 +
18663 +#include <elan3/elanregs.h>
18664 +#include <elan3/elandev.h>
18665 +#include <elan3/elanvp.h>
18666 +#include <elan3/elan3mmu.h>
18667 +#include <elan3/elanctxt.h>
18668 +#include <elan3/elandebug.h>
18669 +#include <elan3/urom_addrs.h>
18670 +#include <elan3/trtype.h>
18671 +#include <elan3/vmseg.h>
18672 +
18673 +
18674 +static int TrSizeTable[] = {0, 8, 16, 32, 64};
18675 +
18676 +static void  ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr);
18677 +static void  SimulateBlockWrite  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18678 +static void  SimulateWriteWord   (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18679 +static void  SimulateWriteDWord  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18680 +static void  SimulateTraceRoute  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
18681 +static void  BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp);
18682 +
18683 +void
18684 +HandleIProcTrap (ELAN3_DEV           *dev, 
18685 +                int                 Channel,
18686 +                E3_uint32           Pend,
18687 +                sdramaddr_t         FaultSaveOff,
18688 +                sdramaddr_t         TransactionsOff,
18689 +                sdramaddr_t         DataOff)
18690 +{
18691 +    E3_IprocTrapHeader_BE Transaction0;
18692 +    ELAN3_CTXT          *ctxt;
18693 +    INPUT_TRAP           *trap;
18694 +    register int          i;
18695 +
18696 +    /*
18697 +     * Read the 1st set of transactions, so we can determine the 
18698 +     * context for the trap 
18699 +     */
18700 +    elan3_sdram_copyq_from_sdram (dev, TransactionsOff, (void *) &Transaction0, 16);
18701 +    
18702 +    BumpStat (dev, IProcTraps);
18703 +    BumpInputterStats (dev, &Transaction0);
18704 +
18705 +    if (Transaction0.s.TrTypeCntx.s.TypeCntxInvalid)
18706 +    {
18707 +       /*
18708 +        * The context is not valid. This will occur if the packet
18709 +        * trapped for an EopError with no IdentTrans or an error corrupted the context
18710 +        * giving a CRC error on the first transaction and the Ack had not been returned.
18711 +        */
18712 +       if (Transaction0.s.TrTypeCntx.s.LastTrappedTrans)
18713 +       {
18714 +           PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: Error on EOP without a good context, ignoring trap\n");
18715 +       }
18716 +       else
18717 +       {
18718 +           /* Check that only crap has been received.  If not then die. */
18719 +           if (! Transaction0.s.IProcTrapStatus.s.BadLength &&
18720 +               (Transaction0.s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_GOOD)
18721 +           {
18722 +               printk ("iproc: Did not have a valid context for the trap area.\n");
18723 +               printk ("iproc: TrTypeCntx=%x TrAddr=%x TrData0=%x IProcTrapStatus=%x\n",
18724 +                        Transaction0.s.TrTypeCntx.TypeContext, Transaction0.s.TrAddr,
18725 +                        Transaction0.s.TrData0, Transaction0.s.IProcTrapStatus.Status);
18726 +               panic ("elan3: iproc did not have a valid context");
18727 +               /* NOTREACHED */
18728 +           }
18729 +           PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: First transaction is bad, ignoring trap\n");
18730 +       }
18731 +    }
18732 +    else
18733 +    {
18734 +       ctxt = ELAN3_DEV_CTX_TABLE(dev, Transaction0.s.TrTypeCntx.s.Context);
18735 +       
18736 +       if (ctxt == NULL)
18737 +       {
18738 +           PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleIProcTrap: context %x invalid\n", 
18739 +                    Transaction0.s.TrTypeCntx.s.Context);
18740 +
18741 +           BumpStat (dev, InvalidContext);
18742 +       }
18743 +       else
18744 +       {
18745 +           trap = (Channel == 0) ? &ctxt->Input0Trap : &ctxt->Input1Trap;
18746 +
18747 +           ASSERT (trap->State == CTXT_STATE_OK);
18748 +           
18749 +           trap->Transactions[0] = Transaction0;
18750 +
18751 +           PRINTF1 (ctxt, DBG_INTR, "HandleIProcTrap: %s\n", IProcTrapString (&trap->Transactions[0], NULL));
18752 +           /*
18753 +            * Copy the rest of the transactions into the trap area.
18754 +            */
18755 +           for (i = 0; !(trap->Transactions[i].s.TrTypeCntx.s.LastTrappedTrans);)
18756 +           {
18757 +               if (++i >= MAX_TRAPPED_TRANS)
18758 +               {
18759 +                   trap->Overflow = 1;
18760 +                   break;
18761 +               }
18762 +
18763 +               elan3_sdram_copyq_from_sdram (dev, TransactionsOff + i*sizeof (E3_IprocTrapHeader), (void *) &trap->Transactions[i], 16);
18764 +
18765 +               PRINTF1 (ctxt, DBG_INTR, "                 %s\n", IProcTrapString (&trap->Transactions[i], NULL));
18766 +
18767 +               BumpInputterStats (dev, &trap->Transactions[i]);
18768 +           }
18769 +           
18770 +           /*
18771 +            * Remember the number of transactions we've copied.
18772 +            */
18773 +           trap->NumTransactions = i+1;
18774 +
18775 +           PRINTF1 (ctxt, DBG_INTR, "                 NumTransactions = %d\n", trap->NumTransactions);
18776 +           
18777 +           /*
18778 +            * Copy all the data blocks in one go to let the Elan prefetcher work 
18779 +            */
18780 +           elan3_sdram_copyq_from_sdram (dev, DataOff, trap->DataBuffers, trap->NumTransactions*sizeof (E3_IprocTrapData));
18781 +
18782 +           /*
18783 +            * Copy fault save area and clear out for next time round.
18784 +            */
18785 +           elan3_sdram_copyq_from_sdram (dev, FaultSaveOff, (void *) &trap->FaultSave, 16);
18786 +           elan3_sdram_zeroq_sdram (dev, FaultSaveOff, 16);
18787 +
18788 +           if (ELAN3_OP_IPROC_TRAP (ctxt, trap, Channel) == OP_DEFER)
18789 +           {
18790 +               /*
18791 +                * Mark the trap as valid and set the inputter state to 
18792 +                * raise the context filter.
18793 +                */
18794 +               trap->State = CTXT_STATE_TRAPPED;
18795 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
18796 +               
18797 +               SetInputterStateForContext (ctxt, Pend, NULL);
18798 +           }
18799 +       }
18800 +    }
18801 +}
18802 +
18803 +void
18804 +InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap)
18805 +{
18806 +    int              i;
18807 +    int              StatusValid;
18808 +
18809 +    trap->AckSent                  = 0;
18810 +    trap->BadTransaction            = 0;
18811 +    
18812 +    trap->TrappedTransaction        = NULL;
18813 +    trap->TrappedDataBuffer        = NULL;
18814 +    trap->WaitForEopTransaction     = NULL;
18815 +    trap->WaitForEopDataBuffer      = NULL;
18816 +    trap->DmaIdentifyTransaction    = NULL;
18817 +    trap->ThreadIdentifyTransaction = NULL;
18818 +    trap->LockQueuePointer          = (E3_Addr) 0;
18819 +    trap->UnlockQueuePointer        = (E3_Addr) 0;
18820 +
18821 +    /*
18822 +     * Now scan all the transactions received 
18823 +     */
18824 +    for (i = 0; i < trap->NumTransactions ; i++)
18825 +    {
18826 +       E3_IprocTrapHeader_BE *hdrp = &trap->Transactions[i];
18827 +       E3_IprocTrapData_BE   *datap = &trap->DataBuffers[i];
18828 +
18829 +       StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid != 0;
18830 +       
18831 +       if (StatusValid && hdrp->s.IProcTrapStatus.s.AckSent)   /* Remember if we've sent the ack back */
18832 +           trap->AckSent = 1;
18833 +       
18834 +       if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)              /* Check for EOP */
18835 +       {
18836 +           ASSERT (i == trap->NumTransactions - 1);
18837 +
18838 +           switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
18839 +           {
18840 +           case EOP_GOOD:
18841 +               /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */  
18842 +               /* unless it was a flood, in which case someone must have sent an ack */
18843 +               /* but not necessarily us */
18844 +               break;
18845 +
18846 +           case EOP_BADACK:
18847 +               BumpUserStat (ctxt, EopBadAcks);
18848 +
18849 +               /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if
18850 +                * we sent a PAckOk. We can clear tinfo.AckSent. */
18851 +               if (trap->AckSent == 1)
18852 +               {
18853 +                   PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: Network error destroyed PAckOk\n");
18854 +                   trap->AckSent = 0;
18855 +               }
18856 +               break;
18857 +
18858 +           case EOP_ERROR_RESET:
18859 +               BumpUserStat (ctxt, EopResets);
18860 +
18861 +               /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */
18862 +               trap->BadTransaction = 1;
18863 +               break;
18864 +
18865 +           default:
18866 +               panic ("InspectIProcTrap: invalid EOP type in status register\n");
18867 +               /* NOTREACHED */
18868 +           }
18869 +           continue;
18870 +       }
18871 +
18872 +       PRINTF2 (ctxt, DBG_IPROC, "InspectIProcTrap: %2d: %s\n", i, IProcTrapString (hdrp, datap));
18873 +       
18874 +       if (! StatusValid)                                      /* We're looking at transactions stored before the trap */
18875 +       {                                                       /* these should only be identifies and lock transactions */
18876 +
18877 +           if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
18878 +               panic ("InspectIProcTrap: writeblock transaction found in input trap header before trap occured\n");
18879 +
18880 +           switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
18881 +           {
18882 +           case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK:
18883 +               if (trap->LockQueuePointer)                             /* Already seen a LOCKQUEUE transaction in this packet, */
18884 +               {                                               /* the user program should not have done this !! */
18885 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18886 +                   return;
18887 +               }
18888 +
18889 +               trap->LockQueuePointer = (E3_Addr) hdrp->s.TrAddr;      /* Remember the queue pointer in case we need to unlock it */
18890 +               break;
18891 +
18892 +           case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK:
18893 +               if (trap->DmaIdentifyTransaction ||             /* Already seen an identify transaction in this packet */
18894 +                   trap->ThreadIdentifyTransaction)            /* the user program should not have done this */
18895 +               {                                                       
18896 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18897 +                   return;
18898 +               }
18899 +               trap->DmaIdentifyTransaction = hdrp;
18900 +               break;
18901 +
18902 +           case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK:
18903 +               if (trap->DmaIdentifyTransaction ||             /* Already seen an identify transaction in this packet */
18904 +                   trap->ThreadIdentifyTransaction)            /* the user program should not have done this */
18905 +               {                                                       
18906 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18907 +                   return;
18908 +               }
18909 +               trap->ThreadIdentifyTransaction = hdrp;
18910 +               break;
18911 +               
18912 +           default:
18913 +               panic ("InspectIProcTrap: invalid transaction found in input trap header before trap occured\n");
18914 +               /* NOTREACHED */
18915 +           }
18916 +           continue;
18917 +       }
18918 +
18919 +       if (StatusValid && trap->TrappedTransaction == NULL)    /* Remember the transaction which caused the */
18920 +       {                                                       /* trap */
18921 +           trap->TrappedTransaction = hdrp;
18922 +           trap->TrappedDataBuffer  = datap;
18923 +       }
18924 +
18925 +       if(hdrp->s.IProcTrapStatus.s.BadLength ||
18926 +          ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR) ||
18927 +          ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD))
18928 +       {
18929 +           int j;
18930 +           PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: transaction has a bad crc\n");
18931 +           for (j=0; j<TRANS_DATA_WORDS; j+=4)
18932 +              PRINTF5 (ctxt, DBG_IPROC, "InspectIProcTrap: Data %0d %8x %8x %8x %8x\n",
18933 +                       j, datap->TrData[j], datap->TrData[j+1], datap->TrData[j+2], datap->TrData[j+3]);
18934 +           trap->BadTransaction = 1;
18935 +           continue;
18936 +       }
18937 +       
18938 +       /* No more to do if it's a writeblock transaction */
18939 +       if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
18940 +           continue;
18941 +
18942 +       
18943 +       if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap &&
18944 +           (hdrp->s.TrTypeCntx.s.Type & TR_WAIT_FOR_EOP) != 0)
18945 +       {
18946 +           /*
18947 +            * This is a wait for eop transaction that has trapped because the inputter
18948 +            * then received an EopError. The next transaction saved should always be an
18949 +            * EopError.
18950 +            */
18951 +           PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: got a trapped WaitForEop transaction due to EopError\n");
18952 +           
18953 +           trap->WaitForEopTransaction = hdrp;
18954 +           trap->WaitForEopDataBuffer  = datap;
18955 +           continue;
18956 +       }
18957 +
18958 +       switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
18959 +       {
18960 +       case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:
18961 +           if (trap->UnlockQueuePointer)
18962 +           {
18963 +               ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
18964 +               return;
18965 +           }
18966 +           trap->UnlockQueuePointer = (E3_Addr) hdrp->s.TrAddr;
18967 +           break;
18968 +       }
18969 +    }
18970 +}
18971 +
18972 +void
18973 +ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp)
18974 +{
18975 +    ELAN3_DEV     *dev = ctxt->Device;
18976 +    int           res;
18977 +    unsigned long flags;
18978 +
18979 +    ASSERT (! CTXT_IS_KERNEL (ctxt));
18980 +
18981 +    BumpUserStat (ctxt, IProcTraps);
18982 +
18983 +    InspectIProcTrap (ctxt, trap);
18984 +
18985 +    /*
18986 +     * fixup page fault if we've trapped because of one.
18987 +     */
18988 +    if (trap->FaultSave.s.FaultContext != 0)
18989 +    {
18990 +       /*
18991 +        * If it's a WRITEBLOCK transaction, then see if we remember faulting
18992 +        * before it, and try and prefault in a sensible amount past it.
18993 +        */
18994 +       int                fixedFault = FALSE;
18995 +       INPUT_FAULT_SAVE  *entry;
18996 +       INPUT_FAULT_SAVE **predp;
18997 +       int                npages;
18998 +
18999 +       if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0 && /* a DMA packet */
19000 +           trap->LockQueuePointer == (E3_Addr) 0 &&                                    /* but not a queueing DMA */
19001 +           trap->TrappedTransaction->s.TrAddr != 0)                                    /* and not a DMA to 0 */
19002 +       {
19003 +           spin_lock (&ctxt->InputFaultLock);
19004 +           
19005 +           for (predp = &ctxt->InputFaultList; (entry = *predp)->Next != NULL ; predp = &entry->Next)
19006 +           {
19007 +               if (entry->Addr == trap->TrappedTransaction->s.TrAddr)
19008 +                   break;
19009 +           }
19010 +           
19011 +           *predp = entry->Next;
19012 +           entry->Next = ctxt->InputFaultList;
19013 +           ctxt->InputFaultList = entry;
19014 +           
19015 +           if (entry->Addr == trap->TrappedTransaction->s.TrAddr)
19016 +           {
19017 +               if ((entry->Count <<= 1) > MAX_INPUT_FAULT_PAGES)
19018 +                   entry->Count = MAX_INPUT_FAULT_PAGES;
19019 +           }
19020 +           else
19021 +           {
19022 +               entry->Count = MIN_INPUT_FAULT_PAGES;
19023 +           }
19024 +           
19025 +           entry->Addr = trap->TrappedTransaction->s.TrAddr + (entry->Count * PAGESIZE);
19026 +           npages = entry->Count;
19027 +           
19028 +           spin_unlock (&ctxt->InputFaultLock);
19029 +           
19030 +           if (elan3_pagefault (ctxt, &trap->FaultSave, npages) != ESUCCESS)
19031 +           {
19032 +               PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - failed\n", 
19033 +                        npages, trap->TrappedTransaction->s.TrAddr);
19034 +           }
19035 +           else
19036 +           {
19037 +               PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - succeeded\n", 
19038 +                        npages, trap->TrappedTransaction->s.TrAddr);
19039 +               
19040 +               fixedFault = TRUE;
19041 +           }
19042 +       }
19043 +
19044 +       /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */
19045 +       /* the packet will have been nacked */
19046 +       if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) &&      /* a DMA packet */
19047 +           trap->LockQueuePointer == 0 && trap->UnlockQueuePointer &&                  /* a queueing DMA */
19048 +           trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress)       /* and missed lockqueue */
19049 +       {
19050 +           fixedFault = TRUE;
19051 +       }
19052 +
19053 +       if (! fixedFault)
19054 +       {
19055 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
19056 +           {
19057 +               PRINTF1 (ctxt, DBG_IPROC, "ResolveIProcTrap: elan3_pagefault failed at %x\n", 
19058 +                        trap->FaultSave.s.FaultAddress);
19059 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, INPUT_PROC, trap, &trap->FaultSave, res);
19060 +               return;
19061 +           }
19062 +       }
19063 +    }
19064 +
19065 +    if (! trap->AckSent && trap->LockQueuePointer)                     /* Queued DMA */
19066 +    {                                                                  /* The ack was not sent, so the queue will be locked. */
19067 +       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE);      /* We must unlock it. */
19068 +    }
19069 +
19070 +    if (trap->AckSent && trap->BadTransaction)
19071 +    {
19072 +       if (trap->DmaIdentifyTransaction)
19073 +       {
19074 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Dma identify needs network resultion\n");
19075 +
19076 +           BumpStat (dev, DmaIdentifyNetworkErrors);
19077 +           BumpUserStat (ctxt, DmaIdentifyNetworkErrors);
19078 +
19079 +           if (trap->WaitForEopTransaction)
19080 +               PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n");
19081 +       }
19082 +       else if (trap->ThreadIdentifyTransaction)
19083 +       {
19084 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Thread identify needs network resolution\n");
19085 +
19086 +           BumpStat (dev, ThreadIdentifyNetworkErrors);
19087 +           BumpUserStat (ctxt, ThreadIdentifyNetworkErrors);
19088 +
19089 +           if (trap->WaitForEopTransaction)
19090 +               PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n");
19091 +       }
19092 +       else
19093 +       {
19094 +           BumpStat (dev, DmaNetworkErrors);
19095 +           BumpUserStat (ctxt, DmaNetworkErrors);
19096 +       }
19097 +    }
19098 +
19099 +    spin_lock_irqsave (&dev->IntrLock, flags);
19100 +    
19101 +    if (! trap->AckSent)
19102 +    {
19103 +       PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack not sent, lowering context filter\n");
19104 +
19105 +       trap->State = CTXT_STATE_OK;
19106 +    }
19107 +    else
19108 +    {
19109 +       if (trap->BadTransaction)
19110 +       {
19111 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on bad transaction\n");
19112 +           trap->State = CTXT_STATE_NETWORK_ERROR;
19113 +       }
19114 +       else
19115 +       {
19116 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on packet to be re-executed\n");
19117 +           trap->State = CTXT_STATE_NEEDS_RESTART;
19118 +       }
19119 +    }
19120 +
19121 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
19122 +
19123 +    if (trap->AckSent && trap->BadTransaction)
19124 +       ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, rvpp);
19125 +}
19126 +
19127 +int
19128 +RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap)
19129 +{
19130 +    PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: %d transactions\n", trap->NumTransactions);
19131 +
19132 +    if (trap->TrappedTransaction == NULL)                      /* No transaction trapped - probably a network */
19133 +       return (ESUCCESS);                                      /* error */
19134 +
19135 +    while (! trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans)
19136 +    {
19137 +       E3_IprocTrapHeader_BE *hdrp = trap->TrappedTransaction;
19138 +       E3_IprocTrapData_BE   *datap = trap->TrappedDataBuffer;
19139 +       
19140 +       ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0);
19141 +
19142 +       PRINTF2 (ctxt, DBG_IPROC, "RestartIProc: TrType=0x%x Status=0x%x\n",
19143 +                hdrp->s.TrTypeCntx.TypeContext, hdrp->s.IProcTrapStatus.Status);
19144 +       
19145 +       if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0)
19146 +       {
19147 +           PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr);
19148 +           SimulateBlockWrite (ctxt, hdrp, datap);
19149 +       }
19150 +       else
19151 +       {
19152 +           switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
19153 +           {
19154 +           case TR_SETEVENT & TR_OPCODE_TYPE_MASK:
19155 +               PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: SETEVENT : %x\n", hdrp->s.TrAddr);
19156 +
19157 +               if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_InputDoTrap)
19158 +                   FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus), &trap->FaultSave, FALSE);
19159 +               else if (hdrp->s.TrAddr)
19160 +               {
19161 +                   if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), hdrp->s.TrAddr, FALSE) != ISSUE_COMMAND_OK)
19162 +                       return (EAGAIN);
19163 +               }
19164 +               break;
19165 +
19166 +           case TR_WRITEWORD & TR_OPCODE_TYPE_MASK:
19167 +               SimulateWriteWord (ctxt, hdrp, datap);
19168 +               break;
19169 +
19170 +           case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK:
19171 +               SimulateWriteDWord (ctxt, hdrp, datap);
19172 +               break;
19173 +               
19174 +           case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:
19175 +               if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap)
19176 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19177 +               else
19178 +               {
19179 +                   switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus))
19180 +                   {
19181 +                   case MI_WaitForUnLockDescRead:
19182 +                       /*
19183 +                        * Fault occured on the read of the queue descriptor - since the ack
19184 +                        * has been sent we need to move the queue on one slot.
19185 +                        */
19186 +                       PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TR_UNLOCKQUEUE : desc read fault\n");
19187 +
19188 +                       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE);
19189 +                       
19190 +                       if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent),
19191 +                                         hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK)
19192 +                       {
19193 +                           /* Failed to issue setevent to complete queue unlock, since we've already unlocked */
19194 +                           /* the queue, we should "convert" this transaction into a setevent transaction that */
19195 +                           /* hasn't trapped */
19196 +                           PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n");
19197 +
19198 +                           ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET);
19199 +                           return (EAGAIN);
19200 +                       }
19201 +                       break;
19202 +                       
19203 +                   case MI_DoSetEvent:
19204 +                       /*
19205 +                        * Fault occured on either the write to unlock the queue or during 
19206 +                        * processing of the event.  Test the fault address against the
19207 +                        * queue address to find out which - in this case, since the ack
19208 +                        * has been sent we need to move the queue on one slot.
19209 +                        */
19210 +                       if (trap->FaultSave.s.FaultAddress == trap->LockQueuePointer)
19211 +                       {
19212 +                           PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: fixed unlock queue write to unlock fault\n");
19213 +
19214 +                           SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE);
19215 +                           
19216 +                           if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent),
19217 +                                             hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK)
19218 +                           {
19219 +                               /* Failed to issue setevent to complete queue unlock, since we've already unlocked */
19220 +                               /* the queue, we should "convert" this transaction into a setevent transaction that */
19221 +                               /* hasn't trapped */
19222 +                               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n");
19223 +                               
19224 +                               ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET);
19225 +                               return (EFAIL);
19226 +                           }
19227 +                           break;
19228 +                       }
19229 +                       /*DROPTHROUGH*/
19230 +                       
19231 +                   default:
19232 +                       FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus),
19233 +                                       &trap->FaultSave, FALSE);
19234 +                       break;
19235 +                   }
19236 +                   trap->LockQueuePointer = trap->UnlockQueuePointer = 0;
19237 +               }
19238 +               break;
19239 +
19240 +           case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:
19241 +               /* Just ignore send-discard transactions */
19242 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: ignore SENDDISCARD\n");
19243 +               break;
19244 +
19245 +           case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:
19246 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: REMOTEDMA\n");         
19247 +
19248 +               /* modify the dma type since it will still be a "read" dma */
19249 +               ((E3_DMA_BE *) datap)->s.dma_type &= ~(DMA_TYPE_READ | E3_DMA_CONTEXT_MASK);
19250 +               ((E3_DMA_BE *) datap)->s.dma_type |= DMA_TYPE_ISREMOTE;
19251 +
19252 +               RestartDmaDesc (ctxt, (E3_DMA_BE *) datap);
19253 +               break;
19254 +
19255 +           case TR_TRACEROUTE & TR_OPCODE_TYPE_MASK:
19256 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TRACEROUTE\n");
19257 +               SimulateTraceRoute (ctxt, hdrp, datap);
19258 +               break;
19259 +
19260 +           default:
19261 +               ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19262 +               break;
19263 +           }
19264 +       }
19265 +
19266 +       /*
19267 +        * We've successfully processed this transaction, so move onto the 
19268 +        * next one.
19269 +        */
19270 +       trap->TrappedTransaction++;
19271 +       trap->TrappedDataBuffer++;
19272 +    }
19273 +    
19274 +    return (ESUCCESS);
19275 +}
19276 +
19277 +static void
19278 +ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr)
19279 +{
19280 +    hdrp->s.TrTypeCntx.s.Type           = TR_SETEVENT;
19281 +    hdrp->s.TrTypeCntx.s.StatusRegValid = 0;
19282 +    hdrp->s.TrAddr                      = Addr;
19283 +}
19284 +
19285 +void
19286 +SimulateBlockWrite (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19287 +{
19288 +    void     *saddr  = (void *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f));
19289 +    unsigned  nbytes = (hdrp->s.TrTypeCntx.s.Type) & TR_PARTSIZE_MASK;
19290 +    int       i;
19291 +
19292 +    if (nbytes == 0)
19293 +       nbytes = sizeof (E3_IprocTrapData_BE);
19294 +
19295 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19296 +    {
19297 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19298 +
19299 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateBlockWrite: faulted at %x\n", hdrp->s.TrAddr);
19300 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19301 +       return;
19302 +    }
19303 +
19304 +    /*
19305 +     * NOTE: since the block copy could be to sdram, we issue the writes backwards,
19306 +     *       except we MUST ensure that the last item in the block is written last.
19307 +     */
19308 +    switch (((hdrp->s.TrTypeCntx.s.Type) >> TR_TYPE_SHIFT) & TR_TYPE_MASK)
19309 +    {
19310 +    case TR_TYPE_BYTE:                                         /* 8 bit */
19311 +       for (i = nbytes - (2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8))
19312 +           ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]);
19313 +       i = nbytes - sizeof (E3_uint8);
19314 +       ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]);
19315 +       break;
19316 +       
19317 +    case TR_TYPE_SHORT:                                                /* 16 bit */
19318 +       for (i = nbytes - (2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16))
19319 +       ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]);
19320 +       i = nbytes - sizeof (E3_uint16);
19321 +       ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]);
19322 +       break;
19323 +       
19324 +    case TR_TYPE_WORD:                                         /* 32 bit */
19325 +       for (i = nbytes - (2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32))
19326 +           ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]);
19327 +       i = nbytes - sizeof (E3_uint32);
19328 +       ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]);
19329 +       break;
19330 +       
19331 +    case TR_TYPE_DWORD:                                                /* 64 bit  */
19332 +       for (i = nbytes - (2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
19333 +           ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]);
19334 +       i = nbytes - sizeof (E3_uint64);
19335 +       ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]);
19336 +       break;
19337 +    }
19338 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19339 +}
19340 +
19341 +void
19342 +SimulateWriteWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19343 +{
19344 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19345 +    {
19346 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19347 +
19348 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteWord: faulted at %x\n", hdrp->s.TrAddr);
19349 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19350 +       return;
19351 +    }
19352 +
19353 +    ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr, ((E3_uint32 *) datap)[WordEndianFlip]);
19354 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19355 +}
19356 +
19357 +void
19358 +SimulateWriteDWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19359 +{
19360 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19361 +    {
19362 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19363 +
19364 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteDWord: faulted at %x\n", hdrp->s.TrAddr);
19365 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19366 +       return;
19367 +    }
19368 +
19369 +    ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr, ((E3_uint64 *) datap)[0]);
19370 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19371 +}
19372 +
19373 +void
19374 +SimulateTraceRoute (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19375 +{
19376 +    E3_uint32 *saddr  = (E3_uint32 *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f));
19377 +    unsigned   nwords = TrSizeTable[(hdrp->s.TrTypeCntx.s.Type >> TR_SIZE_SHIFT) & TR_SIZE_MASK] / sizeof (E3_uint32);
19378 +    int        i;
19379 +
19380 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19381 +    {
19382 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19383 +
19384 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateTraceRoute: faulted at %x\n", hdrp->s.TrAddr);
19385 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19386 +       return;
19387 +    }
19388 +    
19389 +    for (i = nwords-2; i >= 0; i--)
19390 +       ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]);
19391 +
19392 +    i = nwords-1;
19393 +    ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]);
19394 +
19395 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19396 +}
19397 +
19398 +void
19399 +SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck)
19400 +{
19401 +    E3_uint32 QueueLock;
19402 +    E3_Addr   QueueBPTR;
19403 +    E3_Addr   QueueFPTR;
19404 +    E3_uint64 QueueStateAndBPTR;
19405 +
19406 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19407 +    {
19408 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19409 +
19410 +       PRINTF1 (ctxt, DBG_IPROC, "UnlockQueue: faulted with QueuePointer %x\n", QueuePointer);
19411 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, QueuePointer);
19412 +       return;
19413 +    }
19414 +    
19415 +    if (SentAck)
19416 +    {
19417 +       QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_bptr));
19418 +       QueueFPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_fptr));
19419 +
19420 +       if (QueueBPTR == ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_top)))     /* move on back pointer */
19421 +           QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_base));
19422 +       else
19423 +           QueueBPTR += ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_size));
19424 +       
19425 +       QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state));
19426 +
19427 +       if (QueueBPTR == QueueFPTR)                             /* and set full bit if fptr == bptr */
19428 +           QueueLock |= E3_QUEUE_FULL;
19429 +       
19430 +       QueueLock &= ~E3_QUEUE_LOCKED;
19431 +       
19432 +       QueueStateAndBPTR = (E3_uint64)QueueLock << 32 | QueueBPTR;
19433 +
19434 +       ELAN3_OP_STORE64 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueStateAndBPTR);
19435 +    }
19436 +    else
19437 +    {
19438 +       QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state));
19439 +
19440 +       QueueLock &= ~E3_QUEUE_LOCKED;
19441 +       
19442 +       ELAN3_OP_STORE32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueLock);
19443 +    }
19444 +
19445 +    no_fault();
19446 +}
19447 +
19448 +static void
19449 +BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp)
19450 +{
19451 +    if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)                 /* EOP */
19452 +    {
19453 +       switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
19454 +       {
19455 +       case EOP_BADACK:
19456 +           BumpStat (dev, EopBadAcks);
19457 +           break;
19458 +       case EOP_ERROR_RESET:
19459 +           BumpStat (dev, EopResets);
19460 +           break;
19461 +       }
19462 +    }
19463 +    else if (hdrp->s.TrTypeCntx.s.StatusRegValid)
19464 +    {
19465 +       /*
19466 +        * Errors are tested in order of badness. i.e. badlength will prevent a BadCrc and so on...
19467 +        */
19468 +       if (hdrp->s.IProcTrapStatus.s.BadLength)
19469 +           BumpStat (dev, InputterBadLength);
19470 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD)
19471 +           BumpStat (dev, InputterCRCBad);
19472 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR)
19473 +           BumpStat (dev, InputterCRCErrors);
19474 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_DISCARD)
19475 +           BumpStat (dev, InputterCRCDiscards);
19476 +    }
19477 +}
19478 +
19479 +char *
19480 +IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19481 +{
19482 +    static char buffer[256];
19483 +    static char typeString[256];
19484 +    static char statusString[256];
19485 +    char *ptr;
19486 +    E3_Addr     Addr        = hdrp->s.TrAddr;
19487 +    E3_uint32   Type        = hdrp->s.TrTypeCntx.s.Type;
19488 +    E3_uint32   Context     = hdrp->s.TrTypeCntx.s.Context;
19489 +    E3_uint32   StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid;
19490 +    
19491 +    if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)
19492 +    {
19493 +       switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
19494 +       {
19495 +       case EOP_GOOD:          sprintf (typeString, "EOP GOOD"); break;
19496 +       case EOP_BADACK:        sprintf (typeString, "EOP BADACK"); break;
19497 +       case EOP_ERROR_RESET:   sprintf (typeString, "EOP ERROR RESET"); break;
19498 +       default:                sprintf (typeString, "EOP - bad status"); break;
19499 +       }
19500 +       sprintf (buffer, "%15s Cntx=%08x", typeString, Context);
19501 +    }
19502 +    else
19503 +    {
19504 +       if (Type & TR_WRITEBLOCK_BIT)
19505 +       {
19506 +           switch ((Type >> TR_TYPE_SHIFT) & TR_TYPE_MASK)
19507 +           {
19508 +           case TR_TYPE_BYTE:  ptr = "Byte";    break;
19509 +           case TR_TYPE_SHORT: ptr = "Short";   break;
19510 +           case TR_TYPE_WORD:  ptr = "Word";    break;
19511 +           case TR_TYPE_DWORD: ptr = "Double";  break;
19512 +           default:            ptr = "Unknown"; break;
19513 +           }
19514 +           
19515 +           sprintf (typeString, "WriteBlock Type=%s Size=%2d", ptr, Type & TR_PARTSIZE_MASK);
19516 +       }
19517 +       else
19518 +       {
19519 +           switch (Type & TR_OPCODE_TYPE_MASK)
19520 +           {
19521 +           case TR_SETEVENT & TR_OPCODE_TYPE_MASK:             sprintf (typeString, "Setevent"); break;
19522 +           case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Remote DMA"); break;
19523 +           case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Lock Queue"); break;
19524 +           case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "Unlock Queue"); break;
19525 +           case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "Send Discard"); break;
19526 +           case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "DMA Identify"); break;
19527 +           case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK:       sprintf (typeString, "Thread Identify"); break;
19528 +           case TR_GTE & TR_OPCODE_TYPE_MASK:                  sprintf (typeString, "GTE"); break;
19529 +           case TR_LT & TR_OPCODE_TYPE_MASK:                   sprintf (typeString, "LT"); break;
19530 +           case TR_EQ & TR_OPCODE_TYPE_MASK:                   sprintf (typeString, "EQ"); break;
19531 +           case TR_NEQ & TR_OPCODE_TYPE_MASK:                  sprintf (typeString, "NEQ"); break;
19532 +           case TR_WRITEWORD & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Write Word"); break;
19533 +           case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK:      sprintf (typeString, "Write Double"); break;
19534 +           case TR_ATOMICADDWORD & TR_OPCODE_TYPE_MASK:        sprintf (typeString, "Atomic Add"); break;
19535 +           case TR_TESTANDWRITE & TR_OPCODE_TYPE_MASK:         sprintf (typeString, "Test and Write"); break;
19536 +           default:                                            sprintf (typeString, "Type=%d", Type & TR_OPCODE_TYPE_MASK); break;
19537 +           }
19538 +       }
19539 +       sprintf (buffer, "%15s Addr=%08x Cntx=%08x", typeString, Addr, Context);
19540 +       /*(Type & TR_SENDACK)      ? " Sendack" : "", */
19541 +       /*(Type & TR_LAST_TRANS)   ? " LastTrans" : "", */
19542 +       /*(Type & TR_WAIT_FOR_EOP) ? " WaitForEop" : ""); */
19543 +    }
19544 +    
19545 +    if (StatusValid)
19546 +    {
19547 +       sprintf (statusString, " Type=%s %x", MiToName (hdrp->s.IProcTrapStatus.s.TrapType), hdrp->s.IProcTrapStatus.Status);
19548 +       strcat (buffer, statusString);
19549 +
19550 +       if (hdrp->s.IProcTrapStatus.s.BadLength)
19551 +           strcat (buffer, " BadLength");
19552 +       switch (hdrp->s.IProcTrapStatus.Status & CRC_MASK)
19553 +       {
19554 +       case CRC_STATUS_DISCARD:
19555 +           strcat (buffer, " CRC Discard");
19556 +           break;
19557 +       case CRC_STATUS_ERROR:
19558 +           strcat (buffer, " CRC Error");
19559 +           break;
19560 +
19561 +       case CRC_STATUS_BAD:
19562 +           strcat (buffer, " CRC Bad");
19563 +           break;
19564 +       }
19565 +    }
19566 +
19567 +    return (buffer);
19568 +}
19569 +
19570 +
19571 +/*
19572 + * Local variables:
19573 + * c-file-style: "stroustrup"
19574 + * End:
19575 + */
19576 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/Makefile
19577 ===================================================================
19578 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/Makefile     2004-02-23 16:02:56.000000000 -0500
19579 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/Makefile  2005-07-28 14:52:52.808684816 -0400
19580 @@ -0,0 +1,15 @@
19581 +#
19582 +# Makefile for Quadrics QsNet
19583 +#
19584 +# Copyright (c) 2002-2004 Quadrics Ltd
19585 +#
19586 +# File: drivers/net/qsnet/elan3/Makefile
19587 +#
19588 +
19589 +
19590 +#
19591 +
19592 +obj-$(CONFIG_ELAN3)    += elan3.o
19593 +elan3-objs     := context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o
19594 +
19595 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
19596 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/Makefile.conf
19597 ===================================================================
19598 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/Makefile.conf        2004-02-23 16:02:56.000000000 -0500
19599 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/Makefile.conf     2005-07-28 14:52:52.808684816 -0400
19600 @@ -0,0 +1,10 @@
19601 +# Flags for generating QsNet Linux Kernel Makefiles
19602 +MODNAME                =       elan3.o
19603 +MODULENAME     =       elan3
19604 +KOBJFILES      =       context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o
19605 +EXPORT_KOBJS   =       elandev_linux.o procfs_linux.o
19606 +CONFIG_NAME    =       CONFIG_ELAN3
19607 +SGALFC         =       
19608 +# EXTRALINES START
19609 +
19610 +# EXTRALINES END
19611 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/minames.c
19612 ===================================================================
19613 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/minames.c    2004-02-23 16:02:56.000000000 -0500
19614 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/minames.c 2005-07-28 14:52:52.809684664 -0400
19615 @@ -0,0 +1,38 @@
19616 +/*
19617 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
19618 + *
19619 + *    For licensing information please see the supplied COPYING file
19620 + *
19621 + */
19622 +
19623 +#ident "@(#)$Id: minames.c,v 1.12 2003/06/07 15:57:49 david Exp $"
19624 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/minames.c,v $*/
19625 +
19626 +#include <qsnet/kernel.h>
19627 +#include <elan3/urom_addrs.h>
19628 +
19629 +caddr_t
19630 +MiToName (int mi)
19631 +{
19632 +    static char space[32];
19633 +    static struct {
19634 +       int   mi;
19635 +       char *name;
19636 +    } info[] = {
19637 +#include <elan3/minames.h>
19638 +    };
19639 +    register int i;
19640 +
19641 +
19642 +    for (i = 0; i < sizeof(info)/sizeof(info[0]); i++)
19643 +       if (info[i].mi == mi)
19644 +           return (info[i].name);
19645 +    sprintf (space, "MI %x", mi);
19646 +    return (space);
19647 +}
19648 +
19649 +/*
19650 + * Local variables:
19651 + * c-file-style: "stroustrup"
19652 + * End:
19653 + */
19654 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/network_error.c
19655 ===================================================================
19656 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/network_error.c      2004-02-23 16:02:56.000000000 -0500
19657 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/network_error.c   2005-07-28 14:52:52.810684512 -0400
19658 @@ -0,0 +1,777 @@
19659 +/*
19660 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
19661 + *
19662 + *    For licensing information please see the supplied COPYING file
19663 + *
19664 + */
19665 +
19666 +#ident "@(#)$Id: network_error.c,v 1.32.2.1 2004/10/28 11:54:57 david Exp $"
19667 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/network_error.c,v $*/
19668 +
19669 +#include <qsnet/kernel.h>
19670 +#include <qsnet/kthread.h>
19671 +
19672 +#include <elan3/elanregs.h>
19673 +#include <elan3/elandev.h>
19674 +#include <elan3/elanvp.h>
19675 +#include <elan3/elan3mmu.h>
19676 +#include <elan3/elanctxt.h>
19677 +#include <elan3/elan3mmu.h>
19678 +#include <elan3/elandebug.h>
19679 +
19680 +#ifdef DIGITAL_UNIX
19681 +#include <sys/cred.h>
19682 +#include <sys/mbuf.h>
19683 +#include <sys/utsname.h>
19684 +#include <net/if.h>
19685 +#include <netinet/in.h>
19686 +#include <netinet/in_var.h>
19687 +
19688 +#include <rpc/types.h>
19689 +#include <rpc/auth.h>
19690 +#include <rpc/xdr.h>
19691 +#include <rpc/clnt.h>
19692 +
19693 +typedef xdrproc_t kxdrproc_t;
19694 +#endif
19695 +
19696 +#ifdef LINUX
19697 +#include <linux/sunrpc/types.h>
19698 +#include <linux/sunrpc/auth.h>
19699 +#include <linux/sunrpc/xdr.h>
19700 +#include <linux/sunrpc/clnt.h>
19701 +
19702 +#include <linux/utsname.h>
19703 +#define SYS_NMLN       __NEW_UTS_LEN
19704 +#endif
19705 +
19706 +#include <elan3/neterr_rpc.h>
19707 +
19708 +spinlock_t       ResolveRequestLock;
19709 +kcondvar_t       ResolveRequestWait;
19710 +
19711 +NETERR_RESOLVER  *ResolveRequestHead;
19712 +NETERR_RESOLVER **ResolveRequestTailp = &ResolveRequestHead;
19713 +int              ResolveRequestCount;
19714 +int              ResolveRequestThreads;
19715 +int              ResolveRequestMaxThreads = 4;
19716 +int              ResolveRequestTimeout = 60;
19717 +
19718 +typedef struct neterr_server
19719 +{
19720 +    struct neterr_server *Next;
19721 +    struct neterr_server *Prev;
19722 +    unsigned             ElanId;
19723 +
19724 +    char                *Name;
19725 +    int                          RefCount;
19726 +    struct sockaddr_in    Addr;
19727 +} NETERR_SERVER;
19728 +
19729 +#define NETERR_HASH_ENTRIES    64
19730 +#define NETERR_HASH(elanid)    (((unsigned) elanid) % NETERR_HASH_ENTRIES)
19731 +NETERR_SERVER *NeterrServerHash[NETERR_HASH_ENTRIES];
19732 +kmutex_t       NeterrServerLock;
19733 +
19734 +static NETERR_SERVER *FindNeterrServer (int elanId);
19735 +static void           DereferenceNeterrServer (NETERR_SERVER *server);
19736 +static int            CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg);
19737 +
19738 +void
19739 +InitialiseNetworkErrorResolver ()
19740 +{
19741 +    spin_lock_init (&ResolveRequestLock);
19742 +    kcondvar_init (&ResolveRequestWait);
19743 +    
19744 +    ResolveRequestHead  = NULL;
19745 +    ResolveRequestTailp = &ResolveRequestHead;
19746 +
19747 +    kmutex_init (&NeterrServerLock);
19748 +}
19749 +
19750 +void
19751 +FinaliseNetworkErrorResolver ()
19752 +{
19753 +    spin_lock_destroy (&ResolveRequestLock);
19754 +    kcondvar_destroy (&ResolveRequestWait);
19755 +    
19756 +    kmutex_destroy (&NeterrServerLock);
19757 +}
19758 +
19759 +static NETERR_RESOLVER *
19760 +AllocateNetworkErrorResolver (void)
19761 +{
19762 +    NETERR_RESOLVER *rvp;
19763 +
19764 +    KMEM_ZALLOC (rvp, NETERR_RESOLVER *, sizeof (NETERR_RESOLVER), TRUE);
19765 +    spin_lock_init (&rvp->Lock);
19766 +
19767 +    return (rvp);
19768 +}
19769 +
19770 +void
19771 +FreeNetworkErrorResolver (NETERR_RESOLVER *rvp)
19772 +{
19773 +    spin_lock_destroy (&rvp->Lock);
19774 +    KMEM_FREE (rvp, sizeof (NETERR_RESOLVER));
19775 +}
19776 +
19777 +static void
19778 +elan3_neterr_resolver (void)
19779 +{
19780 +    NETERR_RESOLVER *rvp;
19781 +    NETERR_SERVER   *server;
19782 +    int                     status;
19783 +    unsigned long    flags;
19784 +
19785 +    kernel_thread_init("elan3_neterr_resolver");
19786 +    spin_lock (&ResolveRequestLock);
19787 +
19788 +    while ((rvp = ResolveRequestHead) != NULL)
19789 +    {
19790 +       if ((ResolveRequestHead = rvp->Next) == NULL)
19791 +           ResolveRequestTailp = &ResolveRequestHead;
19792 +       
19793 +       spin_unlock (&ResolveRequestLock);
19794 +
19795 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: rvp = %p\n", rvp);
19796 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      Rail          %d\n", rvp->Message.Rail);
19797 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      SrcCapability %s\n", CapabilityString (&rvp->Message.SrcCapability));
19798 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      DstCapability %s\n", CapabilityString (&rvp->Message.DstCapability));
19799 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieAddr    %08x\n", rvp->Message.CookieAddr);
19800 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieVProc   %08x\n", rvp->Message.CookieVProc);
19801 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      NextCookie    %08x\n", rvp->Message.NextCookie);
19802 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      WaitForEop    %08x\n", rvp->Message.WaitForEop);
19803 +       
19804 +       if ((server = FindNeterrServer (rvp->Location.loc_node)) == NULL)
19805 +           status = ECONNREFUSED;
19806 +       else if (ResolveRequestTimeout && ((int)(lbolt - rvp->Timestamp)) > (ResolveRequestTimeout*HZ))
19807 +       {
19808 +           printk ("elan_neterr: rpc to '%s' timedout - context %d killed\n", server->Name, rvp->Message.SrcCapability.cap_mycontext);
19809 +           status = ECONNABORTED;
19810 +       }
19811 +       else
19812 +       {
19813 +           status = CallNeterrServer (server, &rvp->Message);
19814 +
19815 +           DereferenceNeterrServer (server);
19816 +       }
19817 +       
19818 +       if ((status == EINTR || status == ETIMEDOUT) && rvp->Ctxt != NULL)
19819 +       {
19820 +           PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: retry rvp=%p\n", rvp);
19821 +           spin_lock (&ResolveRequestLock);
19822 +           rvp->Next = NULL;
19823 +           *ResolveRequestTailp = rvp;
19824 +           ResolveRequestTailp = &rvp->Next;
19825 +       }
19826 +       else
19827 +       {
19828 +           rvp->Status = status;
19829 +           
19830 +           spin_lock (&rvp->Lock);
19831 +           
19832 +           if (rvp->Ctxt != NULL)
19833 +           {
19834 +               PRINTF2 (rvp->Ctxt, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for ctxt %p\n", rvp, rvp->Ctxt);
19835 +               spin_lock_irqsave (&rvp->Ctxt->Device->IntrLock, flags);
19836 +               
19837 +               rvp->Completed = TRUE;
19838 +               
19839 +               kcondvar_wakeupall (&rvp->Ctxt->Wait, &rvp->Ctxt->Device->IntrLock);
19840 +               
19841 +               /*
19842 +                * drop the locks out of order since the rvp can get freed
19843 +                * as soon as we drop the IntrLock - so cannot reference the
19844 +                * rvp after this.
19845 +                */
19846 +               
19847 +               spin_unlock (&rvp->Lock);
19848 +               spin_unlock_irqrestore (&rvp->Ctxt->Device->IntrLock, flags);
19849 +           }
19850 +           else
19851 +           {
19852 +               PRINTF2 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for deceased ctxt %p\n", rvp, rvp->Ctxt);
19853 +               spin_unlock (&rvp->Lock);
19854 +               FreeNetworkErrorResolver (rvp);
19855 +           }
19856 +           
19857 +           spin_lock (&ResolveRequestLock);
19858 +           ResolveRequestCount--;
19859 +       }
19860 +    }
19861 +
19862 +    ResolveRequestThreads--;
19863 +
19864 +    spin_unlock (&ResolveRequestLock);
19865 +    kernel_thread_exit();
19866 +}
19867 +
19868 +int
19869 +QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp)
19870 +{
19871 +    int                           isdma   = trap->DmaIdentifyTransaction != NULL;
19872 +    E3_IprocTrapHeader_BE *hdrp    = isdma ? trap->DmaIdentifyTransaction : trap->ThreadIdentifyTransaction;
19873 +    E3_uint32              process = isdma ? (hdrp->s.TrAddr & 0xFFFF) : (hdrp->s.TrData0 & 0xFFFF);
19874 +    NETERR_RESOLVER       *rvp;
19875 +
19876 +    PRINTF2 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: process = %d %s\n", process, isdma ? "(dma)" : "(thread)");
19877 +
19878 +    if ((rvp = AllocateNetworkErrorResolver()) == NULL)
19879 +    {
19880 +       PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot allocate resolver\n");
19881 +       return (ENOMEM);
19882 +    }
19883 +
19884 +    rvp->Message.Rail = ctxt->Device->Devinfo.dev_rail;
19885 +
19886 +    krwlock_read (&ctxt->VpLock);
19887 +    rvp->Location = ProcessToLocation (ctxt, NULL, process, &rvp->Message.SrcCapability);
19888 +    krwlock_done (&ctxt->VpLock);
19889 +
19890 +    if (rvp->Location.loc_node == ELAN3_INVALID_NODE)
19891 +    {
19892 +       PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: invalid elan id\n");
19893 +
19894 +       FreeNetworkErrorResolver (rvp);
19895 +       return (EINVAL);
19896 +    }
19897 +
19898 +    rvp->Message.DstCapability = ctxt->Capability;
19899 +    rvp->Message.DstProcess    = elan3_process (ctxt);
19900 +    rvp->Message.WaitForEop    = (trap->WaitForEopTransaction != NULL);
19901 +
19902 +    if (isdma)
19903 +    {
19904 +       rvp->Message.CookieAddr  = 0;
19905 +       rvp->Message.CookieVProc = hdrp->s.TrAddr;
19906 +       rvp->Message.NextCookie  = 0;
19907 +    }
19908 +    else
19909 +    {
19910 +       rvp->Message.CookieAddr  = hdrp->s.TrAddr;
19911 +       rvp->Message.CookieVProc = hdrp->s.TrData0;
19912 +       rvp->Message.NextCookie  = hdrp->s.TrData1;
19913 +    }
19914 +
19915 +    rvp->Completed = FALSE;
19916 +    rvp->Ctxt      = ctxt;
19917 +    rvp->Timestamp = lbolt;
19918 +
19919 +    spin_lock (&ResolveRequestLock);
19920 +
19921 +    rvp->Next = NULL;
19922 +    *ResolveRequestTailp = rvp;
19923 +    ResolveRequestTailp = &rvp->Next;
19924 +    ResolveRequestCount++;
19925 +
19926 +    kcondvar_wakeupone (&ResolveRequestWait, &ResolveRequestLock);
19927 +
19928 +    if (ResolveRequestCount < ResolveRequestThreads || ResolveRequestThreads >= ResolveRequestMaxThreads)
19929 +       spin_unlock (&ResolveRequestLock);
19930 +    else
19931 +    {
19932 +       ResolveRequestThreads++;
19933 +
19934 +       spin_unlock (&ResolveRequestLock);
19935 +       if (kernel_thread_create (elan3_neterr_resolver, NULL) == NULL)
19936 +       {
19937 +           spin_lock (&ResolveRequestLock);
19938 +           ResolveRequestThreads--;
19939 +           spin_unlock (&ResolveRequestLock);
19940 +           
19941 +           if (ResolveRequestThreads == 0)
19942 +           {
19943 +               PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot thread pool\n");
19944 +
19945 +               FreeNetworkErrorResolver (rvp);
19946 +               return (ENOMEM);
19947 +           }
19948 +       }
19949 +    }
19950 +
19951 +    *rvpp = rvp;
19952 +    return (ESUCCESS);
19953 +}
19954 +
19955 +void
19956 +CancelNetworkErrorResolver (NETERR_RESOLVER *rvp)
19957 +{
19958 +    spin_lock (&rvp->Lock);
19959 +
19960 +    PRINTF2 (rvp->Ctxt, DBG_NETERR, "CancelNetworkErrorResolver: rvp=%p %s\n", rvp, rvp->Completed ? "Completed" : "Pending");
19961 +
19962 +    if (rvp->Completed)
19963 +    {
19964 +       spin_unlock (&rvp->Lock);
19965 +       FreeNetworkErrorResolver (rvp);
19966 +    }
19967 +    else
19968 +    {
19969 +       rvp->Ctxt = NULL;
19970 +       spin_unlock (&rvp->Lock);
19971 +    }
19972 +}
19973 +
19974 +static NETERR_FIXUP *
19975 +AllocateNetworkErrorFixup (void)
19976 +{
19977 +    NETERR_FIXUP *nef;
19978 +
19979 +    KMEM_ZALLOC (nef, NETERR_FIXUP *, sizeof (NETERR_FIXUP), TRUE);
19980 +
19981 +    if (nef == (NETERR_FIXUP *) NULL)
19982 +       return (NULL);
19983 +
19984 +    kcondvar_init (&nef->Wait);
19985 +
19986 +    return (nef);
19987 +}
19988 +
19989 +static void
19990 +FreeNetworkErrorFixup (NETERR_FIXUP *nef)
19991 +{
19992 +    kcondvar_destroy (&nef->Wait);
19993 +    KMEM_FREE (nef, sizeof (NETERR_FIXUP));
19994 +}
19995 +
19996 +int
19997 +ExecuteNetworkErrorFixup (NETERR_MSG *msg)
19998 +{
19999 +    ELAN3_DEV      *dev;
20000 +    ELAN3_CTXT   *ctxt;
20001 +    NETERR_FIXUP  *nef;
20002 +    NETERR_FIXUP **predp;
20003 +    int                   rc;
20004 +    unsigned long  flags;
20005 +
20006 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "ExecuteNetworkErrorFixup: msg = %p\n", msg);
20007 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      Rail          %d\n", msg->Rail);
20008 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      SrcCapability %s\n", CapabilityString (&msg->SrcCapability));
20009 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      DstCapability %s\n", CapabilityString (&msg->DstCapability));
20010 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieAddr    %08x\n", msg->CookieAddr);
20011 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieVProc   %08x\n", msg->CookieVProc);
20012 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      NextCookie    %08x\n", msg->NextCookie);
20013 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      WaitForEop    %08x\n", msg->WaitForEop);
20014 +       
20015 +    if ((dev = elan3_device (msg->Rail)) == NULL)
20016 +       return (ESRCH);
20017 +
20018 +    if ((nef = AllocateNetworkErrorFixup()) == NULL)
20019 +       return (ENOMEM);
20020 +
20021 +    if (nef == (NETERR_FIXUP *) NULL)
20022 +       return (ENOMEM);
20023 +    
20024 +    bcopy (msg, &nef->Message, sizeof (NETERR_MSG));
20025 +
20026 +    spin_lock_irqsave (&dev->IntrLock, flags);
20027 +    
20028 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, msg->SrcCapability.cap_mycontext);
20029 +
20030 +    if (ctxt == NULL)
20031 +       rc = ESRCH;
20032 +    else if (!ELAN_CAP_MATCH (&msg->SrcCapability, &ctxt->Capability))
20033 +       rc = EPERM;
20034 +    else
20035 +    {  
20036 +       if (ctxt->Status & CTXT_NO_LWPS)
20037 +           rc = EAGAIN;
20038 +       else
20039 +       {
20040 +           for (predp = &ctxt->NetworkErrorFixups; *predp != NULL; predp = &(*predp)->Next)
20041 +               ;
20042 +           nef->Next = NULL;
20043 +           *predp = nef;
20044 +           
20045 +           kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
20046 +
20047 +           while (! nef->Completed)
20048 +               kcondvar_wait (&nef->Wait, &dev->IntrLock, &flags);
20049 +
20050 +           rc = nef->Status;
20051 +       }
20052 +    }
20053 +    
20054 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
20055 +
20056 +    FreeNetworkErrorFixup (nef);
20057 +
20058 +    return (rc);
20059 +}
20060 +
20061 +void
20062 +CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status)
20063 +{
20064 +    ELAN3_DEV *dev = ctxt->Device;
20065 +    unsigned long flags;
20066 +
20067 +    PRINTF2 (ctxt, DBG_NETERR, "CompleteNetworkErrorFixup: %p %d\n", nef, status);
20068 +
20069 +    spin_lock_irqsave (&dev->IntrLock, flags);
20070 +
20071 +    nef->Status = status;
20072 +    nef->Completed = TRUE;
20073 +    kcondvar_wakeupone (&nef->Wait, &dev->IntrLock);
20074 +
20075 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
20076 +}
20077 +
20078 +
20079 +static NETERR_SERVER *
20080 +NewNeterrServer (int elanId, struct sockaddr_in *addr, char *name)
20081 +{
20082 +    NETERR_SERVER *server;
20083 +
20084 +    KMEM_ZALLOC (server, NETERR_SERVER *, sizeof (NETERR_SERVER), TRUE);
20085 +    KMEM_ALLOC  (server->Name, char *, strlen (name)+1, TRUE);
20086 +
20087 +    bcopy (addr, &server->Addr, sizeof (struct sockaddr_in));
20088 +    bcopy (name, server->Name, strlen (name)+1);
20089 +
20090 +    server->ElanId   = elanId;
20091 +    server->RefCount = 1;
20092 +    
20093 +    return (server);
20094 +}
20095 +
20096 +static void
20097 +DeleteNeterrServer (NETERR_SERVER *server)
20098 +{
20099 +    KMEM_FREE (server->Name, strlen(server->Name)+1);
20100 +    KMEM_FREE (server, sizeof (NETERR_SERVER));
20101 +}
20102 +
20103 +static NETERR_SERVER *
20104 +FindNeterrServer (int elanId)
20105 +{
20106 +    NETERR_SERVER *server;
20107 +    
20108 +    kmutex_lock (&NeterrServerLock);
20109 +    
20110 +    for (server = NeterrServerHash[NETERR_HASH(elanId)]; server != NULL; server = server->Next)
20111 +       if (server->ElanId == elanId)
20112 +           break;
20113 +
20114 +    if (server != NULL)
20115 +       server->RefCount++;
20116 +    kmutex_unlock (&NeterrServerLock);
20117 +
20118 +    return (server);
20119 +}
20120 +
20121 +static void
20122 +DereferenceNeterrServer (NETERR_SERVER *server)
20123 +{
20124 +    kmutex_lock (&NeterrServerLock);
20125 +    if ((--server->RefCount) == 0)
20126 +       DeleteNeterrServer (server);
20127 +    kmutex_unlock  (&NeterrServerLock);
20128 +}
20129 +
20130 +int
20131 +AddNeterrServer (int elanId, struct sockaddr_in *addr, char *name)
20132 +{
20133 +    NETERR_SERVER *server;
20134 +    NETERR_SERVER *old;
20135 +    int            hashval = NETERR_HASH(elanId);
20136 +
20137 +    server = NewNeterrServer (elanId, addr, name);
20138 +    
20139 +    if (server == NULL)
20140 +       return (ENOMEM);
20141 +    
20142 +    kmutex_lock (&NeterrServerLock);
20143 +    for (old = NeterrServerHash[hashval]; old != NULL; old = old->Next)
20144 +       if (old->ElanId == elanId)
20145 +           break;
20146 +    
20147 +    /* remove "old" server from hash table */
20148 +    if (old != NULL)
20149 +    {
20150 +       if (old->Prev)
20151 +           old->Prev->Next = old->Next;
20152 +       else
20153 +           NeterrServerHash[hashval] = old->Next;
20154 +       if (old->Next)
20155 +           old->Next->Prev = old->Prev;
20156 +    }
20157 +
20158 +    /* insert "new" server into hash table */
20159 +    if ((server->Next = NeterrServerHash[hashval]) != NULL)
20160 +       server->Next->Prev = server;
20161 +    server->Prev = NULL;
20162 +    NeterrServerHash[hashval] = server;
20163 +
20164 +    kmutex_unlock (&NeterrServerLock);
20165 +
20166 +    if (old != NULL)
20167 +       DereferenceNeterrServer (old);
20168 +    
20169 +    return (ESUCCESS);
20170 +}
20171 +
20172 +int
20173 +AddNeterrServerSyscall (int elanId, void *addrp, void *namep, char *unused)
20174 +{
20175 +    struct sockaddr_in addr;
20176 +    char              *name;
20177 +    int                error;
20178 +    int                nob;
20179 +
20180 +    /* Sanity check the supplied elanId argument */
20181 +    if (elanId < 0)
20182 +       return ( set_errno(EINVAL) );
20183 +
20184 +    KMEM_ALLOC (name, caddr_t, SYS_NMLN, TRUE);
20185 +    
20186 +    if (copyin ((caddr_t) addrp, (caddr_t) &addr, sizeof (addr)) ||
20187 +       copyinstr ((caddr_t) namep, name, SYS_NMLN, &nob))
20188 +    {
20189 +       error = EFAULT;
20190 +    }
20191 +    else
20192 +    {
20193 +       PRINTF2 (DBG_DEVICE, DBG_NETERR, "AddNeterrServer: '%s' at elanid %d\n", name, elanId);
20194 +
20195 +       error = AddNeterrServer (elanId, &addr, name);
20196 +    }
20197 +    KMEM_FREE (name, SYS_NMLN);
20198 +
20199 +    return (error ? set_errno(error) : ESUCCESS);
20200 +}
20201 +
20202 +
20203 +#if defined(DIGITAL_UNIX)
20204 +static int
20205 +CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg)
20206 +{
20207 +    cred_t        *cr = crget();
20208 +    struct rpc_err  rpcerr;
20209 +    extern cred_t  *kcred;
20210 +    struct timeval  wait;
20211 +    enum clnt_stat  rc;
20212 +    int                    status;
20213 +    CLIENT         *clnt;
20214 +    int             error;
20215 +
20216 +    PRINTF4 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) - family=%d port=%d addr=%08x\n", server->Name,
20217 +            server->Addr.sin_family, server->Addr.sin_port, server->Addr.sin_addr.s_addr);
20218 +
20219 +    if ((clnt = clntkudp_create (&server->Addr, (struct sockaddr_in *)0, NETERR_PROGRAM, NETERR_VERSION, 1, cr)) == NULL)
20220 +    {
20221 +       PRINTF1 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): clntkudp_create error\n", server->Name);
20222 +
20223 +       return (ENOMEM);
20224 +    }
20225 +    
20226 +    wait.tv_sec  = NETERR_RPC_TIMEOUT;
20227 +    wait.tv_usec = 0;
20228 +    
20229 +    PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL timeout = %d\n", server->Name, NETERR_RPC_TIMEOUT);
20230 +   
20231 +    rc = CLNT_CALL(clnt, NETERR_FIXUP_RPC, xdr_neterr_msg, (void *)msg, xdr_int, (void *) &status, wait);
20232 +
20233 +    PRINTF3 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL -> %d (%s)\n", server->Name, rc, clnt_sperrno(rc));;
20234 +
20235 +    switch (rc)
20236 +    {
20237 +    case RPC_SUCCESS:
20238 +       break;
20239 +
20240 +    case RPC_INTR:
20241 +       status = EINTR;
20242 +       break;
20243 +
20244 +    case RPC_TIMEDOUT:
20245 +       status = ETIMEDOUT;
20246 +       break;
20247 +
20248 +    default:
20249 +       printf ("CallNeterrServer(%s): %s\n", server->Name, clnt_sperrno(status));
20250 +       status = ENOENT;
20251 +       break;
20252 +    }
20253 +
20254 +    CLNT_DESTROY(clnt);
20255 +
20256 +    crfree(cr);
20257 +    
20258 +    ASSERT(rc == RPC_SUCCESS || status != 0);
20259 +
20260 +    PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): status=%d\n", server->Name, status);
20261 +
20262 +    return (status);
20263 +}
20264 +#endif
20265 +
20266 +#if defined(LINUX)
20267 +
20268 +#define xdrsize(type) ((sizeof(type) + 3) >> 2)
20269 +
20270 +static int
20271 +xdr_error(struct rpc_rqst *req, u32 *p, void *dummy)
20272 +{
20273 +    return -EIO;
20274 +}
20275 +
20276 +static int
20277 +xdr_decode_int(struct rpc_rqst *req, u32 *p, int *res)
20278 +{ 
20279 +    *res = ntohl(*p++);
20280 +    return 0;
20281 +}
20282 +
20283 +#define XDR_capability_sz ((12 + BT_BITOUL(ELAN3_MAX_VPS)) * sizeof (u32))
20284 +
20285 +static int
20286 +xdr_encode_capability(u32 *p, ELAN_CAPABILITY *cap)
20287 +{
20288 +    u32 *pp = p;
20289 +
20290 +    /* basic xdr unit is u32 - for opaque types we must round up to that */
20291 +    memcpy(p, &cap->cap_userkey, sizeof(cap->cap_userkey));
20292 +    p += xdrsize(cap->cap_userkey);
20293 +
20294 +    *p++ = htonl(cap->cap_version);
20295 +    ((u16 *) (p++))[1] = htons(cap->cap_type);
20296 +    *p++ = htonl(cap->cap_lowcontext);
20297 +    *p++ = htonl(cap->cap_highcontext);
20298 +    *p++ = htonl(cap->cap_mycontext);
20299 +    *p++ = htonl(cap->cap_lownode);
20300 +    *p++ = htonl(cap->cap_highnode);
20301 +    *p++ = htonl(cap->cap_railmask);
20302 +
20303 +    memcpy(p, &cap->cap_bitmap[0], sizeof(cap->cap_bitmap));
20304 +    p += xdrsize(cap->cap_bitmap);
20305 +
20306 +    ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_capability_sz);
20307 +
20308 +    return (p - pp);
20309 +}
20310 +
20311 +
20312 +#define XDR_neterr_sz  (((1 + 5) * sizeof (u32)) + (2*XDR_capability_sz))
20313 +
20314 +static int
20315 +xdr_encode_neterr_msg(struct rpc_rqst *req, u32 *p, NETERR_MSG *msg)
20316 +{
20317 +    u32 *pp = p;
20318 +
20319 +    *p++ = htonl(msg->Rail);
20320 +
20321 +    p += xdr_encode_capability(p, &msg->SrcCapability);
20322 +    p += xdr_encode_capability(p, &msg->DstCapability);
20323 +
20324 +    *p++ = htonl(msg->DstProcess);
20325 +    *p++ = htonl(msg->CookieAddr);
20326 +    *p++ = htonl(msg->CookieVProc);
20327 +    *p++ = htonl(msg->NextCookie);
20328 +    *p++ = htonl(msg->WaitForEop);
20329 +
20330 +    ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_neterr_sz);
20331 +
20332 +    req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
20333 +
20334 +    return 0;
20335 +}
20336 +
20337 +static struct rpc_procinfo neterr_procedures[2] = 
20338 +{
20339 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
20340 +#      define RPC_ID_NULL      "neterr_null"
20341 +#      define RPC_ID_FIXUP_RPC "neterr_fixup_rpc"
20342 +#else
20343 +#      define RPC_ID_NULL      NETERR_NULL_RPC
20344 +#      define RPC_ID_FIXUP_RPC NETERR_FIXUP_RPC
20345 +#endif
20346 +    {  
20347 +       RPC_ID_NULL,                    /* procedure name or number*/
20348 +       (kxdrproc_t) xdr_error,         /* xdr encode fun */
20349 +        (kxdrproc_t) xdr_error,        /* xdr decode fun */
20350 +       0,                              /* req buffer size */
20351 +       0,                              /* call count */
20352 +    },
20353 +    {  
20354 +       RPC_ID_FIXUP_RPC,
20355 +        (kxdrproc_t) xdr_encode_neterr_msg,
20356 +        (kxdrproc_t) xdr_decode_int,
20357 +       XDR_neterr_sz,
20358 +       0,                      
20359 +    },
20360 +};
20361 +
20362 +static struct rpc_version neterr_version1 = 
20363 +{
20364 +    1,                         /* version */
20365 +    2,                         /* number of procedures */
20366 +    neterr_procedures  /* procedures */
20367 +};
20368 +
20369 +static struct rpc_version *neterr_version[] = 
20370 +{
20371 +    NULL,
20372 +    &neterr_version1,
20373 +};
20374 +
20375 +static struct rpc_stat neterr_stats;
20376 +
20377 +static struct rpc_program neterr_program = 
20378 +{
20379 +    NETERR_SERVICE,
20380 +    NETERR_PROGRAM,
20381 +    sizeof(neterr_version)/sizeof(neterr_version[0]),
20382 +    neterr_version,
20383 +    &neterr_stats,
20384 +};
20385 +
20386 +static int
20387 +CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg)
20388 +{
20389 +    struct rpc_xprt   *xprt;
20390 +    struct rpc_clnt   *clnt;
20391 +    struct rpc_timeout to;
20392 +    int                rc, status;
20393 +    
20394 +    PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s)\n", server->Name);
20395 +
20396 +    xprt_set_timeout(&to, 1, NETERR_RPC_TIMEOUT * HZ);
20397 +
20398 +    if ((xprt = xprt_create_proto(IPPROTO_UDP, &server->Addr, &to)) == NULL)
20399 +    {
20400 +       PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) xprt_create_proto failed\n", server->Name);
20401 +       return EFAIL;
20402 +    }
20403 +
20404 +    if ((clnt = rpc_create_client(xprt, server->Name, &neterr_program, NETERR_VERSION, RPC_AUTH_NULL)) == NULL)
20405 +    {
20406 +       PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) rpc_create_client failed\n", server->Name);
20407 +       xprt_destroy (xprt);
20408 +       
20409 +       return EFAIL;
20410 +    }
20411 +
20412 +    clnt->cl_softrtry = 1;
20413 +    clnt->cl_chatty   = 0;
20414 +    clnt->cl_oneshot  = 1;
20415 +    clnt->cl_intr     = 0;
20416 +
20417 +    if ((rc = rpc_call(clnt, NETERR_FIXUP_RPC, msg, &status, 0)) < 0)
20418 +    {
20419 +       /* RPC error has occured - determine whether we should retry */
20420 +
20421 +       status = ETIMEDOUT;
20422 +    }
20423 +
20424 +    PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): -> %d\n", server->Name, status);
20425 +
20426 +    return (status);
20427 +}
20428 +
20429 +#endif /* defined(LINUX) */
20430 +
20431 +/*
20432 + * Local variables:
20433 + * c-file-style: "stroustrup"
20434 + * End:
20435 + */
20436 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/procfs_linux.c
20437 ===================================================================
20438 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/procfs_linux.c       2004-02-23 16:02:56.000000000 -0500
20439 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/procfs_linux.c    2005-07-28 14:52:52.810684512 -0400
20440 @@ -0,0 +1,195 @@
20441 +/*
20442 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20443 + *
20444 + *    For licensing information please see the supplied COPYING file
20445 + *
20446 + */
20447 +
20448 +#ident "@(#)$Id: procfs_linux.c,v 1.21 2003/09/24 13:57:25 david Exp $"
20449 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/procfs_linux.c,v $*/
20450 +
20451 +#include <qsnet/kernel.h>
20452 +
20453 +#include <elan3/elanregs.h>
20454 +#include <elan3/elandev.h>
20455 +#include <elan3/elandebug.h>
20456 +#include <elan3/elan3mmu.h>
20457 +#include <elan3/elanvp.h>
20458 +
20459 +#include <linux/module.h>
20460 +#include <linux/ctype.h>
20461 +
20462 +#include <qsnet/procfs_linux.h>
20463 +
20464 +struct proc_dir_entry *elan3_procfs_root;
20465 +struct proc_dir_entry *elan3_config_root;
20466 +
20467 +static int
20468 +proc_read_position (char *page, char **start, off_t off,
20469 +                   int count, int *eof, void *data)
20470 +{
20471 +    ELAN3_DEV *dev = (ELAN3_DEV *) data;
20472 +    int       len;
20473 +
20474 +    if (dev->Position.pos_mode == ELAN_POS_UNKNOWN)
20475 +       len = sprintf (page, "<unknown>\n");
20476 +    else
20477 +       len = sprintf (page, 
20478 +                      "NodeId                 %d\n"
20479 +                      "NumLevels              %d\n"
20480 +                      "NumNodes               %d\n",
20481 +                      dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes);
20482 +
20483 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
20484 +}
20485 +
20486 +static int
20487 +proc_write_position (struct file *file, const char *buf, unsigned long count, void *data)
20488 +{
20489 +    ELAN3_DEV *dev      = (ELAN3_DEV *) data;
20490 +    unsigned  nodeid   = ELAN3_INVALID_NODE;
20491 +    unsigned  numnodes = 0;
20492 +    char     *page, *p;
20493 +    int       res;
20494 +
20495 +    if (count == 0)
20496 +       return (0);
20497 +
20498 +    if (count >= PAGE_SIZE)
20499 +       return (-EINVAL);
20500 +
20501 +    if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
20502 +       return (-ENOMEM);
20503 +
20504 +    MOD_INC_USE_COUNT;
20505 +
20506 +    if (copy_from_user (page, buf, count))
20507 +       res = -EFAULT;
20508 +    else
20509 +    {
20510 +       page[count] = '\0';
20511 +       
20512 +       if (page[count-1] == '\n')
20513 +           page[count-1] = '\0';
20514 +
20515 +       if (! strcmp (page, "<unknown>"))
20516 +       {
20517 +           dev->Position.pos_mode      = ELAN_POS_UNKNOWN;
20518 +           dev->Position.pos_nodeid    = ELAN3_INVALID_NODE;
20519 +           dev->Position.pos_nodes     = 0;
20520 +           dev->Position.pos_levels    = 0;
20521 +       }
20522 +       else
20523 +       {
20524 +           for (p = page; *p; )
20525 +           {
20526 +               while (isspace (*p))
20527 +                   p++;
20528 +               
20529 +               if (! strncmp (p, "NodeId=", strlen("NodeId=")))
20530 +                   nodeid   = simple_strtoul (p + strlen ("NodeId="), NULL, 0);
20531 +               if (! strncmp (p, "NumNodes=", strlen ("NumNodes=")))
20532 +                   numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0);
20533 +               
20534 +               while (*p && !isspace(*p))
20535 +                   p++;
20536 +           }
20537 +
20538 +           if (ComputePosition (&dev->Position, nodeid, numnodes, dev->Devinfo.dev_num_down_links_value) != 0)
20539 +               printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->Instance, nodeid, numnodes);
20540 +           else
20541 +               printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->Instance, dev->Position.pos_nodeid,
20542 +                       dev->Position.pos_nodes, dev->Position.pos_levels);
20543 +       }
20544 +    }
20545 +
20546 +    MOD_DEC_USE_COUNT;
20547 +    free_page ((unsigned long) page);
20548 +
20549 +    return (count);
20550 +}
20551 +
20552 +
20553 +void
20554 +elan3_procfs_device_init (ELAN3_DEV *dev)
20555 +{
20556 +    struct proc_dir_entry *dir, *p;
20557 +    char name[NAME_MAX];
20558 +
20559 +    sprintf (name, "device%d", dev->Instance);
20560 +    dir = dev->Osdep.procdir = proc_mkdir (name, elan3_procfs_root);
20561 +
20562 +    if ((p = create_proc_entry ("position", 0, dir)) != NULL)
20563 +    {
20564 +       p->read_proc  = proc_read_position;
20565 +       p->write_proc = proc_write_position;
20566 +       p->data       = dev;
20567 +       p->owner      = THIS_MODULE;
20568 +    }
20569 +
20570 +}
20571 +
20572 +void
20573 +elan3_procfs_device_fini (ELAN3_DEV *dev)
20574 +{
20575 +    struct proc_dir_entry *dir = dev->Osdep.procdir;
20576 +    char name[NAME_MAX];
20577 +
20578 +    remove_proc_entry ("position", dir);
20579 +
20580 +    sprintf (name, "device%d", dev->Instance);
20581 +    remove_proc_entry (name, elan3_procfs_root);
20582 +}
20583 +
20584 +void
20585 +elan3_procfs_init()
20586 +{
20587 +    extern int eventint_punt_loops;
20588 +    extern int ResolveRequestTimeout;
20589 +
20590 +    elan3_procfs_root = proc_mkdir("elan3",  qsnet_procfs_root);
20591 +
20592 +    elan3_config_root = proc_mkdir("config", elan3_procfs_root);
20593 +
20594 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug",           &elan3_debug,           0);
20595 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug_console",   &elan3_debug_console,   0);
20596 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug_buffer",    &elan3_debug_buffer,    0);
20597 +    qsnet_proc_register_hex (elan3_config_root, "elan3mmu_debug",      &elan3mmu_debug,      0);
20598 +    qsnet_proc_register_int (elan3_config_root, "eventint_punt_loops", &eventint_punt_loops, 0);
20599 +    qsnet_proc_register_int (elan3_config_root, "neterr_timeout",      &ResolveRequestTimeout, 0);
20600 +
20601 +#if defined(__ia64__)
20602 +    {
20603 +       extern int enable_sdram_writecombining;
20604 +       qsnet_proc_register_int (elan3_config_root, "enable_sdram_writecombining", &enable_sdram_writecombining, 0);
20605 +    }
20606 +#endif
20607 +}
20608 +
20609 +void
20610 +elan3_procfs_fini()
20611 +{
20612 +#if defined(__ia64__)
20613 +    remove_proc_entry ("enable_sdram_writecombining", elan3_config_root);
20614 +#endif
20615 +    remove_proc_entry ("neterr_timeout",      elan3_config_root);
20616 +    remove_proc_entry ("eventint_punt_loops", elan3_config_root);
20617 +    remove_proc_entry ("elan3mmu_debug",      elan3_config_root);
20618 +    remove_proc_entry ("elan3_debug_buffer",    elan3_config_root);
20619 +    remove_proc_entry ("elan3_debug_console",   elan3_config_root);
20620 +    remove_proc_entry ("elan3_debug",           elan3_config_root);
20621 +
20622 +    remove_proc_entry ("config",  elan3_procfs_root);
20623 +    remove_proc_entry ("version", elan3_procfs_root);
20624
20625 +    remove_proc_entry ("elan3",  qsnet_procfs_root);
20626 +}
20627 +
20628 +EXPORT_SYMBOL(elan3_procfs_root);
20629 +EXPORT_SYMBOL(elan3_config_root);
20630 +
20631 +/*
20632 + * Local variables:
20633 + * c-file-style: "stroustrup"
20634 + * End:
20635 + */
20636 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/quadrics_version.h
20637 ===================================================================
20638 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/quadrics_version.h   2004-02-23 16:02:56.000000000 -0500
20639 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/quadrics_version.h        2005-07-28 14:52:52.811684360 -0400
20640 @@ -0,0 +1 @@
20641 +#define QUADRICS_VERSION "4.31qsnet"
20642 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/routecheck.c
20643 ===================================================================
20644 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/routecheck.c 2004-02-23 16:02:56.000000000 -0500
20645 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/routecheck.c      2005-07-28 14:52:52.811684360 -0400
20646 @@ -0,0 +1,313 @@
20647 +/*
20648 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20649 + *
20650 + *    For licensing information please see the supplied COPYING file
20651 + *
20652 + */
20653 +
20654 +/* ------------------------------------------------------------- */
20655 +
20656 +#include <qsnet/kernel.h>
20657 +
20658 +#include <elan3/elanregs.h>
20659 +#include <elan3/elandev.h>
20660 +#include <elan3/elanvp.h>
20661 +#include <elan3/elan3mmu.h>
20662 +#include <elan3/elanctxt.h>
20663 +#include <elan3/elandebug.h>
20664 +#include <elan3/urom_addrs.h>
20665 +#include <elan3/thread.h>
20666 +#include <elan3/vmseg.h>
20667 +
20668 +/* ---------------------------------------------------------------------- */
20669 +typedef struct elan3_net_location {
20670 +    int netid;
20671 +    int plane;
20672 +    int level;
20673 +} ELAN3_NET_LOCATION;
20674 +/* ---------------------------------------------------------------------- */
20675 +#define FLIT_LINK_ARRAY_MAX (ELAN3_MAX_LEVELS*2)
20676 +/* ---------------------------------------------------------------------- */
20677 +int 
20678 +elan3_route_follow_link( ELAN3_CTXT *ctxt, ELAN3_NET_LOCATION *loc, int link)
20679 +{
20680 +    ELAN_POSITION *pos = &ctxt->Position;
20681 +
20682 +    if ((link<0) || (link>7)) 
20683 +    {
20684 +       PRINTF1 (ctxt, DBG_VP, "elan3_route_follow_link: link (%d) out of range \n",link);
20685 +       return (ELAN3_ROUTE_INVALID);
20686 +    }   
20687 +
20688 +    /* going up or down ? */
20689 +    if ( link >= pos->pos_arity[loc->level] ) 
20690 +    {
20691 +       /* Up */
20692 +       if (loc->level >= pos->pos_levels)
20693 +           loc->plane = 0;
20694 +       else
20695 +       {
20696 +           if ((loc->level == 1) && (pos->pos_arity[0]  == 8)) /* oddness in some machines ie 512 */
20697 +               loc->plane = (16 * ( loc->plane / 8 )) + (4 * ( loc->plane % 4)) 
20698 +                   +(link - pos->pos_arity[loc->level]);
20699 +           else
20700 +               loc->plane = (loc->plane * (8 - pos->pos_arity[loc->level]))
20701 +                   +(link - pos->pos_arity[loc->level]);
20702 +       }
20703 +       loc->level--; 
20704 +       if ( loc->level < 0 )
20705 +       {
20706 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the top\n");
20707 +           return (ELAN3_ROUTE_INVALID_LEVEL);
20708 +       }
20709 +       loc->netid = loc->netid / pos->pos_arity[loc->level];
20710 +    }
20711 +    else
20712 +    {
20713 +       /* going down */
20714 +       if ((loc->level == 0) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */
20715 +           loc->netid = link % 2;
20716 +       else
20717 +           loc->netid =(loc->netid * pos->pos_arity[loc->level])+link;
20718 +
20719 +       loc->level++;
20720 +       if (loc->level > pos->pos_levels)
20721 +       {
20722 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the bottom\n");
20723 +           return (ELAN3_ROUTE_INVALID_LEVEL); 
20724 +       }
20725 +
20726 +       if ( loc->level >= (pos->pos_levels-1))
20727 +           loc->plane = 0;
20728 +       else
20729 +           if ((loc->level == 1) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */
20730 +               loc->plane = (((loc->plane)>>2)*2) - ( ((loc->plane)>>2) & 3  ) + ((link<2)?0:4); /* ((p/4) % 4) */
20731 +           else 
20732 +               loc->plane = loc->plane/(8-pos->pos_arity[loc->level]);
20733 +    }
20734 +    return (ELAN3_ROUTE_SUCCESS);
20735 +}
20736 +/* ---------------------------------------------------------------------- */
20737 +int /* assumes they are connected, really only used for finding the MyLink */
20738 +elan3_route_get_mylink (ELAN_POSITION *pos, ELAN3_NET_LOCATION *locA, ELAN3_NET_LOCATION *locB)
20739 +{
20740 +    /* whats the My Link for locA to LocB */
20741 +    if ( locA->level > locB->level ) 
20742 +       return locB->plane - (locA->plane * (8 - pos->pos_arity[locA->level])) + pos->pos_arity[locA->level];
20743 +    
20744 +    return  locB->netid - (locA->netid * pos->pos_arity[locA->level]);
20745 +}
20746 +/* ---------------------------------------------------------------------- */
20747 +#define FIRST_GET_HIGH_PRI(FLIT)    (FLIT & FIRST_HIGH_PRI)
20748 +#define FIRST_GET_AGE(FLIT)         ((FLIT & FIRST_AGE(15))>>11)
20749 +#define FIRST_GET_TIMEOUT(FLIT)     ((FLIT & FIRST_TIMEOUT(3))>>9)
20750 +#define FIRST_GET_NEXT(FLIT)        ((FLIT & FIRST_PACKED(3))>>7)
20751 +#define FIRST_GET_ROUTE(FLIT)       (FLIT & 0x7f)
20752 +#define FIRST_GET_BCAST(FLIT)       (FLIT & 0x40)
20753 +#define FIRST_GET_IS_INVALID(FLIT)  ((FLIT & 0x78) == 0x08)
20754 +#define FIRST_GET_TYPE(FLIT)        ((FLIT & 0x30)>>4)
20755 +#define PRF_GET_ROUTE(FLIT,N)       ((FLIT >> (N*4)) & 0x0F)
20756 +#define PRF_GET_IS_MYLINK(ROUTE)    (ROUTE == PACKED_MYLINK)
20757 +#define PRF_GET_IS_NORMAL(ROUTE)    (ROUTE & 0x8)
20758 +#define PRF_GET_NORMAL_LINK(ROUTE)  (ROUTE & 0x7)
20759 +#define PRF_MOVE_ON(INDEX,NEXT)     do { if (NEXT==3) {NEXT=0;INDEX++;} else {NEXT++; }} while (0);
20760 +/* ---------------------------------------------------------------------- */
20761 +int /* turn level needed or -1 if not possible */
20762 +elan3_route_get_min_turn_level( ELAN_POSITION *pos, int nodeId)
20763 +{
20764 +    int l,range = 1;
20765 +
20766 +    for(l=pos->pos_levels-1;l>=0;l--)
20767 +    {
20768 +       range = range * pos->pos_arity[l];
20769 +       
20770 +       if ( ((pos->pos_nodeid - (pos->pos_nodeid % range)) <= nodeId ) 
20771 +            && (nodeId <= (pos->pos_nodeid - (pos->pos_nodeid % range)+range -1))) 
20772 +           return l;
20773 +    }
20774 +    return -1;
20775 +}
20776 +/* ---------------------------------------------------------------------- */
20777 +int  
20778 +elan3_route_check(ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNodeId)
20779 +{
20780 +    ELAN3_NET_LOCATION lastLoc,currLoc;
20781 +    int               err;
20782 +    int               turnLevel;
20783 +    int               goingDown;
20784 +    int               lnk,index,next,val;
20785 +    ELAN_POSITION    *pos = &ctxt->Position;
20786 +   
20787 +    /* is the dest possible */
20788 +    if ( (destNodeId <0 ) || (destNodeId >= pos->pos_nodes))
20789 +       return  (ELAN3_ROUTE_PROC_RANGE);
20790 +
20791 +    /* 
20792 +     * walk the route, 
20793 +     * - to see if we get there 
20794 +     * - checking we dont turn around 
20795 +     */
20796 +    currLoc.netid = pos->pos_nodeid;         /* the elan */
20797 +    currLoc.plane = 0;
20798 +    currLoc.level = pos->pos_levels;
20799 +
20800 +    turnLevel = currLoc.level; /* track the how far the route goes in */
20801 +    goingDown = 0;             /* once set we cant go up again ie only one change of direction */
20802 +
20803 +    /* move onto the network from the elan */
20804 +    if ((err=elan3_route_follow_link(ctxt,&currLoc,4)) != ELAN3_ROUTE_SUCCESS) 
20805 +    {
20806 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: initial elan3_route_follow_link failed\n");
20807 +       return err;
20808 +    }
20809 +    /* do the first part of flit */
20810 +    switch ( FIRST_GET_TYPE(flits[0]) ) 
20811 +    {
20812 +    case 0  /* sent */   : { lnk = (flits[0] & 0x7);                                 break; }    
20813 +    case PACKED_MYLINK  : { lnk = pos->pos_nodeid % pos->pos_arity[pos->pos_levels-1];    break; }
20814 +    case PACKED_ADAPTIVE : { lnk = 7; /* all routes are the same just check one */    break; }
20815 +    default : 
20816 +       PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected first flit (%d)\n",flits[0]);
20817 +       return (ELAN3_ROUTE_INVALID); 
20818 +    }
20819 +    
20820 +    /* move along this link and check new location */
20821 +    memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */
20822 +    if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS ) 
20823 +    {
20824 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: elan3_route_follow_link failed\n");
20825 +       return err;
20826 +    }
20827 +    if ((currLoc.level > pos->pos_levels) || (currLoc.level < 0 )) 
20828 +    { 
20829 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n");
20830 +       return (ELAN3_ROUTE_INVALID_LEVEL);
20831 +    }
20832 +    if ( lastLoc.level < currLoc.level ) 
20833 +    {
20834 +       turnLevel = lastLoc.level;
20835 +       goingDown = 1;
20836 +    }
20837 +    else 
20838 +    {
20839 +       if (turnLevel > currLoc.level)
20840 +           turnLevel =  currLoc.level;
20841 +       if  (goingDown) 
20842 +       {
20843 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n");
20844 +           return (ELAN3_ROUTE_OCILATES);
20845 +       }
20846 +    }   
20847 +
20848 +    /* loop on doing the remaining flits */
20849 +    index = 1;
20850 +    next  = FIRST_GET_NEXT(flits[0]);
20851 +    val   = PRF_GET_ROUTE(flits[index],next);
20852 +    while(val)
20853 +    {
20854 +       if (PRF_GET_IS_NORMAL(val) ) 
20855 +           lnk = PRF_GET_NORMAL_LINK(val);
20856 +       else
20857 +       {
20858 +         switch ( val ) 
20859 +         {
20860 +         case  PACKED_MYLINK : 
20861 +         {
20862 +             lnk = elan3_route_get_mylink(pos, &currLoc,&lastLoc);
20863 +             break;
20864 +         }
20865 +         default : 
20866 +             PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected packed flit (%d)\n",val);
20867 +             return (ELAN3_ROUTE_INVALID);
20868 +         }
20869 +       }
20870 +
20871 +       /* move along this link and check new location */
20872 +       memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */
20873 +       if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS) 
20874 +           return err;
20875 +       
20876 +       if ((currLoc.level > pos->pos_levels ) || ( currLoc.level < 0 ))
20877 +       { 
20878 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n");
20879 +           return (ELAN3_ROUTE_INVALID_LEVEL);
20880 +       }
20881 +
20882 +       if ( lastLoc.level < currLoc.level ) 
20883 +           goingDown = 1;
20884 +       else 
20885 +       {
20886 +           if (turnLevel > currLoc.level)
20887 +               turnLevel =  currLoc.level;
20888 +           if  (goingDown) 
20889 +           {
20890 +               PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n");
20891 +               return (ELAN3_ROUTE_OCILATES);
20892 +           }
20893 +       }   
20894 +
20895 +       /* move to next part of flit */
20896 +       PRF_MOVE_ON(index,next);
20897 +       if ( index >= MAX_FLITS)
20898 +       {
20899 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route too long\n");
20900 +           return (ELAN3_ROUTE_TOO_LONG);
20901 +       }
20902 +       /* extract the new value */
20903 +       val = PRF_GET_ROUTE(flits[index],next);
20904 +    }
20905 +
20906 +    /* have we got to where we want ? */
20907 +    if ((currLoc.level != pos->pos_levels) || (currLoc.netid != destNodeId))
20908 +    {
20909 +       PRINTF2 (ctxt, DBG_VP, "elan3_route_check: goes to %d instead of %d\n",currLoc.netid , destNodeId );
20910 +       return (ELAN3_ROUTE_WRONG_DEST);
20911 +    }
20912 +
20913 +    /*
20914 +     * there is the case of src == dest 
20915 +     * getTurnLevel returns pos->pos_levels, and turnLevel is (pos->pos_levels -1) 
20916 +     * then we assume they really want to  go onto the network.
20917 +     * otherwise we check that the turn is at the appropriate level
20918 +     */
20919 +    if ( (pos->pos_nodeid != destNodeId) || ( turnLevel != (pos->pos_levels -1)) )
20920 +    {
20921 +       int lev;
20922 +       if ((lev = elan3_route_get_min_turn_level(pos,destNodeId)) == -1) 
20923 +       {
20924 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: cant calculate turn level\n");
20925 +           return (ELAN3_ROUTE_INVALID); /* not sure this can happen here as checks above should protect me */
20926 +       }
20927 +       if (turnLevel != lev) 
20928 +       {
20929 +           PRINTF2 (ctxt, DBG_VP, "elan3_route_check: turn level should be %d but is %d \n", lev, turnLevel);
20930 +           return (ELAN3_ROUTE_TURN_LEVEL);
20931 +       }
20932 +    }
20933 +    return (ELAN3_ROUTE_SUCCESS);
20934 +}
20935 +/* ---------------------------------------------------------------------- */
20936 +int
20937 +elan3_route_broadcast_check(ELAN3_CTXT *ctxt , E3_uint16 *flits, int lowNode, int highNode ) 
20938 +{
20939 +    E3_uint16 flitsTmp[MAX_FLITS];
20940 +    int       nflits,i;
20941 +    
20942 +    nflits = GenerateRoute (&ctxt->Position, flitsTmp, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
20943 +    
20944 +    for(i=0;i<nflits;i++)
20945 +       if ( flitsTmp[i] != flits[i] ) 
20946 +       {
20947 +           PRINTF3 (ctxt, DBG_VP, "elan3_route_broadcast_check:  flit[%d] %d (should be %d)\n",i,flits[i],flitsTmp[i]);
20948 +           return (ELAN3_ROUTE_INVALID);   
20949 +       }
20950 +    
20951 +    return (ELAN3_ROUTE_SUCCESS);
20952 +}
20953 +/* ---------------------------------------------------------------------- */
20954 +
20955 +/*
20956 + * Local variables:
20957 + * c-file-style: "stroustrup"
20958 + * End:
20959 + */
20960 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/route_table.c
20961 ===================================================================
20962 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/route_table.c        2004-02-23 16:02:56.000000000 -0500
20963 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/route_table.c     2005-07-28 14:52:52.812684208 -0400
20964 @@ -0,0 +1,560 @@
20965 +/*
20966 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20967 + *
20968 + *    For licensing information please see the supplied COPYING file
20969 + *
20970 + */
20971 +
20972 +#ident "$Id: route_table.c,v 1.23 2003/09/24 13:57:25 david Exp $"
20973 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/route_table.c,v $ */
20974 +
20975 +#include <qsnet/kernel.h>
20976 +
20977 +#include <elan3/elanregs.h>
20978 +#include <elan3/elandev.h>
20979 +#include <elan3/elanvp.h>
20980 +#include <elan3/elan3mmu.h>
20981 +#include <elan3/elanctxt.h>
20982 +#include <elan3/elandebug.h>
20983 +
20984 +static sdramaddr_t
20985 +AllocateLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int ctxnum, E3_uint64 *smallRoute)
20986 +{
20987 +    int                  bit = -1;
20988 +    ELAN3_ROUTES  *rent;
20989 +    unsigned long flags;
20990 +    
20991 +    spin_lock_irqsave (&tbl->Lock, flags);
20992 +    
20993 +    for (rent = tbl->LargeRoutes; rent; rent = rent->Next)
20994 +    {
20995 +       if ((bit = bt_freebit (rent->Bitmap, NROUTES_PER_BLOCK)) != -1)
20996 +           break;
20997 +    }
20998 +    
20999 +    if (bit == -1)                                             /* No spare entries in large routes */
21000 +    {                                                          /* so allocate a new page */
21001 +       PRINTF0 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: allocate route entries\n");
21002 +       
21003 +       spin_unlock_irqrestore (&tbl->Lock, flags);
21004 +
21005 +       KMEM_ZALLOC(rent, ELAN3_ROUTES *, sizeof (ELAN3_ROUTES), TRUE);
21006 +       
21007 +       if (rent == (ELAN3_ROUTES *) NULL)
21008 +           return ((sdramaddr_t) 0);
21009 +       
21010 +       rent->Routes = elan3_sdram_alloc (dev, PAGESIZE);
21011 +       if (rent->Routes == (sdramaddr_t) 0)
21012 +       {
21013 +           KMEM_FREE (rent, sizeof (ELAN3_ROUTES));
21014 +           return ((sdramaddr_t) 0);
21015 +       }
21016 +
21017 +       spin_lock_irqsave (&tbl->Lock, flags);
21018 +
21019 +       /* Add to list of large routes */
21020 +       rent->Next       = tbl->LargeRoutes;
21021 +       tbl->LargeRoutes = rent;
21022 +
21023 +       /* and use entry 0 */
21024 +       bit = 0;
21025 +    }
21026 +    
21027 +    /* Set the bit in the bitmap to mark this route as allocated */
21028 +    BT_SET (rent->Bitmap, bit);
21029 +    
21030 +    /* And generate the small route pointer and the pointer to the large routes */
21031 +    (*smallRoute) = BIG_ROUTE_PTR(rent->Routes + (bit*NBYTES_PER_LARGE_ROUTE), ctxnum);
21032 +
21033 +    PRINTF4 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: rent %p using entry %d at %lx with route pointer %llx\n",
21034 +            rent, bit, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), (long long) (*smallRoute));
21035 +
21036 +    /* Invalidate the large route */
21037 +    elan3_sdram_zeroq_sdram (dev, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), NBYTES_PER_LARGE_ROUTE);
21038 +
21039 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21040 +
21041 +    return (rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE));
21042 +}
21043 +
21044 +static void
21045 +FreeLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, E3_uint64 smallRoute)
21046 +{
21047 +    E3_Addr      addr = (E3_Addr) (smallRoute & ((1ULL << ROUTE_CTXT_SHIFT)-1));
21048 +    ELAN3_ROUTES *rent;
21049 +
21050 +    PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: free route %llx\n", (long long) smallRoute);
21051 +
21052 +    ASSERT (SPINLOCK_HELD (&tbl->Lock));
21053 +
21054 +    for (rent = tbl->LargeRoutes; rent; rent = rent->Next)
21055 +    {
21056 +       if (rent->Routes <= addr && (rent->Routes + ROUTE_BLOCK_SIZE) > addr)
21057 +       {
21058 +           int indx = (addr - rent->Routes)/NBYTES_PER_LARGE_ROUTE;
21059 +           
21060 +           PRINTF2 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: rent=%p indx=%d\n", rent, indx);
21061 +           
21062 +           BT_CLEAR(rent->Bitmap, indx);
21063 +           return;
21064 +       }
21065 +    }
21066 +
21067 +    panic ("elan: FreeLargeRoute - route not found in large route tables");
21068 +}
21069 +
21070 +static void
21071 +FreeLargeRoutes (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
21072 +{
21073 +    ELAN3_ROUTES *rent;
21074 +
21075 +    while ((rent = tbl->LargeRoutes) != NULL)
21076 +    {
21077 +       PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoutes: free rent %p\n", rent);
21078 +
21079 +       tbl->LargeRoutes = rent->Next;
21080 +
21081 +       elan3_sdram_free (dev, rent->Routes, PAGESIZE);
21082 +       
21083 +       KMEM_FREE (rent, sizeof(ELAN3_ROUTES));
21084 +    }
21085 +}
21086 +
21087 +int
21088 +GetRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits)
21089 +{
21090 +    E3_uint64  routeValue;
21091 +    sdramaddr_t largeRouteOff;
21092 +  
21093 +    if (process < 0 || process >= tbl->Size)
21094 +       return (EINVAL);
21095 +
21096 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21097 +
21098 +    if (routeValue & ROUTE_PTR)
21099 +    {
21100 +       largeRouteOff = (routeValue & ROUTE_PTR_MASK);
21101 +       
21102 +       routeValue = elan3_sdram_readq (dev, largeRouteOff + 0);
21103 +       flits[0] = routeValue & 0xffff;
21104 +       flits[1] = (routeValue >> 16)  & 0xffff;
21105 +       flits[2] = (routeValue >> 32)  & 0xffff;
21106 +       flits[3] = (routeValue >> 48)  & 0xffff;
21107 +       
21108 +       routeValue = elan3_sdram_readq (dev, largeRouteOff + 8);
21109 +       flits[4] = routeValue & 0xffff;
21110 +       flits[5] = (routeValue >> 16)  & 0xffff;
21111 +       flits[6] = (routeValue >> 32)  & 0xffff;
21112 +       flits[7] = (routeValue >> 48)  & 0xffff;
21113 +    }
21114 +    else
21115 +    {
21116 +       flits[0] = routeValue & 0xffff;
21117 +       flits[1] = (routeValue >> 16)  & 0xffff;
21118 +       flits[2] = (routeValue >> 32)  & 0xffff;
21119 +    }
21120 +
21121 +    return (ESUCCESS);
21122 +}
21123 +
21124 +ELAN3_ROUTE_TABLE *
21125 +AllocateRouteTable (ELAN3_DEV *dev, int size)
21126 +{
21127 +    ELAN3_ROUTE_TABLE *tbl;
21128 +
21129 +    KMEM_ZALLOC (tbl, ELAN3_ROUTE_TABLE *, sizeof (ELAN3_ROUTE_TABLE), TRUE);
21130 +
21131 +    if (tbl == (ELAN3_ROUTE_TABLE *) NULL)
21132 +       return (NULL);
21133 +    
21134 +    tbl->Size  = size;
21135 +    tbl->Table = elan3_sdram_alloc (dev, size*NBYTES_PER_SMALL_ROUTE);
21136 +
21137 +    if (tbl->Table == 0)
21138 +    {
21139 +       KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
21140 +       return (NULL);
21141 +    }
21142 +    spin_lock_init (&tbl->Lock);
21143 +
21144 +    /* zero the route table */
21145 +    elan3_sdram_zeroq_sdram (dev, tbl->Table, size*NBYTES_PER_SMALL_ROUTE);
21146 +
21147 +    return (tbl);
21148 +}
21149 +
21150 +void
21151 +FreeRouteTable (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
21152 +{
21153 +    elan3_sdram_free (dev, tbl->Table, tbl->Size*NBYTES_PER_SMALL_ROUTE);
21154 +
21155 +    FreeLargeRoutes (dev, tbl);
21156 +
21157 +    spin_lock_destroy (&tbl->Lock);
21158 +
21159 +    KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
21160 +}
21161 +
21162 +int
21163 +LoadRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, int ctxnum, int nflits, E3_uint16 *flits)
21164 +{
21165 +    E3_uint64    routeValue;
21166 +    E3_uint64    largeRouteValue;
21167 +    sdramaddr_t   largeRouteOff;
21168 +    unsigned long flags;
21169 +
21170 +    if (process < 0 || process >= tbl->Size)
21171 +       return (EINVAL);
21172 +
21173 +    PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: table %lx process %d ctxnum %x\n", tbl->Table ,process, ctxnum);
21174 +
21175 +    if (nflits < 4)
21176 +    {
21177 +       spin_lock_irqsave (&tbl->Lock, flags);
21178 +
21179 +       /* See if we're replacing a "large" route */
21180 +       routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21181 +       if (routeValue & ROUTE_PTR)
21182 +           FreeLargeRoute (dev, tbl, routeValue);
21183 +
21184 +       routeValue = SMALL_ROUTE(flits, ctxnum);
21185 +
21186 +       if ( routeValue &  ROUTE_PTR)
21187 +           PRINTF0 (DBG_DEVICE, DBG_VP, "SHOULD BE  A SMALL ROUTE !!!!!!!\n");
21188 +
21189 +       PRINTF2 (DBG_DEVICE, DBG_VP, "LoadRoute: loading small route %d  %llx\n", process, (long long) routeValue);
21190 +       elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, routeValue);
21191 +    }
21192 +    else
21193 +    {
21194 +       E3_uint64 value0 = BIG_ROUTE0(flits);
21195 +       E3_uint64 value1 = BIG_ROUTE1(flits);
21196 +
21197 +       if ((largeRouteOff = AllocateLargeRoute (dev, tbl, ctxnum, &largeRouteValue)) == (sdramaddr_t) 0)
21198 +           return (ENOMEM);
21199 +
21200 +       spin_lock_irqsave (&tbl->Lock, flags);
21201 +           
21202 +       routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21203 +
21204 +       if ((routeValue & ROUTE_PTR) == 0)
21205 +           elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, largeRouteValue);
21206 +       else
21207 +       {
21208 +           FreeLargeRoute (dev, tbl, largeRouteValue);
21209 +
21210 +           largeRouteOff   = (routeValue & ROUTE_PTR_MASK);
21211 +       }
21212 +
21213 +       PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: loading large route %d - %llx %llx\n", process, 
21214 +                (long long) value0, (long long) value1);
21215 +
21216 +       elan3_sdram_writeq (dev, largeRouteOff + 0, value0);
21217 +       elan3_sdram_writeq (dev, largeRouteOff + 8, value1);
21218 +    }
21219 +
21220 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21221 +    return (ESUCCESS);
21222 +}
21223 +void
21224 +InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
21225 +{
21226 +    E3_uint64 routeValue;
21227 +    unsigned long flags;
21228 +
21229 +    if (process < 0 || process >= tbl->Size)
21230 +       return;
21231 +
21232 +    spin_lock_irqsave (&tbl->Lock, flags);
21233 +
21234 +    /* unset ROUTE_VALID
21235 +     * does not matter if its short or long, will check when we re-use it
21236 +     */
21237 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21238 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue & (~ROUTE_VALID)));
21239 +
21240 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21241 +}
21242 +void
21243 +ValidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
21244 +{
21245 +    E3_uint64 routeValue;
21246 +    unsigned long flags;
21247 +
21248 +    if (process < 0 || process >= tbl->Size)
21249 +       return;
21250 +
21251 +    PRINTF2 (DBG_DEVICE, DBG_VP, "ValidateRoute: table %ld process %d  \n", tbl->Table ,process);
21252 +
21253 +    spin_lock_irqsave (&tbl->Lock, flags);
21254 +
21255 +    /* set ROUTE_VALID
21256 +     */
21257 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21258 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue | ROUTE_VALID));
21259 +
21260 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21261 +}
21262 +void
21263 +ClearRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
21264 +{
21265 +    E3_uint64 routeValue;
21266 +    unsigned long flags;
21267 +
21268 +    if (process < 0 || process >= tbl->Size)
21269 +       return;
21270 +
21271 +    spin_lock_irqsave (&tbl->Lock, flags);
21272 +
21273 +    PRINTF2 (DBG_DEVICE, DBG_VP, "ClearRoute: table %ld process %d  \n", tbl->Table ,process);
21274 +
21275 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21276 +
21277 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, 0);
21278 +
21279 +    if (routeValue & ROUTE_PTR)
21280 +       FreeLargeRoute (dev, tbl, routeValue);
21281 +
21282 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21283 +}
21284 +
21285 +static int
21286 +ElanIdEqual (ELAN_POSITION *pos, int level, int ida, int idb)
21287 +{
21288 +    int l;
21289 +
21290 +    for (l = pos->pos_levels-1; l >= level; l--)
21291 +    {
21292 +       ida /= pos->pos_arity[l];
21293 +       idb /= pos->pos_arity[l];
21294 +    }
21295 +       
21296 +    return (ida == idb);
21297 +}
21298 +
21299 +static int
21300 +RouteDown (ELAN_POSITION *pos, int level, int elanid)
21301 +{
21302 +    int l;
21303 +
21304 +    for (l = (pos->pos_levels - 1); level < pos->pos_levels - 1; level++, l--)
21305 +    {  
21306 +       if (  pos->pos_arity[l] )
21307 +           elanid /= pos->pos_arity[l];    
21308 +    }
21309 +    elanid %= pos->pos_arity[l];
21310 +
21311 +    return elanid;
21312 +}
21313 +
21314 +static int
21315 +InitPackedAndFlits (u_char *packed, E3_uint16 *flits)
21316 +{
21317 +    int rb = 0;
21318 +
21319 +    bzero ((caddr_t) packed, MAX_PACKED+4);
21320 +    bzero ((caddr_t) flits, MAX_FLITS * sizeof (E3_uint16));
21321 +
21322 +    /* Initialise 4 bytes of packed, so that the "padding" */
21323 +    /* NEVER terminates with 00, as this is recognised as */
21324 +    /* as CRC flit */
21325 +    packed[rb++] = 0xF;
21326 +    packed[rb++] = 0xF;
21327 +    packed[rb++] = 0xF;
21328 +    packed[rb++] = 0xF;
21329 +
21330 +    return (rb);
21331 +}
21332 +
21333 +static int
21334 +PackThemRoutesUp (E3_uint16 *flits, u_char *packed, int rb, int timeout, int highPri)
21335 +{
21336 +    int i, nflits;
21337 +
21338 +    flits[0] |= FIRST_TIMEOUT(timeout);
21339 +    if (highPri)
21340 +       flits[0] |= FIRST_HIGH_PRI;
21341 +
21342 +    /* round up the number of route bytes to flits */
21343 +    /* and subtract the 4 extra we've padded out with */
21344 +    nflits = (rb-1)/4;
21345 +    
21346 +    for (i = nflits; i > 0; i--)
21347 +    {
21348 +       flits[i] = (packed[rb-1] << 12 |
21349 +                   packed[rb-2] << 8  |
21350 +                   packed[rb-3] << 4  |
21351 +                   packed[rb-4] << 0);
21352 +       rb -= 4;
21353 +    }
21354 +    
21355 +    /* Now set the position of the first packed route  */
21356 +    /* byte in the 2nd 16 bit flit, taking account of the */
21357 +    /* 4 byte padding */
21358 +    flits[0] |= FIRST_PACKED (4-rb);
21359 +    
21360 +    return (nflits+1);
21361 +}
21362 +
21363 +int
21364 +GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri)
21365 +{
21366 +    int     broadcast  = (lowid != highid);
21367 +    int     rb         = 0;
21368 +    int            first      = 1;
21369 +    int     noRandom   = 0;
21370 +    int     level;
21371 +    u_char  packed[MAX_PACKED+4];
21372 +    int     numDownLinks;
21373 +
21374 +    rb = InitPackedAndFlits (packed, flits);
21375 +
21376 +    for (level = pos->pos_levels-1;                            /* Move up out of the elan */
21377 +        level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
21378 +                        ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
21379 +    {
21380 +       noRandom |= pos->pos_random_disabled & (1 << (pos->pos_levels-1-level));
21381 +    }
21382 +    
21383 +    for (level = pos->pos_levels-1;                            /* Move up out of the elan */
21384 +        level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
21385 +                        ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
21386 +    {
21387 +       numDownLinks = pos->pos_arity [level];
21388 +       if (first)
21389 +       {
21390 +           if (broadcast || noRandom)
21391 +               flits[0] = FIRST_BCAST_TREE;
21392 +           else
21393 +           {
21394 +               if (numDownLinks == 4) 
21395 +                   flits[0] = FIRST_ADAPTIVE;
21396 +               else
21397 +                   flits[0] = FIRST_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
21398 +           }
21399 +           first = 0;
21400 +       }
21401 +       else
21402 +       {
21403 +           if (broadcast || noRandom)
21404 +               packed[rb++] = PACKED_BCAST_TREE;
21405 +           else 
21406 +           {
21407 +               if (numDownLinks == 4) 
21408 +                   packed[rb++] = PACKED_ADAPTIVE;
21409 +               else
21410 +                   packed[rb++] = PACKED_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
21411 +           }               
21412 +       }
21413 +    }
21414 +    
21415 +    while (level < pos->pos_levels)
21416 +    {
21417 +       int lowRoute  = RouteDown (pos, level, lowid);
21418 +       int highRoute = RouteDown (pos, level, highid);
21419 +
21420 +       if (first)
21421 +       {
21422 +           if (broadcast)
21423 +               flits[0] = FIRST_BCAST(highRoute, lowRoute);
21424 +           else
21425 +               flits[0] = FIRST_ROUTE(lowRoute);
21426 +
21427 +           first = 0;
21428 +       }
21429 +       else
21430 +       {
21431 +           if (broadcast)
21432 +           {
21433 +               packed[rb++] = PACKED_BCAST0(highRoute, lowRoute);
21434 +               packed[rb++] = PACKED_BCAST1(highRoute, lowRoute);
21435 +           }
21436 +           else
21437 +               packed[rb++] = PACKED_ROUTE(lowRoute);
21438 +       }
21439 +       
21440 +       level++;
21441 +    }
21442 +
21443 +#ifdef ELITE_REVA_SUPPORTED
21444 +    if (broadcast && (pos->pos_levels == 3))
21445 +    {
21446 +      packed[rb++] = PACKED_BCAST0(0, 0);
21447 +      packed[rb++] = PACKED_BCAST1(0, 0);
21448 +    }
21449 +#endif
21450 +
21451 +    return (PackThemRoutesUp (flits, packed, rb, timeout, highPri));
21452 +}
21453 +
21454 +int
21455 +GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive)
21456 +{
21457 +    int     notfirst = 0;
21458 +    int     l, rb;
21459 +    u_char  packed[MAX_PACKED+4];
21460 +
21461 +    rb = InitPackedAndFlits (packed, flits);
21462 +
21463 +    for (l = pos->pos_levels-1; l > level; l--)
21464 +       if (! notfirst++)
21465 +           flits[0] = adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE;
21466 +       else
21467 +           packed[rb++] = adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE;
21468 +
21469 +    if (! notfirst++ ) 
21470 +       flits[0] = FIRST_MYLINK;
21471 +    else
21472 +       packed[rb++] = PACKED_MYLINK;
21473 +
21474 +    for (l++ /* consume mylink */; l < pos->pos_levels; l++)
21475 +       if (! notfirst++)
21476 +           flits[0] = FIRST_ROUTE (RouteDown (pos, l, pos->pos_nodeid));
21477 +       else
21478 +           packed[rb++] = PACKED_ROUTE (RouteDown (pos, l, pos->pos_nodeid));
21479 +
21480 +
21481 +    return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY));
21482 +}
21483 +
21484 +
21485 +/*
21486 + * In this case "level" is the number of levels counted from the bottom.
21487 + */
21488 +int
21489 +GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive )
21490 +{
21491 +    int            first = 1;
21492 +    int     i, rb;
21493 +    u_char  packed[MAX_PACKED+4];
21494 +
21495 +    rb = InitPackedAndFlits (packed, flits);
21496 +
21497 +    /* Generate "up" routes */
21498 +    for (i = 0; i < level; i++)
21499 +    {
21500 +       if (first)
21501 +           flits[0] = linkup ? FIRST_ROUTE(linkup[i]) : adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE;
21502 +       else
21503 +           packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE;
21504 +       first = 0;
21505 +    }
21506 +
21507 +    /* Generate a "to-me" route down */
21508 +    if (first)
21509 +       flits[0] = FIRST_MYLINK;
21510 +    else
21511 +       packed[rb++] = PACKED_MYLINK;
21512 +
21513 +    for (i = level-1; i >= 0; i--)
21514 +       packed[rb++] =  PACKED_ROUTE(linkdown[i]);
21515 +
21516 +    return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY));
21517 +}
21518 +
21519 +
21520 +/*
21521 + * Local variables:
21522 + * c-file-style: "stroustrup"
21523 + * End:
21524 + */
21525 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/sdram.c
21526 ===================================================================
21527 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/sdram.c      2004-02-23 16:02:56.000000000 -0500
21528 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/sdram.c   2005-07-28 14:52:52.814683904 -0400
21529 @@ -0,0 +1,807 @@
21530 +/*
21531 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
21532 + * 
21533 + *    For licensing information please see the supplied COPYING file
21534 + *
21535 + */
21536 +
21537 +#ident "@(#)$Id: sdram.c,v 1.17 2003/09/24 13:57:25 david Exp $"
21538 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/sdram.c,v $*/
21539 +
21540 +
21541 +#include <qsnet/kernel.h>
21542 +
21543 +#include <elan3/elanregs.h>
21544 +#include <elan3/elandev.h>
21545 +#include <elan3/elandebug.h>
21546 +
21547 +/* sdram access functions */
21548 +#define sdram_off_to_bank(dev,off)     (&dev->SdramBanks[(off) >> ELAN3_SDRAM_BANK_SHIFT])
21549 +#define sdram_off_to_offset(dev,off)   ((off) & (ELAN3_SDRAM_BANK_SIZE-1))
21550 +#define sdram_off_to_bit(dev,indx,off) (sdram_off_to_offset(dev,off) >> (SDRAM_MIN_BLOCK_SHIFT+(indx)))
21551 +
21552 +#define sdram_off_to_mapping(dev,off)  (sdram_off_to_bank(dev,off)->Mapping + sdram_off_to_offset(dev,off))
21553 +    
21554 +unsigned char
21555 +elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t off)
21556 +{
21557 +    return (readb ((unsigned char *) sdram_off_to_mapping(dev, off)));
21558 +}
21559 +
21560 +unsigned short
21561 +elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t off)
21562 +{
21563 +    return (readw ((unsigned short *) sdram_off_to_mapping(dev, off)));
21564 +}
21565 +
21566 +unsigned int
21567 +elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t off)
21568 +{
21569 +    return (readl ((unsigned int *) sdram_off_to_mapping(dev, off)));
21570 +}
21571 +
21572 +unsigned long long
21573 +elan3_sdram_readq (ELAN3_DEV *dev, sdramaddr_t off)
21574 +{
21575 +    return (readq ((unsigned long long *) sdram_off_to_mapping(dev, off)));
21576 +}
21577 +
21578 +void
21579 +elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t off, unsigned char val)
21580 +{
21581 +    writeb (val, (unsigned char *) sdram_off_to_mapping(dev, off));
21582 +    wmb();
21583 +}
21584 +
21585 +void
21586 +elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t off, unsigned short val)
21587 +{
21588 +    writew (val, (unsigned short *) sdram_off_to_mapping(dev, off));
21589 +    wmb();
21590 +}
21591 +
21592 +void
21593 +elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t off, unsigned int val)
21594 +{
21595 +    writel (val, (unsigned int *) sdram_off_to_mapping(dev, off));
21596 +    wmb();
21597 +}
21598 +
21599 +void
21600 +elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t off, unsigned long long val)
21601 +{
21602 +    writeq (val, (unsigned long long *) sdram_off_to_mapping(dev, off));
21603 +    wmb();
21604 +}
21605 +
21606 +void
21607 +elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21608 +{
21609 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21610 +}
21611 +
21612 +void
21613 +elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21614 +{
21615 +#ifdef __LITTLE_ENDIAN__
21616 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21617 +#else
21618 +#error incorrect for big endian
21619 +#endif
21620 +}
21621 +
21622 +void
21623 +elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21624 +{
21625 +#ifdef __LITTLE_ENDIAN__
21626 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21627 +#else
21628 +#error incorrect for big endian
21629 +#endif
21630 +}
21631 +
21632 +void
21633 +elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
21634 +{
21635 +#ifdef __LITTLE_ENDIAN__
21636 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
21637 +#else
21638 +#error incorrect for big endian
21639 +#endif
21640 +}
21641 +
21642 +#define E3_WRITEBUFFER_SIZE            16
21643 +#define E3_WRITEBUFFER_OFFSET(x)       (((unsigned long) x) & (E3_WRITEBUFFER_SIZE-1))
21644 +#define E3_WRITEBUFFER_BASE(x)         (((unsigned long) x) & ~((unsigned long) (E3_WRITEBUFFER_SIZE-1)))
21645 +
21646 +void
21647 +elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21648 +{
21649 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21650 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21651 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21652 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21653 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t);
21654 +    int        i;
21655 +
21656 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21657 +    {
21658 +       for (i = 0; i < nbytes/sizeof(uint8_t); i++)
21659 +           writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]);
21660 +       wmb();
21661 +    }
21662 +    else
21663 +    {
21664 +       if (ntop < E3_WRITEBUFFER_SIZE)
21665 +       {
21666 +           slim -= ntop;
21667 +           dlim -= ntop;
21668 +           
21669 +           for (i = 0; i < ntop/sizeof(uint8_t); i++)
21670 +               writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]);
21671 +           wmb();
21672 +       }
21673 +       
21674 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21675 +       {
21676 +           dlim -= E3_WRITEBUFFER_SIZE;
21677 +           slim -= E3_WRITEBUFFER_SIZE;
21678 +
21679 +           for (i = 0; i < E3_WRITEBUFFER_SIZE/sizeof (uint8_t); i++)
21680 +               writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]);
21681 +           wmb();
21682 +       }
21683 +       
21684 +       if (nbase < E3_WRITEBUFFER_SIZE)
21685 +       {
21686 +           for (i = 0; i < nbase/sizeof(uint8_t); i++)
21687 +               writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]);
21688 +           wmb();
21689 +       }
21690 +    }
21691 +}
21692 +
21693 +void
21694 +elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21695 +{
21696 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21697 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21698 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21699 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t);
21700 +    int        i;
21701 +
21702 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21703 +    {
21704 +       for (i = 0; i < nbytes/sizeof(uint8_t); i++)
21705 +           writeb (0, &((uint8_t *) dbase)[i]);
21706 +       wmb();
21707 +    }
21708 +    else
21709 +    {
21710 +       if (ntop < E3_WRITEBUFFER_SIZE)
21711 +       {
21712 +           dlim -= ntop;
21713 +           
21714 +           for (i = 0; i < ntop/sizeof(uint8_t); i++)
21715 +               writeb (0, &((uint8_t *) dlim)[i]);
21716 +           wmb();
21717 +       }
21718 +       
21719 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21720 +       {
21721 +           dlim -= E3_WRITEBUFFER_SIZE;
21722 +
21723 +           writeq (0, &((uint64_t *) dlim)[0]);
21724 +           writeq (0, &((uint64_t *) dlim)[1]);
21725 +
21726 +           wmb();
21727 +       }
21728 +       
21729 +       if (nbase < E3_WRITEBUFFER_SIZE)
21730 +       {
21731 +           for (i = 0; i < nbase/sizeof(uint8_t); i++)
21732 +               writeb (0, &((uint8_t *) dbase)[i]);
21733 +           wmb();
21734 +       }
21735 +    }
21736 +}
21737 +
21738 +void
21739 +elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21740 +{
21741 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21742 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21743 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21744 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21745 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t);
21746 +    int        i;
21747 +
21748 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21749 +    {
21750 +       for (i = 0; i < nbytes/sizeof(uint16_t); i++)
21751 +           writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]);
21752 +       wmb();
21753 +    }
21754 +    else
21755 +    {
21756 +       if (ntop < E3_WRITEBUFFER_SIZE)
21757 +       {
21758 +           slim -= ntop;
21759 +           dlim -= ntop;
21760 +
21761 +           for (i = 0; i < ntop/sizeof(uint16_t); i++)
21762 +               writew (((uint16_t *) slim)[i], &((uint16_t *) dlim)[i]);
21763 +           wmb();
21764 +       }
21765 +       
21766 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21767 +       {
21768 +           dlim -= E3_WRITEBUFFER_SIZE;
21769 +           slim -= E3_WRITEBUFFER_SIZE;
21770 +
21771 +           writew (((uint16_t *) slim)[0], &((uint16_t *) dlim)[0]);
21772 +           writew (((uint16_t *) slim)[1], &((uint16_t *) dlim)[1]);
21773 +           writew (((uint16_t *) slim)[2], &((uint16_t *) dlim)[2]);
21774 +           writew (((uint16_t *) slim)[3], &((uint16_t *) dlim)[3]);
21775 +           writew (((uint16_t *) slim)[4], &((uint16_t *) dlim)[4]);
21776 +           writew (((uint16_t *) slim)[5], &((uint16_t *) dlim)[5]);
21777 +           writew (((uint16_t *) slim)[6], &((uint16_t *) dlim)[6]);
21778 +           writew (((uint16_t *) slim)[7], &((uint16_t *) dlim)[7]);
21779 +           wmb();
21780 +       }
21781 +       
21782 +       if (nbase < E3_WRITEBUFFER_SIZE)
21783 +       {
21784 +           for (i = 0; i < nbase/sizeof(uint16_t); i++)
21785 +               writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]);
21786 +           wmb();
21787 +       }
21788 +    }
21789 +}
21790 +
21791 +void
21792 +elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21793 +{
21794 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21795 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21796 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21797 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t);
21798 +    int        i;
21799 +
21800 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21801 +    {
21802 +       for (i = 0; i < nbytes/sizeof(uint16_t); i++)
21803 +           writew (0, &((uint16_t *) dbase)[i]);
21804 +       wmb();
21805 +    }
21806 +    else
21807 +    {
21808 +       if (ntop < E3_WRITEBUFFER_SIZE)
21809 +       {
21810 +           dlim -= ntop;
21811 +           
21812 +           for (i = 0; i < ntop/sizeof(uint16_t); i++)
21813 +               writew (0, &((uint16_t *) dlim)[i]);
21814 +           wmb();
21815 +       }
21816 +       
21817 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21818 +       {
21819 +           dlim -= E3_WRITEBUFFER_SIZE;
21820 +
21821 +           writeq (0, &((uint64_t *) dlim)[0]);
21822 +           writeq (0, &((uint64_t *) dlim)[1]);
21823 +           wmb();
21824 +       }
21825 +       
21826 +       if (nbase < E3_WRITEBUFFER_SIZE)
21827 +       {
21828 +           for (i = 0; i < nbase/sizeof(uint16_t); i++)
21829 +               writew (0, &((uint16_t *) dbase)[i]);
21830 +           wmb();
21831 +       }
21832 +    }
21833 +}
21834 +
21835 +void
21836 +elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21837 +{
21838 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21839 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21840 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21841 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21842 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t);
21843 +    int        i;
21844 +
21845 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21846 +    {
21847 +       for (i = 0; i < nbytes/sizeof(uint32_t); i++)
21848 +           writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]);
21849 +       wmb();
21850 +    }
21851 +    else
21852 +    {
21853 +       if (ntop < E3_WRITEBUFFER_SIZE)
21854 +       {
21855 +           slim -= ntop;
21856 +           dlim -= ntop;
21857 +
21858 +           for (i = 0; i < ntop/sizeof(uint32_t); i++)
21859 +               writel (((uint32_t *) slim)[i], &((uint32_t *) dlim)[i]);
21860 +           wmb();
21861 +       }
21862 +       
21863 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21864 +       {
21865 +           dlim -= E3_WRITEBUFFER_SIZE;
21866 +           slim -= E3_WRITEBUFFER_SIZE;
21867 +
21868 +           writel (((uint32_t *) slim)[0], &((uint32_t *) dlim)[0]);
21869 +           writel (((uint32_t *) slim)[1], &((uint32_t *) dlim)[1]);
21870 +           writel (((uint32_t *) slim)[2], &((uint32_t *) dlim)[2]);
21871 +           writel (((uint32_t *) slim)[3], &((uint32_t *) dlim)[3]);
21872 +           wmb();
21873 +       }
21874 +       
21875 +       if (nbase < E3_WRITEBUFFER_SIZE)
21876 +       {
21877 +           for (i = 0; i < nbase/sizeof(uint32_t); i++)
21878 +               writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]);
21879 +           wmb();
21880 +       }
21881 +    }
21882 +}
21883 +
21884 +void
21885 +elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21886 +{
21887 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21888 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21889 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21890 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t);
21891 +    int        i;
21892 +
21893 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21894 +    {
21895 +       for (i = 0; i < nbytes/sizeof(uint32_t); i++)
21896 +           writel (0, &((uint32_t *) dbase)[i]);
21897 +       wmb();
21898 +    }
21899 +    else
21900 +    {
21901 +       if (ntop < E3_WRITEBUFFER_SIZE)
21902 +       {
21903 +           dlim -= ntop;
21904 +
21905 +           for (i = 0; i < ntop/sizeof(uint32_t); i++)
21906 +               writel (0, &((uint32_t *) dlim)[i]);
21907 +           wmb();
21908 +       }
21909 +       
21910 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21911 +       {
21912 +           dlim -= E3_WRITEBUFFER_SIZE;
21913 +
21914 +           writeq (0, &((uint64_t *) dlim)[0]);
21915 +           writeq (0, &((uint64_t *) dlim)[1]);
21916 +           wmb();
21917 +       }
21918 +       
21919 +       if (nbase < E3_WRITEBUFFER_SIZE)
21920 +       {
21921 +           for (i = 0; i < nbase/sizeof(uint32_t); i++)
21922 +               writel (0, &((uint32_t *) dbase)[i]);
21923 +           wmb();
21924 +       }
21925 +    }
21926 +}
21927 +
21928 +void
21929 +elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
21930 +{
21931 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21932 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21933 +    virtaddr_t slim  = (virtaddr_t) from + nbytes;
21934 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21935 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t);
21936 +
21937 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21938 +    {
21939 +       writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]);
21940 +       wmb();
21941 +    }
21942 +    else
21943 +    {
21944 +       if (ntop < E3_WRITEBUFFER_SIZE)
21945 +       {
21946 +           slim -= ntop;
21947 +           dlim -= ntop;
21948 +
21949 +           writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]);
21950 +           wmb();
21951 +       }
21952 +       
21953 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21954 +       {
21955 +           dlim -= E3_WRITEBUFFER_SIZE;
21956 +           slim -= E3_WRITEBUFFER_SIZE;
21957 +
21958 +           writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]);
21959 +           writeq (((uint64_t *) slim)[1], &((uint64_t *) dlim)[1]);
21960 +           wmb();
21961 +       }
21962 +       
21963 +       if (nbase < E3_WRITEBUFFER_SIZE)
21964 +       {
21965 +           writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]);
21966 +           wmb();
21967 +       }
21968 +    }
21969 +}
21970 +
21971 +void
21972 +elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
21973 +{
21974 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
21975 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
21976 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
21977 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t);
21978 +
21979 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
21980 +    {
21981 +       writeq (0, &((uint64_t *) dbase)[0]);
21982 +       wmb();
21983 +    }
21984 +    else
21985 +    {
21986 +       if (ntop < E3_WRITEBUFFER_SIZE)
21987 +       {
21988 +           dlim -= ntop;
21989 +
21990 +           writeq (0, &((uint64_t *) dlim)[0]);
21991 +           wmb();
21992 +       }
21993 +       
21994 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
21995 +       {
21996 +           dlim -= E3_WRITEBUFFER_SIZE;
21997 +           
21998 +           writeq (0, &((uint64_t *) dlim)[0]);
21999 +           writeq (0, &((uint64_t *) dlim)[1]);
22000 +           wmb();
22001 +       }
22002 +       
22003 +       if (nbase < E3_WRITEBUFFER_SIZE)
22004 +       {
22005 +           writeq (0, &((uint64_t *) dbase)[0]);
22006 +           wmb();
22007 +       }
22008 +    }
22009 +}
22010 +
22011 +physaddr_t
22012 +elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t off)
22013 +{
22014 +#if defined(DIGITAL_UNIX)
22015 +    return (KSEG_TO_PHYS (sdram_off_to_mapping (dev, off)));
22016 +#elif defined(LINUX)
22017 +    return (kmem_to_phys ((void *) sdram_off_to_mapping (dev, off)));
22018 +#endif    
22019 +}
22020 +
22021 +/* sdram buddy allocator */
22022 +#define read_next(dev, block)          elan3_sdram_readl(dev, block + 0)
22023 +#define read_prev(dev, block)          elan3_sdram_readl(dev, block + 4)
22024 +#define write_next(dev, block, val)    (elan3_sdram_writel(dev, block + 0, val), val)
22025 +#define write_prev(dev, block, val)    (elan3_sdram_writel(dev, block + 4, val), val)
22026 +
22027 +#define freelist_insert(dev,idx,block)\
22028 +do {\
22029 +    sdramaddr_t next = dev->SdramFreeLists[(idx)];\
22030 +\
22031 +    /*\
22032 +     * block->prev = NULL;\
22033 +     * block->next = next;\
22034 +     * if (next != NULL)\
22035 +     *    next->prev = block;\
22036 +     * freelist = block;\
22037 +     */\
22038 +    write_prev (dev, block, (sdramaddr_t) 0);\
22039 +    write_next (dev, block, next);\
22040 +    if (next != (sdramaddr_t) 0)\
22041 +       write_prev (dev, next, block);\
22042 +    dev->SdramFreeLists[idx] = block;\
22043 +\
22044 +    dev->SdramFreeCounts[idx]++;\
22045 +    dev->Stats.SdramBytesFree += (SDRAM_MIN_BLOCK_SIZE << idx);\
22046 +} while (0)
22047 +
22048 +#define freelist_remove(dev,idx,block)\
22049 +do {\
22050 +    /*\
22051 +     * if (block->prev)\
22052 +     *     block->prev->next = block->next;\
22053 +     * else\
22054 +     *     dev->SdramFreeLists[idx] = block->next;\
22055 +     * if (block->next)\
22056 +     *     block->next->prev = block->prev;\
22057 +     */\
22058 +    sdramaddr_t blocknext = read_next (dev, block);\
22059 +    sdramaddr_t blockprev = read_prev (dev, block);\
22060 +\
22061 +    if (blockprev)\
22062 +       write_next (dev, blockprev, blocknext);\
22063 +    else\
22064 +       dev->SdramFreeLists[idx] = blocknext;\
22065 +    if (blocknext)\
22066 +       write_prev (dev, blocknext, blockprev);\
22067 +\
22068 +    dev->SdramFreeCounts[idx]--;\
22069 +    dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\
22070 +} while (0)
22071 +
22072 +#define freelist_removehead(dev,idx,block)\
22073 +do {\
22074 +    sdramaddr_t blocknext = read_next (dev, block);\
22075 +\
22076 +    if ((dev->SdramFreeLists[idx] = blocknext) != 0)\
22077 +       write_prev (dev, blocknext, 0);\
22078 +\
22079 +    dev->SdramFreeCounts[idx]--;\
22080 +    dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\
22081 +} while (0)
22082 +
22083 +#if defined(DEBUG)
22084 +static int
22085 +display_blocks (ELAN3_DEV *dev, int indx, char *string)
22086 +{
22087 +    sdramaddr_t block;
22088 +    int nbytes = 0;
22089 +
22090 +    printk ("%s - indx %d\n", string, indx);
22091 +    for (block = dev->SdramFreeLists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block))
22092 +    {
22093 +       printk ("  %lx", block);
22094 +       nbytes += (SDRAM_MIN_BLOCK_SIZE << indx);
22095 +    }
22096 +    printk ("\n");
22097 +
22098 +    return (nbytes);
22099 +}
22100 +
22101 +
22102 +void
22103 +elan3_sdram_display (ELAN3_DEV *dev, char *string)
22104 +{
22105 +    int indx;
22106 +    int nbytes = 0;
22107 +
22108 +    printk ("elan3_sdram_display: dev=%p\n", dev);
22109 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
22110 +       if (dev->SdramFreeLists[indx] != (sdramaddr_t) 0)
22111 +           nbytes += display_blocks (dev, indx, string);
22112 +    printk ("\n%d bytes free\n", nbytes);
22113 +}
22114 +
22115 +void
22116 +elan3_sdram_verify (ELAN3_DEV *dev)
22117 +{
22118 +    int indx, size, nbits, i, b;
22119 +    sdramaddr_t block;
22120 +
22121 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
22122 +    {
22123 +       unsigned count = 0;
22124 +
22125 +       for (block = dev->SdramFreeLists[indx]; block; block = read_next (dev, block), count++)
22126 +       {
22127 +           ELAN3_SDRAM_BANK *bank = sdram_off_to_bank (dev, block);
22128 +           unsigned         off  = sdram_off_to_offset (dev, block);
22129 +           int              bit  = sdram_off_to_bit (dev, indx, block);
22130 +
22131 +           if ((block & (size-1)) != 0)
22132 +               printk ("elan3_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx);
22133 +           
22134 +           if (bank == NULL || off > bank->Size)
22135 +               printk ("elan3_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx);
22136 +           else if (BT_TEST (bank->Bitmaps[indx], bit) == 0)
22137 +               printk ("elan3_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx);
22138 +           else
22139 +           {
22140 +               for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1)
22141 +               {
22142 +                   bit = sdram_off_to_bit (dev, i, block);
22143 +
22144 +                   for (b = 0; b < nbits; b++)
22145 +                       if (BT_TEST(bank->Bitmaps[i], bit + b))
22146 +                           printk ("elan3_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b);
22147 +               }
22148 +           }
22149 +       }
22150 +
22151 +       if (dev->SdramFreeCounts[indx] != count)
22152 +           printk ("elan3_sdram_verify: indx=%x expected %d got %d\n", indx, dev->SdramFreeCounts[indx], count);
22153 +    }
22154 +}
22155 +
22156 +#endif /* defined(DEBUG) */
22157 +
22158 +static void
22159 +free_block (ELAN3_DEV *dev, sdramaddr_t block, int indx)
22160 +{
22161 +    ELAN3_SDRAM_BANK *bank = sdram_off_to_bank (dev, block);
22162 +    unsigned        bit  = sdram_off_to_bit(dev, indx, block);
22163 +    unsigned         size = SDRAM_MIN_BLOCK_SIZE << indx;
22164 +
22165 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%lx indx=%d bit=%x\n", block, indx, bit);
22166 +
22167 +    ASSERT ((block & (size-1)) == 0);
22168 +    ASSERT (BT_TEST (bank->Bitmaps[indx], bit) == 0);
22169 +    
22170 +    while (BT_TEST (bank->Bitmaps[indx], bit ^ 1))
22171 +    {
22172 +       sdramaddr_t buddy = block ^ size;
22173 +       
22174 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%lx buddy=%lx indx=%d\n", block, buddy, indx);
22175 +
22176 +       BT_CLEAR (bank->Bitmaps[indx], bit ^ 1);
22177 +
22178 +       freelist_remove (dev, indx, buddy);
22179 +       
22180 +       block = (block < buddy) ? block : buddy;
22181 +       indx++;
22182 +       size <<= 1;
22183 +       bit >>= 1;
22184 +    }
22185 +
22186 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%lx indx=%d bit=%x\n", block, indx, bit);
22187 +
22188 +    freelist_insert (dev, indx, block);
22189 +
22190 +    BT_SET (bank->Bitmaps[indx], bit);
22191 +}
22192 +
22193 +void
22194 +elan3_sdram_init (ELAN3_DEV *dev)
22195 +{
22196 +    int indx;
22197 +
22198 +    spin_lock_init (&dev->SdramLock);
22199 +
22200 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
22201 +    {
22202 +       dev->SdramFreeLists[indx]  = (sdramaddr_t) 0;
22203 +       dev->SdramFreeCounts[indx] = 0;
22204 +    }
22205 +}
22206 +
22207 +void
22208 +elan3_sdram_fini (ELAN3_DEV *dev)
22209 +{
22210 +    spin_lock_destroy (&dev->SdramLock);
22211 +}
22212 +
22213 +void
22214 +elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top)
22215 +{
22216 +    register int indx;
22217 +    register unsigned long size;
22218 +
22219 +    /* align to the minimum block size */
22220 +    base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
22221 +    top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
22222 +
22223 +    /* don't allow 0 as a valid "base" */
22224 +    if (base == 0)
22225 +       base = E3_CACHE_SIZE;
22226 +
22227 +    /* carve the bottom to the biggest boundary */
22228 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
22229 +    {
22230 +       if ((base & size) == 0)
22231 +           continue;
22232 +
22233 +       if ((base + size) > top)
22234 +           break;
22235 +
22236 +       free_block (dev, base, indx);
22237 +       
22238 +       base += size;
22239 +    }
22240 +
22241 +    /* carve the top down to the biggest boundary */
22242 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
22243 +    {
22244 +       if ((top & size) == 0)
22245 +           continue;
22246 +
22247 +       if ((top - size) < base)
22248 +           break;
22249 +
22250 +       free_block (dev, (top - size), indx);
22251 +       
22252 +       top -= size;
22253 +    }
22254 +
22255 +    /* now free of the space in between */
22256 +    while (base < top)
22257 +    {
22258 +       free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
22259 +
22260 +       base += SDRAM_MAX_BLOCK_SIZE;
22261 +    }
22262 +}
22263 +
22264 +sdramaddr_t
22265 +elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes)
22266 +{
22267 +    sdramaddr_t block;
22268 +    register int i, indx;
22269 +    unsigned long size;
22270 +    unsigned long flags;
22271 +
22272 +    spin_lock_irqsave (&dev->SdramLock, flags);
22273 +
22274 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
22275 +       ;
22276 +
22277 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
22278 +
22279 +    /* find the smallest block which is big enough for this allocation */
22280 +    for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
22281 +       if (dev->SdramFreeLists[i])
22282 +           break;
22283 +    
22284 +    if (i == SDRAM_NUM_FREE_LISTS)
22285 +    {
22286 +       spin_unlock_irqrestore (&dev->SdramLock, flags);
22287 +       return ((sdramaddr_t) 0);
22288 +    }
22289 +    
22290 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: use block=%lx indx=%d\n", dev->SdramFreeLists[i], i);
22291 +
22292 +    /* remove the block from the free list */
22293 +    freelist_removehead (dev, i, (block = dev->SdramFreeLists[i]));
22294 +
22295 +    /* clear the approriate bit in the bitmap */
22296 +    BT_CLEAR (sdram_off_to_bank (dev, block)->Bitmaps[i], sdram_off_to_bit (dev,i, block));
22297 +
22298 +    /* and split it up as required */
22299 +    while (i-- > indx)
22300 +       free_block (dev, block + (size >>= 1), i);
22301 +
22302 +    PRINTF1 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: return block=%lx\n", block);
22303 +
22304 +    spin_unlock_irqrestore (&dev->SdramLock, flags);
22305 +
22306 +    ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
22307 +
22308 +    return ((sdramaddr_t) block);
22309 +}
22310 +
22311 +void
22312 +elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t block, int nbytes)
22313 +{
22314 +    register int indx;
22315 +    unsigned long size;
22316 +    unsigned long flags;
22317 +
22318 +    spin_lock_irqsave (&dev->SdramLock, flags);
22319 +
22320 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
22321 +       ;
22322 +
22323 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_free: indx=%d block=%lx\n", indx, block);
22324 +    
22325 +    free_block (dev, block, indx);
22326 +
22327 +    spin_unlock_irqrestore (&dev->SdramLock, flags);
22328 +}
22329 +
22330 +
22331 +
22332 +/*
22333 + * Local variables:
22334 + * c-file-style: "stroustrup"
22335 + * End:
22336 + */
22337 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/tproc.c
22338 ===================================================================
22339 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/tproc.c      2004-02-23 16:02:56.000000000 -0500
22340 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/tproc.c   2005-07-28 14:52:52.815683752 -0400
22341 @@ -0,0 +1,778 @@
22342 +/*
22343 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
22344 + *
22345 + *    For licensing information please see the supplied COPYING file
22346 + *
22347 + */
22348 +
22349 +#ident "@(#)$Id: tproc.c,v 1.51.2.1 2004/11/15 11:12:36 mike Exp $"
22350 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc.c,v $ */
22351 +
22352 +#include <qsnet/kernel.h>
22353 +
22354 +#include <elan3/elanregs.h>
22355 +#include <elan3/elandev.h>
22356 +#include <elan3/elanvp.h>
22357 +#include <elan3/elan3mmu.h>
22358 +#include <elan3/elanctxt.h>
22359 +#include <elan3/elandebug.h>
22360 +#include <elan3/urom_addrs.h>
22361 +#include <elan3/thread.h>
22362 +#include <elan3/elansyscall.h>
22363 +#include <elan3/threadsyscall.h>
22364 +#include <elan3/intrinsics.h>
22365 +#include <elan3/vmseg.h>
22366 +
22367 +int
22368 +HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits)
22369 +{
22370 +    THREAD_TRAP  *trap  = dev->ThreadTrap;
22371 +    int           delay = 1;
22372 +
22373 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
22374 +
22375 +    trap->Status.Status  = read_reg32 (dev, Exts.TProcStatus);
22376 +    trap->sp             = read_reg32 (dev, Thread_Desc_SP);
22377 +    trap->pc             = read_reg32 (dev, ExecutePC);
22378 +    trap->npc            = read_reg32 (dev, ExecuteNPC);
22379 +    trap->StartPC        = read_reg32 (dev, StartPC);
22380 +    trap->mi             = GET_STATUS_TRAPTYPE(trap->Status);
22381 +    trap->TrapBits.Bits  = read_reg32 (dev, TrapBits.Bits);
22382 +    trap->DirtyBits.Bits = read_reg32 (dev, DirtyBits.Bits);
22383 +
22384 +    if ( ! (trap->Status.s.WakeupFunction == SleepOneTick) ) {
22385 +       int p,i;
22386 +       E3_uint32 reg = read_reg32 (dev, Exts.InterruptReg);    
22387 +
22388 +       ELAN_REG_REC(reg);
22389 +       p = elan_reg_rec_index;
22390 +       for(i=0;i<ELAN_REG_REC_MAX;i++) {
22391 +           if (elan_reg_rec_file[i] != NULL ) 
22392 +               printk("Elan Reg Record[%2d](%ld): cpu %d  reg %x [%d:%s]\n", p, elan_reg_rec_lbolt[p], elan_reg_rec_cpu[p], elan_reg_rec_reg[p],
22393 +                      elan_reg_rec_line[p], elan_reg_rec_file[p]);
22394 +           p = ( (p+1) % ELAN_REG_REC_MAX);
22395 +       }
22396 +    }
22397 +    
22398 +    ASSERT(trap->Status.s.WakeupFunction == SleepOneTick);
22399 +
22400 +    /* copy the four access fault areas */
22401 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc),          (void *) &trap->FaultSave, 16);
22402 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), (void *) &trap->DataFaultSave, 16);
22403 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), (void *) &trap->InstFaultSave, 16);
22404 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), (void *) &trap->OpenFaultSave, 16);
22405 +    
22406 +    /* copy the registers,  note the endian swap flips the odd registers into the even registers
22407 +       and visa versa. */
22408 +    copy_thread_regs (dev, trap->Registers);
22409 +
22410 +    /*
22411 +     * If the output was open then the ack may not have returned yet. Must wait for the
22412 +     * ack to become valid and update trap_dirty with the new value. Will simulate the
22413 +     * instructions later.
22414 +     */
22415 +    if (trap->TrapBits.s.OutputWasOpen)
22416 +    {
22417 +       trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits);
22418 +       while (! trap->TrapBits.s.AckBufferValid)
22419 +       {
22420 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "tproc: waiting for ack to become valid\n");
22421 +           trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits);
22422 +           DELAY (delay);
22423 +
22424 +           if ((delay <<= 1) == 0) delay = 1;
22425 +       }
22426 +    }
22427 +    
22428 +    /* update device statistics */
22429 +    BumpStat (dev, TProcTraps);
22430 +    switch (trap->mi)
22431 +    {
22432 +    case MI_UnimplementedError:
22433 +       if (trap->TrapBits.s.ForcedTProcTrap)
22434 +           BumpStat (dev, ForcedTProcTraps);
22435 +       if (trap->TrapBits.s.ThreadTimeout)
22436 +       {
22437 +           if (trap->TrapBits.s.PacketTimeout)
22438 +               BumpStat (dev, ThreadOutputTimeouts);
22439 +           else if (trap->TrapBits.s.PacketAckValue == E3_PAckError)
22440 +               BumpStat (dev, ThreadPacketAckErrors);
22441 +       }
22442 +       if (trap->TrapBits.s.TrapForTooManyInsts)
22443 +           BumpStat (dev, TrapForTooManyInsts);
22444 +       break;
22445 +    }
22446 +
22447 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16);
22448 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16);
22449 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16);
22450 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16);
22451 +
22452 +    *RestartBits |= RestartTProc;
22453 +
22454 +    return (TRUE);
22455 +}
22456 +
22457 +void
22458 +DeliverTProcTrap (ELAN3_DEV *dev, THREAD_TRAP *threadTrap, E3_uint32 Pend)
22459 +{
22460 +    ELAN3_CTXT      *ctxt;
22461 +    THREAD_TRAP      *trap;
22462 +
22463 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
22464 +
22465 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, threadTrap->Status.s.Context);
22466 +
22467 +    if (ctxt == NULL)
22468 +    {
22469 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverTProcTrap: context %x invalid\n", threadTrap->Status.s.Context);
22470 +       BumpStat (dev, InvalidContext);
22471 +    }
22472 +    else
22473 +    {
22474 +       if (ELAN3_OP_TPROC_TRAP (ctxt, threadTrap) == OP_DEFER)
22475 +       {
22476 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->ThreadTrapQ))
22477 +           {
22478 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
22479 +               StartSwapoutContext (ctxt, Pend, NULL);
22480 +           }
22481 +           else
22482 +           {
22483 +               trap = ELAN3_QUEUE_BACK (ctxt->ThreadTrapQ, ctxt->ThreadTraps);
22484 +               
22485 +               bcopy (threadTrap, trap, sizeof (THREAD_TRAP));
22486 +               
22487 +               PRINTF4 (ctxt, DBG_INTR, "DeliverTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n",
22488 +                        trap->sp, trap->pc, trap->npc, trap->StartPC);
22489 +               PRINTF3 (ctxt, DBG_INTR, "       mi=%s trap=%08x dirty=%08x\n",
22490 +                        MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits);
22491 +               PRINTF3 (ctxt, DBG_INTR, "       FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
22492 +                        trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
22493 +               PRINTF3 (ctxt, DBG_INTR, "       DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22494 +                        trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
22495 +               PRINTF3 (ctxt, DBG_INTR, "       InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22496 +                        trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
22497 +               PRINTF3 (ctxt, DBG_INTR, "       OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22498 +                        trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
22499 +               
22500 +               PRINTF4 (ctxt, DBG_INTR, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
22501 +                        trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
22502 +                        trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
22503 +               PRINTF4 (ctxt, DBG_INTR, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
22504 +                        trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
22505 +                        trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
22506 +               PRINTF4 (ctxt, DBG_INTR, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
22507 +                        trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
22508 +                        trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
22509 +               PRINTF4 (ctxt, DBG_INTR, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
22510 +                        trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
22511 +                        trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
22512 +               PRINTF4 (ctxt, DBG_INTR, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
22513 +                        trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)],
22514 +                        trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
22515 +               PRINTF4 (ctxt, DBG_INTR, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
22516 +                        trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)],
22517 +                        trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
22518 +               PRINTF4 (ctxt, DBG_INTR, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
22519 +                        trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)],
22520 +                        trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
22521 +               PRINTF4 (ctxt, DBG_INTR, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
22522 +                        trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)],
22523 +                        trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
22524 +               
22525 +               ELAN3_QUEUE_ADD (ctxt->ThreadTrapQ);
22526 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
22527 +               
22528 +               if (ELAN3_QUEUE_FULL (ctxt->ThreadTrapQ))
22529 +               {
22530 +                   PRINTF0 (ctxt, DBG_INTR, "DeliverTProcTrap: thread queue full,  must swap out\n");
22531 +                   ctxt->Status |= CTXT_THREAD_QUEUE_FULL;
22532 +                   
22533 +                   StartSwapoutContext (ctxt, Pend, NULL);
22534 +               }
22535 +           }
22536 +       }
22537 +    }
22538 +}
22539 +
22540 +int
22541 +NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
22542 +{
22543 +    ELAN3_DEV *dev = ctxt->Device;
22544 +
22545 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
22546 +    
22547 +    if (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ))
22548 +       return (0);
22549 +
22550 +    *trap = *ELAN3_QUEUE_FRONT (ctxt->ThreadTrapQ, ctxt->ThreadTraps);
22551 +    ELAN3_QUEUE_REMOVE (ctxt->ThreadTrapQ);
22552 +    
22553 +    return (1);
22554 +}
22555 +
22556 +void
22557 +ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
22558 +{
22559 +    int       i;
22560 +    int       res;
22561 +    E3_Addr   StackPointer;
22562 +
22563 +    PRINTF4 (ctxt, DBG_TPROC, "ResolveTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n",
22564 +            trap->sp, trap->pc, trap->npc, trap->StartPC);
22565 +    PRINTF3 (ctxt, DBG_TPROC, "       mi=%s trap=%08x dirty=%08x\n",
22566 +            MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits);
22567 +    PRINTF3 (ctxt, DBG_TPROC, "       FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
22568 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
22569 +    PRINTF3 (ctxt, DBG_TPROC, "       DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22570 +            trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
22571 +    PRINTF3 (ctxt, DBG_TPROC, "       InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22572 +            trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
22573 +    PRINTF3 (ctxt, DBG_TPROC, "       OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22574 +            trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
22575 +    
22576 +    PRINTF4 (ctxt, DBG_TPROC, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
22577 +            trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
22578 +            trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
22579 +    PRINTF4 (ctxt, DBG_TPROC, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
22580 +            trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
22581 +            trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
22582 +    PRINTF4 (ctxt, DBG_TPROC, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
22583 +            trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
22584 +            trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
22585 +    PRINTF4 (ctxt, DBG_TPROC, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
22586 +            trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
22587 +            trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
22588 +    PRINTF4 (ctxt, DBG_TPROC, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
22589 +            trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)],
22590 +            trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
22591 +    PRINTF4 (ctxt, DBG_TPROC, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
22592 +            trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)],
22593 +            trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
22594 +    PRINTF4 (ctxt, DBG_TPROC, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
22595 +            trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)],
22596 +            trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
22597 +    PRINTF4 (ctxt, DBG_TPROC, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
22598 +            trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)],
22599 +            trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
22600 +           
22601 +
22602 +    BumpUserStat (ctxt, TProcTraps);
22603 +
22604 +    switch (trap->mi)
22605 +    {
22606 +    case MI_UnimplementedError:
22607 +    {
22608 +       /*
22609 +        * This occurs if the threads processor trapped. All other cases will be for the ucode
22610 +        * thread trapping.
22611 +        */
22612 +       int restart = 1;
22613 +       int skip    = 0;
22614 +       
22615 +       PRINTF1 (ctxt, DBG_TPROC, "TProc: Mi=Unimp. Using trap->TrapBits=%x\n", trap->TrapBits.Bits);
22616 +       
22617 +       /*
22618 +        * Data Access Exception.
22619 +        */
22620 +       if (trap->TrapBits.s.DataAccessException)
22621 +       {
22622 +           ASSERT (CTXT_IS_KERNEL(ctxt) || trap->DataFaultSave.s.FSR.Status == 0 ||
22623 +                   ctxt->Capability.cap_mycontext == trap->DataFaultSave.s.FaultContext);
22624 +
22625 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: DataAccessException %08x\n", trap->DataFaultSave.s.FaultAddress);
22626 +
22627 +           if ((res = elan3_pagefault (ctxt, &trap->DataFaultSave, 1)) != ESUCCESS)
22628 +           {
22629 +               PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for data %08x\n",
22630 +                        trap->DataFaultSave.s.FaultAddress);
22631 +
22632 +               if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE)
22633 +                   restart = 0;
22634 +           }
22635 +       }
22636 +       
22637 +       /* 
22638 +        * Instruction Access Exception.
22639 +        */
22640 +       if (trap->TrapBits.s.InstAccessException)
22641 +       {
22642 +           ASSERT (CTXT_IS_KERNEL (ctxt) || trap->InstFaultSave.s.FSR.Status == 0 ||
22643 +                   ctxt->Capability.cap_mycontext == trap->InstFaultSave.s.FaultContext);
22644 +           
22645 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: InstAccessException %08x\n", trap->InstFaultSave.s.FaultAddress);
22646 +
22647 +           if ((res = elan3_pagefault (ctxt, &trap->InstFaultSave, 1)) != ESUCCESS)
22648 +           {
22649 +               PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for inst %08x\n",
22650 +                        trap->InstFaultSave.s.FaultAddress);
22651 +
22652 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->InstFaultSave, res);
22653 +               restart = 0;
22654 +           }
22655 +       }
22656 +       
22657 +       /*
22658 +        * Forced TProc trap/Unimplemented instruction
22659 +        *
22660 +        *  If there is a force tproc trap then don't look at 
22661 +        *  the unimplemented instruction bit - since it can
22662 +        *  be set in obscure circumstances.
22663 +        */
22664 +       if (trap->TrapBits.s.ForcedTProcTrap)
22665 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: forced tproc trap, restarting\n");
22666 +       else if (trap->TrapBits.s.Unimplemented)
22667 +       {
22668 +           E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK);
22669 +
22670 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: unimplemented instruction %08x\n", instr);
22671 +
22672 +           if ((instr & OPCODE_MASK) == OPCODE_Ticc &&
22673 +               (instr & OPCODE_IMM)  == OPCODE_IMM &&
22674 +               (Ticc_COND(instr)     == Ticc_TA))
22675 +           {
22676 +               switch (INSTR_IMM(instr))
22677 +               {
22678 +               case ELAN3_ELANCALL_TRAPNUM:
22679 +                   /*
22680 +                    * Since the thread cannot easily access the global variable which holds
22681 +                    * the elan system call number, we provide a different trap for the elan
22682 +                    * system call, and copy the system call number into %g1 before calling
22683 +                    * ThreadSyscall().
22684 +                    */
22685 +                   BumpUserStat (ctxt, ThreadElanCalls);
22686 +
22687 +                   if (ThreadElancall (ctxt, trap, &skip) != ESUCCESS)
22688 +                   {
22689 +                       ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap);
22690 +                       restart = 0;
22691 +                   }
22692 +                   break;
22693 +
22694 +               case ELAN3_SYSCALL_TRAPNUM:
22695 +                   BumpUserStat (ctxt, ThreadSystemCalls);
22696 +
22697 +                   if (ThreadSyscall (ctxt, trap, &skip) != ESUCCESS)
22698 +                   {
22699 +                       ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap);
22700 +                       restart = 0;
22701 +                   }
22702 +                   break;
22703 +
22704 +               case ELAN3_DEBUG_TRAPNUM:
22705 +                   ElanException (ctxt, EXCEPTION_DEBUG, THREAD_PROC, trap);
22706 +                   skip = 1;
22707 +                   break;
22708 +                   
22709 +               case ELAN3_ABORT_TRAPNUM:
22710 +               default:
22711 +                   ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr);
22712 +                   restart = 0;
22713 +                   break;
22714 +               }
22715 +                   
22716 +           }
22717 +           else
22718 +           {
22719 +               ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr);
22720 +               restart = 0;
22721 +           }
22722 +       }
22723 +       
22724 +       /*
22725 +        * Faulted fetching routes.
22726 +        */
22727 +       if (trap->TrapBits.s.OpenRouteFetch)
22728 +       {
22729 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: OpenRouteFetch %08x\n", trap->OpenFaultSave.s.FaultAddress);
22730 +           
22731 +           if ((res = ResolveVirtualProcess (ctxt, trap->OpenFaultSave.s.FaultAddress)) != ESUCCESS &&
22732 +               ElanException (ctxt, EXCEPTION_INVALID_PROCESS, THREAD_PROC, trap, trap->DataFaultSave.s.FaultAddress, res) != OP_IGNORE)
22733 +           {
22734 +               restart = 0;
22735 +           }
22736 +           else if (RollThreadToClose (ctxt, trap, E3_PAckDiscard) != ESUCCESS)        /* Force a discard */
22737 +           {
22738 +               restart = 0;
22739 +           }
22740 +       }
22741 +       
22742 +       /*
22743 +        * Thread Timeout
22744 +        */
22745 +       if (trap->TrapBits.s.ThreadTimeout)
22746 +       {
22747 +           if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, THREAD_PROC, trap) != OP_IGNORE)
22748 +               restart = 0;
22749 +           else
22750 +           {
22751 +               PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: timeout or PAckError!\n");
22752 +               
22753 +               /* Might deschedule the thread for a while or mark the link error here. */
22754 +               if (! trap->TrapBits.s.OutputWasOpen && RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
22755 +               {
22756 +                   restart = 0;
22757 +               }
22758 +           }
22759 +       }
22760 +       
22761 +       /*
22762 +        * Open exception
22763 +        */
22764 +       if (trap->TrapBits.s.OpenException)
22765 +       {
22766 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: open exception\n");
22767 +           if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
22768 +               restart = 0;
22769 +       }
22770 +       
22771 +       /*
22772 +        * Too many instructions.
22773 +        */
22774 +       if (trap->TrapBits.s.TrapForTooManyInsts)
22775 +       {
22776 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: too many instructions\n");
22777 +           if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
22778 +               restart = 0;
22779 +       }
22780 +       
22781 +       if (restart)
22782 +       {
22783 +           /*
22784 +            * If the output was open when the trap was taken then the trap code must move
22785 +            * the PC on past the close instruction and simulate the effect of all the instructions
22786 +            * that do not output onto the link. The value of the ack received is then used to
22787 +            * simulate the close instruction.
22788 +            */
22789 +           if (trap->TrapBits.s.OutputWasOpen && RollThreadToClose(ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
22790 +           {
22791 +               /*
22792 +                * Don't restart if we couldn't roll it forweards 
22793 +                * to a close instruction.
22794 +                */
22795 +               break;
22796 +           }
22797 +
22798 +           /*
22799 +            * We must check back 3 instructions from the PC,  and if we see the
22800 +            * c_close_cookie() sequence then we must execute the instructions to
22801 +            * the end of it.
22802 +            */
22803 +           /* XXXX: code to be written */
22804 +           
22805 +           StackPointer = SaveThreadToStack (ctxt, trap, skip);
22806 +           
22807 +           ReissueStackPointer (ctxt, StackPointer);
22808 +       }
22809 +       
22810 +       break;
22811 +    }
22812 +    
22813 +    /*
22814 +     * This case is different from the others as %o6 has been overwritten with
22815 +     * the SP. The real PC can be read from StartPC and written back
22816 +     * into %o6 on the stack.
22817 +     */
22818 +    case MI_TProcNext:                 /* Reading the outs block */
22819 +    {
22820 +       E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
22821 +
22822 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
22823 +       {
22824 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
22825 +
22826 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing StartPc to o6\n");
22827 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
22828 +           break;
22829 +       }
22830 +       ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->StartPC & PC_MASK);
22831 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
22832 +       /* DROPTHROUGH */
22833 +    }
22834 +    /*
22835 +     * all of these will be generated when starting up a thread.
22836 +     * Just re-issue the command after fixing the trap. The ucode keeps the startup
22837 +     * from trap information in Thread_Desc_SP while it is still loading the regs.
22838 +     */
22839 +    case MI_WaitForGlobalsRead:                /* Reading the globals block (trap restart) */
22840 +    case MI_WaitForNPCRead:            /* Reading the nPC, V and C (trap restart) */
22841 +    case MI_WaitForPCload:             /* Reading the PC, N and Z (trap restart) */
22842 +    case MI_WaitForInsRead:            /* Reading the ins block (trap restart) */
22843 +    case MI_WaitForLocals:             /* Reading the ins block (trap restart) */
22844 +    case MI_WaitForPCload2:            /* Reading the PC (normal thread start) */
22845 +    case MI_WaitForSpStore:            /* Writing the SP to the outs block */
22846 +       PRINTF2 (ctxt, DBG_TPROC, "ResolveTProcTrap: %s %08x\n", MiToName (trap->mi), trap->InstFaultSave.s.FaultAddress);
22847 +
22848 +       if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
22849 +       {
22850 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n",
22851 +                    trap->FaultSave.s.FaultAddress);
22852 +           if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, &trap->FaultSave, trap, res) != OP_IGNORE)
22853 +               break;
22854 +       }
22855 +
22856 +       ReissueStackPointer (ctxt, trap->sp);
22857 +       break;
22858 +       
22859 +       /*
22860 +        * These traps could occur after the threads proc has stopped (either for a wait,
22861 +        * break, or suspend, but not a trap). Must simulate the uCode's job.
22862 +        */
22863 +    case MI_WaitForOutsWrite:          /* Writing the outs block */
22864 +    case MI_WaitForNPCWrite:           /* Writing the nPC block */
22865 +    {
22866 +       E3_uint32 DeschedBits = (trap->TrapBits.Bits & E3_TProcDescheduleMask);
22867 +       E3_Addr   stack       = (trap->sp & SP_MASK) - sizeof (E3_Stack);
22868 +       
22869 +       PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: trapped on %s while stopping a thread\n", MiToName(trap->mi));
22870 +       
22871 +       /*
22872 +        * Copy npc into o6.
22873 +        */
22874 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)] = trap->npc;
22875 +       
22876 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
22877 +       {
22878 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
22879 +
22880 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing outs to stack\n");
22881 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
22882 +           break;
22883 +       }
22884 +       
22885 +       /*
22886 +        * Now write the outs back to the stack. NOTE then endian flip is undone.
22887 +        */
22888 +       for (i = 0; i < 8; i++)
22889 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]), trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
22890 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
22891 +       
22892 +       /*
22893 +        * thread has been saved. Now find out why the thread proc stopped.
22894 +        */
22895 +       if (DeschedBits == E3_TProcDescheduleSuspend)
22896 +       {
22897 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: suspend instruction executed\n");
22898 +           break;
22899 +       }
22900 +       
22901 +       /*
22902 +        * Break. Just reissue the command.
22903 +        */
22904 +       if (DeschedBits == E3_TProcDescheduleBreak)
22905 +       {
22906 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: break instruction, reissue sp %08x\n", trap->sp);
22907 +           ReissueStackPointer (ctxt, trap->sp);
22908 +           break;
22909 +       }
22910 +       
22911 +       ASSERT (DeschedBits == E3_TProcDescheduleWait);
22912 +        
22913 +       /* DROPTHROUGH to fix up a wait event */
22914 +    }
22915 +    
22916 +    /*
22917 +     * Trapped here trying to execute a wait instruction. All the thread state has already
22918 +     * been saved and the trap has been fixed so simplest thing to do is to start the
22919 +     * thread up at the wait instruction again.
22920 +     */
22921 +    case MI_WaitForEventWaitAddr:      /* Reading back the %o0,%o1 pair for a
22922 +                                          wait event instr. */
22923 +    case MI_WaitForWaitEventAccess:    /* Locked dword read of the event location.
22924 +                                          Note that this read is done with write
22925 +                                          permissions so we never get a trap on the write */
22926 +    {
22927 +       E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
22928 +       
22929 +       if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
22930 +       {
22931 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n", 
22932 +                    trap->FaultSave.s.FaultAddress);
22933 +           if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE)
22934 +               break;
22935 +       }
22936 +
22937 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
22938 +       {
22939 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
22940 +
22941 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing pc to stack\n");
22942 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
22943 +           break;
22944 +       }
22945 +
22946 +       ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->pc);
22947 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
22948 +       
22949 +       ReissueStackPointer (ctxt, trap->sp);
22950 +       break;
22951 +    }
22952 +    
22953 +    /*
22954 +     * Assume the fault will be fixed by FixupEventTrap.
22955 +     */
22956 +    default:
22957 +       FixupEventTrap (ctxt, THREAD_PROC, trap, trap->mi, &trap->FaultSave, 0);
22958 +       break;
22959 +    }
22960 +}
22961 +
22962 +int
22963 +TProcNeedsRestart (ELAN3_CTXT *ctxt)
22964 +{
22965 +    return (ctxt->ItemCount[LIST_THREAD] != 0);
22966 +}
22967 +
22968 +void
22969 +RestartTProcItems (ELAN3_CTXT *ctxt)
22970 +{
22971 +    void     *item;
22972 +    E3_uint32 StackPointer;
22973 +
22974 +    kmutex_lock (&ctxt->SwapListsLock);
22975 +    
22976 +    while (ctxt->ItemCount[LIST_THREAD])
22977 +    {
22978 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_THREAD, &item, &StackPointer))
22979 +           ctxt->ItemCount[LIST_THREAD] = 0;
22980 +       else
22981 +       {
22982 +           if (IssueCommand (ctxt, offsetof (E3_CommandPort, RunThread), StackPointer, 0) == ISSUE_COMMAND_RETRY)
22983 +           {
22984 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_THREAD, item);
22985 +               kmutex_unlock (&ctxt->SwapListsLock);
22986 +               return;
22987 +           }
22988 +           
22989 +           ctxt->ItemCount[LIST_THREAD]--;
22990 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
22991 +       }
22992 +    }
22993 +    kmutex_unlock (&ctxt->SwapListsLock);
22994 +}
22995 +
22996 +E3_Addr
22997 +SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction)
22998 +{
22999 +    E3_Addr      stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
23000 +    E3_Addr      orflag;
23001 +    register int i;
23002 +
23003 +    /*
23004 +     * When the thread deschedules normally, the N & Z flags are written 
23005 +     * to the stack in o6, and the V & C flags are lost.
23006 +     * Since the Elan will store the NPC into o6 (to skip the instruction), 
23007 +     * the CC flags are visible to the trap handler in the trapped PC and NPC.   
23008 +     * If the instruction needs to be re-executed then the CC flags need to be
23009 +     * kept in the right place to be read in when the thread re-starts.
23010 +     *
23011 +     * PC  has N & Z from trapped NPC.
23012 +     * NPC has V & C from trapped PC.
23013 +     */
23014 +    if (SkipInstruction)
23015 +    {
23016 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)]    = trap->npc;
23017 +       trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = ((trap->npc & PC_MASK) + 4) | (trap->pc & CC_MASK);
23018 +    }
23019 +    else
23020 +    {
23021 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)]    = (trap->pc & PC_MASK) | (trap->npc & CC_MASK);
23022 +       trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = (trap->npc & PC_MASK) | (trap->pc & CC_MASK);
23023 +    }
23024 +    
23025 +    if (ELAN3_OP_START_FAULT_CHECK(ctxt))
23026 +    {
23027 +       PRINTF0 (ctxt, DBG_TPROC, "RestartThread: faulted writing out thread\n");
23028 +       ELAN3_OP_END_FAULT_CHECK(ctxt);
23029 +
23030 +       ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
23031 +       return ((E3_Addr) 0);
23032 +    }
23033 +
23034 +
23035 +#ifdef DEBUG_PRINTF
23036 +    PRINTF4 (ctxt, DBG_TPROC, "SaveThreadToStack: SP=%08x PC=%08x NPC=%08x DIRTY=%08x\n",
23037 +            trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits);
23038 +    if (trap->DirtyBits.s.GlobalsDirty)
23039 +    {
23040 +       PRINTF4 (ctxt, DBG_TPROC, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
23041 +                trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
23042 +                trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
23043 +       PRINTF4 (ctxt, DBG_TPROC, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
23044 +                trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
23045 +                trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
23046 +    }
23047 +    if (trap->DirtyBits.s.OutsDirty)
23048 +    {
23049 +       PRINTF4 (ctxt, DBG_TPROC, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
23050 +                trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
23051 +                trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
23052 +       PRINTF4 (ctxt, DBG_TPROC, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
23053 +                trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
23054 +                trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
23055 +    }
23056 +    if (trap->DirtyBits.s.LocalsDirty)
23057 +    {
23058 +       PRINTF4 (ctxt, DBG_TPROC, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
23059 +                trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
23060 +                trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
23061 +       PRINTF4 (ctxt, DBG_TPROC, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
23062 +                trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
23063 +                trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
23064 +    }
23065 +    if (trap->DirtyBits.s.InsDirty)
23066 +    {
23067 +       PRINTF4 (ctxt, DBG_TPROC, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
23068 +                trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
23069 +                trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
23070 +       PRINTF4 (ctxt, DBG_TPROC, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
23071 +                trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
23072 +                trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
23073 +    }
23074 +#endif 
23075 +    
23076 +    PRINTF1 (ctxt, DBG_TPROC, "flushing registers to stack %08x\n", stack);
23077 +
23078 +    /* 
23079 +     * NOTE - store the register to the stack in reverse order, since the stack 
23080 +     * will be allocated in sdram, and we cannot use the sdram accessing functions 
23081 +     * here, as it is "mapped" in user-space.
23082 +     */
23083 +    for (i = 0; i < 8; i++)
23084 +    {
23085 +       if (trap->DirtyBits.s.GlobalsDirty & (1 << i))
23086 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Globals[i]), trap->Registers[REG_GLOBALS+(i^WordEndianFlip)]);
23087 +       if (trap->DirtyBits.s.OutsDirty & (1 << i))
23088 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]),    trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
23089 +       if (trap->DirtyBits.s.LocalsDirty & (1 << i))
23090 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Locals[i]),  trap->Registers[REG_LOCALS+(i^WordEndianFlip)]);
23091 +       if (trap->DirtyBits.s.InsDirty & (1 << i))
23092 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Ins[i]),     trap->Registers[REG_INS+(i^WordEndianFlip)]);
23093 +    }
23094 +
23095 +    /* always restore all registers */
23096 +    orflag = ThreadRestartFromTrapBit | ThreadReloadAllRegs;
23097 +    
23098 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
23099 +
23100 +    return (trap->sp | orflag);
23101 +}
23102 +
23103 +void
23104 +ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer)
23105 +{
23106 +    PRINTF1 (ctxt, DBG_TPROC, "ReissueStackPointer : Queue SP %08x\n", StackPointer);
23107 +    
23108 +    kmutex_lock (&ctxt->SwapListsLock);
23109 +    ctxt->ItemCount[LIST_THREAD]++;
23110 +    ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_THREAD, StackPointer);
23111 +    kmutex_unlock (&ctxt->SwapListsLock);
23112 +}
23113 +
23114 +
23115 +/*
23116 + * Local variables:
23117 + * c-file-style: "stroustrup"
23118 + * End:
23119 + */
23120 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/tprocinsts.c
23121 ===================================================================
23122 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/tprocinsts.c 2004-02-23 16:02:56.000000000 -0500
23123 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/tprocinsts.c      2005-07-28 14:52:52.816683600 -0400
23124 @@ -0,0 +1,401 @@
23125 +/*
23126 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
23127 + *
23128 + *    For licensing information please see the supplied COPYING file
23129 + *
23130 + */
23131 +
23132 +#ident "@(#)$Id: tprocinsts.c,v 1.20 2003/09/24 13:57:25 david Exp $"
23133 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tprocinsts.c,v $*/
23134 +
23135 +#include <qsnet/kernel.h>
23136 +
23137 +#include <elan3/elanregs.h>
23138 +#include <elan3/elandev.h>
23139 +#include <elan3/elanvp.h>
23140 +#include <elan3/elan3mmu.h>
23141 +#include <elan3/elanctxt.h>
23142 +#include <elan3/elandebug.h>
23143 +#include <elan3/urom_addrs.h>
23144 +#include <elan3/thread.h>
23145 +#include <elan3/vmseg.h>
23146 +#include <elan3/elan3mmu.h>
23147 +
23148 +#define MAXINSTR       256             /* # Instructions to look at while looking for close */
23149 +
23150 +static E3_uint32 ALU (ELAN3_CTXT *ctxt,
23151 +                     E3_uint32 fcode, E3_uint32 X, E3_uint32 Y,
23152 +                     E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V);
23153 +
23154 +char *OpcodeNames[] =
23155 +{
23156 +   "ADD   ",
23157 +   "AND   ",
23158 +   "OR    ",
23159 +   "XOR   ",
23160 +   "SUB   ",
23161 +   "ANDN  ",
23162 +   "ORN   ",
23163 +   "XNOR  ",
23164 +   "ADDX  ",
23165 +   "UNIP  ",
23166 +   "UMUL  ",
23167 +   "SMUL  ",
23168 +   "SUBX  ",
23169 +   "UNIP  ",
23170 +   "UDIV  ",
23171 +   "SDIV  ",
23172 +   "ADDcc ",
23173 +   "ANDcc ",
23174 +   "ORcc  ",
23175 +   "XORcc ",
23176 +   "SUBcc ",
23177 +   "ANDNcc",
23178 +   "ORNcc ",
23179 +   "XNORcc",
23180 +   "ADDXcc",
23181 +   "UNIPcc",
23182 +   "UMULcc",
23183 +   "SMULcc",
23184 +   "SUBXcc",
23185 +   "UNIPcc",
23186 +   "UDIVcc",
23187 +   "SDIVcc"
23188 +};
23189 +
23190 +#define REGISTER_VALUE(trap, rN)               (((rN) == 0) ? 0 : (trap)->Registers[(rN)^WordEndianFlip])
23191 +#define ASSIGN_REGISTER(trap, rN, value)       ((rN) != 0 ? trap->Registers[(rN)^WordEndianFlip] = (value) : 0)
23192 +
23193 +int
23194 +RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal)
23195 +{
23196 +    E3_Addr   pc      = (trap->pc & PC_MASK);
23197 +    E3_Addr   npc     = (trap->npc & PC_MASK);
23198 +    E3_uint32 Z       = (trap->npc & PSR_Z_BIT) ? 1 : 0;
23199 +    E3_uint32 N       = (trap->npc & PSR_N_BIT) ? 1 : 0;
23200 +    E3_uint32 C       = (trap->pc  & PSR_C_BIT) ? 1 : 0;
23201 +    E3_uint32 V       = (trap->pc  & PSR_V_BIT) ? 1 : 0;
23202 +    E3_uint32 instr;
23203 +    E3_Addr   addr;
23204 +
23205 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
23206 +    {
23207 +    failed:
23208 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
23209 +
23210 +       ElanException (ctxt, EXCEPTION_SIMULATION_FAILED, THREAD_PROC, trap);
23211 +       return (EFAULT);
23212 +    }
23213 +
23214 +    /*
23215 +     * Thread trapped with output open, or while closing,
23216 +     * so roll the PC forwards to the instruction after the
23217 +     * next c_close, and execute that with the register
23218 +     * specified in c_close set to the trap which occurred.
23219 +     * (This is not 1 which means an ACK)
23220 +     */
23221 +    PRINTF1 (ctxt, DBG_TPROC, "RollThreadToClose: roll pc %x to c_close\n", pc);
23222 +    
23223 +    for (;;)
23224 +    {
23225 +       instr = ELAN3_OP_LOAD32 (ctxt, pc);
23226 +
23227 +       PRINTF2 (ctxt, DBG_TPROC, "RollThreadToClose: PC=%x INSTR=%x\n", pc, instr);
23228 +
23229 +       switch (OPCODE_CLASS(instr))
23230 +       {
23231 +       case OPCODE_CLASS_0:
23232 +           switch ((instr) & OPCODE_CLASS0_MASK)
23233 +           {
23234 +           case OPCODE_SETHI:
23235 +               PRINTF3 (ctxt, DBG_TPROC, "PC %x : sethi r%d = %x\n", pc, INSTR_RD(instr), instr << 10);
23236 +
23237 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), instr << 10);
23238 +               break;
23239 +
23240 +           case OPCODE_SENDREG:
23241 +               PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendreg\n", pc);
23242 +               break;
23243 +               
23244 +           case OPCODE_SENDMEM:
23245 +               PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendmem\n", pc);
23246 +               break;
23247 +               
23248 +           case OPCODE_BICC:
23249 +           {
23250 +               int     DoBranch   = (instr >> 28) & 1;
23251 +               int     CondBranch = 1;
23252 +               E3_Addr OldnPC     = npc;
23253 +
23254 +               PRINTF5 (ctxt, DBG_TPROC, "PC %x : Bicc Z=%x N=%x C=%x V=%x ", pc, Z, N, C, V);
23255 +               switch (instr & OPCODE_BICC_MASK)
23256 +               {
23257 +               case OPCODE_BICC_BN:    CondBranch = 0;                 break;
23258 +               case OPCODE_BICC_BE:    DoBranch ^= Z;                  break;
23259 +               case OPCODE_BICC_BLE:   DoBranch ^= Z | (N ^ V);        break;
23260 +               case OPCODE_BICC_BL:    DoBranch ^= N ^ V;              break;
23261 +               case OPCODE_BICC_BLEU:  DoBranch ^= C | Z;              break;
23262 +               case OPCODE_BICC_BCS:   DoBranch ^= C;                  break;
23263 +               case OPCODE_BICC_BNEG:  DoBranch ^= N;                  break;
23264 +               case OPCODE_BICC_BVS:   DoBranch ^= V;                  break;
23265 +               }
23266 +
23267 +               /* Do the branch */
23268 +               if (DoBranch != 0)
23269 +               {
23270 +                   npc = pc + (((instr & 0x3fffff) << 2) |
23271 +                               (((instr & 0x200000) != 0) ? 0xff000000 : 0));
23272 +                   
23273 +                   PRINTF2 (ctxt, DBG_TPROC, "PC %x : branch taken to %x\n", pc, npc);
23274 +               }
23275 +               else
23276 +               {
23277 +                   npc = npc + 4;
23278 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch not taken\n", pc);
23279 +               }
23280 +               pc = OldnPC;
23281 +
23282 +       /* Test if the next is annulled */
23283 +               if (((instr & OPCODE_BICC_ANNUL) != 0) &
23284 +                   ((DoBranch == 0) | (CondBranch == 0)))
23285 +               {
23286 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch annulled\n", pc);
23287 +
23288 +                   pc = npc;
23289 +                   npc += 4;
23290 +               }
23291 +
23292 +               /*
23293 +                * we've already consumed the instruction - so continue rather
23294 +                * than break;
23295 +                */
23296 +               continue;
23297 +           }
23298 +           
23299 +           default:
23300 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 0 instr %x\n", pc, instr);
23301 +               goto failed;
23302 +           }
23303 +           break;
23304 +
23305 +       case OPCODE_CLASS_1:
23306 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 1 instr %x\n", pc, instr);
23307 +               goto failed;
23308 +               
23309 +       case OPCODE_CLASS_2:
23310 +       {
23311 +           E3_uint32 X = REGISTER_VALUE (trap, INSTR_RS1(instr));
23312 +           E3_uint32 Y = (instr & OPCODE_IMM) ? INSTR_IMM(instr) : REGISTER_VALUE (trap, INSTR_RS2(instr));
23313 +           
23314 +           if ((instr & OPCODE_NOT_ALUOP) == 0)
23315 +           {
23316 +               E3_uint32 fcode  = (instr >> OPCODE_FCODE_SHIFT) & OPCODE_FCODE_MASK;
23317 +               E3_uint32 result = ALU (ctxt, fcode, X, Y, &Z, &N, &C, &V);
23318 +
23319 +               PRINTF5 (ctxt, DBG_TPROC, "PC %x : %s %x %x -> %x", pc, OpcodeNames[fcode], X, Y, result);
23320 +               PRINTF4 (ctxt, DBG_TPROC, "        Z=%x N=%x C=%x V=%x\n", Z, N, C, V);
23321 +               
23322 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), result);
23323 +           }
23324 +           else
23325 +           {
23326 +               switch (instr & OPCODE_MASK)
23327 +               {
23328 +               case OPCODE_OPEN:
23329 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_open\n", pc);
23330 +                   break;
23331 +
23332 +               case OPCODE_CLOSE:
23333 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc);
23334 +                   goto found_close;
23335 +
23336 +               case OPCODE_SLL:
23337 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SLL\n", pc);
23338 +
23339 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X << Y);
23340 +                   break;
23341 +                   
23342 +               case OPCODE_SRL:
23343 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRL\n", pc);
23344 +                   
23345 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y);
23346 +                   break;
23347 +                   
23348 +               case OPCODE_SRA:
23349 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRA\n", pc);
23350 +                   
23351 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y);
23352 +                   break;
23353 +                   
23354 +               case OPCODE_BREAKTEST:
23355 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAKTEST  not allowed while open\n", pc);
23356 +                   goto failed;
23357 +                   
23358 +               case OPCODE_BREAK:
23359 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAK not allowed while open\n", pc);
23360 +                   goto failed;
23361 +
23362 +               case OPCODE_SUSPEND:
23363 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SUSPEND not allowed while open\n", pc);
23364 +                   goto failed;
23365 +                   
23366 +               case OPCODE_WAIT:
23367 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : WAIT not allowed while open\n", pc);
23368 +                   goto failed;
23369 +
23370 +               default:
23371 +                   PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 2 instr %x\n", pc, instr);
23372 +                   goto failed;
23373 +               }
23374 +           }
23375 +           break;
23376 +       }
23377 +       
23378 +       case OPCODE_CLASS_3:
23379 +       {
23380 +           if ((instr & OPCODE_IMM) != 0)
23381 +               addr = REGISTER_VALUE (trap, INSTR_RS1(instr)) + INSTR_IMM(instr);
23382 +           else
23383 +               addr = (REGISTER_VALUE (trap, INSTR_RS1(instr)) + 
23384 +                       REGISTER_VALUE (trap, INSTR_RS2(instr)));
23385 +
23386 +           switch (instr & OPCODE_MASK)
23387 +           {
23388 +           case OPCODE_LD:
23389 +               PRINTF3 (ctxt, DBG_TPROC, "PC %x : LD [%x], r%d\n", pc, addr, INSTR_RD(instr));
23390 +               
23391 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), ELAN3_OP_LOAD32 (ctxt, addr));
23392 +               break;
23393 +               
23394 +           case OPCODE_LDD:
23395 +           case OPCODE_LDBLOCK16:
23396 +           case OPCODE_LDBLOCK32:
23397 +           case OPCODE_LDBLOCK64:
23398 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : LDBLOCKx @ %x is not possible while output open\n", pc, addr);
23399 +               goto failed;
23400 +           
23401 +           case OPCODE_ST:
23402 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : ST @ %x\n", pc, addr);
23403 +               
23404 +               ELAN3_OP_STORE32 (ctxt, addr, REGISTER_VALUE (trap, INSTR_RD(instr)));
23405 +               break;
23406 +                             
23407 +           case OPCODE_STD:
23408 +           case OPCODE_STBLOCK16:
23409 +           case OPCODE_STBLOCK32:
23410 +           case OPCODE_STBLOCK64:
23411 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : STD @ %x is not posisble while output open\n", pc, addr);
23412 +               goto failed;
23413 +
23414 +           case OPCODE_SWAP:
23415 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : SWAP @ %x is not posible while output open\n", pc, addr);
23416 +               goto failed;
23417 +               
23418 +           default:
23419 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 3 instr %x\n", pc, instr);
23420 +               goto failed;
23421 +           }
23422 +           break;
23423 +       }}
23424 +
23425 +       pc = npc;
23426 +       npc += 4;
23427 +    }
23428 +    
23429 +found_close:
23430 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
23431 +
23432 +    PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc);
23433 +    
23434 +    /*
23435 +     * Found the new pc, and have the close instruction in *instr
23436 +     */
23437 +    ASSIGN_REGISTER (trap, INSTR_RD(instr), PAckVal);
23438 +    
23439 +    /*
23440 +     * Move to instruction after close.
23441 +    */
23442 +    trap->pc = npc;
23443 +    
23444 +    /* Insert the value of Z and N from the close inst */
23445 +    trap->npc = (npc + 4) | ((PAckVal == E3_PAckOk) ? 1 :
23446 +                            (PAckVal == E3_PAckTestFail) ? 2 : 0);
23447 +
23448 +    return (ESUCCESS);
23449 +}
23450 +
23451 +E3_uint32
23452 +ALU (ELAN3_CTXT *ctxt,
23453 +     E3_uint32 fcode, E3_uint32 X, E3_uint32 Y,
23454 +     E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V)
23455 +{
23456 +    E3_uint32 XMSB, YMSB, ZMSB, Cprime;
23457 +    E3_uint32 Yprime;
23458 +    E3_uint32 Result=0;
23459 +    
23460 +    Yprime = ((fcode >> 2) & 1) ? ~Y : Y;
23461 +    Cprime = ((fcode >> 2) & 1) ^ (*C & ((fcode >> 3) & 1));
23462 +    XMSB             = (X >> 31) & 1;
23463 +    YMSB             = (Yprime >> 31) & 1;
23464 +    /* mul or div */
23465 +    if ((fcode & 0xa) == 0xa)
23466 +    {
23467 +       PRINTF0 (ctxt, DBG_TPROC, "ALU: tried a multiply or a divide\n");
23468 +       return (0);
23469 +    }
23470 +
23471 +    switch (fcode & 3)
23472 +    {
23473 +       /*ADD */
23474 +    case 0:
23475 +       Result = X + Yprime + Cprime ;
23476 +       if ((fcode & 0x10) == 0)
23477 +           return (Result);
23478 +       
23479 +       ZMSB   = Result >> 31;
23480 +       *V = ((XMSB & YMSB & ~ZMSB) | (~XMSB &~YMSB &  ZMSB));
23481 +       *C = ((fcode >> 2) & 1) ^ ( (XMSB & YMSB) | (~ZMSB & (XMSB | YMSB)));
23482 +       break;
23483 +       
23484 +       /*AND */
23485 +    case 1:
23486 +       Result = X & Yprime ;
23487 +       if ((fcode & 0x10) == 0)
23488 +           return (Result);
23489 +       
23490 +       *V = 0;
23491 +       *C = 0;
23492 +       break;
23493 +       
23494 +       /*OR  */
23495 +    case 2:
23496 +       Result = X | Yprime ;
23497 +       if ((fcode & 0x10) == 0)
23498 +           return (Result);
23499 +       
23500 +       *V = 0;
23501 +       *C = 0;
23502 +       break;
23503 +       
23504 +       /*XOR */
23505 +    case 3:
23506 +       Result = X ^ Yprime ;
23507 +       if ((fcode & 0x10) == 0)
23508 +           return (Result);
23509 +       
23510 +       *V = 0;
23511 +       *C = 0;
23512 +       break;
23513 +    }
23514 +    
23515 +    *Z = (Result == 0) ? 1 : 0;
23516 +    *N = (Result >> 31) & 1;
23517 +
23518 +    return (Result);
23519 +}
23520 +
23521 +/*
23522 + * Local variables:
23523 + * c-file-style: "stroustrup"
23524 + * End:
23525 + */
23526 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/tproc_linux.c
23527 ===================================================================
23528 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/tproc_linux.c        2004-02-23 16:02:56.000000000 -0500
23529 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/tproc_linux.c     2005-07-28 14:52:52.817683448 -0400
23530 @@ -0,0 +1,215 @@
23531 +/*
23532 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
23533 + *
23534 + *    For licensing information please see the supplied COPYING file
23535 + *
23536 + */
23537 +
23538 +#ident "$Id: tproc_linux.c,v 1.19.2.1 2004/10/28 17:08:56 david Exp $"
23539 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc_linux.c,v $*/
23540 +
23541 +#include <qsnet/kernel.h>
23542 +#include <qsnet/autoconf.h>
23543 +
23544 +#include <asm/mman.h>
23545 +#include <linux/file.h>
23546 +
23547 +#ifdef NO_ABI
23548 +#include <asm/poll.h>
23549 +extern asmlinkage long sys_open(const char *, int, int);
23550 +extern asmlinkage ssize_t sys_write(unsigned int, const char *, size_t);
23551 +extern asmlinkage ssize_t sys_read(unsigned int, char *, size_t);
23552 +extern asmlinkage off_t sys_lseek(unsigned int, off_t, unsigned int);
23553 +extern asmlinkage long sys_poll(struct pollfd *, unsigned int, long);
23554 +extern asmlinkage long sys_kill(int, int); 
23555 +#else
23556 +#      include <linux/syscalls.h>
23557 +#endif
23558 +
23559 +#include <elan3/elanregs.h>
23560 +#include <elan3/elandev.h>
23561 +#include <elan3/elanvp.h>
23562 +#include <elan3/elan3mmu.h>
23563 +#include <elan3/elanctxt.h>
23564 +#include <elan3/elandebug.h>
23565 +#include <elan3/urom_addrs.h>
23566 +#include <elan3/thread.h>
23567 +#include <elan3/elansyscall.h>
23568 +#include <elan3/threadsyscall.h>
23569 +
23570 +/*
23571 + * NOTE: system calls from kernel on Linux are different on alpha and i386 
23572 + *       on alpha they return -errno on failure 
23573 + *       on i386  they return -1 on failure and set errno 
23574 + */
23575 +
23576 +static void
23577 +ReturnSyscall (THREAD_TRAP *trap, unsigned long rc, int *skip)
23578 +{
23579 +    if (rc >= (unsigned long) (-130))
23580 +    {
23581 +       trap->pc |= PSR_C_BIT;  /* set carry to indicate failure */
23582 +
23583 +       trap->Registers[REG_OUTS+(0^WordEndianFlip)] = -rc;
23584 +    } 
23585 +    else 
23586 +    {
23587 +       trap->pc &= ~PSR_C_BIT; /* clear carry to indicate success */
23588 +       trap->Registers[REG_OUTS+(0^WordEndianFlip)] = rc;
23589 +    }
23590 +    trap->Registers[REG_OUTS+(1^WordEndianFlip)] = 0;
23591 +    *skip = 1;
23592 +}
23593 +
23594 +static void 
23595 +dump_regs(ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
23596 +{
23597 +    PRINTF (ctxt, DBG_TPROC, "               OUTS %08x %08x %08x %08x\n",
23598 +      trap->Registers[REG_OUTS+(0^WordEndianFlip)], 
23599 +      trap->Registers[REG_OUTS+(1^WordEndianFlip)],
23600 +      trap->Registers[REG_OUTS+(2^WordEndianFlip)], 
23601 +      trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
23602 +    PRINTF (ctxt, DBG_TPROC, "                    %08x %08x %08x %08x\n",
23603 +      trap->Registers[REG_OUTS+(4^WordEndianFlip)], 
23604 +      trap->Registers[REG_OUTS+(5^WordEndianFlip)],
23605 +      trap->Registers[REG_OUTS+(6^WordEndianFlip)], 
23606 +      trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
23607 +}
23608 +
23609 +int
23610 +ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
23611 +{
23612 +    int                  code;
23613 +    caddr_t       maddr;
23614 +    struct file  *file;
23615 +    unsigned long rc;
23616 +    int           i;
23617 +    uintptr_t     av[6];
23618 +    uintptr_t     ptr;
23619 +   
23620 +    PRINTF (ctxt, DBG_TPROC, "ThreadSyscall: PC %08x G1 %08x\n", 
23621 +      trap->pc, trap->Registers[REG_GLOBALS+(1^WordEndianFlip)]);
23622 +    dump_regs(ctxt, trap);
23623 +
23624 +    code = trap->Registers[REG_GLOBALS+(1^WordEndianFlip)];
23625 +
23626 +    /* Copy the system call arguments from %o0-%o5 */
23627 +    for (i = 0; i < 6; i++)
23628 +       av[i] = trap->Registers[REG_OUTS+(i^WordEndianFlip)];
23629 +    
23630 +    rc = (unsigned long) -EINVAL;
23631 +
23632 +    switch (code) {
23633 +       case ELAN3_SYS_open:
23634 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
23635 +           if (maddr != NULL)
23636 +               rc = sys_open((const char *)maddr, av[1], av[2]);
23637 +           break;
23638 +
23639 +       case ELAN3_SYS_close:
23640 +           rc = sys_close(av[0]);
23641 +           break;
23642 +
23643 +       case ELAN3_SYS_write:
23644 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
23645 +           if (maddr != NULL)
23646 +               rc = sys_write(av[0], (const char *)maddr, av[2]);
23647 +           break;
23648 +
23649 +       case ELAN3_SYS_read:
23650 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
23651 +           if (maddr != NULL)
23652 +               rc = sys_read(av[0], (char *)maddr, av[2]);
23653 +           break;
23654 +
23655 +       case ELAN3_SYS_poll:
23656 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
23657 +           if (maddr != NULL)
23658 +               rc = sys_poll((struct pollfd *)maddr, av[1], av[2]);
23659 +           break;
23660 +       
23661 +       case ELAN3_SYS_lseek:
23662 +           rc = sys_lseek(av[0], av[1], av[2]);
23663 +           break;
23664 +           
23665 +       case ELAN3_SYS_mmap:
23666 +           if ((E3_Addr) av[0] == (E3_Addr) 0)
23667 +               maddr = NULL;
23668 +           else if ((maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0])) == NULL)
23669 +               break;
23670 +       
23671 +           file = NULL;
23672 +           /* GNAT 5515: If *not* anonymous memory need to do fget */
23673 +           if ((av[3] & MAP_ANONYMOUS) == 0 && (file = fget (av[4])) == NULL)
23674 +           {
23675 +               rc = -EBADF;
23676 +               break;
23677 +           }
23678 +
23679 +           down_write (&current->mm->mmap_sem);
23680 +           ptr = do_mmap_pgoff (file, (unsigned long) maddr, av[1], av[2], av[3], av[5] >>PAGE_SHIFT);
23681 +           up_write (&current->mm->mmap_sem);
23682 +
23683 +           if (file)
23684 +               fput (file);
23685 +           
23686 +           if (IS_ERR((void *) ptr))
23687 +               rc = PTR_ERR((void *) ptr);
23688 +           else
23689 +               rc = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t)ptr);
23690 +
23691 +           break;
23692 +       
23693 +       case ELAN3_SYS_munmap:
23694 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
23695 +
23696 +#ifdef AC
23697 +           if (maddr != NULL)
23698 +               rc = do_munmap(current->mm, (unsigned long) maddr, av[1], 1);
23699 +#else
23700 +           if (maddr != NULL)
23701 +               rc = do_munmap(current->mm, (unsigned long) maddr, av[1]);
23702 +#endif
23703 +           break;
23704 +
23705 +       case ELAN3_SYS_kill:
23706 +           rc = sys_kill(av[0], av[1]);
23707 +           break;
23708 +
23709 +       case ELAN3_SYS_getpid:
23710 +           rc = current->pid;
23711 +           break;
23712 +
23713 +       default:
23714 +           return EINVAL;
23715 +    }
23716 +    ReturnSyscall(trap, rc, skip);
23717 +    return ESUCCESS;
23718 +}
23719 +
23720 +
23721 +int
23722 +ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
23723 +{
23724 +       int ret = ESUCCESS;
23725 +
23726 +       PRINTF (ctxt, DBG_TPROC, "ThreadElancall: PC %08x\n", trap->pc);
23727 +       dump_regs(ctxt, trap);
23728 +
23729 +       /*
23730 +        * Elan system call 'type' is passed in o0
23731 +        */
23732 +       switch (trap->Registers[REG_OUTS+(0^WordEndianFlip)]) 
23733 +       {
23734 +       default:
23735 +               ret = EINVAL;
23736 +               break;
23737 +       }
23738 +       return ret;
23739 +}
23740 +
23741 +/*
23742 + * Local variables:
23743 + * c-file-style: "stroustrup"
23744 + * End:
23745 + */
23746 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan3/virtual_process.c
23747 ===================================================================
23748 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan3/virtual_process.c    2004-02-23 16:02:56.000000000 -0500
23749 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan3/virtual_process.c 2005-07-28 14:52:52.818683296 -0400
23750 @@ -0,0 +1,884 @@
23751 +/*
23752 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
23753 + *
23754 + *    For licensing information please see the supplied COPYING file
23755 + *
23756 + */
23757 +
23758 +#ident "@(#)$Id: virtual_process.c,v 1.68 2004/06/07 13:50:10 mike Exp $"
23759 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/virtual_process.c,v $*/
23760 +
23761 +#include <qsnet/kernel.h>
23762 +
23763 +#include <elan/elanmod.h>
23764 +#include <elan3/elanregs.h>
23765 +#include <elan3/elandev.h>
23766 +#include <elan3/elanvp.h>
23767 +#include <elan3/elan3mmu.h>
23768 +#include <elan3/elanctxt.h>
23769 +#include <elan3/elandebug.h>
23770 +#include <elan3/urom_addrs.h>
23771 +#include <elan3/thread.h>
23772 +#include <elan3/vmseg.h>
23773 +#include <elan3/elansyscall.h>
23774 +
23775 +static ELAN3_VPSEG *
23776 +InstallSegment (ELAN3_CTXT *ctxt, int process, int entries)
23777 +{
23778 +    ELAN3_VPSEG **prevSeg, *seg;
23779 +    int lastTop = -1;
23780 +    int        top     = process + entries-1;
23781 +
23782 +    ASSERT (krwlock_is_write_locked (&ctxt->VpLock));
23783 +
23784 +    for (prevSeg = &ctxt->VpSegs; (seg = (*prevSeg)) != NULL; prevSeg = &seg->Next)
23785 +    {
23786 +       int thisTop = seg->Process + seg->Entries - 1;
23787 +
23788 +        if (process < seg->Process && (process <= lastTop || top >= seg->Process))
23789 +       {
23790 +           /*
23791 +            * Overlaps with last segment, or this one 
23792 +            */
23793 +           return (NULL);
23794 +       }
23795 +       if (seg->Process > process)
23796 +           break;
23797 +       
23798 +       lastTop = thisTop;
23799 +    }
23800 +    
23801 +    KMEM_ZALLOC (seg, ELAN3_VPSEG *, sizeof (ELAN3_VPSEG), TRUE);
23802 +    
23803 +    if (seg == (ELAN3_VPSEG *) NULL)
23804 +       return (NULL);
23805 +    
23806 +    seg->Process = process;
23807 +    seg->Entries = entries;
23808 +
23809 +
23810 +    PRINTF2 (ctxt, DBG_VP, "InstallSegment: add seg %p before %p\n", seg, *prevSeg);
23811 +
23812 +    seg->Next = *prevSeg;
23813 +    *prevSeg = seg;
23814 +
23815 +    return (seg);
23816 +}
23817 +
23818 +static int 
23819 +RemoveSegment (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg)
23820 +{
23821 +    ELAN3_VPSEG **prevSeg, *thisSeg;
23822 +
23823 +    ASSERT (krwlock_is_write_locked (&ctxt->VpLock));
23824 +
23825 +    for (prevSeg = &ctxt->VpSegs; (thisSeg = (*prevSeg)) != NULL; prevSeg = &thisSeg->Next)
23826 +    {
23827 +       if (thisSeg == seg)
23828 +           break;
23829 +    }
23830 +
23831 +    if (thisSeg == (ELAN3_VPSEG *) NULL)
23832 +       return (EINVAL);
23833 +
23834 +
23835 +    PRINTF2 (ctxt, DBG_VP, "RemoveSegment: remove seg %p next %p\n", thisSeg, thisSeg->Next);
23836 +
23837 +    *prevSeg = thisSeg->Next;
23838 +    
23839 +    KMEM_FREE ((caddr_t) seg, sizeof (ELAN3_VPSEG));
23840 +
23841 +    return (ESUCCESS);
23842 +}
23843 +
23844 +static ELAN3_VPSEG *
23845 +FindSegment (ELAN3_CTXT *ctxt, int low, int high)
23846 +{
23847 +    ELAN3_VPSEG *seg;
23848 +
23849 +    ASSERT(krwlock_is_locked (&ctxt->VpLock));
23850 +    
23851 +    for (seg = ctxt->VpSegs; seg; seg = seg->Next)
23852 +    {
23853 +       if (seg->Process <= low && (seg->Process + seg->Entries) > high)
23854 +           return (seg);
23855 +    }
23856 +
23857 +    return ((ELAN3_VPSEG *) NULL);
23858 +}
23859 +
23860 +ELAN_LOCATION
23861 +ProcessToLocation (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap)
23862 +{
23863 +    ELAN_LOCATION location;
23864 +    int           nnodes,nctxs;
23865 +    int           node,ctx,i;
23866 +
23867 +    ASSERT(krwlock_is_locked (&ctxt->VpLock));
23868 +
23869 +    location.loc_node    = ELAN3_INVALID_NODE;
23870 +    location.loc_context = -1;
23871 +
23872 +    PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d seg %p cap %p\n", process, seg, cap);
23873 +
23874 +    if (seg == NULL)
23875 +       seg = FindSegment (ctxt, process, process);
23876 +
23877 +    if (!seg || (seg->Type != ELAN3_VPSEG_P2P))
23878 +       return (location);
23879 +
23880 +    cap    = &seg->SegCapability;
23881 +    nnodes = ELAN_CAP_NUM_NODES (cap);
23882 +    nctxs  = ELAN_CAP_NUM_CONTEXTS (cap);
23883 +
23884 +    switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
23885 +    {
23886 +    case ELAN_CAP_TYPE_BLOCK:
23887 +    {
23888 +       int entries = ELAN_CAP_ENTRIES(cap);
23889 +
23890 +       for (node = 0, i = 0; node < nnodes && i < entries; node++)
23891 +       {
23892 +           for (ctx = 0; ctx < nctxs && i < entries; ctx++)
23893 +           {
23894 +               if (( seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, ctx + (node * nctxs)))
23895 +               {
23896 +                   if (i++ == (process - seg->Process))
23897 +                   { 
23898 +                       location.loc_node    = seg->SegCapability.cap_lownode    + node;
23899 +                       location.loc_context = seg->SegCapability.cap_lowcontext + ctx;
23900 +                       goto found;
23901 +                   }
23902 +               }
23903 +           }
23904 +       }
23905 +       break;
23906 +    }
23907 +    case ELAN_CAP_TYPE_CYCLIC:
23908 +    {
23909 +       int entries = ELAN_CAP_ENTRIES(cap);
23910 +
23911 +       for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++)
23912 +       {
23913 +           for (node = 0; node < nnodes && i < entries; node++)
23914 +           {
23915 +               if ((seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, node + (ctx * nnodes)))
23916 +               {                                   
23917 +                   if (i++ ==  (process - seg->Process))
23918 +                   { 
23919 +                       location.loc_node    = seg->SegCapability.cap_lownode    + node;
23920 +                       location.loc_context = seg->SegCapability.cap_lowcontext + ctx;
23921 +                       goto found;
23922 +                   }
23923 +               }
23924 +           }
23925 +       }
23926 +       break;  
23927 +    }
23928 +    default:
23929 +       break;
23930 +    }
23931 +       
23932 + found:
23933 +    
23934 +    PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d -> Node %d Context %d\n", process, location.loc_node,  location.loc_context);
23935 +
23936 +    if (cap != NULL)
23937 +    {
23938 +       bcopy ((caddr_t) &seg->SegCapability, (caddr_t) cap, sizeof (ELAN_CAPABILITY));
23939 +       cap->cap_mycontext = location.loc_context;
23940 +    }
23941 +
23942 +    return (location);
23943 +}
23944 +
23945 +int
23946 +LocationToProcess (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, ELAN_LOCATION loc, ELAN_CAPABILITY *cap)
23947 +{
23948 +    int nnodes,nctxs;
23949 +    int node,ctx,i;
23950 +
23951 +    if (seg == NULL)
23952 +       return ELAN3_INVALID_PROCESS;
23953 +
23954 +    if (!seg || (seg->Type != ELAN3_VPSEG_P2P))
23955 +       return ELAN3_INVALID_PROCESS;
23956 +
23957 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
23958 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
23959 +
23960 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
23961 +    {
23962 +    case ELAN_CAP_TYPE_BLOCK:
23963 +    {
23964 +       int entries = ELAN_CAP_ENTRIES(cap);
23965 +
23966 +       for (node = 0, i = 0; node < nnodes && i < entries; node++)
23967 +       {
23968 +           for (ctx = 0; ctx < nctxs && i < entries; ctx++)
23969 +           {
23970 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctx + (node * nctxs)))
23971 +               {
23972 +                   if ((loc.loc_node    == (cap->cap_lownode + node) ) 
23973 +                       && (loc.loc_context == (cap->cap_lowcontext + ctx) ))
23974 +                   {
23975 +                       return (i + seg->Process);
23976 +                   }
23977 +                   i++;
23978 +               }
23979 +           }
23980 +       }
23981 +       break;
23982 +    }  
23983 +    case ELAN_CAP_TYPE_CYCLIC:
23984 +    {
23985 +       int entries = ELAN_CAP_ENTRIES(cap);
23986 +
23987 +       for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++)
23988 +       {
23989 +           for (node = 0; node < nnodes && i < entries; node++)
23990 +           {
23991 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (ctx * nnodes)))
23992 +               {
23993 +                   if ((loc.loc_node   == (cap->cap_lownode + node) ) 
23994 +                       && (loc.loc_context == (cap->cap_lowcontext + ctx) ))
23995 +                   {
23996 +                       return (i + seg->Process);
23997 +                   }
23998 +                   i++;
23999 +                   
24000 +               }
24001 +           }
24002 +       }
24003 +       break;
24004 +    }  
24005 +    default:
24006 +       break;
24007 +    }
24008 +       
24009 +    return ELAN3_INVALID_PROCESS;
24010 +}
24011 +
24012 +int
24013 +elan3_addvp (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap)
24014 +{
24015 +    ELAN3_DEV       *dev = ctxt->Device;
24016 +    ELAN_POSITION    *pos = &ctxt->Position;
24017 +    ELAN3_VPSEG       *seg;
24018 +    int                      i;
24019 +    int                      nodeOff;
24020 +    int                      ctxOff;
24021 +    int                      nnodes;
24022 +    int                      nctxs;
24023 +    E3_uint16         flits[MAX_FLITS];
24024 +    int               nflits;
24025 +    int               entries;
24026 +
24027 +    PRINTF2 (ctxt, DBG_VP, "elan3_addvp: %d -> %s\n", process, CapabilityString (cap));
24028 +
24029 +    entries = ELAN_CAP_ENTRIES(cap);
24030 +    if (entries <= 0 || (process + entries) > ELAN3_MAX_VPS)
24031 +       return (EINVAL);
24032 +
24033 +    /*
24034 +     * Scan the virtual process segment list, to add this entry, and ensure that
24035 +     * the ranges don't overlap.
24036 +     */
24037 +    krwlock_write (&ctxt->VpLock);
24038 +
24039 +    /* check cap. */
24040 +    switch (elan3_validate_cap (ctxt->Device, cap, ELAN_USER_P2P))
24041 +    {
24042 +    case ELAN_CAP_OK:
24043 +       /* nothing */
24044 +       break;
24045 +
24046 +    case ELAN_CAP_RMS:
24047 +       if ( elan_validate_map(cap, cap) != ESUCCESS)
24048 +       {
24049 +           krwlock_done (&ctxt->VpLock);
24050 +           return (EINVAL);
24051 +       }
24052 +       break;
24053 +
24054 +    default:
24055 +       krwlock_done (&ctxt->VpLock);
24056 +       return (EINVAL);
24057 +    }
24058 +
24059 +    if ((seg = InstallSegment (ctxt, process, entries)) == NULL)
24060 +    {
24061 +       PRINTF0 (ctxt, DBG_VP, "elan3_addvp: failed to find a seg\n");
24062 +       krwlock_done (&ctxt->VpLock);
24063 +       return (EINVAL);
24064 +    }
24065 +    
24066 +    seg->Type                        = ELAN3_VPSEG_P2P;
24067 +    seg->SegCapability               = *cap;
24068 +    seg->SegCapability.cap_mycontext = ELAN_CAP_UNINITIALISED;
24069 +
24070 +    PRINTF3 (ctxt, DBG_VP, "elan3_addvp: segment type %x  %d %d\n",
24071 +           seg->SegCapability.cap_type, seg->Process, entries);
24072 +
24073 +
24074 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
24075 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
24076 +
24077 +    /* position not determined, so cannot load any routes, the hwtest
24078 +     * process must explicitly set it's own routes */
24079 +    
24080 +    if (!(cap->cap_type & ELAN_CAP_TYPE_HWTEST) && (pos->pos_mode != ELAN_POS_UNKNOWN))
24081 +    {
24082 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
24083 +       {
24084 +       case ELAN_CAP_TYPE_BLOCK:
24085 +           for (nodeOff = 0, i = 0; nodeOff < nnodes && i < entries; nodeOff++)
24086 +           {
24087 +               for (ctxOff = 0; ctxOff < nctxs && i < entries; ctxOff++)
24088 +               {
24089 +                   if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
24090 +                   {
24091 +                       /* Don't load a route if there's no switch and trying to talk to myself */
24092 +                       if (pos->pos_mode == ELAN_POS_MODE_SWITCHED ||
24093 +                           (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) ||
24094 +                           (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid))
24095 +                       {
24096 +                           PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n",
24097 +                                    seg->Process + i, cap->cap_lownode +nodeOff, cap->cap_lowcontext +ctxOff);
24098 +                           
24099 +                           nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff,
24100 +                                                   DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24101 +                           
24102 +
24103 +
24104 +                           LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext + ctxOff, nflits, flits);  
24105 +                       }
24106 +                       
24107 +                       i++;
24108 +                   }
24109 +               }
24110 +           }
24111 +           break;
24112 +           
24113 +       case ELAN_CAP_TYPE_CYCLIC:
24114 +           for (ctxOff = 0, i = 0; ctxOff < nctxs && i < entries; ctxOff++)
24115 +           {
24116 +               for (nodeOff = 0; nodeOff < nnodes && i < entries; nodeOff++)
24117 +               {
24118 +                   if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
24119 +                   {
24120 +                       /* Don't load a route if there's no switch and trying to talk to myself */
24121 +                       if (pos->pos_mode == ELAN_POS_MODE_SWITCHED ||
24122 +                           (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) ||
24123 +                           (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid))
24124 +                       {
24125 +                           PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n",
24126 +                                    seg->Process + i, cap->cap_lownode + nodeOff, cap->cap_lowcontext +ctxOff);
24127 +                       
24128 +                           nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff,
24129 +                                                   DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24130 +                           
24131 +
24132 +                           LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext +ctxOff, nflits, flits);  
24133 +                       } 
24134 +                       i++;                
24135 +                   }
24136 +               }
24137 +           }
24138 +           break;      
24139 +       default:
24140 +           break;
24141 +       }
24142 +    }
24143 +  
24144 +    krwlock_done (&ctxt->VpLock);
24145 +
24146 +    return (ESUCCESS);
24147 +}
24148 +
24149 +int
24150 +elan3_removevp (ELAN3_CTXT *ctxt, int process)
24151 +{
24152 +    ELAN3_VPSEG *seg;
24153 +    ELAN3_VPSEG *next;
24154 +    int                i;
24155 +
24156 +    krwlock_write (&ctxt->VpLock);
24157 +
24158 +    PRINTF1 (ctxt, DBG_VP, "elan3_removevp: remove process %d\n", process);
24159 +
24160 +    if (process == ELAN3_INVALID_PROCESS)
24161 +       seg = ctxt->VpSegs;
24162 +    else
24163 +       seg = FindSegment (ctxt, process, process);
24164 +
24165 +    if (seg == (ELAN3_VPSEG *) NULL)
24166 +    {
24167 +       krwlock_done (&ctxt->VpLock);
24168 +       return (EINVAL);
24169 +    }
24170 +    
24171 +    do {
24172 +       PRINTF3 (ctxt, DBG_VP, "elan3_removevp: segment is %p [%x,%x]\n",
24173 +                seg, seg->Process, seg->Process+seg->Entries);
24174 +
24175 +       for (i = 0; i < seg->Entries; i++)
24176 +           ClearRoute (ctxt->Device, ctxt->RouteTable, seg->Process+i);
24177 +
24178 +        /* get Next pointer value before structure is free'd */
24179 +        next = seg->Next;      
24180 +       RemoveSegment (ctxt, seg);
24181 +
24182 +    } while (process == ELAN3_INVALID_PROCESS && (seg = next) != NULL);
24183 +    
24184 +    krwlock_done (&ctxt->VpLock);
24185 +
24186 +    return (ESUCCESS);
24187 +}
24188 +
24189 +int
24190 +elan3_addbcastvp (ELAN3_CTXT *ctxt, int process, int lowProc, int highProc)
24191 +{
24192 +    ELAN_POSITION *pos = &ctxt->Position;
24193 +    ELAN3_VPSEG    *seg;
24194 +    ELAN3_VPSEG    *aseg;
24195 +    int            virtualProcess;
24196 +    E3_uint64     routeValue;
24197 +
24198 +    PRINTF3 (ctxt, DBG_VP, "elan3_addbcastvp: process %d [%d,%d]\n", process, lowProc, highProc);
24199 +
24200 +    if (lowProc > highProc || pos->pos_mode != ELAN_POS_MODE_SWITCHED)
24201 +       return (EINVAL);
24202 +    
24203 +    krwlock_write (&ctxt->VpLock);
24204 +
24205 +    if ((aseg = FindSegment (ctxt, lowProc, highProc)) == NULL || (aseg->Type != ELAN3_VPSEG_P2P))
24206 +    {
24207 +       PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to p2p segment\n", lowProc, highProc);
24208 +       krwlock_done (&ctxt->VpLock);
24209 +       return (EINVAL);
24210 +    }
24211 +
24212 +    /* check aseg->SegCapability */    
24213 +    switch (elan3_validate_cap (ctxt->Device, &aseg->SegCapability, ELAN_USER_BROADCAST))
24214 +    {
24215 +    case ELAN_CAP_OK:
24216 +       /* nothing */
24217 +       break;
24218 +       
24219 +    case ELAN_CAP_RMS:
24220 +       if ( elan_validate_map(&ctxt->Capability, &aseg->SegCapability) != ESUCCESS )
24221 +       {
24222 +           krwlock_done (&ctxt->VpLock);
24223 +           return (EINVAL);
24224 +       }
24225 +       break;
24226 +
24227 +    default:
24228 +       krwlock_done (&ctxt->VpLock);
24229 +       return (EINVAL);
24230 +    }
24231 +
24232 +    if ( ProcessToLocation (ctxt, aseg, lowProc,  NULL).loc_context != 
24233 +        ProcessToLocation (ctxt, aseg, highProc, NULL).loc_context)
24234 +    {
24235 +       PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to single context\n", lowProc, highProc);
24236 +       krwlock_done (&ctxt->VpLock);
24237 +       return (EINVAL);
24238 +    }
24239 +    
24240 +    if ((seg = InstallSegment (ctxt, process, 1)) == NULL)
24241 +    {
24242 +       krwlock_done (&ctxt->VpLock);
24243 +       return (EINVAL);
24244 +    }
24245 +
24246 +    seg->Type        = ELAN3_VPSEG_BROADCAST;
24247 +    seg->SegLowProc  = lowProc;
24248 +    seg->SegHighProc = highProc;
24249 +
24250 +    PRINTF4 (ctxt, DBG_VP, "elan3_addbcastvp: installed seg %p Type %d LowProc %d HighProc %d\n",
24251 +           seg, seg->Type, seg->SegLowProc, seg->SegHighProc);
24252 +
24253 +    for (virtualProcess = lowProc; virtualProcess <= highProc; virtualProcess++)
24254 +    {
24255 +       if (virtualProcess < 0 || virtualProcess >= ctxt->RouteTable->Size)
24256 +           routeValue = 0;
24257 +       else
24258 +           routeValue = elan3_sdram_readq ( ctxt->Device, ctxt->RouteTable->Table + virtualProcess * NBYTES_PER_SMALL_ROUTE);
24259 +       
24260 +       if (! (routeValue & ROUTE_VALID))
24261 +       {
24262 +           PRINTF2 (ctxt, DBG_VP, "loadvp[%x]: broadcast %x not valid\n", 
24263 +                    ctxt->Capability.cap_mycontext, virtualProcess);
24264 +           break;
24265 +       }
24266 +    }
24267 +           
24268 +    if (virtualProcess > highProc)                     /* All vps now present */
24269 +    {                                          /* so load up broadcast route */
24270 +       E3_uint16     flits[MAX_FLITS];
24271 +       ELAN_LOCATION low    = ProcessToLocation (ctxt, aseg, lowProc,   NULL);
24272 +       ELAN_LOCATION high   = ProcessToLocation (ctxt, aseg, highProc,  NULL);
24273 +       int           nflits = GenerateRoute (pos, flits, low.loc_node, high.loc_node, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24274 +       
24275 +       PRINTF6 (ctxt, DBG_VP, "loadvp[%x]: broadcast %d -> %x.%x [%x.%x]\n", ctxt->Capability.cap_mycontext,
24276 +                seg->Process, low.loc_node, high.loc_node, 
24277 +                low.loc_context, high.loc_context);
24278 +       
24279 +       LoadRoute ( ctxt->Device, ctxt->RouteTable, seg->Process, low.loc_context, nflits, flits);
24280 +    }
24281 +
24282 +    krwlock_done (&ctxt->VpLock);
24283 +
24284 +    return (ESUCCESS);
24285 +}
24286 +
24287 +int
24288 +elan3_process (ELAN3_CTXT *ctxt)
24289 +{
24290 +    int           res = ELAN3_INVALID_PROCESS;
24291 +    ELAN3_VPSEG   *seg;
24292 +    ELAN_LOCATION loc;
24293 +
24294 +    krwlock_write (&ctxt->VpLock);
24295 +
24296 +    loc.loc_node    = ctxt->Position.pos_nodeid;
24297 +    loc.loc_context = ctxt->Capability.cap_mycontext;
24298 +
24299 +    for (seg = ctxt->VpSegs ; seg; seg = seg->Next)
24300 +    {
24301 +       if (seg->Type == ELAN3_VPSEG_P2P &&
24302 +           seg->SegCapability.cap_lowcontext  <= ctxt->Capability.cap_mycontext &&
24303 +           seg->SegCapability.cap_highcontext >= ctxt->Capability.cap_mycontext &&
24304 +           seg->SegCapability.cap_lownode     <= ctxt->Position.pos_nodeid &&
24305 +           seg->SegCapability.cap_highnode    >= ctxt->Position.pos_nodeid)
24306 +       {
24307 +           if ((res=LocationToProcess (ctxt,seg,loc,&ctxt->Capability)) != ELAN3_INVALID_PROCESS)
24308 +           {
24309 +                krwlock_done (&ctxt->VpLock);
24310 +                return res;
24311 +           }
24312 +       }
24313 +    }
24314 +
24315 +    krwlock_done (&ctxt->VpLock);
24316 +
24317 +    return (res);
24318 +}
24319 +
24320 +int
24321 +elan3_check_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError)
24322 +{
24323 +    PRINTF5 (ctxt, DBG_VP, "elan3_check_route: vp=%d flits=%04x %04x %04x %04x\n",
24324 +            process, flits[0], flits[1], flits[2], flits[3]);
24325 +    PRINTF4 (ctxt, DBG_VP, "                            %04x %04x %04x %04x\n",
24326 +            flits[4], flits[5], flits[6], flits[7]);
24327 +
24328 +    krwlock_read (&ctxt->VpLock);
24329 +    *routeError=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node);
24330 +    krwlock_done (&ctxt->VpLock);
24331 +
24332 +    return (ESUCCESS); /* the call is a success tho the errorcode may be set */
24333 +}
24334 +
24335 +int
24336 +elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits)
24337 +{
24338 +    ELAN3_VPSEG *seg;
24339 +    int                res = 0;
24340 +    int                nflits;
24341 +    int         err;
24342 +
24343 +    PRINTF5 (ctxt, DBG_VP, "elan3_load_route: vp=%d flits=%04x %04x %04x %04x\n",
24344 +            process, flits[0], flits[1], flits[2], flits[3]);
24345 +    PRINTF4 (ctxt, DBG_VP, "                            %04x %04x %04x %04x\n",
24346 +            flits[4], flits[5], flits[6], flits[7]);
24347 +
24348 +    krwlock_write (&ctxt->VpLock);
24349 +
24350 +    /* check the route is valid */
24351 +    if (!(ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST))
24352 +    {
24353 +       /* must have already attached to define my context number */
24354 +       if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
24355 +       {
24356 +           krwlock_done (&ctxt->VpLock);
24357 +           return (EINVAL);
24358 +       }
24359 +
24360 +       if ((err=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node)) != ELAN3_ROUTE_SUCCESS)
24361 +       {
24362 +           krwlock_done (&ctxt->VpLock);
24363 +           return (EINVAL);
24364 +       }
24365 +    }
24366 +
24367 +    if ((seg = FindSegment (ctxt, process, process)) == NULL || seg->Type != ELAN3_VPSEG_P2P)
24368 +    {
24369 +       krwlock_done (&ctxt->VpLock);
24370 +       return (EINVAL);
24371 +    }
24372 +
24373 +    /* Calculate number of flits in this route */
24374 +    for (nflits = 0; nflits < MAX_FLITS && flits[nflits]; nflits++)
24375 +       ;
24376 +    
24377 +    res = LoadRoute (ctxt->Device, ctxt->RouteTable, process, ProcessToLocation (ctxt, seg, process, NULL).loc_context, nflits, flits);
24378 +
24379 +    krwlock_done (&ctxt->VpLock);
24380 +
24381 +    return (res);
24382 +}
24383 +
24384 +int
24385 +elan3_get_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits)
24386 +{
24387 +    ELAN3_VPSEG *seg;
24388 +    int                res = 0;
24389 +
24390 +    PRINTF1 (ctxt, DBG_VP, "elan3_get_route: vp=%d \n",  process);
24391 +
24392 +    krwlock_write (&ctxt->VpLock);
24393 +
24394 +    if (ctxt->RouteTable == NULL)  /* is there a route table */
24395 +    {
24396 +       krwlock_done (&ctxt->VpLock);
24397 +       return (EINVAL);
24398 +    }
24399 +
24400 +    if ((seg = FindSegment (ctxt, process, process)) != NULL && seg->Type != ELAN3_VPSEG_P2P)
24401 +    {
24402 +       krwlock_done (&ctxt->VpLock);
24403 +       return (EINVAL);
24404 +    }
24405 +    
24406 +    if (seg == NULL)
24407 +    {
24408 +       krwlock_done (&ctxt->VpLock);
24409 +       return (EINVAL);
24410 +    }
24411 +    
24412 +    res = GetRoute (ctxt->Device, ctxt->RouteTable, process, flits);
24413 +    
24414 +    krwlock_done (&ctxt->VpLock);
24415 +
24416 +    return (res);
24417 +}
24418 +
24419 +int
24420 +elan3_reset_route (ELAN3_CTXT *ctxt, int process)
24421 +{
24422 +    E3_uint16     flits[MAX_FLITS];
24423 +
24424 +    PRINTF1 (ctxt, DBG_VP, "elan3_reset_route: vp=%d \n",  process);
24425 +
24426 +    GenerateRoute (&ctxt->Position, flits, process, process, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24427 +    
24428 +    return elan3_load_route(ctxt,process,flits);
24429 +}
24430 +
24431 +int
24432 +ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process)
24433 +{
24434 +    E3_uint16    flits[MAX_FLITS];
24435 +    ELAN3_DEV     *dev = ctxt->Device;
24436 +    int                  res = ESUCCESS;
24437 +    ELAN3_VPSEG   *seg;
24438 +    ELAN3_VPSEG   *aseg;
24439 +    E3_uint64    routeValue;
24440 +
24441 +    krwlock_read (&ctxt->VpLock);
24442 +
24443 +    PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: vp=%d \n",  process);
24444 +
24445 +    if (ctxt->RouteTable == NULL || process < 0 || process >= ctxt->RouteTable->Size)
24446 +    {
24447 +       krwlock_done (&ctxt->VpLock);
24448 +       return (EINVAL);
24449 +    }
24450 +
24451 +    if (! (seg = FindSegment (ctxt, process, process)))
24452 +    {
24453 +       PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: cannot find segment for virtual process %d\n", process);
24454 +       krwlock_done (&ctxt->VpLock);
24455 +       return (EINVAL);
24456 +    }
24457 +    
24458 +    /* check cap. */
24459 +    switch (elan3_validate_cap (ctxt->Device, &seg->SegCapability, ((seg->Type == ELAN3_VPSEG_P2P) ? ELAN_USER_P2P : ELAN_USER_BROADCAST)))
24460 +    {
24461 +    case ELAN_CAP_OK:
24462 +       /* nothing */
24463 +       break;
24464 +
24465 +    case ELAN_CAP_RMS:
24466 +       if ( elan_validate_map(&ctxt->Capability, &seg->SegCapability) != ESUCCESS)
24467 +       {
24468 +           krwlock_done (&ctxt->VpLock);
24469 +           return (EINVAL);
24470 +       }
24471 +       break;
24472 +
24473 +    default:
24474 +       krwlock_done (&ctxt->VpLock);
24475 +       return (EINVAL);
24476 +    }
24477 +
24478 +    BumpUserStat (ctxt, LoadVirtualProcess);
24479 +
24480 +    routeValue = elan3_sdram_readq (dev, ctxt->RouteTable->Table + process * NBYTES_PER_SMALL_ROUTE);
24481 +    if (routeValue & ROUTE_VALID)                              /* Virtual process already */
24482 +    {                                                          /* loaded */
24483 +       krwlock_done (&ctxt->VpLock);
24484 +       return (ESUCCESS);                      
24485 +    }
24486 +    
24487 +    switch (seg->Type)
24488 +    {
24489 +    case ELAN3_VPSEG_P2P:
24490 +       switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
24491 +       {
24492 +       case ELAN_CAP_TYPE_BLOCK:
24493 +       case ELAN_CAP_TYPE_CYCLIC:
24494 +           if ((res = elan_validate_map (&ctxt->Capability,&seg->SegCapability)) == ESUCCESS &&
24495 +               (res = GetRoute(dev, ctxt->RouteTable ,process,  flits)) == ESUCCESS)
24496 +           {
24497 +               if (elan3_route_check(ctxt, flits, ProcessToLocation (ctxt, seg, process, NULL).loc_node))
24498 +                   res = EINVAL;
24499 +               else
24500 +                   ValidateRoute(dev, ctxt->RouteTable, process);
24501 +           }
24502 +           break;
24503 +       default:
24504 +           res = EINVAL;
24505 +           break;
24506 +       }
24507 +       break;
24508 +
24509 +    case ELAN3_VPSEG_BROADCAST:
24510 +       /* Find the segment that this broadcast range spans. */
24511 +       aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
24512 +       
24513 +       if (aseg == NULL || (aseg->Type != ELAN3_VPSEG_P2P) || !(aseg->SegCapability.cap_type & ELAN_CAP_TYPE_BROADCASTABLE))
24514 +       {
24515 +           PRINTF2 (ctxt, DBG_VP, "resolveVirtualProcess: %d -> EINVAL (%s)\n", process, 
24516 +                    (aseg == NULL ? "no segment" : ((seg->Type != ELAN3_VPSEG_P2P) ? "not point to point" :
24517 +                                                    "not broadcastable")));
24518 +           res = EINVAL;
24519 +           break;
24520 +       }
24521 +       
24522 +       switch (aseg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
24523 +       {
24524 +       case ELAN_CAP_TYPE_BLOCK:
24525 +       case ELAN_CAP_TYPE_CYCLIC:
24526 +       {
24527 +           ELAN_LOCATION lowNode  = ProcessToLocation (ctxt,aseg,seg->SegLowProc  , NULL);
24528 +           ELAN_LOCATION highNode = ProcessToLocation (ctxt,aseg,seg->SegHighProc , NULL);
24529 +
24530 +
24531 +           if ((res = elan_validate_map (&ctxt->Capability,&aseg->SegCapability)) == ESUCCESS &&
24532 +               (res=GetRoute(dev, ctxt->RouteTable ,process,  flits)) == ESUCCESS)
24533 +           {
24534 +               if (elan3_route_broadcast_check(ctxt,flits, lowNode.loc_node , highNode.loc_node ) != ELAN3_ROUTE_SUCCESS )
24535 +                   res = EINVAL;
24536 +               else
24537 +                   ValidateRoute(dev, ctxt->RouteTable, process);
24538 +           }
24539 +           break;
24540 +       }
24541 +
24542 +       default:
24543 +           res = EINVAL;
24544 +           break;
24545 +       }
24546 +    default:
24547 +       res  = EINVAL;
24548 +       break;
24549 +    }
24550 +
24551 +    krwlock_done (&ctxt->VpLock);
24552 +    return (res);
24553 +}        
24554 +
24555 +void
24556 +UnloadVirtualProcess (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
24557 +{
24558 +    ELAN3_DEV        *dev  = ctxt->Device;
24559 +    ELAN3_VPSEG      *seg;
24560 +    ELAN_CAPABILITY *scap;
24561 +    int              i;
24562 +
24563 +    for (seg = ctxt->VpSegs; seg; seg = seg->Next)
24564 +    {
24565 +       switch (seg->Type)
24566 +       {
24567 +       case ELAN3_VPSEG_P2P:
24568 +           scap = &seg->SegCapability;
24569 +           
24570 +           if (cap == NULL || ELAN_CAP_MATCH (scap, cap))
24571 +           {
24572 +               PRINTF2 (ctxt, DBG_VP, "unloadvp: segment [%x.%x]\n", 
24573 +                        seg->Process, seg->Process + seg->Entries-1);
24574 +               
24575 +               for (i = 0; i < seg->Entries; i++)
24576 +                   InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
24577 +           }
24578 +           break;
24579 +
24580 +       case ELAN3_VPSEG_BROADCAST:
24581 +           for (i = 0; i < seg->Entries; i++)
24582 +           {
24583 +               ELAN3_VPSEG *aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
24584 +               
24585 +               if (aseg != NULL && ELAN_CAP_MATCH(&aseg->SegCapability, cap))
24586 +               {
24587 +                   PRINTF1 (ctxt, DBG_VP, "unloadvp: broadcast vp %d\n", seg->Process);
24588 +               
24589 +                   InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
24590 +               }
24591 +           }
24592 +       }
24593 +    }
24594 +}
24595 +
24596 +caddr_t
24597 +CapabilityString (ELAN_CAPABILITY *cap)
24598 +{
24599 +#define CAPSTR_LEN     200
24600 +#define NCAPSTRS       4
24601 +    static char       space[CAPSTR_LEN*NCAPSTRS];
24602 +    static int        bufnum;
24603 +    static spinlock_t lock;
24604 +    static int       lockinitialised;
24605 +    int                      num;
24606 +    unsigned long     flags;
24607 +
24608 +    if (! lockinitialised)
24609 +    {
24610 +       spin_lock_init (&lock);
24611 +       lockinitialised = 1;
24612 +    }
24613 +
24614 +    spin_lock_irqsave (&lock, flags);
24615 +    
24616 +    if ((num = ++bufnum) == NCAPSTRS)
24617 +       num = bufnum = 0;
24618 +    spin_unlock_irqrestore (&lock, flags);
24619 +
24620 +    sprintf (space + (num * CAPSTR_LEN), "%4x %4x %4x %4x %4x %4x %4x [%x.%x.%x.%x]", cap->cap_type,
24621 +            cap->cap_lownode, cap->cap_highnode, 
24622 +            cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext,  ELAN_CAP_ENTRIES(cap),
24623 +            cap->cap_userkey.key_values[0],  cap->cap_userkey.key_values[1],
24624 +            cap->cap_userkey.key_values[2],  cap->cap_userkey.key_values[3]);
24625 +
24626 +    return (space + (num * CAPSTR_LEN));
24627 +}
24628 +
24629 +
24630 +/*
24631 + * Local variables:
24632 + * c-file-style: "stroustrup"
24633 + * End:
24634 + */
24635 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/debug.c
24636 ===================================================================
24637 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/debug.c      2004-02-23 16:02:56.000000000 -0500
24638 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/debug.c   2005-07-28 14:52:52.819683144 -0400
24639 @@ -0,0 +1,94 @@
24640 +/*
24641 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
24642 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
24643 + * 
24644 + *    For licensing information please see the supplied COPYING file
24645 + *
24646 + */
24647 +
24648 +#ident "@(#)$Id: debug.c,v 1.16 2004/07/07 11:22:33 addy Exp $"
24649 +/*      $Source: /cvs/master/quadrics/elan4mod/debug.c,v $*/
24650 +
24651 +#include <qsnet/kernel.h>
24652 +
24653 +#include <elan4/debug.h>
24654 +#include <elan4/device.h>
24655 +
24656 +unsigned       elan4_debug           = 0;
24657 +unsigned       elan4_debug_toconsole = 0;
24658 +unsigned       elan4_debug_tobuffer  = DBG_ALL;
24659 +
24660 +unsigned       elan4_debug_display_ctxt;
24661 +unsigned       elan4_debug_ignore_ctxt;
24662 +unsigned       elan4_debug_ignore_type;
24663 +
24664 +void
24665 +elan4_debug_init()
24666 +{
24667 +    if ((elan4_debug & elan4_debug_tobuffer) != 0)
24668 +       qsnet_debug_alloc();
24669 +}
24670 +
24671 +void
24672 +elan4_debug_fini()
24673 +{
24674 +}
24675 +
24676 +void
24677 +elan4_debugf (void *type, int mode, char *fmt,...)
24678 +{
24679 +    char    prefix[128];
24680 +    int     where = 0;
24681 +    va_list ap;
24682 +
24683 +    if ((mode & elan4_debug_tobuffer) != 0 || type == DBG_BUFFER)
24684 +       where |= QSNET_DEBUG_BUFFER;
24685 +    if ((mode & elan4_debug_toconsole) != 0 || type == DBG_CONSOLE)
24686 +       where |= QSNET_DEBUG_CONSOLE;
24687 +
24688 +    if (where == 0)
24689 +       return;
24690 +    
24691 +    if ((unsigned long) type > DBG_NTYPES)
24692 +    {
24693 +       ELAN4_CTXT *ctxt = (ELAN4_CTXT *) type;
24694 +
24695 +        if (elan4_debug_display_ctxt && ctxt->ctxt_num != elan4_debug_display_ctxt)
24696 +            return;
24697 +        if (elan4_debug_ignore_ctxt  && ctxt->ctxt_num == elan4_debug_ignore_ctxt)
24698 +            return;
24699 +
24700 +       sprintf (prefix, "[%08ld.%04d] elan4 (%03x) ", lbolt,  current->pid, ctxt->ctxt_num);
24701 +    }
24702 +    else if ((unsigned long) type == (int) DBG_CONSOLE)
24703 +       prefix[0] = '\0';
24704 +    else
24705 +    {
24706 +       char *what;
24707 +
24708 +       if (elan4_debug_ignore_type & (1 << ((unsigned long) type)))
24709 +           return;
24710 +
24711 +       switch ((unsigned long) type)
24712 +       {
24713 +       case (int) DBG_DEVICE: what = "dev"; break;
24714 +       case (int) DBG_USER:   what = "usr"; break;
24715 +       default:               what = NULL; break;
24716 +       }
24717 +           
24718 +       if (what)
24719 +           sprintf (prefix, "[%08ld.%04d] elan4 [%s] ", lbolt, current->pid, what);
24720 +       else
24721 +           sprintf (prefix, "[%08ld.%04d] elan4 [%3d] ", lbolt, current->pid, (int)(long)type);
24722 +    }
24723 +
24724 +    va_start(ap,fmt);
24725 +    qsnet_vdebugf (where, prefix, fmt, ap);
24726 +    va_end (ap);
24727 +}
24728 +
24729 +/*
24730 + * Local variables:
24731 + * c-file-style: "stroustrup"
24732 + * End:
24733 + */
24734 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/device.c
24735 ===================================================================
24736 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/device.c     2004-02-23 16:02:56.000000000 -0500
24737 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/device.c  2005-07-28 14:52:52.824682384 -0400
24738 @@ -0,0 +1,2916 @@
24739 +/*
24740 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
24741 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
24742 + * 
24743 + *    For licensing information please see the supplied COPYING file
24744 + *
24745 + */
24746 +
24747 +#ident "@(#)$Id: device.c,v 1.87.6.11 2005/03/18 13:48:53 david Exp $"
24748 +/*      $Source: /cvs/master/quadrics/elan4mod/device.c,v $*/
24749 +
24750 +#include <qsnet/kernel.h>
24751 +#include <qsnet/kthread.h>
24752 +
24753 +#include <elan4/sdram.h>
24754 +#include <elan4/debug.h>
24755 +#include <elan4/device.h>
24756 +#include <elan4/commands.h>
24757 +#include <elan4/trtype.h>
24758 +#include <elan4/neterr.h>
24759 +
24760 +#include <elan4/i2c.h>
24761 +#include <elan3/vpd.h>
24762 +
24763 +/* allow this code to compile against an Eagle elanmod */
24764 +#ifdef __ELANMOD_DEVICE_H
24765 +#define ELAN_DEV_OPS           ELANMOD_DEV_OPS
24766 +#define ELAN_DEV_OPS_VERSION   ELANMOD_DEV_OPS_VERSION
24767 +#define elan_dev_register      elanmod_dev_register
24768 +#define elan_dev_deregister    elanmod_dev_deregister
24769 +#endif
24770 +
24771 +/* XXXX configurational defines */
24772 +
24773 +#if defined (CONFIG_MPSAS)
24774 +#define HASH_0_SIZE_VAL                        (12 + 6)
24775 +#define HASH_1_SIZE_VAL                        (2 + 6)
24776 +#define CTXT_TABLE_SHIFT               8
24777 +#define LN2_MAX_CQS                    8               /* 256 */
24778 +#else
24779 +#define HASH_0_SIZE_VAL                        (13 + 6)
24780 +#define HASH_1_SIZE_VAL                        (2 + 6)
24781 +#define CTXT_TABLE_SHIFT               12
24782 +#define LN2_MAX_CQS                    10              /* 1024 */
24783 +#endif
24784 +
24785 +unsigned int elan4_hash_0_size_val       = HASH_0_SIZE_VAL;
24786 +unsigned int elan4_hash_1_size_val       = HASH_1_SIZE_VAL;
24787 +unsigned int elan4_ctxt_table_shift      = CTXT_TABLE_SHIFT;
24788 +unsigned int elan4_ln2_max_cqs           = LN2_MAX_CQS;
24789 +unsigned int elan4_dmaq_highpri_size     = 2;                  /* 8192 entries */
24790 +unsigned int elan4_threadq_highpri_size  = 1;                  /* 1024 entries */
24791 +unsigned int elan4_dmaq_lowpri_size      = 2;                  /* 8192 entries */
24792 +unsigned int elan4_threadq_lowpri_size   = 1;                  /* 1024 entries */
24793 +unsigned int elan4_interruptq_size       = 0;                  /* 1024 entries */
24794 +unsigned int elan4_mainint_punt_loops    = 1;
24795 +unsigned int elan4_mainint_resched_ticks = 0;
24796 +unsigned int elan4_linkport_lock        = 0xbe0fcafe;          /* default link port lock */
24797 +unsigned int elan4_eccerr_recheck        = 1;
24798 +
24799 +static int 
24800 +elan4_op_get_position (void *arg, ELAN_POSITION *ptr)
24801 +{
24802 +    ELAN4_DEV     *dev = (ELAN4_DEV *)arg;
24803 +    ELAN_POSITION  pos;
24804 +
24805 +    elan4_get_position (dev, &pos);
24806 +
24807 +    return copyout (&pos, ptr, sizeof (ELAN_POSITION));
24808 +}
24809 +
24810 +static int 
24811 +elan4_op_set_position (void *arg, unsigned short nodeid, unsigned short numnodes)
24812 +{
24813 +    /* XXXXX 
24814 +
24815 +       ELAN4_DEV *dev = (ELAN4_DEV *) arg;
24816 +
24817 +       compute_position (&pos, nodeid, numnode, num_down_links_value);
24818 +
24819 +       return elan4_set_position (dev, pos);
24820 +    */
24821 +    return EINVAL;
24822 +}
24823 +
24824 +ELAN_DEV_OPS elan4_dev_ops = 
24825 +{
24826 +    elan4_op_get_position,
24827 +    elan4_op_set_position,
24828 +
24829 +    ELAN_DEV_OPS_VERSION
24830 +};
24831 +
24832 +static E4_uint32
24833 +elan4_read_filter (ELAN4_DEV *dev, unsigned networkctx)
24834 +{
24835 +    return (elan4_sdram_readl (dev, dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) + 
24836 +                              offsetof (E4_ContextControlBlock, Filter)));
24837 +}
24838 +
24839 +static void
24840 +elan4_write_filter (ELAN4_DEV *dev, unsigned networkctx, E4_uint32 value)
24841 +{
24842 +    elan4_sdram_writel (dev, (dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) +
24843 +                       offsetof (E4_ContextControlBlock, Filter)), value);
24844 +    pioflush_sdram(dev);
24845 +}
24846 +
24847 +void
24848 +elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg)
24849 +{
24850 +    E4_uint32 setbits  = 0;
24851 +    E4_uint32 intmask  = 0;
24852 +    E4_uint32 haltmask;
24853 +    E4_uint32 next_sched;
24854 +    E4_uint32 next_intmask;
24855 +    unsigned long flags;
24856 +
24857 +    spin_lock_irqsave (&dev->dev_intmask_lock, flags);
24858 +
24859 +    haltmask = (dev->dev_haltop_mask | dev->dev_haltop_active);
24860 +
24861 +    if ((haltmask & INT_DProcHalted) || dev->dev_halt_all_count || dev->dev_halt_dproc_count)
24862 +       setbits |= SCH_DProcHalt;
24863 +    
24864 +    if ((haltmask & INT_TProcHalted) || dev->dev_halt_all_count || dev->dev_halt_tproc_count)
24865 +       setbits |= SCH_TProcHalt;
24866 +
24867 +    if ((haltmask & INT_CProcHalted) || dev->dev_halt_all_count || dev->dev_halt_cproc_count)
24868 +       setbits |= SCH_CProcHalt;
24869 +
24870 +    if ((haltmask & INT_DiscardingLowPri) || dev->dev_discard_all_count || dev->dev_discard_lowpri_count)
24871 +       setbits |= SCH_DiscardLowPriInput;
24872 +    
24873 +    if ((haltmask & INT_DiscardingHighPri) || dev->dev_discard_all_count || dev->dev_discard_highpri_count)
24874 +       setbits |= SCH_DiscardHighPriInput;
24875 +    
24876 +    if (dev->dev_halt_lowpri_count)
24877 +       setbits |= SCH_StopLowPriQueues;
24878 +    
24879 +    if (haltmask & INT_DProcHalted) intmask |= INT_DProcHalted;
24880 +    if (haltmask & INT_TProcHalted) intmask |= INT_TProcHalted;
24881 +    if (haltmask & INT_CProcHalted) intmask |= INT_CProcHalted;
24882 +    if (haltmask & INT_DiscardingLowPri) intmask |= INT_DiscardingLowPri;
24883 +    if (haltmask & INT_DiscardingHighPri) intmask |= INT_DiscardingHighPri;
24884 +
24885 +    next_intmask = (dev->dev_intmask     & ~(INT_Halted | INT_Discarding)) | (intmask & ~intreg);
24886 +    next_sched   = (dev->dev_schedstatus & ~(SCH_Halt | SCH_Discard))      | setbits;
24887 +
24888 +    PRINTF5 (DBG_DEVICE, DBG_REGISTER, "elan4_set_schedstatus: haltmask=%x setbits=%x intmask=%x next_sched=%x next_intmask=%x\n",
24889 +            haltmask, setbits, intmask, next_sched, next_intmask);
24890 +
24891 +    CHANGE_INT_MASK (dev, next_intmask);
24892 +    CHANGE_SCHED_STATUS (dev, next_sched);
24893 +
24894 +    spin_unlock_irqrestore (&dev->dev_intmask_lock, flags);
24895 +}
24896 +
24897 +void
24898 +elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op)
24899 +{
24900 +    unsigned long flags;
24901 +
24902 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24903 +
24904 +    /* add to the end of the halt operations list */
24905 +    list_add_tail (&op->op_link, &dev->dev_haltop_list);
24906 +
24907 +    if ((dev->dev_haltop_mask & op->op_mask) != op->op_mask)
24908 +    {
24909 +       dev->dev_haltop_mask |= op->op_mask;
24910 +       
24911 +       elan4_set_schedstatus (dev, 0);
24912 +    }
24913 +    
24914 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
24915 +}
24916 +
24917 +void
24918 +elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op)
24919 +{
24920 +    unsigned long flags;
24921 +
24922 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
24923 +
24924 +    op->op_cookie = INTOP_ONESHOT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
24925 +
24926 +    list_add_tail (&op->op_link, &dev->dev_intop_list);
24927 +
24928 +    writeq ((op->op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, cq->cq_mapping);
24929 +
24930 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
24931 +}
24932 +
24933 +void
24934 +elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
24935 +{
24936 +    unsigned long flags;
24937 +
24938 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
24939 +
24940 +    op->op_cookie = INTOP_PERSISTENT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
24941 +
24942 +    list_add_tail (&op->op_link, &dev->dev_intop_list);
24943 +
24944 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
24945 +}
24946 +
24947 +void
24948 +elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
24949 +{
24950 +    unsigned long flags;
24951 +
24952 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
24953 +    list_del (&op->op_link);
24954 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
24955 +}
24956 +
24957 +static __inline__ void
24958 +__issue_dma_flushop_cmd (ELAN4_CQ *cq)
24959 +{
24960 +    writeq (DMA_ShMemWrite | RUN_DMA_CMD, cq->cq_mapping);
24961 +    writeq (0 /* cookie */,               cq->cq_mapping);
24962 +    writeq (0 /* vproc */,                cq->cq_mapping);
24963 +    writeq (0 /* srcAddr */,              cq->cq_mapping);
24964 +    writeq (0 /* dstAddr */,              cq->cq_mapping);
24965 +    writeq (0 /* srcEvent */,             cq->cq_mapping);
24966 +    writeq (0 /* dstEvent */,             cq->cq_mapping);
24967 +    writeq (SET_EVENT_CMD,                cq->cq_mapping);
24968 +}
24969 +
24970 +static void
24971 +handle_dma_flushops_intop (ELAN4_DEV *dev, void *arg)
24972 +{
24973 +    unsigned int  hipri        = ((unsigned long) arg & 1);
24974 +    E4_uint64     status       = dev->dev_dma_flushop[hipri].status;
24975 +    ELAN4_CQ     *cq           = dev->dev_dma_flushop[hipri].cq;
24976 +    sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
24977 +    E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
24978 +    E4_uint32     completedPtr = CQ_CompletedPtr(queuePtrs);
24979 +    E4_uint32     size         = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
24980 +    unsigned long flags;
24981 +
24982 +    /*
24983 +     * Since we're called from a main interrupt which was issued through the appropriate
24984 +     * flushcq the command queue descriptor for dma flushing can no longer be in the 
24985 +     * insert cache, nor can it be in the extractor (as it's trapped), hence it is
24986 +     * safe to modify the completed pointer
24987 +     */
24988 +
24989 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
24990 +
24991 +    ASSERT (status != 0);
24992 +
24993 +    /* skip over either the DMA/SETEVENT or just the SETEVENT depending on the trap type */
24994 +    if (CPROC_TrapType (status) == CommandProcDmaQueueOverflow)
24995 +       completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 64) & (size - 1));
24996 +    else
24997 +       completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 8) & (size - 1));
24998 +    
24999 +    elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs) + 4,
25000 +                       ((queuePtrs >> 32) & ~CQ_PtrOffsetMask) | (completedPtr & CQ_PtrOffsetMask));
25001 +    
25002 +    elan4_restartcq (dev, dev->dev_dma_flushop[hipri].cq);
25003 +
25004 +    if (! list_empty (&dev->dev_dma_flushop[hipri].list))
25005 +       __issue_dma_flushop_cmd (dev->dev_dma_flushop[hipri].cq);
25006 +
25007 +    dev->dev_dma_flushop[hipri].status = 0;
25008 +    
25009 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25010 +
25011 +}
25012 +
25013 +static void
25014 +handle_dma_flushops (ELAN4_DEV *dev, E4_uint64 status, int cqnum)
25015 +{
25016 +    unsigned int       hipri  = (cqnum == elan4_cq2num(dev->dev_dma_flushop[1].cq) ? 1 : 0);
25017 +    ELAN4_CQ          *cq     = dev->dev_dma_flushop[hipri].cq;
25018 +    ELAN4_CQ          *flushq = dev->dev_flush_cq[elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1)];
25019 +    struct list_head  *ops;
25020 +    unsigned long      flags;
25021 +    int                       qfull,count;
25022 +    E4_uint64         queuePtrs;
25023 +    LIST_HEAD(list);
25024 +    
25025 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25026 +    
25027 +    ASSERT (cqnum == elan4_cq2num (dev->dev_dma_flushop[hipri].cq));
25028 +    ASSERT (! list_empty (&dev->dev_dma_flushop[hipri].list));
25029 +    ASSERT (dev->dev_dma_flushop[hipri].status == 0);
25030 +    
25031 +    /* remove the whole list */
25032 +    ops = dev->dev_dma_flushop[hipri].list.next;
25033 +
25034 +    list_del_init (&dev->dev_dma_flushop[hipri].list);
25035 +    
25036 +    /* and add it to our local list */
25037 +    list_add_tail (&list, ops);
25038 +    
25039 +    /* now determine whether the queue was full - since it cannot be empty 
25040 +     * then if the front and back pointers are the same then it is full */
25041 +    queuePtrs = hipri ? read_reg64 (dev, DProcHighPriPtrs) : read_reg64 (dev, DProcLowPriPtrs);
25042 +    qfull     = (E4_QueueFrontPointer (queuePtrs) == E4_QueueBackPointer (queuePtrs));
25043 +    
25044 +    if (CPROC_TrapType(status) == CommandProcDmaQueueOverflow && !qfull)
25045 +       printk (" ******* queue overflow trap - but queue not full\n");
25046 +
25047 +    if (qfull && CPROC_TrapType(status) != CommandProcDmaQueueOverflow)
25048 +       printk (" ****** queue full - but not overflow trap : %llx %llx %x\n", 
25049 +               read_reg64 (dev, DProcLowPriPtrs), read_reg64 (dev, DProcHighPriPtrs), CPROC_TrapType(status));
25050 +
25051 +    /* Store the status register, this also indicates that the intop is pending */
25052 +    dev->dev_dma_flushop[hipri].status = status;
25053 +
25054 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25055 +
25056 +    /* Issue a main interrupt command to the appropriate flush command queue,
25057 +     * which will then safely update the completed pointer to skip over the
25058 +     * command which has trapped, also prevent any new commands to be issued
25059 +     * to the command queue.
25060 +     */
25061 +    dev->dev_dma_flushop[hipri].intop.op_function = handle_dma_flushops_intop;
25062 +    dev->dev_dma_flushop[hipri].intop.op_arg      = (void *) (unsigned long) hipri;
25063 +
25064 +    elan4_queue_intop (dev, flushq, &dev->dev_dma_flushop[hipri].intop);
25065 +    
25066 +    /* now execute all operations */
25067 +    for (count = 0; ! list_empty (&list); count++)
25068 +    {
25069 +       ELAN4_DMA_FLUSHOP *op = list_entry (list.next, ELAN4_DMA_FLUSHOP, op_link);
25070 +       
25071 +       list_del (&op->op_link);
25072 +       
25073 +       (*op->op_function) (dev, op->op_arg, qfull);
25074 +    }
25075 +
25076 +    /* finally release the "reasons" for halting */
25077 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25078 +    if ((dev->dev_halt_dproc_count -= count) == 0)
25079 +       elan4_set_schedstatus (dev, 0);
25080 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25081 +       
25082 +    return;
25083 +}
25084 +
25085 +void
25086 +elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri)
25087 +{
25088 +    unsigned long flags;
25089 +
25090 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25091 +
25092 +    if (dev->dev_halt_dproc_count++ == 0)                      /* ensure that the DMA processor cannot */
25093 +       elan4_set_schedstatus (dev, 0);                         /* execute the DMA we issue. */
25094 +
25095 +    if (list_empty (&dev->dev_dma_flushop[hipri].list) && dev->dev_dma_flushop[hipri].status == 0)
25096 +       __issue_dma_flushop_cmd (dev->dev_dma_flushop[hipri].cq);
25097 +       
25098 +    list_add_tail (&op->op_link, &dev->dev_dma_flushop[hipri].list);
25099 +
25100 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25101 +}
25102 +
25103 +static void
25104 +enable_elan_errors (void *arg)
25105 +{
25106 +    ELAN4_DEV *dev = (ELAN4_DEV *) arg;
25107 +
25108 +    ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
25109 +}
25110 +
25111 +#define ERROR_DISABLE_PERIOD   (hz/2)
25112 +#define ERROR_SAMPLE_PERIOD    (hz/10)
25113 +#define ERROR_LIMIT            (100)
25114 +
25115 +static __inline__ void
25116 +check_error_rate (ELAN4_DEV *dev)
25117 +{
25118 +    if (dev->dev_error_time == (lbolt/ERROR_SAMPLE_PERIOD))
25119 +    {
25120 +        if (++dev->dev_errors_per_period >= ERROR_LIMIT && (dev->dev_intmask & INT_ErrorInterrupts))
25121 +       {
25122 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
25123 +           
25124 +           schedule_timer_fn (&dev->dev_error_timeoutid, enable_elan_errors, (void *) dev, ERROR_DISABLE_PERIOD);
25125 +       }
25126 +    }
25127 +    else
25128 +    {
25129 +       dev->dev_error_time        = (lbolt/ERROR_SAMPLE_PERIOD);
25130 +       dev->dev_errors_per_period = 0;
25131 +    }
25132 +}
25133 +
25134 +static __inline__ int
25135 +handle_mainints (ELAN4_DEV *dev, int nticks, int nintr)
25136 +{
25137 +    E4_uint32 nfptr = dev->dev_interruptq_nfptr;
25138 +    E4_uint32 bptr  = read_reg32 (dev, MainIntQueuePtrs.s.Back);
25139 +    E4_uint32 qsize = E4_QueueSize(elan4_interruptq_size);
25140 +    E4_uint32 qmask = qsize - 1;
25141 +    long      tlim  = lbolt + nticks;
25142 +    int       done = 0;
25143 +    unsigned long flags;
25144 +
25145 +    do {
25146 +       int todo  = ((bptr - nfptr) & qmask) / E4_MainIntEntrySize;
25147 +
25148 +       ASSERT (todo > 0);
25149 +
25150 +       PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: fptr %x nfptr %x bptr %x : %d todo\n", 
25151 +                read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr, todo);
25152 +
25153 +       if (nintr >= 0 && (done + todo) > nintr)                /* punt because too many to do in interrupt */
25154 +       {
25155 +           PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: punting (done %d todo %d) (bptr %x fptr %x)\n",
25156 +                    done, todo, bptr, read_reg32 (dev, MainIntQueuePtrs.s.Front));
25157 +
25158 +           return 1;
25159 +       }
25160 +
25161 +       BucketDevStat (dev, s_mainints, todo, MainIntBuckets);
25162 +
25163 +       /* consume all the entries in the queue which we think are there */
25164 +       do {
25165 +           E4_uint64   value = elan4_sdram_readq (dev, nfptr);
25166 +           ELAN4_CTXT *ctxt  = elan4_localctxt (dev, E4_MAIN_INT_CTX (value));
25167 +           E4_uint32   fptr  = nfptr;
25168 +
25169 +           PRINTF2 (DBG_DEVICE, DBG_MAININT, "handle_mainints: process cookie %llx - write fptr=%x\n", value, nfptr);
25170 +
25171 +           if (ctxt == NULL)
25172 +               PRINTF1 (DBG_DEVICE, DBG_INTR, "handle_mainints: context %d invalid\n", E4_MAIN_INT_CTX (value));
25173 +           else
25174 +               ctxt->ctxt_ops->op_interrupt (ctxt, E4_MAIN_INT_COOKIE(value));
25175 +
25176 +           /* compute the next queue front pointer, before updating the front pointer
25177 +            * since we need to ensure that elan4_queue_mainintop doesn't see the queue
25178 +            * as being empty if an extra interrupt is queued in between */
25179 +           dev->dev_interruptq_nfptr = nfptr = (nfptr & ~qmask) | ((nfptr + sizeof (E4_uint64)) & qmask);
25180 +    
25181 +           /* update the queue front pointer, doing this will clear the
25182 +            * interrupt for *all* interrupt cookies which have previously 
25183 +            * been added to the queue */
25184 +           write_reg32 (dev, MainIntQueuePtrs.s.Front, E4_QueueFrontValue (fptr, elan4_interruptq_size));
25185 +           pioflush_reg (dev);
25186 +       } while (bptr != nfptr);
25187 +       
25188 +       /* re-sample the back pointer and if it's different from the previous
25189 +        * queue front pointer, then the queue has something on it again */
25190 +       done += todo;
25191 +       
25192 +       if ((nticks > 0 && ((int) (lbolt - tlim)) > 0))         /* been executing for too long in thread */
25193 +           return 1;
25194 +
25195 +       bptr = read_reg32 (dev, MainIntQueuePtrs.s.Back);
25196 +
25197 +       PRINTF3 (DBG_DEVICE, DBG_MAININT, "handle_mainints: resample : fptr %x nfptr %x bptr %x\n", 
25198 +                read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr);
25199 +
25200 +       /* at this point we've made some space in the interrupt queue,
25201 +        * so check to see if we've got anything to restart */
25202 +       spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25203 +       while (! list_empty (&dev->dev_interruptq_list))
25204 +       {
25205 +           ELAN4_INTOP *op = list_entry (dev->dev_interruptq_list.next, ELAN4_INTOP, op_link);
25206 +           
25207 +           list_del (&op->op_link);
25208 +
25209 +           op->op_function (dev, op->op_arg);
25210 +       }
25211 +       spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25212 +
25213 +    } while (bptr != nfptr);
25214 +
25215 +    return 0;
25216 +}
25217 +
25218 +static void
25219 +elan4_mainint_thread (ELAN4_DEV *dev)
25220 +{
25221 +    unsigned long flags;
25222 +
25223 +    kernel_thread_init ("elan4_mainint");
25224 +    
25225 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25226 +    for (;;)
25227 +    {
25228 +       if (dev->dev_stop_threads)
25229 +           break;
25230 +       
25231 +       if (! (dev->dev_intmask & INT_MainInterrupt))
25232 +       {
25233 +           spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25234 +           
25235 +           if (handle_mainints (dev, elan4_mainint_resched_ticks, -1))
25236 +               BumpDevStat (dev, s_mainint_rescheds);
25237 +
25238 +           spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25239 +           ENABLE_INT_MASK (dev, INT_MainInterrupt);
25240 +       }
25241 +       
25242 +       kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
25243 +    }
25244 +
25245 +    dev->dev_mainint_stopped = 1;
25246 +    kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
25247 +
25248 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25249 +
25250 +    kernel_thread_exit();
25251 +}
25252 +
25253 +void
25254 +elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op)
25255 +{
25256 +    unsigned long flags;
25257 +
25258 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25259 +    if (dev->dev_interruptq_nfptr == read_reg32 (dev, MainIntQueuePtrs.s.Back))
25260 +       op->op_function (dev, op->op_arg);
25261 +    else
25262 +       list_add_tail (&op->op_link, &dev->dev_interruptq_list);
25263 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25264 +}
25265 +
25266 +static __inline__ E4_uint32
25267 +handle_cproc_trap (ELAN4_DEV *dev)
25268 +{
25269 +    E4_uint32   cqptr   = read_reg32 (dev, CommandControl.CommandQueueDescsBase) & E4_QueueDescPtrMask;
25270 +    unsigned    cqnum   = ((cqptr - dev->dev_cqaddr) / sizeof (E4_CommandQueueDesc));
25271 +    sdramaddr_t cqdesc  = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc));
25272 +    E4_uint64   control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
25273 +    E4_uint64   status  = read_reg64 (dev, CProcStatus);
25274 +    ELAN4_CTXT *ctxt    = elan4_localctxt (dev, CQ_Context (control));
25275 +
25276 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "handle_cproc_trap: cqnum=%d status=%016llx control=%016llx TrapType\n", 
25277 +            cqnum, status, control, CPROC_TrapType (status));
25278 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "                   %016llx %016llx %016llx %016llx\n",
25279 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)),
25280 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue)),
25281 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers)),
25282 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control)));
25283 +
25284 +    BumpDevStat (dev, s_cproc_traps);
25285 +
25286 +    ctxt->ctxt_ops->op_cproc_trap (ctxt, status, cqnum);
25287 +
25288 +    return (CPROC_TrapType (status) == CommandProcWaitTrap ? SCH_RestartCProc | SCH_RestartEProc : SCH_RestartCProc);
25289 +}
25290 +
25291 +static __inline__ E4_uint32
25292 +handle_dproc_trap (ELAN4_DEV *dev, int unit)
25293 +{
25294 +    E4_uint64   status  = (unit == 0) ? read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status);
25295 +    E4_uint32   restart = (unit == 0) ? SCH_RestartDma0Proc : SCH_RestartDma1Proc;
25296 +    ELAN4_CTXT *ctxt    = elan4_localctxt (dev, DPROC_Context (status));
25297 +    
25298 +    PRINTF3 (DBG_DEVICE, DBG_INTR, "handle_dproc_trap: unit %d context %d%s\n", unit, DPROC_Context(status),
25299 +            DPROC_PrefetcherFault(status) ? " (prefetcher)" : "");
25300 +
25301 +    if (DPROC_PrefetcherFault (status))
25302 +       restart |= SCH_RestartDmaPrefetchProc;
25303 +                     
25304 +    BumpDevStat (dev, s_dproc_traps);
25305 +
25306 +    ctxt->ctxt_ops->op_dproc_trap (ctxt, status, unit);
25307 +
25308 +    return (restart);
25309 +}
25310 +
25311 +static __inline__ E4_uint32
25312 +handle_eproc_trap (ELAN4_DEV *dev)
25313 +{
25314 +    E4_uint64   status = read_reg64 (dev, EProcStatus);
25315 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, EPROC_Context (status));
25316 +
25317 +    BumpDevStat (dev, s_eproc_traps);
25318 +
25319 +    ctxt->ctxt_ops->op_eproc_trap (ctxt, status);
25320 +
25321 +    return (SCH_RestartEProc);
25322 +}
25323 +
25324 +static __inline__ E4_uint32
25325 +handle_tproc_trap (ELAN4_DEV *dev)
25326 +{
25327 +    E4_uint64   status = read_reg64 (dev, TProcStatus);
25328 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, TPROC_Context (status));
25329 +
25330 +    BumpDevStat (dev, s_tproc_traps);
25331 +
25332 +    ctxt->ctxt_ops->op_tproc_trap (ctxt, status);
25333 +    
25334 +    return (SCH_RestartTProc);
25335 +}
25336 +
25337 +static __inline__ void
25338 +handle_haltints (ELAN4_DEV *dev, E4_uint32 intreg)
25339 +{
25340 +    struct list_head  list   = LIST_HEAD_INIT(list);
25341 +    E4_uint32         mask   = 0;
25342 +    E4_uint32         active = 0;
25343 +    struct list_head *entry;
25344 +    struct list_head *next;
25345 +    unsigned long     flags;
25346 +
25347 +    BumpDevStat (dev, s_haltints);
25348 +
25349 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25350 +
25351 +    list_for_each_safe (entry, next, &dev->dev_haltop_list) {
25352 +       ELAN4_HALTOP *op = list_entry (entry, ELAN4_HALTOP, op_link);
25353 +
25354 +       PRINTF (DBG_DEVICE, DBG_INTR, "handle_haltints: op=%p op_mask=%x intreg=%x\n", op, op->op_mask, intreg);
25355 +
25356 +       if ((op->op_mask & intreg) != op->op_mask)
25357 +           mask |= op->op_mask;
25358 +       else
25359 +       {
25360 +           list_del (&op->op_link);                            /* remove from list */
25361 +           list_add_tail (&op->op_link, &list);                /* add to local list */
25362 +
25363 +           active |= op->op_mask;
25364 +       }
25365 +    }
25366 +
25367 +    ASSERT (dev->dev_haltop_mask == (mask | active));
25368 +
25369 +    dev->dev_haltop_mask = mask;
25370 +
25371 +    if (list_empty (&list))
25372 +       elan4_set_schedstatus (dev, intreg);
25373 +    else
25374 +    {
25375 +       dev->dev_haltop_active = active;
25376 +       spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25377 +
25378 +       while (! list_empty (&list)) 
25379 +       {
25380 +           ELAN4_HALTOP *op = list_entry (list.next, ELAN4_HALTOP, op_link);
25381 +           
25382 +           list_del (&op->op_link);
25383 +
25384 +           (*op->op_function) (dev, op->op_arg);
25385 +       }
25386 +
25387 +       spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25388 +       dev->dev_haltop_active = 0;
25389 +
25390 +       elan4_set_schedstatus (dev, 0);
25391 +    }
25392 +
25393 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25394 +}
25395 +
25396 +static __inline__ E4_uint32
25397 +handle_iproc_trap (ELAN4_DEV *dev, unsigned unit)
25398 +{
25399 +    sdramaddr_t hdroff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
25400 +    E4_uint64   status = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, IProcStatusCntxAndTrType));
25401 +    E4_uint32   filter = elan4_read_filter (dev, IPROC_NetworkContext (status));
25402 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK);
25403 +
25404 +    /*
25405 +     * The context is not valid in the following case :
25406 +     *     ack not been sent AND bad CRC/bad length.
25407 +     *
25408 +     *  NOTE TransCRCStatus and BadLength only valid if NOT an EopTrap.
25409 +     */
25410 +    ASSERT ((IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status))) || IPROC_EOPTrap (status) ||
25411 +           (IPROC_TransCRCStatus (status) == CRC_STATUS_GOOD && !IPROC_BadLength (status)));
25412 +    
25413 +    BumpDevStat (dev, s_iproc_traps);
25414 +
25415 +    ctxt->ctxt_ops->op_iproc_trap (ctxt, status, unit);
25416 +
25417 +    return (SCH_RestartCh0LowPriInput << unit);
25418 +}
25419 +
25420 +void
25421 +handle_pcimemerr (ELAN4_DEV *dev)
25422 +{
25423 +    elan4_pcierror (dev);
25424 +
25425 +    check_error_rate (dev);
25426 +}
25427 +
25428 +void
25429 +handle_sdramint (ELAN4_DEV *dev)
25430 +{
25431 +    E4_uint64 status    = read_reg64 (dev, SDRamECCStatus);
25432 +    E4_uint64 ConfigRegValue = read_reg64 (dev, SDRamConfigReg);
25433 +    char      errstr[200];
25434 +    int              i;
25435 +    int              Found = 0;
25436 +
25437 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_sdramint\n");
25438 +
25439 +    printk ("elan%d: ECC Error %s status=%llx\n",
25440 +           dev->dev_instance, elan4_sdramerr2str (dev, status, ConfigRegValue, errstr), status);
25441 +
25442 +    if (!ECC_UncorrectableErr(status) && !ECC_MultUncorrectErrs(status))
25443 +       printk ("elan%d: ECC error data=%016llx\n", dev->dev_instance, elan4_sdram_readq (dev, ECC_Addr(status)));
25444 +
25445 +    if (ECC_CorrectableErr (status))
25446 +       BumpDevStat (dev, s_correctable_errors);
25447 +    if (ECC_MultCorrectErrs (status))
25448 +       BumpDevStat (dev, s_multiple_errors);
25449 +
25450 +    if (ECC_UncorrectableErr(status))
25451 +       panic ("elan%d: uncorrectable ECC error\n", dev->dev_instance);
25452 +    if (ECC_MultUncorrectErrs(status))
25453 +       panic ("elan%d: muliple uncorrectable ECC error\n", dev->dev_instance);
25454 +    
25455 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_SDRAM_ERROR);
25456 +
25457 +    /*
25458 +     * Now try to test for a read/write error type.
25459 +     * This can only be done if it was a correctable error as an uncorrectable error might lockup the node.
25460 +     * It should not be attempted if the data is in the dcache because fetching again would not generate an
25461 +     * error even if the problem was a read, and flushing the cache line would fix a write problem.
25462 +     * Reading the same location again should cause a new error if the problem was caused by a bad write.
25463 +     */
25464 +    if (elan4_eccerr_recheck &&
25465 +       (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA) &&
25466 +        ECC_CorrectableErr(status) && !ECC_UncorrectableErr(status))
25467 +    {
25468 +       E4_uint64 status2;
25469 +       E4_uint64 Addr = ECC_Addr(status) & ~(E4_CACHELINE_SIZE-1);
25470 +       E4_uint32 SetIndex = (Addr >> 6) & ~(E4_NumCacheLines-1);
25471 +       int       InCache = 0;
25472 +
25473 +       /* check the cache tags to see if the data has been read into a cache line. */
25474 +       for (i=0; i<E4_NumCacheSets; i++)
25475 +          if (((E4_uint32)__elan4_readq (dev, dev->dev_regs + offsetof(E4_Registers, Tags.Tags[i][SetIndex].Value)) & 0x7fffe000) == (Addr & 0x7fffe000))
25476 +          {
25477 +              InCache = 1;
25478 +              break;
25479 +          }
25480 +
25481 +       if (InCache == 0)
25482 +       {
25483 +           printk ("elan%d: checking if ECC error was read or write\n", dev->dev_instance);
25484 +
25485 +           /* Now read and throw away the answer. A read of a word will schedule a block read of sdram */
25486 +           elan4_sdram_readq (dev, Addr);
25487 +           status2 = read_reg64 (dev, SDRamECCStatus);
25488 +           if ((Addr == (ECC_Addr(status2) & ~(E4_CACHELINE_SIZE-1))) && ECC_CorrectableErr(status2))  // Write error.
25489 +           {
25490 +               status = (status & ~0x0030000000000000ULL) | 0x0010000000000000ULL;
25491 +               PULSE_SYSCONTROL (dev, CONT_CLEAR_SDRAM_ERROR);
25492 +           }
25493 +           else
25494 +               status = (status & ~0x0030000000000000ULL) | 0x0020000000000000ULL;
25495 +       }
25496 +       else
25497 +           status = status | 0x0030000000000000ULL;
25498 +    }
25499 +    else
25500 +       status &= ~0x0030000000000000ULL;
25501 +
25502 +    /* search for this error already being logged */
25503 +    for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i >= 0; i--)
25504 +        if ((dev->dev_sdramerrs[i].EccStatus == status) && (dev->dev_sdramerrs[i].ConfigReg == ConfigRegValue))
25505 +       {
25506 +            Found = 1;
25507 +           dev->dev_sdramerrs[i].ErrorCount += 1; // Keep a count.
25508 +           break;
25509 +       }
25510 +
25511 +    /* stash the status for /proc */
25512 +    if (!Found)
25513 +    {
25514 +       for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i > 0; i--)
25515 +           dev->dev_sdramerrs[i] = dev->dev_sdramerrs[i-1];
25516 +       dev->dev_sdramerrs[0].EccStatus = status;
25517 +       dev->dev_sdramerrs[0].ConfigReg = ConfigRegValue;
25518 +       dev->dev_sdramerrs[0].ErrorCount = 1; // First error
25519 +    }
25520 +    
25521 +    check_error_rate (dev);
25522 +}
25523 +
25524 +static void
25525 +clear_linkerr_led (void *arg)
25526 +{
25527 +    ELAN4_DEV *dev = (ELAN4_DEV *) arg;
25528 +
25529 +    write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError);
25530 +}
25531 +
25532 +void
25533 +handle_linkerror (ELAN4_DEV *dev)
25534 +{
25535 +    E4_uint32 LinkState;
25536 +    E4_uint32 CurrState = read_reg32 (dev, LinkControlReg);
25537 +
25538 +    /* Set for reading errors. */
25539 +    write_reg32 (dev, LinkControlReg,
25540 +                 (CurrState = CurrState & ~((LCONT_TEST_CONTROL_MASK << LCONT_TEST_CONTROL_SHIFT) |
25541 +                                           (LCONT_TEST_VALUE_MASK << LCONT_TEST_VALUE_SHIFT))));
25542 +    LinkState = LCONT_LINK_STATE(CurrState = read_reg32 (dev, LinkControlReg));
25543 +
25544 +#ifdef DEBUG
25545 +    {
25546 +       E4_uint8 ErrorMsg[256], DataErrorVal[64];
25547 +
25548 +       strcpy (ErrorMsg, "handle_linkerror:");
25549 +       if (LinkState & LS_LockError)    strcat (ErrorMsg, " LockError");
25550 +       if (LinkState & LS_DeskewError)  strcat (ErrorMsg, " DeskewError");
25551 +       if (LinkState & LS_PhaseError)   strcat (ErrorMsg, " PhaseError");
25552 +       if (LinkState & LS_DataError)
25553 +       {
25554 +           E4_uint32 error[4];
25555 +           E4_uint32 i;
25556 +           strcat (ErrorMsg, " DataError");
25557 +           /* Errors */
25558 +           for(i = LRS_ErrorVal8to0; i <= LRS_ErrorVal35to27; i++)
25559 +           {
25560 +               write_reg32 (dev, LinkControlReg,
25561 +                            CurrState | LCONT_TEST_VALUE(i) | (LCONT_READ_STATE << LCONT_TEST_CONTROL_SHIFT));
25562 +               error[i - LRS_ErrorVal8to0] = LCONT_LINK_STATE(read_reg32 (dev, LinkControlReg));
25563 +           }
25564 +           sprintf (DataErrorVal, " Link State Error Val: %09llx %03x %03x %03x %03x", 
25565 +                    (unsigned long long) ((error[0] & 0x1ffUL) | ((error[1] & 0x1ffUL) << 9)  |
25566 +                                 ((error[2] & 0x1ffUL) << 18) | ((error[3] & 0x1ffUL) << 27)),
25567 +                    error[3], error[2], error[1], error[0]);
25568 +           strcat (ErrorMsg, DataErrorVal);
25569 +       }
25570 +       if (LinkState & LS_FifoOvFlow0)  strcat (ErrorMsg, " FifoOvFlow0");
25571 +       if (LinkState & LS_FifoOvFlow1)  strcat (ErrorMsg, " FifoOvFlow1");
25572 +       if (LinkState & LS_Mod45Changed)         strcat (ErrorMsg, " Mod45Changed");
25573 +       if (LinkState & LS_PAckNotSeenError) strcat (ErrorMsg, " PAckNotSeenError");
25574 +       strcat (ErrorMsg, "\n");
25575 +       PRINTF0 (DBG_DEVICE, DBG_INTR, ErrorMsg);
25576 +    }
25577 +#endif
25578 +
25579 +    BumpDevStat (dev, s_link_errors);
25580 +    
25581 +    if (LinkState & LS_LockError)       BumpDevStat (dev, s_lock_errors);
25582 +    if (LinkState & LS_DeskewError)     BumpDevStat (dev, s_deskew_errors);
25583 +    if (LinkState & LS_PhaseError)      BumpDevStat (dev, s_phase_errors);
25584 +    if (LinkState & LS_DataError)       BumpDevStat (dev, s_data_errors);
25585 +    if (LinkState & LS_FifoOvFlow0)     BumpDevStat (dev, s_fifo_overflow0);
25586 +    if (LinkState & LS_FifoOvFlow1)     BumpDevStat (dev, s_fifo_overflow1);
25587 +    if (LinkState & LS_Mod45Changed)    BumpDevStat (dev, s_mod45changed);
25588 +    if (LinkState & LS_PAckNotSeenError) BumpDevStat (dev, s_pack_not_seen);
25589 +
25590 +    PULSE_SCHED_RESTART (dev, SCH_ClearLinkErrorInt);
25591 +    
25592 +    /* schedule a timer to clear the link error LED, so that it stays on 
25593 +     * for a second for every link error that occurs */
25594 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && !timer_fn_queued (&dev->dev_linkerr_timeoutid))
25595 +       schedule_timer_fn (&dev->dev_linkerr_timeoutid, clear_linkerr_led, (void *) dev, HZ);
25596 +
25597 +    /*
25598 +     * Signal the link error to the switch by
25599 +     * enabling the INT_LinkPortKeyFail bit.
25600 +     * Always clear the error bit as the switch
25601 +     * might have produced a spurious "ack" ...
25602 +     */
25603 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_LINKPORT_INT);
25604 +
25605 +    if (dev->dev_linkerr_signalled == 0)
25606 +       dev->dev_linkerr_signalled = 1;
25607 +    else
25608 +       dev->dev_linkerr_signalled = 2;
25609 +    
25610 +    ENABLE_INT_MASK (dev, INT_LinkPortKeyFail);
25611 +      
25612 +    check_error_rate (dev);
25613 +}
25614 +
25615 +void
25616 +handle_linkportkeyfail (ELAN4_DEV *dev)
25617 +{
25618 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_linkportkeyfail\n");
25619 +
25620 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_LINKPORT_INT);
25621 +
25622 +    if (! dev->dev_linkerr_signalled)
25623 +    {
25624 +       /* Hmmm - they're not playing ball */
25625 +       BumpDevStat (dev, s_linkport_keyfail);
25626 +
25627 +       DISABLE_INT_MASK (dev, INT_LinkPortKeyFail);
25628 +    }
25629 +    else
25630 +    {
25631 +       /* If more link errors have occurred since we
25632 +        * signalled the error, then leave it signalled. */
25633 +       if (--dev->dev_linkerr_signalled == 0)
25634 +           DISABLE_INT_MASK (dev, INT_LinkPortKeyFail);
25635 +    }
25636 +}
25637 +
25638 +
25639 +static __inline__ void
25640 +__elan4_4msi0 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25641 +{
25642 +    unsigned long flags;
25643 +
25644 +    if (intreg & intmask & INT_MainInterrupt)
25645 +    {
25646 +       DISABLE_INT_MASK (dev, INT_MainInterrupt);
25647 +
25648 +       if (handle_mainints (dev, -1, elan4_mainint_punt_loops) == 0)
25649 +           ENABLE_INT_MASK (dev, INT_MainInterrupt);
25650 +       else
25651 +       {
25652 +           BumpDevStat (dev, s_mainint_punts);
25653 +           
25654 +           spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25655 +           kcondvar_wakeupone (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
25656 +           spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25657 +       }
25658 +    }
25659 +}
25660 +
25661 +static __inline__ void
25662 +__elan4_4msi1 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25663 +{
25664 +    E4_uint32 restart = 0;
25665 +
25666 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi1: %x\n", intreg);
25667 +    
25668 +    spin_lock (&dev->dev_trap_lock);
25669 +    
25670 +    if (intreg & intmask & INT_CProc)
25671 +       restart |= handle_cproc_trap (dev);
25672 +    if (intreg & intmask & INT_EProc) 
25673 +       restart |= handle_eproc_trap (dev);
25674 +    if (intreg & intmask & INT_Dma0Proc) 
25675 +       restart |= handle_dproc_trap (dev, 0);
25676 +    if (intreg & intmask & INT_Dma1Proc) 
25677 +       restart |= handle_dproc_trap (dev, 1);
25678 +    if (intreg & intmask & INT_TProc)
25679 +       restart |= handle_tproc_trap (dev);
25680 +    
25681 +    PULSE_SCHED_RESTART (dev, restart);
25682 +    
25683 +    spin_unlock (&dev->dev_trap_lock);
25684 +    
25685 +    if (intreg & (INT_Halted|INT_Discarding))
25686 +       handle_haltints (dev, intreg);
25687 +}
25688 +
25689 +static __inline__ void
25690 +__elan4_4msi2 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25691 +{
25692 +    E4_uint32 restart = 0;
25693 +
25694 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi2: %x\n", intreg);
25695 +    
25696 +    spin_lock (&dev->dev_trap_lock);
25697 +    if (intreg & intmask & INT_IProcCh0LowPri)
25698 +       restart |= handle_iproc_trap (dev, 0);
25699 +    
25700 +    if (intreg & intmask & INT_IProcCh1LowPri)
25701 +       restart |= handle_iproc_trap (dev, 1);
25702 +    
25703 +    if (intreg & intmask & INT_IProcCh0HighPri)
25704 +       restart |= handle_iproc_trap (dev, 2);
25705 +    
25706 +    if (intreg & intmask & INT_IProcCh1HighPri)
25707 +       restart |= handle_iproc_trap (dev, 3);
25708 +    
25709 +    PULSE_SCHED_RESTART (dev, restart);
25710 +    
25711 +    spin_unlock (&dev->dev_trap_lock);
25712 +}
25713 +
25714 +static __inline__ void
25715 +__elan4_4msi3 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
25716 +{
25717 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi3: %x\n", intreg);
25718 +    
25719 +    if (intreg & intmask & INT_PciMemErr)
25720 +       handle_pcimemerr (dev);
25721 +    
25722 +    if (intreg & intmask & INT_SDRamInt)
25723 +       handle_sdramint (dev);
25724 +    
25725 +    if (intreg & intmask & INT_LinkError)
25726 +       handle_linkerror (dev);
25727 +    
25728 +    if (intreg & intmask & INT_LinkPortKeyFail)
25729 +       handle_linkportkeyfail (dev);
25730 +}
25731 +
25732 +int
25733 +elan4_1msi0 (ELAN4_DEV *dev)
25734 +{
25735 +    E4_uint32 intmask = dev->dev_intmask;
25736 +    E4_uint32 intreg;
25737 +
25738 +    if (intmask == 0 || ((intreg = read_reg32 (dev, InterruptReg)) & intmask) == 0)
25739 +       return (0);
25740 +
25741 +    BumpDevStat (dev, s_interrupts);
25742 +    
25743 +    do {
25744 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "elan4_1msi0: %x\n", intreg);
25745 +
25746 +       if (intreg & intmask & INT_MSI0)
25747 +           __elan4_4msi0(dev, intreg, intmask);
25748 +       if (intreg & intmask & INT_MSI1)
25749 +           __elan4_4msi1(dev, intreg, intmask);
25750 +       if (intreg & intmask & INT_MSI2)
25751 +           __elan4_4msi2(dev, intreg, intmask); 
25752 +       if (intreg & intmask & INT_MSI3)
25753 +           __elan4_4msi3(dev, intreg, intmask);
25754 +
25755 +       if (intreg & INT_LinkPortKeyFail)
25756 +           handle_linkportkeyfail (dev);
25757 +
25758 +       /* must ensure that the read of the interrupt mask
25759 +        * completes before the read of the interrupt register
25760 +        * since the main interrupt thread clears its interrupt
25761 +        * and then re-enables it in the interrupt mask. */
25762 +       intmask = dev->dev_intmask;
25763 +       mb();
25764 +       intreg = read_reg32 (dev, InterruptReg);
25765 +
25766 +    } while ((intreg & intmask) != 0);
25767 +
25768 +    return (1);
25769 +}
25770 +
25771 +/* local context management */
25772 +int
25773 +elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops)
25774 +{
25775 +    unsigned long flags;
25776 +    int tbl;
25777 +
25778 +    ctxt->ctxt_dev = dev;
25779 +    ctxt->ctxt_ops = ops;
25780 +
25781 +    INIT_LIST_HEAD (&ctxt->ctxt_cqalist);
25782 +    spin_lock_init (&ctxt->ctxt_mmulock);
25783 +
25784 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
25785 +    {
25786 +       KMEM_ZALLOC (ctxt->ctxt_mmuhash[tbl], ELAN4_HASH_ENTRY **,  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *), 1);
25787 +       
25788 +       if (ctxt->ctxt_mmuhash[tbl] == NULL)
25789 +       {
25790 +           if (tbl != 0)
25791 +               KMEM_FREE (ctxt->ctxt_mmuhash[0], dev->dev_hashsize[0] * sizeof (ELAN4_HASH_ENTRY *));
25792 +           spin_lock_destroy (&ctxt->ctxt_mmulock);
25793 +           return (-ENOMEM);
25794 +       }
25795 +    }
25796 +
25797 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25798 +
25799 +    if ((ctxt->ctxt_num = bt_freebit (dev->dev_ctxmap, (1 << dev->dev_ctxtableshift))) >= 0)
25800 +    {
25801 +       /* chain onto the lists of all contexts */
25802 +       list_add (&ctxt->ctxt_link, &dev->dev_ctxt_list);
25803 +
25804 +       BT_SET (dev->dev_ctxmap, ctxt->ctxt_num);
25805 +    }
25806 +    
25807 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25808 +
25809 +    return (ctxt->ctxt_num < 0 ? -ENOMEM : 0);
25810 +}
25811 +
25812 +void
25813 +elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt)
25814 +{
25815 +    unsigned long flags;
25816 +    int tbl;
25817 +
25818 +    /* remove from list of contexts */
25819 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25820 +
25821 +    list_del (&ctxt->ctxt_link);
25822 +
25823 +    BT_CLEAR (dev->dev_ctxmap, ctxt->ctxt_num);
25824 +
25825 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25826 +
25827 +    spin_lock_destroy (&ctxt->ctxt_info_lock);
25828 +
25829 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
25830 +       KMEM_FREE (ctxt->ctxt_mmuhash[tbl],  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *));
25831 +
25832 +    spin_lock_destroy (&ctxt->ctxt_mmulock);
25833 +}
25834 +
25835 +ELAN4_CTXT *
25836 +elan4_localctxt (ELAN4_DEV *dev, unsigned num)
25837 +{
25838 +    struct list_head *entry;
25839 +    unsigned long flags;
25840 +
25841 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25842 +
25843 +    list_for_each (entry, &dev->dev_ctxt_list) {
25844 +       ELAN4_CTXT *ctxt = list_entry (entry, ELAN4_CTXT, ctxt_link);
25845 +       
25846 +       if (ctxt->ctxt_num == num)
25847 +       {
25848 +           spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25849 +           return (ctxt);
25850 +       }
25851 +    }
25852 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25853 +
25854 +    return ((ELAN4_CTXT *) NULL);
25855 +}
25856 +
25857 +ELAN4_CTXT *
25858 +elan4_networkctxt (ELAN4_DEV *dev, unsigned num)
25859 +{
25860 +    E4_uint32 filter = elan4_read_filter (dev, num);
25861 +    
25862 +    if ((filter & E4_FILTER_CONTEXT_MASK) == INVALID_CONTEXT)
25863 +       return NULL;
25864 +    else
25865 +       return elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK);
25866 +}
25867 +
25868 +/* network context management */
25869 +int
25870 +elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum)
25871 +{
25872 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
25873 +    int               res = 0;
25874 +    E4_uint32         filter;
25875 +    unsigned long     flags;
25876 +    
25877 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
25878 +    
25879 +    filter = elan4_read_filter (dev, ctxnum);
25880 +    if ((filter & E4_FILTER_CONTEXT_MASK) != INVALID_CONTEXT)
25881 +    {
25882 +       PRINTF2 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d filter=%x -> EBUSY\n", ctxnum, filter);
25883 +       res = -EBUSY;
25884 +    }
25885 +    else
25886 +    {
25887 +       PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d - SUCCESS\n", ctxnum);
25888 +
25889 +       elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | E4_FILTER_DISCARD_ALL);
25890 +       PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
25891 +    }
25892 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
25893 +    
25894 +    return (res);
25895 +}
25896 +
25897 +void
25898 +elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum)
25899 +{
25900 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25901 +
25902 +    PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_detach_filter: detach from network context %d\n", ctxnum);
25903 +           
25904 +    elan4_write_filter (dev, ctxnum, INVALID_CONTEXT | E4_FILTER_DISCARD_ALL);
25905 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
25906 +}
25907 +
25908 +void
25909 +elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state)
25910 +{
25911 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25912 +
25913 +    PRINTF6 (ctxt, DBG_NETWORK_CTX, "elan4_set_filter: set filter state %x for network context %d <%s%s%s%s>\n", state, ctxnum,
25914 +            (state & E4_FILTER_DISCARD_ALL) ? "discard,"  : "",
25915 +            (state & E4_FILTER_ACKOK_ALL)   ? "ack-ok,"   : "",
25916 +            (state & E4_FILTER_HIGH_PRI)    ? "high-pri," : "",
25917 +            (state & E4_FILTER_STATS)       ? "stats,"    : "");
25918 +           
25919 +    elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | state);
25920 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
25921 +}
25922 +
25923 +void
25924 +elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl)
25925 +{
25926 +    ELAN4_DEV *dev   = ctxt->ctxt_dev;
25927 +    E4_uint32  value = tbl ? (E4_VPT_VALID | E4_VPT_VALUE(tbl->tbl_entries, tbl->tbl_size)) : 0;
25928 +    
25929 +    /* and insert into the vp table */
25930 +    elan4_sdram_writel (dev, (dev->dev_ctxtable + (ctxt->ctxt_num * sizeof (E4_ContextControlBlock)) +
25931 +                       offsetof (E4_ContextControlBlock, VirtualProcessTable)), value);
25932 +    pioflush_sdram(dev);
25933 +
25934 +    PULSE_SYSCONTROL (dev, CONT_ROUTE_FLUSH);
25935 +}
25936 +
25937 +/* command queue management */
25938 +ELAN4_CQA *
25939 +elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx)
25940 +{
25941 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25942 +    struct list_head *el;
25943 +
25944 +    spin_lock (&dev->dev_cqlock);
25945 +    list_for_each (el, &ctxt->ctxt_cqalist) {
25946 +       ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
25947 +       
25948 +       if (cqa->cqa_idx == idx)
25949 +       {
25950 +           cqa->cqa_ref++;
25951 +
25952 +           spin_unlock (&dev->dev_cqlock);
25953 +           return cqa;
25954 +       }
25955 +    }
25956 +    spin_unlock (&dev->dev_cqlock);
25957 +    return NULL;
25958 +}
25959 +
25960 +void
25961 +elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx)
25962 +{
25963 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
25964 +    struct list_head *el, *nel;
25965 +
25966 +    spin_lock (&dev->dev_cqlock);
25967 +    list_for_each_safe (el, nel, &ctxt->ctxt_cqalist) {
25968 +       ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
25969 +       
25970 +       if (cqa->cqa_idx == idx)
25971 +       {
25972 +           if (--cqa->cqa_ref || bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1)
25973 +               spin_unlock (&dev->dev_cqlock);
25974 +           else
25975 +           {
25976 +               list_del (&cqa->cqa_link);
25977 +               
25978 +               BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx);
25979 +               BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA);
25980 +               spin_unlock (&dev->dev_cqlock);
25981 +               
25982 +               KMEM_FREE (cqa, sizeof (ELAN4_CQA));
25983 +           }
25984 +           return;
25985 +       }
25986 +    }
25987 +    spin_unlock (&dev->dev_cqlock);
25988 +
25989 +    printk ("elan4_putcqa: idx %d not found\n", idx);
25990 +    BUG();
25991 +}
25992 +
25993 +static ELAN4_CQ *
25994 +elan4_getcq (ELAN4_CTXT *ctxt, unsigned int type)
25995 +{
25996 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
25997 +    ELAN4_CQA        *cqa;
25998 +    struct list_head *el;
25999 +    int                      cidx, didx;
26000 +
26001 +    spin_lock (&dev->dev_cqlock);
26002 +    list_for_each (el, &ctxt->ctxt_cqalist) {
26003 +       cqa = list_entry (el, ELAN4_CQA, cqa_link);
26004 +
26005 +       if (cqa->cqa_type == type && (cidx = bt_freebit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA)) >=0)
26006 +       {
26007 +           BT_SET (cqa->cqa_bitmap, cidx);
26008 +           
26009 +           spin_unlock (&dev->dev_cqlock);
26010 +           return &cqa->cqa_cq[cidx];
26011 +       }
26012 +    }
26013 +    spin_unlock (&dev->dev_cqlock);
26014 +
26015 +    /* allocate a new cqa and its chunk of command queue descriptors */
26016 +    KMEM_ZALLOC (cqa, ELAN4_CQA *, sizeof (ELAN4_CQA), 1);
26017 +    if (cqa == NULL)
26018 +       return NULL;
26019 +
26020 +    spin_lock (&dev->dev_cqlock);
26021 +    cidx = bt_freebit (ctxt->ctxt_cqamap, ELAN4_MAX_CQA);
26022 +
26023 +    /* On architectures which have MTRR registers for write-combining
26024 +     * the top command queues from dev->dev_cqreorder upwards are
26025 +     * used for reordered queues.  Without MTRR registers any page
26026 +     * sized group can use write combining through the ptes. */
26027 +    if (dev->dev_cqreorder == 0)
26028 +       didx = bt_freebit (dev->dev_cqamap, dev->dev_cqcount/ELAN4_CQ_PER_CQA);
26029 +    else
26030 +    {
26031 +       if ((type & CQ_Reorder) != 0)
26032 +           didx = bt_nextbit (dev->dev_cqamap, dev->dev_cqcount/ELAN4_CQ_PER_CQA, (dev->dev_cqreorder/ELAN4_CQ_PER_CQA) - 1, 0);
26033 +       else
26034 +           didx = bt_freebit (dev->dev_cqamap, dev->dev_cqreorder/ELAN4_CQ_PER_CQA);
26035 +    }
26036 +
26037 +    if (cidx < 0 || didx < 0)
26038 +    {
26039 +       spin_unlock (&dev->dev_cqlock);
26040 +       KMEM_FREE (cqa, sizeof (ELAN4_CQA));
26041 +       return NULL;
26042 +    }
26043 +
26044 +    BT_SET (ctxt->ctxt_cqamap, cidx);
26045 +    BT_SET (dev->dev_cqamap, didx);
26046 +
26047 +    cqa->cqa_idx   = cidx;
26048 +    cqa->cqa_type  = type;
26049 +    cqa->cqa_cqnum = (didx * ELAN4_CQ_PER_CQA);
26050 +    
26051 +    list_add_tail (&cqa->cqa_link, &ctxt->ctxt_cqalist);
26052 +    
26053 +    /* initialise the cqa struct */
26054 +    for (cidx = 0; cidx < ELAN4_CQ_PER_CQA; cidx++)
26055 +    {
26056 +       cqa->cqa_cq[cidx].cq_idx = cidx;
26057 +       cqa->cqa_cq[cidx].cq_cqa = cqa;
26058 +    }
26059 +
26060 +    /* no mappings yet */
26061 +    cqa->cqa_ref = 0;
26062 +
26063 +    /* we're going to return entry zero */
26064 +    BT_SET (cqa->cqa_bitmap, 0);
26065 +    spin_unlock (&dev->dev_cqlock);
26066 +    
26067 +    return &cqa->cqa_cq[0];
26068 +}
26069 +
26070 +static void
26071 +elan4_putcq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq)
26072 +{
26073 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
26074 +    ELAN4_CQA        *cqa = cq->cq_cqa;
26075 +
26076 +    spin_lock (&dev->dev_cqlock);
26077 +
26078 +    BT_CLEAR (cqa->cqa_bitmap, cq->cq_idx);
26079 +
26080 +    if (bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1 || cqa->cqa_ref)
26081 +       spin_unlock (&dev->dev_cqlock);
26082 +    else
26083 +    {
26084 +       list_del (&cqa->cqa_link);
26085 +       
26086 +       BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx);
26087 +       BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA);
26088 +       spin_unlock (&dev->dev_cqlock);
26089 +       
26090 +       KMEM_FREE (cqa, sizeof (ELAN4_CQA));
26091 +    }
26092 +}
26093 +
26094 +ELAN4_CQ *
26095 +elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned perm, unsigned cqtype)
26096 +{
26097 +    ELAN4_DEV   *dev = ctxt->ctxt_dev;
26098 +    ELAN4_CQ    *cq;
26099 +    int         cqnum;
26100 +    sdramaddr_t cqdesc;
26101 +    unsigned    offset;
26102 +    E4_uint64   value;
26103 +
26104 +    if ((cq = elan4_getcq (ctxt, cqtype)) == NULL)
26105 +       return NULL;
26106 +
26107 +    cqnum = elan4_cq2num(cq);
26108 +    
26109 +    cq->cq_space = elan4_sdram_alloc (dev, CQ_Size(cqsize));
26110 +    if (cq->cq_space == (virtaddr_t) 0)
26111 +    {
26112 +       elan4_putcq (ctxt, cq);
26113 +       return (NULL);
26114 +    }
26115 +
26116 +    cq->cq_size   = cqsize;
26117 +    cq->cq_perm   = perm;
26118 +    
26119 +    /* and finally initialise the command queue descriptor */
26120 +    cqdesc = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc));
26121 +
26122 +    value  = CQ_QueuePtrsValue (cqsize, cq->cq_space, cq->cq_space);
26123 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26124 +       value |= ((cqtype & CQ_Priority) ? CQ_RevA_Priority : 0);
26125 +    else
26126 +       value |= (((cqtype & CQ_Priority) ? CQ_RevB_Priority : 0) | 
26127 +                 ((cqtype & CQ_Reorder)  ? CQ_RevB_ReorderingQueue : CQ_RevB_32bitWriteQueue));
26128 +
26129 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs), value);
26130 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue), 0);
26131 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), 0);
26132 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control), CQ_ControlValue (ctxt->ctxt_num, 2, perm));
26133 +    pioflush_sdram (dev);
26134 +
26135 +    offset = (cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize;
26136 +
26137 +    cq->cq_mapping = elan4_map_device (dev, ELAN4_BAR_REGISTERS, (offset & ~(PAGE_SIZE-1)), 
26138 +                                      PAGE_SIZE, &cq->cq_handle) + (offset & (PAGE_SIZE-1));
26139 +#ifdef CONFIG_MPSAS
26140 +    if (ctxt == &dev->dev_ctxt)
26141 +       return (cq);
26142 +#endif
26143 +
26144 +    elan4_sdram_flushcache (dev, cq->cq_space, CQ_Size(cqsize));
26145 +
26146 +    return (cq);
26147 +}
26148 +    
26149 +void
26150 +elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq)
26151 +{
26152 +    ELAN4_DEV *dev    = ctxt->ctxt_dev;
26153 +    unsigned   offset = (elan4_cq2num(cq) + dev->dev_cqoffset) * CQ_CommandMappingSize;
26154 +
26155 +    elan4_flushcq (dev, cq);
26156 +
26157 +    elan4_unmap_device (dev, cq->cq_mapping - (offset & (PAGE_SIZE-1)), PAGE_SIZE, &cq->cq_handle);
26158 +    elan4_sdram_free (dev, cq->cq_space, CQ_Size (cq->cq_size));
26159 +
26160 +    elan4_putcq (ctxt, cq);
26161 +}
26162 +
26163 +void
26164 +elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
26165 +{
26166 +    sdramaddr_t   cqdesc = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
26167 +    int           hipri;
26168 +    unsigned long flags;
26169 +    
26170 +    PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restarting cq %p\n", cq);
26171 +    
26172 +    spin_lock_irqsave (&dev->dev_requeue_lock, flags);
26173 +
26174 +    while (read_reg32 (dev, CommandControl.CommandRequeuePtr) & E4_CommandRequeueBusy)
26175 +       ;
26176 +    
26177 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26178 +       hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevA_Priority) != 0;
26179 +    else
26180 +       hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevB_Priority) != 0;
26181 +    
26182 +    if (hipri)
26183 +    {
26184 +       PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as high pri\n", elan4_cq2num(cq));
26185 +       write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc | E4_CommandRequeueHighPri);
26186 +    }
26187 +    else
26188 +    {
26189 +       PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as low pri\n", elan4_cq2num(cq));
26190 +       write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc);
26191 +    }
26192 +    pioflush_reg (dev);
26193 +    
26194 +    spin_unlock_irqrestore (&dev->dev_requeue_lock, flags);
26195 +}
26196 +
26197 +static void
26198 +flushcq_intop (ELAN4_DEV *dev, void *arg)
26199 +{
26200 +    unsigned long flags;
26201 +
26202 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26203 +    dev->dev_flush_finished |= (1 << (unsigned long) arg);
26204 +    kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
26205 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26206 +}
26207 +void
26208 +elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
26209 +{
26210 +    int                  flushqnum = elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1);
26211 +    ELAN4_CQ     *flushq    = dev->dev_flush_cq[flushqnum];
26212 +    unsigned long flags;
26213 +
26214 +    PRINTF (DBG_DEVICE, DBG_FLUSH, "elan4_flushcq: cqnum=%d\n", elan4_cq2num(cq));
26215 +
26216 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26217 +
26218 +    while (! (dev->dev_flush_finished & (1 << flushqnum)))
26219 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
26220 +    
26221 +    dev->dev_flush_finished &= ~(1 << flushqnum);
26222 +
26223 +    dev->dev_flush_op[flushqnum].op_function = flushcq_intop;
26224 +    dev->dev_flush_op[flushqnum].op_arg      = (void *) (unsigned long) flushqnum;
26225 +    
26226 +    elan4_queue_intop (dev, flushq, &dev->dev_flush_op[flushqnum]);
26227 +
26228 +    while (! (dev->dev_flush_finished & (1 << flushqnum)))
26229 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
26230 +    
26231 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26232 +}
26233 +
26234 +void
26235 +elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart)
26236 +{
26237 +    sdramaddr_t cqdesc  = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
26238 +    E4_uint32   control = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
26239 +
26240 +    /* Write the command queue's control word, but ensure that the ChannelNotCompleted fields
26241 +     * are not modified.   We use this to just alter the RestartCount/Permissions fields */
26242 +
26243 +    elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control), 
26244 +                       CQ_ControlValue (CQ_Context (control), restart ? restart : CQ_RestartCount (control), perm));
26245 +}
26246 +
26247 +/* instruction cache flush */
26248 +static __inline__ void
26249 +elan4_flush_icache_locked (ELAN4_DEV *dev)
26250 +{
26251 +    int i, j;
26252 +
26253 +    PRINTF0 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache_locked: flushing icache\n");
26254 +
26255 +    for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
26256 +    {
26257 +        write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
26258 +        for (j = 0; j < E4_ICachePortSize; j++)
26259 +           write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
26260 +    }
26261 +
26262 +    /*
26263 +     * Initialise the top of the ICache Set0 with an instruction which will
26264 +     * cause a known trap fingerprint so that the application can identify it
26265 +     * and ignore the trap.
26266 +     */
26267 +    write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
26268 +
26269 +    /* Errata 24: must ensure that the DCache is flushed after loading 
26270 +     *            code for the thread processor. */
26271 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26272 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
26273 +
26274 +    pioflush_reg (dev);
26275 +}
26276 +
26277 +static void
26278 +device_iflush_haltop (ELAN4_DEV *dev, void *arg)
26279 +{
26280 +    unsigned long flags;
26281 +
26282 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26283 +    
26284 +    elan4_flush_icache_locked (dev);
26285 +
26286 +    dev->dev_iflush_queued = 0;
26287 +
26288 +    kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
26289 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26290 +}
26291 +
26292 +void
26293 +elan4_flush_icache_halted (ELAN4_CTXT *ctxt)
26294 +{
26295 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
26296 +    unsigned long flags;
26297 +
26298 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26299 +    
26300 +    elan4_flush_icache_locked (dev);
26301 +
26302 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26303 +}
26304 +
26305 +void
26306 +elan4_flush_icache (ELAN4_CTXT *ctxt)
26307 +{
26308 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
26309 +    unsigned long flags;
26310 +    
26311 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26312 +
26313 +    PRINTF1 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache: queued=%d\n", dev->dev_iflush_queued);
26314 +
26315 +    if (! dev->dev_iflush_queued)
26316 +    {
26317 +       dev->dev_iflush_queued = 1;
26318 +       
26319 +       elan4_queue_haltop (dev, &dev->dev_iflush_haltop);
26320 +    }
26321 +
26322 +    while (dev->dev_iflush_queued)
26323 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
26324 +
26325 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26326 +}
26327 +
26328 +/* device context operations */
26329 +static void
26330 +device_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
26331 +{
26332 +    ELAN4_DEV        *dev  = ctxt->ctxt_dev;
26333 +    ELAN4_CPROC_TRAP *trap = &dev->dev_cproc_trap;
26334 +
26335 +    elan4_extract_cproc_trap (dev, status, trap, cqnum);
26336 +
26337 +    DBGCMD (DBG_DEVICE, DBG_FLUSH, elan4_display_cproc_trap (DBG_DEVICE, DBG_FLUSH, "device_cproc_trap", trap));
26338 +
26339 +    switch (CPROC_TrapType (trap->tr_status))
26340 +    {
26341 +    case CommandProcInterruptQueueOverflow:
26342 +       PRINTF (ctxt, DBG_FLUSH, "device_cproc_trap: cqnum=%d\n", cqnum);
26343 +
26344 +       /* XXXX: we could either just hit restart (and hope) - or we could extract
26345 +        *       the event interrupt cookie out and "complete" the command before
26346 +        *       restarting it */
26347 +       elan4_restartcq (dev, dev->dev_flush_cq[cqnum]);
26348 +       return;
26349 +
26350 +    case CommandProcDmaQueueOverflow:
26351 +    case CommandProcPermissionTrap:
26352 +       handle_dma_flushops (dev, status, cqnum);
26353 +       return;
26354 +       
26355 +    default:
26356 +       printk ("device_cproc_trap: status=%llx control=%llx TrapType=%x cqnum=%d\n", (long long) trap->tr_status,
26357 +               elan4_sdram_readq (dev, dev->dev_cqaddr + cqnum * sizeof (E4_CommandQueueDesc) +
26358 +                                  offsetof (E4_CommandQueueDesc, CQ_Control)),
26359 +               (int) CPROC_TrapType(trap->tr_status), cqnum);
26360 +       panic ("device_cproc_trap");
26361 +    }
26362 +}
26363 +
26364 +static void
26365 +device_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
26366 +{
26367 +    ELAN4_TPROC_TRAP trap;
26368 +
26369 +    elan4_extract_tproc_trap (ctxt->ctxt_dev, status, &trap);
26370 +
26371 +    elan4_display_tproc_trap (DBG_CONSOLE, DBG_TRAP, "device_tproc_trap", &trap);
26372 +    panic ("device_tproc_trap");
26373 +}
26374 +
26375 +static void
26376 +device_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
26377 +{
26378 +    ELAN4_DPROC_TRAP trap;
26379 +
26380 +    elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit);
26381 +
26382 +    elan4_display_dproc_trap (DBG_CONSOLE, DBG_TRAP, "device_dproc_trap", &trap);
26383 +    panic ("device_dproc_trap");
26384 +}
26385 +
26386 +static void
26387 +device_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
26388 +{
26389 +    ELAN4_DEV *dev = (ELAN4_DEV *) ctxt;
26390 +    struct list_head *el,*nel;
26391 +    unsigned long flags;
26392 +
26393 +    PRINTF (ctxt, DBG_FLUSH, "device_interrupt: cookie=%llx\n", cookie);
26394 +
26395 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
26396 +    list_for_each_safe (el, nel, &dev->dev_intop_list) {
26397 +       ELAN4_INTOP *op = list_entry (el, ELAN4_INTOP, op_link);
26398 +
26399 +       if (op->op_cookie == cookie)
26400 +       {
26401 +           if ((op->op_cookie & INTOP_TYPE_MASK) == INTOP_ONESHOT)
26402 +               list_del (&op->op_link);
26403 +
26404 +           spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
26405 +           
26406 +           (*op->op_function)(dev, op->op_arg);
26407 +           return;
26408 +       }
26409 +    }
26410 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
26411 +
26412 +    panic ("device_interrupt: interrupt cookie %llx not found\n", cookie);
26413 +}
26414 +
26415 +static void
26416 +device_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
26417 +{
26418 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
26419 +    ELAN4_IPROC_TRAP *trap = &dev->dev_iproc_trap;
26420 +
26421 +    elan4_extract_iproc_trap (dev, status, trap, unit);
26422 +    elan4_inspect_iproc_trap (trap);
26423 +
26424 +    DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "device_iproc_trap", trap));
26425 +
26426 +    if (elan4_neterr_iproc_trap (dev, trap))
26427 +       return;
26428 +
26429 +    elan4_display_iproc_trap (DBG_CONSOLE, DBG_TRAP, "device_iproc_trap", trap);
26430 +    panic ("device_iproc_trap: unexpected trap\n");
26431 +}
26432 +
26433 +ELAN4_TRAP_OPS device_trap_ops = 
26434 +{
26435 +    NULL,
26436 +    device_cproc_trap,
26437 +    device_dproc_trap,
26438 +    device_tproc_trap,
26439 +    device_iproc_trap,
26440 +    device_interrupt,
26441 +};
26442 +
26443 +/*
26444 + * elan4_initialise_device
26445 + *    initialise the ELAN4_DEV struct - spinlocks,cvs etc.
26446 + *    map the registers, sdram etc
26447 + */
26448 +int
26449 +elan4_initialise_device (ELAN4_DEV *dev)
26450 +{
26451 +    int i, bit;
26452 +
26453 +    if (elan4_mainint_resched_ticks == 0)
26454 +       elan4_mainint_resched_ticks = (hz/4);
26455 +
26456 +    /* map the registers */
26457 +    switch (dev->dev_devinfo.dev_revision_id)
26458 +    {
26459 +    case PCI_REVISION_ID_ELAN4_REVA:
26460 +       dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle);
26461 +       
26462 +       dev->dev_rom  = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_EBUS_OFFSET + ELAN4_REVA_EBUS_ROM_OFFSET, 
26463 +                                         ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle);
26464 +       break;
26465 +       
26466 +    case PCI_REVISION_ID_ELAN4_REVB:
26467 +       dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle);
26468 +       dev->dev_rom  = (ioaddr_t) 0;
26469 +       dev->dev_i2c  = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_I2C_OFFSET, ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle);
26470 +       break;
26471 +
26472 +    default:
26473 +       return -EINVAL;
26474 +    }
26475 +
26476 +    /* XXXX: parse the ebus rom to determine the sdram configuration */
26477 +    {
26478 +       extern long long       sdram_cfg;
26479 +
26480 +       if (sdram_cfg == 0)
26481 +           dev->dev_sdram_cfg = SDRAM_STARTUP_VALUE;
26482 +       else
26483 +           dev->dev_sdram_cfg = sdram_cfg;
26484 +    }
26485 +
26486 +    for (bit = 0; ((1 << bit) & elan4_resource_len (dev, ELAN4_BAR_SDRAM)) == 0; bit++)
26487 +       ;
26488 +
26489 +    switch ((dev->dev_sdram_cfg >> SDRAM_RamSize_SH) & 3)
26490 +    {
26491 +    case 0:                    /* 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output) */
26492 +       dev->dev_sdram_numbanks = 4; bit -= 2;
26493 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
26494 +       {
26495 +           dev->dev_sdram_banks[i].b_base = (i << bit);
26496 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
26497 +       }
26498 +       break;
26499 +
26500 +    case 1:                    /*  64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output) */
26501 +       dev->dev_sdram_numbanks = 4; bit -= 2;
26502 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
26503 +       {
26504 +           dev->dev_sdram_banks[i].b_base = ((i & 2) << (bit)) | ((i & 1) << (bit-1));
26505 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
26506 +       }
26507 +       break;
26508 +       
26509 +    case 2:                    /* 2Gbit (16-bit output) or 1Gbit (8-bit output) */
26510 +       dev->dev_sdram_numbanks = 2; bit--;
26511 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
26512 +       {
26513 +           dev->dev_sdram_banks[i].b_base = (i << bit);
26514 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
26515 +       }
26516 +       break;
26517 +
26518 +    case 3:                    /* 4Gbit (16-bit output) or 2Gbit (8-bit output) */
26519 +       dev->dev_sdram_numbanks = 1;
26520 +       dev->dev_sdram_banks[0].b_base = 0;
26521 +       dev->dev_sdram_banks[0].b_size = (1 << bit);
26522 +       break;
26523 +    }
26524 +
26525 +    elan4_sdram_init (dev);
26526 +
26527 +    /* initialise locks for classes of interrupts */
26528 +    spin_lock_init (&dev->dev_trap_lock);
26529 +    spin_lock_init (&dev->dev_intop_lock);
26530 +    spin_lock_init (&dev->dev_haltop_lock);
26531 +    spin_lock_init (&dev->dev_mainint_lock);
26532 +
26533 +    /* initialise other locks */
26534 +    spin_lock_init (&dev->dev_i2c_lock);
26535 +
26536 +    spin_lock_init (&dev->dev_mmulock);
26537 +    spin_lock_init (&dev->dev_cqlock);
26538 +    spin_lock_init (&dev->dev_ctxlock);
26539 +
26540 +    spin_lock_init (&dev->dev_intmask_lock);
26541 +    spin_lock_init (&dev->dev_syscontrol_lock);
26542 +
26543 +    spin_lock_init (&dev->dev_ctxt_lock);
26544 +    spin_lock_init (&dev->dev_flush_lock);
26545 +    spin_lock_init (&dev->dev_requeue_lock);
26546 +
26547 +    kmutex_init (&dev->dev_lock);
26548 +
26549 +    kcondvar_init (&dev->dev_mainint_wait);
26550 +    kcondvar_init (&dev->dev_flush_wait);
26551 +
26552 +    /* initialise lists */
26553 +    INIT_LIST_HEAD (&dev->dev_ctxt_list);
26554 +    INIT_LIST_HEAD (&dev->dev_intop_list);
26555 +    INIT_LIST_HEAD (&dev->dev_interruptq_list);
26556 +    INIT_LIST_HEAD (&dev->dev_hc_list);
26557 +    INIT_LIST_HEAD (&dev->dev_haltop_list);
26558 +    INIT_LIST_HEAD (&dev->dev_dma_flushop[0].list);
26559 +    INIT_LIST_HEAD (&dev->dev_dma_flushop[1].list);
26560 +
26561 +    dev->dev_state = ELAN4_STATE_STOPPED;
26562 +
26563 +    return (0);
26564 +}
26565 +
26566 +void
26567 +elan4_finalise_device (ELAN4_DEV *dev)
26568 +{
26569 +    kcondvar_destroy (&dev->dev_flush_wait);
26570 +    kcondvar_destroy (&dev->dev_mainint_wait);
26571 +
26572 +    kmutex_destroy (&dev->dev_lock);
26573 +
26574 +    spin_lock_destroy (&dev->dev_requeue_lock);
26575 +    spin_lock_destroy (&dev->dev_flush_lock);
26576 +    spin_lock_destroy (&dev->dev_ctxt_lock);
26577 +
26578 +    spin_lock_destroy (&dev->dev_syscontrol_lock);
26579 +    spin_lock_destroy (&dev->dev_intmask_lock);
26580 +
26581 +    spin_lock_destroy (&dev->dev_ctxlock);
26582 +    spin_lock_destroy (&dev->dev_cqlock);
26583 +    spin_lock_destroy (&dev->dev_mmulock);
26584 +
26585 +    spin_lock_destroy (&dev->dev_i2c_lock);
26586 +
26587 +    spin_lock_destroy (&dev->dev_mainint_lock);
26588 +    spin_lock_destroy (&dev->dev_haltop_lock);
26589 +    spin_lock_destroy (&dev->dev_intop_lock);
26590 +    spin_lock_destroy (&dev->dev_trap_lock);
26591 +
26592 +    while (! list_empty (&dev->dev_hc_list))
26593 +    {
26594 +       ELAN4_HASH_CHUNK *hc = list_entry (dev->dev_hc_list.next, ELAN4_HASH_CHUNK, hc_link);
26595 +       
26596 +       list_del (&hc->hc_link);
26597 +
26598 +       KMEM_FREE(hc, sizeof (ELAN4_HASH_CHUNK));
26599 +    }
26600 +    
26601 +    elan4_sdram_fini (dev);
26602 +    
26603 +    switch (dev->dev_devinfo.dev_revision_id)
26604 +    {
26605 +    case PCI_REVISION_ID_ELAN4_REVA:
26606 +       elan4_unmap_device (dev, dev->dev_rom,  ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle);
26607 +       elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
26608 +       break;
26609 +    case PCI_REVISION_ID_ELAN4_REVB:
26610 +       elan4_unmap_device (dev, dev->dev_i2c,  ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle);
26611 +       elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
26612 +       break;
26613 +    }
26614 +}
26615 +
26616 +static int
26617 +measure_sysclk (ELAN4_DEV *dev)        
26618 +{
26619 +    E4_uint64 val0, val1;
26620 +    E4_uint32 ticks, ns;
26621 +    
26622 +    write_ureg64 (dev, StatCont, STP_SYS_CLOCK_RATE0);
26623 +    
26624 +    val0 = read_ureg64 (dev, StatCounts[0]);
26625 +    udelay (1000);
26626 +    val1 = read_ureg64 (dev, StatCounts[0]);
26627 +    
26628 +    
26629 +    ticks = ((val1 >> 32) - (val0 >> 32));
26630 +    ns    = ((val1 & 0xffffffff) - (val0 & 0xffffffff));
26631 +    
26632 +    return (ticks / (ns / 1000));
26633 +}
26634 +
26635 +static void
26636 +initialise_cache (ELAN4_DEV *dev)
26637 +{
26638 +    register int set, line;
26639 +
26640 +    /* Initialise the cache to "map" the bottom of sdram - we will use
26641 +     * this space for cache flushing, so require the cache to be set
26642 +     * up so that cachelines for this are in the correct set.
26643 +     *
26644 +     * XXXX: for MPSAS we set bit 28, to ensure that any access to 
26645 +     *       sdram causes the line to be filled first to expunge any
26646 +     *       Xs. */
26647 +    for (set = 0; set < E4_NumCacheSets; set++)
26648 +       for (line = 0; line < E4_NumCacheLines; line++)
26649 +           write_tag (dev, Tags[set][line], (((E4_uint64) set) << 29) | (1 << 28) | (line << 16));
26650 +}
26651 +
26652 +#ifndef CONFIG_MPSAS
26653 +static void
26654 +initialise_cache_tags (ELAN4_DEV *dev, unsigned addr)
26655 +{
26656 +    register int set, line;
26657 +
26658 +    /* Initialise the whole cache to hold sdram at "addr" as direct mapped */
26659 +
26660 +    for (set = 0; set < E4_NumCacheSets; set++)
26661 +       for (line = 0; line < E4_NumCacheLines; line++)
26662 +           write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11));
26663 +}
26664 +
26665 +static void
26666 +initialise_ecc (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
26667 +{
26668 +    register int i, addr;
26669 +
26670 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26671 +    {
26672 +        initialise_cache_tags (dev, E4_CacheSize);
26673 +        for (addr = 0; addr < bank->b_size; addr += E4_CacheSize)
26674 +        {
26675 +           for (i = 0; i < E4_CacheSize; i += sizeof (E4_uint64))
26676 +               writeq (0xbeec000000000000ull | addr | i, bank->b_ioaddr + addr + i);
26677 +           initialise_cache_tags (dev, addr);
26678 +        }
26679 +    }
26680 +    else
26681 +    {
26682 +       /* Write the whole of this bank of sdram. */
26683 +        for (addr = 0; addr < bank->b_size; addr += sizeof (E4_uint64))
26684 +           writeq (0xbeec000000000000ull | addr, bank->b_ioaddr + addr);
26685 +
26686 +       /* Now flush out the top out of the cache */
26687 +        for (addr = 0; addr < E4_CacheSize; addr += sizeof (E4_uint64))
26688 +           writeq (0xbeec000000000000ull | addr, bank->b_ioaddr + addr);
26689 +
26690 +       /* Now read the top value of sdram to guarantee the write has occured before the ecc is enabled */
26691 +       __elan4_readq (dev, bank->b_ioaddr + bank->b_size - sizeof (E4_uint64));
26692 +    }
26693 +}
26694 +#endif
26695 +
26696 +#ifdef CONFIG_MPSAS
26697 +static void
26698 +do_initdma (ELAN4_DEV *dev)
26699 +{
26700 +#define VIRTUAL_ADDRESS        0x10000000ull
26701 +    ELAN4_CQ  *cq  = dev->dev_flush_cq[0];
26702 +    E4_uint64 value;
26703 +    E4_uint32 intreg;
26704 +    E4_uint64 status;
26705 +
26706 +    PRINTF (DBG_DEVICE, DBG_CONFIG, "elan: performing initialising dma\n");
26707 +    
26708 +    DISABLE_INT_MASK (dev, INT_Dma0Proc | INT_Dma1Proc);
26709 +    
26710 +    /* initialise the context filter */
26711 +    elan4_attach_filter (&dev->dev_ctxt, 0);
26712 +
26713 +    /* now issue a DMA - we expect this to trap */
26714 +    writeq (E4_DMA_TYPE_SIZE (128*4, DMA_DataTypeByte, 0, 0) | RUN_DMA_CMD, cq->cq_mapping + (0 << 3));
26715 +    writeq (0,                                                              cq->cq_mapping + (1 << 3));
26716 +    writeq (0,                                                              cq->cq_mapping + (2 << 3));
26717 +    writeq (dev->dev_tproc_space,                                           cq->cq_mapping + (3 << 3));
26718 +    writeq (dev->dev_tproc_space,                                           cq->cq_mapping + (4 << 3));
26719 +    writeq (0,                                                              cq->cq_mapping + (5 << 3));
26720 +    writeq (0,                                                              cq->cq_mapping + (6 << 3));
26721 +    
26722 +    /* spin waiting for it to trap - then restart the dma processor */
26723 +    do {
26724 +       value   = read_reg64 (dev, IntAndMaskReg);
26725 +       intreg  = (value >> E4_INTERRUPT_REG_SHIFT);
26726 +    } while ((intreg & (INT_Dma0Proc | INT_Dma1Proc)) == 0);
26727 +    
26728 +    /* check it trapped for the right reason */
26729 +    status = (intreg & INT_Dma0Proc) ? read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status);
26730 +    
26731 +    if (DPROC_PrefetcherFault (status) || (DPROC_TrapType(status) != DmaProcFailCountError && DPROC_TrapType(status) != DmaProcPacketAckError))
26732 +    {
26733 +       printk ("elan: bad dma trap, status = %lx\n", (long)status);
26734 +       panic ("elan: bad dma trap\n");
26735 +    }
26736 +    
26737 +    PULSE_SCHED_RESTART (dev, SCH_RestartDma0Proc | SCH_RestartDma1Proc | SCH_RestartDmaPrefetchProc);
26738 +
26739 +    elan4_detach_filter (&dev->dev_ctxt, 0);
26740 +
26741 +    ENABLE_INT_MASK (dev, INT_Dma0Proc | INT_Dma1Proc);
26742 +#undef VIRTUAL_ADDRESS
26743 +}
26744 +#endif
26745 +
26746 +static int
26747 +ebus_read_vpd (ELAN4_DEV *dev, unsigned char *data, unsigned int nob)
26748 +{
26749 +    unsigned int pci_data_ptr;
26750 +    unsigned int vpd_ptr;
26751 +    register int i;
26752 +
26753 +    if (read_ebus_rom (dev, 0) != 0x55 || read_ebus_rom (dev, 1) != 0xaa)
26754 +    {
26755 +       printk ("elan%d: invalid rom signature in ebus rom\n", dev->dev_instance);
26756 +       return -EINVAL;
26757 +    }
26758 +
26759 +    pci_data_ptr = (read_ebus_rom (dev, 0x19) << 8) | read_ebus_rom (dev, 0x18);
26760 +
26761 +    /* check the pci data structure */
26762 +    if (read_ebus_rom (dev, pci_data_ptr + 0) != 'P' ||
26763 +       read_ebus_rom (dev, pci_data_ptr + 1) != 'C' ||
26764 +       read_ebus_rom (dev, pci_data_ptr + 2) != 'I' ||
26765 +       read_ebus_rom (dev, pci_data_ptr + 3) != 'R')
26766 +    {
26767 +       printk ("elan%d: invalid pci data structure in ebus rom\n", dev->dev_instance);
26768 +       return -EINVAL;
26769 +    }
26770 +    
26771 +    /* extract the VPD pointer */
26772 +    vpd_ptr = (read_ebus_rom (dev, pci_data_ptr + 9) << 8) | read_ebus_rom (dev, pci_data_ptr + 8);
26773 +
26774 +    if (vpd_ptr == 0)
26775 +    {
26776 +       printk ("elan%d: no vital product data in ebus rom\n", dev->dev_instance);
26777 +       return -EINVAL;
26778 +    }
26779 +    
26780 +    /* read the vpd data */
26781 +    for (i = 0; i < nob; i++)
26782 +       data[i] = read_ebus_rom (dev, vpd_ptr + i);
26783 +
26784 +    return 0;
26785 +}
26786 +
26787 +int
26788 +elan4_read_vpd (ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) 
26789 +{
26790 +    unsigned char vpd[I2C_ELAN_EEPROM_VPD_SIZE];
26791 +    unsigned char *ptr = vpd;
26792 +    unsigned int   finished = 0;
26793 +    unsigned char *lim;
26794 +    unsigned char  name[3];
26795 +    unsigned char  value[256];
26796 +    unsigned char  type;
26797 +    unsigned int   len, len2;
26798 +    register int   i;
26799 +
26800 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26801 +    {
26802 +       if (ebus_read_vpd (dev, vpd, I2C_ELAN_EEPROM_VPD_SIZE) < 0)
26803 +       {
26804 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from EBUS rom\n", dev->dev_instance);
26805 +           return -EINVAL ;
26806 +       }       
26807 +    }
26808 +    else
26809 +    {
26810 +       if (i2c_read_rom (dev, I2C_ELAN_EEPROM_VPD_BASEADDR, I2C_ELAN_EEPROM_VPD_SIZE, vpd) < 0)
26811 +       {
26812 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from I2C rom\n", dev->dev_instance);
26813 +           return  -EINVAL;
26814 +       }
26815 +    }
26816 +
26817 +    result[0] = 0;
26818 +    while (! finished)
26819 +    {
26820 +       type = *ptr++;
26821 +       
26822 +       if (type & LARGE_RESOURCE_BIT)
26823 +       {
26824 +           len = *(ptr++);
26825 +           len += *(ptr++) << 8;
26826 +           
26827 +           switch (type & ~LARGE_RESOURCE_BIT)
26828 +           {
26829 +           case LARGE_RESOURCE_STRING:
26830 +           case LARGE_RESOURCE_VENDOR_DEFINED:
26831 +               ptr += len;
26832 +               break;
26833 +               
26834 +           case LARGE_RESOURCE_VITAL_PRODUCT_DATA:
26835 +               for (lim = ptr + len; ptr < lim; )
26836 +               {
26837 +                   name[0] = *ptr++;
26838 +                   name[1] = *ptr++;
26839 +                   name[2] = '\0';
26840 +                   len2    = *ptr++;
26841 +                   
26842 +                   for (i = 0; i < len2 && ptr < lim; i++)
26843 +                       value[i] = *ptr++;
26844 +                   value[i] = '\0';
26845 +                                   
26846 +                   PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, %s: %s\n", dev->dev_instance, name, value);
26847 +
26848 +                   if (tag != NULL) 
26849 +                   { /* looking for just one tag */
26850 +                       if (!strcmp (name, tag))
26851 +                           strcpy(result, value);
26852 +                   } 
26853 +                   else 
26854 +                   { /* get all tags */
26855 +                       strcat(result,name);
26856 +                       strcat(result,": ");
26857 +                       strcat(result,value);
26858 +                       strcat(result,"\n");
26859 +                   }
26860 +               }
26861 +               break;
26862 +               
26863 +           default:
26864 +               PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown large resource %x\n", dev->dev_instance, type);
26865 +               finished = 1;
26866 +               break;
26867 +           }
26868 +       }
26869 +       else
26870 +       {
26871 +           len = type & 0x7;
26872 +           
26873 +           switch (type >> 3)
26874 +           {
26875 +           case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID:
26876 +               ptr += len;
26877 +               break;
26878 +               
26879 +           case SMALL_RESOURCE_VENDOR_DEFINED:
26880 +               ptr += len;
26881 +               break;
26882 +               
26883 +           case SMALL_RESOURCE_END_TAG:
26884 +               finished = 1;
26885 +               break;
26886 +               
26887 +           default:
26888 +               PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown small resource %x\n", dev->dev_instance, type >> 3);
26889 +               finished = 1;
26890 +               break;
26891 +           }
26892 +       }
26893 +    }
26894 +
26895 +    if ( result[0] == 0 ) {
26896 +       if ( tag != 0 ) 
26897 +           PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find tag %s\n", dev->dev_instance, tag);
26898 +       else
26899 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find any tags\n", dev->dev_instance);
26900 +       return -EINVAL;
26901 +    }
26902 +
26903 +    return (0);
26904 +}
26905 +
26906 +int
26907 +elan4_start_device (ELAN4_DEV *dev)
26908 +{
26909 +    E4_VirtualProcessEntry entry;
26910 +    unsigned              pagesizeval[2];
26911 +    unsigned              hashsizeval[2];
26912 +    register int           i, j, tbl, res;
26913 +    unsigned               attempts = 0;
26914 +    E4_PAGE_SIZE_TABLE;
26915 +    unsigned char          serial[256];
26916 +    unsigned int           sdram_factor = SDRAM_166_DLL_CORRECTION_FACTOR;
26917 +
26918 +    PRINTF (DBG_DEVICE, DBG_ALL, "elan4_start_device: entered\n");
26919 +
26920 +    dev->dev_state = ELAN4_STATE_STARTING;
26921 +
26922 + tryagain:
26923 +    /* Initialise the pci config space */
26924 +    if ((res = elan4_pciinit (dev)) < 0)
26925 +       return (res);
26926 +
26927 +    /* Display the serial number */
26928 +    if (elan4_read_vpd (dev, "SN", serial))
26929 +       printk("elan%d: SN: failed to read\n", dev->dev_instance);
26930 +    else
26931 +       printk("elan%d: SN: %s\n", dev->dev_instance, serial);
26932 +
26933 +    /* initialise the interrupt mask to zero */
26934 +    SET_INT_MASK (dev, 0);
26935 +
26936 +    /* Initialise the device registers */
26937 +    write_reg64 (dev, TlbLineValue, 0);
26938 +    write_reg64 (dev, SysControlReg, 0);
26939 +
26940 +    /* Initialise the SDRAM using the configuration value from the ROM */
26941 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_SETUP);
26942 +
26943 +    /* Setup the linkport registers */
26944 +    write_reg64 (dev, LinkPortLock, elan4_linkport_lock);
26945 +
26946 +    /* Setup the tick rates, start the clock, and init the stats registers */
26947 +    write_ureg32 (dev, ClockTickRate.s.TickRates, ELAN4_CLOCK_TICK_RATE);
26948 +    write_ureg64 (dev, Clock, 0);
26949 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
26950 +    for (i = 0; i < 8; i++)
26951 +       write_ureg32 (dev, StatCounts[i].s.StatsCount, 0);
26952 +
26953 +    /* Initialise the Link Control register - disable the TLB prefetcher on RevB
26954 +     * as it can cause very occasional data corruption. */
26955 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
26956 +       write_reg32 (dev, LinkControlReg, LCONT_EN_SYS_READS | LCONT_REVB_DISABLE_TLB_PREFETCH);
26957 +    else
26958 +       write_reg32 (dev, LinkControlReg, LCONT_EN_SYS_READS);
26959 +
26960 +    /* Initialise the Link Control Settings to set the PLL Reference Value */
26961 +    write_reg32 (dev, LinkContSettings, 
26962 +                (elan4_mod45disable ? LCONT_MOD45_DISABLE : 0) |
26963 +                (3 << LCONT_CONFIG_PHASE_SHIFT) |
26964 +                ((elan4_pll_div & LCONT_PLL_REF_VAL_BITS_MASK) << LCONT_PLL_REF_VAL_BITS_SHIFT) |
26965 +                (LCONT_VOD_360 << LCONT_LVDS_VOLTAGE_BITS_SHIFT) |
26966 +                (LCONT_TERM_AUTO_OHM << LCONT_LVDS_TERMINATION_SHIFT));
26967 +
26968 +    /* Clear the link error LED on RevB and above */
26969 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA)
26970 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError);
26971 +
26972 +    /* Compute the SysClk frequency and update the PLL if necessary */
26973 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA)
26974 +    {
26975 +       int mhz = measure_sysclk (dev);
26976 +
26977 +       if (elan4_pll_cfg != 0 || mhz > 190 || mhz < 170)
26978 +           printk ("elan%d: SysClk running at %d Mhz\n", dev->dev_instance, mhz);
26979 +       else
26980 +       {
26981 +           sdram_factor = SDRAM_150_DLL_CORRECTION_FACTOR;
26982 +
26983 +           elan4_updatepll (dev, ECTRL_SYS_CLOCK_RATIO_4_3);
26984 +           
26985 +           printk ("elan%d: SysClk now running at %d Mhz\n", dev->dev_instance, measure_sysclk (dev));
26986 +       }
26987 +    }
26988 +       
26989 +    initialise_cache (dev);
26990 +
26991 +    /* Initialise the MMU hash table parameters */
26992 +    /* Select the largest elan pagesize which is spanned by the
26993 +     * system pagesize for mmu table 0*/
26994 +    for (i = 0; i < E4_PAGE_SIZE_TABLE_SIZE; i++)
26995 +       if (PageSizeTable[i] > PAGE_SHIFT)
26996 +           break;
26997 +
26998 +    pagesizeval[0] = i - 1;
26999 +    hashsizeval[0] = elan4_hash_0_size_val;
27000 +       
27001 +    /* Select a suitable elan pagesize to match any "large" page
27002 +     * support that the OS provides. */
27003 +    pagesizeval[1] = PAGE_SIZE_4M;
27004 +    hashsizeval[1] = elan4_hash_1_size_val;
27005 +
27006 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
27007 +    {
27008 +       dev->dev_pagesizeval[tbl]   = pagesizeval[tbl];
27009 +       dev->dev_pageshift[tbl]     = PageSizeTable[pagesizeval[tbl]];
27010 +       dev->dev_hashsize[tbl]      = (1 << hashsizeval[tbl])/sizeof (E4_HashTableEntry);
27011 +       dev->dev_rsvd_hashmask[tbl] = ((1 << (27 - dev->dev_pageshift[tbl]))-1) & ~((1 << hashsizeval[tbl])-1);
27012 +       dev->dev_rsvd_hashval[tbl]  = 0xFFFFFFFF;
27013 +    }
27014 +
27015 +    PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: pageshifts %d,%d\n", dev->dev_pageshift[0], 
27016 +            NUM_HASH_TABLES == 2 ? dev->dev_pageshift[1] : 0);
27017 +
27018 +    /* Initialise the control register to the desired value */
27019 +    dev->dev_syscontrol = (CONT_EN_ALL_SETS | CONT_MMU_ENABLE | CONT_CACHE_ALL | CONT_2K_NOT_1K_DMA_PACKETS |
27020 +                          (pagesizeval[0] << CONT_TABLE0_PAGE_SIZE_SHIFT) | (hashsizeval[0] << CONT_TABLE0_MASK_SIZE_SHIFT));
27021 +
27022 +    if (NUM_HASH_TABLES == 2)
27023 +       dev->dev_syscontrol |= CONT_TWO_HASH_TABLES | (pagesizeval[1] << CONT_TABLE1_PAGE_SIZE_SHIFT) | (hashsizeval[1] << CONT_TABLE1_MASK_SIZE_SHIFT);
27024 +
27025 +    write_reg64 (dev, SysControlReg, dev->dev_syscontrol);
27026 +
27027 +    /* use direct mapped pci writes during sdram initialisation, since for 
27028 +     * cache flushing to work, we need to ensure that the cacheflush page
27029 +     * never gets lines into the incorrect cache set. */
27030 +    SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
27031 +
27032 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
27033 +       elan4_sdram_setup_delay_lines(dev, sdram_factor);
27034 +
27035 +    for (i = res = 0; i < dev->dev_sdram_numbanks; i++)
27036 +       if (dev->dev_sdram_banks[i].b_size)
27037 +           res |= elan4_sdram_init_bank (dev, &dev->dev_sdram_banks[i]);
27038 +
27039 +    if (! res)
27040 +    {
27041 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB && ++attempts < 5)
27042 +       {
27043 +           printk ("elan%d: sdram not working, resetting\n", dev->dev_instance);
27044 +           goto tryagain;
27045 +       }
27046 +
27047 +       printk ("elan%d: could not find any sdram banks\n", dev->dev_instance);
27048 +       goto failed;
27049 +    }
27050 +
27051 +#ifndef CONFIG_MPSAS
27052 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialising for ECC\n");
27053 +
27054 +    for (i = 0 ; i < dev->dev_sdram_numbanks; i++)
27055 +       if (dev->dev_sdram_banks[i].b_ioaddr)
27056 +           initialise_ecc (dev, &dev->dev_sdram_banks[i]);
27057 +#endif
27058 +
27059 +    dev->dev_sdram_initial_ecc_val = read_reg64 (dev, SDRamECCStatus);
27060 +
27061 +    /* Now enable ECC after we've scrubbed the memory */
27062 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_ENABLE_ECC);
27063 +
27064 +    /* clear any errors, and flush the tlb/route cache */
27065 +    PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH | CONT_ROUTE_FLUSH | CONT_CLEAR_LINKPORT_INT | CONT_CLEAR_SDRAM_ERROR);
27066 +
27067 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
27068 +
27069 +    /* Initialise the thread processor's register file */
27070 +    for (i = 0; i < 64; i++)
27071 +       write_reg64 (dev, TProcRegs[i], 0);
27072 +
27073 +    /* Initialise the thread processor's ICache tags */
27074 +    for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
27075 +    {
27076 +        write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
27077 +        for (j = 0; j < E4_ICachePortSize; j++)
27078 +           write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
27079 +    }
27080 +
27081 +    /*
27082 +     * Initialise the ICache with a sethi %hi(addr << 7), %r0
27083 +     * writing 8 64 bit values per loop of sethi %g0 values ending in 77 for something different??
27084 +     */
27085 +    for (i = 0; i < E4_ICacheSizeInBytes; i += (E4_ICachePortSize << 3))
27086 +    {
27087 +       write_reg64 (dev, ICachePort_Cntl_Addr, E4_AccessICacheRams | (i >> 3));
27088 +
27089 +       for (j = 0; j < E4_ICachePortSize; j++)
27090 +           write_reg64 (dev, ICachePort[j], 
27091 +                        (E4_uint64) (((E4_uint64)i << (4+7))    + ((E4_uint64)j << (1+7))    + (0x077)) |
27092 +                        (E4_uint64) (((E4_uint64)i << (4+7+32)) + ((E4_uint64)j << (1+7+32)) + (0x0e7)) << 32);
27093 +    }
27094 +
27095 +    /*
27096 +     * Initialise the top of the ICache Set0 with a instruction which will
27097 +     * cause a known trap fingerprint so that the application can identify it
27098 +     * and ignore the trap.
27099 +     */
27100 +    write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
27101 +    for (i = 0; i < E4_ICachePortSize; i++)
27102 +       write_reg64 (dev, ICachePort[i], E4_ICacheFixupInsn | (E4_ICacheFixupInsn << 32));
27103 +
27104 +    /* create the buddy allocator for SDRAM */
27105 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
27106 +       if (dev->dev_sdram_banks[i].b_ioaddr)
27107 +           elan4_sdram_add_bank (dev, &dev->dev_sdram_banks[i]);
27108 +
27109 +    dev->dev_ctxtableshift        = elan4_ctxt_table_shift;
27110 +    dev->dev_cqcount              = (1 << elan4_ln2_max_cqs);
27111 +    dev->dev_cqreorder            = 0;
27112 +
27113 +    /* allocate the sdram for cache flushing whilst still in direct mapped mode */
27114 +    dev->dev_cacheflush_space = elan4_sdram_alloc (dev, E4_CacheSize);
27115 +
27116 +    /* no longer need direct mapped pci writes */
27117 +    CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
27118 +
27119 +    /* allocate the hash tables, command queues, context tables etc */
27120 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: allocating hash tables, command queueus, context tables\n");
27121 +
27122 +    dev->dev_comqlowpri       = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
27123 +    dev->dev_comqhighpri      = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
27124 +    dev->dev_cqaddr           = elan4_sdram_alloc (dev, sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
27125 +    dev->dev_dmaqhighpri      = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_highpri_size));
27126 +    dev->dev_dmaqlowpri       = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_lowpri_size));
27127 +    dev->dev_threadqhighpri   = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_highpri_size));
27128 +    dev->dev_threadqlowpri    = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_lowpri_size));
27129 +    dev->dev_interruptq       = elan4_sdram_alloc (dev, E4_QueueSize(elan4_interruptq_size));
27130 +
27131 +    dev->dev_ctxtable         = elan4_sdram_alloc (dev, (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock));
27132 +    dev->dev_faultarea        = elan4_sdram_alloc (dev, CUN_Entries * sizeof (E4_FaultSave));
27133 +    dev->dev_inputtraparea    = elan4_sdram_alloc (dev, sizeof (E4_IprocTrapState));
27134 +
27135 +    dev->dev_sdrampages[0]    = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
27136 +    dev->dev_sdrampages[1]    = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
27137 +
27138 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
27139 +    {
27140 +       dev->dev_hashtable[tbl] = elan4_sdram_alloc (dev, dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
27141 +#ifndef CONFIG_MPSAS
27142 +       /* Initialise hash tables to invalid (zero) */
27143 +       elan4_sdram_zeroq_sdram (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
27144 +#endif
27145 +    }
27146 +
27147 +    /* Initialise all context filters to discard */
27148 +#ifdef CONFIG_MPSAS
27149 +    if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, dev->dev_ctxtable, 
27150 +                       E4_FILTER_DISCARD_ALL, (1 << (dev->dev_ctxtableshift-1))) < 0)
27151 +    {
27152 +       for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
27153 +           elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
27154 +    }
27155 +#else
27156 +    for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
27157 +       elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
27158 +#endif
27159 +
27160 +    PRINTF4 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: hashtables %x,%x, %x,%x\n", dev->dev_hashtable[0], 
27161 +           dev->dev_hashsize[0], dev->dev_hashtable[1], dev->dev_hashsize[1]);
27162 +
27163 +    /* install the hash table pointers */
27164 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialise registers with table addresses\n");
27165 +    write_reg64 (dev, MmuTableBasePtrs, (((E4_uint64) dev->dev_hashtable[0]) | ((E4_uint64) dev->dev_hashtable[1]) << 32));
27166 +    write_reg64 (dev, MmuFaultAndRootCntxPtr, (((E4_uint64) dev->dev_ctxtableshift) | 
27167 +                                              ((E4_uint64) dev->dev_ctxtable) |
27168 +                                              ((E4_uint64) dev->dev_faultarea) << 32));
27169 +    write_reg64 (dev, InputTrapAndFilter, (((E4_uint64) dev->dev_ctxtableshift) | 
27170 +                                          ((E4_uint64) dev->dev_ctxtable) |
27171 +                                          ((E4_uint64) dev->dev_inputtraparea) << 32));
27172 +    /*
27173 +     * The run ptrs have this format: (Front << 32) | Back
27174 +     * The base for both the front and back uses the high bits of the back pointer.
27175 +     * So writing just the base value is good enough.
27176 +     */
27177 +    write_reg64 (dev, CommandLowPriRunPtrs,  dev->dev_comqlowpri);
27178 +    write_reg64 (dev, CommandHighPriRunPtrs, dev->dev_comqhighpri);
27179 +
27180 +    /* Initialise the run queues */
27181 +    write_reg64 (dev, DProcHighPriPtrs,       E4_QueueValue (dev->dev_dmaqhighpri,    elan4_dmaq_highpri_size));
27182 +    write_reg64 (dev, DProcLowPriPtrs,        E4_QueueValue (dev->dev_dmaqlowpri,     elan4_dmaq_lowpri_size));
27183 +    write_reg64 (dev, TProcHighPriPtrs,       E4_QueueValue (dev->dev_threadqhighpri, elan4_threadq_highpri_size));
27184 +    write_reg64 (dev, TProcLowPriPtrs,        E4_QueueValue (dev->dev_threadqlowpri,  elan4_threadq_lowpri_size));
27185 +
27186 +    /* Initialise the interrupt queue as "empty" - this is actually with one entry on it */
27187 +    write_reg64 (dev, MainIntQueuePtrs.Value, (((E4_uint64) E4_QueueFrontValue (dev->dev_interruptq, elan4_interruptq_size) << 32) |
27188 +                                              ((E4_uint64) E4_QueueBackPointer(dev->dev_interruptq + E4_MainIntEntrySize))));
27189 +    
27190 +    dev->dev_interruptq_nfptr = dev->dev_interruptq + E4_MainIntEntrySize;
27191 +
27192 +    /*
27193 +     * Flush the context filter before dropping the Discard all bits in the schedule status register.
27194 +     * Also hit the SCH_RestartTProc to clear out X's from the trap state and
27195 +     * hit the SCH_RestartDmaPrefetchProc to clear out X's from the prev register.
27196 +     */
27197 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush | SCH_RestartTProc | SCH_RestartDmaPrefetchProc);
27198 +
27199 +    /* setup the schedule status register. */
27200 +    SET_SCHED_STATUS (dev, SCH_CProcTimeout6p2us | SCH_DProcTimeslice512us);
27201 +
27202 +    /*
27203 +     * Now initialise the inserter cache.
27204 +     * Bit 31 of the first word of the descriptor is a valid bit. This must be cleared.
27205 +     * Bit 31 becomes a used bit in the descriptors in memory.
27206 +     */
27207 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
27208 +    {
27209 +       write_reg32 (dev, CommandControl.CommandQueueDescsBase, i);     /* select a cache line */
27210 +       write_reg64 (dev, CommandCacheTestPort, 0);                     /* Mark it invalid */
27211 +    }
27212 +    
27213 +    /* Setup the pointer to the command descriptors */
27214 +    /*   the table must be aligned on a CQ_CommandDescsAlignment boundary */
27215 +    /*   since we've allocated a small table - we work out the offset of the */
27216 +    /*   first entry in our table for mapping in the command ports later */
27217 +    dev->dev_cqoffset = (dev->dev_cqaddr & (CQ_CommandDescsAlignment-1)) / sizeof (E4_CommandQueueDesc);
27218 +
27219 +    write_reg32 (dev, CommandControl.CommandQueueDescsBase, (dev->dev_cqaddr & ~(CQ_CommandDescsAlignment-1)) | COM_ENABLE_DEQUEUE);
27220 +
27221 +    /* allocate the bitmaps for cq,ctxt allocation */
27222 +    KMEM_ZALLOC (dev->dev_cqamap, bitmap_t *, BT_BITOUL(dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t), 1);
27223 +    KMEM_ZALLOC (dev->dev_ctxmap, bitmap_t *, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t), 1);
27224 +
27225 +    if (dev->dev_cqamap == NULL || dev->dev_ctxmap == NULL)
27226 +       goto failed;
27227 +
27228 +    /* Make every fourth context be invalid for ICache fixup.
27229 +     * context 0 is also invalid - since it is used to indicate 
27230 +     * an invalid tag. */
27231 +    for (i = 0; i < (1 << dev->dev_ctxtableshift); i += 4)
27232 +       BT_SET (dev->dev_ctxmap, i);
27233 +    
27234 +    /* initialise the halt operations */
27235 +    dev->dev_haltop_mask   = 0;
27236 +    dev->dev_haltop_active = 0;
27237 +
27238 +    /* allocate the hash table shadow structures - and place all blocks on the free lists */
27239 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
27240 +    {
27241 +       KMEM_ZALLOC (dev->dev_mmuhash[tbl], ELAN4_HASH_ENTRY *,  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY), 1);
27242 +       KMEM_ZALLOC (dev->dev_mmufree[tbl], ELAN4_HASH_ENTRY **, dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *), 1);
27243 +
27244 +       if (dev->dev_mmuhash[tbl] == NULL || dev->dev_mmufree[tbl] == NULL)
27245 +           goto failed;
27246 +
27247 +       for (i = 0; i < dev->dev_hashsize[tbl]; i++)
27248 +       {
27249 +           dev->dev_mmuhash[tbl][i].he_entry = dev->dev_hashtable[tbl] + (i * sizeof (E4_HashTableEntry));
27250 +           dev->dev_mmufree[tbl][i]          = &dev->dev_mmuhash[tbl][i];
27251 +       }
27252 +    }
27253 +
27254 +    /* setup the interrupt mask register */
27255 +    SET_INT_MASK (dev, (INT_MSI0 | INT_MSI1 | INT_MSI2 | INT_MSI3) & ~(INT_Discarding | INT_Halted | INT_LinkPortKeyFail));
27256 +
27257 +    /* start a thread to handle excessive main interrupts */
27258 +    if (kernel_thread_create (elan4_mainint_thread, (caddr_t) dev) == NULL)
27259 +       goto failed;
27260 +    dev->dev_mainint_started = 1;
27261 +    
27262 +    /* install the device context - and allocate the first 16 command queues */
27263 +    if (elan4_insertctxt (dev, &dev->dev_ctxt, &device_trap_ops) != 0)
27264 +       goto failed;
27265 +
27266 +    /* Allocate command queues, one for each entry in the inserter cache, 
27267 +     * we'll use these queues to flush the insert cache */
27268 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
27269 +    {
27270 +       if ((dev->dev_flush_cq[i] = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit | CQ_InterruptEnableBit, 
27271 +                                                  CQ_Priority)) == NULL)
27272 +           goto failed;
27273 +       
27274 +       ASSERT (elan4_cq2num(dev->dev_flush_cq[i]) == i);
27275 +
27276 +       dev->dev_flush_finished |= (1 << i);
27277 +    }
27278 +
27279 +    /* Allocate command queues for dma halt operations */
27280 +    if ((dev->dev_dma_flushop[0].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit, 0)) == NULL ||
27281 +       (dev->dev_dma_flushop[1].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit, CQ_Priority)) == NULL)
27282 +       goto failed;
27283 +
27284 +#ifdef CONFIG_MPSAS
27285 +    elan4_sdram_flushcache (dev, 0, E4_CacheSize);
27286 +#endif
27287 +
27288 +    /* initialise halt operation for flushing the icache */
27289 +    dev->dev_iflush_haltop.op_function = device_iflush_haltop;
27290 +    dev->dev_iflush_haltop.op_arg      = dev;
27291 +    dev->dev_iflush_haltop.op_mask     = INT_TProcHalted;
27292 +
27293 +    /* Allocate a route table, and create a valid route for vp==0, this is used
27294 +     * when a DMA is removed from the dma run queue */
27295 +    if ((dev->dev_routetable = elan4_alloc_routetable (dev, 0)) == NULL)
27296 +       goto failed;
27297 +
27298 +    elan4_set_routetable (&dev->dev_ctxt, dev->dev_routetable);
27299 +
27300 +    entry.Values[0] = FIRST_MYLINK;
27301 +    entry.Values[1] = 0;
27302 +
27303 +    elan4_write_route (dev, dev->dev_routetable, 0, &entry);
27304 +
27305 +    /* map the sdram pages into the elan */
27306 +    dev->dev_tproc_suspend = DEVICE_TPROC_SUSPEND_ADDR;
27307 +    dev->dev_tproc_space   = DEVICE_TPROC_SPACE_ADDR;
27308 +
27309 +    elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, (dev->dev_sdrampages[0] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocExecute));
27310 +    elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_space,   (dev->dev_sdrampages[1] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocDataWrite));
27311 +
27312 +    /* and store the thread suspend sequence in it for use when a thread is removed from the run queue */
27313 +    elan4_sdram_writel (dev, dev->dev_sdrampages[0], DEVICE_TPROC_SUSPEND_INSTR);
27314 +
27315 +#ifdef CONFIG_MPSAS
27316 +    do_initdma (dev);
27317 +#endif
27318 +    
27319 +    if (!elan4_neterr_init (dev))
27320 +       goto failed;
27321 +
27322 +    elan4_configure_writecombining (dev);
27323 +
27324 +    /* finally register the device with elanmod for rms */
27325 +    dev->dev_idx = elan_dev_register (&dev->dev_devinfo, &elan4_dev_ops, (void *) dev);
27326 +
27327 +    dev->dev_state = ELAN4_STATE_STARTED;
27328 +
27329 +    return (0);
27330 +
27331 + failed:
27332 +    printk ("elan%d: failed to start elan4 device - stopping\n", dev->dev_instance);
27333 +
27334 +    elan4_stop_device (dev);
27335 +    return (-ENOMEM);
27336 +}
27337 +
27338 +void
27339 +elan4_stop_device (ELAN4_DEV *dev)
27340 +{
27341 +    unsigned long flags;
27342 +    int i, tbl;
27343 +
27344 +    dev->dev_state = ELAN4_STATE_STOPPING;
27345 +
27346 +    elan_dev_deregister (&dev->dev_devinfo);
27347 +
27348 +    elan4_unconfigure_writecombining (dev);
27349 +
27350 +    elan4_neterr_destroy (dev);
27351 +
27352 +    if (dev->dev_tproc_suspend)
27353 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, 1 << dev->dev_pageshift[0]);
27354 +
27355 +    if (dev->dev_tproc_space)
27356 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_space,   1 << dev->dev_pageshift[0]);
27357 +
27358 +    if (dev->dev_routetable)
27359 +    {
27360 +       elan4_set_routetable (&dev->dev_ctxt, NULL);
27361 +       elan4_free_routetable (dev, dev->dev_routetable);
27362 +    }
27363 +
27364 +    for (i = 0; i < 2; i++)
27365 +       if (dev->dev_dma_flushop[i].cq)
27366 +           elan4_freecq (&dev->dev_ctxt, dev->dev_dma_flushop[i].cq);
27367 +
27368 +    /* free of the device context - and insert cache flushing command queues */
27369 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
27370 +       if (dev->dev_flush_cq[i])
27371 +           elan4_freecq (&dev->dev_ctxt, dev->dev_flush_cq[i]);
27372 +
27373 +    if (dev->dev_ctxt.ctxt_dev)
27374 +       elan4_removectxt (dev, &dev->dev_ctxt);
27375 +
27376 +    /* stop the mainint thread */
27377 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
27378 +    dev->dev_stop_threads = 1;
27379 +
27380 +    while (dev->dev_mainint_started && !dev->dev_mainint_stopped)
27381 +    {
27382 +       kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
27383 +       kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
27384 +    }
27385 +    dev->dev_mainint_started = dev->dev_mainint_stopped = 0;
27386 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
27387 +
27388 +    /* cancel any error interrupt timeouts */
27389 +    if (timer_fn_queued (&dev->dev_error_timeoutid))
27390 +       cancel_timer_fn (&dev->dev_error_timeoutid);
27391 +
27392 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && timer_fn_queued (&dev->dev_linkerr_timeoutid))
27393 +       cancel_timer_fn (&dev->dev_linkerr_timeoutid);
27394 +    
27395 +    /* reset the interrupt mask register to zero */
27396 +    if (dev->dev_regs)
27397 +       SET_INT_MASK (dev, 0);
27398 +
27399 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
27400 +    {
27401 +       if (dev->dev_mmuhash[tbl])
27402 +           KMEM_FREE (dev->dev_mmuhash[tbl], dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY));
27403 +       if (dev->dev_mmufree[tbl])
27404 +           KMEM_FREE (dev->dev_mmufree[tbl], dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *));
27405 +       if (dev->dev_hashtable[tbl])
27406 +           elan4_sdram_free (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
27407 +    }
27408 +
27409 +    if (dev->dev_cqamap)
27410 +       KMEM_FREE (dev->dev_cqamap, BT_BITOUL (dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t));
27411 +    if (dev->dev_ctxmap)
27412 +       KMEM_FREE (dev->dev_ctxmap, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t));
27413 +
27414 +    if (dev->dev_comqlowpri)
27415 +       elan4_sdram_free (dev, dev->dev_comqlowpri,     (1 << COMMAND_RUN_QUEUE_BITS));
27416 +    if (dev->dev_comqhighpri)
27417 +       elan4_sdram_free (dev, dev->dev_comqhighpri,    (1 << COMMAND_RUN_QUEUE_BITS));
27418 +    if (dev->dev_cqaddr)
27419 +       elan4_sdram_free (dev, dev->dev_cqaddr,         sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
27420 +    if (dev->dev_dmaqhighpri)
27421 +       elan4_sdram_free (dev, dev->dev_dmaqhighpri,    E4_QueueSize(elan4_dmaq_highpri_size));
27422 +    if (dev->dev_dmaqlowpri)
27423 +       elan4_sdram_free (dev, dev->dev_dmaqlowpri,     E4_QueueSize(elan4_dmaq_lowpri_size));
27424 +    if (dev->dev_threadqhighpri)
27425 +       elan4_sdram_free (dev, dev->dev_threadqhighpri, E4_QueueSize(elan4_threadq_highpri_size));
27426 +    if (dev->dev_threadqlowpri)
27427 +       elan4_sdram_free (dev, dev->dev_threadqlowpri,  E4_QueueSize(elan4_threadq_lowpri_size));
27428 +    if (dev->dev_interruptq)
27429 +       elan4_sdram_free (dev, dev->dev_interruptq,     E4_QueueSize(elan4_interruptq_size));
27430 +    
27431 +    if (dev->dev_ctxtable)
27432 +       elan4_sdram_free (dev, dev->dev_ctxtable,       (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock));
27433 +    if (dev->dev_faultarea)
27434 +       elan4_sdram_free (dev, dev->dev_faultarea,      CUN_Entries * sizeof (E4_FaultSave));
27435 +    if (dev->dev_inputtraparea)
27436 +       elan4_sdram_free (dev, dev->dev_inputtraparea,  sizeof (E4_IprocTrapState));
27437 +
27438 +    if (dev->dev_sdrampages[0])
27439 +       elan4_sdram_free (dev, dev->dev_sdrampages[0],  SDRAM_PAGE_SIZE);
27440 +    if (dev->dev_sdrampages[1])
27441 +       elan4_sdram_free (dev, dev->dev_sdrampages[1],  SDRAM_PAGE_SIZE);
27442 +
27443 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
27444 +       if (dev->dev_sdram_banks[i].b_ioaddr)
27445 +               elan4_sdram_fini_bank (dev, &dev->dev_sdram_banks[i]);
27446 +
27447 +    elan4_pcifini (dev);
27448 +
27449 +    dev->dev_state = ELAN4_STATE_STOPPED;
27450 +
27451 +    if (dev->dev_ack_errors)
27452 +        kfree(dev->dev_ack_errors);
27453 +    if (dev->dev_dproc_timeout)
27454 +        kfree(dev->dev_dproc_timeout);
27455 +    if (dev->dev_cproc_timeout)
27456 +        kfree(dev->dev_cproc_timeout);
27457 +}
27458 +
27459 +static __inline__ int
27460 +compute_arity (int lvl, unsigned n, char *arity)
27461 +{
27462 +    if (arity[lvl] == 0)
27463 +    {
27464 +       if (n <= 8)
27465 +           arity[lvl] = n;
27466 +       else
27467 +           arity[lvl] = 4;
27468 +    }
27469 +
27470 +    return (arity[lvl]);
27471 +}
27472 +
27473 +int
27474 +elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned arityval)
27475 +{
27476 +    int i, lvl, n;
27477 +    char arity[ELAN_MAX_LEVELS];
27478 +
27479 +    if (nodeid >= numnodes)
27480 +       return -EINVAL;
27481 +
27482 +    for (i = 0; i < ELAN_MAX_LEVELS; i++, arityval >>= 4)
27483 +       arity[i] = arityval & 7;
27484 +    
27485 +    for (lvl = 0, n = numnodes; n > compute_arity(lvl, n, arity) && lvl < ELAN_MAX_LEVELS; lvl++)
27486 +    {
27487 +       if ((n % arity[lvl]) != 0)
27488 +           return -EINVAL;
27489 +       
27490 +       n /= arity[lvl];
27491 +    }
27492 +
27493 +    if (arity[lvl] != n)
27494 +       return -EINVAL;
27495 +
27496 +    for (i = 0; i <= lvl; i++)
27497 +       pos->pos_arity[i] = arity[lvl - i];
27498 +
27499 +    pos->pos_nodes  = numnodes;
27500 +    pos->pos_levels = lvl + 1;
27501 +    pos->pos_nodeid = nodeid;
27502 +    pos->pos_mode   = ELAN_POS_MODE_SWITCHED;
27503 +
27504 +    return 0;
27505 +}
27506 +
27507 +int
27508 +elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos)
27509 +{
27510 +    kmutex_lock (&dev->dev_lock);
27511 +    *pos = dev->dev_position;
27512 +    kmutex_unlock (&dev->dev_lock);
27513 +
27514 +    return (pos->pos_mode);
27515 +}
27516 +
27517 +int
27518 +elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos)
27519 +{
27520 +    int forceLocal = 0;
27521 +    int nnodes, i;
27522 +    unsigned int *ack_errors;
27523 +    unsigned int *dproc_timeout;
27524 +    unsigned int *cproc_timeout;
27525 +
27526 +    switch (pos->pos_mode)
27527 +    {
27528 +    case ELAN_POS_UNKNOWN:
27529 +       break;
27530 +       
27531 +    case ELAN_POS_MODE_SWITCHED:
27532 +       if (pos->pos_levels > ELAN_MAX_LEVELS)
27533 +           return (-EINVAL);
27534 +       
27535 +       for (i = 0, nnodes = 1; i < pos->pos_levels; i++)
27536 +       {
27537 +
27538 +           if (pos->pos_arity[i] <= 0 || (i == 0 ? pos->pos_arity[i] > 8 : pos->pos_arity[i] >= 8))  /* allow an 8 way top-switch */
27539 +               return (-EINVAL);
27540 +           
27541 +           nnodes *= pos->pos_arity[i];
27542 +       }
27543 +
27544 +       if (pos->pos_nodes > nnodes || pos->pos_nodeid >= pos->pos_nodes)
27545 +           return (-EINVAL);
27546 +       break;
27547 +       
27548 +    case ELAN_POS_MODE_LOOPBACK:
27549 +       if (pos->pos_levels != 1 || pos->pos_nodes != 1 || pos->pos_nodeid != 0 || pos->pos_arity[0] != 1)
27550 +           return (-EINVAL);
27551 +
27552 +       forceLocal = 1;
27553 +       break;
27554 +
27555 +    case ELAN_POS_MODE_BACKTOBACK:
27556 +       if (pos->pos_levels != 1 || pos->pos_nodes != 2 || pos->pos_nodeid >= 2 || pos->pos_arity[0] != 2)
27557 +           return (-EINVAL);
27558 +
27559 +       forceLocal = (pos->pos_nodeid == 0);
27560 +       break;
27561 +
27562 +    default:
27563 +       return (-EINVAL);
27564 +    }
27565 +
27566 +    ack_errors = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
27567 +    if (!ack_errors)
27568 +       return (-EINVAL);
27569 +    memset(ack_errors, 0, pos->pos_nodes * sizeof(unsigned int));
27570 +    dproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
27571 +    if (!dproc_timeout) 
27572 +    {
27573 +        kfree(ack_errors);
27574 +        return (-EINVAL);
27575 +    }
27576 +    memset(dproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
27577 +    cproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
27578 +    if (!cproc_timeout)
27579 +    {
27580 +        kfree(ack_errors);
27581 +        kfree(dproc_timeout);
27582 +        return (-EINVAL);
27583 +    }
27584 +    memset(cproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
27585 +       
27586 +    kmutex_lock (&dev->dev_lock);
27587 +    dev->dev_position = *pos;
27588 +    dev->dev_ack_errors = ack_errors;
27589 +    dev->dev_dproc_timeout = dproc_timeout;
27590 +    dev->dev_cproc_timeout = cproc_timeout;
27591 +
27592 +    if (forceLocal)
27593 +       write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) | LCONT_FORCE_COMMSCLK_LOCAL);
27594 +    else
27595 +       write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) & ~LCONT_FORCE_COMMSCLK_LOCAL);
27596 +
27597 +    pioflush_reg (dev);
27598 +    kmutex_unlock (&dev->dev_lock);
27599 +
27600 +    return (0);
27601 +}
27602 +
27603 +void
27604 +elan4_get_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask)
27605 +{
27606 +    kmutex_lock (&dev->dev_lock);
27607 +
27608 +    *mask = dev->dev_devinfo.dev_params_mask;
27609 +    memcpy (params, &dev->dev_devinfo.dev_params, sizeof (ELAN_PARAMS));
27610 +    
27611 +    kmutex_unlock (&dev->dev_lock);
27612 +}
27613 +
27614 +void
27615 +elan4_set_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask)
27616 +{      
27617 +    int i;
27618 +
27619 +    kmutex_lock (&dev->dev_lock);
27620 +    for (i = 0; i < ELAN4_PARAM_COUNT; i++)
27621 +       if (mask & (1 << i))
27622 +           dev->dev_devinfo.dev_params.values[i] = params->values[i];
27623 +    
27624 +    dev->dev_devinfo.dev_params_mask |= mask;
27625 +    kmutex_unlock (&dev->dev_lock);
27626 +}
27627 +
27628 +
27629 +EXPORT_SYMBOL(elan4_get_position);
27630 +EXPORT_SYMBOL(elan4_set_position);
27631 +
27632 +EXPORT_SYMBOL(elan4_queue_haltop);
27633 +EXPORT_SYMBOL(elan4_queue_dma_flushop);
27634 +EXPORT_SYMBOL(elan4_queue_mainintop);
27635 +
27636 +EXPORT_SYMBOL(elan4_insertctxt);
27637 +EXPORT_SYMBOL(elan4_removectxt);
27638 +
27639 +EXPORT_SYMBOL(elan4_attach_filter);
27640 +EXPORT_SYMBOL(elan4_detach_filter);
27641 +EXPORT_SYMBOL(elan4_set_filter);
27642 +EXPORT_SYMBOL(elan4_set_routetable);
27643 +
27644 +EXPORT_SYMBOL(elan4_alloccq);
27645 +EXPORT_SYMBOL(elan4_freecq);
27646 +EXPORT_SYMBOL(elan4_restartcq);
27647 +
27648 +EXPORT_SYMBOL(elan4_flush_icache);
27649 +
27650 +/*
27651 + * Local variables:
27652 + * c-file-style: "stroustrup"
27653 + * End:
27654 + */
27655 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/device_Linux.c
27656 ===================================================================
27657 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/device_Linux.c       2004-02-23 16:02:56.000000000 -0500
27658 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/device_Linux.c    2005-07-28 14:52:52.828681776 -0400
27659 @@ -0,0 +1,2760 @@
27660 +/*
27661 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
27662 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
27663 + * 
27664 + *    For licensing information please see the supplied COPYING file
27665 + *
27666 + */
27667 +
27668 +#ident "@(#)$Id: device_Linux.c,v 1.74.6.20 2005/03/10 11:30:01 david Exp $"
27669 +/*      $Source: /cvs/master/quadrics/elan4mod/device_Linux.c,v $*/
27670 +
27671 +#include <qsnet/kernel.h>
27672 +#include <qsnet/kthread.h>
27673 +#include <qsnet/kpte.h>
27674 +
27675 +#include <asm/io.h>
27676 +#include <asm/irq.h>
27677 +#ifdef CONFIG_MTRR
27678 +#include <asm/mtrr.h>
27679 +#endif
27680 +
27681 +#include <linux/init.h>
27682 +#include <linux/pci.h>
27683 +#include <linux/module.h>
27684 +#include <linux/reboot.h>
27685 +#include <linux/notifier.h>
27686 +
27687 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
27688 +#include <linux/wrapper.h>
27689 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
27690 +typedef void irqreturn_t;
27691 +#endif
27692 +#       define IRQ_NONE
27693 +#       define IRQ_HANDLED
27694 +#endif
27695 +
27696 +#include <elan4/debug.h>
27697 +#include <elan4/device.h>
27698 +#include <elan4/user.h>
27699 +#include <elan4/ioctl.h>
27700 +#include <elan4/intcookie.h>
27701 +
27702 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
27703 +#error please use a 2.4.0 series kernel or newer
27704 +#endif
27705 +
27706 +
27707 +#if defined(LINUX_SPARC) || defined(LINUX_PPC64)
27708 +#define __io_remap_page_range(from,offset,size,prot)   remap_page_range(from,offset,size,prot)
27709 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
27710 +#elif defined(NO_RMAP)
27711 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(from,offset,size,prot)
27712 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
27713 +#else
27714 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(vma,from,offset,size,prot)
27715 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(vma,from,offset,size,prot)
27716 +#endif
27717 +
27718 +static unsigned int pat_pteval = -1;
27719 +
27720 +#ifndef pgprot_noncached
27721 +static inline pgprot_t pgprot_noncached(pgprot_t _prot)
27722 +{
27723 +       unsigned long prot = pgprot_val(_prot);
27724 +#if defined(__powerpc__)
27725 +       prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
27726 +#elif defined(__sparc__)
27727 +       prot &= ~(_PAGE_CACHE);
27728 +       prot |= _PAGE_IE;
27729 +#endif
27730 +
27731 +       return __pgprot(prot);
27732 +}
27733 +#endif
27734 +
27735 +#ifndef pgprot_writecombine
27736 +static inline pgprot_t pgprot_writecombine (pgprot_t _prot)
27737 +{
27738 +    unsigned long prot = pgprot_val(_prot);
27739 +
27740 +    if (pat_pteval != -1)
27741 +       prot = (prot & ~(_PAGE_PCD | _PAGE_PWT | _PAGE_PSE)) | pat_pteval;
27742 +
27743 +    return __pgprot (prot);
27744 +}
27745 +#endif
27746 +
27747 +#define ELAN4_DRIVER_VERSION           0x103           /* 16 bit value */
27748 +
27749 +/*
27750 + * Function prototypes.
27751 + */
27752 +static int        elan4_attach_device (int instance, struct pci_dev *pdev);
27753 +static void       elan4_detach_device (ELAN4_DEV *dev);
27754 +
27755 +static int        elan4_open (struct inode *inode, struct file *file);
27756 +static int        elan4_release(struct inode *inode, struct file *file);
27757 +static int        elan4_ioctl (struct inode *inode, struct file *file, 
27758 +                               unsigned int cmd, unsigned long arg);
27759 +static int        elan4_mmap (struct file *file, struct vm_area_struct *vm_area);
27760 +
27761 +static irqreturn_t elan4_irq (int irq, void *arg, struct pt_regs *regs);
27762 +
27763 +static void        elan4_shutdown_devices(int panicing);
27764 +
27765 +static int      disabled;                                      /* bitmask of which devices not to start */
27766 +unsigned int   elan4_pll_cfg      = 0;
27767 +int            elan4_pll_div      = 31;                        /* RevC PCB */
27768 +int            elan4_mod45disable = 0;
27769 +static int      optimise_pci_bus   = 1;                                /* 0 => don't, 1 => if ok, 2 => always */
27770 +static int      default_features   = 0;                                /* default values for dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] */
27771 +
27772 +long long       sdram_cfg = SDRAM_STARTUP_VALUE;
27773 +static int      sdram_cfg_lo;
27774 +static int     sdram_cfg_hi;
27775 +int            sdram_bank_limit;
27776 +
27777 +MODULE_AUTHOR("Quadrics Ltd.");
27778 +MODULE_DESCRIPTION("Elan 4 Device Driver");
27779 +MODULE_LICENSE("GPL");
27780 +
27781 +MODULE_PARM(elan4_debug, "i");
27782 +MODULE_PARM(elan4_debug_toconsole, "i");
27783 +MODULE_PARM(elan4_debug_tobuffer, "i");
27784 +MODULE_PARM(elan4_debug_mmu, "i");
27785 +MODULE_PARM(elan4_pll_cfg, "i");
27786 +MODULE_PARM(elan4_pll_div, "i");
27787 +MODULE_PARM(elan4_mod45disable, "i");
27788 +MODULE_PARM(optimise_pci_bus, "i");
27789 +MODULE_PARM(default_features, "i");
27790 +
27791 +MODULE_PARM(disabled, "i");
27792 +MODULE_PARM(sdram_cfg_lo, "i");
27793 +MODULE_PARM(sdram_cfg_hi, "i");
27794 +MODULE_PARM(sdram_bank_limit, "i");
27795 +
27796 +MODULE_PARM(elan4_hash_0_size_val, "i");
27797 +MODULE_PARM(elan4_hash_1_size_val, "i");
27798 +MODULE_PARM(elan4_ctxt_table_shift, "i");
27799 +MODULE_PARM(elan4_ln2_max_cqs, "i");
27800 +MODULE_PARM(elan4_dmaq_highpri_size, "i");
27801 +MODULE_PARM(elan4_threadq_highpri_size, "i");
27802 +MODULE_PARM(elan4_dmaq_lowpri_size, "i");
27803 +MODULE_PARM(elan4_threadq_lowpri_size, "i");
27804 +MODULE_PARM(elan4_interruptq_size, "i");
27805 +
27806 +MODULE_PARM(elan4_mainint_punt_loops, "i");
27807 +MODULE_PARM(elan4_mainint_resched_ticks, "i");
27808 +MODULE_PARM(elan4_linkport_lock, "i");
27809 +MODULE_PARM(elan4_eccerr_recheck, "i");
27810 +
27811 +MODULE_PARM(user_p2p_route_options, "i");
27812 +MODULE_PARM(user_bcast_route_options, "i");
27813 +MODULE_PARM(user_dproc_retry_count, "i");
27814 +MODULE_PARM(user_cproc_retry_count, "i");
27815 +
27816 +/*
27817 + * Standard device entry points.
27818 + */
27819 +static struct file_operations elan4_fops = {
27820 +    ioctl:   elan4_ioctl,
27821 +    mmap:    elan4_mmap,
27822 +    open:    elan4_open,
27823 +    release: elan4_release,
27824 +};
27825 +
27826 +ELAN4_DEV *elan4_devices[ELAN4_MAX_CONTROLLER];
27827 +
27828 +#if defined(CONFIG_DEVFS_FS)
27829 +static devfs_handle_t devfs_handle;
27830 +#endif
27831 +
27832 +
27833 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
27834 +static int
27835 +elan4_ioctl32_cmds[] =
27836 +{      /* /dev/elan/control */
27837 +    ELAN4IO_DEVINFO,
27838 +    ELAN4IO_GET_POSITION,
27839 +    ELAN4IO_SET_POSITION,
27840 +    ELAN4IO_GET_PARAMS,
27841 +    ELAN4IO_SET_PARAMS,
27842 +
27843 +    /* /dev/elan4/user */
27844 +    ELAN4IO_POSITION,
27845 +    ELAN4IO_FREE,
27846 +    ELAN4IO_ATTACH,
27847 +    ELAN4IO_DETACH,
27848 +    ELAN4IO_BLOCK_INPUTTER,
27849 +
27850 +    ELAN4IO_ADD_P2PVP,
27851 +    ELAN4IO_ADD_BCASTVP,
27852 +    ELAN4IO_REMOVEVP,
27853 +    ELAN4IO_SET_ROUTE,
27854 +    ELAN4IO_RESET_ROUTE,
27855 +    ELAN4IO_GET_ROUTE,
27856 +    ELAN4IO_CHECK_ROUTE,
27857 +
27858 +    ELAN4IO_ALLOCCQ,
27859 +    ELAN4IO_FREECQ,
27860 +    ELAN4IO_SETPERM32,
27861 +    ELAN4IO_CLRPERM32,
27862 +    ELAN4IO_TRAPSIG,
27863 +    ELAN4IO_TRAPHANDLER32,
27864 +    ELAN4IO_REQUIRED_MAPPINGS,
27865 +       
27866 +    ELAN4IO_RESUME_EPROC_TRAP,
27867 +    ELAN4IO_RESUME_CPROC_TRAP,
27868 +    ELAN4IO_RESUME_DPROC_TRAP,
27869 +    ELAN4IO_RESUME_TPROC_TRAP,
27870 +    ELAN4IO_RESUME_IPROC_TRAP,
27871 +
27872 +    ELAN4IO_FLUSH_ICACHE,
27873 +
27874 +    ELAN4IO_STOP_CTXT,
27875 +
27876 +    ELAN4IO_ALLOC_INTCOOKIE,
27877 +    ELAN4IO_FREE_INTCOOKIE,
27878 +    ELAN4IO_ARM_INTCOOKIE,
27879 +    ELAN4IO_WAIT_INTCOOKIE,
27880 +
27881 +    ELAN4IO_ALLOC_TRAP_QUEUES,
27882 +    ELAN4IO_NETERR_MSG,
27883 +    ELAN4IO_NETERR_TIMER,
27884 +    ELAN4IO_NETERR_FIXUP,
27885 +
27886 +    ELAN4IO_DUMPCQ32,
27887 +};
27888 +
27889 +static int      elan4_ioctl32 (unsigned int fd, unsigned int cmd, 
27890 +                              unsigned long arg, struct file *file);
27891 +#endif
27892 +
27893 +/*
27894 + * Standard device entry points.
27895 + */
27896 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
27897 +
27898 +#include <linux/dump.h>
27899 +
27900 +static int
27901 +elan4_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
27902 +{
27903 +    if (event == DUMP_BEGIN)
27904 +       elan4_shutdown_devices (FALSE);
27905 +
27906 +    return (NOTIFY_DONE);
27907 +}
27908 +static struct notifier_block elan4_dump_notifier = 
27909 +{
27910 +    notifier_call:     elan4_dump_event,
27911 +    priority:          0,
27912 +};
27913 +
27914 +#endif
27915 +
27916 +static int
27917 +elan4_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
27918 +{
27919 +    if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
27920 +       elan4_shutdown_devices (0);
27921 +
27922 +    return (NOTIFY_DONE);
27923 +}
27924 +
27925 +static struct notifier_block elan4_reboot_notifier = 
27926 +{
27927 +    notifier_call:     elan4_reboot_event,
27928 +    priority:          0,
27929 +};
27930 +
27931 +static int
27932 +elan4_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
27933 +{
27934 +    elan4_shutdown_devices (1);
27935 +
27936 +    return (NOTIFY_DONE);
27937 +}
27938 +
27939 +static struct notifier_block elan4_panic_notifier = 
27940 +{
27941 +    notifier_call:     elan4_panic_event,
27942 +    priority:          0,
27943 +};
27944 +
27945 +static int __init
27946 +elan4_init (void)
27947 +{
27948 +    int             err;
27949 +    struct pci_dev *pdev;
27950 +    int                    count;
27951 +#if defined(__ia64)
27952 +    int             seenRevA = 0;
27953 +#endif
27954 +    
27955 +    if ((err = register_chrdev (ELAN4_MAJOR, ELAN4_NAME, &elan4_fops)) < 0)
27956 +       return (err);
27957 +
27958 +#if defined(CONFIG_DEVFS_FS)
27959 +    devfs_handle = devfs_mk_dir (NULL, "elan4", NULL);
27960 +#endif
27961 +
27962 +    intcookie_init();
27963 +    elan4_debug_init();
27964 +    elan4_procfs_init();
27965 +    
27966 +#ifdef CONFIG_MPSAS
27967 +    sas_init();
27968 +#endif
27969 +
27970 +    if (sdram_cfg_lo != 0 && sdram_cfg_hi != 0)
27971 +       sdram_cfg = (((unsigned long long) sdram_cfg_hi) << 32) | ((unsigned long long) sdram_cfg_lo);
27972 +
27973 +    for (count = 0, pdev = NULL; (pdev = pci_find_device(PCI_VENDOR_ID_QUADRICS, PCI_DEVICE_ID_ELAN4, pdev)) != NULL ; count++)
27974 +    {
27975 +#if defined(__ia64)
27976 +       unsigned char revid;
27977 +       
27978 +       pci_read_config_byte (pdev, PCI_REVISION_ID, &revid);
27979 +
27980 +       if (revid == PCI_REVISION_ID_ELAN4_REVA && seenRevA++ != 0 && pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL))
27981 +       {
27982 +           printk ("elan: only a single elan4a supported on rx2600\n");
27983 +           continue;
27984 +       }
27985 +#endif
27986 +
27987 +       if (count < ELAN4_MAX_CONTROLLER)
27988 +           elan4_attach_device (count, pdev);
27989 +    }
27990 +
27991 +    if (count >= ELAN4_MAX_CONTROLLER)
27992 +       printk ("elan: found %d elan4 devices - only support %d\n", count, ELAN4_MAX_CONTROLLER);
27993 +
27994 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
27995 +    lock_kernel();
27996 +    {
27997 +       extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
27998 +       register int i;
27999 +       for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++)
28000 +           register_ioctl32_conversion (elan4_ioctl32_cmds[i], elan4_ioctl32);
28001 +    }
28002 +    unlock_kernel();
28003 +#endif
28004 +
28005 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
28006 +    register_dump_notifier (&elan4_dump_notifier);
28007 +#endif
28008 +    register_reboot_notifier (&elan4_reboot_notifier);
28009 +
28010 +#if !defined(NO_PANIC_NOTIFIER)
28011 +    notifier_chain_register (&panic_notifier_list, &elan4_panic_notifier);
28012 +#endif
28013 +
28014 +    return (0);
28015 +}
28016 +
28017 +#ifdef MODULE
28018 +static void __exit
28019 +elan4_exit (void)
28020 +{
28021 +    int i;
28022 +
28023 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
28024 +    lock_kernel();
28025 +    {
28026 +       extern void unregister_ioctl32_conversion(unsigned int cmd);
28027 +
28028 +       for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++)
28029 +           unregister_ioctl32_conversion (elan4_ioctl32_cmds[i]);
28030 +    }
28031 +    unlock_kernel();
28032 +#endif
28033 +
28034 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
28035 +    unregister_dump_notifier (&elan4_dump_notifier);
28036 +#endif
28037 +    unregister_reboot_notifier (&elan4_reboot_notifier);
28038 +
28039 +#if !defined(NO_PANIC_NOTIFIER)
28040 +    notifier_chain_unregister (&panic_notifier_list, &elan4_panic_notifier);
28041 +#endif
28042 +
28043 +    for (i = 0; i < ELAN4_MAX_CONTROLLER; i++)
28044 +       if (elan4_devices[i] != NULL)
28045 +           elan4_detach_device (elan4_devices[i]);
28046 +    
28047 +    elan4_procfs_fini();
28048 +    elan4_debug_fini();
28049 +    intcookie_fini();
28050 +
28051 +#if defined(CONFIG_DEVFS_FS)
28052 +    devfs_unregister (devfs_handle);
28053 +#endif
28054 +
28055 +    unregister_chrdev(ELAN4_MAJOR, ELAN4_NAME);
28056 +}
28057 +
28058 +module_init (elan4_init);
28059 +module_exit (elan4_exit);
28060 +
28061 +#else
28062 +__initcall (elan4_init);
28063 +#endif
28064 +
28065 +/*
28066 + * Minor numbers encoded as :
28067 + *   [5:0]     device number
28068 + *   [15:6]    function number
28069 + */
28070 +#define ELAN4_DEVICE_MASK      0x3F
28071 +#define ELAN4_DEVICE(inode)    (MINOR((inode)->i_rdev) & ELAN4_DEVICE_MASK)
28072 +
28073 +#define ELAN4_MINOR_CONTROL    0
28074 +#define ELAN4_MINOR_MEM                1
28075 +#define ELAN4_MINOR_USER       2
28076 +
28077 +#define ELAN4_MINOR_SHIFT      6
28078 +#define ELAN4_MINOR(inode)     (MINOR((inode)->i_rdev) >> ELAN4_MINOR_SHIFT)
28079 +
28080 +/*
28081 + * Called by init_module() for each card discovered on PCI.
28082 + */
28083 +static int
28084 +elan4_attach_device (int instance, struct pci_dev *pdev)
28085 +{
28086 +    ELAN4_DEV *dev;
28087 +    int res;
28088 +
28089 +    KMEM_ALLOC (dev, ELAN4_DEV *, sizeof (ELAN4_DEV), 1);
28090 +    if ((dev == NULL))
28091 +       return (-ENOMEM);
28092 +    memset (dev, 0, sizeof (ELAN4_DEV));
28093 +
28094 +    /* setup os dependent section of ELAN4_DEV */
28095 +    dev->dev_instance   = instance;
28096 +    dev->dev_osdep.pdev = pdev;
28097 +
28098 +    /* initialise the devinfo */
28099 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_VENDOR_ID,   &dev->dev_devinfo.dev_vendor_id);
28100 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_DEVICE_ID,   &dev->dev_devinfo.dev_device_id);
28101 +    pci_read_config_byte (dev->dev_osdep.pdev, PCI_REVISION_ID, &dev->dev_devinfo.dev_revision_id);
28102 +
28103 +    dev->dev_devinfo.dev_rail                                        = instance;
28104 +    dev->dev_devinfo.dev_driver_version                              = ELAN4_DRIVER_VERSION;
28105 +    dev->dev_devinfo.dev_num_down_links_value                        = 0;
28106 +    dev->dev_devinfo.dev_params_mask                                 = (1 << ELAN4_PARAM_DRIVER_FEATURES);
28107 +    dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES]  = default_features;
28108 +
28109 +    dev->dev_position.pos_mode = ELAN_POS_UNKNOWN;
28110 +
28111 +    /* initialise the data structures and map the device */
28112 +    if ((res = elan4_initialise_device (dev)) != 0)
28113 +    {
28114 +       kfree (dev);
28115 +       return res;
28116 +    }
28117 +
28118 +    /* add the interrupt handler */
28119 +    if (request_irq (pdev->irq, elan4_irq, SA_SHIRQ, "elan4", dev) != 0)
28120 +    {
28121 +       elan4_finalise_device (dev);
28122 +       KMEM_FREE (dev, sizeof(*dev));
28123 +       return -ENXIO;
28124 +    }
28125 +
28126 +    if (pci_request_regions(dev->dev_osdep.pdev, "elan4"))
28127 +    {
28128 +       free_irq (dev->dev_osdep.pdev->irq, dev);
28129 +       KMEM_FREE (dev, sizeof(*dev));
28130 +       return -ENODEV;
28131 +    }
28132 +
28133 +#if defined(CONFIG_DEVFS_FS)
28134 +    {
28135 +       char name[16];
28136 +       
28137 +       sprintf (name, "control%d", dev->dev_instance);
28138 +       dev->dev_osdep.devfs_control = devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
28139 +                                                     dev->dev_instance | (ELAN4_MINOR_CONTROL << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR | S_IWUSR, 
28140 +                                                     &elan4_fops, NULL);
28141 +       sprintf (name, "sdram%d", dev->dev_instance);
28142 +       dev->dev_osdep.devfs_sdram =  devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
28143 +                                                    dev->dev_instance | (ELAN4_MINOR_MEM << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH,
28144 +                                                    &elan4_fops, NULL);
28145 +       sprintf (name, "user%d", dev->dev_instance);
28146 +       dev->dev_osdep.devfs_user =  devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
28147 +                                                   dev->dev_instance | (ELAN4_MINOR_USER << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH,
28148 +                                                   &elan4_fops, NULL);
28149 +    }
28150 +#endif
28151 +
28152 +    /* add the procfs entry */
28153 +    elan4_procfs_device_init (dev);
28154 +
28155 +    /* allow the device to be referenced now */
28156 +    elan4_devices[instance] = dev;
28157 +
28158 +    if ((disabled & (1 << instance)) == 0)
28159 +    {
28160 +       if (elan4_start_device (dev) != 0)
28161 +       {
28162 +           printk ("elan%d: auto-start of device failed\n", dev->dev_instance);
28163 +
28164 +           elan4_detach_device (dev);
28165 +           return (-ENXIO);
28166 +       }
28167 +       
28168 +       dev->dev_state = ELAN4_STATE_STARTED;
28169 +    }
28170 +
28171 +#if defined (__sparc)
28172 +    printk ("elan%d: at pci %s (irq = %s)\n", instance, pdev->slot_name, __irq_itoa(pdev->irq));
28173 +#else
28174 +    printk ("elan%d: at pci %s (irq = %d)\n", instance, pdev->slot_name, pdev->irq);
28175 +#endif
28176 +
28177 +    return (0);
28178 +}
28179 +
28180 +/*
28181 + * Called by cleanup_module() for each board found on PCI.
28182 + */
28183 +static void
28184 +elan4_detach_device (ELAN4_DEV *dev)
28185 +{
28186 +    /* stop the chip and free of resources */
28187 +    if (dev->dev_state == ELAN4_STATE_STARTED)
28188 +       elan4_stop_device (dev);
28189 +    
28190 +    elan4_devices[dev->dev_instance] = NULL;
28191 +
28192 +#if defined(CONFIG_DEVFS_FS)
28193 +    devfs_unregister (dev->dev_osdep.devfs_control);
28194 +    devfs_unregister (dev->dev_osdep.devfs_sdram);
28195 +    devfs_unregister (dev->dev_osdep.devfs_user);
28196 +#endif
28197 +
28198 +    /* release the address space */
28199 +    pci_release_regions (dev->dev_osdep.pdev);
28200 +
28201 +    /* release the interrupt */
28202 +    free_irq (dev->dev_osdep.pdev->irq, dev);
28203 +
28204 +    /* remove the procfs entry */
28205 +    elan4_procfs_device_fini (dev);
28206 +
28207 +    /* unmap the device and finalise the data structures */
28208 +    elan4_finalise_device (dev);
28209 +    
28210 +    KMEM_FREE (dev, sizeof(*dev));
28211 +}
28212 +
28213 +/*
28214 + * Maintain reference counts on the device
28215 + */
28216 +ELAN4_DEV *
28217 +elan4_reference_device (int instance, int state)
28218 +{
28219 +    ELAN4_DEV *dev = elan4_devices[instance];
28220 +
28221 +    if (dev == NULL)
28222 +       return (NULL);
28223 +
28224 +    kmutex_lock (&dev->dev_lock);
28225 +
28226 +    if ((dev->dev_state & state) == 0)
28227 +    {
28228 +       kmutex_unlock (&dev->dev_lock);
28229 +       return (NULL);
28230 +    }
28231 +
28232 +    dev->dev_references++;
28233 +    kmutex_unlock (&dev->dev_lock);
28234 +
28235 +#ifdef MODULE
28236 +    MOD_INC_USE_COUNT;
28237 +#endif
28238 +
28239 +#ifdef CONFIG_MPSAS
28240 +    sas_set_position(dev);
28241 +#endif
28242 +
28243 +    return (dev);
28244 +}
28245 +
28246 +void
28247 +elan4_dereference_device (ELAN4_DEV *dev)
28248 +{
28249 +    kmutex_lock (&dev->dev_lock);
28250 +    dev->dev_references--;
28251 +    kmutex_unlock (&dev->dev_lock);
28252 +
28253 +#ifdef MODULE
28254 +    MOD_DEC_USE_COUNT;
28255 +#endif
28256 +}
28257 +
28258 +static void
28259 +elan4_shutdown_devices(int panicing)
28260 +{
28261 +    ELAN4_DEV *dev;
28262 +    unsigned long flags;
28263 +    register int i;
28264 +
28265 +    local_irq_save (flags);
28266 +    for (i = 0; i < ELAN4_MAX_CONTROLLER; i++)
28267 +    {
28268 +       if ((dev = elan4_devices[i]) != NULL)
28269 +       {
28270 +           printk(KERN_INFO "elan%d: forcing link into reset\n", dev->dev_instance);
28271 +
28272 +           /* set the inputters to discard everything */
28273 +           if (! panicing) spin_lock (&dev->dev_haltop_lock);
28274 +
28275 +           if (dev->dev_discard_lowpri_count++ == 0)
28276 +               elan4_set_schedstatus (dev, 0);
28277 +           if (dev->dev_discard_highpri_count++ == 0)
28278 +               elan4_set_schedstatus (dev, 0);
28279 +
28280 +           if (! panicing) spin_unlock (&dev->dev_haltop_lock);
28281 +
28282 +           /* ideally we'd like to halt all the outputters too,
28283 +            * however this will prevent the kernel comms flushing
28284 +            * to work correctly .....
28285 +            */
28286 +       }
28287 +    }
28288 +    local_irq_restore (flags);
28289 +}
28290 +
28291 +/*
28292 + * /dev/elan4/controlX - control device
28293 + *
28294 + */
28295 +static int
28296 +control_open (struct inode *inode, struct file *file)
28297 +{
28298 +    ELAN4_DEV       *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STOPPED | ELAN4_STATE_STARTED);
28299 +    CONTROL_PRIVATE *pr;
28300 +    
28301 +    if (dev == NULL)
28302 +       return (-ENXIO);
28303 +    
28304 +    KMEM_ALLOC (pr, CONTROL_PRIVATE *, sizeof (CONTROL_PRIVATE), 1);
28305 +    if ((pr == NULL))
28306 +    {
28307 +       elan4_dereference_device (dev);
28308 +       
28309 +       return (-ENOMEM);
28310 +    }
28311 +
28312 +    PRINTF (DBG_USER, DBG_FILE, "control_open: dev=%p pr=%p\n", dev, pr);
28313 +
28314 +    pr->pr_dev           = dev;
28315 +    pr->pr_boundary_scan = 0;
28316 +
28317 +    file->private_data = (void *) pr;
28318 +
28319 +    return (0);
28320 +}
28321 +
28322 +static int
28323 +control_release (struct inode *inode, struct file *file)
28324 +{
28325 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
28326 +    ELAN4_DEV       *dev = pr->pr_dev;
28327 +
28328 +    PRINTF (DBG_DEVICE, DBG_FILE, "control_release: pr=%p\n", pr);
28329 +
28330 +    //if (pr->pr_boundary_scan)
28331 +    //    elan4_clear_boundary_scan (dev, pr);
28332 +
28333 +    elan4_dereference_device (dev);
28334 +
28335 +    KMEM_FREE (pr, sizeof(*pr));
28336 +
28337 +    return (0);
28338 +}
28339 +
28340 +static int
28341 +control_ioctl (struct inode *inode, struct file *file, 
28342 +                    unsigned int cmd, unsigned long arg)
28343 +{
28344 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
28345 +
28346 +    PRINTF (DBG_DEVICE, DBG_FILE, "control_ioctl: cmd=%x arg=%lx\n", cmd, arg);
28347 +
28348 +    switch (cmd)
28349 +    {
28350 +    case ELAN4IO_DEVINFO:
28351 +       if (copy_to_user ((void *) arg, &pr->pr_dev->dev_devinfo, sizeof (ELAN_DEVINFO)))
28352 +           return (-EFAULT);
28353 +       return (0);
28354 +
28355 +    case ELAN4IO_GET_POSITION:
28356 +    {
28357 +       ELAN_POSITION pos;
28358 +
28359 +       elan4_get_position (pr->pr_dev, &pos);
28360 +
28361 +       if (copy_to_user ((void *) arg, &pos, sizeof (ELAN_POSITION)))
28362 +           return (-EFAULT);
28363 +
28364 +       return (0);
28365 +    }
28366 +
28367 +    case ELAN4IO_SET_POSITION:
28368 +    {
28369 +       ELAN_POSITION pos;
28370 +
28371 +       if (copy_from_user (&pos, (void *) arg, sizeof (ELAN_POSITION)))
28372 +           return (-EFAULT);
28373 +       
28374 +       return (elan4_set_position (pr->pr_dev, &pos));
28375 +    }
28376 +
28377 +    case ELAN4IO_OLD_GET_PARAMS:
28378 +    {
28379 +       ELAN_PARAMS params;
28380 +       unsigned short mask;
28381 +
28382 +       elan4_get_params (pr->pr_dev, &params, &mask);
28383 +
28384 +       if (copy_to_user ((void *) arg, &params, sizeof (ELAN_PARAMS)))
28385 +           return (-EFAULT);
28386 +
28387 +       return (0);
28388 +    }
28389 +
28390 +    case ELAN4IO_OLD_SET_PARAMS:
28391 +    {
28392 +       ELAN_PARAMS params;
28393 +
28394 +       if (copy_from_user (&params, (void *) arg, sizeof (ELAN_PARAMS)))
28395 +           return (-EFAULT);
28396 +       
28397 +       elan4_set_params (pr->pr_dev, &params, 3);
28398 +       
28399 +       return (0);
28400 +    }
28401 +
28402 +    case ELAN4IO_SET_PARAMS:
28403 +    {
28404 +       ELAN4IO_PARAMS_STRUCT args;
28405 +
28406 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PARAMS_STRUCT)))
28407 +           return (-EFAULT);
28408 +       
28409 +       elan4_set_params (pr->pr_dev, &args.p_params, args.p_mask);
28410 +       
28411 +       return (0);
28412 +    }
28413 +
28414 +    case ELAN4IO_GET_PARAMS:
28415 +    {
28416 +       ELAN4IO_PARAMS_STRUCT args;
28417 +
28418 +       elan4_get_params (pr->pr_dev, &args.p_params, &args.p_mask);
28419 +
28420 +       if (copy_to_user ((void *) arg, &args, sizeof (ELAN_PARAMS)))
28421 +           return (-EFAULT);
28422 +
28423 +       return (0);
28424 +    }
28425 +    }
28426 +
28427 +    return (-EINVAL);
28428 +}
28429 +
28430 +static int
28431 +control_mmap (struct file *file, struct vm_area_struct *vma)
28432 +{
28433 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
28434 +    unsigned        bar = OFF_TO_BAR (vma->vm_pgoff << PAGE_SHIFT);
28435 +    unsigned long    off = OFF_TO_OFFSET (vma->vm_pgoff << PAGE_SHIFT);
28436 +    long            len = vma->vm_end - vma->vm_start;
28437 +
28438 +    PRINTF (DBG_USER, DBG_FILE, "control_mmap: pr=%p bar=%x off=%x\n", pr, bar, off);
28439 +
28440 +    /* check bar number and translate the standard pseudo bars */
28441 +    switch (bar)
28442 +    {
28443 +    case ELAN4_BAR_SDRAM:
28444 +    case ELAN4_BAR_REGISTERS:
28445 +       break;
28446 +
28447 +    default:
28448 +       return (-EINVAL);
28449 +    }
28450 +
28451 +    if (off < 0 || (off + len) > pci_resource_len (pr->pr_dev->dev_osdep.pdev, bar))
28452 +       return (-EINVAL);
28453 +
28454 +    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
28455 +
28456 +    if (__io_remap_page_range (vma->vm_start, pci_resource_start (pr->pr_dev->dev_osdep.pdev, bar) + off, len, vma->vm_page_prot))
28457 +       return (-EAGAIN);
28458 +
28459 +    return (0);
28460 +}
28461 +
28462 +/*
28463 + * /dev/elan4/sdramX - sdram access device
28464 + */
28465 +static void 
28466 +mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
28467 +{
28468 +    PRINTF (DBG_USER, DBG_MEM, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
28469 +
28470 +    elan4_sdram_free (pr->pr_dev, pg->pg_addr, SDRAM_PAGE_SIZE);
28471 +
28472 +    KMEM_FREE(pg, sizeof(*pg));
28473 +}
28474 +
28475 +static MEM_PAGE *
28476 +mem_getpage (MEM_PRIVATE *pr, unsigned long pgoff)
28477 +{
28478 +    int       hashval = MEM_HASH (pgoff);
28479 +    MEM_PAGE *npg = NULL;
28480 +    MEM_PAGE *pg;
28481 +
28482 +    ASSERT ((pgoff & SDRAM_PGOFF_OFFSET) == 0);
28483 +
28484 +    PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx\n", pr, pgoff);
28485 +    
28486 + again:
28487 +    spin_lock (&pr->pr_lock);
28488 +    for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
28489 +       if (pg->pg_pgoff == pgoff)
28490 +           break;
28491 +    
28492 +    if (pg != NULL)
28493 +    {
28494 +       PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx -> found %p addr=%x\n", pr, pgoff, pg, pg->pg_addr);
28495 +
28496 +       pg->pg_ref++;
28497 +       spin_unlock (&pr->pr_lock);
28498 +
28499 +       if (npg != NULL)                                        /* we'd raced and someone else had created */
28500 +           mem_freepage (pr, npg);                             /* this page - so free off our new one */
28501 +       return (pg);
28502 +    }
28503 +    
28504 +    if (npg != NULL)                                           /* didn't find the page, so insert the */
28505 +    {                                                          /* new one we've just created */
28506 +       npg->pg_next = pr->pr_pages[hashval];
28507 +       pr->pr_pages[hashval] = npg;
28508 +       
28509 +       spin_unlock (&pr->pr_lock);
28510 +       return (npg);
28511 +    }
28512 +    
28513 +    spin_unlock (&pr->pr_lock);                                        /* drop spinlock before creating a new page */
28514 +    
28515 +    KMEM_ALLOC (npg, MEM_PAGE *, sizeof (MEM_PAGE), 1);
28516 +    if ((npg == NULL))
28517 +       return (NULL);
28518 +    
28519 +    if ((npg->pg_addr = elan4_sdram_alloc (pr->pr_dev, SDRAM_PAGE_SIZE)) == 0)
28520 +    {
28521 +       KMEM_FREE(npg, sizeof(*npg));
28522 +       return (NULL);
28523 +    }
28524 +
28525 +#ifndef CONFIG_MPSAS
28526 +    /* zero the page before returning it to the user */
28527 +    elan4_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, SDRAM_PAGE_SIZE);
28528 +#endif
28529 +    
28530 +    npg->pg_pgoff = pgoff;
28531 +    npg->pg_ref   = 1;
28532 +    
28533 +    /* created a new page - so have to rescan before inserting it */
28534 +    goto again;
28535 +}
28536 +
28537 +static void
28538 +mem_droppage (MEM_PRIVATE *pr, unsigned long pgoff, int dontfree)
28539 +{
28540 +    MEM_PAGE **ppg;
28541 +    MEM_PAGE  *pg;
28542 +
28543 +    spin_lock (&pr->pr_lock);
28544 +    for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
28545 +       if ((*ppg)->pg_pgoff == pgoff)
28546 +           break;
28547 +
28548 +    pg = *ppg;
28549 +
28550 +    ASSERT (*ppg != NULL);
28551 +
28552 +    PRINTF (DBG_USER, DBG_MEM, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
28553 +
28554 +    if (--pg->pg_ref == 0 && !dontfree)
28555 +    {
28556 +       *ppg = pg->pg_next;
28557 +
28558 +       mem_freepage (pr, pg);
28559 +    }
28560 +
28561 +    spin_unlock (&pr->pr_lock);
28562 +}
28563 +
28564 +static int
28565 +mem_open (struct inode *inode, struct file *file)
28566 +{
28567 +    ELAN4_DEV   *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED);
28568 +    MEM_PRIVATE *pr;
28569 +    register int i;
28570 +
28571 +    if (dev == NULL)
28572 +       return (-ENXIO);
28573 +
28574 +    KMEM_ALLOC (pr, MEM_PRIVATE *, sizeof (MEM_PRIVATE), 1);
28575 +    if ((pr == NULL))
28576 +    {
28577 +       elan4_dereference_device (dev);
28578 +       return (-ENOMEM);
28579 +    }
28580 +
28581 +    spin_lock_init (&pr->pr_lock);
28582 +    pr->pr_dev = dev;
28583 +    for (i = 0; i < MEM_HASH_SIZE; i++)
28584 +       pr->pr_pages[i] = NULL;
28585 +
28586 +    file->private_data = (void *) pr;
28587 +    
28588 +    return (0);
28589 +}
28590 +
28591 +static int
28592 +mem_release (struct inode *node, struct file *file)
28593 +{
28594 +    MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data;
28595 +    MEM_PAGE    *pg, *next;
28596 +    int          i;
28597 +
28598 +    /* free off any pages that we'd allocated */
28599 +    spin_lock (&pr->pr_lock);
28600 +    for (i = 0; i < MEM_HASH_SIZE; i++)
28601 +    {
28602 +       for (pg = pr->pr_pages[i]; pg; pg = next)
28603 +       {
28604 +           next = pg->pg_next;
28605 +           mem_freepage (pr, pg);
28606 +       }
28607 +    }
28608 +    spin_unlock (&pr->pr_lock);
28609 +
28610 +    elan4_dereference_device (pr->pr_dev);
28611 +    KMEM_FREE(pr, sizeof(*pr));
28612 +
28613 +    return (0);
28614 +}
28615 +
28616 +static int
28617 +mem_ioctl (struct inode *inode, struct file *file, 
28618 +                 unsigned int cmd, unsigned long arg)
28619 +{
28620 +    return (-EINVAL);
28621 +}
28622 +
28623 +static void 
28624 +mem_vma_open (struct vm_area_struct *vma)
28625 +{
28626 +    MEM_PRIVATE   *pr = (MEM_PRIVATE *) vma->vm_private_data;
28627 +    unsigned long addr;
28628 +    unsigned long pgoff;
28629 +
28630 +    PRINTF (DBG_USER, DBG_MEM, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28631 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
28632 +    
28633 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28634 +       mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK);
28635 +}
28636 +
28637 +static void
28638 +mem_vma_close (struct vm_area_struct *vma)
28639 +{
28640 +    MEM_PRIVATE  *pr  = (MEM_PRIVATE *) vma->vm_private_data;
28641 +    unsigned long addr;
28642 +    unsigned long pgoff;
28643 +
28644 +    PRINTF (DBG_USER, DBG_MEM, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28645 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
28646 +
28647 +    /* NOTE: the call to close may not have the same vm_start/vm_end values as 
28648 +     *       were passed into mmap()/open() - since if a partial unmap had occurred
28649 +     *       then the vma could have been shrunk or even split.
28650 +     *
28651 +     *       if the vma is split then a vma_open() will be called for the top
28652 +     *       portion - thus causing the reference counts to become incorrect.
28653 +     *
28654 +     * We drop the reference to any pages we're notified about - so they get freed
28655 +     * earlier than when the device is finally released.
28656 +     */
28657 +    for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28658 +       mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0);
28659 +}
28660 +
28661 +struct vm_operations_struct mem_vm_ops = {
28662 +    open:              mem_vma_open,
28663 +    close:             mem_vma_close,
28664 +};
28665 +
28666 +static int
28667 +mem_mmap (struct file *file, struct vm_area_struct *vma)
28668 +{
28669 +    MEM_PRIVATE  *pr = (MEM_PRIVATE *) file->private_data;
28670 +    MEM_PAGE     *pg;
28671 +    unsigned long addr;
28672 +    unsigned long pgoff;
28673 +
28674 +    PRINTF (DBG_USER, DBG_MEM, "mem_mmap: vma=%p start=%lx end=%lx pgoff=%lx file=%p\n",
28675 +           vma, vma->vm_start, vma->vm_end, vma->vm_pgoff, file);
28676 +
28677 +    /* Don't allow these pages to be swapped out or dumped */
28678 +    vma->vm_flags |= (VM_RESERVED | VM_IO);
28679 +
28680 +    vma->vm_ops          = &mem_vm_ops;
28681 +    vma->vm_file         = file;
28682 +    vma->vm_private_data = (void *) pr;
28683 +
28684 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
28685 +    {
28686 +       if ((pg = mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK)) == NULL)
28687 +           goto failed;
28688 +
28689 +       PRINTF (DBG_USER, DBG_MEM, "mem_mmap: addr %lx -> pg=%p sdram=%x+%x bar=%lx\n",
28690 +               addr, pg, pg->pg_addr, (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE,
28691 +               pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
28692 +
28693 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
28694 +
28695 +       if (! (pr->pr_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE))
28696 +           vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
28697 +
28698 +       if (__io_remap_page_range (addr, pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) +
28699 +                                pg->pg_addr + (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE, PAGE_SIZE, vma->vm_page_prot))
28700 +       {
28701 +           mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0); /* drop our reference to this page */
28702 +           goto failed;
28703 +       }
28704 +
28705 +#if defined(conditional_schedule)
28706 +       conditional_schedule();
28707 +#endif
28708 +    }
28709 +
28710 +    return (0);
28711 +
28712 + failed:
28713 +    /* free off any pages we've already allocated/referenced */
28714 +    while (pgoff-- > vma->vm_pgoff)
28715 +       mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0);
28716 +
28717 +    return (-ENOMEM);
28718 +}
28719 +
28720 +/*
28721 + * /dev/elan4/userX - control device
28722 + *
28723 + */
28724 +static inline void
28725 +user_private_free (USER_PRIVATE *pr)
28726 +{
28727 +    ELAN4_DEV *dev = pr->pr_uctx->uctx_ctxt.ctxt_dev;
28728 +
28729 +    ASSERT (atomic_read (&pr->pr_ref) == 0);
28730 +
28731 +    user_free (pr->pr_uctx);
28732 +    KMEM_FREE(pr, sizeof(*pr));
28733 +
28734 +    elan4_dereference_device (dev);
28735 +}
28736 +
28737 +#if defined(IOPROC_PATCH_APPLIED)
28738 +static void
28739 +user_ioproc_release (void *arg, struct mm_struct *mm)
28740 +{
28741 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28742 +
28743 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_release: ref=%d\n", atomic_read (&pr->pr_ref));
28744 +
28745 +    elan4mmu_invalidate_ctxt (&pr->pr_uctx->uctx_ctxt);
28746 +
28747 +    pr->pr_mm = NULL;
28748 +
28749 +    if (atomic_dec_and_test (&pr->pr_ref))
28750 +       user_private_free (pr);
28751 +}
28752 +
28753 +/*
28754 + * On 2.4 kernels we get passed a mm_struct, whereas on 2.6 kernels
28755 + * we get the vma which is more useful
28756 + */
28757 +#if defined(IOPROC_MM_STRUCT_ARG)
28758 +static void
28759 +user_ioproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
28760 +{
28761 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28762 +
28763 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
28764 +
28765 +    /* XXXX: this is intended to sync the modified bit from our page tables,
28766 +     *       into the main cpu's modified bits - however since we do not
28767 +     *       synchronize our modified bit on an ioproc_invalidate_page() call,
28768 +     *       then it could get lost if we modify the page after the last
28769 +     *       modification and writepage has occurred. Hence we invalidate
28770 +     *       all translations and allow it to refault.
28771 +     */
28772 +
28773 +    user_unload_main (pr->pr_uctx, start, end - start);
28774 +}
28775 +
28776 +static void
28777 +user_ioproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
28778 +{
28779 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28780 +
28781 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
28782 +
28783 +    user_unload_main (pr->pr_uctx, start, end - start);
28784 +}
28785 +
28786 +static void
28787 +user_ioproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
28788 +{
28789 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28790 +
28791 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_range: mm=%p start=%lx end=%lx\n", mm, start, end);
28792 +
28793 +#if defined(CONFIG_MPSAS)
28794 +    if (((end - start) >> PAGE_SHIFT) > 16)
28795 +       return;
28796 +#endif
28797 +
28798 +    user_update_main (pr->pr_uctx, mm, start, end - start);
28799 +}
28800 +
28801 +static void
28802 +user_ioproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
28803 +{
28804 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28805 +
28806 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
28807 +
28808 +    user_unload_main (pr->pr_uctx, start, end - start);
28809 +}
28810 +
28811 +
28812 +#else
28813 +
28814 +static void
28815 +user_ioproc_sync_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
28816 +{
28817 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28818 +
28819 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
28820 +
28821 +    /* XXXX: this is intended to sync the modified bit from our page tables,
28822 +     *       into the main cpu's modified bits - however since we do not
28823 +     *       synchronize our modified bit on an ioproc_invalidate_page() call,
28824 +     *       then it could get lost if we modify the page after the last
28825 +     *       modification and writepage has occurred. Hence we invalidate
28826 +     *       all translations and allow it to refault.
28827 +     */
28828 +
28829 +    user_unload_main (pr->pr_uctx, start, end - start);
28830 +}
28831 +
28832 +static void
28833 +user_ioproc_invalidate_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
28834 +{
28835 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28836 +
28837 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
28838 +
28839 +    user_unload_main (pr->pr_uctx, start, end - start);
28840 +}
28841 +
28842 +static void
28843 +user_ioproc_update_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
28844 +{
28845 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28846 +
28847 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_range: vma=%p start=%lx end=%lx\n", vma, start, end);
28848 +
28849 +#if defined(CONFIG_MPSAS)
28850 +    if (((end - start) >> PAGE_SHIFT) > 16)
28851 +       return;
28852 +#endif
28853 +
28854 +    user_update_main (pr->pr_uctx, vma->vm_mm, start, end - start);
28855 +}
28856 +
28857 +static void
28858 +user_ioproc_change_protection (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
28859 +{
28860 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28861 +
28862 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
28863 +
28864 +    user_unload_main (pr->pr_uctx, start, end - start);
28865 +}
28866 +#endif /* defined(IOPROC_MM_STRUCT_ARG) */
28867 +
28868 +static void
28869 +user_ioproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
28870 +{
28871 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28872 +
28873 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_page: addr=%lx\n", addr);
28874 +
28875 +    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
28876 +}
28877 +
28878 +static void
28879 +user_ioproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
28880 +{
28881 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28882 +
28883 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_page: addr=%lx\n", addr);
28884 +
28885 +    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
28886 +}
28887 +
28888 +static void
28889 +user_ioproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
28890 +{
28891 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
28892 +
28893 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_page: addr=%lx\n", addr);
28894 +
28895 +    user_update_main (pr->pr_uctx, vma->vm_mm, addr & PAGE_MASK, PAGE_SIZE);
28896 +}
28897 +#endif /* defined(IOPROC_PATCH_APPLIED) */
28898 +
28899 +static int
28900 +user_open (struct inode *inode, struct file *file)
28901 +{
28902 +    ELAN4_DEV    *dev;
28903 +    USER_PRIVATE *pr;
28904 +    USER_CTXT    *uctx;
28905 +    
28906 +    PRINTF (DBG_USER, DBG_FILE, "user_open: mm=%p users=%d count=%d\n", current->mm,
28907 +           atomic_read (&current->mm->mm_users), atomic_read (&current->mm->mm_count));
28908 +
28909 +    if ((dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED)) == NULL)
28910 +       return (-ENXIO);
28911 +    
28912 +    KMEM_ALLOC (pr, USER_PRIVATE *, sizeof (USER_PRIVATE), 1);
28913 +    if ((pr == NULL))
28914 +    {
28915 +       elan4_dereference_device (dev);
28916 +       return (-ENOMEM);
28917 +    }
28918 +
28919 +    uctx = user_alloc (dev);
28920 +
28921 +    if (IS_ERR(uctx))
28922 +    {
28923 +       elan4_dereference_device (dev);
28924 +       KMEM_FREE(pr, sizeof(*pr));
28925 +
28926 +       return PTR_ERR(uctx);
28927 +    }
28928 +
28929 +    /* initialise refcnt to 1 - one for "file" */
28930 +    atomic_set (&pr->pr_ref, 1);
28931 +
28932 +    pr->pr_uctx = uctx;
28933 +    pr->pr_mm   = current->mm;
28934 +
28935 +    {
28936 +       /* register a ioproc callback to notify us of translation changes */
28937 +       pr->pr_ioproc.arg               = (void *) pr;
28938 +       pr->pr_ioproc.release           = user_ioproc_release;
28939 +       pr->pr_ioproc.sync_range        = user_ioproc_sync_range;
28940 +       pr->pr_ioproc.invalidate_range  = user_ioproc_invalidate_range;
28941 +       pr->pr_ioproc.update_range      = user_ioproc_update_range;
28942 +       pr->pr_ioproc.change_protection = user_ioproc_change_protection;
28943 +       pr->pr_ioproc.sync_page         = user_ioproc_sync_page;
28944 +       pr->pr_ioproc.invalidate_page   = user_ioproc_invalidate_page;
28945 +       pr->pr_ioproc.update_page       = user_ioproc_update_page;
28946 +       
28947 +       /* add an extra reference for the ioproc ops */
28948 +       atomic_inc (&pr->pr_ref);
28949 +       
28950 +       spin_lock (&current->mm->page_table_lock);
28951 +       ioproc_register_ops (current->mm, &pr->pr_ioproc);
28952 +       spin_unlock (&current->mm->page_table_lock);
28953 +    }
28954 +
28955 +    file->private_data = (void *) pr;
28956 +
28957 +    return (0);
28958 +}
28959 +
28960 +static int
28961 +user_release (struct inode *inode, struct file *file)
28962 +{
28963 +    USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
28964 +
28965 +    PRINTF (pr->pr_uctx, DBG_FILE, "user_release: ref=%d\n", atomic_read (&pr->pr_ref));
28966 +
28967 +    if (atomic_dec_and_test (&pr->pr_ref))
28968 +       user_private_free (pr);
28969 +
28970 +    return (0);
28971 +}
28972 +
28973 +static int
28974 +user_ioctl (struct inode *inode, struct file *file, 
28975 +           unsigned int cmd, unsigned long arg)
28976 +{
28977 +    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
28978 +    USER_CTXT    *uctx = pr->pr_uctx;
28979 +    int           res  = 0;
28980 +
28981 +    PRINTF (uctx, DBG_FILE, "user_ioctl: cmd=%x arg=%lx\n", cmd, arg);
28982 +
28983 +    if (current->mm != pr->pr_mm)
28984 +       return (-EINVAL);
28985 +    
28986 +    switch (cmd)
28987 +    {
28988 +    case ELAN4IO_DEVINFO:
28989 +       if (copy_to_user ((void *) arg, &uctx->uctx_ctxt.ctxt_dev->dev_devinfo, sizeof (ELAN_DEVINFO)))
28990 +           return (-EFAULT);
28991 +       return (0);
28992 +
28993 +    case ELAN4IO_POSITION:
28994 +    {
28995 +       ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
28996 +
28997 +       if (copy_to_user ((void *) arg, &dev->dev_position, sizeof (ELAN_POSITION)))
28998 +           return (-EFAULT);
28999 +       return (0);
29000 +    }
29001 +
29002 +    case ELAN4IO_FREE:
29003 +        {
29004 +           spin_lock (&current->mm->page_table_lock);
29005 +           if (pr->pr_mm != current->mm)
29006 +               spin_unlock (&current->mm->page_table_lock);
29007 +           else
29008 +           {
29009 +               ioproc_unregister_ops (current->mm, &pr->pr_ioproc);
29010 +               spin_unlock (&current->mm->page_table_lock);
29011 +               
29012 +               user_ioproc_release (pr, current->mm);
29013 +           }
29014 +       }
29015 +       return (0);
29016 +
29017 +    case ELAN4IO_ATTACH:
29018 +    {
29019 +       ELAN_CAPABILITY *cap;
29020 +
29021 +       KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
29022 +       if ((cap == NULL))
29023 +           return (-ENOMEM);
29024 +
29025 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
29026 +           res = -EFAULT;
29027 +       else if ((res = user_attach (uctx, cap)) == 0 && 
29028 +                copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
29029 +       {
29030 +           user_detach (uctx, cap);
29031 +           res = -EFAULT;
29032 +       }
29033 +
29034 +       KMEM_FREE(cap, sizeof(*cap));
29035 +       return (res);
29036 +    }
29037 +
29038 +    case ELAN4IO_DETACH:
29039 +    {
29040 +       ELAN_CAPABILITY *cap;
29041 +
29042 +       KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
29043 +       if ((cap == NULL))
29044 +           return (-ENOMEM);
29045 +
29046 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
29047 +           res = -EFAULT;
29048 +       else
29049 +           user_detach (uctx, cap);
29050 +
29051 +       KMEM_FREE(cap, sizeof(*cap));
29052 +       return (res);
29053 +    }
29054 +
29055 +    case ELAN4IO_BLOCK_INPUTTER:
29056 +       user_block_inputter (uctx, arg);
29057 +       return (0);
29058 +
29059 +    case ELAN4IO_ADD_P2PVP:
29060 +    {
29061 +       ELAN4IO_ADD_P2PVP_STRUCT *args;
29062 +       
29063 +       KMEM_ALLOC (args, ELAN4IO_ADD_P2PVP_STRUCT *, sizeof (ELAN4IO_ADD_P2PVP_STRUCT), 1);
29064 +       if ((args == NULL))
29065 +           return (-ENOMEM);
29066 +
29067 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_ADD_P2PVP_STRUCT)))
29068 +           res = -EFAULT;
29069 +       else 
29070 +           res = user_add_p2pvp (uctx, args->vp_process, &args->vp_capability);
29071 +       
29072 +       KMEM_FREE(args, sizeof(*args));
29073 +       return (res);
29074 +    }
29075 +
29076 +    case ELAN4IO_ADD_BCASTVP:
29077 +    {
29078 +       ELAN4IO_ADD_BCASTVP_STRUCT args;
29079 +
29080 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ADD_BCASTVP_STRUCT)))
29081 +           return (-EFAULT);
29082 +
29083 +       return (user_add_bcastvp (uctx, args.vp_process, args.vp_lowvp, args.vp_highvp));
29084 +    }
29085 +
29086 +    case ELAN4IO_REMOVEVP:
29087 +       return (user_removevp (uctx, arg));
29088 +
29089 +    case ELAN4IO_SET_ROUTE:
29090 +    {
29091 +       ELAN4IO_ROUTE_STRUCT args;
29092 +       
29093 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29094 +           return (-EFAULT);
29095 +
29096 +       return (user_set_route (uctx, args.rt_process, &args.rt_route));
29097 +    }
29098 +
29099 +    case ELAN4IO_RESET_ROUTE:
29100 +    {
29101 +       ELAN4IO_ROUTE_STRUCT args;
29102 +       
29103 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29104 +           return (-EFAULT);
29105 +
29106 +       return (user_reset_route (uctx, args.rt_process));
29107 +    }
29108 +
29109 +    case ELAN4IO_GET_ROUTE:
29110 +    {
29111 +       ELAN4IO_ROUTE_STRUCT args;
29112 +       
29113 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29114 +           return (-EFAULT);
29115 +
29116 +       if ((res = user_get_route (uctx, args.rt_process, &args.rt_route)) == 0 &&
29117 +           copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
29118 +           res = -EFAULT;
29119 +
29120 +       return (res);
29121 +    }
29122 +
29123 +    case ELAN4IO_CHECK_ROUTE:
29124 +    {
29125 +       ELAN4IO_ROUTE_STRUCT args;
29126 +       
29127 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29128 +           return (-EFAULT);
29129 +
29130 +       if ((res = user_check_route (uctx, args.rt_process, &args.rt_route, &args.rt_error)) == 0 &&
29131 +           copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
29132 +           res = -EFAULT;
29133 +
29134 +       return (res);
29135 +    }
29136 +       
29137 +    case ELAN4IO_ALLOCCQ:
29138 +    {
29139 +       ELAN4IO_ALLOCCQ_STRUCT args;
29140 +       USER_CQ              *ucq;
29141 +
29142 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
29143 +           return (-EFAULT);
29144 +       
29145 +       ucq = user_alloccq (uctx, args.cq_size & CQ_SizeMask, args.cq_perm & CQ_PermissionMask,
29146 +                           (args.cq_type & ELAN4IO_CQ_TYPE_REORDER) ? UCQ_REORDER : 0);
29147 +       if (IS_ERR (ucq))
29148 +           return PTR_ERR (ucq);
29149 +       
29150 +       args.cq_indx = elan4_cq2idx (ucq->ucq_cq);
29151 +       
29152 +       if (copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
29153 +       {
29154 +           user_dropcq (uctx, ucq);
29155 +           return (-EFAULT);
29156 +       }
29157 +       
29158 +       /* don't drop the reference on the cq until the context is freed,
29159 +        * or the caller explicitly frees the cq */
29160 +       return (0);
29161 +    }
29162 +       
29163 +    case ELAN4IO_FREECQ:
29164 +    {
29165 +       USER_CQ *ucq;
29166 +       unsigned indx;
29167 +
29168 +       if (copy_from_user (&indx, (void *) arg, sizeof (unsigned)))
29169 +           return (-EFAULT);
29170 +
29171 +       if ((ucq = user_findcq (uctx, indx)) == NULL)           /* can't free unallocated cq */
29172 +           return (-EINVAL);
29173 +       
29174 +       user_dropcq (uctx, ucq);                                /* drop the reference we've just taken */
29175 +
29176 +       if ((ucq->ucq_flags & UCQ_SYSTEM))                      /* can't free device driver cq */
29177 +           return (-EINVAL);
29178 +
29179 +       user_dropcq (uctx, ucq);                                /* and the one held from the alloccq call */
29180 +
29181 +       return (0);
29182 +    }
29183 +
29184 +    case ELAN4IO_DUMPCQ:
29185 +    {
29186 +       ELAN4IO_DUMPCQ_STRUCT args;
29187 +       ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
29188 +       USER_CQ *ucq;
29189 +       void *buf;
29190 +       int i;
29191 +       
29192 +       if (copy_from_user (&args, (void *) arg, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
29193 +           return (-EFAULT);
29194 +
29195 +       if ((ucq = user_findcq (uctx, args.cq_indx)) == NULL)
29196 +           return (-EINVAL);
29197 +
29198 +       if (args.bufsize)
29199 +       {
29200 +           E4_uint32 usedBufSize = min(args.cq_size, args.bufsize);
29201 +
29202 +           KMEM_ALLOC (buf, void *, usedBufSize, 1);
29203 +
29204 +           if (buf == NULL)
29205 +               return (-ENOMEM);
29206 +
29207 +           for (i=0; i<usedBufSize; i+=sizeof(int))
29208 +               ((int *)buf)[i/sizeof(int)] = elan4_sdram_readl(dev, ucq->ucq_cq->cq_space + i);
29209 +
29210 +           if (copy_to_user((void *)args.buffer, buf, usedBufSize))
29211 +           {
29212 +               KMEM_FREE(buf, args.bufsize);
29213 +               return (-EFAULT);
29214 +           }
29215 +           KMEM_FREE(buf, usedBufSize);
29216 +           args.bufsize = usedBufSize;
29217 +       }
29218 +
29219 +       args.cq_size = CQ_Size(ucq->ucq_cq->cq_size);
29220 +       args.cq_space = ucq->ucq_cq->cq_space;
29221 +
29222 +
29223 +       if (copy_to_user((void *)arg, &args, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
29224 +       {
29225 +           return (-EFAULT);
29226 +       }
29227 +       
29228 +       user_dropcq (uctx, ucq); /* drop the reference we've just taken */
29229 +
29230 +       return (0);
29231 +    }
29232 +
29233 +    case ELAN4IO_SETPERM:
29234 +    {
29235 +       ELAN4IO_PERM_STRUCT args;
29236 +       
29237 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
29238 +           return (-EFAULT);
29239 +
29240 +       return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
29241 +    }
29242 +
29243 +    case ELAN4IO_CLRPERM:
29244 +    {
29245 +       ELAN4IO_PERM_STRUCT args;
29246 +
29247 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
29248 +           return (-EFAULT);
29249 +
29250 +       user_clrperm (uctx, args.ps_eaddr, args.ps_len);
29251 +       return (0);
29252 +    }
29253 +    
29254 +    case ELAN4IO_TRAPSIG:
29255 +    {
29256 +       ELAN4IO_TRAPSIG_STRUCT args;
29257 +
29258 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPSIG_STRUCT)))
29259 +           return (-EFAULT);
29260 +
29261 +       pr->pr_uctx->uctx_trap_pid   = current->pid;
29262 +       pr->pr_uctx->uctx_trap_signo = args.ts_signo;
29263 +       
29264 +       return (0);
29265 +    }
29266 +    
29267 +    case ELAN4IO_TRAPHANDLER:
29268 +    {
29269 +       ELAN4IO_TRAPHANDLER_STRUCT args;
29270 +
29271 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT)))
29272 +           return (-EFAULT);
29273 +
29274 +       return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)args.th_trapp, args.th_nticks));
29275 +    }
29276 +
29277 +    case ELAN4IO_REQUIRED_MAPPINGS:
29278 +    {
29279 +       ELAN4IO_REQUIRED_MAPPINGS_STRUCT args;
29280 +       
29281 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_REQUIRED_MAPPINGS_STRUCT)))
29282 +           return (-EFAULT);
29283 +
29284 +       pr->pr_uctx->uctx_upage_addr    = args.rm_upage_addr;
29285 +       pr->pr_uctx->uctx_trestart_addr = args.rm_trestart_addr;
29286 +
29287 +       return (0);
29288 +    }
29289 +
29290 +    case ELAN4IO_ALLOC_TRAP_QUEUES:
29291 +    {
29292 +       ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT args;
29293 +
29294 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT)))
29295 +           return (-EFAULT);
29296 +
29297 +       return (user_alloc_trap_queues (uctx, args.tq_ndproc_traps, args.tq_neproc_traps, 
29298 +                                       args.tq_ntproc_traps, args.tq_nthreads, args.tq_ndmas));
29299 +    }
29300 +
29301 +    case ELAN4IO_RESUME_EPROC_TRAP:
29302 +    {
29303 +       ELAN4IO_RESUME_EPROC_TRAP_STRUCT args;
29304 +       
29305 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_EPROC_TRAP_STRUCT)))
29306 +           return (-EFAULT);
29307 +
29308 +       return (user_resume_eproc_trap (pr->pr_uctx, args.rs_addr));
29309 +    }
29310 +
29311 +    case ELAN4IO_RESUME_CPROC_TRAP:
29312 +    {
29313 +       ELAN4IO_RESUME_CPROC_TRAP_STRUCT args;
29314 +       
29315 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_CPROC_TRAP_STRUCT)))
29316 +           return (-EFAULT);
29317 +
29318 +       return (user_resume_cproc_trap (pr->pr_uctx, args.rs_indx));
29319 +    }
29320 +
29321 +    case ELAN4IO_RESUME_DPROC_TRAP:
29322 +    {
29323 +       ELAN4IO_RESUME_DPROC_TRAP_STRUCT args;
29324 +       
29325 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_DPROC_TRAP_STRUCT)))
29326 +           return (-EFAULT);
29327 +
29328 +       return (user_resume_dproc_trap (pr->pr_uctx, &args.rs_desc));
29329 +    }
29330 +
29331 +    case ELAN4IO_RESUME_TPROC_TRAP:
29332 +    {
29333 +       ELAN4IO_RESUME_TPROC_TRAP_STRUCT args;
29334 +       
29335 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_TPROC_TRAP_STRUCT)))
29336 +           return (-EFAULT);
29337 +
29338 +       return (user_resume_tproc_trap (pr->pr_uctx, &args.rs_regs));
29339 +    }
29340 +
29341 +    case ELAN4IO_RESUME_IPROC_TRAP:
29342 +    {
29343 +       ELAN4IO_RESUME_IPROC_TRAP_STRUCT args;
29344 +       
29345 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_IPROC_TRAP_STRUCT)))
29346 +           return (-EFAULT);
29347 +
29348 +       return (user_resume_iproc_trap (pr->pr_uctx, args.rs_channel, args.rs_trans, 
29349 +                                       &args.rs_header, &args.rs_data));
29350 +    }
29351 +
29352 +    case ELAN4IO_FLUSH_ICACHE:
29353 +       elan4_flush_icache (&uctx->uctx_ctxt);
29354 +       return (0);
29355 +
29356 +    case ELAN4IO_STOP_CTXT:
29357 +       if (arg)
29358 +           user_swapout (uctx, UCTX_USER_STOPPED);
29359 +       else
29360 +           user_swapin (uctx, UCTX_USER_STOPPED);
29361 +       return (0);
29362 +
29363 +    case ELAN4IO_ALLOC_INTCOOKIE_TABLE:
29364 +    {
29365 +       ELAN_CAPABILITY *cap;
29366 +       INTCOOKIE_TABLE *tbl;
29367 +
29368 +       KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
29369 +       if ((cap == NULL))
29370 +           return (-ENOMEM);
29371 +
29372 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
29373 +           res = -EFAULT;
29374 +       else
29375 +       {
29376 +           tbl = intcookie_alloc_table(cap);
29377 +
29378 +           if (tbl == NULL)
29379 +               res = -ENOMEM;
29380 +           else
29381 +           {
29382 +               /* Install the intcookie table we've just created */
29383 +               spin_lock (&uctx->uctx_spinlock);
29384 +               if (uctx->uctx_intcookie_table != NULL)
29385 +                   res = -EBUSY;
29386 +               else
29387 +                   uctx->uctx_intcookie_table = tbl;
29388 +               spin_unlock (&uctx->uctx_spinlock);
29389 +               
29390 +               /* drop the table we created if there already was one */
29391 +               if (res != 0)
29392 +                   intcookie_free_table (tbl);
29393 +           }
29394 +       }
29395 +
29396 +       KMEM_FREE(cap, sizeof(*cap));
29397 +
29398 +       return (res);
29399 +    }
29400 +
29401 +    case ELAN4IO_FREE_INTCOOKIE_TABLE:
29402 +    {
29403 +       INTCOOKIE_TABLE *tbl;
29404 +
29405 +       spin_lock (&uctx->uctx_spinlock);
29406 +       tbl = uctx->uctx_intcookie_table;
29407 +       uctx->uctx_intcookie_table = NULL;
29408 +       spin_unlock (&uctx->uctx_spinlock);
29409 +
29410 +       if (tbl != NULL)
29411 +           intcookie_free_table (tbl);
29412 +
29413 +       return (tbl == NULL ? -EINVAL : 0);
29414 +    }
29415 +
29416 +    case ELAN4IO_ALLOC_INTCOOKIE:
29417 +    {
29418 +       /* For backwards compatibility with the old libs (pre 1.8.0)
29419 +        * we allocate an intcookie table on the first cookie
29420 +        * alloc if one hasn't be created already
29421 +        */
29422 +       if (uctx->uctx_intcookie_table == NULL)
29423 +       {
29424 +           ELAN_CAPABILITY *cap;
29425 +           INTCOOKIE_TABLE *tbl;
29426 +           
29427 +           KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
29428 +           if ((cap == NULL))
29429 +               return (-ENOMEM);
29430 +
29431 +           /* Create a dummy capability */
29432 +           elan_nullcap(cap);
29433 +
29434 +           /* Must be unique for each process on a node */
29435 +           cap->cap_mycontext = (int) ELAN4_TASK_HANDLE();
29436 +
29437 +           /* Create a new intcookie table */
29438 +           tbl = intcookie_alloc_table(cap);
29439 +
29440 +           /* Hang intcookie table off uctx */
29441 +           spin_lock (&uctx->uctx_spinlock);
29442 +           if (uctx->uctx_intcookie_table == NULL)
29443 +           {
29444 +               uctx->uctx_intcookie_table = tbl;
29445 +               spin_unlock (&uctx->uctx_spinlock);
29446 +           }
29447 +           else
29448 +           {
29449 +               spin_unlock (&uctx->uctx_spinlock);
29450 +               intcookie_free_table(tbl);
29451 +           }
29452 +
29453 +           KMEM_FREE(cap, sizeof(*cap));
29454 +       }
29455 +       
29456 +       return (intcookie_alloc (uctx->uctx_intcookie_table, arg));
29457 +    }
29458 +
29459 +    case ELAN4IO_FREE_INTCOOKIE:
29460 +       if (uctx->uctx_intcookie_table == NULL)
29461 +           return -EINVAL;
29462 +       else
29463 +           return (intcookie_free (uctx->uctx_intcookie_table, arg));
29464 +
29465 +    case ELAN4IO_ARM_INTCOOKIE:
29466 +       if (uctx->uctx_intcookie_table == NULL)
29467 +           return -EINVAL;
29468 +       else
29469 +           return (intcookie_arm (uctx->uctx_intcookie_table, arg));
29470 +
29471 +    case ELAN4IO_WAIT_INTCOOKIE:
29472 +       if (uctx->uctx_intcookie_table == NULL)
29473 +           return -EINVAL;
29474 +       else
29475 +           return (intcookie_wait (uctx->uctx_intcookie_table, arg));
29476 +
29477 +    case ELAN4IO_FIRE_INTCOOKIE:
29478 +    {
29479 +       ELAN4IO_FIRECAP_STRUCT *args;
29480 +
29481 +       KMEM_ALLOC (args, ELAN4IO_FIRECAP_STRUCT *, sizeof (ELAN4IO_FIRECAP_STRUCT), 1);
29482 +       if ((args == NULL))
29483 +           return (-ENOMEM);
29484 +
29485 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_FIRECAP_STRUCT)))
29486 +           res = -EFAULT;
29487 +       else
29488 +           res = intcookie_fire_cap (&args->fc_capability, args->fc_cookie);
29489 +       
29490 +       KMEM_FREE(args, sizeof(*args));
29491 +
29492 +       return (res);
29493 +    }
29494 +
29495 +    case ELAN4IO_NETERR_MSG:
29496 +    {
29497 +       ELAN4IO_NETERR_MSG_STRUCT args;
29498 +       
29499 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_MSG_STRUCT)))
29500 +           return (-EFAULT);
29501 +
29502 +       return (user_send_neterr_msg (uctx, args.nm_vp, args.nm_nctx, args.nm_retries, &args.nm_msg));
29503 +    }
29504 +
29505 +    case ELAN4IO_NETERR_TIMER:
29506 +    {
29507 +       unsigned long ticks = ((unsigned long) arg * HZ) / 1000;
29508 +
29509 +       PRINTF (uctx, DBG_NETERR, "elan4_neterr_timer: arg %ld inc %ld\n", arg, ticks);
29510 +
29511 +       mod_timer (&uctx->uctx_neterr_timer, (jiffies + (ticks > 0 ? ticks : 1)));
29512 +       return 0;
29513 +    }
29514 +               
29515 +    case ELAN4IO_NETERR_FIXUP:
29516 +    {
29517 +       ELAN4IO_NETERR_FIXUP_STRUCT args;
29518 +
29519 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_FIXUP_STRUCT)))
29520 +           return (-EFAULT);
29521 +
29522 +       if (args.nf_sten)
29523 +           return (user_neterr_sten (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
29524 +       else
29525 +           return (user_neterr_dma  (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
29526 +    }
29527 +    default:
29528 +       PRINTF (uctx, DBG_FILE, "user_ioctl: invalid ioctl %x\n", cmd);
29529 +       return (-EINVAL);
29530 +    }
29531 +}
29532 +
29533 +static void
29534 +user_vma_open (struct vm_area_struct *vma)
29535 +{
29536 +    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
29537 +    USER_CTXT    *uctx = pr->pr_uctx;
29538 +    unsigned long addr;
29539 +    unsigned long pgoff;
29540 +
29541 +    PRINTF (uctx, DBG_FILE, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
29542 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
29543 +
29544 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
29545 +       elan4_getcqa (&uctx->uctx_ctxt, pgoff);
29546 +}
29547 +
29548 +static void 
29549 +user_vma_close (struct vm_area_struct *vma)
29550 +{
29551 +    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
29552 +    USER_CTXT    *uctx = pr->pr_uctx;
29553 +    unsigned long addr;
29554 +    unsigned long pgoff;
29555 +
29556 +    PRINTF (uctx, DBG_FILE, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
29557 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
29558 +
29559 +    /* NOTE: the same comments apply as mem_vma_close */
29560 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
29561 +       if (elan4_getcqa (&uctx->uctx_ctxt, pgoff) != NULL)
29562 +       {
29563 +           elan4_putcqa (&uctx->uctx_ctxt, pgoff);                     /* drop the reference we've just taken */
29564 +           elan4_putcqa (&uctx->uctx_ctxt, pgoff);                     /* and the one held by the mmap */
29565 +       }
29566 +}
29567 +
29568 +struct vm_operations_struct user_vm_ops = {
29569 +    open:              user_vma_open,
29570 +    close:             user_vma_close,
29571 +};
29572 +
29573 +static int
29574 +user_mmap (struct file *file, struct vm_area_struct *vma)
29575 +{
29576 +    USER_PRIVATE *pr    = (USER_PRIVATE *) file->private_data;
29577 +    USER_CTXT    *uctx  = pr->pr_uctx;
29578 +    ELAN4_DEV     *dev   = uctx->uctx_ctxt.ctxt_dev;
29579 +    ELAN4_CQA     *cqa;
29580 +    unsigned long addr;
29581 +    unsigned long pgoff;
29582 +    int           res;
29583 +    ioaddr_t      ioaddr;
29584 +    
28585 +    /* Don't allow these pages to be swapped out or dumped */
29586 +    vma->vm_flags |= (VM_RESERVED | VM_IO);
29587 +
29588 +    vma->vm_ops          = &user_vm_ops;
29589 +    vma->vm_file         = file;
29590 +    vma->vm_private_data = (void *) pr;
29591 +    
29592 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
29593 +    {
29594 +       switch (pgoff)
29595 +       {
29596 +       default:
29597 +           PRINTF (uctx, DBG_FILE, "user_mmap: command queue %ld mapping at %lx\n",  pgoff, addr);
29598 +           
29599 +           if ((cqa = elan4_getcqa (&uctx->uctx_ctxt, pgoff)) == NULL)
29600 +           {
29601 +               res = -EINVAL;
29602 +               goto failed;
29603 +           }
29604 +
29605 +           PRINTF (uctx, DBG_FILE, "user_mmap: cqa=%p idx=%d num=%d ref=%d\n", cqa, cqa->cqa_idx, cqa->cqa_cqnum, cqa->cqa_ref);
29606 +    
29607 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
29608 +
29609 +           if (! (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE) && (cqa->cqa_type & CQ_Reorder) != 0)
29610 +               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
29611 +
29612 +           PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (%lx, %lx, %lx, %lx)\n",
29613 +                   addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
29614 +                   (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize, PAGE_SIZE,
29615 +                   vma->vm_page_prot);
29616 +
29617 +           if (__io_remap_page_range (addr, 
29618 +                                      pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
29619 +                                      (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize,
29620 +                                      PAGE_SIZE, vma->vm_page_prot))
29621 +           {
29622 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range failed\n");
29623 +
29624 +               elan4_putcqa (&uctx->uctx_ctxt, pgoff);
29625 +               res = -ENOMEM;
29626 +               goto failed;
29627 +           }
29628 +           break;
29629 +           
29630 +       case ELAN4_OFF_USER_REGS:
29631 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
29632 +
29633 +           switch (dev->dev_devinfo.dev_revision_id)
29634 +           {
29635 +           case PCI_REVISION_ID_ELAN4_REVA:
29636 +               ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVA_REG_OFFSET + offsetof(E4_Registers, uRegs);
29637 +               break;
29638 +               
29639 +           case PCI_REVISION_ID_ELAN4_REVB:
29640 +               ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVB_REG_OFFSET + offsetof(E4_Registers, uRegs);
29641 +               break;
29642 +
29643 +           default:
29644 +               res = -EINVAL;
29645 +               goto failed;
29646 +           }
29647 +
29648 +           PRINTF (uctx, DBG_FILE, "user_mmap: user_regs at %lx ioaddr %lx prot %lx\n",
29649 +                   addr, ioaddr, vma->vm_page_prot.pgprot);
29650 +
29651 +           if (__io_remap_page_range (addr,  (ioaddr & PAGEMASK), PAGE_SIZE, vma->vm_page_prot))
29652 +           {                     
29653 +               res = -EAGAIN;
29654 +               goto failed;
29655 +           }
29656 +
29657 +           break;
29658 +           
29659 +       case ELAN4_OFF_USER_PAGE:
29660 +           PRINTF (uctx, DBG_FILE, "user_mmap: shared user page - kaddr=%lx uaddr=%lx phys=%lx\n", 
29661 +                   uctx->uctx_upage, addr, kmem_to_phys (uctx->uctx_upage));
29662 +
29663 +           /* we do not want to have this area swapped out, lock it */
29664 +           vma->vm_flags |= VM_LOCKED;
29665 +           
29666 +           /* Mark the page as reserved or else the remap_page_range() doesn't remap it */
29667 +           SetPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)));
29668 +       
29669 +           if (__remap_page_range (addr, kmem_to_phys (uctx->uctx_upage), PAGE_SIZE, vma->vm_page_prot))
29670 +           {
29671 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (user_page) failed\n");
29672 +               res = -ENOMEM;
29673 +               goto failed;
29674 +           }
29675 +           break;
29676 +           
29677 +       case ELAN4_OFF_TPROC_TRAMPOLINE:
29678 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
29679 +
29680 +           PRINTF (uctx, DBG_FILE, "user_mmap: tproc trampoline - kaddr=%lx uaddr=%lx phys=%lx\n", uctx->uctx_trampoline, addr, 
29681 +                   pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)));
29682 +
29683 +           if (__io_remap_page_range (addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + 
29684 +                                      uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)),
29685 +                                      PAGE_SIZE, vma->vm_page_prot))
29686 +           {
29687 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (tproc_trampoline) failed\n");
29688 +               res = -ENOMEM;
29689 +               goto failed;
29690 +           }
29691 +           break;
29692 +           
29693 +       case ELAN4_OFF_DEVICE_STATS:
29694 +           printk ("user_mmap: device_stats\n");
29695 +           break;
29696 +       }
29697 +       
29698 +    }
29699 +
29700 +    return (0);
29701 +
29702 + failed:
29703 +    for (addr -= PAGE_SIZE, pgoff--; addr >= vma->vm_start; addr -= PAGE_SIZE, pgoff--)
29704 +       elan4_putcqa (&uctx->uctx_ctxt, pgoff);         /* drop the reference we've just taken */
29705 +    return (res);
29706 +}
29707 +
29708 +/* driver entry points */
29709 +static int
29710 +elan4_open (struct inode *inode, struct file *file)
29711 +{
29712 +    PRINTF (DBG_USER, DBG_FILE, "elan4_open: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
29713 +    
29714 +    switch (ELAN4_MINOR (inode))
29715 +    {
29716 +    case ELAN4_MINOR_CONTROL:
29717 +       return (control_open (inode, file));
29718 +    case ELAN4_MINOR_MEM:
29719 +       return (mem_open (inode, file));
29720 +    case ELAN4_MINOR_USER:
29721 +       return (user_open (inode, file));
29722 +    default:
29723 +       return (-ENXIO);
29724 +    }
29725 +}
29726 +
29727 +static int
29728 +elan4_release (struct inode *inode, struct file *file)
29729 +{
29730 +    PRINTF (DBG_USER, DBG_FILE, "elan4_release: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
29731 +    
29732 +    switch (ELAN4_MINOR (inode))
29733 +    {
29734 +    case ELAN4_MINOR_CONTROL:
29735 +       return (control_release (inode, file));
29736 +    case ELAN4_MINOR_MEM:
29737 +       return (mem_release (inode, file));
29738 +    case ELAN4_MINOR_USER:
29739 +       return (user_release (inode, file));
29740 +    default:
29741 +       return (-ENXIO);
29742 +    }
29743 +}
29744 +
29745 +static int
29746 +elan4_ioctl (struct inode *inode, struct file *file, 
29747 +            unsigned int cmd, unsigned long arg)
29748 +{
29749 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
29750 +    
29751 +    switch (ELAN4_MINOR (inode))
29752 +    {
29753 +    case ELAN4_MINOR_CONTROL:
29754 +       return (control_ioctl (inode, file, cmd, arg));
29755 +    case ELAN4_MINOR_MEM:
29756 +       return (mem_ioctl (inode, file, cmd, arg));
29757 +    case ELAN4_MINOR_USER:
29758 +       return (user_ioctl (inode, file, cmd, arg));
29759 +    default:
29760 +       return (-ENXIO);
29761 +    }
29762 +}
29763 +
29764 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
29765 +static int
29766 +elan4_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
29767 +{
29768 +    struct inode *inode = file->f_dentry->d_inode;
29769 +    extern int sys_ioctl (unsigned int fd, unsigned int cmd, unsigned long arg);
29770 +
29771 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
29772 +    
29773 +    if (ELAN4_MINOR (inode) == ELAN4_MINOR_USER)
29774 +    {
29775 +       USER_PRIVATE *pr    = (USER_PRIVATE *) file->private_data;
29776 +       USER_CTXT    *uctx  = pr->pr_uctx;
29777 +
29778 +       if (current->mm != pr->pr_mm)
29779 +           return -EINVAL;
29780 +       
29781 +       switch (cmd)
29782 +       {
29783 +       case ELAN4IO_SETPERM32:
29784 +       {
29785 +           ELAN4IO_PERM_STRUCT32 args;
29786 +           
29787 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
29788 +               return (-EFAULT);
29789 +           
29790 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: setperm maddr=%x eaddr=%llx len=%llxx perm=%d\n",
29791 +                   args.ps_maddr, args.ps_eaddr,args.ps_len, args.ps_perm);
29792 +
29793 +           return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
29794 +       }
29795 +       
29796 +       case ELAN4IO_CLRPERM32:
29797 +       {
29798 +           ELAN4IO_PERM_STRUCT32 args;
29799 +           
29800 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
29801 +               return (-EFAULT);
29802 +           
29803 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: clrperm eaddr=%llx len=%ll\n",
29804 +                   args.ps_eaddr, args.ps_len);
29805 +
29806 +           user_clrperm (uctx, args.ps_eaddr, args.ps_len);
29807 +           return (0);
29808 +       }
29809 +    
29810 +       case ELAN4IO_TRAPHANDLER32:
29811 +       {
29812 +           ELAN4IO_TRAPHANDLER_STRUCT32 args;
29813 +           
29814 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT32)))
29815 +               return (-EFAULT);
29816 +           
29817 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: traphandler trapp=%x nticks=%d\n",
29818 +                   args.th_trapp, args.th_nticks);
29819 +
29820 +           return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)(unsigned long)args.th_trapp, args.th_nticks));
29821 +       }
29822 +       }
29823 +    }
29824 +
29825 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: fd=%d cmd=%x arg=%lx file=%p\n", fd, cmd, arg, file);
29826 +    return (sys_ioctl (fd, cmd, arg));
29827 +}
29828 +#endif
29829 +
29830 +
29831 +
29832 +static int
29833 +elan4_mmap (struct file *file, struct vm_area_struct *vma)
29834 +{
29835 +    PRINTF (DBG_USER, DBG_FILE, "elan4_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx\n", 
29836 +           ELAN4_DEVICE (file->f_dentry->d_inode), ELAN4_MINOR (file->f_dentry->d_inode),
29837 +           vma->vm_start, vma->vm_end, vma->vm_pgoff);
29838 +
29839 +    switch (ELAN4_MINOR (file->f_dentry->d_inode))
29840 +    {
29841 +    case ELAN4_MINOR_CONTROL:
29842 +       return (control_mmap (file, vma));
29843 +    case ELAN4_MINOR_MEM:
29844 +       return (mem_mmap (file, vma));
29845 +    case ELAN4_MINOR_USER:
29846 +       return (user_mmap (file, vma));
29847 +    default:
29848 +       return (-ENXIO);
29849 +    }
29850 +}
29851 +
29852 +void
29853 +elan4_update_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *bridge)
29854 +{
29855 +    u16 cnf;
29856 +    
29857 +    pci_read_config_word (bridge, 0x40 /* CNF */, &cnf);
29858 +    
29859 +    /* We expect the CNF register to be configured as follows
29860 +     *
29861 +     * [8]   == 1      PMODE PCI Mode
29862 +     * [7:6] == 2/3    PFREQ PCI Frequency (100/133)
29863 +     * [5]   == 0      RSDIS Restreaming Disable
29864 +     * [4:3] == 0x     PP    Prefetch Policy
29865 +     * [2]   == 0       DTD   Delayed Transaction Depth
29866 +     * [1:0] == 10      MDT   Maximum Delayed Transactions
29867 +     */
29868 +    
29869 +    if ((cnf & (1 << 8)) == 0)
29870 +       printk ("elan%d: strangeness - elan reports PCI-X but P64H2 reports PCI mode !\n", dev->dev_instance);
29871 +    else if ((cnf & 0xb7) != 0x82 && (cnf & 0xb7) != 0x84 && optimise_pci_bus < 2)
29872 +       printk ("elan%d: P64H2 CNF is not configured as expected : RSDIS=%d PP=%d DTD=%d MDT=%d\n",
29873 +               dev->dev_instance, (cnf >> 5) & 1, (cnf >> 3) & 3, (cnf >> 2) & 1, cnf & 3);
29874 +    else
29875 +    {
29876 +       switch ((cnf >> 6) & 3)
29877 +       {
29878 +       case 2:                                         /* PCI-X 100 */
29879 +           pci_write_config_word (bridge, 0xfc /* PC100 */, 0x7777);
29880 +           
29881 +           printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 100\n", dev->dev_instance);
29882 +           
29883 +           break;
29884 +           
29885 +       case 3:                                         /* PCI-X 133 */
29886 +           pci_write_config_word (bridge, 0xfe /* PC133 */, 0x7777);
29887 +           
29888 +           printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 133\n", dev->dev_instance);
29889 +           break;
29890 +       }
29891 +       
29892 +       pci_write_config_word (bridge, 0x40 /* CNF */, (cnf & 0xfff8) | 0x4);   /* DTD=1 MDT=0 */
29893 +    }
29894 +}
29895 +
29896 +int
29897 +elan4_optimise_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *pdev)
29898 +{
29899 +    struct pci_bus   *bus      = pdev->bus;
29900 +    struct pci_dev   *bridge   = bus->self;
29901 +    unsigned int      devcount = 0;
29902 +    u8                revision;
29903 +    u32               ectrl;
29904 +    struct list_head *el;
29905 +    
29906 +    pci_read_config_dword (pdev, PCI_ELAN_CONTROL, &ectrl);
29907 +
29908 +    /* We can only run in PCI-X mode with a B1 stepping P64H2 because of P64H2 Errata 3 */
29909 +    pci_read_config_byte (bridge, PCI_REVISION_ID, &revision);
29910 +    if (revision < 0x04)
29911 +    {
29912 +       if ((ectrl & ECTRL_INITIALISATION_MODE) != Pci2_2)
29913 +       {
29914 +           static const char *p64h2_stepping[4] = {"UNKNOWN", "UNKNOWN", "UNKNOWN", "B0"};
29915 +
29916 +           printk ("elan%d: unable to use device because of P64H2 Errata 3 on\n"
29917 +                   "       %s stepping part and running in a PCI-X slot\n", 
29918 +                   dev->dev_instance, p64h2_stepping[revision]);
29919 +           return -EINVAL;
29920 +       }
29921 +    }
29922 +    
29923 +    /* We can only alter the bus configuration registers if the Elan is the only device
29924 +     * on the bus ... */
29925 +    list_for_each (el, &bus->devices) {
29926 +       struct pci_dev *pcip = list_entry (el, struct pci_dev, bus_list);
29927 +
29928 +       if (pcip == pdev || (pcip->vendor == PCI_VENDOR_ID_INTEL && pcip->device == 0x1462 /* P64H2 HOTPLUG */))
29929 +           continue;
29930 +           
29931 +       devcount++;
29932 +    }
29933 +
29934 +    if (devcount > 0 || !list_empty (&bus->children))
29935 +    {
29936 +       printk ("elan%d: unable to optimise P64H2 settings as %s%s\n", dev->dev_instance,
29937 +               (devcount > 0) ? "more than one device on bus" :  "",
29938 +               ! list_empty (&bus->children) ? "has child buses" : "");
29939 +       return 0;
29940 +    }
29941 +
29942 +#ifdef __ia64
29943 +    if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz)
29944 +    {
29945 +       struct pci_dev *pcip;
29946 +       unsigned int sioh_good      = 0;
29947 +       unsigned int sioh_downgrade = 0;
29948 +       unsigned int snc_good       = 0;
29949 +       unsigned int snc_downgrade  = 0;
29950 +       
29951 +       /* Search for the associated SIOH and SNC on ia64,
29952 +        * if we have a C2 SIOH and a C0/C1 SNC, then we can
29953 +        * reconfigure the P64H2 as follows:
29954 +        *    CNF:MDT   = 0
29955 +        *    CNF:DTD   = 1
29956 +        *    CNF:PC133 = 7777
29957 +        *
29958 +        * if not, then issue a warning that down rev parts
29959 +        * affect bandwidth.
29960 +        */
29961 +       for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x500, pcip)); )
29962 +       {
29963 +           pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
29964 +           
29965 +           if (revision >= 0x21)
29966 +               snc_good++;
29967 +           else
29968 +           {
29969 +               printk ("elan%d: SNC revision %x (%s)\n", dev->dev_instance, revision,
29970 +                       revision == 0x00 ? "A0" : revision == 0x01 ? "A1" : 
29971 +                       revision == 0x02 ? "A2" : revision == 0x03 ? "A3" :
29972 +                       revision == 0x10 ? "B0" : revision == 0x20 ? "C0" : 
29973 +                       revision == 0x21 ? "C1" : "UNKNOWN");
29974 +           
29975 +               snc_downgrade++;
29976 +           }
29977 +       }
29978 +
29979 +       for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x510, pcip)) != NULL; )
29980 +       {
29981 +           pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
29982 +           
29983 +           
29984 +           if (revision >= 0x22)
29985 +               sioh_good++;
29986 +           else
29987 +           {
29988 +               printk ("elan%d: SIOH revsision %x (%s)\n", dev->dev_instance, revision,
29989 +                       revision == 0x10 ? "C0" : revision == 0x20 ? "C0" : 
29990 +                       revision == 0x21 ? "C1" : revision == 0x22 ? "C2" : "UNKNOWN");
29991 +
29992 +               sioh_downgrade++;
29993 +           }
29994 +       }
29995 +
29996 +       if (optimise_pci_bus < 2 && (sioh_downgrade || snc_downgrade))
29997 +           printk ("elan%d: unable to optimise as SNC/SIOH below required C1/C2 steppings\n", dev->dev_instance);
29998 +       else if (optimise_pci_bus < 2 && (sioh_good == 0 || snc_good == 0))
29999 +           printk ("elan%d: unable to optimise as cannot determine SNC/SIOH revision\n", dev->dev_instance);
30000 +       else
30001 +           elan4_update_intel_p64h2 (dev, bridge);
30002 +    }
30003 +#endif
30004 +    
30005 +#ifdef __i386
30006 +    if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz)
30007 +       elan4_update_intel_p64h2 (dev, bridge);
30008 +#endif     
30009 +    return 0;
30010 +}
30011 +
30012 +int
30013 +elan4_optimise_intel_pxh (ELAN4_DEV *dev, struct pci_dev *pdev)
30014 +{
30015 +    dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_64BIT_READ;
30016 +
30017 +    return 0;
30018 +}
30019 +
30020 +void
30021 +elan4_optimise_serverworks_ciobx2 (ELAN4_DEV *dev)
30022 +{
30023 +    struct pci_dev *pdev = dev->dev_osdep.pdev;
30024 +    struct pci_dev *pcip;
30025 +    unsigned char   bus;
30026 +    unsigned int    dor;
30027 +    
30028 +    /* Find the CIOBX2 for our bus number */
30029 +    for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, pcip)) != NULL;)
30030 +    {
30031 +       pci_read_config_byte (pcip, 0x44 /* BUSNUM */, &bus);
30032 +       
30033 +       if (pdev->bus->number == bus)
30034 +       {
30035 +           printk ("elan%d: optimise CIOBX2 : setting DOR to disable read pipe lining\n", dev->dev_instance);
30036 +
30037 +           pci_read_config_dword (pcip, 0x78 /* DOR */, &dor);
30038 +           pci_write_config_dword (pcip, 0x78 /* DOR */, dor | (1 << 16));
30039 +
30040 +           printk ("elan%d: disabling write-combining on ServerWorks chipset\n", dev->dev_instance);
30041 +           dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_NO_WRITE_COMBINE;
30042 +       }
30043 +    }
30044 +}
30045 +
30046 +int
30047 +elan4_optimise_bus (ELAN4_DEV *dev)
30048 +{
30049 +    struct pci_dev *pdev = dev->dev_osdep.pdev;
30050 +
30051 +    if (pdev->bus && pdev->bus->self) 
30052 +    {
30053 +       struct pci_dev *bridge = pdev->bus->self;
30054 +       
30055 +       if (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x1460 /* Intel P64H2 */)
30056 +           return elan4_optimise_intel_p64h2 (dev, pdev);
30057 +
30058 +       if ((bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0329) /* Intel 6700PXH Fn 0 */ ||
30059 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032a) /* Intel 6700PXH Fn 2 */ ||
30060 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032c) /* Intel 6702PXH */ ||
30061 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0320) /* Intel PXH-D */)
30062 +           return elan4_optimise_intel_pxh (dev, pdev);
30063 +    }
30064 +
30065 +    if (pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL) != NULL)              /* on HP ZX1 set the relaxed ordering  */
30066 +       dev->dev_pteval = PTE_RelaxedOrder;                                     /* bit to get better DMA bandwidth. */
30067 +
30068 +    if (pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, NULL) != NULL)     /* ServerWorks CIOBX2 */
30069 +       elan4_optimise_serverworks_ciobx2 (dev);
30070 +
30071 +    return 0;
30072 +}
30073 +
30074 +int
30075 +elan4_pciinit (ELAN4_DEV *dev)
30076 +{
30077 +    int res;
30078 +    u32 value;
30079 +    u16 command;
30080 +    u8 cacheline;
30081 +    unsigned long flags;
30082 +
30083 +    if (optimise_pci_bus && (res = elan4_optimise_bus (dev)) <0)
30084 +       return (res);
30085 +
30086 +    if ((res = pci_enable_device (dev->dev_osdep.pdev)) < 0)
30087 +       return (res);
30088 +
30089 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
30090 +    if ((value & ECTRL_INITIALISATION_MODE) == Pci2_2)
30091 +       printk ("elan%d: is an elan4%c (PCI-2.2)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
30092 +    else
30093 +    {
30094 +       switch (value & ECTRL_INITIALISATION_MODE)
30095 +       {
30096 +       case PciX50To66MHz:
30097 +           printk ("elan%d: is an elan4%c (PCI-X 50-66)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
30098 +           break;
30099 +           
30100 +       case PciX66to100MHz:
30101 +           printk ("elan%d: is an elan4%c (PCI-X 66-100)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
30102 +           break;
30103 +           
30104 +       case PciX100to133MHz:
30105 +           printk ("elan%d: is an elan4%c (PCI-X 100-133)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
30106 +           break;
30107 +           
30108 +       default:
30109 +           printk ("elan%d: Invalid PCI-X mode\n", dev->dev_instance);
30110 +           return (-EINVAL);
30111 +       }
30112 +    }
30113 +
30114 +    /* initialise the elan pll control register */
30115 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, &value);
30116 +
30117 +    if (elan4_pll_cfg)
30118 +    {
30119 +       printk ("elan%d: setting pll control to %08x\n", dev->dev_instance, elan4_pll_cfg);
30120 +
30121 +       pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, elan4_pll_cfg);
30122 +    }
30123 +    else
30124 +    {
30125 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
30126 +           pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
30127 +                                   (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_4_3);
30128 +       else
30129 +           pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
30130 +                                   (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_6_5 | SysPll_FeedForwardISel0 | SysPll_FeedForwardISel1);
30131 +    }  
30132 +
30133 +    /* initialise the elan control register */
30134 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
30135 +
30136 +    value = ((15 << ECTRL_IPROC_HIGH_PRI_TIME_SHIFT) |
30137 +            (15 << ECTRL_OTHER_HIGH_PRI_TIME_SHIFT) |
30138 +            (value & ECTRL_28_NOT_30_BIT_LOCAL_BAR) |
30139 +            (dev->dev_topaddrmode ? ECTRL_ExtraMasterAddrBits : 0) |
30140 +            ECTRL_ENABLE_LATENCY_RESET | 
30141 +            ECTRL_ENABLE_WRITEBURSTS | 
30142 +            ECTRL_ENABLE_2_2READBURSTS);
30143 +
30144 +#ifdef LINUX_SPARC
30145 +    value &= ~(ECTRL_ENABLE_LATENCY_RESET | ECTRL_ENABLE_WRITEBURSTS);
30146 +#endif
30147 +
30148 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
30149 +
30150 +    switch (dev->dev_devinfo.dev_revision_id)
30151 +    {
30152 +    case PCI_REVISION_ID_ELAN4_REVA:
30153 +       /* Delay 10ms here if we've changed the sysclock ratio */
30154 +       /* to allow the PLL to stabilise before proceeding */
30155 +       udelay (10000);
30156 +       break;
30157 +       
30158 +    case PCI_REVISION_ID_ELAN4_REVB:
30159 +    {
30160 +       unsigned char val = read_i2c (dev, I2cLedsValue);
30161 +
30162 +       /* On RevB we have to explicitly reset the PLLs */
30163 +       pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
30164 +
30165 +       write_i2c (dev, I2cLedsValue, val | 0x80);
30166 +       udelay (1000);
30167 +
30168 +       /* Issue the PLL counter reset and immediately inhibit all pci interaction 
30169 +        * while the PLL is recovering. The write to the PCI_COMMAND register has 
30170 +        * to occur within 50uS of the write to the i2c registers */
30171 +       local_irq_save (flags);
30172 +       write_i2c (dev, I2cLedsValue, val & ~0x80);
30173 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, (1 << 10) /* PCI_COMMAND_DISABLE_INT */);
30174 +       local_irq_restore (flags);
30175 +
30176 +       /* Wait for the write to occur and for the PLL to regain lock */
30177 +       udelay (20000); udelay (20000);
30178 +
30179 +       /* Re-enable pci interaction and clear any spurious errors detected */
30180 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_STATUS, PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR);
30181 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command);
30182 +       break;
30183 +    }
30184 +    }
30185 +
30186 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
30187 +
30188 +    /* Enable master accesses */
30189 +    pci_set_master (dev->dev_osdep.pdev);
30190 +
30191 +    /* Verify that the memWrInvalidate bit is set */
30192 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
30193 +    pci_read_config_byte (dev->dev_osdep.pdev, PCI_CACHE_LINE_SIZE, &cacheline);
30194 +
30195 +    if ((command & PCI_COMMAND_INVALIDATE) == 0)
30196 +    {
30197 +       printk ("elan%d: enable MemWrInvalidate (cacheline %d)\n",
30198 +               dev->dev_instance, cacheline * 4);
30199 +
30200 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command | PCI_COMMAND_INVALIDATE);
30201 +    }
30202 +
30203 +    return (0);
30204 +}
30205 +
30206 +void
30207 +elan4_updatepll (ELAN4_DEV *dev, unsigned int val)
30208 +{
30209 +    u32 value;
30210 +
30211 +    if (elan4_pll_cfg == 0)
30212 +    {
30213 +       pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, &value);
30214 +
30215 +       pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
30216 +                               (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | val);
30217 +
30218 +       /* Delay 10ms here if we've changed the sysclock ratio */
30219 +       /* to allow the PLL to stabilise before proceeding */
30220 +       udelay (10000);
30221 +    }  
30222 +}
30223 +
30224 +void
30225 +elan4_pcifini (ELAN4_DEV *dev)
30226 +{
30227 +    u32 value;
30228 +
30229 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
30230 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
30231 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
30232 +
30233 +    pci_disable_device (dev->dev_osdep.pdev);
30234 +}
30235 +
30236 +void
30237 +elan4_pcierror (ELAN4_DEV *dev)
30238 +{
30239 +    struct pci_dev *pci = dev->dev_osdep.pdev;
30240 +    u8  type;
30241 +    u16 status, cmd;
30242 +    u32 physlo, physhi, control;
30243 +    
30244 +    printk("elan%d: pci error has occurred\n", dev->dev_instance);
30245 +
30246 +    pci_read_config_word  (pci, PCI_STATUS,             &status);
30247 +    pci_read_config_word  (pci, PCI_COMMAND,             &cmd);
30248 +    pci_read_config_dword (pci, PCI_ELAN_CONTROL,       &control);
30249 +
30250 +    if (control & ECTRL_REC_SPLIT_COMP_MESSAGE)
30251 +    {
30252 +       u32 message, attr;
30253 +       
30254 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control & ~ECTRL_SELECT_SPLIT_MESS_ATTR);
30255 +       pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &message);
30256 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_SELECT_SPLIT_MESS_ATTR);
30257 +       pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &attr);
30258 +
30259 +       printk ("elan%d: pcierror - received split completion message - attr=%08x, message=%08x\n", 
30260 +               dev->dev_instance, attr, message);
30261 +
30262 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_REC_SPLIT_COMP_MESSAGE); /* clear the error */
30263 +    }
30264 +    else
30265 +    {
30266 +       pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_LO, &physlo);
30267 +       pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_HI, &physhi);
30268 +       pci_read_config_byte  (pci, PCI_ELAN_PARITY_TYPE,    &type);
30269 +       
30270 +       printk ("elan%d: pcierror - status %x cmd %4x physaddr %08x%08x type %x\n", 
30271 +               dev->dev_instance, status, cmd, physhi, physlo, type);
30272 +       
30273 +       if (status & PCI_STATUS_PARITY)
30274 +           printk ("elan%d: parity error signalled (PERR)\n", dev->dev_instance);
30275 +       if (status & PCI_STATUS_DETECTED_PARITY)
30276 +           printk ("elan%d: detected parity error\n", dev->dev_instance);
30277 +       if (status & PCI_STATUS_REC_MASTER_ABORT)
30278 +           printk ("elan%d: received master abort\n", dev->dev_instance);
30279 +       if (status & PCI_STATUS_REC_TARGET_ABORT)
30280 +           printk ("elan%d: received target abort\n", dev->dev_instance);
30281 +       if (status & PCI_STATUS_SIG_SYSTEM_ERROR)
30282 +           printk ("elan%d: signalled SERR\n", dev->dev_instance);
30283 +       if (status & PCI_STATUS_SIG_TARGET_ABORT)
30284 +           printk ("elan%d: signalled target abort\n", dev->dev_instance);
30285 +
30286 +       pci_write_config_word (pci, PCI_STATUS, status);        /* clear the errors */
30287 +    }
30288 +
30289 +    DISABLE_INT_MASK (dev, INT_PciMemErr);
30290 +
30291 +#ifdef notdef
30292 +    panic ("elan%d: pcierror\n", dev->dev_instance);           /* better panic ! */
30293 +#endif
30294 +}
30295 +
30296 +static irqreturn_t
30297 +elan4_irq (int irq, void *arg, struct pt_regs *regs)
30298 +{
30299 +    if (elan4_1msi0 ((ELAN4_DEV *) arg))
30300 +           return IRQ_HANDLED;
30301 +    else
30302 +           return IRQ_NONE;
30303 +}
30304 +
30305 +ioaddr_t
30306 +elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handle)
30307 +{
30308 +    return (ioaddr_t) ioremap_nocache (pci_resource_start (dev->dev_osdep.pdev, bar) + off, size);
30309 +}
30310 +
30311 +void
30312 +elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handle)
30313 +{
30314 +    iounmap ((void *) ptr);
30315 +}
30316 +
30317 +unsigned long
30318 +elan4_resource_len (ELAN4_DEV *dev, unsigned bar)
30319 +{
30320 +    return (pci_resource_len (dev->dev_osdep.pdev, bar));
30321 +}
30322 +
30323 +void
30324 +elan4_configure_writecombining (ELAN4_DEV *dev)
30325 +{
30326 +    if ((dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE))
30327 +       return;
30328 +
30329 +#if (defined(__i386) || defined(__x86_64)) && defined (X86_FEATURE_PAT)
30330 +
30331 +#ifndef boot_cpu_has
30332 +#  define boot_cpu_has(bit)      test_bit(bit, boot_cpu_data.x86_capability)
30333 +#endif
30334 +
30335 +    /* Try to utilise PAT entries which already exist */
30336 +    if (boot_cpu_has (X86_FEATURE_PAT))
30337 +    {
30338 +       unsigned int val0, val1, i;
30339 +       int slot = -1;
30340 +
30341 +       /* Read the IA32CR_PAT MSR register and see if a slot is
30342 +        * set for write-combining.  Note we assume that all CPUs 
30343 +        * are configured the same, as they're supposed to be. */
30344 +       rdmsr (0x277, val0, val1);
30345 +       
30346 +       /* Check for PAT write combining entry (value 0x01) */
30347 +       for (i = 0; i < 4; i++, val0 >>= 8)
30348 +           if ((val0 & 0xff) == 0x01)
30349 +               slot = i;
30350 +       for (i = 4; i < 8; i++, val1 >>= 8)
30351 +           if ((val1 & 0xff) == 0x01)
30352 +               slot = i;
30353 +
30354 +       if (slot >= 0)
30355 +       {
30356 +           printk ("elan%d: using PAT for write combining (slot %d)\n", dev->dev_instance, slot);
30357 +
30358 +           pat_pteval = ((slot & 4) ? _PAGE_PSE : 0) | ((slot & 2) ? _PAGE_PCD : 0) | ((slot & 1) ? _PAGE_PWT : 0);
30359 +           return;
30360 +       }
30361 +    }
30362 +#endif
30363 +
30364 +#ifdef CONFIG_MTRR
30365 +    /* try and initialise the MTRR registers to enable write-combining */
30366 +    dev->dev_osdep.sdram_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), 
30367 +                                         pci_resource_len   (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM),
30368 +                                         MTRR_TYPE_WRCOMB, 1);
30369 +    if (dev->dev_osdep.sdram_mtrr < 0)
30370 +       printk ("elan%d: cannot configure MTRR for sdram\n", dev->dev_instance);
30371 +    
30372 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
30373 +    {
30374 +       unsigned int cqreorder = dev->dev_cqcount >> 1;
30375 +       unsigned int cqcount   = dev->dev_cqcount  - cqreorder;
30376 +
30377 +       dev->dev_osdep.regs_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
30378 +                                            (dev->dev_cqoffset + cqreorder) * CQ_CommandMappingSize,
30379 +                                            CQ_CommandMappingSize * cqcount,
30380 +                                            MTRR_TYPE_WRCOMB, 1);
30381 +       
30382 +       if (dev->dev_osdep.regs_mtrr < 0)
30383 +           printk ("elan%d: cannot configure MTRR for command ports\n", dev->dev_instance);
30384 +       else
30385 +           dev->dev_cqreorder = cqreorder;
30386 +    }
30387 +#endif
30388 +}
30389 +
30390 +void
30391 +elan4_unconfigure_writecombining (ELAN4_DEV *dev)
30392 +{
30393 +    if ((dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE))
30394 +       return;
30395 +
30396 +#ifdef CONFIG_MTRR
30397 +    if (pat_pteval == -1)
30398 +    {
30399 +       if (dev->dev_osdep.sdram_mtrr >=0 )
30400 +           mtrr_del (dev->dev_osdep.sdram_mtrr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), 
30401 +                     pci_resource_len   (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
30402 +       
30403 +       if (dev->dev_cqreorder && dev->dev_osdep.regs_mtrr >= 0)
30404 +           mtrr_del (dev->dev_osdep.regs_mtrr, 
30405 +                     pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
30406 +                     (dev->dev_cqoffset + dev->dev_cqreorder) * CQ_CommandMappingSize,
30407 +                     CQ_CommandMappingSize * (dev->dev_cqcount >> 1));
30408 +    }
30409 +#endif
30410 +}
30411 +
30412 +EXPORT_SYMBOL(elan4_reference_device);
30413 +EXPORT_SYMBOL(elan4_dereference_device);
30414 +
30415 +/*
30416 + * Local variables:
30417 + * c-file-style: "stroustrup"
30418 + * End:
30419 + */
30420 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/i2c.c
30421 ===================================================================
30422 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/i2c.c        2004-02-23 16:02:56.000000000 -0500
30423 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/i2c.c     2005-07-28 14:52:52.829681624 -0400
30424 @@ -0,0 +1,248 @@
30425 +/*
30426 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
30427 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
30428 + * 
30429 + *    For licensing information please see the supplied COPYING file
30430 + *
30431 + */
30432 +
30433 +#ident "@(#)$Id: i2c.c,v 1.4 2004/01/07 13:37:45 jon Exp $"
30434 +/*      $Source: /cvs/master/quadrics/elan4mod/i2c.c,v $*/
30435 +#include <qsnet/kernel.h>
30436 +
30437 +#include <elan4/sdram.h>
30438 +#include <elan4/debug.h>
30439 +#include <elan4/device.h>
30440 +#include <elan4/commands.h>
30441 +
30442 +#include <elan4/i2c.h>
30443 +#include <elan4/pci.h>
30444 +#include <elan4/ioctl.h>
30445 +#include <elan4/registers.h>
30446 +
30447 +#define I2C_POLL_LIMIT         8
30448 +
30449 +static int
30450 +i2c_poll_busy (ELAN4_DEV *dev)
30451 +{
30452 +    int t    = 100;
30453 +    int loop = 0;
30454 +    volatile unsigned char val;
30455 +
30456 +    /* wait for any led I2C operation to finish */
30457 +    while (((val = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cPortBusy) && loop++ < I2C_POLL_LIMIT)
30458 +    {
30459 +       DELAY (t);
30460 +       
30461 +       if (t < 500000)
30462 +           t <<= 1;
30463 +    }
30464 +    if (loop >= I2C_POLL_LIMIT)
30465 +    {
30466 +       printk ("elan%d: I2c has timed out waiting for I2cPortBusy to clear!\n", dev->dev_instance);
30467 +       printk ("elan%d: I2cPortControl=%x I2cLedBase=%x I2cStatus=%x\n",
30468 +               dev->dev_instance, val, read_i2c (dev, I2cLedBase), read_i2c (dev, I2cStatus));
30469 +    }
30470 +
30471 +    return val;
30472 +}
30473 +
30474 +static int
30475 +i2c_poll_stopped (ELAN4_DEV *dev)
30476 +{
30477 +    int t    = 100;
30478 +    int loop = 0;
30479 +    unsigned char val=0, newval;
30480 +
30481 +    /* wait for any led I2C operation to finish. Must see it stopped at least twice */
30482 +    while (!(((newval = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cStopped) &&
30483 +             (val & I2cCntl_I2cStopped)) &&
30484 +             (loop++ < I2C_POLL_LIMIT))
30485 +    {
30486 +       DELAY (t);
30487 +       
30488 +       if (t < 500000)
30489 +           t <<= 1;
30490 +       val = newval;
30491 +    }
30492 +
30493 +    return val;
30494 +}
30495 +
30496 +int
30497 +i2c_disable_auto_led_update (ELAN4_DEV *dev)
30498 +{
30499 +    spin_lock (&dev->dev_i2c_lock);
30500 +
30501 +    if (dev->dev_i2c_led_disabled++ == 0)
30502 +    {
30503 +       write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) & ~I2cCntl_I2cUpdatingLedReg);
30504 +
30505 +       if (! (i2c_poll_stopped (dev) & I2cCntl_I2cStopped))
30506 +       {
30507 +           write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg);
30508 +           
30509 +           spin_unlock (&dev->dev_i2c_lock);
30510 +           
30511 +           return -EAGAIN;
30512 +       }
30513 +       
30514 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) & ~I2cCntl_SampleNewLedValues);
30515 +    }
30516 +
30517 +    spin_unlock (&dev->dev_i2c_lock);
30518 +
30519 +    return 0;
30520 +}
30521 +
30522 +void
30523 +i2c_enable_auto_led_update (ELAN4_DEV *dev)
30524 +{
30525 +    spin_lock (&dev->dev_i2c_lock);
30526 +    if (--dev->dev_i2c_led_disabled == 0)
30527 +    {
30528 +       write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg);
30529 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_SampleNewLedValues);
30530 +    }
30531 +
30532 +    spin_unlock (&dev->dev_i2c_lock);
30533 +}
30534 +
30535 +int
30536 +i2c_write (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data)
30537 +{
30538 +    int i;
30539 +
30540 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
30541 +       return -EAGAIN;
30542 +    
30543 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
30544 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
30545 +    
30546 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
30547 +       return -ENXIO;
30548 +
30549 +    for (i = 0; i < count; i++)
30550 +    {
30551 +       write_i2c (dev, I2cWrData, data[i]);
30552 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | (i == (count-1) ? I2cCntl_I2cPortGenStopBit : 0));
30553 +    }
30554 +
30555 +    return 0;
30556 +}
30557 +
30558 +int
30559 +i2c_read (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data)
30560 +{
30561 +    int i;
30562 +
30563 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
30564 +       return -EAGAIN; /* not idle */ 
30565 +
30566 +    write_i2c (dev, I2cWrData,      I2C_READ_ADDR(address));
30567 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
30568 +
30569 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
30570 +       return -ENXIO;
30571 +    
30572 +    for (i = 0; i < count; i++)
30573 +    {
30574 +       write_i2c (dev, I2cWrData, 0xff);
30575 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortRead | ((i == count-1) ? I2cCntl_I2cPortGenStopBit : 0));
30576 +
30577 +       i2c_poll_busy (dev);
30578 +
30579 +       data[i] = read_i2c (dev, I2cRdData);
30580 +    }
30581 +
30582 +    return 0;
30583 +}
30584 +
30585 +int
30586 +i2c_writereg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data)
30587 +{
30588 +    int i;
30589 +
30590 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
30591 +       return -EAGAIN; /* not idle */ 
30592 +
30593 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
30594 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
30595 +
30596 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
30597 +       return -ENXIO;
30598 +    
30599 +    write_i2c (dev, I2cWrData,      reg);
30600 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
30601 +
30602 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
30603 +       return -ENXIO;
30604 +    
30605 +    for (i = 0; i < count; i++)
30606 +    {
30607 +       write_i2c (dev, I2cWrData, data[i]);
30608 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | ((i == count-1) ? I2cCntl_I2cPortGenStopBit : 0));
30609 +
30610 +       if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
30611 +           printk (" i2c_writereg: off %d failed\n", i);
30612 +    }
30613 +
30614 +    return 0;
30615 +}
30616 +
30617 +int
30618 +i2c_readreg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data)
30619 +{
30620 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
30621 +       return -EAGAIN; /* not idle */ 
30622 +
30623 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
30624 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
30625 +
30626 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
30627 +       return -ENXIO;
30628 +    
30629 +    write_i2c (dev, I2cWrData,      reg);
30630 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | I2cCntl_I2cPortGenStopBit);
30631 +
30632 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
30633 +       return -ENXIO;
30634 +
30635 +    return i2c_read (dev, address, count, data);
30636 +}
30637 +
30638 +int
30639 +i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int len, unsigned char *data)
30640 +{
30641 +    unsigned int top = addr + len;
30642 +    int res;
30643 +
30644 +    if ((res = i2c_disable_auto_led_update (dev)) == 0)
30645 +    {
30646 +       /* read the rom in chunks that don't span the block boundary */
30647 +       while (addr < top)
30648 +       {
30649 +           unsigned int thisnob  = top - addr;
30650 +           unsigned int blocknob = I2C_24LC16B_BLOCKSIZE - I2C_24LC16B_BLOCKOFFSET(addr);
30651 +           
30652 +           if (thisnob > blocknob)
30653 +               thisnob = blocknob;
30654 +
30655 +           if ((res = i2c_readreg (dev, I2C_EEPROM_ADDR + I2C_24LC16B_BLOCKADDR(addr),
30656 +                                   I2C_24LC16B_BLOCKOFFSET(addr), thisnob, data)) < 0)
30657 +               break;
30658 +           
30659 +           addr += thisnob;
30660 +           data += thisnob;
30661 +       }
30662 +
30663 +       i2c_enable_auto_led_update (dev);
30664 +    }
30665 +    return res;
30666 +}
30667 +
30668 +/*
30669 + * Local variables:
30670 + * c-file-style: "stroustrup"
30671 + * End:
30672 + */
30673 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/intcookie.c
30674 ===================================================================
30675 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/intcookie.c  2004-02-23 16:02:56.000000000 -0500
30676 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/intcookie.c       2005-07-28 14:52:52.829681624 -0400
30677 @@ -0,0 +1,371 @@
30678 +/*
30679 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
30680 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
30681 + * 
30682 + *    For licensing information please see the supplied COPYING file
30683 + *
30684 + */
30685 +
30686 +#ident "@(#)$Id: intcookie.c,v 1.14.2.1 2005/03/01 12:01:57 david Exp $"
30687 +/*      $Source: /cvs/master/quadrics/elan4mod/intcookie.c,v $*/
30688 +
30689 +#include <qsnet/kernel.h>
30690 +
30691 +#include <elan4/debug.h>
30692 +#include <elan4/types.h>
30693 +#include <elan/capability.h>
30694 +#include <elan4/intcookie.h>
30695 +
30696 +static INTCOOKIE_TABLE *intcookie_tables;
30697 +static spinlock_t      intcookie_table_lock;
30698 +
30699 +/*
30700 + * intcookie_drop_entry:
30701 + *   drop the reference to a cookie held 
30702 + *   by the cookie table
30703 + */
30704 +static void
30705 +intcookie_drop_entry (INTCOOKIE_ENTRY *ent)
30706 +{
30707 +    unsigned long flags;
30708 +
30709 +    spin_lock_irqsave (&ent->ent_lock, flags);
30710 +    if (--ent->ent_ref != 0)
30711 +    {
30712 +       ent->ent_fired = ent->ent_cookie;
30713 +       kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
30714 +
30715 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30716 +    }
30717 +    else
30718 +    {
30719 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30720 +
30721 +       spin_lock_destroy (&ent->ent_lock);
30722 +       kcondvar_destroy (&ent->ent_wait);
30723 +
30724 +       KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY));
30725 +    }
30726 +}
30727 +
30728 +void
30729 +intcookie_init()
30730 +{
30731 +    spin_lock_init (&intcookie_table_lock);
30732 +}
30733 +
30734 +void
30735 +intcookie_fini()
30736 +{
30737 +    spin_lock_destroy (&intcookie_table_lock);
30738 +}
30739 +
30740 +INTCOOKIE_TABLE *
30741 +intcookie_alloc_table (ELAN_CAPABILITY *cap)
30742 +{
30743 +    INTCOOKIE_TABLE *tbl, *ntbl;
30744 +    ELAN_CAPABILITY *ncap;
30745 +    
30746 +    KMEM_ZALLOC (ntbl, INTCOOKIE_TABLE *, sizeof (INTCOOKIE_TABLE), 1);
30747 +
30748 +    if (ntbl == NULL)
30749 +       return (NULL);
30750 +
30751 +    KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1);
30752 +
30753 +    if (ncap == NULL)
30754 +    {
30755 +       KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
30756 +       return (NULL);
30757 +    }
30758 +
30759 +    spin_lock (&intcookie_table_lock);
30760 +    
30761 +    for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next)
30762 +       if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext)
30763 +           break;
30764 +    
30765 +    if (tbl != NULL)
30766 +       tbl->tbl_ref++;
30767 +    else
30768 +    {
30769 +       spin_lock_init (&ntbl->tbl_lock);
30770 +
30771 +       ntbl->tbl_cap     = ncap;
30772 +       ntbl->tbl_ref     = 1;
30773 +       ntbl->tbl_entries = NULL;
30774 +       
30775 +       /* Save supplied cap */
30776 +       memcpy (ncap, cap, ELAN_CAP_SIZE(cap));
30777 +
30778 +       if ((ntbl->tbl_next = intcookie_tables) != NULL)
30779 +           intcookie_tables->tbl_prev = ntbl;
30780 +       intcookie_tables = ntbl;
30781 +       ntbl->tbl_prev = NULL;
30782 +    }
30783 +    spin_unlock (&intcookie_table_lock);
30784 +
30785 +    if (tbl == NULL)
30786 +       return (ntbl);
30787 +    else
30788 +    {
30789 +       KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
30790 +       KMEM_FREE (ncap, ELAN_CAP_SIZE(cap));
30791 +       return (tbl);
30792 +    }    
30793 +}
30794 +
30795 +void
30796 +intcookie_free_table (INTCOOKIE_TABLE *tbl)
30797 +{
30798 +    INTCOOKIE_ENTRY *ent;
30799 +
30800 +    spin_lock (&intcookie_table_lock);
30801 +    if (tbl->tbl_ref > 1)
30802 +    {
30803 +       tbl->tbl_ref--;
30804 +       spin_unlock (&intcookie_table_lock);
30805 +       return;
30806 +    }
30807 +    
30808 +    if (tbl->tbl_prev)
30809 +       tbl->tbl_prev->tbl_next = tbl->tbl_next;
30810 +    else
30811 +       intcookie_tables = tbl->tbl_next;
30812 +    if (tbl->tbl_next)
30813 +       tbl->tbl_next->tbl_prev = tbl->tbl_prev;
30814 +    
30815 +    spin_unlock (&intcookie_table_lock);
30816 +    
30817 +    /* NOTE - table no longer visible to other threads
30818 +     *        no need to acquire tbl_lock */
30819 +    while ((ent = tbl->tbl_entries) != NULL)
30820 +    {
30821 +       if ((tbl->tbl_entries = ent->ent_next) != NULL)
30822 +           ent->ent_next->ent_prev = NULL;
30823 +       
30824 +       intcookie_drop_entry (ent);
30825 +    }
30826 +    spin_lock_destroy (&tbl->tbl_lock);
30827 +
30828 +    KMEM_FREE (tbl->tbl_cap, ELAN_CAP_SIZE(tbl->tbl_cap));
30829 +    KMEM_FREE (tbl, sizeof (INTCOOKIE_TABLE));
30830 +}
30831 +
30832 +int
30833 +intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30834 +{
30835 +    INTCOOKIE_ENTRY *ent, *nent;
30836 +    unsigned long flags;
30837 +
30838 +    KMEM_ZALLOC (nent, INTCOOKIE_ENTRY *, sizeof (INTCOOKIE_ENTRY), 1);
30839 +
30840 +    if (nent == NULL)
30841 +       return (-ENOMEM);
30842 +    
30843 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30844 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30845 +       if (ent->ent_cookie == cookie)
30846 +           break;
30847 +
30848 +    if (ent == NULL)
30849 +    {
30850 +       kcondvar_init (&nent->ent_wait);
30851 +       spin_lock_init (&nent->ent_lock);
30852 +
30853 +       nent->ent_ref    = 1;
30854 +       nent->ent_cookie = cookie;
30855 +
30856 +       if ((nent->ent_next = tbl->tbl_entries) != NULL)
30857 +           tbl->tbl_entries->ent_prev = nent;
30858 +       tbl->tbl_entries = nent;
30859 +       nent->ent_prev = NULL;
30860 +    }
30861 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30862 +
30863 +    if (ent == NULL)
30864 +       return (0);
30865 +    else
30866 +    {
30867 +       KMEM_FREE (nent, sizeof (INTCOOKIE_ENTRY));
30868 +       return (-EINVAL);
30869 +    }
30870 +}
30871 +
30872 +int
30873 +intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30874 +{
30875 +    INTCOOKIE_ENTRY *ent;
30876 +    unsigned long flags;
30877 +
30878 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30879 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30880 +       if (ent->ent_cookie == cookie)
30881 +           break;
30882 +    
30883 +    if (ent == NULL)
30884 +    {
30885 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30886 +       return (-EINVAL);
30887 +    }
30888 +
30889 +    if (ent->ent_prev == NULL)
30890 +       tbl->tbl_entries = ent->ent_next;
30891 +    else
30892 +       ent->ent_prev->ent_next = ent->ent_next;
30893 +
30894 +    if (ent->ent_next != NULL)
30895 +       ent->ent_next->ent_prev = ent->ent_prev;
30896 +    
30897 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30898 +
30899 +    intcookie_drop_entry (ent);
30900 +
30901 +    return (0);
30902 +}
30903 +
30904 +/*
30905 + * intcookie_fire_cookie:
30906 + *    fire the cookie - this is called from the event interrupt.
30907 + */
30908 +int
30909 +intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30910 +{
30911 +    INTCOOKIE_ENTRY *ent;
30912 +    unsigned long flags;
30913 +
30914 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30915 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30916 +       if (ent->ent_cookie == cookie)
30917 +           break;
30918 +    
30919 +    if (ent == NULL)
30920 +    {
30921 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30922 +       return (-EINVAL);
30923 +    }
30924 +           
30925 +    spin_lock (&ent->ent_lock);
30926 +    ent->ent_fired = cookie;
30927 +    kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
30928 +    spin_unlock (&ent->ent_lock);
30929 +
30930 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30931 +
30932 +    return (0);
30933 +}    
30934 +
30935 +int
30936 +intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie)
30937 +{
30938 +    int res;
30939 +    INTCOOKIE_TABLE *tbl;
30940 +
30941 +    spin_lock (&intcookie_table_lock);
30942 +    
30943 +    for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next)
30944 +       if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext)
30945 +           break;
30946 +    
30947 +    if (tbl != NULL)
30948 +       tbl->tbl_ref++;
30949 +
30950 +    spin_unlock (&intcookie_table_lock);
30951 +
30952 +    /* No matching table found */
30953 +    if (tbl == NULL)
30954 +       return (-EINVAL);
30955 +
30956 +    /* Fire the correct cookie */
30957 +    res = intcookie_fire (tbl, cookie);
30958 +
30959 +    /* Decrement reference count (and free if necessary) */
30960 +    intcookie_free_table (tbl);
30961 +
30962 +    return (res);
30963 +}
30964 +
30965 +/*
30966 + * intcookie_wait_cookie:
30967 + *    deschedule on a cookie if it has not already fired.
30968 + *    note - if the cookie is removed from the table, then
30969 + *           we free it off when we're woken up.
30970 + */
30971 +int
30972 +intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
30973 +{
30974 +    INTCOOKIE_ENTRY *ent;
30975 +    unsigned long flags;
30976 +    int res;
30977 +    
30978 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
30979 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
30980 +       if (ent->ent_cookie == cookie)
30981 +           break;
30982 +    
30983 +    if (ent == NULL)
30984 +    {
30985 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
30986 +       return (-EINVAL);
30987 +    }
30988 +
30989 +    spin_lock (&ent->ent_lock);
30990 +    spin_unlock (&tbl->tbl_lock);
30991 +
30992 +    if (ent->ent_fired != 0)
30993 +    {
30994 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
30995 +       return (0);
30996 +    }
30997 +
30998 +    ent->ent_ref++;
30999 +    kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags);
31000 +    
31001 +    res = ent->ent_fired ? 0 : -EINTR;
31002 +
31003 +    if (--ent->ent_ref > 0)
31004 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
31005 +    else
31006 +    {
31007 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
31008 +       
31009 +       spin_lock_destroy (&ent->ent_lock);
31010 +       kcondvar_destroy (&ent->ent_wait);
31011 +
31012 +       KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY));
31013 +    }
31014 +
31015 +    return (res);
31016 +}
31017 +
31018 +int
31019 +intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
31020 +{
31021 +    INTCOOKIE_ENTRY *ent;
31022 +    unsigned long flags;
31023 +
31024 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
31025 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
31026 +       if (ent->ent_cookie == cookie)
31027 +           break;
31028 +    
31029 +    if (ent == NULL)
31030 +    {
31031 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31032 +       return (-EINVAL);
31033 +    }
31034 +           
31035 +    spin_lock (&ent->ent_lock);
31036 +    ent->ent_fired = 0;
31037 +    spin_unlock (&ent->ent_lock);
31038 +
31039 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31040 +
31041 +    return (0);
31042 +}
31043 +
31044 +/*
31045 + * Local variables:
31046 + * c-file-style: "stroustrup"
31047 + * End:
31048 + */
31049 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/Makefile
31050 ===================================================================
31051 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/Makefile     2004-02-23 16:02:56.000000000 -0500
31052 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/Makefile  2005-07-28 14:52:52.830681472 -0400
31053 @@ -0,0 +1,15 @@
31054 +#
31055 +# Makefile for Quadrics QsNet
31056 +#
31057 +# Copyright (c) 2002-2004 Quadrics Ltd
31058 +#
31059 +# File: drivers/net/qsnet/elan4/Makefile
31060 +#
31061 +
31062 +
31063 +#
31064 +
31065 +obj-$(CONFIG_ELAN4)    += elan4.o
31066 +elan4-objs     := device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o
31067 +
31068 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
31069 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/Makefile.conf
31070 ===================================================================
31071 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/Makefile.conf        2004-02-23 16:02:56.000000000 -0500
31072 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/Makefile.conf     2005-07-28 14:52:52.830681472 -0400
31073 @@ -0,0 +1,10 @@
31074 +# Flags for generating QsNet Linux Kernel Makefiles
31075 +MODNAME                =       elan4.o
31076 +MODULENAME     =       elan4
31077 +KOBJFILES      =       device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o
31078 +EXPORT_KOBJS   =       device.o device_Linux.o mmu.o mmu_Linux.o procfs_Linux.o routetable.o sdram.o trap.o
31079 +CONFIG_NAME    =       CONFIG_ELAN4
31080 +SGALFC         =       
31081 +# EXTRALINES START
31082 +
31083 +# EXTRALINES END
31084 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/mmu.c
31085 ===================================================================
31086 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/mmu.c        2004-02-23 16:02:56.000000000 -0500
31087 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/mmu.c     2005-07-28 14:52:52.832681168 -0400
31088 @@ -0,0 +1,862 @@
31089 +/*
31090 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
31091 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
31092 + * 
31093 + *    For licensing information please see the supplied COPYING file
31094 + *
31095 + */
31096 +
31097 +#ident "@(#)$Id: mmu.c,v 1.29.6.3 2005/03/10 15:49:24 mike Exp $"
31098 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu.c,v $*/
31099 +
31100 +#include <qsnet/kernel.h>
31101 +#include <qsnet/kpte.h>
31102 +
31103 +#include <elan4/debug.h>
31104 +#include <elan4/device.h>
31105 +
31106 +int elan4_debug_mmu;
31107 +
31108 +/* Permission table - see ELAN4 MMU documentation */
31109 +u_char elan4_permtable[] =
31110 +{
31111 +   0x00, /* 0x000000 - Disable */
31112 +   0x00, /* 0x000000 - Unused  */
31113 +   0x01, /* 0x000001 - Local Data Read */
31114 +   0x03, /* 0x000011 - Local Data Write */
31115 +   0x11, /* 0x010001 - Local Read */
31116 +   0x10, /* 0x010000 - Local Execute */
31117 +   0x05, /* 0x000101 - Read Only */
31118 +   0x13, /* 0x010011 - Local Write */
31119 +   0x20, /* 0x100000 - Local Event Access */
31120 +   0x23, /* 0x100011 - Local Event Write Ac */
31121 +   0xa3, /* 1x100011 - Remote Ev Loc Write */
31122 +   0xaf, /* 1x101111 - Remote All */
31123 +   0x07, /* 0x000111 - Remote Read Only */
31124 +   0x0d, /* 0x001101 - Remote Write Only */
31125 +   0x0f, /* 0x001111 - Remote Read/Write */
31126 +   0xbf, /* 1x111111 - No Fault */
31127 +};
31128 +
31129 +u_char elan4_permreadonly[] = 
31130 +{
31131 +    PERM_Disabled,             /* PERM_Disabled */
31132 +    PERM_Disabled,             /* PERM_Unused */
31133 +    PERM_LocDataRead,          /* PERM_LocDataRead */
31134 +    PERM_LocDataRead,          /* PERM_LocDataWrite */
31135 +    PERM_LocRead,              /* PERM_LocRead */
31136 +    PERM_LocExecute,           /* PERM_LocExecute */
31137 +    PERM_ReadOnly,             /* PERM_ReadOnly */
31138 +    PERM_LocRead,              /* PERM_LocWrite */
31139 +    PERM_LocEventOnly,         /* PERM_LocEventOnly */
31140 +    PERM_LocDataRead,          /* PERM_LocEventWrite */
31141 +    PERM_LocDataRead,          /* PERM_RemoteEvent */
31142 +    PERM_ReadOnly,             /* PERM_RemoteAll */
31143 +    PERM_RemoteReadOnly,       /* PERM_RemoteReadOnly */
31144 +    PERM_ReadOnly,             /* PERM_RemoteWriteLocRead */
31145 +    PERM_ReadOnly,             /* PERM_DataReadWrite */
31146 +    PERM_ReadOnly,             /* PERM_NoFault */
31147 +};
31148 +
31149 +static void
31150 +elan4mmu_synctag (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx)
31151 +{
31152 +    E4_uint64 value = (he->he_tag[tagidx] & HE_TAG_VALID) ? he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK) : INVALID_CONTEXT;
31153 +    
31154 +    if (he->he_next)
31155 +       value |= ((tagidx == 0) ? 
31156 +                 ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) :
31157 +                 ((he->he_next->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK));
31158 +    else if (tagidx == 0)
31159 +       value |= TAG_CHAINPTR_30TO19_MASK;
31160 +    
31161 +    MPRINTF (DBG_DEVICE, 4, "elan4mmu_synctag: he=%p tagidx=%d he->he_tag=%llx -> value=%llx\n", he, tagidx, he->he_tag[tagidx], value);
31162 +
31163 +    elan4_sdram_writeq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx), value);
31164 +}
31165 +
31166 +static void
31167 +elan4mmu_chain_hents (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *phe, ELAN4_HASH_ENTRY *he)
31168 +{
31169 +    ASSERT ((elan4_sdram_readq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0)) & TAG_CHAINPTR_30TO19_MASK) == TAG_CHAINPTR_30TO19_MASK);
31170 +
31171 +    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(1),
31172 +                       ((phe->he_tag[1] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK)));
31173 +    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0),
31174 +                       ((phe->he_tag[0] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK)));
31175 +}
31176 +
31177 +static void
31178 +elan4mmu_writepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx, E4_uint64 value)
31179 +{
31180 +    /*
31181 +     * NOTE - we can only change a valid PTE if we're upgrading it's permissions,
31182 +     * any other changes should have invalidated it first. */
31183 +
31184 +    MPRINTF (DBG_DEVICE, 4, "elan4mmu_writepte: he=%p tagidx=%d pteidx=%x value=%llx\n", he, tagidx, pteidx, (unsigned long long) value);
31185 +
31186 +    if (pteidx == 3)
31187 +    {
31188 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx), (value >> 16) & 0xFFFF);
31189 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx), (value >> 32) & 0xFFFF);
31190 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), (value >> 0)  & 0xFFFF);
31191 +    }
31192 +    else
31193 +    {
31194 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx), (value >> 32) & 0xFFFF);
31195 +       elan4_sdram_writel (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), value & 0xFFFFFFFF);
31196 +    }
31197 +}
31198 +
31199 +static void
31200 +elan4mmu_invalidatepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
31201 +{
31202 +    if (pteidx == 3)
31203 +       elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), PTE_SetPerm (PERM_Disabled));
31204 +    else
31205 +       elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), PTE_SetPerm (PERM_Disabled));
31206 +}
31207 +
31208 +static E4_uint64
31209 +elan4mmu_readpte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
31210 +{
31211 +    if (pteidx == 3)
31212 +       return (((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx)) << 0)  |
31213 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx)) << 16) |
31214 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx)) << 32));
31215 +    else
31216 +       return ((E4_uint64) elan4_sdram_readl (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx)) |
31217 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx)) << 32));
31218 +}
31219 +
31220 +
31221 +void
31222 +elan4mmu_flush_tlb (ELAN4_DEV *dev)
31223 +{
31224 +    PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
31225 +
31226 +    while (read_reg64 (dev, SysControlReg) & CONT_TLB_FLUSH)
31227 +       DELAY (1);
31228 +}
31229 +
31230 +/*
31231 + * elanmmu_flush_tlb_hash - this flushes the hash copy entries and the elan
31232 + * tlb.  However after the write to the hash copy entry if the elan was
31233 + * in the process of walking, then it could write the hash copy with a valid
31234 + * entry which we had just invalidated. However once we've seen the tlb flushed
31235 + * then if the walk engine had done a write - then we need to invaldate the
31236 + * hash copy entries again and reflush the tlb.
31237 + *
31238 + * If we're invalidating a lot of hash blocks, then the chances are that the
31239 + * walk engine will perform a write - so we flush the tlb first, then invalidate
31240 + * the hash copy entries, then flush the tlb again.
31241 + */
31242 +static void
31243 +elan4mmu_flush_tlb_hash (ELAN4_DEV *dev, int tbl, unsigned baseidx, unsigned topidx)
31244 +{
31245 +    int       notmany = (abs(topidx - baseidx) < 5) ? 1 : 0;
31246 +    int       hashidx;
31247 +    E4_uint32 reg;
31248 +
31249 +    if (notmany)
31250 +       PULSE_SYSCONTROL (dev, CONT_CLEAR_WALK_WROTE_TABLES);
31251 +    else
31252 +       elan4mmu_flush_tlb(dev);
31253 +
31254 +    do {
31255 +       for (hashidx = baseidx; hashidx <= topidx; hashidx++)
31256 +           if (dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_COPY)
31257 +           {
31258 +               ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_VALID) == 0);
31259 +               ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[1] & HE_TAG_VALID) == 0);
31260 +
31261 +               elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 0);
31262 +               elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 1);
31263 +           }
31264 +       
31265 +       PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
31266 +       
31267 +       while ((reg = read_reg64 (dev, SysControlReg)) & CONT_TLB_FLUSH)
31268 +           DELAY (1);
31269 +       
31270 +    } while (notmany-- && (reg & CONT_CLEAR_WALK_WROTE_TABLES) != 0);
31271 +}
31272 +
31273 +void
31274 +elan4mmu_display_hent (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int hashidx)
31275 +{
31276 +    int tagidx;
31277 +
31278 +    elan4_debugf (DBG_DEVICE, DBG_MMU, "elan4mmu_display_hent: hashidx=%d he=%p entry at %lx\n", hashidx, he, he->he_entry);
31279 +    elan4_debugf (DBG_DEVICE, DBG_MMU, "                       next=%p prev=%p chain=%p,%p\n", he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1]);
31280 +    for (tagidx = 0; tagidx < 2; tagidx++)
31281 +    {
31282 +       E4_uint64 tag  = elan4_sdram_readq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx));
31283 +       E4_uint64 pte0 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 0));
31284 +       E4_uint64 pte1 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 1));
31285 +       E4_uint64 pte2 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 2));
31286 +       E4_uint64 pte3 = ((pte0 >> 48) | (pte1 >> 32) | (pte2 >> 16));
31287 +       
31288 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Tag %d (%llx,%08x) context=%04x vaddr=%llx\n", tagidx, he->he_tag[tagidx], he->he_pte[tagidx], (int) (tag & TAG_CONTEXT_MASK), (tag & TAG_ADDRESS_MASK));
31289 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 0 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte0 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
31290 +                     (int) (pte0 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte0 & PTE_TYPE_MASK), (pte0 & PTE_MOD_MASK) ? " mod" : "", (pte0 & PTE_REF_MASK) ? " ref" : "");
31291 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 1 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte1 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
31292 +                     (int) (pte1 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte1 & PTE_TYPE_MASK), (pte1 & PTE_MOD_MASK) ? " mod" : "", (pte1 & PTE_REF_MASK) ? " ref" : "");
31293 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 2 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte2 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
31294 +                     (int) (pte2 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte2 & PTE_TYPE_MASK), (pte2 & PTE_MOD_MASK) ? " mod" : "", (pte2 & PTE_REF_MASK) ? " ref" : "");
31295 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 3 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte3 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
31296 +                     (int) (pte3 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte3 & PTE_TYPE_MASK), (pte3 & PTE_MOD_MASK) ? " mod" : "", (pte3 & PTE_REF_MASK) ? " ref" : "");
31297 +    }
31298 +}
31299 +
31300 +static __inline__ ELAN4_HASH_ENTRY *
31301 +he_ctxt_next (ELAN4_HASH_ENTRY *he, int ctxnum)
31302 +{
31303 +    return ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum) ? he->he_chain[0] : he->he_chain[1];
31304 +}
31305 +
31306 +static __inline__ ELAN4_HASH_ENTRY *
31307 +he_ctxt_unlink (ELAN4_CTXT *ctxt, int tbl, int hashidx, ELAN4_HASH_ENTRY *prevhe, ELAN4_HASH_ENTRY *he, ELAN4_HASH_ENTRY *next)
31308 +{
31309 +    /* Check whether either tag is in use by this context */
31310 +    if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
31311 +       return he;
31312 +
31313 +    if (prevhe == NULL)
31314 +       ctxt->ctxt_mmuhash[tbl][hashidx] = next;
31315 +    else
31316 +    {
31317 +       /* previous he, ensure that both chain pointers are changed if this ctxt is using both tags */
31318 +       ASSERT ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num);
31319 +
31320 +       if ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
31321 +           prevhe->he_chain[0] = next;
31322 +       if ((prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
31323 +           prevhe->he_chain[1] = next;
31324 +    }
31325 +
31326 +    return prevhe;
31327 +}
31328 +
31329 +void
31330 +elan4mmu_display (ELAN4_CTXT *ctxt, int tbl, const char *tag)
31331 +{
31332 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
31333 +    ELAN4_HASH_ENTRY *he;
31334 +    int hashidx;
31335 +
31336 +    for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
31337 +       for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxt->ctxt_num))
31338 +       {
31339 +           elan4_debugf (DBG_DEVICE, DBG_MMU, "%s: hashidx=%d he=%p tags <%llx,%llx>\n", tag, hashidx, he,
31340 +                         (he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0,
31341 +                         (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[1], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0);
31342 +           elan4mmu_display_hent (dev, he, hashidx);
31343 +       }
31344 +}
31345 +
31346 +static ELAN4_HASH_ENTRY *
31347 +elan4mmu_alloc_hent (ELAN4_DEV *dev, int tbl, int hashidx, E4_uint64 newtag, int *tagidx)
31348 +{
31349 +    ELAN4_HASH_ENTRY *he, *phe;
31350 +    unsigned long flags;
31351 +    int i;
31352 +
31353 +    spin_lock_irqsave (&dev->dev_mmulock, flags);
31354 +
31355 +    /* 2nd see if there are any partial free blocks */
31356 +    if ((he = dev->dev_mmufree[tbl][hashidx]) != NULL)
31357 +    {
31358 +       *tagidx = ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) ? 0 : 1;
31359 +       
31360 +       MPRINTF (DBG_DEVICE, 3, "elan4mmu_alloc_hent: allocate he=%p idx=%d%s\n", he, *tagidx, (he == &dev->dev_mmuhash[tbl][hashidx]) ? " hash-block" : "");
31361 +       
31362 +       he->he_tag[*tagidx] = newtag | HE_TAG_VALID;
31363 +
31364 +       elan4mmu_synctag (dev, he, *tagidx);
31365 +       
31366 +       if ((he->he_tag[(*tagidx) ^ 1] & TAG_CONTEXT_MASK) != INVALID_CONTEXT)
31367 +       {
31368 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_alloc_hent: block full - remove from freelist\n");
31369 +           dev->dev_mmufree[tbl][hashidx] = he->he_chain[*tagidx];
31370 +       }
31371 +       
31372 +       spin_unlock_irqrestore (&dev->dev_mmulock, flags);
31373 +       return (he);
31374 +    }
31375 +    
31376 +    if ((he = dev->dev_mmufreelist) != NULL)
31377 +       dev->dev_mmufreelist = he->he_next;
31378 +    else
31379 +    {
31380 +       ELAN4_HASH_CHUNK *hc;
31381 +       sdramaddr_t       entry;
31382 +
31383 +       KMEM_ALLOC (hc, ELAN4_HASH_CHUNK *, sizeof (ELAN4_HASH_CHUNK), 0);
31384 +       
31385 +       if (hc == NULL)
31386 +       {
31387 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
31388 +           return ((ELAN4_HASH_ENTRY *) NULL);
31389 +       }
31390 +       
31391 +       if ((entry = elan4_sdram_alloc (dev, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS)) == (sdramaddr_t) 0)
31392 +       {
31393 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
31394 +
31395 +           KMEM_FREE (hc, sizeof (ELAN4_HASH_CHUNK));
31396 +           return ((ELAN4_HASH_ENTRY *) NULL);
31397 +       }
31398 +
31399 +       list_add_tail (&hc->hc_link, &dev->dev_hc_list);
31400 +
31401 +       elan4_sdram_zeroq_sdram (dev, entry, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS);
31402 +
31403 +       /* now initialise all chunks and chain all but the first onto the freelist */
31404 +       for (i = 0; i < ELAN4_HENT_CHUNKS; i++, entry += sizeof (E4_HashTableEntry))
31405 +       {
31406 +           hc->hc_hents[i].he_entry = entry;
31407 +
31408 +           if (i == 0)
31409 +               he = &hc->hc_hents[0];
31410 +           else
31411 +           {
31412 +               hc->hc_hents[i].he_next = dev->dev_mmufreelist;
31413 +               dev->dev_mmufreelist = &hc->hc_hents[i];
31414 +           }
31415 +       }
31416 +    }
31417 +
31418 +    /* Initialise hash entry, using slot 0 */
31419 +    *tagidx = 0;
31420 +
31421 +    he->he_next     = NULL;
31422 +    he->he_prev     = NULL;
31423 +    he->he_chain[0] = NULL;
31424 +    he->he_chain[1] = NULL;
31425 +    he->he_tag[0]   = newtag | HE_TAG_VALID;
31426 +    he->he_tag[1]   = E4MMU_TAG(0, INVALID_CONTEXT);
31427 +    he->he_pte[0]   = 0;
31428 +    he->he_pte[1]   = 0;
31429 +    
31430 +    elan4mmu_synctag (dev, he, 0);
31431 +    
31432 +    /* add slot 1 to freelist */
31433 +    he->he_chain[1] = dev->dev_mmufree[tbl][hashidx];
31434 +    dev->dev_mmufree[tbl][hashidx] = he;
31435 +    
31436 +    /* add to mmuhash lists */
31437 +    for (phe = &dev->dev_mmuhash[tbl][hashidx]; phe->he_next; phe = phe->he_next)
31438 +       ;
31439 +    phe->he_next = he;
31440 +    he->he_prev  = phe;
31441 +    he->he_next  = NULL;
31442 +    
31443 +    /* finally chain the hash block into the hash tables */
31444 +    elan4mmu_chain_hents (dev, phe, he);
31445 +    
31446 +    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
31447 +    return (he);
31448 +}
31449 +
31450 +static void
31451 +elan4mmu_free_hent (ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he, int tagidx)
31452 +{
31453 +    unsigned long flags;
31454 +    int pteidx;
31455 +
31456 +    /* Invalidate the tag, and zero all ptes */
31457 +    for (pteidx = 0; pteidx < 4; pteidx++)
31458 +       if (HE_GET_PTE(he, tagidx, pteidx))
31459 +           elan4mmu_writepte (dev, he, tagidx, pteidx, 0);
31460 +
31461 +    spin_lock_irqsave (&dev->dev_mmulock, flags);
31462 +
31463 +    he->he_tag[tagidx] = E4MMU_TAG(0, INVALID_CONTEXT);
31464 +    he->he_pte[tagidx] = 0;
31465 +
31466 +    elan4mmu_synctag (dev, he, tagidx);
31467 +
31468 +    if ((he->he_tag[tagidx^1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) /* Both tags are now free */
31469 +    {
31470 +       if (he == &dev->dev_mmuhash[tbl][hashidx])              /* it's the hash block entry */
31471 +       {                                                       /* so as it's already on the freelist */
31472 +           he->he_chain[tagidx] = he->he_chain[tagidx^1];      /* just copy its chain pointers */
31473 +
31474 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free but hashblk\n", tbl, hashidx, tagidx, he);
31475 +       }
31476 +       else
31477 +       {
31478 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free\n", tbl, hashidx, tagidx, he);
31479 +           
31480 +           /* XXXX - should remove it from the hash table, and 
31481 +           *         place back on the anonymous freelist */
31482 +           he->he_chain[tagidx] = he->he_chain[tagidx^1];
31483 +       }
31484 +    }
31485 +    else
31486 +    {
31487 +       /* Other tag still in use */
31488 +       he->he_chain[tagidx] = dev->dev_mmufree[tbl][hashidx];
31489 +       dev->dev_mmufree[tbl][hashidx] = he;
31490 +
31491 +       MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => other tag in use\n", tbl, hashidx, tagidx, he);
31492 +    }
31493 +    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
31494 +}
31495 +
31496 +ELAN4_HASH_ENTRY *
31497 +elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp)
31498 +{
31499 +    ELAN4_DEV        *dev     = ctxt->ctxt_dev;
31500 +    unsigned         ctxnum  = ctxt->ctxt_num;
31501 +    unsigned          hashidx = E4MMU_HASH_INDEX (ctxnum, vaddr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31502 +    E4_uint64         newtag  = E4MMU_TAG(vaddr, ctxnum);
31503 +    ELAN4_HASH_ENTRY *he      = &dev->dev_mmuhash[tbl][hashidx];
31504 +    unsigned         tagidx;
31505 +
31506 +    MPRINTF (ctxt, 2, "elan4mmu_ptealloc: tbl=%d ctxnum=%d vaddr=%llx -> hashidx %d\n", tbl, ctxnum, vaddr, hashidx);
31507 +
31508 +    /* 1st) check whether we're reloading an existing entry */
31509 +    for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
31510 +    {
31511 +       ASSERT ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxnum);
31512 +
31513 +       for (tagidx = 0; tagidx < 2; tagidx++)
31514 +       {
31515 +           if ((he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK | HE_TAG_VALID)) == (newtag | HE_TAG_VALID))
31516 +           {
31517 +               MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return old he %p tagidx %d\n", he, tagidx);
31518 +
31519 +               *tagidxp = tagidx;
31520 +               return he;
31521 +           }
31522 +       }
31523 +    }
31524 +
31525 +    if ((he = elan4mmu_alloc_hent (dev, tbl, hashidx, newtag, &tagidx)) == NULL)
31526 +       return NULL;
31527 +
31528 +    /* chain onto context hash */
31529 +    if ((he->he_tag[tagidx ^ 1] & TAG_CONTEXT_MASK) == ctxnum) /* already chained using other link */
31530 +    {                                                          /* so ensure both slots are chained the same */
31531 +       he->he_chain[tagidx] = he->he_chain[tagidx^1];
31532 +    }
31533 +    else
31534 +    {
31535 +       he->he_chain[tagidx] = ctxt->ctxt_mmuhash[tbl][hashidx];
31536 +       ctxt->ctxt_mmuhash[tbl][hashidx] = he;
31537 +    }
31538 +
31539 +    MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return new he %p tagidx %d\n", he, tagidx);
31540 +
31541 +    *tagidxp = tagidx;
31542 +
31543 +    return he;
31544 +}
31545 +
31546 +int
31547 +elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, E4_uint64 newpte)
31548 +{
31549 +    ELAN4_DEV        *dev     = ctxt->ctxt_dev;
31550 +    unsigned          pteidx  = E4MMU_SHIFT_ADDR(vaddr, dev->dev_pageshift[tbl]) & 3;
31551 +    unsigned         tagidx;
31552 +    ELAN4_HASH_ENTRY *he;
31553 +
31554 +    MPRINTF (ctxt, 0, "elan4mmu_pteload: ctx=%d tbl=%d pteidx=%d vaddr=%llx pte=%llx\n", 
31555 +           ctxt->ctxt_num, tbl, pteidx, (unsigned long long)vaddr, newpte);
31556 +
31557 +    spin_lock (&ctxt->ctxt_mmulock);
31558 +
31559 +    if ((he = elan4mmu_ptealloc (ctxt, tbl, vaddr, &tagidx)) == NULL)
31560 +    {
31561 +       spin_unlock (&ctxt->ctxt_mmulock);
31562 +       return -ENOMEM;
31563 +    }
31564 +
31565 +    MPRINTF (ctxt, 1, "elan4mmu_pteload: %s he=%p tagidx=%d pteidx=%d\n", HE_GET_PTE(he,0,pteidx) ? "reloading" : "loading", he, tagidx, pteidx);
31566 +    
31567 +    ASSERT (HE_GET_PTE(he,tagidx,pteidx) == 0 ||                                                       /* invalid -> valid */
31568 +           (elan4mmu_readpte (dev, he, tagidx, pteidx) & PTE_PPN_MASK) == (newpte & PTE_PPN_MASK));    /* or same phys address */
31569 +    
31570 +    elan4mmu_writepte (dev, he, tagidx, pteidx, newpte);
31571 +    
31572 +    HE_SET_PTE(he, tagidx, pteidx, (newpte & PTE_PERM_TYPE_MASK));
31573 +
31574 +    spin_unlock (&ctxt->ctxt_mmulock);
31575 +    return 0;
31576 +}
31577 +
31578 +void
31579 +elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len)
31580 +{
31581 +    ELAN4_DEV        *dev       = ctxt->ctxt_dev;
31582 +    unsigned          ctxnum    = ctxt->ctxt_num;
31583 +    unsigned long     tagspan   = (1 << (dev->dev_pageshift[tbl] + 2));
31584 +    E4_Addr           end       = start + len - 1;
31585 +    int                      needflush = 0;
31586 +    unsigned          baseidx, topidx;
31587 +    unsigned          hashidx, tagidx, pteidx;
31588 +    ELAN4_HASH_ENTRY *he, *prevhe, *next;
31589 +    
31590 +    MPRINTF (ctxt, 0, "elan4mmu_unload_range: tbl=%d start=%llx end=%llx len=%lx\n", tbl, start, end, len);
31591 +
31592 +    /* determine how much of the hash table we've got to scan */
31593 +    
31594 +    /* GNAT 6760: When we have a Main page size which maps onto multiple Elan pages
31595 +     * we need to do something a bit more clever here or else it takes ms per page invalidate
31596 +     * This change helps in the meantime
31597 +     */
31598 +    /* if (len <= (1 << dev->dev_pageshift[tbl])) */
31599 +    if (len <= PAGE_SIZE)
31600 +    {
31601 +       baseidx = E4MMU_HASH_INDEX (ctxnum, start, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31602 +       topidx  = E4MMU_HASH_INDEX (ctxnum, end,   dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31603 +
31604 +       if (baseidx != topidx)
31605 +       {
31606 +           /* GNAT 6760: Need to search whole of the hash table (slow!) */
31607 +           baseidx = 0;
31608 +           topidx  = dev->dev_hashsize[tbl] - 1;
31609 +       }
31610 +    }
31611 +    else
31612 +    {
31613 +       baseidx = 0;
31614 +       topidx  = dev->dev_hashsize[tbl] - 1;
31615 +    }
31616 +
31617 +    MPRINTF (ctxt, 1, "elan4mmu_unload_range: baseidx=%d topidx=%d\n", baseidx, topidx);
31618 +
31619 +    spin_lock (&ctxt->ctxt_mmulock);
31620 +
31621 +    /* 1st - invalidate the tag for all hash blocks which are completely invalidated,
31622 +     *       and remember the first/last hash blocks */
31623 +    for (hashidx = baseidx; hashidx <= topidx; hashidx++)
31624 +       for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
31625 +           for (tagidx = 0; tagidx < 2; tagidx++)
31626 +               if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
31627 +               {
31628 +                   E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31629 +                   E4_Addr top  = base + (tagspan -1);
31630 +                   
31631 +                   if (start < top && end > base)
31632 +                   {
31633 +                       unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl];
31634 +                       unsigned tidx = (end   >= top)  ? 3 : (end   & (tagspan-1)) >> dev->dev_pageshift[tbl];
31635 +                       
31636 +                       MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx hashidx=%d bidx=%d tidx=%d\n", he, base, top, hashidx, bidx, tidx);
31637 +                       
31638 +                       for (pteidx = bidx; pteidx <= tidx; pteidx++)
31639 +                           if (HE_GET_PTE(he, tagidx, pteidx))
31640 +                           {
31641 +                               elan4mmu_invalidatepte (dev, he, tagidx, pteidx);
31642 +                               needflush = 1;
31643 +                           }
31644 +                   }
31645 +                   else if (base >= start && top <= end)               /* hash entry completely spanned */
31646 +                   {                                                   /* so invalidate the tag */
31647 +                       MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned\n", he, base, top);
31648 +
31649 +                       he->he_tag[tagidx] &= ~HE_TAG_VALID;
31650 +                       
31651 +                       elan4mmu_synctag (dev, he, tagidx);
31652 +                       needflush = 1;
31653 +                   }
31654 +               }
31655 +
31656 +    if (needflush)
31657 +    {
31658 +       /* 2nd invalidate the first/last hash blocks if they are partially invalidated
31659 +        * and flush the tlb/hash copy blocks */
31660 +       elan4mmu_flush_tlb_hash (dev, tbl, baseidx, topidx);
31661 +       
31662 +       /* 3rd free off the hash entries which are completely invalidated */
31663 +       for (hashidx = baseidx; hashidx <= topidx; hashidx++)
31664 +           for (prevhe = NULL, he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = next)
31665 +           {
31666 +               next = he_ctxt_next (he, ctxnum);
31667 +               
31668 +               for (tagidx = 0; tagidx < 2; tagidx++)
31669 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
31670 +                   {
31671 +                       E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31672 +                       E4_Addr top  = base + (tagspan -1);
31673 +                       
31674 +                       if (start < top && end > base)
31675 +                       {
31676 +                           unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl];
31677 +                           unsigned tidx = (end   >= top)  ? 3 : (end   & (tagspan-1)) >> dev->dev_pageshift[tbl];
31678 +                           
31679 +                           MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx bidx=%d tidx=%d\n", he, base, top, bidx, tidx);
31680 +                           
31681 +                           for (pteidx = bidx; pteidx <= tidx; pteidx++)
31682 +                               if (HE_GET_PTE(he, tagidx, pteidx))
31683 +                               {
31684 +                                   HE_SET_PTE(he, tagidx, pteidx, 0);
31685 +                                   
31686 +                                   elan4mmu_writepte (dev, he, tagidx, pteidx, 0);
31687 +                               }
31688 +                       }
31689 +                       
31690 +                       if ((base >= start && top <= end) || he->he_pte[tagidx] == 0)   /* hash entry completely spanned or all pte's cleared */
31691 +                       {                                                                       /* so invalidate the pte's and free it */
31692 +                           
31693 +                           MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned or empty\n", he, base, top);
31694 +                           
31695 +                           elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx);
31696 +                       }
31697 +                   }
31698 +               
31699 +               prevhe = he_ctxt_unlink (ctxt, tbl, hashidx, prevhe, he, next);
31700 +           }
31701 +    }
31702 +    spin_unlock (&ctxt->ctxt_mmulock);
31703 +}
31704 +
31705 +void
31706 +elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt)
31707 +{
31708 +    ELAN4_DEV *dev    = ctxt->ctxt_dev;
31709 +    int        ctxnum = ctxt->ctxt_num;
31710 +    ELAN4_HASH_ENTRY *he;
31711 +    int tbl, hashidx, tagidx;
31712 +
31713 +    MPRINTF (ctxt, 0, "elan4mmu_invalidate_ctxt: invalidating ctxnum=%d\n", ctxnum);
31714 +
31715 +    spin_lock (&ctxt->ctxt_mmulock);
31716 +
31717 +    /* 1st invalidate all tags belonging to me */
31718 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
31719 +       for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
31720 +           for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
31721 +               for (tagidx = 0; tagidx < 2; tagidx++)
31722 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum) /* own tag block */
31723 +                   {
31724 +                       MPRINTF (ctxt, 1, "elan4mmu_invalidate_ctxt: he=%p addr=%llx hashidx=%d tagidx=%d\n", 
31725 +                                he, he->he_tag[tagidx] & TAG_ADDRESS_MASK, hashidx, tagidx);
31726 +
31727 +                       he->he_tag[tagidx] &= ~HE_TAG_VALID;
31728 +                       
31729 +                       elan4mmu_synctag (dev, he, tagidx);
31730 +                   }
31731 +
31732 +    /* 2nd flush the tlb & cached hash block */
31733 +    elan4mmu_flush_tlb (dev);
31734 +    
31735 +    /* 3rd invalidate all pte's and free off the hash entries */
31736 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
31737 +       for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
31738 +           while ((he = ctxt->ctxt_mmuhash[tbl][hashidx]) != NULL)
31739 +           {
31740 +               ctxt->ctxt_mmuhash[tbl][hashidx] = he_ctxt_next (he, ctxnum);
31741 +
31742 +               for (tagidx = 0; tagidx < 2; tagidx++)
31743 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
31744 +                       elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx);
31745 +           }
31746 +    spin_unlock (&ctxt->ctxt_mmulock);
31747 +}
31748 +
31749 +ELAN4_HASH_CACHE *
31750 +elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep)
31751 +{
31752 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31753 +    E4_Addr           end      = start + (npages << dev->dev_pageshift[tbl]) - 1;
31754 +    unsigned long     tagshift = dev->dev_pageshift[tbl] + 2;
31755 +    E4_Addr           tagspan  = 1 << tagshift;
31756 +    E4_Addr           base     = (start & ~(tagspan-1));
31757 +    E4_Addr           top      = (end   & ~(tagspan-1)) + (tagspan-1);
31758 +    unsigned int      nhes     = (top - base + 1) >> tagshift;
31759 +    ELAN4_HASH_CACHE *hc;
31760 +    unsigned int      tagidx,  pteidx;
31761 +    E4_Addr           addr;
31762 +    int                      i;
31763 +    
31764 +    MPRINTF (ctxt, 0, "elan4mmu_reserve: start=%llx npages=%d\n", start, npages);
31765 +    MPRINTF (ctxt, 0, "         pageshift=%d tagspan=%lx base=%llx top=%llx end=%llx nhes=%d\n",
31766 +            dev->dev_pageshift[tbl], tagspan, base, top, end, nhes);
31767 +
31768 +    KMEM_ALLOC (hc, ELAN4_HASH_CACHE *, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes]), cansleep);
31769 +
31770 +    if (hc == NULL)
31771 +       return NULL;
31772 +
31773 +    hc->hc_start = start;
31774 +    hc->hc_end   = end;
31775 +    hc->hc_tbl   = tbl;
31776 +
31777 +    spin_lock (&ctxt->ctxt_mmulock);
31778 +    for (addr = base, i = 0; i < nhes; addr += tagspan, i++)
31779 +    {
31780 +       unsigned bidx = (i == 0)        ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0;
31781 +       unsigned tidx = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3;
31782 +
31783 +       
31784 +       if ((hc->hc_hes[i] = elan4mmu_ptealloc (ctxt, tbl, addr & ~(tagspan-1), &tagidx)) == NULL)
31785 +           goto failed;
31786 +
31787 +
31788 +       MPRINTF (ctxt, 2, "elan4mmu_reserve: tbl=%d addr=%llx -> hashidx=%d tagidx=%d\n", tbl, addr & ~(tagspan-1), 
31789 +                E4MMU_HASH_INDEX (ctxt->ctxt_num, (addr & ~(tagspan-1)), dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1), tagidx);
31790 +                        
31791 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
31792 +       {
31793 +           ASSERT (HE_GET_PTE (hc->hc_hes[i], tagidx, pteidx) == 0);
31794 +
31795 +           MPRINTF (ctxt, 2, "elan4mmu_reserve: i=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n",
31796 +                    i, addr, hc->hc_hes[i], tagidx, pteidx);
31797 +
31798 +           HE_SET_PTE (hc->hc_hes[i], tagidx, pteidx, PTE_PERM_TYPE_MASK);
31799 +       }
31800 +    }
31801 +    spin_unlock (&ctxt->ctxt_mmulock);
31802 +
31803 +    return hc;
31804 +
31805 + failed:
31806 +    for (i--, addr -= tagspan; i >= 0; i--, addr -= tagspan)
31807 +    {
31808 +       unsigned bidx    = (i == 0) ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0;
31809 +       unsigned tidx    = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3;
31810 +       unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
31811 +       unsigned tagidx  = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1)) ? 0 : 1;
31812 +           
31813 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
31814 +           HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, 0);
31815 +
31816 +       if (hc->hc_hes[i]->he_pte[tagidx] == 0)
31817 +           elan4mmu_free_hent (dev, tbl, hashidx, hc->hc_hes[i], tagidx);
31818 +    }
31819 +    spin_unlock (&ctxt->ctxt_mmulock);
31820 +
31821 +    KMEM_FREE (hc, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes]));
31822 +    
31823 +    return NULL;
31824 +}
31825 +
31826 +void
31827 +elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc)
31828 +{
31829 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31830 +    E4_Addr          start    = hc->hc_start;
31831 +    E4_Addr           end      = hc->hc_end;
31832 +    unsigned long     tagshift = dev->dev_pageshift[hc->hc_tbl] + 2;
31833 +    E4_Addr           tagspan  = 1 << tagshift;
31834 +    E4_Addr           base     = (start & ~(tagspan-1));
31835 +    E4_Addr           top      = (end   & ~(tagspan-1)) + (tagspan-1);
31836 +    unsigned int      nhes     = (top - base + 1) >> tagshift;
31837 +    ELAN4_HASH_ENTRY *prevhe, *he, *next;
31838 +    E4_Addr           addr;
31839 +    unsigned int      pteidx;
31840 +    int                      i;
31841 +
31842 +    spin_lock (&ctxt->ctxt_mmulock);
31843 +
31844 +    MPRINTF (ctxt, 0, "elan4mmu_release: base=%llx top=%llx\n", base, top);
31845 +
31846 +    for (addr = base, i = 0; i < nhes; addr += tagspan, i++)
31847 +    {
31848 +       unsigned bidx    = (i == 0)        ? (start & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 0;
31849 +       unsigned tidx    = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 3;
31850 +       unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1);
31851 +       unsigned tagidx  = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1)) ? 0 : 1;
31852 +           
31853 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
31854 +       {
31855 +           elan4mmu_invalidatepte (dev, hc->hc_hes[i], tagidx, pteidx);
31856 +
31857 +           HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, 0);
31858 +       }
31859 +
31860 +       MPRINTF (ctxt, 2, "elan4mmu_release: i=%d addr=%llx he=%p (hashidx=%d tagidx=%d pteidx=%d) pte=%x\n",
31861 +                i, addr, hc->hc_hes[i], hashidx, tagidx, pteidx, hc->hc_hes[i]->he_pte[tagidx]);
31862 +
31863 +       /* remove from context hash */
31864 +       /* need to move to the  hc->hc_hes[i] in the ctxt list and set prevhe, he, next */
31865 +       prevhe = NULL;
31866 +       he = ctxt->ctxt_mmuhash[hc->hc_tbl][hashidx];
31867 +       next = he_ctxt_next (he, ctxt->ctxt_num);
31868 +
31869 +       while(he != hc->hc_hes[i]) {
31870 +           prevhe = he;
31871 +           he = next;
31872 +           next = he_ctxt_next (he, ctxt->ctxt_num);
31873 +       }
31874 +
31875 +       if (he->he_pte[tagidx] == 0) 
31876 +           elan4mmu_free_hent (dev, hc->hc_tbl, hashidx, he, tagidx);
31877 +
31878 +       he_ctxt_unlink (ctxt, hc->hc_tbl, hashidx, prevhe, he, next);
31879 +    }
31880 +    spin_unlock (&ctxt->ctxt_mmulock);
31881 +}
31882 +
31883 +void
31884 +elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte)
31885 +{
31886 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31887 +    unsigned int      tbl      = hc->hc_tbl;
31888 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
31889 +    E4_Addr           tagspan  = 1 << tagshift;
31890 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
31891 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
31892 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
31893 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
31894 +
31895 +    MPRINTF (ctxt, 2, "elan4mmu_set_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d) newpte=%llx\n", idx, addr, he, tagidx, pteidx, newpte);
31896 +
31897 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
31898 +
31899 +    elan4mmu_writepte (dev, he, tagidx, pteidx, newpte);
31900 +}
31901 +
31902 +E4_uint64
31903 +elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx)
31904 +{
31905 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31906 +    unsigned int      tbl      = hc->hc_tbl;
31907 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
31908 +    E4_Addr           tagspan  = 1 << tagshift;
31909 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
31910 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
31911 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
31912 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
31913 +
31914 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
31915 +
31916 +    return elan4mmu_readpte (dev, he, tagidx, pteidx);
31917 +}
31918 +
31919 +void
31920 +elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx)
31921 +{
31922 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
31923 +    unsigned int      tbl      = hc->hc_tbl;
31924 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
31925 +    E4_Addr           tagspan  = 1 << tagshift;
31926 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
31927 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
31928 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
31929 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
31930 +
31931 +    MPRINTF (ctxt, 2, "elan4mmu_clear_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n", idx, addr, he, tagidx, pteidx);
31932 +
31933 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
31934 +
31935 +    elan4mmu_invalidatepte (dev, he, tagidx, pteidx);
31936 +}
31937 +
31938 +EXPORT_SYMBOL(elan4mmu_flush_tlb);
31939 +EXPORT_SYMBOL(elan4mmu_pteload);
31940 +EXPORT_SYMBOL(elan4mmu_unload_range);
31941 +EXPORT_SYMBOL(elan4mmu_reserve);
31942 +EXPORT_SYMBOL(elan4mmu_release);
31943 +EXPORT_SYMBOL(elan4mmu_set_pte);
31944 +EXPORT_SYMBOL(elan4mmu_get_pte);
31945 +EXPORT_SYMBOL(elan4mmu_clear_pte);
31946 +/*
31947 + * Local variables:
31948 + * c-file-style: "stroustrup"
31949 + * End:
31950 + */
31951 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/mmu_Linux.c
31952 ===================================================================
31953 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/mmu_Linux.c  2004-02-23 16:02:56.000000000 -0500
31954 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/mmu_Linux.c       2005-07-28 14:52:52.832681168 -0400
31955 @@ -0,0 +1,265 @@
31956 +/*
31957 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
31958 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
31959 + *
31960 + *    For licensing information please see the supplied COPYING file
31961 + *
31962 + */
31963 +
31964 +#ident "@(#)$Id: mmu_Linux.c,v 1.8 2004/05/10 14:10:46 daniel Exp $"
31965 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu_Linux.c,v $*/
31966 +
31967 +#include <qsnet/kernel.h>
31968 +
31969 +#include <elan4/debug.h>
31970 +#include <elan4/device.h>
31971 +
31972 +#include <linux/pci.h>
31973 +#include <linux/version.h>
31974 +
31975 +/*
31976 + * Convert a physical address into an pte.  This should generate a "local" pte for 
31977 + * physical addresses which are elan4 sdram or elan4 command queues.  For elan4
31978 + * registers and other addresses on the same bus, this should be the local pci 
31979 + * bus address.  All other addresses should access the physical address via the
31980 + * PCI bridge.
31981 + */
31982 +
31983 +#ifdef __alpha
31984 +#define ioaddr2paddr(ioaddr)   virt_to_phys((void *) __ioremap(ioaddr, PAGE_SIZE))
31985 +#elif defined(__ia64)
31986 +#define ioaddr2paddr(ioaddr)   ((ioaddr) & ~__IA64_UNCACHED_OFFSET)
31987 +#else
31988 +#define ioaddr2paddr(ioaddr)   (ioaddr)
31989 +#endif
31990 +
31991 +int
31992 +elan4mmu_categorise_paddr (ELAN4_DEV *dev, physaddr_t *physp)
31993 +{
31994 +    physaddr_t sdram_base = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31995 +    physaddr_t sdram_top  = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31996 +    physaddr_t regs_base  = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
31997 +    physaddr_t regs_top   = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
31998 +    physaddr_t phys       = *physp;
31999 +    int        iscommand;
32000 +
32001 +    if (phys >= sdram_base && phys <= sdram_top)
32002 +    {
32003 +       (*physp) = (phys ^ sdram_base);
32004 +       return ELAN4MMU_PADDR_SDRAM;
32005 +    }
32006 +    
32007 +    if (phys >= regs_base && phys < regs_top)
32008 +    {
32009 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
32010 +           iscommand = (phys < (regs_base + ELAN4_REVA_REG_OFFSET));
32011 +       else
32012 +           iscommand = (phys < (regs_base + ELAN4_REVB_I2C_OFFSET));
32013 +       
32014 +       if (iscommand)
32015 +       {
32016 +           (*physp) = phys ^ regs_base;
32017 +
32018 +           return ELAN4MMU_PADDR_COMMAND;
32019 +       }
32020 +       else
32021 +       {
32022 +           // XXXX (*physp) = phys2bus (phys);
32023 +
32024 +           return ELAN4MMU_PADDR_LOCALPCI;
32025 +       }
32026 +    }
32027 +
32028 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
32029 +    if (VALID_PAGE (virt_to_page (phys_to_virt (phys))))
32030 +#else
32031 +    if (virt_addr_valid (phys_to_virt (phys)))
32032 +#endif
32033 +       return ELAN4MMU_PADDR_PAGE;
32034 +    
32035 +    return ELAN4MMU_PADDR_OTHER;
32036 +}
32037 +
32038 +int
32039 +elan4mmu_sdram_aliascheck (ELAN4_CTXT *ctxt, E4_Addr addr, physaddr_t phys)
32040 +{
32041 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
32042 +
32043 +    /*
32044 +     * On MPSAS we don't allocate a large enough context table, so 
32045 +     * if we see an address/context pair which would "alias" because
32046 +     * they differ in unchecked hash bits to a previous pteload, 
32047 +     * then we kill the application.
32048 +     */
32049 +    unsigned hashval = (E4MMU_SHIFT_ADDR(addr, (dev->dev_pageshift[0]) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(ctxt->ctxt_num));
32050 +    
32051 +    if (dev->dev_rsvd_hashval[0] == 0xFFFFFFFF)
32052 +       dev->dev_rsvd_hashval[0] = hashval & dev->dev_rsvd_hashmask[0];
32053 +    
32054 +    if ((hashval & dev->dev_rsvd_hashmask[0]) != dev->dev_rsvd_hashval[0])
32055 +    {
32056 +       printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx ctxnum=%x -> [%x] overlaps %x - %x [hashidx=%x]\n", (unsigned long long) addr, 
32057 +               ctxt->ctxt_num, hashval, hashval & dev->dev_rsvd_hashmask[0], dev->dev_rsvd_hashval[0],
32058 +               E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[0], dev->dev_hashsize[0]-1));
32059 +       
32060 +       return 0;
32061 +    }
32062 +
32063 +    if (((addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (phys & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT))))
32064 +    {
32065 +       printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx incorrectly alias sdram at %lx\n", (unsigned long long) addr, 
32066 +               phys ^ pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
32067 +       return 0;
32068 +    }
32069 +
32070 +    return 1;
32071 +}
32072 +
32073 +int
32074 +elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type)
32075 +{
32076 +#if defined(__i386) && !defined(CONFIG_X86_PAE)
32077 +    if (dev->dev_topaddrvalid == 0)
32078 +    {
32079 +       dev->dev_topaddrvalid = 1;
32080 +
32081 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(0), 0);
32082 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(1), 0);
32083 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(2), 0);
32084 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(3), 0);
32085 +    }
32086 +    return (0);
32087 +#else
32088 +    register int i;
32089 +    E4_uint16 match;
32090 +
32091 +    if (dev->dev_topaddrmode)                                  /* ExtraMasterAddrBits=1 => match {paddr[63:50],type[3:2]} */
32092 +       match = ((paddr >> 48) & ~3) | ((type >> 2) & 3);
32093 +    else                                                       /* ExtraMasterAddrBits=0 => match {paddr[63:48]} */
32094 +       match = (paddr >> 48);
32095 +    
32096 +    MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: mode=%d paddr=%lx type=%x match=%x [%x %x.%x.%x.%x]\n",
32097 +            dev->dev_topaddrmode, paddr, type, match, dev->dev_topaddrvalid,
32098 +            dev->dev_topaddr[0], dev->dev_topaddr[1], dev->dev_topaddr[2], dev->dev_topaddr[3]);
32099 +    
32100 +    for (i = 0; i < 4; i++)
32101 +       if ((dev->dev_topaddrvalid & (1 << i)) && dev->dev_topaddr[i] == match)
32102 +           return (i);
32103 +    
32104 +    for (i = 0; i < 4; i++)
32105 +    {
32106 +       if ((dev->dev_topaddrvalid & (1 << i)) == 0)
32107 +       {
32108 +           MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: allocate slot %d for %x\n", i, match);
32109 +
32110 +           dev->dev_topaddrvalid |= (1 << i);
32111 +           dev->dev_topaddr[i] = match;
32112 +
32113 +           pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(i), match);
32114 +           return (i);
32115 +       }
32116 +    }
32117 +
32118 +    panic ("elan4mmu_alloc_topaddr: all topaddrs in use\n");
32119 +    return (0);
32120 +#endif
32121 +}
32122 +
32123 +E4_uint64
32124 +elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t phys, unsigned perm)
32125 +{
32126 +    physaddr_t sdram_base = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
32127 +    physaddr_t sdram_top  = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
32128 +    physaddr_t regs_base  = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
32129 +    physaddr_t regs_top   = ioaddr2paddr (pci_resource_end (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
32130 +    int        iscommand;
32131 +    E4_uint64  pte;
32132 +    unsigned   type;
32133 +    
32134 +    if (phys >= sdram_base && phys <= sdram_top)
32135 +    {
32136 +       phys ^= sdram_base;
32137 +       type  = PTE_SetPerm (perm);
32138 +    }
32139 +    else if (phys >= regs_base && phys < regs_top)
32140 +    {
32141 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
32142 +           iscommand = (phys < (regs_base + ELAN4_REVA_REG_OFFSET));
32143 +       else
32144 +           iscommand = (phys < (regs_base + ELAN4_REVB_I2C_OFFSET));
32145 +       
32146 +       if (iscommand)
32147 +       {
32148 +           phys ^= regs_base;
32149 +           type  = PTE_SetPerm (perm) | PTE_CommandQueue;
32150 +       }
32151 +       else
32152 +       {
32153 +           type = PTE_SetPerm (perm) | PTE_PciNotLocal;
32154 +           // phys = phys2bus (phys);
32155 +       }
32156 +    }
32157 +    else
32158 +    {
32159 +       type = PTE_SetPerm (perm) | PTE_PciNotLocal | dev->dev_pteval;
32160 +
32161 +#ifdef LINUX_SPARC
32162 +       /* XXXX if not local pci bus, then or in the bypass bit */
32163 +       phys |= 0xfffe000000000000;
32164 +       type |= PTE_BigEndian;
32165 +#endif
32166 +
32167 +
32168 +#if defined(__alpha)
32169 +       phys |= alpha_mv.pci_dac_offset;
32170 +#endif
32171 +    }
32172 +
32173 +    if ((type & PTE_PciNotLocal) == 0)
32174 +       pte = (phys >> PTE_PADDR_SHIFT) | type;
32175 +    else
32176 +    {
32177 +       unsigned topaddr = elan4mmu_alloc_topaddr (dev, phys, type);
32178 +       
32179 +       if (dev->dev_topaddrmode)
32180 +           pte = (phys >> PTE_PADDR_SHIFT) | (type & ~0xc) | (topaddr << 2);
32181 +       else
32182 +           pte = ((phys >> PTE_PADDR_SHIFT) & ~PTE_TOPADDR_MASK) | (((E4_uint64) topaddr) << 45) | type;
32183 +    }
32184 +
32185 +    return pte;
32186 +}
32187 +
32188 +physaddr_t
32189 +elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte)
32190 +{
32191 +    physaddr_t sdram_base = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
32192 +    physaddr_t regs_base  = ioaddr2paddr (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
32193 +    physaddr_t phys;
32194 +    
32195 +    if (pte & PTE_PciNotLocal)
32196 +    {
32197 +       if (dev->dev_topaddrmode)
32198 +           phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 2) & 3] & 0xfffc) << 48) | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT);
32199 +       else
32200 +           phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 45) & 3] & 0xffff) << 48)| ((pte & PTE_PPN_MASK & ~PTE_TOPADDR_MASK) << PTE_PADDR_SHIFT);
32201 +
32202 +#ifdef LINUX_SPARC     /* XXXX if not local pci bus, then or in the bypass bit */
32203 +       phys ^= 0xfffe000000000000;
32204 +#endif
32205 +
32206 +#if defined(__alpha)
32207 +       phys ^= alpha_mv.pci_dac_offset;
32208 +#endif
32209 +       return phys;
32210 +    }
32211 +    
32212 +    if (pte & PTE_CommandQueue)
32213 +       return (regs_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT));
32214 +    
32215 +    /* sdram */
32216 +    return (sdram_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT));
32217 +}
32218 +
32219 +EXPORT_SYMBOL(elan4mmu_phys2pte);
32220 +EXPORT_SYMBOL(elan4mmu_pte2phys);
32221 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/neterr.c
32222 ===================================================================
32223 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/neterr.c     2004-02-23 16:02:56.000000000 -0500
32224 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/neterr.c  2005-07-28 14:52:52.833681016 -0400
32225 @@ -0,0 +1,270 @@
32226 +/*
32227 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
32228 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
32229 + * 
32230 + *    For licensing information please see the supplied COPYING file
32231 + *
32232 + */
32233 +
32234 +#ident "@(#)$Id: neterr.c,v 1.4.6.3 2004/11/05 13:11:17 david Exp $"
32235 +/*      $Source: /cvs/master/quadrics/elan4mod/neterr.c,v $*/
32236 +
32237 +#include <qsnet/kernel.h>
32238 +
32239 +#include <elan4/sdram.h>
32240 +#include <elan4/debug.h>
32241 +#include <elan4/device.h>
32242 +#include <elan4/commands.h>
32243 +#include <elan4/trtype.h>
32244 +#include <elan4/neterr.h>
32245 +
32246 +typedef struct neterr_inputq
32247 +{
32248 +    E4_InputQueue      inputq;                                 /* input queue */
32249 +    E4_Event32         qevent;                                 /* input queue event */
32250 +    E4_uint64          sent;                                   /* # messages sent (cq flow control)*/
32251 +} NETERR_INPUTQ;
32252 +
32253 +#define NETERR_NSLOTS  64                                      /* single page of queue space (4Kb) */
32254 +
32255 +#define NETERR_RETRIES 16
32256 +#define NETERR_CQ_SIZE CQ_Size8K
32257 +#define NETERR_CQ_MSGS (CQ_Size(NETERR_CQ_SIZE) / (21*8))
32258 +#define NETERR_VP_COUNT        64                                      /* this *must* be > NETERR_CQ_MSGS */
32259 +#define NETERR_VP_BASE 1                                       /* use vp 1 upwards */
32260 +
32261 +void
32262 +elan4_neterr_interrupt (ELAN4_DEV *dev, void *arg)
32263 +{
32264 +    E4_Addr          qfptr  = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr));
32265 +    E4_Addr          qbptr  = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr));
32266 +    E4_Addr          qfirst = DEVICE_NETERR_SLOTS_ADDR;
32267 +    E4_Addr          qlast  = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE;
32268 +    ELAN4_CQ        *cq     = dev->dev_neterr_intcq;
32269 +    int              count  = 0;
32270 +    ELAN4_CTXT      *ctxt;
32271 +    ELAN4_NETERR_MSG msg;
32272 +
32273 +    while (qfptr != qbptr)
32274 +    {
32275 +       elan4_sdram_copyq_from_sdram (dev, dev->dev_neterr_slots + (qfptr - qfirst), &msg, ELAN4_NETERR_MSG_SIZE);
32276 +
32277 +       ctxt = elan4_networkctxt (dev, msg.msg_context);
32278 +
32279 +       if (ctxt != NULL && ctxt->ctxt_ops->op_neterrmsg)
32280 +           ctxt->ctxt_ops->op_neterrmsg (ctxt, &msg);
32281 +       else
32282 +           PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_interrupt: no process - sender %d.%d\n", msg.msg_sender.loc_node, msg.msg_sender.loc_context);
32283 +
32284 +       count++;
32285 +
32286 +       /* move on the from pointer */
32287 +       qfptr = (qfptr == qlast) ? qfirst : qfptr + ELAN4_NETERR_MSG_SIZE;
32288 +
32289 +       elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfptr);
32290 +    }
32291 +
32292 +    if (count == 0)
32293 +    {
32294 +       printk ("elan4_neterr_interrupt: spurious\n");
32295 +       return;
32296 +    }
32297 +
32298 +    /* Issue the waitevent to the interrupt queue */
32299 +    writeq (WAIT_EVENT_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)),   cq->cq_mapping);
32300 +    writeq (  E4_EVENT_INIT_VALUE (-32 * count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),       cq->cq_mapping);
32301 +    writeq (  DEVICE_NETERR_INTCQ_ADDR,                                                                cq->cq_mapping);
32302 +    writeq (INTERRUPT_CMD | (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT),            cq->cq_mapping);
32303 +
32304 +    pioflush_reg (dev);
32305 +}
32306 +
32307 +int
32308 +elan4_neterr_init (ELAN4_DEV *dev)
32309 +{
32310 +    unsigned int intqaddr;
32311 +    E4_Addr     qfirst, qlast;
32312 +    
32313 +    if ((dev->dev_neterr_inputq = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0)
32314 +       return 0;
32315 +
32316 +    if ((dev->dev_neterr_slots = elan4_sdram_alloc (dev, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE))) == 0)
32317 +       return 0;
32318 +
32319 +    if ((dev->dev_neterr_msgcq = elan4_alloccq (&dev->dev_ctxt, NETERR_CQ_SIZE, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority)) == NULL)
32320 +       return 0;
32321 +
32322 +    if ((dev->dev_neterr_intcq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_WaitEventEnableBit | CQ_InterruptEnableBit, CQ_Priority)) == NULL)
32323 +       return 0;
32324 +
32325 +    intqaddr = (dev->dev_cqoffset + elan4_cq2num (dev->dev_neterr_intcq)) * CQ_CommandMappingSize;
32326 +    qfirst   = DEVICE_NETERR_SLOTS_ADDR;
32327 +    qlast    = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE;
32328 +
32329 +    spin_lock_init (&dev->dev_neterr_lock);
32330 +
32331 +    /* Register an interrupt operation */
32332 +    dev->dev_neterr_intop.op_function = elan4_neterr_interrupt;
32333 +    dev->dev_neterr_intop.op_arg      = NULL;
32334 +
32335 +    elan4_register_intop (dev, &dev->dev_neterr_intop);
32336 +
32337 +    /* Initialise the inputq descriptor and event */
32338 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfirst);
32339 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr), qfirst);
32340 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_control), E4_InputQueueControl (qfirst, qlast, ELAN4_NETERR_MSG_SIZE));
32341 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_event), DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent));
32342 +    
32343 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_CountAndType), E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
32344 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WritePtr), DEVICE_NETERR_INTCQ_ADDR);
32345 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WriteValue), (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD);
32346 +
32347 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent), 0);
32348 +
32349 +    /* Map them all into the device context */
32350 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, (dev->dev_neterr_inputq >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_RemoteAll));
32351 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR,  (intqaddr >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocDataWrite) | PTE_CommandQueue);
32352 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR, (dev->dev_neterr_slots >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_DataReadWrite));
32353 +
32354 +    /* finally attach to the neterr context */
32355 +    if (elan4_attach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM) != 0)
32356 +       panic ("elan4_neterr_init: failed to attach to neterr context\n");
32357 +
32358 +    /* and drop the context filter */
32359 +    elan4_set_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM, E4_FILTER_HIGH_PRI);
32360 +
32361 +    return 1;
32362 +}
32363 +
32364 +void
32365 +elan4_neterr_destroy (ELAN4_DEV *dev)
32366 +{
32367 +    if (dev->dev_neterr_intcq)
32368 +    {
32369 +       elan4_detach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM);
32370 +       
32371 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR,  1 << dev->dev_pageshift[0]);
32372 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR,  1 << dev->dev_pageshift[0]);
32373 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, 1 << dev->dev_pageshift[0]);
32374 +
32375 +       spin_lock_destroy (&dev->dev_neterr_lock);
32376 +    }
32377 +
32378 +    if (dev->dev_neterr_intcq)
32379 +       elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_intcq);
32380 +    dev->dev_neterr_intcq = NULL;
32381 +
32382 +    if (dev->dev_neterr_msgcq)
32383 +       elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_msgcq);
32384 +    dev->dev_neterr_msgcq = NULL;
32385 +
32386 +    if (dev->dev_neterr_slots)
32387 +       elan4_sdram_free (dev, dev->dev_neterr_slots, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE));
32388 +    dev->dev_neterr_slots = 0;
32389 +    
32390 +    if (dev->dev_neterr_inputq)
32391 +       elan4_sdram_free (dev, dev->dev_neterr_inputq, SDRAM_PAGE_SIZE);
32392 +    dev->dev_neterr_inputq = 0;
32393 +}
32394 +
32395 +int
32396 +elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg)
32397 +{
32398 +    ELAN4_CQ  *cq = dev->dev_neterr_msgcq;
32399 +    E4_uint64  sent;
32400 +    E4_VirtualProcessEntry route;
32401 +    unsigned int vp;
32402 +    unsigned long flags;
32403 +
32404 +    spin_lock_irqsave (&dev->dev_neterr_lock, flags);
32405 +
32406 +    sent = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent));
32407 +
32408 +    PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_sendmsg: nodeid=%d retries=%d cookie=%llx sender=%d,%d%s\n", 
32409 +           nodeid, retries, msg->msg_cookies[0], msg->msg_sender.loc_node, msg->msg_sender.loc_context,
32410 +           (dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS ? " - no cq space" : "");
32411 +
32412 +    if ((dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS)
32413 +    {
32414 +       spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
32415 +       return 0;
32416 +    }
32417 +
32418 +    vp = NETERR_VP_BASE + (dev->dev_neterr_queued % NETERR_VP_COUNT);
32419 +
32420 +    if (elan4_generate_route (&dev->dev_position, &route, ELAN4_NETERR_CONTEXT_NUM, nodeid, nodeid, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI) < 0)
32421 +    {
32422 +       spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
32423 +       return 0;
32424 +    }
32425 +
32426 +    elan4_write_route (dev, dev->dev_routetable, vp, &route);
32427 +
32428 +    writeq ((GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(retries)),                             cq->cq_mapping);
32429 +    writeq (NOP_CMD,                                                                           cq->cq_mapping);
32430 +    
32431 +    writeq (OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp),             cq->cq_mapping);
32432 +    writeq (SEND_TRANS_CMD | (TR_INPUT_Q_GETINDEX << 16),                                      cq->cq_mapping);
32433 +    writeq (  DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq),                    cq->cq_mapping);
32434 +
32435 +    writeq (SEND_TRANS_CMD | (TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD) << 16),                 cq->cq_mapping);
32436 +    writeq (  0 /* address */,                                                                 cq->cq_mapping);
32437 +    writeq (  ((E4_uint64 *) msg)[0],                                                          cq->cq_mapping);
32438 +    writeq (  ((E4_uint64 *) msg)[1],                                                          cq->cq_mapping);
32439 +    writeq (  ((E4_uint64 *) msg)[2],                                                          cq->cq_mapping);
32440 +    writeq (  ((E4_uint64 *) msg)[3],                                                          cq->cq_mapping);
32441 +    writeq (  ((E4_uint64 *) msg)[4],                                                          cq->cq_mapping);
32442 +    writeq (  ((E4_uint64 *) msg)[5],                                                          cq->cq_mapping);
32443 +    writeq (  ((E4_uint64 *) msg)[6],                                                          cq->cq_mapping);
32444 +    writeq (  ((E4_uint64 *) msg)[7],                                                          cq->cq_mapping);
32445 +
32446 +    writeq (SEND_TRANS_CMD | (TR_INPUT_Q_COMMIT << 16),                                                cq->cq_mapping);
32447 +    writeq (  DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq),                    cq->cq_mapping);
32448 +    writeq (  0 /* cookie */,                                                                  cq->cq_mapping);
32449 +    
32450 +    writeq (GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(NETERR_RETRIES),                                cq->cq_mapping);
32451 +    writeq (WRITE_DWORD_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, sent)),    cq->cq_mapping);
32452 +    writeq (  ++dev->dev_neterr_queued,                                                                cq->cq_mapping);
32453 +
32454 +    pioflush_reg (dev);
32455 +
32456 +    spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
32457 +
32458 +    return 1;
32459 +}
32460 +
32461 +int
32462 +elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap)
32463 +{
32464 +    E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans];
32465 +    unsigned long flags;
32466 +
32467 +    switch (IPROC_TrapValue (hdrp->IProcStatusCntxAndTrType))
32468 +    {
32469 +    case InputEopErrorOnWaitForEop:
32470 +    case InputEopErrorTrap:
32471 +    case InputCrcErrorAfterPAckOk:
32472 +       return 1;
32473 +
32474 +    case InputEventEngineTrapped:
32475 +       printk ("elan%d: device_iproc_trap: InputEventEngineTrapped - Trans=%x TrAddr=%llx\n", 
32476 +               dev->dev_instance, (int)IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType), (long long) hdrp->TrAddr);
32477 +
32478 +       if ((IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) &&
32479 +           hdrp->TrAddr == DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq))
32480 +       {
32481 +           spin_lock_irqsave (&dev->dev_neterr_lock, flags);
32482 +           writeq ((DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)) | SET_EVENT_CMD, dev->dev_neterr_msgcq->cq_mapping);
32483 +           spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
32484 +           return 1;
32485 +       }
32486 +       
32487 +    default:
32488 +       return 0;
32489 +    }
32490 +}
32491 +/*
32492 + * Local variables:
32493 + * c-file-style: "stroustrup"
32494 + * End:
32495 + */
32496 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/procfs_Linux.c
32497 ===================================================================
32498 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/procfs_Linux.c       2004-02-23 16:02:56.000000000 -0500
32499 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/procfs_Linux.c    2005-07-28 14:52:52.835680712 -0400
32500 @@ -0,0 +1,1074 @@
32501 +/*
32502 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
32503 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
32504 + *
32505 + *    For licensing information please see the supplied COPYING file
32506 + *
32507 + */
32508 +
32509 +#ident "@(#)$Id: procfs_Linux.c,v 1.27.2.9 2005/03/09 12:00:08 addy Exp $ $Name: QSNETMODULES-4-31_20050321 $"
32510 +/*      $Source: /cvs/master/quadrics/elan4mod/procfs_Linux.c,v $*/
32511 +
32512 +#include <qsnet/kernel.h>
32513 +
32514 +#include <linux/module.h>
32515 +#include <linux/proc_fs.h>
32516 +#include <linux/ctype.h>
32517 +
32518 +#include <qsnet/procfs_linux.h>
32519 +
32520 +#include <elan4/i2c.h>
32521 +#include <elan4/debug.h>
32522 +#include <elan4/device.h>
32523 +#include <elan4/user.h>
32524 +
32525 +/*
32526 + *
32527 + * procfs format for elan4:
32528 + *
32529 + * /proc/qsnet/elan4/config
32530 + *    elan4_debug
32531 + *    elan4_debug_toconsole
32532 + *    elan4_debug_tobuffer
32533 + *    elan4_debug_display_ctxt
32534 + *    elan4_debug_ignore_ctxt
32535 + *    elan4_debug_ignore_type
32536 + *    elan4_debug_mmu
32537 + *    elan4_mainint_punt_loops
32538 + *    user_p2p_route_options
32539 + *    user_bcast_route_options
32540 + *
32541 + * /proc/qsnet/elan4/deviceN
32542 + *    stats
32543 + *    position
32544 + *    vpd
32545 + */
32546 +
32547 +struct proc_dir_entry *elan4_procfs_root;
32548 +struct proc_dir_entry *elan4_config_root;
32549 +
32550 +/* borrowed from fs/proc/proc_misc - helper for proc_read_int */
32551 +static int 
32552 +proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len)
32553 +{
32554 +    if (len <= off+count) *eof = 1;
32555 +    *start = page + off;
32556 +    len -= off;
32557 +    if (len>count) len = count;
32558 +    if (len<0) len = 0;
32559 +    return len;
32560 +}
32561 +
32562 +static int
32563 +proc_read_devinfo (char *page, char **start, off_t off,
32564 +                   int count, int *eof, void *data)
32565 +{
32566 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32567 +    int        len = 0;
32568 +
32569 +    if (! dev)
32570 +       len = sprintf (page, "<unknown>\n");
32571 +    else
32572 +    {
32573 +       len += sprintf (page + len, "dev_vendor_id            0x%x\n", dev->dev_devinfo.dev_vendor_id);
32574 +       len += sprintf (page + len, "dev_device_id            0x%x\n", dev->dev_devinfo.dev_vendor_id);
32575 +       len += sprintf (page + len, "dev_revision_id          0x%x\n", dev->dev_devinfo.dev_revision_id);
32576 +       len += sprintf (page + len, "dev_instance             0x%x\n", dev->dev_devinfo.dev_instance);
32577 +       len += sprintf (page + len, "dev_rail                 0x%x\n", dev->dev_devinfo.dev_rail);
32578 +       len += sprintf (page + len, "dev_driver_version       0x%x\n", dev->dev_devinfo.dev_driver_version);
32579 +       len += sprintf (page + len, "dev_params_mask          0x%x\n", dev->dev_devinfo.dev_params_mask);
32580 +       len += sprintf (page + len, "dev_params:                  \n");
32581 +       len += sprintf (page + len, " 0  - PciCmdQPadFlag     0x%x\n", dev->dev_devinfo.dev_params.values[0]);
32582 +       len += sprintf (page + len, " 1  - EventCopyWinPt     0x%x\n", dev->dev_devinfo.dev_params.values[1]);
32583 +       len += sprintf (page + len, " 2  - PciWriteCombining  0x%x\n", dev->dev_devinfo.dev_params.values[2]);
32584 +       len += sprintf (page + len, " 3  -                    0x%x\n", dev->dev_devinfo.dev_params.values[3]);
32585 +       len += sprintf (page + len, " 4  -                    0x%x\n", dev->dev_devinfo.dev_params.values[4]);
32586 +       len += sprintf (page + len, " 5  -                    0x%x\n", dev->dev_devinfo.dev_params.values[5]);
32587 +       len += sprintf (page + len, " 6  -                    0x%x\n", dev->dev_devinfo.dev_params.values[6]);
32588 +       len += sprintf (page + len, " 7  -                    0x%x\n", dev->dev_devinfo.dev_params.values[7]);
32589 +       len += sprintf (page + len, " 8  -                    0x%x\n", dev->dev_devinfo.dev_params.values[8]);
32590 +       len += sprintf (page + len, " 9  -                    0x%x\n", dev->dev_devinfo.dev_params.values[9]);
32591 +       len += sprintf (page + len, " 10 -                    0x%x\n", dev->dev_devinfo.dev_params.values[10]);
32592 +       len += sprintf (page + len, " 11 - features           0x%x\n", dev->dev_devinfo.dev_params.values[11]);
32593 +       len += sprintf (page + len, "dev_num_down_links_value 0x%x\n", dev->dev_devinfo.dev_num_down_links_value);
32594 +    }
32595 +
32596 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32597 +}
32598 +
32599 +static int
32600 +proc_read_position (char *page, char **start, off_t off,
32601 +                   int count, int *eof, void *data)
32602 +{
32603 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32604 +    int        len;
32605 +
32606 +    if (dev->dev_position.pos_mode == ELAN_POS_UNKNOWN)
32607 +       len = sprintf (page, "<unknown>\n");
32608 +    else
32609 +       len = sprintf (page, 
32610 +                      "NodeId                 %d\n"
32611 +                      "NumLevels              %d\n"
32612 +                      "NumNodes               %d\n",
32613 +                      dev->dev_position.pos_nodeid, 
32614 +                      dev->dev_position.pos_levels, 
32615 +                      dev->dev_position.pos_nodes);
32616 +
32617 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32618 +}
32619 +
32620 +static int
32621 +proc_write_position (struct file *file, const char *buf, unsigned long count, void *data)
32622 +{
32623 +    ELAN4_DEV *dev     = (ELAN4_DEV *) data;
32624 +    unsigned  nodeid   = ELAN_INVALID_NODE;
32625 +    unsigned  numnodes = 0;
32626 +    char     *page, *p;
32627 +    int       res;
32628 +    ELAN_POSITION pos;
32629 +
32630 +    if (count == 0)
32631 +       return (0);
32632 +
32633 +    if (count >= PAGE_SIZE)
32634 +       return (-EINVAL);
32635 +
32636 +    if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
32637 +       return (-ENOMEM);
32638 +
32639 +    MOD_INC_USE_COUNT;
32640 +
32641 +    if (copy_from_user (page, buf, count))
32642 +       res = -EFAULT;
32643 +    else
32644 +    {
32645 +       page[count] = '\0';
32646 +       
32647 +       if (page[count-1] == '\n')
32648 +           page[count-1] = '\0';
32649 +
32650 +       if (! strcmp (page, "<unknown>"))
32651 +       {
32652 +           pos.pos_mode      = ELAN_POS_UNKNOWN;
32653 +           pos.pos_nodeid    = ELAN_INVALID_NODE;
32654 +           pos.pos_nodes     = 0;
32655 +           pos.pos_levels    = 0;
32656 +       }
32657 +       else
32658 +       {
32659 +           for (p = page; *p; )
32660 +           {
32661 +               while (isspace (*p))
32662 +                   p++;
32663 +               
32664 +               if (! strncmp (p, "NodeId=", strlen("NodeId=")))
32665 +                   nodeid   = simple_strtoul (p + strlen ("NodeId="), NULL, 0);
32666 +               if (! strncmp (p, "NumNodes=", strlen ("NumNodes=")))
32667 +                   numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0);
32668 +               
32669 +               while (*p && !isspace(*p))
32670 +                   p++;
32671 +           }
32672 +
32673 +           if (elan4_compute_position (&pos, nodeid, numnodes, dev->dev_devinfo.dev_num_down_links_value) != 0)
32674 +               printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->dev_instance, nodeid, numnodes);
32675 +           else
32676 +           {
32677 +               printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->dev_instance, pos.pos_nodeid,
32678 +                       pos.pos_nodes, pos.pos_levels);
32679 +
32680 +               if (elan4_set_position (dev, &pos) < 0)
32681 +                   printk ("elan%d: failed to set device position\n", dev->dev_instance);
32682 +           }
32683 +       }
32684 +    }
32685 +
32686 +    MOD_DEC_USE_COUNT;
32687 +    free_page ((unsigned long) page);
32688 +
32689 +    return (count);
32690 +}
32691 +
32692 +static int
32693 +proc_read_temp (char *page, char **start, off_t off,
32694 +               int count, int *eof, void *data)
32695 +{
32696 +    ELAN4_DEV    *dev = (ELAN4_DEV *) data;
32697 +    unsigned char values[2];
32698 +    int           len;
32699 +
32700 +    if (i2c_disable_auto_led_update (dev) < 0)
32701 +       len = sprintf (page, "<unknown>");
32702 +    else
32703 +    {
32704 +       if (i2c_read (dev, I2C_TEMP_ADDR, 2, values) < 0)
32705 +           len = sprintf (page, "<not-present>");
32706 +       else
32707 +           len = sprintf (page, "%s%d%s\n", (values[0] & 0x80) ? "-" : "",
32708 +                          (values[0] & 0x80) ? -((signed char)values[0]) - 1 : values[0],
32709 +                          (values[1] & 0x80) ? ".5" : ".0");
32710 +
32711 +       i2c_enable_auto_led_update (dev);
32712 +    }
32713 +
32714 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32715 +}
32716 +
32717 +static int
32718 +proc_read_eccerr (char *page, char **start, off_t off,
32719 +                 int count, int *eof, void *data)
32720 +{
32721 +    ELAN4_DEV    *dev = (ELAN4_DEV *) data;
32722 +    char          errstr[200];
32723 +    register int  i, len = 0;
32724 +
32725 +    *page = '\0';
32726 +
32727 +    for (i = 0; i < sizeof (dev->dev_sdramerrs)/sizeof(dev->dev_sdramerrs[0]); i++)
32728 +       if (dev->dev_sdramerrs[i].ErrorCount != 0)
32729 +           len += sprintf (page + len, "%s occurred %0d times\n",
32730 +                           elan4_sdramerr2str (dev, dev->dev_sdramerrs[i].EccStatus, dev->dev_sdramerrs[i].ConfigReg, errstr),
32731 +                           dev->dev_sdramerrs[i].ErrorCount);
32732 +
32733 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32734 +}
32735 +
32736 +static int
32737 +proc_read_vpd (char *page, char **start, off_t off,
32738 +              int count, int *eof, void *data)
32739 +{
32740 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32741 +    int        len;
32742 +
32743 +    if ( elan4_read_vpd (dev, NULL, page) )
32744 +       len = sprintf (page, "no vpd tags found\n");
32745 +    else
32746 +       len = strlen(page)+1;
32747 +
32748 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32749 +}
32750 +
32751 +static int
32752 +proc_read_linkportkey (char *page, char **start, off_t off,
32753 +                      int count, int *eof, void *data)
32754 +{
32755 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32756 +    int        len;
32757 +
32758 +    len = sprintf (page, "%llx\n", read_reg64 (dev, LinkPortLock));
32759 +
32760 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
32761 +}
32762 +
32763 +static int
32764 +proc_write_linkportkey (struct file *file, const char *buf, unsigned long count, void *data)
32765 +{
32766 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32767 +    int               res = 0;
32768 +    char       tmpbuf[30];
32769 +
32770 +    if (count > sizeof (tmpbuf) - 1)
32771 +       return -EINVAL;
32772 +
32773 +    MOD_INC_USE_COUNT;
32774 +
32775 +    if (copy_from_user (tmpbuf, buf, count))
32776 +       res = -EFAULT;
32777 +    else
32778 +    {
32779 +       tmpbuf[count] = '\0';
32780 +
32781 +       write_reg64 (dev, LinkPortLock, simple_strtoull (tmpbuf, NULL, 16));
32782 +    }
32783 +
32784 +    MOD_DEC_USE_COUNT;
32785 +
32786 +    return (count);
32787 +}
32788 +
32789 +static struct device_info 
32790 +{
32791 +    char *name;
32792 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
32793 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
32794 +    unsigned minrev;
32795 +} device_info[] = {
32796 +    {"devinfo",     proc_read_devinfo,     NULL,                   0},
32797 +    {"position",    proc_read_position,    proc_write_position,    0},
32798 +    {"temp",        proc_read_temp,        NULL,                   1},
32799 +    {"eccerr",      proc_read_eccerr,      NULL,                   0},
32800 +    {"vpd",         proc_read_vpd,         NULL,                   0},
32801 +    {"linkportkey", proc_read_linkportkey, proc_write_linkportkey, 0},
32802 +};
32803 +
32804 +static int
32805 +proc_read_link_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32806 +{
32807 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32808 +    char      *p   = page;
32809 +
32810 +    p += sprintf (p, "%20s %ld\n", "link_errors", dev->dev_stats.s_link_errors);
32811 +    p += sprintf (p, "%20s %ld\n", "lock_errors", dev->dev_stats.s_lock_errors);
32812 +    p += sprintf (p, "%20s %ld\n", "deskew_errors", dev->dev_stats.s_deskew_errors);
32813 +    p += sprintf (p, "%20s %ld\n", "phase_errors", dev->dev_stats.s_phase_errors);
32814 +
32815 +    p += sprintf (p, "%20s %ld\n", "data_errors", dev->dev_stats.s_data_errors);
32816 +    p += sprintf (p, "%20s %ld\n", "fifo_overflow0", dev->dev_stats.s_fifo_overflow0);
32817 +    p += sprintf (p, "%20s %ld\n", "fifo_overflow1", dev->dev_stats.s_fifo_overflow1);
32818 +    p += sprintf (p, "%20s %ld\n", "mod45changed", dev->dev_stats.s_mod45changed);
32819 +    p += sprintf (p, "%20s %ld\n", "pack_not_seen", dev->dev_stats.s_pack_not_seen);
32820 +
32821 +    p += sprintf (p, "%20s %ld\n", "linkport_keyfail", dev->dev_stats.s_linkport_keyfail);
32822 +    p += sprintf (p, "%20s %ld\n", "eop_reset", dev->dev_stats.s_eop_reset);
32823 +    p += sprintf (p, "%20s %ld\n", "bad_length", dev->dev_stats.s_bad_length);
32824 +    p += sprintf (p, "%20s %ld\n", "crc_error", dev->dev_stats.s_crc_error);
32825 +    p += sprintf (p, "%20s %ld\n", "crc_bad", dev->dev_stats.s_crc_bad);
32826 +
32827 +    p += sprintf (p, "%20s %ld\n", "cproc_timeout", dev->dev_stats.s_cproc_timeout);
32828 +    p += sprintf (p, "%20s %ld\n", "dproc_timeout", dev->dev_stats.s_dproc_timeout);
32829 +
32830 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32831 +}
32832 +
32833 +static char *
32834 +proc_sprintf_bucket_stat (char *p, char *name, unsigned long *stats, int *buckets)
32835 +{
32836 +    int i;
32837 +
32838 +    p += sprintf (p, "%20s ", name);
32839 +
32840 +    for (i = 0; i < ELAN4_DEV_STATS_BUCKETS-1; i++)
32841 +       p += sprintf (p, "%ld(<=%d) ", stats[i], buckets[i]);
32842 +    p += sprintf (p, "%ld(>%d)\n", stats[i], buckets[i-1]);
32843 +
32844 +    return p;
32845 +}
32846 +
32847 +static int
32848 +proc_read_intr_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32849 +{
32850 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32851 +    char      *p   = page;
32852 +
32853 +    p += sprintf (p, "%20s %ld\n", "interrupts",       dev->dev_stats.s_interrupts);
32854 +    p += sprintf (p, "%20s %ld\n", "haltints",         dev->dev_stats.s_haltints);
32855 +
32856 +    p += sprintf (p, "%20s %ld\n", "mainint_punts",    dev->dev_stats.s_mainint_punts);
32857 +    p += sprintf (p, "%20s %ld\n", "mainint_rescheds", dev->dev_stats.s_mainint_rescheds);
32858 +
32859 +    p  = proc_sprintf_bucket_stat (p, "mainints", dev->dev_stats.s_mainints, MainIntBuckets);
32860 +
32861 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32862 +}
32863 +
32864 +static int
32865 +proc_read_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32866 +{
32867 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32868 +    char      *p   = page;
32869 +
32870 +    p += sprintf (p, "%20s %ld\n", "cproc_traps", dev->dev_stats.s_cproc_traps);
32871 +    p += sprintf (p, "%20s %ld\n", "dproc_traps", dev->dev_stats.s_dproc_traps);
32872 +    p += sprintf (p, "%20s %ld\n", "eproc_traps", dev->dev_stats.s_eproc_traps);
32873 +    p += sprintf (p, "%20s %ld\n", "iproc_traps", dev->dev_stats.s_iproc_traps);
32874 +    p += sprintf (p, "%20s %ld\n", "tproc_traps", dev->dev_stats.s_tproc_traps);
32875 +
32876 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32877 +}
32878 +
32879 +static int
32880 +proc_read_cproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32881 +{
32882 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32883 +    char      *p   = page;
32884 +    int        i;
32885 +    extern char *const CProcTrapNames[];
32886 +
32887 +    for (i = 0; i < sizeof (dev->dev_stats.s_cproc_trap_types)/sizeof(dev->dev_stats.s_cproc_trap_types[0]); i++)
32888 +       p += sprintf (p, "%-40s %ld\n", CProcTrapNames[i], dev->dev_stats.s_cproc_trap_types[i]);
32889 +
32890 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32891 +}
32892 +
32893 +static int
32894 +proc_read_dproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32895 +{
32896 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32897 +    char      *p   = page;
32898 +    int        i;
32899 +    extern char *const DProcTrapNames[];
32900 +
32901 +    for (i = 0; i < sizeof (dev->dev_stats.s_dproc_trap_types)/sizeof(dev->dev_stats.s_dproc_trap_types[0]); i++)
32902 +       p += sprintf (p, "%-40s %ld\n", DProcTrapNames[i], dev->dev_stats.s_dproc_trap_types[i]);
32903 +
32904 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32905 +}
32906 +
32907 +static int
32908 +proc_read_eproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32909 +{
32910 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32911 +    char      *p   = page;
32912 +    int        i;
32913 +    extern char *const EProcTrapNames[];
32914 +
32915 +    for (i = 0; i < sizeof (dev->dev_stats.s_eproc_trap_types)/sizeof(dev->dev_stats.s_eproc_trap_types[0]); i++)
32916 +       p += sprintf (p, "%-40s %ld\n", EProcTrapNames[i], dev->dev_stats.s_eproc_trap_types[i]);
32917 +
32918 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32919 +}
32920 +
32921 +static int
32922 +proc_read_iproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32923 +{
32924 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32925 +    char      *p   = page;
32926 +    int        i;
32927 +    extern char *const IProcTrapNames[];
32928 +
32929 +    for (i = 0; i < sizeof (dev->dev_stats.s_iproc_trap_types)/sizeof(dev->dev_stats.s_iproc_trap_types[0]); i++)
32930 +       p += sprintf (p, "%-40s %ld\n", IProcTrapNames[i], dev->dev_stats.s_iproc_trap_types[i]);
32931 +
32932 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32933 +}
32934 +
32935 +static int
32936 +proc_read_tproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32937 +{
32938 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32939 +    char      *p   = page;
32940 +    int        i;
32941 +    extern char *const TProcTrapNames[];
32942 +
32943 +    for (i = 0; i < sizeof (dev->dev_stats.s_tproc_trap_types)/sizeof(dev->dev_stats.s_tproc_trap_types[0]); i++)
32944 +       p += sprintf (p, "%-40s %ld\n", TProcTrapNames[i], dev->dev_stats.s_tproc_trap_types[i]);
32945 +
32946 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32947 +}
32948 +
32949 +static int
32950 +proc_read_sdram_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32951 +{
32952 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
32953 +    char      *p   = page;
32954 +
32955 +    p += sprintf (p, "%20s %ld\n",  "correctable_errors", dev->dev_stats.s_correctable_errors);
32956 +    p += sprintf (p, "%20s %ld\n",  "multiple_errors",    dev->dev_stats.s_multiple_errors);
32957 +    p += sprintf (p, "%20s %ldK\n", "sdram_bytes_free",   dev->dev_stats.s_sdram_bytes_free/1024);
32958 +
32959 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
32960 +}
32961 +
32962 +void
32963 +elan4_ringbuf_store (ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev)
32964 +{
32965 +    int newend;
32966 +    
32967 +    ASSERT (kmutex_is_locked (&dev->dev_lock));
32968 +
32969 +    memcpy(&ringbuf->routes[ringbuf->end], route, sizeof(E4_VirtualProcessEntry));
32970 +    newend = ringbuf->end + 1;
32971 +    if (newend >= DEV_STASH_ROUTE_COUNT)
32972 +        newend -= DEV_STASH_ROUTE_COUNT;
32973 +    if (newend == ringbuf->start)
32974 +        ringbuf->start += 1;
32975 +    if (ringbuf->start >= DEV_STASH_ROUTE_COUNT)
32976 +        ringbuf->start -= DEV_STASH_ROUTE_COUNT;
32977 +    ringbuf->end = newend;
32978 +}
32979 +       
32980 +static int
32981 +proc_read_dproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
32982 +{
32983 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
32984 +       char      *p   = page;
32985 +       unsigned int *dproc_timeout;
32986 +
32987 +       dproc_timeout = dev->dev_dproc_timeout;
32988 +
32989 +       if (!dproc_timeout) 
32990 +               p += sprintf (p, "No stats available\n");
32991 +       else
32992 +       {
32993 +               int i;
32994 +
32995 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
32996 +                       if (dproc_timeout[i] != 0) 
32997 +                               p += sprintf (p, "Node %d: %u errors\n", i, dproc_timeout[i]);
32998 +       }
32999 +
33000 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
33001 +}
33002 +
33003 +static int
33004 +elan4_route2str (E4_VirtualProcessEntry *route, char *routeStr)
33005 +{
33006 +    int        part = 0;
33007 +    int        shift;
33008 +    int        broadcast;
33009 +    E4_uint64  value;
33010 +    char      *ptr = routeStr;
33011 +    int        b;
33012 +
33013 +    /* unpack first */
33014 +    value = route->Values[part] & 0x7f;
33015 +    if ( (value & 0x78) == 0) {
33016 +        /* empty route */
33017 +        strcpy(routeStr,"Invalid lead route");
33018 +        return (-EINVAL);
33019 +    }
33020 +
33021 +    if ( value & 0x40 ) {
33022 +        /* broad cast */
33023 +       strcpy(routeStr,"Broadcast");
33024 +       return (-EINVAL);
33025 +    } else {
33026 +        switch ((value  & 0x30) >> 4) {
33027 +        case 0: { *ptr++ = '0' + (value & 0x7); break; }
33028 +        case 1: { *ptr++ = 'M';                 break; }
33029 +        case 2: { *ptr++ = 'U';                 break; }
33030 +        case 3: { *ptr++ = 'A';                 break; }
33031 +        }
33032 +    }
33033 +
33034 +    shift = 16;
33035 +    broadcast = 0;
33036 +    while ( 1 ) {
33037 +        b =  (route->Values[part] >> shift) & 0xf;
33038 +
33039 +        if ( broadcast ) {
33040 +            /* about to pick up the second byte of a broadcast pair */
33041 +            broadcast = 0;
33042 +        } else {
33043 +            if ( b & 0x8) {
33044 +                /*  output link */
33045 +                 *ptr++ = '0' + (b & 0x7);
33046 +            } else {
33047 +                if ( b & 0x4) {
33048 +                    /* broad cast */
33049 +                    broadcast = 1;
33050 +                } else {
33051 +                    switch ( b & 0x3 ) {
33052 +                    case 0: { *ptr++ =  0 ; return (0);     break; }
33053 +                    case 1: { *ptr++ = 'M';                 break; }
33054 +                    case 2: { *ptr++ = 'U';                 break; }
33055 +                    case 3: { *ptr++ = 'A';                 break; }
33056 +                    }
33057 +                }
33058 +            }
33059 +        }
33060 +
33061 +        shift += 4; 
33062 +        if ( part != 0 ) {
33063 +            if ( shift > 36) {
33064 +                /* too far, now in the crc value */
33065 +                strcpy(routeStr,"Invalid route length");
33066 +                return (-EINVAL);
33067 +            }
33068 +        } else { 
33069 +            if ( shift >= 64) { 
33070 +                /* move to the next 64 bits */
33071 +                part = 1;
33072 +                shift = 2;
33073 +            }
33074 +        }
33075 +    }
33076 +
33077 +    /* never reached */
33078 +    return (-EINVAL);
33079 +}
33080 +
33081 +
33082 +static int
33083 +proc_read_dproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
33084 +{
33085 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
33086 +       char      *p   = page;
33087 +       ELAN4_ROUTE_RINGBUF *ringbuf;
33088 +       char      routestr[33];
33089 +
33090 +       ringbuf = &dev->dev_dproc_timeout_routes;
33091 +
33092 +       if (!ringbuf) 
33093 +               p += sprintf (p, "No stats available\n");
33094 +       else
33095 +       {
33096 +               int start;
33097 +               int end;
33098 +               int i;
33099 +
33100 +               memset(&routestr, 0, 33);
33101 +               
33102 +               kmutex_lock(&dev->dev_lock);
33103 +               
33104 +               start = ringbuf->start;
33105 +               end = ringbuf->end;
33106 +
33107 +               if (end < start)
33108 +                       end = DEV_STASH_ROUTE_COUNT;
33109 +
33110 +               for (i=start; i<end; i++) 
33111 +               {
33112 +                       elan4_route2str (&ringbuf->routes[i], routestr);
33113 +                       p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
33114 +               }
33115 +
33116 +               if (ringbuf->end < start)
33117 +               {
33118 +                       start = 0;
33119 +                       end = ringbuf->end;
33120 +                       for (i=start; i<end; i++)
33121 +                       {
33122 +                               elan4_route2str (&ringbuf->routes[i], routestr);
33123 +                               p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
33124 +                       }
33125 +               }
33126 +
33127 +               kmutex_unlock(&dev->dev_lock);
33128 +       }
33129 +
33130 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
33131 +}
33132 +
33133 +
33134 +static int
33135 +proc_read_cproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
33136 +{
33137 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
33138 +       char      *p   = page;
33139 +       unsigned int *cproc_timeout;
33140 +
33141 +       cproc_timeout = dev->dev_cproc_timeout;
33142 +
33143 +       if (!cproc_timeout) 
33144 +               p += sprintf (p, "No stats available\n");
33145 +       else
33146 +       {
33147 +               int i;
33148 +
33149 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
33150 +                       if (cproc_timeout[i] != 0) 
33151 +                               p += sprintf (p, "Node %d: %u errors\n", i, cproc_timeout[i]);
33152 +       }
33153 +
33154 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
33155 +}
33156 +
33157 +static int
33158 +proc_read_cproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
33159 +{
33160 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
33161 +       char      *p   = page;
33162 +       ELAN4_ROUTE_RINGBUF *ringbuf;
33163 +       char      routestr[33];
33164 +
33165 +       ringbuf = &dev->dev_cproc_timeout_routes;
33166 +
33167 +       if (!ringbuf) 
33168 +               p += sprintf (p, "No stats available\n");
33169 +       else
33170 +       {
33171 +               int start;
33172 +               int end;
33173 +               int i;
33174 +
33175 +               memset(&routestr, 0, 33);
33176 +
33177 +               kmutex_lock(&dev->dev_lock);
33178 +               
33179 +               start = ringbuf->start;
33180 +               end = ringbuf->end;
33181 +
33182 +               if (end < start)
33183 +                       end = DEV_STASH_ROUTE_COUNT;
33184 +
33185 +               for (i=start; i<end; i++) 
33186 +               {
33187 +                       elan4_route2str (&ringbuf->routes[i], routestr);
33188 +                       p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
33189 +               }
33190 +
33191 +               if (ringbuf->end < start)
33192 +               {
33193 +                       start = 0;
33194 +                       end = ringbuf->end;
33195 +                       for (i=start; i<end; i++)
33196 +                       {
33197 +                               elan4_route2str (&ringbuf->routes[i], routestr);
33198 +                               p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
33199 +                       }
33200 +               }
33201 +
33202 +               kmutex_unlock(&dev->dev_lock);
33203 +       }
33204 +
33205 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
33206 +}
33207 +
33208 +static int
33209 +proc_read_traperr_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
33210 +{
33211 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
33212 +       char      *p   = page;
33213 +       unsigned int *ack_errors;
33214 +
33215 +       ack_errors = dev->dev_ack_errors;
33216 +
33217 +       if (!ack_errors) 
33218 +               p += sprintf (p, "No stats available\n");
33219 +       else
33220 +       {
33221 +               int i;
33222 +
33223 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
33224 +                       if (ack_errors[i] != 0) 
33225 +                               p += sprintf (p, "Node %d: %u errors\n", i, ack_errors[i]);
33226 +       }
33227 +
33228 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
33229 +}
33230 +
33231 +static int
33232 +proc_read_ackerror_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
33233 +{
33234 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
33235 +       char      *p   = page;
33236 +       ELAN4_ROUTE_RINGBUF *ringbuf;
33237 +       char      routestr[33];
33238 +
33239 +       ringbuf = &dev->dev_ack_error_routes;
33240 +
33241 +       if (!ringbuf) 
33242 +               p += sprintf (p, "No stats available\n");
33243 +       else
33244 +       {
33245 +               int start;
33246 +               int end;
33247 +               int i;
33248 +
33249 +               memset(&routestr, 0, 33);
33250 +
33251 +               kmutex_lock(&dev->dev_lock);
33252 +               
33253 +               start = ringbuf->start;
33254 +               end = ringbuf->end;
33255 +
33256 +               if (end < start)
33257 +                       end = DEV_STASH_ROUTE_COUNT;
33258 +
33259 +               for (i=start; i<end; i++) 
33260 +               {
33261 +                       elan4_route2str (&ringbuf->routes[i], routestr);
33262 +                       p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
33263 +               }
33264 +
33265 +               if (ringbuf->end < start)
33266 +               {
33267 +                       start = 0;
33268 +                       end = ringbuf->end;
33269 +                       for (i=start; i<end; i++)
33270 +                       {
33271 +                               elan4_route2str (&ringbuf->routes[i], routestr);
33272 +                               p += sprintf (p, "Route %llx %llx->%s\n", ringbuf->routes[i].Values[0], ringbuf->routes[i].Values[1], routestr);
33273 +                       }
33274 +               }
33275 +
33276 +               kmutex_unlock(&dev->dev_lock);
33277 +       }
33278 +
33279 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
33280 +}
33281 +
33282 +static struct stats_info 
33283 +{
33284 +    char *name;
33285 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
33286 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); 
33287 +} stats_info[] = {
33288 +    {"link",     proc_read_link_stats, NULL},
33289 +    {"intr",     proc_read_intr_stats, NULL},
33290 +    {"trap",     proc_read_trap_stats, NULL},
33291 +    {"cproc",    proc_read_cproc_trap_stats, NULL},
33292 +    {"dproc",    proc_read_dproc_trap_stats, NULL},
33293 +    {"eproc",    proc_read_eproc_trap_stats, NULL},
33294 +    {"iproc",    proc_read_iproc_trap_stats, NULL},
33295 +    {"tproc",    proc_read_tproc_trap_stats, NULL},
33296 +    {"sdram",    proc_read_sdram_stats, NULL},
33297 +    {"trapdmaerr", proc_read_traperr_stats, NULL},
33298 +    {"dproctimeout", proc_read_dproc_timeout_stats, NULL},
33299 +    {"cproctimeout", proc_read_cproc_timeout_stats, NULL},
33300 +    {"dproctimeoutroutes", proc_read_dproc_timeout_routes, NULL},
33301 +    {"cproctimeoutroutes", proc_read_cproc_timeout_routes, NULL},
33302 +    {"ackerrroutes", proc_read_ackerror_routes, NULL},
33303 +};
33304 +
33305 +static int
33306 +proc_read_sysconfig (char *page, char **start, off_t off, int count, int *eof, void *data)
33307 +{
33308 +    ELAN4_DEV *dev        = (ELAN4_DEV *) data;
33309 +    E4_uint32  syscontrol = dev->dev_syscontrol;
33310 +    int               len       = 0;
33311 +
33312 +   *eof = 1;
33313 +   if (off != 0)
33314 +      return (0);
33315 +
33316 +    if (syscontrol & CONT_EN_ALL_SETS)
33317 +       len += sprintf (page + len, "%sEN_ALL_SETS", len == 0 ? "" : " ");
33318 +    if (syscontrol & CONT_MMU_ENABLE)
33319 +       len += sprintf (page + len, "%sMMU_ENABLE", len == 0 ? "" : " ");
33320 +    if (syscontrol & CONT_CACHE_HASH_TABLE)
33321 +       len += sprintf (page + len, "%sCACHE_HASH_TABLE", len == 0 ? "" : " ");
33322 +    if (syscontrol & CONT_CACHE_CHAINS)
33323 +       len += sprintf (page + len, "%sCACHE_CHAINS", len == 0 ? "" : " ");
33324 +    if (syscontrol & CONT_CACHE_ROOT_CNTX)
33325 +       len += sprintf (page + len, "%sCACHE_ROOT_CNTX", len == 0 ? "" : " ");
33326 +    if (syscontrol & CONT_CACHE_STEN_ROUTES)
33327 +       len += sprintf (page + len, "%sCACHE_STEN_ROUTES", len == 0 ? "" : " ");
33328 +    if (syscontrol & CONT_CACHE_DMA_ROUTES)
33329 +       len += sprintf (page + len, "%sCACHE_DMA_ROUTES", len == 0 ? "" : " ");
33330 +    if (syscontrol & CONT_INHIBIT_MAX_CHAIN_ITEMS)
33331 +       len += sprintf (page + len, "%sINHIBIT_MAX_CHAIN_ITEMS", len == 0 ? "" : " ");
33332 +
33333 +    len += sprintf (page + len, "%sTABLE0_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_MASK_SIZE_SHIFT) & PAGE_MASK_MASK);
33334 +    len += sprintf (page + len, "%sTABLE0_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK);
33335 +    len += sprintf (page + len, "%sTABLE1_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_MASK_SIZE_SHIFT) & PAGE_MASK_MASK);
33336 +    len += sprintf (page + len, "%sTABLE1_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK);
33337 +
33338 +    if (syscontrol & CONT_2K_NOT_1K_DMA_PACKETS)
33339 +       len += sprintf (page + len, "%s2K_NOT_1K_DMA_PACKETS", len == 0 ? "" : " ");
33340 +    if (syscontrol & CONT_ALIGN_ALL_DMA_PACKETS)
33341 +       len += sprintf (page + len, "%sALIGN_ALL_DMA_PACKETS", len == 0 ? "" : " ");
33342 +    if (syscontrol & CONT_DIRECT_MAP_PCI_WRITES)
33343 +       len += sprintf (page + len, "%sDIRECT_MAP_PCI_WRITES", len == 0 ? "" : " ");
33344 +
33345 +    len += sprintf (page + len, "\n");
33346 +
33347 +   *start = page;
33348 +   return (len);
33349 +}
33350 +
33351 +static int
33352 +proc_write_sysconfig (struct file *file, const char *ubuffer, unsigned long count, void *data)
33353 +{
33354 +    ELAN4_DEV *dev       = (ELAN4_DEV *) data;
33355 +    unsigned long page   = __get_free_page (GFP_KERNEL);
33356 +    char         *buffer = (char *)page;
33357 +    int            add   = 0;
33358 +    int            sub   = 0;
33359 +    
33360 +    count = MIN (count, PAGE_SIZE - 1);
33361 +    if (copy_from_user (buffer, ubuffer, count))
33362 +    {
33363 +       free_page (page);
33364 +       return (-EFAULT);
33365 +    }
33366 +   
33367 +    buffer[count] = 0;                         /* terminate string */
33368 +
33369 +    while (*buffer != 0)
33370 +    {
33371 +       char *ptr;
33372 +       char *end;
33373 +       int   ch;
33374 +       int   val;
33375 +       int   op;
33376 +
33377 +       ch = *buffer;
33378 +       if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n')
33379 +       {
33380 +           buffer++;
33381 +           continue;
33382 +       }
33383 +      
33384 +       op = *buffer;
33385 +       if (op == '+' || op == '-')
33386 +           buffer++;
33387 +
33388 +       for (end = buffer; *end != 0; end++)
33389 +           if (*end == ' ' || *end == '\t' ||
33390 +               *end == '\r' || *end == '\n')
33391 +               break;
33392 +      
33393 +       if (end == buffer)
33394 +           break;
33395 +      
33396 +       ch = *end;
33397 +       *end = 0;
33398 +
33399 +       for (ptr = buffer; *ptr != 0; ptr++)
33400 +           if ('a' <= *ptr && *ptr <= 'z')
33401 +               *ptr = *ptr + 'A' - 'a';
33402 +      
33403 +       if (!strcmp (buffer, "EN_ALL_SETS"))
33404 +           val = CONT_EN_ALL_SETS;
33405 +       if (!strcmp (buffer, "CACHE_HASH_TABLE"))
33406 +           val = CONT_CACHE_HASH_TABLE;
33407 +       else if (!strcmp (buffer, "CACHE_CHAINS"))
33408 +           val = CONT_CACHE_CHAINS;
33409 +       else if (!strcmp (buffer, "CACHE_ROOT_CNTX"))
33410 +           val = CONT_CACHE_ROOT_CNTX;
33411 +       else if (!strcmp (buffer, "CACHE_STEN_ROUTES"))
33412 +           val = CONT_CACHE_STEN_ROUTES;
33413 +       else if (!strcmp (buffer, "CACHE_DMA_ROUTES"))
33414 +           val = CONT_CACHE_DMA_ROUTES;
33415 +       else if (!strcmp (buffer, "2K_NOT_1K_DMA_PACKETS"))
33416 +           val = CONT_2K_NOT_1K_DMA_PACKETS;
33417 +       else if (!strcmp (buffer, "ALIGN_ALL_DMA_PACKETS"))
33418 +           val = CONT_ALIGN_ALL_DMA_PACKETS;
33419 +       else
33420 +           val = 0;
33421 +
33422 +       if (op == '+')
33423 +           add |= val;
33424 +       else if (op == '-')
33425 +           sub |= val;
33426 +
33427 +       *end = ch;
33428 +       buffer = end;
33429 +    }
33430 +
33431 +    if ((add | sub) & CONT_EN_ALL_SETS)
33432 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
33433 +
33434 +    CHANGE_SYSCONTROL (dev, add, sub);
33435 +   
33436 +    if ((add | sub) & CONT_EN_ALL_SETS)
33437 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
33438 +
33439 +    free_page (page);
33440 +    return (count);
33441 +}
33442 +
33443 +static struct config_info 
33444 +{
33445 +    char *name;
33446 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
33447 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); 
33448 +} config_info[] = {
33449 +    {"sysconfig",  proc_read_sysconfig, proc_write_sysconfig},
33450 +};
33451 +
33452 +void
33453 +elan4_procfs_device_init (ELAN4_DEV *dev)
33454 +{
33455 +    struct proc_dir_entry *p;
33456 +    char name[NAME_MAX];
33457 +    int i;
33458 +
33459 +    sprintf (name, "device%d", dev->dev_instance);
33460 +    dev->dev_osdep.procdir  = proc_mkdir (name, elan4_procfs_root);
33461 +
33462 +    for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++)
33463 +    {
33464 +       if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev)
33465 +           continue;
33466 +
33467 +       if ((p = create_proc_entry (device_info[i].name, 0, dev->dev_osdep.procdir)) != NULL)
33468 +       {
33469 +           p->read_proc  = device_info[i].read_func;
33470 +           p->write_proc = device_info[i].write_func;
33471 +           p->data       = dev;
33472 +           p->owner      = THIS_MODULE;
33473 +       }
33474 +    }
33475 +
33476 +    dev->dev_osdep.configdir = proc_mkdir ("config", dev->dev_osdep.procdir);
33477 +    for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++)
33478 +    {
33479 +       if ((p = create_proc_entry (config_info[i].name, 0, dev->dev_osdep.configdir)) != NULL)
33480 +       {
33481 +           p->read_proc  = config_info[i].read_func;
33482 +           p->write_proc = config_info[i].write_func;
33483 +           p->data       = dev;
33484 +           p->owner      = THIS_MODULE;
33485 +       }
33486 +    }
33487 +
33488 +    dev->dev_osdep.statsdir = proc_mkdir ("stats", dev->dev_osdep.procdir);
33489 +    for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++)
33490 +    {
33491 +       if ((p = create_proc_entry (stats_info[i].name, 0, dev->dev_osdep.statsdir)) != NULL)
33492 +       {
33493 +           p->read_proc  = stats_info[i].read_func;
33494 +           p->write_proc = stats_info[i].write_func;
33495 +           p->data       = dev;
33496 +           p->owner      = THIS_MODULE;
33497 +       }
33498 +    }
33499 +}
33500 +
33501 +void
33502 +elan4_procfs_device_fini (ELAN4_DEV *dev)
33503 +{
33504 +    char name[NAME_MAX];
33505 +    int i;
33506 +
33507 +    for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++)
33508 +       remove_proc_entry (stats_info[i].name, dev->dev_osdep.statsdir);
33509 +    remove_proc_entry ("stats", dev->dev_osdep.procdir);
33510 +
33511 +    for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++)
33512 +       remove_proc_entry (config_info[i].name, dev->dev_osdep.configdir);
33513 +    remove_proc_entry ("config", dev->dev_osdep.procdir);
33514 +
33515 +    for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++)
33516 +    {
33517 +       if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev)
33518 +           continue;
33519 +       
33520 +       remove_proc_entry (device_info[i].name, dev->dev_osdep.procdir);
33521 +    }
33522 +
33523 +    sprintf (name, "device%d", dev->dev_instance);
33524 +    remove_proc_entry (name, elan4_procfs_root);
33525 +}
33526 +
33527 +void
33528 +elan4_procfs_init(void)
33529 +{
33530 +    elan4_procfs_root = proc_mkdir("elan4", qsnet_procfs_root);
33531 +    elan4_config_root = proc_mkdir("config", elan4_procfs_root);
33532 +
33533 +    qsnet_proc_register_hex (elan4_config_root, "elan4_debug",              &elan4_debug,              0);
33534 +    qsnet_proc_register_hex (elan4_config_root, "elan4_debug_toconsole",    &elan4_debug_toconsole,    0);
33535 +    qsnet_proc_register_hex (elan4_config_root, "elan4_debug_tobuffer",     &elan4_debug_tobuffer,     0);
33536 +    qsnet_proc_register_int (elan4_config_root, "elan4_debug_mmu",          &elan4_debug_mmu,          0);
33537 +    qsnet_proc_register_int (elan4_config_root, "elan4_mainint_punt_loops", &elan4_mainint_punt_loops, 0);
33538 +    qsnet_proc_register_hex (elan4_config_root, "user_p2p_route_options",   &user_p2p_route_options,   0);
33539 +    qsnet_proc_register_hex (elan4_config_root, "user_bcast_route_options", &user_bcast_route_options, 0);
33540 +    qsnet_proc_register_int (elan4_config_root, "user_dproc_retry_count",   &user_dproc_retry_count,    0);
33541 +    qsnet_proc_register_int (elan4_config_root, "user_cproc_retry_count",   &user_cproc_retry_count,    0);
33542 +    qsnet_proc_register_int (elan4_config_root, "num_fault_save",   &num_fault_save,    0);
33543 +    qsnet_proc_register_int (elan4_config_root, "min_fault_pages",   &min_fault_pages,    0);
33544 +    qsnet_proc_register_int (elan4_config_root, "max_fault_pages",   &max_fault_pages,    0);
33545 +}
33546 +
33547 +void
33548 +elan4_procfs_fini(void)
33549 +{
33550 +    remove_proc_entry ("max_fault_pages",          elan4_config_root);
33551 +    remove_proc_entry ("min_fault_pages",          elan4_config_root);
33552 +    remove_proc_entry ("num_fault_save",           elan4_config_root);
33553 +    remove_proc_entry ("user_cproc_retry_count",   elan4_config_root);
33554 +    remove_proc_entry ("user_dproc_retry_count",   elan4_config_root);
33555 +    remove_proc_entry ("user_bcast_route_options", elan4_config_root);
33556 +    remove_proc_entry ("user_p2p_route_options",   elan4_config_root);
33557 +    remove_proc_entry ("elan4_mainint_punt_loops", elan4_config_root);
33558 +    remove_proc_entry ("elan4_debug_mmu",          elan4_config_root);
33559 +    remove_proc_entry ("elan4_debug_tobuffer",     elan4_config_root);
33560 +    remove_proc_entry ("elan4_debug_toconsole",    elan4_config_root);
33561 +    remove_proc_entry ("elan4_debug",              elan4_config_root);
33562 +
33563 +    remove_proc_entry ("config", elan4_procfs_root);
33564 +    remove_proc_entry ("elan4", qsnet_procfs_root);
33565 +}
33566 +
33567 +EXPORT_SYMBOL(elan4_procfs_root);
33568 +EXPORT_SYMBOL(elan4_config_root);
33569 +
33570 +/*
33571 + * Local variables:
33572 + * c-file-style: "stroustrup"
33573 + * End:
33574 + */
33575 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/quadrics_version.h
33576 ===================================================================
33577 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/quadrics_version.h   2004-02-23 16:02:56.000000000 -0500
33578 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/quadrics_version.h        2005-07-28 14:52:52.835680712 -0400
33579 @@ -0,0 +1 @@
33580 +#define QUADRICS_VERSION "4.31qsnet"
33581 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/regions.c
33582 ===================================================================
33583 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/regions.c    2004-02-23 16:02:56.000000000 -0500
33584 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/regions.c 2005-07-28 14:52:52.836680560 -0400
33585 @@ -0,0 +1,609 @@
33586 +/*
33587 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
33588 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
33589 + * 
33590 + *    For licensing information please see the supplied COPYING file
33591 + *
33592 + */
33593 +
33594 +#ident "@(#)$Id: regions.c,v 1.18.2.1 2004/11/18 11:31:08 david Exp $"
33595 +/*      $Source: /cvs/master/quadrics/elan4mod/regions.c,v $*/
33596 +
33597 +#include <qsnet/kernel.h>
33598 +
33599 +#include <elan4/debug.h>
33600 +#include <elan4/device.h>
33601 +#include <elan4/user.h>
33602 +
33603 +/*================================================================================*/
33604 +/* elan address region management */
33605 +USER_RGN *
33606 +user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail)
33607 +{
33608 +    USER_RGN *rgn;
33609 +    USER_RGN *hirgn;
33610 +    USER_RGN *lorgn;
33611 +    E4_Addr   base;
33612 +    E4_Addr   lastaddr;
33613 +    int              forward;
33614 +    
33615 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex));
33616 +
33617 +    if (uctx->uctx_ergns == NULL)
33618 +       return (NULL);
33619 +
33620 +    rgn = uctx->uctx_ergnlast;
33621 +    if (rgn == NULL)
33622 +       rgn = uctx->uctx_ergns;
33623 +
33624 +    forward = 0;
33625 +    if ((base = rgn->rgn_ebase) < addr)
33626 +    {
33627 +       if (addr <= (base + rgn->rgn_len - 1))
33628 +           return (rgn);                                       /* ergnlast contained addr */
33629 +       
33630 +       hirgn = uctx->uctx_etail;
33631 +       
33632 +       if ((lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < addr)
33633 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
33634 +       
33635 +       if ((addr - base) > (lastaddr - addr))
33636 +           rgn = hirgn;
33637 +       else
33638 +       {
33639 +           rgn = rgn->rgn_enext;
33640 +           forward++;
33641 +       }
33642 +    }
33643 +    else
33644 +    {
33645 +       lorgn = uctx->uctx_ergns;
33646 +
33647 +       if (lorgn->rgn_ebase > addr)
33648 +           return (lorgn);                                     /* lowest regions is higher than addr */
33649 +       if ((addr - lorgn->rgn_ebase) < (base - addr))
33650 +       {
33651 +           rgn = lorgn;                                        /* search forward from head */
33652 +           forward++;
33653 +       }
33654 +    }
33655 +    if (forward)
33656 +    {
33657 +       while ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr)
33658 +           rgn = rgn->rgn_enext;
33659 +
33660 +       if (rgn->rgn_ebase <= addr)
33661 +           uctx->uctx_ergnlast = rgn;
33662 +       return (rgn);
33663 +    }
33664 +    else
33665 +    {
33666 +       while (rgn->rgn_ebase > addr)
33667 +           rgn = rgn->rgn_eprev;
33668 +
33669 +       if ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr)
33670 +           return (rgn->rgn_enext);
33671 +       else
33672 +       {
33673 +           uctx->uctx_ergnlast = rgn;
33674 +           return (rgn);
33675 +       }
33676 +    }
33677 +}
33678 +
33679 +static int
33680 +user_addrgn_elan (USER_CTXT *uctx, USER_RGN  *nrgn)
33681 +{
33682 +    USER_RGN *rgn   = user_findrgn_elan (uctx, nrgn->rgn_ebase, 1);
33683 +    E4_Addr   nbase = nrgn->rgn_ebase;
33684 +    E4_Addr   ntop  = nbase + nrgn->rgn_len - 1;
33685 +    E4_Addr   base;
33686 +
33687 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
33688 +
33689 +    if (rgn == NULL)
33690 +    {
33691 +       uctx->uctx_ergns = uctx->uctx_etail = nrgn;
33692 +       nrgn->rgn_enext = nrgn->rgn_eprev = NULL;
33693 +    }
33694 +    else
33695 +    {
33696 +       base = rgn->rgn_ebase;
33697 +       
33698 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
33699 +       {                                                       /* so insert after region (and hence at end */
33700 +           nrgn->rgn_eprev = rgn;                              /* of list */
33701 +           nrgn->rgn_enext = NULL;
33702 +           rgn->rgn_enext = uctx->uctx_etail = nrgn;
33703 +       }
33704 +       else
33705 +       {
33706 +           if (nbase >= base || ntop >= base)                  /* overlapping region */
33707 +               return (-1);
33708 +           
33709 +           nrgn->rgn_enext = rgn;                              /* insert before region */
33710 +           nrgn->rgn_eprev = rgn->rgn_eprev;
33711 +           rgn->rgn_eprev  = nrgn;
33712 +           if (uctx->uctx_ergns == rgn)
33713 +               uctx->uctx_ergns = nrgn;
33714 +           else
33715 +               nrgn->rgn_eprev->rgn_enext = nrgn;
33716 +       }
33717 +    }
33718 +    uctx->uctx_ergnlast = nrgn;
33719 +    
33720 +    return (0);
33721 +}
33722 +
33723 +static USER_RGN *
33724 +user_removergn_elan (USER_CTXT *uctx, USER_RGN  *rgn)
33725 +{
33726 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
33727 +
33728 +    uctx->uctx_ergnlast = rgn->rgn_enext;
33729 +    if (rgn == uctx->uctx_etail)
33730 +       uctx->uctx_etail = rgn->rgn_eprev;
33731 +    else
33732 +       rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev;
33733 +    
33734 +    if (rgn == uctx->uctx_ergns)
33735 +       uctx->uctx_ergns = rgn->rgn_enext;
33736 +    else
33737 +       rgn->rgn_eprev->rgn_enext = rgn->rgn_enext;
33738 +
33739 +    return (rgn);
33740 +}
33741 +
33742 +USER_RGN *
33743 +user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr)
33744 +{
33745 +    USER_RGN *rgn = user_findrgn_elan (uctx, addr, 0);
33746 +
33747 +    if (rgn != NULL && rgn->rgn_ebase <= addr && addr <= (rgn->rgn_ebase + rgn->rgn_len - 1))
33748 +       return (rgn);
33749 +
33750 +    return (NULL);
33751 +}
33752 +
33753 +/* main address region management */
33754 +USER_RGN *
33755 +user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail)
33756 +{
33757 +    USER_RGN  *rgn;
33758 +    USER_RGN  *hirgn;
33759 +    USER_RGN  *lorgn;
33760 +    virtaddr_t lastaddr;
33761 +    virtaddr_t base;
33762 +    int               forward;
33763 +    
33764 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex));
33765 +
33766 +    if (uctx->uctx_mrgns == NULL)
33767 +       return (NULL);
33768 +    
33769 +    rgn = uctx->uctx_mrgnlast;
33770 +    if (rgn == NULL)
33771 +       rgn = uctx->uctx_mrgns;
33772 +    
33773 +    forward = 0;
33774 +    if ((base = rgn->rgn_mbase) < addr)
33775 +    {
33776 +       if (addr <= (base + rgn->rgn_len - 1))
33777 +           return (rgn);                                       /* ergnlast contained addr */
33778 +       
33779 +       hirgn = uctx->uctx_mtail;
33780 +       if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr)
33781 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
33782 +       
33783 +       if ((addr - base) > (lastaddr - addr))
33784 +           rgn = hirgn;
33785 +       else
33786 +       {
33787 +           rgn = rgn->rgn_mnext;
33788 +           forward++;
33789 +       }
33790 +    }
33791 +    else
33792 +    {
33793 +       lorgn = uctx->uctx_mrgns;
33794 +       if (lorgn->rgn_mbase > addr)
33795 +           return (lorgn);                                     /* lowest regions is higher than addr */
33796 +       if ((addr - lorgn->rgn_mbase) < (base - addr))
33797 +       {
33798 +           rgn = lorgn;                                        /* search forward from head */
33799 +           forward++;
33800 +       }
33801 +    }
33802 +    if (forward)
33803 +    {
33804 +       while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
33805 +           rgn = rgn->rgn_mnext;
33806 +
33807 +       if (rgn->rgn_mbase <= addr)
33808 +           uctx->uctx_mrgnlast = rgn;
33809 +       return (rgn);
33810 +    }
33811 +    else
33812 +    {
33813 +       while (rgn->rgn_mbase > addr)
33814 +           rgn = rgn->rgn_mprev;
33815 +
33816 +       if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
33817 +           return (rgn->rgn_mnext);
33818 +       else
33819 +       {
33820 +           uctx->uctx_mrgnlast = rgn;
33821 +           return (rgn);
33822 +       }
33823 +    }
33824 +}
33825 +
33826 +static int
33827 +user_addrgn_main (USER_CTXT *uctx, USER_RGN *nrgn)
33828 +{
33829 +    USER_RGN  *rgn   = user_findrgn_main (uctx, nrgn->rgn_mbase, 1);
33830 +    virtaddr_t nbase = nrgn->rgn_mbase;
33831 +    virtaddr_t ntop  = nbase + nrgn->rgn_len - 1;
33832 +    virtaddr_t base;
33833 +
33834 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
33835 +
33836 +    if (rgn == NULL)
33837 +    {
33838 +       uctx->uctx_mrgns = uctx->uctx_mtail = nrgn;
33839 +       nrgn->rgn_mnext = nrgn->rgn_mprev = NULL;
33840 +    }
33841 +    else
33842 +    {
33843 +       base = rgn->rgn_mbase;
33844 +
33845 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
33846 +       {                                                       /* so insert after region (and hence at end */
33847 +           nrgn->rgn_mprev = rgn;                              /* of list */
33848 +           nrgn->rgn_mnext = NULL;
33849 +           rgn->rgn_mnext = uctx->uctx_mtail = nrgn;
33850 +       }
33851 +       else
33852 +       {
33853 +           if (nbase >= base || ntop >= base)                  /* overlapping region */
33854 +               return (-1);
33855 +
33856 +           nrgn->rgn_mnext = rgn;                              /* insert before region */
33857 +           nrgn->rgn_mprev = rgn->rgn_mprev;
33858 +           rgn->rgn_mprev  = nrgn;
33859 +           if (uctx->uctx_mrgns == rgn)
33860 +               uctx->uctx_mrgns = nrgn;
33861 +           else
33862 +               nrgn->rgn_mprev->rgn_mnext = nrgn;
33863 +       }
33864 +    }
33865 +    uctx->uctx_mrgnlast = nrgn;
33866 +    
33867 +    return (0);
33868 +}
33869 +
33870 +static USER_RGN *
33871 +user_removergn_main (USER_CTXT *uctx, USER_RGN *rgn)
33872 +{
33873 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
33874 +
33875 +    uctx->uctx_mrgnlast = rgn->rgn_mnext;
33876 +    if (rgn == uctx->uctx_mtail)
33877 +       uctx->uctx_mtail = rgn->rgn_mprev;
33878 +    else
33879 +       rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev;
33880 +    
33881 +    if (rgn == uctx->uctx_mrgns)
33882 +       uctx->uctx_mrgns = rgn->rgn_mnext;
33883 +    else
33884 +       rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext;
33885 +
33886 +    return (rgn);
33887 +}
33888 +
33889 +/* Remove whole region from both lists */
33890 +static void
33891 +user_removergn (USER_CTXT *uctx, USER_RGN *rgn)
33892 +{
33893 +    spin_lock (&uctx->uctx_rgnlock);
33894 +
33895 +    elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, rgn->rgn_len);
33896 +           
33897 +    user_removergn_elan (uctx, rgn);
33898 +    user_removergn_main (uctx, rgn);
33899 +    
33900 +    spin_unlock (&uctx->uctx_rgnlock);
33901 +    
33902 +    KMEM_FREE (rgn, sizeof (USER_RGN));
33903 +}
33904 +
33905 +/* Remove all allocated regions */
33906 +void
33907 +user_freergns (USER_CTXT *uctx)
33908 +{
33909 +    kmutex_lock (&uctx->uctx_rgnmutex);
33910 +
33911 +    while (uctx->uctx_mrgns)
33912 +       user_removergn(uctx, uctx->uctx_mrgns);
33913 +
33914 +    kmutex_unlock (&uctx->uctx_rgnmutex);
33915 +
33916 +    ASSERT (uctx->uctx_ergns == NULL);
33917 +}
33918 +
33919 +USER_RGN *
33920 +user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr)
33921 +{
33922 +    USER_RGN *rgn = user_findrgn_main (uctx, addr, 0);
33923 +    
33924 +    if (rgn != NULL && rgn->rgn_mbase <= addr && addr <= (rgn->rgn_mbase + rgn->rgn_len - 1))
33925 +       return (rgn);
33926 +    return (NULL);
33927 +}
33928 +
33929 +int
33930 +user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm)
33931 +{
33932 +    USER_RGN *nrgn;
33933 +
33934 +    PRINTF4 (uctx, DBG_PERM, "user_setperm: user %lx elan %llx len %lx perm %x\n", maddr, (long long) eaddr, len, perm);
33935 +
33936 +    if ((maddr & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET))
33937 +    {
33938 +        PRINTF0 (uctx, DBG_PERM, "user_setperm:  alignment failure\n");
33939 +       return (-EINVAL);
33940 +    }
33941 +    
33942 +    if ((maddr + len - 1) <= maddr || (eaddr + len - 1) <= eaddr) 
33943 +    {
33944 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  range failure\n");
33945 +       return (-EINVAL);
33946 +    }
33947 +
33948 +    KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1);
33949 +    
33950 +    if (nrgn == NULL)
33951 +       return (-ENOMEM);
33952 +
33953 +    nrgn->rgn_mbase = maddr;
33954 +    nrgn->rgn_ebase = eaddr;
33955 +    nrgn->rgn_len   = len;
33956 +    nrgn->rgn_perm  = perm;
33957 +
33958 +    kmutex_lock (&uctx->uctx_rgnmutex);
33959 +    spin_lock (&uctx->uctx_rgnlock);
33960 +
33961 +    if (user_addrgn_elan (uctx, nrgn) < 0)
33962 +    {
33963 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  elan address exists\n");
33964 +       spin_unlock (&uctx->uctx_rgnlock);
33965 +       kmutex_unlock (&uctx->uctx_rgnmutex);
33966 +
33967 +       KMEM_FREE (nrgn, sizeof (USER_RGN));
33968 +       return (-EINVAL);
33969 +    }
33970 +    
33971 +    if (user_addrgn_main (uctx, nrgn) < 0)
33972 +    {
33973 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  main address exists\n");
33974 +       user_removergn_elan (uctx, nrgn);
33975 +       
33976 +       spin_unlock (&uctx->uctx_rgnlock);
33977 +       kmutex_unlock (&uctx->uctx_rgnmutex);
33978 +
33979 +       KMEM_FREE (nrgn, sizeof (USER_RGN));
33980 +       return (-EINVAL);
33981 +    }
33982 +    spin_unlock (&uctx->uctx_rgnlock);
33983 +
33984 +    if ((perm & PERM_Preload))
33985 +       user_preload_main (uctx, maddr, len);
33986 +
33987 +    kmutex_unlock (&uctx->uctx_rgnmutex);
33988 +
33989 +    return (0);
33990 +}
33991 +
33992 +void
33993 +user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len)
33994 +{
33995 +    E4_Addr       raddr;
33996 +    E4_Addr       rtop;
33997 +    USER_RGN     *nrgn;
33998 +    USER_RGN     *rgn;
33999 +    USER_RGN     *rgn_next;
34000 +    unsigned long ssize;
34001 +    int                  res;
34002 +
34003 +    PRINTF2 (uctx, DBG_PERM, "user_clrperm: elan %llx len %lx\n", addr, len);
34004 +
34005 +    raddr = (addr & PAGEMASK);
34006 +    rtop = ((addr + len - 1) & PAGEMASK) + (PAGESIZE-1);
34007 +
34008 +    kmutex_lock (&uctx->uctx_rgnmutex);
34009 +    
34010 +    for (rgn = user_findrgn_elan (uctx, addr, 0); rgn != NULL; rgn = rgn_next)
34011 +    {
34012 +       if (rtop < rgn->rgn_ebase)                              /* rtop was in a gap */
34013 +           break;
34014 +       
34015 +       rgn_next = rgn->rgn_enext;                              /* Save next region pointer */
34016 +       
34017 +       PRINTF (uctx, DBG_PERM, "              elan %llx->%llx main %p->%p\n", 
34018 +               rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1,
34019 +               rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1);
34020 +
34021 +       if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
34022 +       {
34023 +           /* whole region is cleared */
34024 +
34025 +           PRINTF (uctx, DBG_PERM, "              whole region\n");
34026 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1);
34027 +           user_removergn (uctx, rgn);
34028 +       }
34029 +       else if (raddr <= rgn->rgn_ebase)
34030 +       {
34031 +           /* clearing at beginning, so shrink size and increment base ptrs */
34032 +           ssize = rtop - rgn->rgn_ebase + 1;
34033 +           
34034 +           PRINTF (uctx, DBG_PERM, "              clear at beginning %x\n", ssize);
34035 +
34036 +           spin_lock (&uctx->uctx_rgnlock);
34037 +
34038 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + ssize-1);
34039 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, ssize);
34040 +
34041 +           rgn->rgn_mbase += ssize;
34042 +           rgn->rgn_ebase += ssize;
34043 +           rgn->rgn_len   -= ssize;
34044 +           
34045 +           spin_unlock(&uctx->uctx_rgnlock);
34046 +       }
34047 +       else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
34048 +       {
34049 +           /* clearing at end, so just shrink length of region */
34050 +           ssize = (rgn->rgn_ebase + rgn->rgn_len - 1) - raddr + 1;
34051 +
34052 +           PRINTF (uctx, DBG_PERM, "              clear at end %x\n", ssize);
34053 +
34054 +           spin_lock (&uctx->uctx_rgnlock);
34055 +
34056 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", raddr, raddr+ssize-1);
34057 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, ssize);
34058 +
34059 +           rgn->rgn_len -= ssize;
34060 +
34061 +           spin_unlock(&uctx->uctx_rgnlock);
34062 +       }
34063 +       else
34064 +       {
34065 +           /* the section to go is in the middle,  so need to  */
34066 +           /* split it into two regions */
34067 +           KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1);
34068 +
34069 +           spin_lock (&uctx->uctx_rgnlock);
34070 +
34071 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", raddr, rtop);
34072 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, rtop - raddr + 1);
34073 +
34074 +           nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1);
34075 +           nrgn->rgn_ebase = rtop + 1;
34076 +           nrgn->rgn_len   = (rgn->rgn_ebase + rgn->rgn_len - 1) - rtop;
34077 +           nrgn->rgn_perm  = rgn->rgn_perm;
34078 +
34079 +           PRINTF (uctx, DBG_PERM, "              new elan %llx->%llx main %p->%p\n", 
34080 +                   nrgn->rgn_ebase, nrgn->rgn_ebase + nrgn->rgn_len-1,
34081 +                   nrgn->rgn_mbase, nrgn->rgn_mbase + nrgn->rgn_len-1);
34082 +
34083 +           rgn->rgn_len = (raddr - rgn->rgn_ebase);            /* shrink original region */
34084 +
34085 +           PRINTF (uctx, DBG_PERM, "              old elan %llx->%llx main %p->%p\n", 
34086 +                   rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1,
34087 +                   rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1);
34088 +
34089 +           res = user_addrgn_elan (uctx, nrgn);                /* insert new region */
34090 +           ASSERT (res == 0);                                  /* which cannot fail */
34091 +
34092 +           res = user_addrgn_main (uctx, nrgn);        
34093 +           ASSERT (res == 0);
34094 +
34095 +           spin_unlock(&uctx->uctx_rgnlock);
34096 +       }
34097 +    }
34098 +    kmutex_unlock (&uctx->uctx_rgnmutex);
34099 +}
34100 +
34101 +int
34102 +user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access)
34103 +{
34104 +    USER_RGN *rgn;
34105 +
34106 +    PRINTF3 (uctx, DBG_PERM, "user_checkperm: elan %lx len %lx access %x\n", raddr, rsize, access);
34107 +    
34108 +    if ((raddr + rsize - 1) < raddr)
34109 +       return (-ENOMEM);
34110 +    
34111 +    kmutex_lock (&uctx->uctx_rgnmutex);
34112 +    if ((rgn = user_rgnat_elan (uctx, raddr)) == (USER_RGN *) NULL)
34113 +    {
34114 +       kmutex_unlock (&uctx->uctx_rgnmutex);
34115 +       return (-ENOMEM);
34116 +    }
34117 +    else
34118 +    {
34119 +       register int ssize;
34120 +       
34121 +       for (; rsize != 0; rsize -= ssize, raddr += ssize)
34122 +       {
34123 +           if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1))
34124 +           {
34125 +               rgn  = rgn->rgn_enext;
34126 +               
34127 +               if (rgn == NULL || raddr != rgn->rgn_ebase)
34128 +               {
34129 +                   kmutex_unlock (&uctx->uctx_rgnmutex);
34130 +                   return (-ENOMEM);
34131 +               }
34132 +           }
34133 +           if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1))
34134 +               ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
34135 +           else
34136 +               ssize = rsize;
34137 +           
34138 +           PRINTF4 (uctx, DBG_PERM, "user_checkperm : rgn %lx -> %lx perm %x access %x\n",
34139 +                    rgn->rgn_ebase, rgn->rgn_ebase + (E4_Addr)rgn->rgn_len, rgn->rgn_perm, access);
34140 +
34141 +           if (ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, access))
34142 +           {
34143 +               kmutex_unlock (&uctx->uctx_rgnmutex);
34144 +               return (-EACCES);
34145 +           }
34146 +       }
34147 +    }
34148 +    
34149 +    kmutex_unlock (&uctx->uctx_rgnmutex);
34150 +    
34151 +    return (0);
34152 +}
34153 +
34154 +virtaddr_t
34155 +user_elan2main (USER_CTXT *uctx, E4_Addr addr)
34156 +{
34157 +    USER_RGN  *rgn;
34158 +    virtaddr_t raddr;
34159 +    
34160 +    spin_lock (&uctx->uctx_rgnlock);
34161 +    
34162 +    if ((rgn = user_rgnat_elan (uctx, addr)) == (USER_RGN *) NULL)
34163 +       raddr = (virtaddr_t) 0;
34164 +    else
34165 +       raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
34166 +
34167 +    spin_unlock (&uctx->uctx_rgnlock);
34168 +
34169 +    return (raddr);
34170 +}
34171 +
34172 +E4_Addr
34173 +user_main2elan (USER_CTXT *uctx, virtaddr_t addr)
34174 +{
34175 +    USER_RGN *rgn;
34176 +    E4_Addr   raddr;
34177 +
34178 +    spin_lock (&uctx->uctx_rgnlock);
34179 +    
34180 +    if ((rgn = user_rgnat_main (uctx, addr)) == (USER_RGN *) NULL)
34181 +       raddr = (virtaddr_t) 0;
34182 +    else
34183 +       raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
34184 +    
34185 +    spin_unlock (&uctx->uctx_rgnlock);
34186 +
34187 +    return (raddr);
34188 +}
34189 +
34190 +/*
34191 + * Local variables:
34192 + * c-file-style: "stroustrup"
34193 + * End:
34194 + */
34195 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/routetable.c
34196 ===================================================================
34197 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/routetable.c 2004-02-23 16:02:56.000000000 -0500
34198 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/routetable.c      2005-07-28 14:52:52.837680408 -0400
34199 @@ -0,0 +1,249 @@
34200 +/*
34201 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
34202 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
34203 + * 
34204 + *    For licensing information please see the supplied COPYING file
34205 + *
34206 + */
34207 +
34208 +#ident "@(#)$Id: routetable.c,v 1.15 2004/07/20 09:29:40 david Exp $"
34209 +/*      $Source: /cvs/master/quadrics/elan4mod/routetable.c,v $*/
34210 +
34211 +#include <qsnet/kernel.h>
34212 +
34213 +#include <elan4/sdram.h>
34214 +#include <elan4/debug.h>
34215 +#include <elan4/device.h>
34216 +
34217 +ELAN4_ROUTE_TABLE *
34218 +elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size)
34219 +{
34220 +    ELAN4_ROUTE_TABLE *tbl;
34221 +
34222 +    KMEM_ZALLOC (tbl, ELAN4_ROUTE_TABLE *, sizeof (ELAN4_ROUTE_TABLE), 1);
34223 +
34224 +    if (tbl == (ELAN4_ROUTE_TABLE *) NULL)
34225 +       return (NULL);
34226 +    
34227 +    tbl->tbl_size    = (size & E4_VPT_SIZE_MASK);
34228 +    tbl->tbl_entries = elan4_sdram_alloc (dev, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
34229 +
34230 +    if (tbl->tbl_entries == 0)
34231 +    {
34232 +       KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE));
34233 +       return ((ELAN4_ROUTE_TABLE *) NULL);
34234 +    }
34235 +
34236 +    spin_lock_init (&tbl->tbl_lock);
34237 +
34238 +    /* zero the route table */
34239 +    elan4_sdram_zeroq_sdram (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
34240 +
34241 +    return (tbl);
34242 +}
34243 +
34244 +void
34245 +elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl)
34246 +{
34247 +    elan4_sdram_free (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
34248 +    
34249 +    spin_lock_destroy (&tbl->tbl_lock);
34250 +
34251 +    KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE));
34252 +}
34253 +
34254 +void
34255 +elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry)
34256 +{
34257 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
34258 +    
34259 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), entry->Values[1]);
34260 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), entry->Values[0]);
34261 +    pioflush_sdram (dev);
34262 +}
34263 +
34264 +void
34265 +elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry)
34266 +{
34267 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
34268 +    
34269 +    entry->Values[0] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]));
34270 +    entry->Values[1] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]));
34271 +}
34272 +
34273 +void
34274 +elan4_invalidate_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp)
34275 +{
34276 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
34277 +
34278 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), 0);
34279 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), 0);
34280 +    pioflush_sdram (dev);
34281 +}
34282 +
34283 +static void
34284 +pack_them_routes (E4_VirtualProcessEntry *entry, E4_uint16 first, E4_uint8 *packed, unsigned ctx)
34285 +{
34286 +    E4_uint64 value0 = first;
34287 +    E4_uint64 value1 = ROUTE_CTXT_VALUE(ctx);
34288 +    E4_uint32 ThirdRouteBCastVal;
34289 +    register int i;
34290 +
34291 +    for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
34292 +    {
34293 +       value0 |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
34294 +       value1 |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
34295 +    }
34296 +
34297 +    /* DMA fix for large broadcast route values that fall into the double issue of route value 3 bug. */
34298 +    /* NOTE - this is only required when the link is running in Mod45 mode, it could be automatically
34299 +     *        disabled when Mod44 is detected */
34300 +    
34301 +    /* First search for the alignment type. The bug is only sensitive to an odd bcast alignment on the 3rd word. */
34302 +    for (i=4;i<16;i++)
34303 +       if (((value0 >> (i*4)) & 0xc) == 4)
34304 +           i++;
34305 +    
34306 +    if (i == 17)
34307 +    {
34308 +       ThirdRouteBCastVal = value1 & 0xcccccccc;
34309 +       if      (((value1 & 0xfffff0000000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x04444444))
34310 +           value1 |= 0x140000000ULL;
34311 +       else if (((value1 & 0xfffffff00000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00044444))
34312 +           value1 |= 0x1400000ULL;
34313 +       else if (((value1 & 0xfffffffff000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000444))
34314 +           value1 |= 0x14000ULL;
34315 +       else if (((value1 & 0xfffffffffff0ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000004))
34316 +           value1 |= 0x140ULL;
34317 +    }
34318 +    
34319 +    entry->Values[0] = value0;
34320 +    entry->Values[1] = value1;
34321 +}
34322 +
34323 +int
34324 +elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctx, unsigned lowid, unsigned highid, unsigned options)
34325 +{
34326 +    unsigned int broadcast    = (lowid != highid);
34327 +    unsigned int noadaptive   = 0;
34328 +    int          padbcast     = 0;
34329 +    E4_uint16    first;
34330 +    int                 rb;
34331 +    E4_uint8     packed[ROUTE_NUM_PACKED];
34332 +    int                 level, llink, hlink;
34333 +
34334 + regenerate_routes:
34335 +    first = 0;
34336 +    rb    = 0;
34337 +
34338 +    switch (pos->pos_mode)
34339 +    {
34340 +    case ELAN_POS_MODE_LOOPBACK:
34341 +       if (lowid != highid || lowid != pos->pos_nodeid)
34342 +           return (-EINVAL);
34343 +       
34344 +       route->Values[0] = FIRST_MYLINK;
34345 +       route->Values[1] = ROUTE_CTXT_VALUE (ctx);
34346 +       return (0);
34347 +
34348 +    case ELAN_POS_MODE_BACKTOBACK:
34349 +       if (lowid != highid || lowid == pos->pos_nodeid)
34350 +           return (-EINVAL);
34351 +       
34352 +       route->Values[0] = FIRST_MYLINK;
34353 +       route->Values[1] = ROUTE_CTXT_VALUE (ctx);
34354 +       return (0);
34355 +
34356 +    case ELAN_POS_MODE_SWITCHED:
34357 +    {
34358 +       unsigned char *arityp  = &pos->pos_arity[pos->pos_levels - 1];
34359 +       unsigned int   spanned = *arityp;
34360 +       unsigned int   broadcasting = 0;
34361 +       
34362 +       bzero (packed, sizeof (packed));
34363 +
34364 +       /* XXXX compute noadaptive ? */
34365 +
34366 +       for (level = 0; 
34367 +            level < pos->pos_levels && ! ((pos->pos_nodeid / spanned) == (lowid / spanned) &&
34368 +                                          (pos->pos_nodeid / spanned) ==  (highid / spanned)); 
34369 +            level++, spanned *= *(--arityp))
34370 +       {
34371 +           if (first == 0)
34372 +               first = (broadcast || noadaptive) ? FIRST_BCAST_TREE : FIRST_ADAPTIVE;
34373 +           else if (broadcast && padbcast)
34374 +           {
34375 +               padbcast = 0;
34376 +               packed[rb++] = PACKED_BCAST0(4, 4);
34377 +               packed[rb++] = PACKED_BCAST1(4, 4);
34378 +           }
34379 +           else
34380 +               packed[rb++] = (broadcast || noadaptive) ? PACKED_BCAST_TREE : PACKED_ADAPTIVE;    
34381 +       }
34382 +
34383 +       while (level >= 0)
34384 +       {
34385 +           spanned /= *arityp;
34386 +           
34387 +           llink = (lowid  / spanned) % *arityp;
34388 +           hlink = (highid / spanned) % *arityp;
34389 +           
34390 +           if (llink != hlink || broadcasting)
34391 +           {
34392 +               broadcasting = 1;
34393 +               
34394 +               if (first == 0)
34395 +                   first = FIRST_BCAST (hlink, llink);
34396 +               else
34397 +               {
34398 +                   packed[rb++] = PACKED_BCAST0(hlink, llink);
34399 +                   
34400 +                   if ((rb % 4) == 0 && PACKED_BCAST1(hlink, llink) == 0)
34401 +                   {
34402 +                       padbcast = 1;
34403 +                       goto regenerate_routes;
34404 +                   }
34405 +                   
34406 +                   packed[rb++] = PACKED_BCAST1(hlink, llink);
34407 +               }
34408 +           }
34409 +           else
34410 +           {
34411 +               if (first == 0)
34412 +                   first = FIRST_ROUTE(llink);
34413 +               else
34414 +                   packed[rb++] = PACKED_ROUTE(llink);
34415 +           }
34416 +           
34417 +           level--;
34418 +           arityp++;
34419 +       }
34420 +
34421 +       pack_them_routes (route, first | (options & FIRST_OPTIONS_MASK), packed, ctx);
34422 +       return (0);
34423 +    }
34424 +    }
34425 +
34426 +    return (-EINVAL);
34427 +}
34428 +
34429 +int
34430 +elan4_check_route (ELAN_POSITION *position, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags)
34431 +{
34432 +    /* XXXX - TBD: route validation not implemented yet, so every route is accepted */
34433 +    return (0);
34434 +}
34435 +
34436 +EXPORT_SYMBOL(elan4_alloc_routetable);
34437 +EXPORT_SYMBOL(elan4_free_routetable);
34438 +EXPORT_SYMBOL(elan4_write_route);
34439 +EXPORT_SYMBOL(elan4_read_route);
34440 +EXPORT_SYMBOL(elan4_invalidate_route);
34441 +EXPORT_SYMBOL(elan4_generate_route);
34442 +EXPORT_SYMBOL(elan4_check_route);
34443 +
34444 +/*
34445 + * Local variables:
34446 + * c-file-style: "stroustrup"
34447 + * End:
34448 + */
34449 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/sdram.c
34450 ===================================================================
34451 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/sdram.c      2004-02-23 16:02:56.000000000 -0500
34452 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/sdram.c   2005-07-28 14:52:52.839680104 -0400
34453 @@ -0,0 +1,1039 @@
34454 +/*
34455 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
34456 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
34457 + * 
34458 + *    For licensing information please see the supplied COPYING file
34459 + *
34460 + */
34461 +
34462 +#ident "@(#)$Id: sdram.c,v 1.29.6.4 2005/03/03 16:30:45 david Exp $"
34463 +/*      $Source: /cvs/master/quadrics/elan4mod/sdram.c,v $*/
34464 +
34465 +#include <qsnet/kernel.h>
34466 +
34467 +#include <elan4/debug.h>
34468 +#include <elan4/device.h>
34469 +
34470 +EXPORT_SYMBOL_GPL(elan4_sdram_readb);
34471 +EXPORT_SYMBOL_GPL(elan4_sdram_readw);
34472 +EXPORT_SYMBOL_GPL(elan4_sdram_readl);
34473 +EXPORT_SYMBOL_GPL(elan4_sdram_readq);
34474 +EXPORT_SYMBOL_GPL(elan4_sdram_writeb);
34475 +EXPORT_SYMBOL_GPL(elan4_sdram_writew);
34476 +EXPORT_SYMBOL_GPL(elan4_sdram_writel);
34477 +EXPORT_SYMBOL_GPL(elan4_sdram_writeq);
34478 +EXPORT_SYMBOL_GPL(elan4_sdram_zerob_sdram);
34479 +EXPORT_SYMBOL_GPL(elan4_sdram_zerow_sdram);
34480 +EXPORT_SYMBOL_GPL(elan4_sdram_zerol_sdram);
34481 +EXPORT_SYMBOL_GPL(elan4_sdram_zeroq_sdram);
34482 +EXPORT_SYMBOL_GPL(elan4_sdram_copyb_from_sdram);
34483 +EXPORT_SYMBOL_GPL(elan4_sdram_copyw_from_sdram);
34484 +EXPORT_SYMBOL_GPL(elan4_sdram_copyl_from_sdram);
34485 +EXPORT_SYMBOL_GPL(elan4_sdram_copyq_from_sdram);
34486 +EXPORT_SYMBOL_GPL(elan4_sdram_copyb_to_sdram);
34487 +EXPORT_SYMBOL_GPL(elan4_sdram_copyw_to_sdram);
34488 +EXPORT_SYMBOL_GPL(elan4_sdram_copyl_to_sdram);
34489 +EXPORT_SYMBOL_GPL(elan4_sdram_copyq_to_sdram);
34490 +EXPORT_SYMBOL_GPL(elan4_sdram_alloc);
34491 +EXPORT_SYMBOL_GPL(elan4_sdram_free);
34492 +EXPORT_SYMBOL_GPL(elan4_sdram_flushcache);
34493 +
34494 +#define SDRAM_MIN_BANK_SIZE            ((1 << 15) * 8)         /* 256 Kbytes */
34495 +
34496 +static inline ELAN4_SDRAM_BANK *
34497 +sdramaddr_to_bank (ELAN4_DEV *dev, sdramaddr_t saddr)
34498 +{
34499 +    register int i;
34500 +    
34501 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
34502 +    {
34503 +       ELAN4_SDRAM_BANK *bank = &dev->dev_sdram_banks[i];
34504 +
34505 +       if (saddr >= bank->b_base && saddr < (bank->b_base + bank->b_size))
34506 +           return (bank);
34507 +    }
34508 +    printk ("sdramaddr_to_bank: sdram address %lx not in a sdram bank\n", saddr);
34509 +    BUG();
34510 +
34511 +    return (NULL);     /* NOTREACHED */
34512 +}
34513 +
34514 +static inline int
34515 +sdramaddr_to_bankoffset (ELAN4_DEV *dev, sdramaddr_t saddr)
34516 +{
34517 +    return (saddr & (sdramaddr_to_bank (dev, saddr)->b_size-1));
34518 +}
34519 +
34520 +static inline int
34521 +sdramaddr_to_bit(ELAN4_DEV *dev, int indx, sdramaddr_t saddr)
34522 +{
34523 +    return (sdramaddr_to_bankoffset(dev, saddr) >> (SDRAM_MIN_BLOCK_SHIFT+(indx)));
34524 +}
34525 +
34526 +static inline ioaddr_t
34527 +sdramaddr_to_ioaddr (ELAN4_DEV *dev, sdramaddr_t saddr)
34528 +{
34529 +    ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, saddr);
34530 +
34531 +    return (bank->b_ioaddr + (saddr - bank->b_base));
34532 +}
34533 +
34534 +unsigned char
34535 +elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t off)
34536 +{
34537 +    return (__elan4_readb (dev, sdramaddr_to_ioaddr(dev, off)));
34538 +}
34539 +
34540 +unsigned short
34541 +elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t off)
34542 +{
34543 +    return (__elan4_readw (dev, sdramaddr_to_ioaddr(dev, off)));
34544 +}
34545 +
34546 +unsigned int
34547 +elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t off)
34548 +{
34549 +    return (__elan4_readl (dev, sdramaddr_to_ioaddr(dev, off)));
34550 +}
34551 +
34552 +unsigned long long
34553 +elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t off)
34554 +{
34555 +    return (__elan4_readq (dev, sdramaddr_to_ioaddr(dev, off)));
34556 +}
34557 +
34558 +void
34559 +elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t off, unsigned char val)
34560 +{
34561 +    writeb (val, (void *) sdramaddr_to_ioaddr(dev, off));
34562 +
34563 +    mb();
34564 +}
34565 +
34566 +void
34567 +elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t off, unsigned short val)
34568 +{
34569 +    writew (val, (void *) sdramaddr_to_ioaddr(dev, off));
34570 +
34571 +    mb();
34572 +}
34573 +
34574 +void
34575 +elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t off, unsigned int val)
34576 +{
34577 +    writel (val, (void *) (sdramaddr_to_ioaddr(dev, off)));
34578 +
34579 +    mb();
34580 +}
34581 +
34582 +void
34583 +elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t off, unsigned long long val)
34584 +{
34585 +    writeq (val, (void *) (sdramaddr_to_ioaddr(dev, off)));
34586 +
34587 +    mb();
34588 +}
34589 +
34590 +void
34591 +elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
34592 +{
34593 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34594 +    ioaddr_t lim  = dest + nbytes;
34595 +
34596 +    for (; dest < lim; dest += sizeof (u8))
34597 +       writeb (0, (void *) dest);
34598 +}
34599 +
34600 +void
34601 +elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
34602 +{
34603 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34604 +    ioaddr_t lim  = dest + nbytes;
34605 +
34606 +    for (; dest < lim; dest += sizeof (u16))   /* 16-bit variant: step/write one u16, matching zerol/zeroq */
34607 +       writew (0, (void *) dest);
34608 +}
34609 +
34610 +void
34611 +elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
34612 +{
34613 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34614 +    ioaddr_t lim  = dest + nbytes;
34615 +
34616 +    for (; dest < lim; dest += sizeof (u32))
34617 +       writel (0, (void *) dest);
34618 +}
34619 +
34620 +void
34621 +elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
34622 +{
34623 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34624 +    ioaddr_t lim  = dest + nbytes;
34625 +
34626 +#ifdef CONFIG_MPSAS
34627 +    if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, 0, nbytes) == 0)
34628 +       return;
34629 +#endif
34630 +
34631 +    for (; dest < lim; dest += sizeof (u64))
34632 +       writeq (0, (void *) dest);
34633 +}
34634 +
34635 +void
34636 +elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
34637 +{
34638 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
34639 +    u8      *dest = (u8 *) to;
34640 +    ioaddr_t lim  = src + nbytes;
34641 +
34642 +    for (; src < lim; src += sizeof (u8))
34643 +       *dest++ = __elan4_readb (dev, src);
34644 +}
34645 +
34646 +void
34647 +elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
34648 +{
34649 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
34650 +    u16     *dest = (u16 *) to;
34651 +    ioaddr_t lim  = src + nbytes;
34652 +
34653 +    for (; src < lim; src += sizeof (u16))
34654 +       *dest++ = __elan4_readw (dev, src);
34655 +}
34656 +
34657 +void
34658 +elan4_sdram_copyl_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
34659 +{
34660 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
34661 +    u32     *dest = (u32 *) to;
34662 +    ioaddr_t lim  = src + nbytes;
34663 +
34664 +    for (; src < lim; src += sizeof (u32))
34665 +       *dest++ = __elan4_readl (dev, src);
34666 +}
34667 +
34668 +void
34669 +elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
34670 +{
34671 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
34672 +    u64     *dest = (u64 *) to;
34673 +    ioaddr_t lim  = src + nbytes;
34674 +
34675 +#ifdef CONFIG_MPSAS
34676 +    if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, from, (unsigned long) to, nbytes) == 0)
34677 +       return;
34678 +#endif
34679 +
34680 +    for (; src < lim; src += sizeof (u64))
34681 +       *dest++ = __elan4_readq (dev, src);
34682 +}
34683 +
34684 +void
34685 +elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
34686 +{
34687 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34688 +    u8      *src  = (u8 *) from;
34689 +    ioaddr_t lim  = dest + nbytes;
34690 +
34691 +    for (; dest < lim; dest += sizeof (u8))
34692 +       writeb (*src++, (void *) (dest));
34693 +
34694 +    mb();
34695 +}
34696 +
34697 +void
34698 +elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
34699 +{
34700 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34701 +    u16     *src  = (u16 *) from;
34702 +    ioaddr_t lim  = dest + nbytes;
34703 +
34704 +    for (; dest < lim; dest += sizeof (u16))
34705 +       writew (*src++, (void *) (dest));
34706 +
34707 +    mb();
34708 +}
34709 +
34710 +void
34711 +elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
34712 +{
34713 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34714 +    u32     *src  = (u32 *) from;
34715 +    ioaddr_t lim  = dest + nbytes;
34716 +
34717 +    for (; dest < lim; dest += sizeof (u32))   /* was sizeof(u16)/writew: wrote only the low halfword of each u32 */
34718 +       writel (*src++, (void *) (dest));
34719 +
34720 +    mb();
34721 +}
34722 +
34723 +void
34724 +elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
34725 +{
34726 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
34727 +    u64     *src  = (u64 *) from;
34728 +    ioaddr_t lim  = dest + nbytes;
34729 +
34730 +#ifdef CONFIG_MPSAS
34731 +    if (sas_copyto_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, (unsigned long) from, nbytes) == 0)
34732 +       return;
34733 +#endif
34734 +
34735 +    for (; dest < lim; dest += sizeof (u64))
34736 +       writeq (*src++, (void *) (dest));
34737 +
34738 +    mb();
34739 +}
34740 +
34741 +/* sdram buddy allocator */
34742 +typedef struct sdramblock
34743 +{
34744 +    sdramaddr_t        next;
34745 +    sdramaddr_t prev;
34746 +} sdramblock_t;
34747 +
34748 +static inline sdramaddr_t
34749 +read_next (ELAN4_DEV *dev, sdramaddr_t block)
34750 +{
34751 +    return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next)));
34752 +}
34753 +
34754 +static inline sdramaddr_t
34755 +read_prev (ELAN4_DEV *dev, sdramaddr_t block)
34756 +{
34757 +    return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev)));
34758 +}
34759 +
34760 +static inline void
34761 +write_next (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val)
34762 +{
34763 +    writel (val, (void *) (sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next))));
34764 +}
34765 +
34766 +static inline void
34767 +write_prev (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val)
34768 +{
34769 +    writel (val, (void *) (sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev))));
34770 +}
34771 +
34772 +static inline void
34773 +freelist_insert (ELAN4_DEV *dev, int idx, sdramaddr_t block)
34774 +{
34775 +    sdramaddr_t next = dev->dev_sdram_freelists[(idx)];
34776 +
34777 +    /*
34778 +     * block->prev = NULL;
34779 +     * block->next = next;
34780 +     * if (next != NULL)
34781 +     *    next->prev = block;
34782 +     * freelist = block;
34783 +     */
34784 +    write_prev (dev, block, (sdramaddr_t) 0);
34785 +    write_next (dev, block, next);
34786 +    if (next != (sdramaddr_t) 0)
34787 +       write_prev (dev, next, block);
34788 +    dev->dev_sdram_freelists[idx] = block;
34789 +
34790 +    dev->dev_sdram_freecounts[idx]++;
34791 +    dev->dev_stats.s_sdram_bytes_free += (SDRAM_MIN_BLOCK_SIZE << idx);
34792 +
34793 +    mb();
34794 +}
34795 +
34796 +static inline void
34797 +freelist_remove (ELAN4_DEV *dev,int idx, sdramaddr_t block)
34798 +{
34799 +    /*
34800 +     * if (block->prev)
34801 +     *     block->prev->next = block->next;
34802 +     * else
34803 +     *     dev->dev_sdram_freelists[idx] = block->next;
34804 +     * if (block->next)
34805 +     *     block->next->prev = block->prev;
34806 +     */
34807 +    sdramaddr_t blocknext = read_next (dev, block);
34808 +    sdramaddr_t blockprev = read_prev (dev, block);
34809 +
34810 +    if (blockprev)
34811 +       write_next (dev, blockprev, blocknext);
34812 +    else
34813 +       dev->dev_sdram_freelists[idx] = blocknext;
34814 +    if (blocknext)
34815 +       write_prev (dev, blocknext, blockprev);
34816 +
34817 +    dev->dev_sdram_freecounts[idx]--;
34818 +    dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx);
34819 +
34820 +    mb();
34821 +}
34822 +
34823 +static inline void
34824 +freelist_removehead(ELAN4_DEV *dev, int idx, sdramaddr_t block)
34825 +{
34826 +    sdramaddr_t blocknext = read_next (dev, block);
34827 +
34828 +    if ((dev->dev_sdram_freelists[idx] = blocknext) != 0)
34829 +       write_prev (dev, blocknext, 0);
34830 +
34831 +    dev->dev_sdram_freecounts[idx]--;
34832 +    dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx);
34833 +
34834 +    mb();
34835 +}
34836 +
34837 +#ifdef DEBUG
34838 +static int
34839 +display_blocks (ELAN4_DEV *dev, int indx, char *string)
34840 +{
34841 +    sdramaddr_t block;
34842 +    int nbytes = 0;
34843 +
34844 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "%s - indx %d\n", string, indx);
34845 +    for (block = dev->dev_sdram_freelists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block))
34846 +    {
34847 +       PRINTF (DBG_DEVICE, DBG_SDRAM, "  %x\n", block);
34848 +       nbytes += (SDRAM_MIN_BLOCK_SIZE << indx);
34849 +    }
34850 +
34851 +    return (nbytes);
34852 +}
34853 +
34854 +void
34855 +elan4_sdram_display (ELAN4_DEV *dev, char *string)
34856 +{
34857 +    int indx;
34858 +    int nbytes = 0;
34859 +    
34860 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_display: dev=%p\n", dev);
34861 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
34862 +       if (dev->dev_sdram_freelists[indx] != (sdramaddr_t) 0)
34863 +           nbytes += display_blocks (dev, indx, string);
34864 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "\n%d bytes free - %d pages free\n", nbytes, nbytes/SDRAM_PAGE_SIZE);
34865 +}
34866 +
34867 +void
34868 +elan4_sdram_verify (ELAN4_DEV *dev)
34869 +{
34870 +    int indx, size, nbits, i, b;
34871 +    sdramaddr_t block;
34872 +
34873 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
34874 +    {
34875 +       unsigned count = 0;
34876 +
34877 +       for (block = dev->dev_sdram_freelists[indx]; block; block = read_next (dev, block), count++)
34878 +       {
34879 +           ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block);
34880 +           unsigned         off  = sdramaddr_to_bankoffset (dev, block);
34881 +           int              bit  = sdramaddr_to_bit (dev, indx, block);
34882 +
34883 +           if ((block & (size-1)) != 0)
34884 +               printk ("elan4_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx);
34885 +           
34886 +           if (bank == NULL || off > bank->b_size)
34887 +               printk ("elan4_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx);
34888 +           else if (BT_TEST (bank->b_bitmaps[indx], bit) == 0)
34889 +               printk ("elan4_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx);
34890 +           else
34891 +           {
34892 +               for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1)
34893 +               {
34894 +                   bit = sdramaddr_to_bit (dev, i, block);
34895 +
34896 +                   for (b = 0; b < nbits; b++)
34897 +                       if (BT_TEST(bank->b_bitmaps[i], bit + b))
34898 +                           printk ("elan4_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b);
34899 +               }
34900 +           }
34901 +       }
34902 +
34903 +       if (dev->dev_sdram_freecounts[indx] != count)
34904 +           printk ("elan4_sdram_verify: indx=%x expected %d got %d\n", indx, dev->dev_sdram_freecounts[indx], count);
34905 +    }
34906 +}
34907 +
34908 +#endif
34909 +
34910 +static void
34911 +free_block (ELAN4_DEV *dev, sdramaddr_t block, int indx)
34912 +{
34913 +    ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block);
34914 +    unsigned         bit  = sdramaddr_to_bit (dev, indx, block);
34915 +    unsigned         size = SDRAM_MIN_BLOCK_SIZE << indx;
34916 +
34917 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%x indx=%d bit=%x\n", block, indx, bit);
34918 +
34919 +    ASSERT ((block & (size-1)) == 0);
34920 +    ASSERT (BT_TEST (bank->b_bitmaps[indx], bit) == 0);
34921 +
34922 +    while (BT_TEST (bank->b_bitmaps[indx], bit ^ 1))
34923 +    {
34924 +       sdramaddr_t buddy = block ^ size;
34925 +       
34926 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%x buddy=%x indx=%d\n", block, buddy, indx);
34927 +       
34928 +       BT_CLEAR (bank->b_bitmaps[indx], bit ^ 1);
34929 +       
34930 +       freelist_remove (dev, indx, buddy);
34931 +       
34932 +       block = (block < buddy) ? block : buddy;
34933 +       indx++;
34934 +       size <<= 1;
34935 +       bit >>= 1;
34936 +    }
34937 +    
34938 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%x indx=%d bit=%x\n", block, indx, bit);
34939 +    
34940 +    freelist_insert (dev, indx, block);
34941 +    
34942 +    BT_SET (bank->b_bitmaps[indx], bit);
34943 +}
34944 +
34945 +void
34946 +elan4_sdram_init (ELAN4_DEV *dev)
34947 +{
34948 +    int indx;
34949 +
34950 +    spin_lock_init (&dev->dev_sdram_lock);
34951 +
34952 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
34953 +    {
34954 +       dev->dev_sdram_freelists[indx]  = (sdramaddr_t) 0;
34955 +       dev->dev_sdram_freecounts[indx] = 0;
34956 +    }
34957 +}
34958 +
34959 +void
34960 +elan4_sdram_fini (ELAN4_DEV *dev)
34961 +{
34962 +    spin_lock_destroy (&dev->dev_sdram_lock);
34963 +}
34964 +
34965 +#ifdef CONFIG_MPSAS
34966 +/* size of Elan SDRAM in simulation */
34967 +#define SDRAM_used_addr_bits           (16)
34968 +#define SDRAM_SIMULATION_BANK_SIZE     ((1 << SDRAM_used_addr_bits) * 8)       /* 128 kbytes */
34969 +
34970 +static int
34971 +elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
34972 +{
34973 +    printk ("elan%d: memory bank %d is %d Kb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (SDRAM_SIMULATION_BANK_SIZE / 1024));
34974 +
34975 +    bank->b_size = SDRAM_SIMULATION_BANK_SIZE;
34976 +
34977 +    return 1;
34978 +}
34979 +
34980 +#else
34981 +
34982 +static void
34983 +initialise_cache_tags (ELAN4_DEV *dev, unsigned addr)
34984 +{
34985 +    register int set, line;
34986 +
34987 +    mb();
34988 +
34989 +    /* Initialise the whole cache to hold sdram at "addr" as direct mapped */
34990 +
34991 +    for (set = 0; set < E4_NumCacheSets; set++)
34992 +       for (line = 0; line < E4_NumCacheLines; line++)
34993 +           write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11));
34994 +
34995 +    read_tag (dev, Tags[set][line]);   /* read it back to guarantee the memory system is quiet again -- NOTE(review): set/line are one past the loop bounds here; confirm Tags[] has room or that this is intentional */
34996 +    mb();
34997 +}
34998 +
34999 +static __inline__ int
35000 +sdram_GreyToBinary(int GreyVal, int NoOfBits)
35001 +{
35002 +    int Bit;
35003 +    int BinaryVal=0;
35004 +    for (Bit=(1 << (NoOfBits-1)); Bit != 0; Bit >>= 1)
35005 +       BinaryVal ^= (GreyVal & Bit) ^ ((BinaryVal >> 1) & Bit);
35006 +    return (BinaryVal);
35007 +}
35008 +
35009 +static __inline__ int
35010 +sdram_BinaryToGrey(int BinaryVal)
35011 +{
35012 +    return (BinaryVal ^ (BinaryVal >> 1));
35013 +}
35014 +
35015 +void
35016 +elan4_sdram_setup_delay_lines (ELAN4_DEV *dev, int factor)
35017 +{
35018 +    /* This is used to fix the SDRAM delay line values */
35019 +    int i, AutoGenDelayValue=0;
35020 +    int NewDelayValue;
35021 +
35022 +    if (dev->dev_sdram_cfg & SDRAM_FIXED_DELAY_ENABLE)   /* already setup. */
35023 +       return;
35024 +
35025 +    /* now get an average of 10 dll values */
35026 +    for (i=0;i<10;i++)
35027 +       AutoGenDelayValue += sdram_GreyToBinary(SDRAM_GET_DLL_DELAY(read_reg64 (dev, SDRamConfigReg)),
35028 +                                              SDRAM_FIXED_DLL_DELAY_BITS);
35029 +
35030 +    NewDelayValue = factor + (AutoGenDelayValue / 10); /* Mean of 10 values */
35031 +
35032 +    dev->dev_sdram_cfg = (dev->dev_sdram_cfg & ~(SDRAM_FIXED_DLL_DELAY_MASK << SDRAM_FIXED_DLL_DELAY_SHIFT)) |
35033 +                         SDRAM_FIXED_DELAY_ENABLE | SDRAM_FIXED_DLL_DELAY(sdram_BinaryToGrey(NewDelayValue));
35034 +
35035 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg);     /* Put back the new value */
35036 +
35037 +    pioflush_reg (dev);
35038 +}
35039 +
35040 +static int
35041 +elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
35042 +{
35043 +    unsigned long      mappedsize = bank->b_size;
35044 +    ioaddr_t           ioaddr;
35045 +    unsigned long long value, size;
35046 +    register int       i;
35047 +    extern int         sdram_bank_limit;
35048 +
35049 +    if (mappedsize > SDRAM_MAX_BLOCK_SIZE)
35050 +       mappedsize = SDRAM_MAX_BLOCK_SIZE;
35051 +
35052 +    while ((ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, mappedsize, &bank->b_handle)) == 0)
35053 +    {
35054 +       if (mappedsize <= (64*1024*1024))                       /* boards normally populated with 64mb, so whinge if we can't see this much */
35055 +           printk ("elan%d: could not map bank %d size %dMb\n", dev->dev_instance, (int)(bank - dev->dev_sdram_banks), (int)mappedsize/(1024*1024));
35056 +
35057 +       if ((mappedsize >>= 1) < (1024*1024))
35058 +           return 0;
35059 +    }
35060 +
35061 +    /* first probe to see if the memory bank is present */
35062 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
35063 +       initialise_cache_tags (dev, E4_CacheSize);
35064 +
35065 +    for (i = 0; i < 64; i++)
35066 +    {
35067 +       unsigned long long pattern = (1ull << i);
35068 +
35069 +       writeq (pattern, ioaddr);                                       /* write pattern at base  */
35070 +
35071 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
35072 +           initialise_cache_tags (dev, 0);
35073 +
35074 +       writeq (~pattern, ioaddr + E4_CacheSize);                       /* write ~pattern at cachesize */
35075 +
35076 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
35077 +          initialise_cache_tags (dev, E4_CacheSize);
35078 +       
35079 +       writeq (~pattern, ioaddr + 2*E4_CacheSize);                     /* write ~pattern at 2*cachesize */
35080 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
35081 +           initialise_cache_tags (dev, 2*E4_CacheSize);
35082 +       
35083 +       value = __elan4_readq (dev, ioaddr);                            /* read pattern back at 0 */
35084 +       
35085 +       if (value != pattern)
35086 +       {
35087 +           printk ("elan%d: sdram bank %d not present\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
35088 +           elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
35089 +           return 0;
35090 +       }
35091 +    }
35092 +    
35093 +    /* sdram bank is present, so work out its size.  We store the maximum size at the base
35094 +     * and then store the address at each address on every power of two address until
35095 +     * we reach the minimum mappable size (PAGESIZE), we then read back the value at the
35096 +     * base to determine the bank size */
35097 +    writeq (mappedsize, ioaddr);
35098 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
35099 +        initialise_cache_tags (dev, 0);
35100 +
35101 +    for (size = mappedsize >> 1; size > PAGE_SIZE; size >>= 1)
35102 +    {
35103 +       writeq (size, ioaddr + size);
35104 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
35105 +           initialise_cache_tags (dev, size);
35106 +    }
35107 +
35108 +    if ((size = __elan4_readq (dev, ioaddr)) < SDRAM_MIN_BANK_SIZE)
35109 +    {
35110 +       printk ("elan%d: memory bank %d dubious\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
35111 +       elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
35112 +       return 0;
35113 +    }
35114 +
35115 +    if (sdram_bank_limit == 0 || size <= (sdram_bank_limit * 1024 * 1024))
35116 +       printk ("elan%d: memory bank %d is %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024)));
35117 +    else
35118 +    {
35119 +       size = (sdram_bank_limit * 1024 * 1024);
35120 +       printk ("elan%d: limit bank %d to %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024)));
35121 +    }
35122 +
35123 +    bank->b_size = size;
35124 +
35125 +    elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
35126 +    return 1;
35127 +}
35128 +#endif
35129 +
35130 +int
35131 +elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
35132 +{
35133 +    int indx, size;
35134 +
35135 +    bank->b_ioaddr = 0;
35136 +
35137 +    if (! elan4_sdram_probe_bank (dev, bank))
35138 +       return 0;
35139 +
35140 +    if ((bank->b_ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, bank->b_size, &bank->b_handle)) == (ioaddr_t) 0)
35141 +    {
35142 +       printk ("elan%d: could not map sdrambank %d\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
35143 +       return 0;
35144 +    }
35145 +
35146 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1) /* allocate the buddy allocator bitmaps */
35147 +       KMEM_ZALLOC (bank->b_bitmaps[indx], bitmap_t *, sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size), 1);
35148 +    
35149 +    return 1;
35150 +}
35151 +
35152 +void
35153 +elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
35154 +{
35155 +    int indx, size;
35156 +
35157 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1)
35158 +       KMEM_FREE (bank->b_bitmaps[indx], sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size));
35159 +    
35160 +    elan4_unmap_device (dev, bank->b_ioaddr, bank->b_size, &bank->b_handle);
35161 +}
35162 +
35163 +void
35164 +elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
35165 +{
35166 +    sdramaddr_t base = bank->b_base;
35167 +    sdramaddr_t top  = bank->b_base + bank->b_size;
35168 +    register int indx;
35169 +    register unsigned long size;
35170 +
35171 +    /* align to the minimum block size */
35172 +    base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
35173 +    top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
35174 +
35175 +    /* don't allow 0 as a valid "base" */
35176 +    if (base == 0)
35177 +       base = SDRAM_MIN_BLOCK_SIZE;
35178 +
35179 +    /* carve the bottom to the biggest boundary */
35180 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
35181 +    {
35182 +       if ((base & size) == 0)
35183 +           continue;
35184 +
35185 +       if ((base + size) > top)
35186 +           break;
35187 +
35188 +       free_block (dev, base, indx);
35189 +       
35190 +       base += size;
35191 +    }
35192 +
35193 +    /* carve the top down to the biggest boundary */
35194 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
35195 +    {
35196 +       if ((top & size) == 0)
35197 +           continue;
35198 +
35199 +       if ((top - size) < base)
35200 +           break;
35201 +
35202 +       free_block (dev, (top - size), indx);
35203 +       
35204 +       top -= size;
35205 +    }
35206 +
35207 +    /* now free up the space in between */
35208 +    while (base < top)
35209 +    {
35210 +       free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
35211 +
35212 +       base += SDRAM_MAX_BLOCK_SIZE;
35213 +    }
35214 +}
35215 +
35216 +sdramaddr_t
35217 +elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes)
35218 +{
35219 +    sdramaddr_t block;
35220 +    register int i, indx;
35221 +    unsigned long size;
35222 +    unsigned long flags;
35223 +
35224 +    spin_lock_irqsave (&dev->dev_sdram_lock, flags);
35225 +
35226 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
35227 +       ;
35228 +
35229 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
35230 +
35231 +    /* need to split a bigger block up */
35232 +    for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
35233 +       if (dev->dev_sdram_freelists[i])
35234 +           break;
35235 +    
35236 +    if (i == SDRAM_NUM_FREE_LISTS)
35237 +    {
35238 +       spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
35239 +       printk ("elan4_sdram_alloc: %d bytes failed\n", nbytes);
35240 +       return ((sdramaddr_t) 0);
35241 +    }
35242 +    
35243 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: use block=%x indx=%d\n", dev->dev_sdram_freelists[i], i);
35244 +
35245 +    /* remove the block from the free list */
35246 +    freelist_removehead (dev, i, (block = dev->dev_sdram_freelists[i]));
35247 +
35248 +    /* clear the appropriate bit in the bitmap */
35249 +    BT_CLEAR (sdramaddr_to_bank (dev, block)->b_bitmaps[i], sdramaddr_to_bit (dev,i, block));
35250 +
35251 +    /* and split it up as required */
35252 +    while (i-- > indx)
35253 +       free_block (dev, block + (size >>= 1), i);
35254 +
35255 +    spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
35256 +
35257 +    ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
35258 +
35259 +#ifdef CONFIG_MPSAS
35260 +    elan4_sdram_zeroq_sdram (dev, block, sizeof (sdramblock_t));
35261 +#endif
35262 +
35263 +    return ((sdramaddr_t) block);
35264 +}
35265 +
35266 +void
35267 +elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t block, int nbytes)
35268 +{
35269 +    register int indx;
35270 +    unsigned long size;
35271 +    unsigned long flags;
35272 +
35273 +    spin_lock_irqsave (&dev->dev_sdram_lock, flags);
35274 +
35275 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
35276 +       ;
35277 +
35278 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_free: indx=%d block=%x\n", indx, block);
35279 +
35280 +    free_block (dev, block, indx);
35281 +
35282 +    spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
35283 +}
35284 +
35285 +void
35286 +elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t addr, int len)
35287 +{
35288 +    int set, off;
35289 +
35290 +    SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
35291 +
35292 +    /*
35293 +     * if flushing more than a single set (8K), then you have to flush the whole cache.
35294 +     *   NOTE - in the real world we will probably want to generate a burst across
35295 +     *          the pci bus.
35296 +     */
35297 +    if (len >= E4_CacheSetSize)
35298 +    {
35299 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => whole cache\n", addr, len, addr + len);
35300 +
35301 +#ifdef CONFIG_MPSAS
35302 +       elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space, E4_CacheSize);
35303 +#else
35304 +       for (set = 0; set < E4_NumCacheSets; set++)
35305 +           for (off = 0; off < E4_CacheSetSize; off += E4_CacheLineSize)
35306 +               elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
35307 +#endif
35308 +    }
35309 +    else
35310 +    {
35311 +       unsigned base    = addr & ~(E4_CACHELINE_SIZE-1);
35312 +       unsigned top     = (addr + len + (E4_CACHELINE_SIZE-1)) & ~(E4_CACHELINE_SIZE-1);
35313 +       unsigned baseoff = base & (E4_CacheSetSize-1);
35314 +       unsigned topoff  = top  & (E4_CacheSetSize-1);
35315 +
35316 +       if ((base ^ top) & E4_CacheSetSize)                     /* wraps */
35317 +       {
35318 +           PRINTF7 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => split cache (%x,%x %x,%x)\n", 
35319 +                    addr, len, addr + len, 0, topoff, baseoff, E4_CacheSetSize);
35320 +
35321 +#ifdef CONFIG_MPSAS
35322 +           for (set = 0; set < E4_NumCacheSets; set++)
35323 +           {
35324 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize), topoff);
35325 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, E4_CacheSetSize - baseoff);
35326 +           }
35327 +#else
35328 +           for (set = 0; set < E4_NumCacheSets; set++)
35329 +           {
35330 +               for (off = 0; off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE)
35331 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
35332 +               
35333 +               for (off = (base & (E4_CacheSetSize-1)); off < E4_CacheSetSize; off += E4_CACHELINE_SIZE)
35334 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
35335 +           }
35336 +#endif
35337 +       }
35338 +       else
35339 +       {
35340 +           PRINTF5 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => part cache (%x,%x)\n", 
35341 +                    addr, len, addr + len, baseoff, topoff);
35342 +
35343 +#ifdef CONFIG_MPSAS
35344 +           for (set = 0; set < E4_NumCacheSets; set++)
35345 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, topoff - baseoff);
35346 +#else
35347 +           for (set = 0; set < E4_NumCacheSets; set++)
35348 +               for (off = (base & (E4_CacheSetSize-1)); off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE)
35349 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
35350 +#endif
35351 +       }
35352 +    }
35353 +    pioflush_sdram (dev);
35354 +    
35355 +    CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
35356 +}
35357 +
35358 +static char *
35359 +get_correctableErr_bitpos(uint SyndromeBits)
35360 +{
35361 +    switch (SyndromeBits)
35362 +    {
35363 +    case 0x00: return ("NoErr");
35364 +    case 0x31: return ("00"); 
35365 +    case 0x32: return ("01"); 
35366 +    case 0xc4: return ("02"); 
35367 +    case 0xc8: return ("03"); 
35368 +    case 0x26: return ("04"); 
35369 +    case 0x91: return ("05"); 
35370 +    case 0x89: return ("06"); 
35371 +    case 0x64: return ("07"); 
35372 +    case 0xc1: return ("08"); 
35373 +    case 0xf2: return ("09"); 
35374 +    case 0x34: return ("10"); 
35375 +    case 0xf8: return ("11"); 
35376 +    case 0xf1: return ("12"); 
35377 +    case 0xc2: return ("13"); 
35378 +    case 0xf4: return ("14"); 
35379 +    case 0x38: return ("15"); 
35380 +    case 0xd6: return ("16"); 
35381 +    case 0xa1: return ("17"); 
35382 +    case 0x79: return ("18"); 
35383 +    case 0xa4: return ("19"); 
35384 +    case 0xd9: return ("20"); 
35385 +    case 0xa2: return ("21"); 
35386 +    case 0x76: return ("22"); 
35387 +    case 0xa8: return ("23"); 
35388 +    case 0xe6: return ("24"); 
35389 +    case 0x51: return ("25"); 
35390 +    case 0xb9: return ("26"); 
35391 +    case 0x54: return ("27"); 
35392 +    case 0xe9: return ("28"); 
35393 +    case 0x52: return ("29"); 
35394 +    case 0xb6: return ("30"); 
35395 +    case 0x58: return ("31"); 
35396 +    case 0x13: return ("32"); 
35397 +    case 0x23: return ("33"); 
35398 +    case 0x4c: return ("34"); 
35399 +    case 0x8c: return ("35"); 
35400 +    case 0x62: return ("36"); 
35401 +    case 0x19: return ("37"); 
35402 +    case 0x98: return ("38"); 
35403 +    case 0x46: return ("39"); 
35404 +    case 0x1c: return ("40"); 
35405 +    case 0x2f: return ("41"); 
35406 +    case 0x43: return ("42"); 
35407 +    case 0x8f: return ("43"); 
35408 +    case 0x1f: return ("44"); 
35409 +    case 0x2c: return ("45"); 
35410 +    case 0x4f: return ("46"); 
35411 +    case 0x83: return ("47"); 
35412 +    case 0x6d: return ("48"); 
35413 +    case 0x1a: return ("49"); 
35414 +    case 0x97: return ("50"); 
35415 +    case 0x4a: return ("51"); 
35416 +    case 0x9d: return ("52"); 
35417 +    case 0x2a: return ("53"); 
35418 +    case 0x67: return ("54"); 
35419 +    case 0x8a: return ("55"); 
35420 +    case 0x6e: return ("56"); 
35421 +    case 0x15: return ("57"); 
35422 +    case 0x9b: return ("58"); 
35423 +    case 0x45: return ("59"); 
35424 +    case 0x9e: return ("60"); 
35425 +    case 0x25: return ("61"); 
35426 +    case 0x6b: return ("62"); 
35427 +    case 0x85: return ("63"); 
35428 +    case 0x01: return ("C0"); 
35429 +    case 0x02: return ("C1"); 
35430 +    case 0x04: return ("C2"); 
35431 +    case 0x08: return ("C3"); 
35432 +    case 0x10: return ("C4"); 
35433 +    case 0x20: return ("C5"); 
35434 +    case 0x40: return ("C6"); 
35435 +    case 0x80: return ("C7"); 
35436 +
35437 +    case 0x07: case 0x0b: case 0x0d: case 0x0e: case 0x3d: case 0x3e: case 0x70: case 0x7c: // T  
35438 +    case 0xb0: case 0xbc: case 0xc7: case 0xcb: case 0xd0: case 0xd3: case 0xe0: case 0xe3: // T  
35439 +       return ("triple");
35440 +
35441 +    case 0x0f: case 0x55: case 0x5a: case 0xa5: case 0xaa: case 0xf0: case 0xff: // Q  
35442 +       return ("quadruple");
35443 +
35444 +    case 0x16: case 0x29: case 0x37: case 0x3b: case 0x49: case 0x57: case 0x5b: case 0x5d: case 0x5e: case 0x61: // M  
35445 +    case 0x68: case 0x73: case 0x75: case 0x7a: case 0x7f: case 0x86: case 0x92: case 0x94: case 0xa7: case 0xab: // M  
35446 +    case 0xad: case 0xae: case 0xb3: case 0xb5: case 0xba: case 0xbf: case 0xcd: case 0xce: case 0xd5: case 0xda: // M  
35447 +    case 0xdc: case 0xdf: case 0xe5: case 0xea: case 0xec: case 0xef: case 0xf7: case 0xfb: case 0xfd: case 0xfe: // M  
35448 +       return ("multiple");
35449 +
35450 +    default:   // all other cases
35451 +       return ("double");
35452 +    }
35453 +}
35454 +
35455 +char *
35456 +elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, E4_uint64 ConfigReg, char *str)
35457 +{
35458 +    E4_uint64 StartupSyndrome    = dev->dev_sdram_initial_ecc_val;
35459 +    int       RisingDQSsyndrome  = ((ECC_RisingDQSSyndrome(status) == ECC_RisingDQSSyndrome(StartupSyndrome)) ?
35460 +                                   0 : ECC_RisingDQSSyndrome(status));
35461 +    int              FallingDQSsyndrome = ((ECC_FallingDQSSyndrome(status) == ECC_FallingDQSSyndrome(StartupSyndrome)) ?
35462 +                                   0 : ECC_FallingDQSSyndrome(status));
35463 +    E4_uint64 Addr = ECC_Addr(status);
35464 +    int       Bank = (Addr >> 6) & 3;
35465 +    int       Cas  = ((Addr >> 3) & 7) | ((Addr >> (8 - 3)) & 0xf8) | ((Addr >> (25 - 8)) & 0x100) |
35466 +                    ((Addr >> (27 - 9)) & 0x200) | ((Addr >> (29 - 10)) & 0xc00);
35467 +    int       Ras  = ((Addr >> 13) & 0xfff) | ((Addr >> (26 - 12)) & 0x1000) | ((Addr >> (28 - 13)) & 0x2000) |
35468 +                    ((Addr >> (30 - 14)) & 0x4000);
35469 +
35470 +    sprintf (str, "Addr=%07llx Bank=%x Ras=%x Cas=%x Falling DQS=%s Rising DQS=%s Syndrome=%x%s%s%s%s Type=%s SDRamDelay=%s,%0d",              /* 41 + 16 + 8 + 15 + 24 + 13 + 22 + 10 + 10 == 151 */
35471 +            (long long)Addr, Bank, Ras, Cas,
35472 +            get_correctableErr_bitpos(FallingDQSsyndrome),
35473 +            get_correctableErr_bitpos(RisingDQSsyndrome),
35474 +            (int)ECC_Syndrome(status),
35475 +            ECC_UncorrectableErr(status)   ? " Uncorrectable" : "",
35476 +            ECC_MultUncorrectErrs(status)  ? " Multiple-Uncorrectable" : "",
35477 +            ECC_CorrectableErr(status)     ? " Correctable" : "",
35478 +            ECC_MultCorrectErrs(status)    ? " Multiple-Correctable" : "",
35479 +            (status & 0x0010000000000000ull)  ? "W" :
35480 +            (status & 0x0020000000000000ull)  ? "R" :
35481 +            (status & 0x0030000000000000ull)  ? "C" : "-",
35482 +            (ConfigReg & SDRAM_FIXED_DELAY_ENABLE)  ? "F" : "A",
35483 +            sdram_GreyToBinary(SDRAM_GET_DLL_DELAY(ConfigReg), SDRAM_FIXED_DLL_DELAY_BITS));
35484 +
35485 +    return str;
35486 +}
35487 +
35488 +/*
35489 + * Local variables:
35490 + * c-file-style: "stroustrup"
35491 + * End:
35492 + */
35493 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/trap.c
35494 ===================================================================
35495 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/trap.c       2004-02-23 16:02:56.000000000 -0500
35496 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/trap.c    2005-07-28 14:52:52.840679952 -0400
35497 @@ -0,0 +1,777 @@
35498 +/*
35499 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
35500 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
35501 + * 
35502 + *    For licensing information please see the supplied COPYING file
35503 + *
35504 + */
35505 +
35506 +#ident "@(#)$Id: trap.c,v 1.19.10.3 2005/03/09 12:00:08 addy Exp $"
35507 +/*      $Source: /cvs/master/quadrics/elan4mod/trap.c,v $*/
35508 +
35509 +#include <qsnet/kernel.h>
35510 +
35511 +#include <elan4/debug.h>
35512 +#include <elan4/device.h>
35513 +
35514 +#include <elan4/trtype.h>
35515 +#include <elan4/commands.h>
35516 +
35517 +char * const PermTypes[16] = 
35518 +{
35519 +    "Disabled",       "Unused",          "LocalDataRead", "LocalDataWrite",
35520 +    "LocalRead",      "LocalExecute",    "ReadOnly",      "LocalWrite",
35521 +    "LocalEventOnly", "LocalEventWrite", "RemoteEvent",   "RemoteAll",
35522 +    "RemoteReadOnly", "RemoteWriteOnly", "DataReadWrite", "NoFault",
35523 +};
35524 +
35525 +char * const AccTypes[] =
35526 +{
35527 +    "LocalDataRead ", "LocalDataWrite", "RemoteRead    ", "RemoteWrite   ",
35528 +    "Execute       ", "LocalEvent    ", "Unused        ", "RemoteEvent   "
35529 +};
35530 +char * const DataTypes[] = {"Byte ", "HWord", "Word ", "DWord"};
35531 +char * const PhysTypes[] = {"Special Read", "Special Write", "Physical Read", "Physical Write"};
35532 +    
35533 +char * const EProcTrapNames[] = {
35534 +    "EventProcNoFault",
35535 +    "EventProcAddressAlignment",
35536 +    "EventProcMemoryFault",
35537 +    "EventProcCountWrapError",
35538 +};
35539 +
35540 +char * const CProcTrapNames[] = {
35541 +    "CommandProcNoFault",
35542 +    "CommandProcInserterError",
35543 +    "CommandProcPermissionTrap",
35544 +    "CommandProcSendTransInvalid",
35545 +    "CommandProcSendTransExpected",
35546 +    "CommandProcDmaQueueOverflow",
35547 +    "CommandProcInterruptQueueOverflow",
35548 +    "CommandProcMemoryFault",
35549 +    "CommandProcRouteFetchFault",
35550 +    "CommandProcFailCountZero",
35551 +    "CommandProcAddressAlignment",
35552 +    "CommandProcWaitTrap",
35553 +    "CommandProcMultipleGuards",
35554 +    "CommandProcOpenOnGuardedChan",
35555 +    "CommandProcThreadQueueOverflow",
35556 +    "CommandProcBadData",
35557 +};
35558 +
35559 +char *const CProcInsertError[] = {
35560 +    "No Error",
35561 +    "Overflowed",
35562 +    "Invalid Write Size",
35563 +    "Invalid Write Order",
35564 +};
35565 +
35566 +char * const DProcTrapNames[] = {
35567 +    "DmaProcNoFault",
35568 +    "DmaProcRouteFetchFault",
35569 +    "DmaProcFailCountError",
35570 +    "DmaProcPacketAckError",
35571 +    "DmaProcRunQueueReadFault",
35572 +    "DmaProcQueueOverFlow",
35573 +};
35574 +
35575 +char *const IProcTrapNames[] = {
35576 +    "InputNoFault",
35577 +    "InputAddressAlignment",
35578 +    "InputMemoryFault",
35579 +    "InputInvalidTransType",
35580 +    "InputDmaQueueOverflow",
35581 +    "InputEventEngineTrapped",
35582 +    "InputCrcErrorAfterPAckOk",
35583 +    "InputEopErrorOnWaitForEop",
35584 +    "InputEopErrorTrap",
35585 +    "InputDiscardAfterAckOk",
35586 +};
35587 +
35588 +char *const TProcTrapNames[] = {
35589 +    "HaltThread",
35590 +    "TrapForTooManyInstructions",
35591 +    "InstAccessException",
35592 +    "Unimplemented",
35593 +    "DataAccessException",
35594 +    "DataAlignmentError",
35595 +    "TrapForUsingBadData",
35596 +};
35597 +
35598 +#define declare_spaces(space, str)             char space[64]; do { int i; for (i = 0; i < strlen(str); i++) spaces[i] = ' '; space[i] = '\0'; } while (0)
35599 +#define declare_prefix(space, spaces, str)     char space[64]; do { strcpy (space, spaces); strcat (space, str); } while (0)
35600 +
35601 +void
35602 +elan4_display_farea (void *type, int mode, char *str, E4_FaultSave *farea)
35603 +{
35604 +    E4_uint32 FSR = FaultSaveFSR(farea->FSRAndFaultContext);
35605 +
35606 +    declare_spaces(spaces, str);
35607 +    
35608 +    elan4_debugf (type, mode, "%s Fault occurred at %016llx for context %4x\n", str,
35609 +                 farea->FaultAddress, FaultSaveContext(farea->FSRAndFaultContext));
35610 +    
35611 +    if (FSR & AT_VirtualWriteAccBit)                           /* Virtual write access */
35612 +       elan4_debugf (type, mode, "%s FSR=%x: Virtual Write. DWSize=0x%x EndP=0x%x Access=%s DT=%s\n",
35613 +                     spaces, FSR, FSR & AT_VirtualWriteSizeMask,
35614 +                     (FSR >> AT_VirtualWriteEndPtrShift) & AT_VirtualWriteEndPtrMask,
35615 +                     AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask],
35616 +                     DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]);
35617 +    else if (FSR & AT_VirtualReadAccBit)                       /* Virtual read access */
35618 +       elan4_debugf (type, mode, "%s FSR=%x: Virtual Read. DWSize=0x%x Access=%s DT=%s\n",
35619 +                     spaces, FSR, FSR & AT_VirtualReadSizeMask,
35620 +                     AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask],
35621 +                     DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]);
35622 +    else
35623 +       elan4_debugf (type, mode, "%s FSR=%x: %s. Size=0x%x\n", spaces,
35624 +                     FSR, PhysTypes[(FSR >> AT_SelBitsShift) & AT_SelBitsMask],
35625 +                     FSR & AT_OtherSizeMask);
35626 +    elan4_debugf (type, mode, "%s FSR: %s %s%s %sWalking\n", spaces,
35627 +                 (FSR & AT_NonAlloc) ? "NonAlloc" : "Alloc",
35628 +                 (FSR & AT_DmaData) ? "Dma " : "",
35629 +                 (FSR & FSR_WalkForThread) ? "ThreadAcc" : "UnitsAcc",
35630 +                 (FSR & FSR_Walking) ? "" : "Not");
35631 +    PRINTF (type, mode, "%s FSR: %s%sHashTable=%s\n", spaces,
35632 +           (FSR & FSR_NoTranslationsFound) ? "NoTranslationsFound " : "",
35633 +           (FSR & FSR_WalkingProtectionFault) ? "WalkingProtectionFault " : "",
35634 +           (FSR & FSR_HashTable1) ? "1" : "0");
35635 +    if (FSR & (FSR_RouteVProcErr | FSR_FaultForBadData))
35636 +       elan4_debugf (type, mode, "%s FSR: %s%s\n", spaces,
35637 +                     (FSR & FSR_RouteVProcErr) ? "RouteVProcErr " : "",
35638 +                     (FSR & FSR_FaultForBadData) ? "FaultForBadData " : "");
35639 +}
35640 +
35641 +void
35642 +elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap)
35643 +{
35644 +    declare_spaces (spaces, str);
35645 +
35646 +    elan4_debugf (type, mode, "%s Status=%016llx %s EventAddr=%016llx CountAndType=%016llx\n", str,
35647 +                 trap->tr_status, EProcTrapNames[EPROC_TrapType(trap->tr_status)],
35648 +                 trap->tr_eventaddr, trap->tr_event.ev_CountAndType);
35649 +    elan4_debugf (type, mode, "%s Param=%016llx.%016llx\n", spaces,
35650 +                 trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
35651 +
35652 +    elan4_display_farea (type, mode, strcat (spaces, EPROC_Port0Fault(trap->tr_status) ? " EPROC0" : " EPROC1"), &trap->tr_faultarea);
35653 +}
35654 +
35655 +void
35656 +elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap)
35657 +{
35658 +    declare_spaces(spaces, str);
35659 +
35660 +    elan4_debugf (type, mode, "%s Status=%llx %s Command=%llx\n", str, trap->tr_status, 
35661 +                 CProcTrapNames[CPROC_TrapType(trap->tr_status)], trap->tr_command);
35662 +    elan4_debugf (type, mode, "%s Desc=%016llx %016llx %016llx %016llx\n", str,
35663 +                 trap->tr_qdesc.CQ_QueuePtrs, trap->tr_qdesc.CQ_HoldingValue,
35664 +                 trap->tr_qdesc.CQ_AckBuffers, trap->tr_qdesc.CQ_Control);
35665 +
35666 +    switch (CPROC_TrapType (trap->tr_status))
35667 +    {
35668 +    case CommandProcInserterError:
35669 +       elan4_debugf (type, mode, "%s   %s\n", str, CProcInsertError[CQ_RevB_ErrorType(trap->tr_qdesc.CQ_QueuePtrs)]);
35670 +       break;
35671 +
35672 +    case CommandProcWaitTrap:
35673 +       elan4_display_eproc_trap (type, mode, spaces, &trap->tr_eventtrap);
35674 +       break;
35675 +
35676 +    default:
35677 +       elan4_display_farea (type, mode, spaces, &trap->tr_faultarea);
35678 +       break;
35679 +    }
35680 +}
35681 +
35682 +void
35683 +elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap)
35684 +{
35685 +    declare_spaces (spaces, str);
35686 +
35687 +    elan4_debugf (type, mode, "%s status %llx - %s\n", str,
35688 +                 trap->tr_status, DProcTrapNames[DPROC_TrapType(trap->tr_status)]);
35689 +
35690 +    elan4_debugf (type, mode, "%s DESC %016llx %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_typeSize, 
35691 +                 trap->tr_desc.dma_cookie, trap->tr_desc.dma_vproc, trap->tr_desc.dma_srcAddr);
35692 +    elan4_debugf (type, mode, "%s      %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_dstAddr, 
35693 +                 trap->tr_desc.dma_srcEvent, trap->tr_desc.dma_dstEvent);
35694 +
35695 +    if (DPROC_PrefetcherFault (trap->tr_status))
35696 +       elan4_display_farea (type, mode, spaces, &trap->tr_prefetchFault);
35697 +}
35698 +
35699 +void
35700 +elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap)
35701 +{
35702 +    register int i;
35703 +    declare_spaces (spaces, str);
35704 +
35705 +    elan4_debugf (type, mode, "%s PC=%016llx nPC=%016llx State=%016llx Status=%016llx -%s%s%s%s\n", str,
35706 +                 trap->tr_pc, trap->tr_npc, trap->tr_state, trap->tr_status, 
35707 +                 (trap->tr_state & TS_TrapForTooManyInstructions) ? " TrapForTooManyInstructions" : "",
35708 +                 (trap->tr_state & TS_Unimplemented)              ? " Unimplemented"              : "",
35709 +                 (trap->tr_state & TS_DataAlignmentError)         ? " DataAlignmentError"         : "",
35710 +                 (trap->tr_state & TS_InstAccessException)        ? " InstAccessException"        : "",
35711 +                 (trap->tr_state & TS_DataAccessException)        ? " DataAlignmentError"         : "");
35712 +    
35713 +    for (i = 0; i < 64; i += 4)
35714 +       elan4_debugf (type, mode, "%s r%d - %016llx %016llx %016llx %016llx\n", spaces, i,
35715 +                     trap->tr_regs[i], trap->tr_regs[i+1], trap->tr_regs[i+2], trap->tr_regs[i+3]);
35716 +    
35717 +    if (trap->tr_state & TS_InstAccessException)
35718 +    {
35719 +       declare_prefix (prefix, spaces, "Inst");
35720 +
35721 +       elan4_display_farea (type, mode, prefix, &trap->tr_instFault);
35722 +    }
35723 +
35724 +    if (trap->tr_state & TS_DataAccessException)
35725 +    {
35726 +       declare_prefix (prefix, spaces, "Data");
35727 +       elan4_display_farea (type, mode, prefix, &trap->tr_dataFault);
35728 +    }
35729 +}
35730 +
35731 +void
35732 +elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap)
35733 +{
35734 +    register int i;
35735 +    declare_spaces (spaces, str);
35736 +
35737 +    for (i = 0; i < trap->tr_numTransactions; i++)
35738 +    {
35739 +       E4_IprocTrapHeader *hdrp    = &trap->tr_transactions[i];
35740 +       E4_uint64           status  = hdrp->IProcStatusCntxAndTrType;
35741 +       E4_Addr             addr    = hdrp->TrAddr;
35742 +       char               *typeString;
35743 +       char                buffer[256];
35744 +       char               *ptr = buffer;
35745 +       
35746 +       if (IPROC_EOPTrap(status))
35747 +       {
35748 +           switch (IPROC_EOPType(status))
35749 +           {
35750 +           case EOP_GOOD:        typeString = "EopGood";   break;
35751 +           case EOP_BADACK:      typeString = "EopBadAck"; break;
35752 +           case EOP_ERROR_RESET: typeString = "EopReset";  break;
35753 +           default:              typeString = "EopBad";    break;
35754 +           }
35755 +           
35756 +           ptr += sprintf (ptr, "%15s Cntx=%-6d", typeString, IPROC_NetworkContext(status));
35757 +       }
35758 +       else
35759 +       {
35760 +           if (IPROC_BadLength(status))
35761 +               typeString = "BadLength";
35762 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_DISCARD)
35763 +               typeString = "DiscardCrc";
35764 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_ERROR)
35765 +               typeString = "ErrorCrc Remote Network error";
35766 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_BAD)
35767 +               typeString = "BadCrc Cable error into this node.";
35768 +           else
35769 +           {
35770 +               if ((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK)
35771 +                   typeString = "WriteBlock";
35772 +               else
35773 +               {
35774 +                   switch (IPROC_TransactionType(status) & TR_OPCODE_MASK)
35775 +                   {
35776 +                   case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK: typeString = "SetEvent";        break;
35777 +                   case TR_REMOTEDMA & TR_OPCODE_MASK:         typeString = "RemoteDma";       break;
35778 +                   case TR_SENDDISCARD & TR_OPCODE_MASK:       typeString = "SendDiscard";     break;
35779 +                   case TR_GTE & TR_OPCODE_MASK:               typeString = "GTE";             break;
35780 +                   case TR_LT & TR_OPCODE_MASK:                typeString = "LT";              break;
35781 +                   case TR_EQ & TR_OPCODE_MASK:                typeString = "EQ";              break;
35782 +                   case TR_NEQ & TR_OPCODE_MASK:               typeString = "NEQ";             break;
35783 +                   case TR_IDENTIFY & TR_OPCODE_MASK:          typeString = "Identify";        break;
35784 +                   case TR_ADDWORD & TR_OPCODE_MASK:           typeString = "AddWord";         break;
35785 +                   case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:    typeString = "InputQCommit";    break;
35786 +                   case TR_TESTANDWRITE & TR_OPCODE_MASK:      typeString = "TestAndWrite";    break;
35787 +                   case TR_INPUT_Q_GETINDEX & TR_OPCODE_MASK:  typeString = "InputQGetIndex";  break;
35788 +                   case TR_TRACEROUTE_TRANS & TR_OPCODE_MASK:  typeString = "TraceRoute";      break;
35789 +                   default:                                    typeString = "Unknown";         break;
35790 +                   }
35791 +               }
35792 +           }
35793 +
35794 +           ptr += sprintf (ptr, "%15s Cntx=%-6d Addr=%016llx", typeString, IPROC_NetworkContext(status), (unsigned long long) addr);
35795 +       }
35796 +       
35797 +       
35798 +       if (IPROC_TrapValue(status) != InputNoFault)
35799 +       {
35800 +           ptr += sprintf (ptr, " TrType=%2d ChanTrapped=%x GoodAck=%x BadAck=%x InputterChan=%d", IPROC_TrapValue(status),
35801 +                           IPROC_ChannelTrapped(status), IPROC_GoodAckSent(status), IPROC_BadAckSent(status),
35802 +                           IPROC_InputterChan(status));
35803 +           if (IPROC_EOPTrap(status))
35804 +               ptr += sprintf (ptr, " EOPType=%d", IPROC_EOPType(status));
35805 +           else
35806 +               ptr += sprintf (ptr, " %s%s%s%s", 
35807 +                               IPROC_FirstTrans(status) ? " FirstTrans" : "",
35808 +                               IPROC_LastTrans(status) ? " LastTrans" : "",
35809 +                               (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP) ? " WaitForEop" : "",
35810 +                               (IPROC_GoodAckSent(status) &  (1 << IPROC_Channel(status))) ? " AckSent" : "");
35811 +       }
35812 +       
35813 +       elan4_debugf (type, mode, "%s %s\n", str, buffer);
35814 +
35815 +       str = spaces;
35816 +    }
35817 +
35818 +    elan4_display_farea (type, mode, spaces, &trap->tr_faultarea);
35819 +}
35820 +
35821 +#define elan4_sdram_copy_faultarea(dev, unit, farea) \
35822 +    elan4_sdram_copyq_from_sdram ((dev), (dev)->dev_faultarea + (unit) * sizeof (E4_FaultSave), (E4_uint64 *) farea, sizeof (E4_FaultSave));
35823 +
35824 +void
35825 +elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent)
35826 +{
35827 +    /* only one of the memory ports can fault at a time */
35828 +    ASSERT (EPROC_TrapType(status) != EventProcMemoryFault || (EPROC_Port0Fault(status) ^ EPROC_Port1Fault(status)) == 1);
35829 +
35830 +    trap->tr_status = status;
35831 +    
35832 +    if (EPROC_Port0Fault(status))
35833 +       elan4_sdram_copy_faultarea (dev, CUN_EventProc0, &trap->tr_faultarea);
35834 +    if (EPROC_Port1Fault(status))
35835 +       elan4_sdram_copy_faultarea (dev, CUN_EventProc1, &trap->tr_faultarea);
35836 +
35837 +    if (iswaitevent)
35838 +    {
35839 +       /*
35840 +        * for waitevents the Event address is always taken from the command processor
35841 +        * 
35842 +        * if we trapped during the copy then we take the "Event" from the event processor
35843 +        * since we need to complete the copy.  Otherwise we'll be reissuing the original
35844 +        * command again
35845 +        */
35846 +       E4_uint32 fsr = FaultSaveFSR(trap->tr_faultarea.FSRAndFaultContext);
35847 +
35848 +       trap->tr_eventaddr = read_reg64 (dev, CommandHold) ^ WAIT_EVENT_CMD;
35849 +
35850 +       if (EPROC_TrapType(trap->tr_status) == EventProcMemoryFault && 
35851 +           (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite))
35852 +       {
35853 +           trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType);
35854 +           trap->tr_event.ev_Params[0]    = read_reg64 (dev, EventParameters[0]);
35855 +           trap->tr_event.ev_Params[1]    = read_reg64 (dev, EventParameters[1]);
35856 +       }
35857 +       else
35858 +       {
35859 +           trap->tr_event.ev_Params[0]    = read_reg64 (dev, CommandCopy[5]);
35860 +           trap->tr_event.ev_CountAndType = read_reg64 (dev, CommandCopy[4]);
35861 +           trap->tr_event.ev_Params[1]    = read_reg64 (dev, CommandCopy[6]);
35862 +
35863 +       }
35864 +    }
35865 +    else
35866 +    {
35867 +       trap->tr_eventaddr             = read_reg64 (dev, EventAddress);
35868 +       trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType);
35869 +       trap->tr_event.ev_Params[0]    = read_reg64 (dev, EventParameters[0]);
35870 +       trap->tr_event.ev_Params[1]    = read_reg64 (dev, EventParameters[1]);
35871 +    }
35872 +
35873 +    BumpDevStat (dev, s_eproc_trap_types[EPROC_TrapType(status)]);
35874 +}
35875 +
35876 +int 
35877 +cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq, int chan)
35878 +{
35879 +       /* cq = ucq->ucq_cq */
35880 +       if ((cq->cq_perm & CQ_STENEnableBit) != 0)
35881 +       {
35882 +            sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
35883 +           E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
35884 +           sdramaddr_t   insertPtr    = (queuePtrs & CQ_PtrMask);
35885 +           sdramaddr_t   commandPtr   = CQ_CompletedPtr (queuePtrs);
35886 +           unsigned int  cqSize       = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
35887 +
35888 +           if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue))
35889 +           {
35890 +               E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue));
35891 +
35892 +               for (; (oooMask & 1) != 0; oooMask >>= 1)
35893 +                   insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1));
35894 +           }
35895 +
35896 +           while (commandPtr != insertPtr)
35897 +           {
35898 +               E4_uint64    command = elan4_sdram_readq (dev, commandPtr);
35899 +               unsigned int cmdSize;
35900 +
35901 +                switch (__categorise_command (command, &cmdSize))
35902 +               {
35903 +               case 0:
35904 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
35905 +                   break;
35906 +
35907 +               case 1: /* open */
35908 +                   if (((chan << 4) == (command & (1<<4))))
35909 +                       /* Matches supplied channel */
35910 +                       return (command >> 32);
35911 +                   else
35912 +                       (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
35913 +                   break;
35914 +
35915 +               case 2:
35916 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
35917 +               case 3:
35918 +                   printk ("cproc_open_extract_vp: invalid command %llx\n", command);
35919 +                   return -1;
35920 +               }
35921 +           } /* while */
35922 +       }
35923 +
35924 +       return -1;
35925 +}
35926 +
35927 +void
35928 +elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum)
35929 +{
35930 +    /* extract the state from the device */
35931 +    elan4_sdram_copy_faultarea (dev, CUN_CommandProc, &trap->tr_faultarea);
35932 +
35933 +    trap->tr_status  = status;
35934 +    trap->tr_command = read_reg64 (dev, CommandHold);
35935 +    
35936 +    elan4_sdram_copyq_from_sdram (dev, dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc)), &trap->tr_qdesc, sizeof (E4_CommandQueueDesc));
35937 +
35938 +    if (CPROC_TrapType (status) == CommandProcWaitTrap)
35939 +       elan4_extract_eproc_trap (dev, read_reg64 (dev, EProcStatus), &trap->tr_eventtrap, 1);
35940 +
35941 +    BumpDevStat (dev, s_cproc_trap_types[CPROC_TrapType(status)]);
35942 +
35943 +    if (PackValue(trap->tr_qdesc.CQ_AckBuffers, 0) == PackTimeout || PackValue(trap->tr_qdesc.CQ_AckBuffers, 1) == PackTimeout)
35944 +       BumpDevStat (dev, s_cproc_timeout);
35945 +}
35946 +
35947 +void
35948 +elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit)
35949 +{
35950 +    trap->tr_status = status;
35951 +    
35952 +    if (unit == 0)
35953 +    {
35954 +       trap->tr_desc.dma_typeSize   = read_reg64 (dev, Dma0Desc.dma_typeSize);
35955 +       trap->tr_desc.dma_cookie     = read_reg64 (dev, Dma0Desc.dma_cookie);
35956 +       trap->tr_desc.dma_vproc      = read_reg64 (dev, Dma0Desc.dma_vproc);
35957 +       trap->tr_desc.dma_srcAddr    = read_reg64 (dev, Dma0Desc.dma_srcAddr);
35958 +       trap->tr_desc.dma_dstAddr    = read_reg64 (dev, Dma0Desc.dma_dstAddr);
35959 +       trap->tr_desc.dma_srcEvent   = read_reg64 (dev, Dma0Desc.dma_srcEvent);
35960 +       trap->tr_desc.dma_dstEvent   = read_reg64 (dev, Dma0Desc.dma_dstEvent);
35961 +       
35962 +       elan4_sdram_copy_faultarea (dev, CUN_DProcPA0, &trap->tr_packAssemFault);
35963 +    }
35964 +    else
35965 +    {
35966 +       trap->tr_desc.dma_typeSize   = read_reg64 (dev, Dma1Desc.dma_typeSize);
35967 +       trap->tr_desc.dma_cookie     = read_reg64 (dev, Dma1Desc.dma_cookie);
35968 +       trap->tr_desc.dma_vproc      = read_reg64 (dev, Dma1Desc.dma_vproc);
35969 +       trap->tr_desc.dma_srcAddr    = read_reg64 (dev, Dma1Desc.dma_srcAddr);
35970 +       trap->tr_desc.dma_dstAddr    = read_reg64 (dev, Dma1Desc.dma_dstAddr);
35971 +       trap->tr_desc.dma_srcEvent   = read_reg64 (dev, Dma1Desc.dma_srcEvent);
35972 +       trap->tr_desc.dma_dstEvent   = read_reg64 (dev, Dma1Desc.dma_dstEvent);
35973 +       
35974 +       elan4_sdram_copy_faultarea (dev, CUN_DProcPA1, &trap->tr_packAssemFault);
35975 +    }
35976 +    
35977 +    if (DPROC_PrefetcherFault (trap->tr_status))
35978 +       elan4_sdram_copy_faultarea (dev, (CUN_DProcData0 | DPROC_FaultUnitNo(trap->tr_status)), &trap->tr_prefetchFault);
35979 +
35980 +    if (DPROC_PacketTimeout (trap->tr_status))
35981 +       BumpDevStat (dev, s_dproc_timeout);
35982 +
35983 +    BumpDevStat (dev, s_dproc_trap_types[DPROC_TrapType(status)]);
35984 +}    
35985 +
35986 +void
35987 +elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap)
35988 +{
35989 +    int i;
35990 +
35991 +    trap->tr_status = status;
35992 +    trap->tr_state  = read_reg64 (dev, Thread_Trap_State);
35993 +    trap->tr_pc     = read_reg64 (dev, PC_W);
35994 +    trap->tr_npc    = read_reg64 (dev, nPC_W);
35995 +    trap->tr_dirty  = read_reg64 (dev, DirtyBits);
35996 +    trap->tr_bad    = read_reg64 (dev, BadBits);
35997 +
35998 +#ifdef CONFIG_MPSAS
35999 +    if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS, 
36000 +                         ((dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) ? ELAN4_REVA_REG_OFFSET : ELAN4_REVB_REG_OFFSET) +
36001 +                         offsetof (E4_Registers, Regs.TProcRegs), (unsigned long) &trap->tr_regs, 64*sizeof (E4_uint64)) < 0)
36002 +    {
36003 +       for (i = 0; i < 64; i++)
36004 +           if (trap->tr_dirty & ((E4_uint64) 1 << i))
36005 +               trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
36006 +    }
36007 +
36008 +    for (i = 0; i < 64; i++)
36009 +       if (! (trap->tr_dirty & ((E4_uint64) 1 << i)))
36010 +           trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
36011 +#else
36012 +    for (i = 0; i < 64; i++)
36013 +    {
36014 +       if (trap->tr_dirty & ((E4_uint64) 1 << i))
36015 +           trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
36016 +       else
36017 +           trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
36018 +    }
36019 +#endif
36020 +    
36021 +    if (trap->tr_state & TS_DataAccessException)
36022 +       elan4_sdram_copy_faultarea (dev, CUN_TProcData0 | TS_DataPortNo (trap->tr_state), &trap->tr_dataFault);
36023 +
36024 +    if (trap->tr_state & TS_InstAccessException)
36025 +       elan4_sdram_copy_faultarea (dev, CUN_TProcInst, &trap->tr_instFault);
36026 +
36027 +    for (i = 0; i < 7; i++)
36028 +       if (trap->tr_state & (1 << i))
36029 +           BumpDevStat (dev, s_tproc_trap_types[i]);
36030 +}
36031 +
36032 +void
36033 +elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit)
36034 +{
36035 +    sdramaddr_t hdroff  = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
36036 +    sdramaddr_t dataoff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrData[0][unit]);
36037 +    register int i, j;
36038 +    int                  CurrUnitNo    = (unit >= 2) ? CUN_IProcHighPri : CUN_IProcLowPri;
36039 +    sdramaddr_t CurrFaultArea = dev->dev_faultarea + (CurrUnitNo * sizeof (E4_FaultSave));
36040 +
36041 +    /* Finally copy the fault area */
36042 +    elan4_sdram_copy_faultarea (dev, CurrUnitNo, &trap->tr_faultarea);
36043 +
36044 +    /*
36045 +     * Clear out the fault save area after reading to allow a fault on the write of the back pointer of
36046 +     * an InputQCommit to be observed if a simultaneous event proc trap occurs.
36047 +     */
36048 +    elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FSRAndFaultContext), 0x0ULL);
36049 +    elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FaultAddress), 0x0ULL);
36050 +
36051 +    /* copy the transaction headers */
36052 +    trap->tr_transactions[0].IProcStatusCntxAndTrType = status;
36053 +    trap->tr_transactions[0].TrAddr                   = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, TrAddr));
36054 +    
36055 +    for (i = 0; !IPROC_EOPTrap(trap->tr_transactions[i].IProcStatusCntxAndTrType);)
36056 +    {
36057 +       if (IPROC_BadLength (trap->tr_transactions[i].IProcStatusCntxAndTrType))
36058 +           BumpDevStat (dev, s_bad_length);
36059 +       else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_BAD)
36060 +           BumpDevStat (dev, s_crc_bad);
36061 +       else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_ERROR)
36062 +           BumpDevStat (dev, s_crc_error);
36063 +
36064 +       BumpDevStat (dev, s_iproc_trap_types[IPROC_TrapValue (trap->tr_transactions[i].IProcStatusCntxAndTrType)]);
36065 +
36066 +       hdroff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapHeader);
36067 +
36068 +       if (++i == MAX_TRAPPED_TRANS)
36069 +           break;
36070 +
36071 +       elan4_sdram_copyq_from_sdram (dev, hdroff, &trap->tr_transactions[i], sizeof (E4_IprocTrapHeader));
36072 +    }
36073 +    
36074 +    if (IPROC_EOPType (trap->tr_transactions[i].IProcStatusCntxAndTrType) == EOP_ERROR_RESET)
36075 +       BumpDevStat (dev, s_eop_reset);
36076 +
36077 +    /* Remember the number of transactions we've copied */
36078 +    trap->tr_numTransactions = i + 1;
36079 +    
36080 +    /* Copy all the data blocks in one go */
36081 +    for (i = 0; i < MIN (trap->tr_numTransactions, MAX_TRAPPED_TRANS); i++, dataoff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapData))
36082 +    {
36083 +       if (IPROC_BadLength(status) || IPROC_TransCRCStatus (status) != CRC_STATUS_GOOD)
36084 +           elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, TRANS_DATA_DWORDS*sizeof(E4_uint64));
36085 +       else
36086 +       {
36087 +           int trtype  = IPROC_TransactionType(trap->tr_transactions[i].IProcStatusCntxAndTrType);
36088 +           int ndwords = (trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT;
36089 +
36090 +           elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, ndwords*sizeof(E4_uint64));
36091 +
36092 +           for (j = ndwords; j < TRANS_DATA_DWORDS; j++)
36093 +               trap->tr_dataBuffers[i].Data[j] = 0xbeec0f212345678ull;
36094 +       }
36095 +    }
36096 +    
36097 +}
36098 +
36099 +void
36100 +elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap)
36101 +{
36102 +    int i;
36103 +
36104 +    trap->tr_flags          = 0;
36105 +    trap->tr_trappedTrans    = TR_TRANS_INVALID;
36106 +    trap->tr_waitForEopTrans = TR_TRANS_INVALID;
36107 +    trap->tr_identifyTrans   = TR_TRANS_INVALID;
36108 +
36109 +    if (trap->tr_numTransactions > MAX_TRAPPED_TRANS)
36110 +       trap->tr_flags = TR_FLAG_TOOMANY_TRANS;
36111 +
36112 +    /*
36113 +     * Now scan all the transactions received 
36114 +     */
36115 +    for (i = 0; i < MIN(trap->tr_numTransactions, MAX_TRAPPED_TRANS) ; i++)
36116 +    {
36117 +       E4_IprocTrapHeader *hdrp   = &trap->tr_transactions[i];
36118 +       E4_uint64           status = hdrp->IProcStatusCntxAndTrType;
36119 +
36120 +       if (trap->tr_identifyTrans == TR_TRANS_INVALID)
36121 +       {
36122 +           switch (IPROC_TransactionType (status) & (TR_OPCODE_MASK | TR_SIZE_MASK))
36123 +           {
36124 +           case TR_IDENTIFY          & (TR_OPCODE_MASK | TR_SIZE_MASK):
36125 +           case TR_REMOTEDMA         & (TR_OPCODE_MASK | TR_SIZE_MASK):
36126 +           case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK):
36127 +           case TR_INPUT_Q_COMMIT    & (TR_OPCODE_MASK | TR_SIZE_MASK):
36128 +           case TR_ADDWORD           & (TR_OPCODE_MASK | TR_SIZE_MASK):
36129 +           case TR_TESTANDWRITE      & (TR_OPCODE_MASK | TR_SIZE_MASK):
36130 +               trap->tr_identifyTrans = i;
36131 +               break;
36132 +           }
36133 +       }
36134 +
36135 +       if (IPROC_TrapValue(status) == InputNoFault)            /* We're looking at transactions stored before the trap */
36136 +           continue;                                           /* these should only be identifies */
36137 +       
36138 +       if (trap->tr_trappedTrans == TR_TRANS_INVALID)          /* Remember the transaction which caused the */
36139 +           trap->tr_trappedTrans = i;                          /* trap */
36140 +
36141 +       if (IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status)))
36142 +           trap->tr_flags |= TR_FLAG_ACK_SENT;
36143 +           
36144 +       if (IPROC_EOPTrap(status))                              /* Check for EOP */
36145 +       {
36146 +           ASSERT (i == trap->tr_numTransactions - 1);
36147 +
36148 +           switch (IPROC_EOPType(status))
36149 +           {
36150 +           case EOP_GOOD:
36151 +               /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */  
36152 +               /* unless it was a flood, in which case someone must have sent an ack */
36153 +               /* but not necessarily us */
36154 +               break;
36155 +
36156 +           case EOP_BADACK:
36157 +               /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if
36158 +                * we sent a PAckOk. Flag this to ignore the AckSent. */
36159 +               trap->tr_flags |= TR_FLAG_EOP_BAD;
36160 +               break;
36161 +
36162 +           case EOP_ERROR_RESET:
36163 +               /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */
36164 +               trap->tr_flags |= TR_FLAG_EOP_ERROR;
36165 +               break;
36166 +
36167 +           default:
36168 +               printk ("elan4_inspect_iproc_trap: unknown eop type %d", IPROC_EOPType(status));
36169 +               BUG();
36170 +               /* NOTREACHED */
36171 +           }
36172 +           continue;
36173 +       }
36174 +       else
36175 +       {
36176 +           if (IPROC_BadLength(status) || (IPROC_TransCRCStatus (status) == CRC_STATUS_ERROR ||
36177 +                                           IPROC_TransCRCStatus (status) == CRC_STATUS_BAD))
36178 +           {
36179 +               {
36180 +                   register int j;
36181 +                   if (IPROC_BadLength(status))
36182 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped on bad length data. status=%016llx Address=%016llx\n",
36183 +                                status, hdrp->TrAddr);
36184 +                   else
36185 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped with bad CRC. status=%016llx Address=%016llx\n",
36186 +                                status, hdrp->TrAddr);
36187 +                   for (j = 0; j < TRANS_DATA_DWORDS; j++)
36188 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: DataBuffers[%d] : %016llx\n", j, trap->tr_dataBuffers[i].Data[j]);
36189 +               }
36190 +
36191 +               trap->tr_flags |= TR_FLAG_BAD_TRANS;
36192 +               continue;
36193 +           }
36194 +           
36195 +           if (IPROC_TransCRCStatus (status) == CRC_STATUS_DISCARD)
36196 +               continue;
36197 +
36198 +           if ((((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK) ||
36199 +                (IPROC_TransactionType(status) == TR_TRACEROUTE_TRANS)) &&
36200 +               (trap->tr_flags & TR_FLAG_ACK_SENT) && trap->tr_identifyTrans == TR_TRANS_INVALID)
36201 +           {
36202 +               /* 
36203 +                * Writeblock after the ack is sent without an identify transaction - this is 
36204 +                * considered to be a DMA packet and requires the next packet to be nacked - since 
36205 +                * the DMA processor will send this in a deterministic time and there's an upper 
36206 +                * limit on the network latency (the output timeout) we just need to hold the context 
36207 +                * filter up for a while.
36208 +                */
36209 +               trap->tr_flags |= TR_FLAG_DMA_PACKET;
36210 +           }
36211 +           
36212 +           if (IPROC_LastTrans(status) && (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP))
36213 +           {
36214 +               /*
36215 +                * WaitForEop transactions - if we have to do network error fixup
36216 +                * then we may need to execute/ignore this transaction dependent
36217 +                * on whether the source will be resending it.
36218 +                */
36219 +               trap->tr_waitForEopTrans = i;
36220 +           }
36221 +
36222 +           /*
36223 +            * This is a special case caused by a minor input processor bug.
36224 +            * If simultaneous InputMemoryFault and InputEventEngineTrapped occur then the chip will probably return
36225 +            * InputEventEngineTrapped even though the write of the back pointer has not occurred and must be done by
36226 +            * the trap handler.
36227 +            * In this case the fault address will equal q->q_bptr. If there has been only EventEngineTrap then the
36228 +            * the fault address should be zero as the trap handler now always zeros this after every input trap.
36229 +            */
36230 +           if ((IPROC_TransactionType (status) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) &&
36231 +               trap->tr_faultarea.FaultAddress == hdrp->TrAddr + offsetof(E4_InputQueue, q_bptr) &&
36232 +               IPROC_TrapValue(status) == InputEventEngineTrapped)
36233 +           {
36234 +               hdrp->IProcStatusCntxAndTrType = (status & 0xFFFFFFF0FFFFFFFFull) | ((E4_uint64) InputMemoryFault << 32);
36235 +           }
36236 +       }
36237 +
36238 +       PRINTF (DBG_DEVICE, DBG_INTR, "inspect[%d] status=%llx TrapValue=%d -> flags %x\n", i, status, IPROC_TrapValue(status), trap->tr_flags);
36239 +    }
36240 +}
36241 +
36242 +E4_uint64
36243 +elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq)
36244 +{
36245 +    sdramaddr_t cqdesc     = dev->dev_cqaddr + elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc);
36246 +    E4_uint64   cqcontrol  = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
36247 +    E4_uint32   extractOff = CQ_ExtractPtr (cqcontrol) & (CQ_Size(cq->cq_size)-1);
36248 +    
36249 +    if (extractOff == 0)
36250 +       extractOff = CQ_Size(cq->cq_size) - sizeof (E4_uint64);
36251 +    else
36252 +       extractOff -= sizeof (E4_uint64);
36253 +
36254 +    return (elan4_sdram_readq (dev, cq->cq_space + extractOff));
36255 +}
36256 +
36257 +EXPORT_SYMBOL(elan4_extract_eproc_trap);
36258 +EXPORT_SYMBOL(elan4_display_eproc_trap);
36259 +EXPORT_SYMBOL(elan4_extract_cproc_trap);
36260 +EXPORT_SYMBOL(elan4_display_cproc_trap);
36261 +EXPORT_SYMBOL(elan4_extract_dproc_trap);
36262 +EXPORT_SYMBOL(elan4_display_dproc_trap);
36263 +EXPORT_SYMBOL(elan4_extract_tproc_trap);
36264 +EXPORT_SYMBOL(elan4_display_tproc_trap);
36265 +EXPORT_SYMBOL(elan4_extract_iproc_trap);
36266 +EXPORT_SYMBOL(elan4_inspect_iproc_trap);
36267 +EXPORT_SYMBOL(elan4_display_iproc_trap);
36268 +
36269 +
36270 +/*
36271 + * Local variables:
36272 + * c-file-style: "stroustrup"
36273 + * End:
36274 + */
36275 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/user.c
36276 ===================================================================
36277 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/user.c       2004-02-23 16:02:56.000000000 -0500
36278 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/user.c    2005-07-28 14:52:52.846679040 -0400
36279 @@ -0,0 +1,3362 @@
36280 +/*
36281 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
36282 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
36283 + * 
36284 + *    For licensing information please see the supplied COPYING file
36285 + *
36286 + */
36287 +
36288 +#ident "@(#)$Id: user.c,v 1.68.2.11 2005/03/09 12:00:09 addy Exp $"
36289 +/*      $Source: /cvs/master/quadrics/elan4mod/user.c,v $*/
36290 +
36291 +#include <qsnet/kernel.h>
36292 +#include <qsnet/kpte.h>
36293 +
36294 +#include <elan/elanmod.h>
36295 +#include <elan4/debug.h>
36296 +#include <elan4/device.h>
36297 +#include <elan4/user.h>
36298 +
36299 +#include <elan4/trtype.h>
36300 +#include <elan4/commands.h>
36301 +
36302 +#include <stdarg.h>
36303 +
36304 +/* allow this code to compile against an Eagle elanmod */
36305 +#ifdef __ELANMOD_DEVICE_H
36306 +#define elan_attach_cap(cap,rnum,args,func)    elanmod_attach_cap(cap,args,func)
36307 +#define elan_detach_cap(cap,rnum)              elanmod_detach_cap(cap)
36308 +#endif
36309 +
36310 +#define NETERR_MSGS    16
36311 +
36312 +int user_p2p_route_options   = FIRST_TIMEOUT(3);
36313 +int user_bcast_route_options = FIRST_TIMEOUT(3);
36314 +int user_dproc_retry_count   = 15;
36315 +int user_cproc_retry_count   = 2;
36316 +
36317 +int num_fault_save           = 30;
36318 +int min_fault_pages          = 1;
36319 +int max_fault_pages          = 128;
36320 +
36321 +static int
36322 +user_validate_cap (USER_CTXT *uctx, ELAN_CAPABILITY *cap, unsigned use)
36323 +{
36324 +    /* Don't allow a user process to attach to system context */
36325 +    if (ELAN4_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN4_SYSTEM_CONTEXT (cap->cap_highcontext))
36326 +    {
36327 +       PRINTF3 (DBG_DEVICE, DBG_VP,"user_validate_cap: lctx %x hctx %x high %x\n", cap->cap_lowcontext, cap->cap_highcontext, ELAN4_KCOMM_BASE_CONTEXT_NUM);
36328 +       PRINTF0 (DBG_DEVICE, DBG_VP,"user_validate_cap: user process cant attach to system cap\n");
36329 +       return (EINVAL);
36330 +    }
36331 +    
36332 +    return elanmod_classify_cap(&uctx->uctx_position, cap, use);
36333 +}
36334 +
36335 +static __inline__ void
36336 +__user_signal_trap (USER_CTXT *uctx)
36337 +{
36338 +    switch (uctx->uctx_trap_state)
36339 +    {
36340 +    case UCTX_TRAP_IDLE:
36341 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: deliver signal %d to pid %d\n", uctx->uctx_trap_signo, uctx->uctx_trap_pid);
36342 +
36343 +       if (uctx->uctx_trap_signo)
36344 +           kill_proc (uctx->uctx_trap_pid, uctx->uctx_trap_signo, 1);
36345 +       break;
36346 +
36347 +    case UCTX_TRAP_SLEEPING:
36348 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: wakeup sleeping trap handler\n");
36349 +
36350 +       kcondvar_wakeupone (&uctx->uctx_wait, &uctx->uctx_spinlock);
36351 +       break;
36352 +    }
36353 +    uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
36354 +}
36355 +
36356 +static void
36357 +user_signal_timer (unsigned long arg)
36358 +{
36359 +    USER_CTXT    *uctx = (USER_CTXT *) arg;
36360 +    unsigned long flags;
36361 +
36362 +    PRINTF (uctx, DBG_TRAP, "user_signal_timer: state=%d pid=%d signal=%d (now %d start %d)\n",
36363 +           uctx->uctx_trap_state, uctx->uctx_trap_pid, uctx->uctx_trap_signo, jiffies,
36364 +           uctx->uctx_int_start);
36365 +
36366 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36367 +    __user_signal_trap (uctx);
36368 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36369 +}
36370 +
36371 +#define MAX_INTS_PER_TICK      50
36372 +#define MIN_INTS_PER_TICK      20
36373 +
36374 +static void
36375 +user_signal_trap (USER_CTXT *uctx)
36376 +{
36377 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
36378 +
36379 +    PRINTF (uctx, DBG_TRAP, "user_signal_trap: state=%d pid=%d signal=%d%s\n", uctx->uctx_trap_state,
36380 +           uctx->uctx_trap_pid, uctx->uctx_trap_signo, timer_pending(&uctx->uctx_int_timer) ? " (timer-pending)" : "");
36381 +
36382 +    uctx->uctx_int_count++;
36383 +
36384 +    if (timer_pending (&uctx->uctx_int_timer))
36385 +       return;
36386 +
36387 +    if (uctx->uctx_int_count > ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK))
36388 +    {
36389 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: deferring signal for %d ticks (count %d ticks %d -> %d)\n", 
36390 +               uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start),
36391 +               ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK));
36392 +
36393 +       /* We're interrupting too fast, so defer this signal */
36394 +       uctx->uctx_int_timer.expires = jiffies + (++uctx->uctx_int_delay);
36395 +
36396 +       add_timer (&uctx->uctx_int_timer);
36397 +    }
36398 +    else
36399 +    {
36400 +       __user_signal_trap (uctx);
36401 +
36402 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: check signal for %d ticks (count %d ticks %d -> %d)\n", 
36403 +               uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start),
36404 +               (int)(jiffies - uctx->uctx_int_start) * MIN_INTS_PER_TICK);
36405 +           
36406 +       if (uctx->uctx_int_count < ((int) (jiffies - uctx->uctx_int_start)) * MIN_INTS_PER_TICK)
36407 +       {
36408 +           PRINTF (uctx, DBG_TRAP, "user_signal_trap: reset interrupt throttle (count %d ticks %d)\n", 
36409 +                   uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start));
36410 +
36411 +           uctx->uctx_int_start = jiffies;
36412 +           uctx->uctx_int_count = 0;
36413 +           uctx->uctx_int_delay = 0;
36414 +       }
36415 +    }
36416 +}
36417 +
36418 +static void
36419 +user_neterr_timer (unsigned long arg)
36420 +{
36421 +    USER_CTXT *uctx = (USER_CTXT *) arg;
36422 +    unsigned long flags;
36423 +    
36424 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36425 +
36426 +    uctx->uctx_status |= UCTX_NETERR_TIMER;
36427 +    
36428 +    user_signal_trap (uctx);
36429 +
36430 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36431 +}
36432 +
36433 +static void
36434 +user_flush_dma_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull)
36435 +{
36436 +    E4_uint64          qptrs = read_reg64 (dev, DProcLowPriPtrs);
36437 +    E4_uint32          qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
36438 +    E4_uint32          qfptr = E4_QueueFrontPointer (qptrs);
36439 +    E4_uint32          qbptr = E4_QueueBackPointer (qptrs);
36440 +    E4_DProcQueueEntry qentry;
36441 +
36442 +    while ((qfptr != qbptr) || qfull)
36443 +    {
36444 +       E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize));
36445 +
36446 +       if (DMA_Context (typeSize) == uctx->uctx_ctxt.ctxt_num)
36447 +       {
36448 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry));
36449 +
36450 +           PRINTF4 (uctx, DBG_SWAP, "user_flush_dma_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Desc.dma_typeSize, 
36451 +                    qentry.Desc.dma_cookie, qentry.Desc.dma_vproc, qentry.Desc.dma_srcAddr);
36452 +           PRINTF3 (uctx, DBG_SWAP, "                         %016llx %016llx %016llx\n", qentry.Desc.dma_dstAddr, 
36453 +                    qentry.Desc.dma_srcEvent, qentry.Desc.dma_dstEvent);
36454 +
36455 +           if (RING_QUEUE_REALLY_FULL (uctx->uctx_dmaQ))
36456 +               uctx->uctx_status |= UCTX_DPROC_QUEUE_OVERFLOW;
36457 +           else
36458 +           {
36459 +               *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = qentry.Desc;
36460 +               (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
36461 +           }
36462 +           
36463 +           qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
36464 +           qentry.Desc.dma_cookie   = 0;
36465 +           qentry.Desc.dma_vproc    = 0;
36466 +           qentry.Desc.dma_srcAddr  = 0;
36467 +           qentry.Desc.dma_dstAddr  = 0;
36468 +           qentry.Desc.dma_srcEvent = 0;
36469 +           qentry.Desc.dma_dstEvent = 0;
36470 +
36471 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
36472 +       }
36473 +
36474 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
36475 +       qfull = 0;
36476 +    }
36477 +}
36478 +
36479 +static void
36480 +user_flush_thread_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull)
36481 +{
36482 +    E4_uint64          qptrs = read_reg64 (dev, TProcLowPriPtrs);
36483 +    E4_uint32          qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
36484 +    E4_uint32          qfptr = E4_QueueFrontPointer (qptrs);
36485 +    E4_uint32          qbptr = E4_QueueBackPointer (qptrs);
36486 +    E4_TProcQueueEntry qentry;
36487 +
36488 +    while ((qfptr != qbptr) || qfull)
36489 +    {
36490 +       E4_uint64 context = elan4_sdram_readq (dev, qfptr + offsetof (E4_TProcQueueEntry, Context));
36491 +
36492 +       if (TPROC_Context (context) == uctx->uctx_ctxt.ctxt_num)
36493 +       {
36494 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_TProcQueueEntry));
36495 +
36496 +           PRINTF (uctx, DBG_SWAP, "user_flush_thread_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Regs.Registers[0],
36497 +                   qentry.Regs.Registers[1], qentry.Regs.Registers[2], qentry.Regs.Registers[3]);
36498 +           PRINTF (uctx, DBG_SWAP, "                            %016llx %016llx %016llx\n", 
36499 +                   qentry.Regs.Registers[4], qentry.Regs.Registers[5], qentry.Regs.Registers[6]);
36500 +
36501 +           if (RING_QUEUE_REALLY_FULL (uctx->uctx_threadQ))
36502 +               uctx->uctx_status |= UCTX_TPROC_QUEUE_OVERFLOW;
36503 +           else
36504 +           {
36505 +               *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = qentry.Regs;
36506 +               (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
36507 +           }
36508 +           
36509 +           /* change the thread to execute the suspend sequence */
36510 +           qentry.Regs.Registers[0] = dev->dev_tproc_suspend;
36511 +           qentry.Regs.Registers[1] = dev->dev_tproc_space;
36512 +           qentry.Context           = dev->dev_ctxt.ctxt_num;
36513 +
36514 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_TProcQueueEntry));
36515 +       }
36516 +       
36517 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_TProcQueueEntry)) & (qsize-1));
36518 +       qfull = 0;
36519 +    }
36520 +}
36521 +
36522 +static void
36523 +user_flush_dmas (ELAN4_DEV *dev, void *arg, int qfull)
36524 +{
36525 +    USER_CTXT        *uctx = (USER_CTXT *) arg;
36526 +    unsigned long     flags;
36527 +    
36528 +    ASSERT ((read_reg32 (dev, InterruptReg) & INT_DProcHalted) != 0);
36529 +
36530 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36531 +
36532 +    if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
36533 +    {
36534 +       PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: status %x - no more reasons\n", uctx->uctx_status);
36535 +
36536 +       uctx->uctx_status &= ~UCTX_STOPPING;
36537 +
36538 +       user_signal_trap (uctx);
36539 +    }
36540 +    else
36541 +    {
36542 +       user_flush_dma_runqueue (dev, uctx, qfull);
36543 +
36544 +       uctx->uctx_status = (uctx->uctx_status | UCTX_STOPPED) & ~UCTX_STOPPING;
36545 +    
36546 +       PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: status %x - stopped\n", uctx->uctx_status);
36547 +
36548 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
36549 +    }
36550 +
36551 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36552 +}
36553 +
36554 +static void
36555 +user_flush (ELAN4_DEV *dev, void *arg)
36556 +{
36557 +    USER_CTXT        *uctx = (USER_CTXT *) arg;
36558 +    struct list_head *entry;
36559 +    unsigned long     flags;
36560 +
36561 +    ASSERT ((read_reg32 (dev, InterruptReg) & (INT_Halted|INT_Discarding)) == (INT_Halted|INT_Discarding));
36562 +
36563 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36564 +
36565 +    if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
36566 +    {
36567 +       PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - no more reasons\n", uctx->uctx_status);
36568 +
36569 +       uctx->uctx_status &= ~UCTX_STOPPING;
36570 +
36571 +       user_signal_trap (uctx);
36572 +    }
36573 +    else
36574 +    {
36575 +       PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - flushing context\n", uctx->uctx_status);
36576 +
36577 +       list_for_each (entry, &uctx->uctx_cqlist) {
36578 +           USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
36579 +
36580 +           if (ucq->ucq_state == UCQ_RUNNING)
36581 +           {
36582 +               /* NOTE: since the inserter can still be running we modify the permissions
36583 +                *       to zero then when the extractor starts up again it will trap */
36584 +               PRINTF1 (uctx, DBG_SWAP, "user_flush: stopping cq indx=%d\n", elan4_cq2idx(ucq->ucq_cq));
36585 +
36586 +               elan4_updatecq (dev, ucq->ucq_cq, 0, 0);
36587 +           }
36588 +       }
36589 +       
36590 +       user_flush_thread_runqueue (dev, uctx, TPROC_LowRunQueueFull(read_reg64 (dev, TProcStatus)));
36591 +
36592 +       /* since we can't determine whether the dma run queue is full or empty, we use a dma
36593 +        * halt operation to do the flushing - as the reason for halting the dma processor 
36594 +        * will be released when we return, we keep it halted until the flush has completed */
36595 +       elan4_queue_dma_flushop (dev, &uctx->uctx_dma_flushop, 0);
36596 +
36597 +       if (uctx->uctx_status & UCTX_EXITING)
36598 +           elan4_flush_icache_halted (&uctx->uctx_ctxt);
36599 +    }
36600 +
36601 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36602 +}
36603 +
36604 +static void
36605 +user_set_filter (USER_CTXT *uctx, E4_uint32 state)
36606 +{
36607 +    struct list_head *entry;
36608 +
36609 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
36610 +
36611 +    list_for_each (entry, &uctx->uctx_cent_list) {
36612 +       USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link);
36613 +
36614 +       elan4_set_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext, state);
36615 +    }
36616 +}
36617 +
36618 +static void
36619 +user_start_nacking (USER_CTXT *uctx, unsigned reason)
36620 +{
36621 +    PRINTF2 (uctx, DBG_SWAP, "user_start_nacking: status %x reason %x\n", uctx->uctx_status, reason);
36622 +
36623 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
36624 +
36625 +    if (UCTX_NACKING(uctx))
36626 +       uctx->uctx_status |= reason;
36627 +    else
36628 +    {
36629 +       uctx->uctx_status |= reason;
36630 +
36631 +       user_set_filter (uctx, E4_FILTER_STATS | E4_FILTER_DISCARD_ALL);
36632 +    }
36633 +}
36634 +
36635 +static void
36636 +user_stop_nacking (USER_CTXT *uctx, unsigned reason)
36637 +{
36638 +    PRINTF2 (uctx, DBG_SWAP, "user_stop_nacking: status %x reason %x\n", uctx->uctx_status, reason);
36639 +    
36640 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
36641 +    
36642 +    uctx->uctx_status &= ~reason;
36643 +    
36644 +    if (! UCTX_NACKING (uctx))
36645 +       user_set_filter (uctx, E4_FILTER_STATS);
36646 +}
36647 +
36648 +static void
36649 +user_start_stopping (USER_CTXT *uctx, unsigned reason)
36650 +{
36651 +    ELAN4_DEV *dev =uctx->uctx_ctxt.ctxt_dev;
36652 +
36653 +    PRINTF2 (uctx, DBG_SWAP, "user_start_stopping: status %x reason %x\n", uctx->uctx_status, reason);
36654 +
36655 +    ASSERT (! (uctx->uctx_status & UCTX_STOPPED));
36656 +
36657 +    user_start_nacking (uctx, reason);
36658 +    
36659 +    if ((uctx->uctx_status & UCTX_STOPPING) != 0)
36660 +       return;
36661 +    
36662 +    uctx->uctx_status |= UCTX_STOPPING;
36663 +
36664 +    /* queue the halt operation to  remove all threads/dmas/cqs from the run queues */
36665 +    /*    and also flush through the context filter change */
36666 +    elan4_queue_haltop (dev, &uctx->uctx_haltop);
36667 +}
36668 +
36669 +static void
36670 +user_stop_stopping (USER_CTXT *uctx, unsigned reason)
36671 +{
36672 +    PRINTF2 (uctx, DBG_SWAP, "user_stop_stopping: status %x reason %x\n", uctx->uctx_status, reason);
36673 +    
36674 +    user_stop_nacking (uctx, reason);
36675 +
36676 +    if (UCTX_RUNNABLE (uctx))
36677 +    {
36678 +       uctx->uctx_status &= ~UCTX_STOPPED;
36679 +
36680 +       PRINTF1 (uctx, DBG_SWAP, "user_stop_stopping: no more reasons => %x\n", uctx->uctx_status);
36681 +
36682 +       user_signal_trap (uctx);
36683 +    }
36684 +}
36685 +
36686 +void
36687 +user_swapout (USER_CTXT *uctx, unsigned reason)
36688 +{
36689 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
36690 +    unsigned long flags;
36691 +    
36692 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36693 +    
36694 +    PRINTF2 (uctx, DBG_SWAP, "user_swapout: status %x reason %x\n", uctx->uctx_status, reason);
36695 +    
36696 +    user_start_nacking (uctx, reason);
36697 +    
36698 +    while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING) &&                /* wait for someone else to finish */
36699 +          uctx->uctx_trap_count > 0)                                           /* and for trap handlers to notice */
36700 +    {                                                                          /* and exit */
36701 +       PRINTF1 (uctx, DBG_SWAP, "user_swapout: waiting for %d trap handlers to exit/previous swapout\n", uctx->uctx_trap_count);
36702 +
36703 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
36704 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);
36705 +    }
36706 +
36707 +    if (uctx->uctx_status & UCTX_SWAPPED)                                      /* already swapped out */
36708 +    {
36709 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36710 +       return;
36711 +    }
36712 +    
36713 +    uctx->uctx_status |= (UCTX_SWAPPING|UCTX_STOPPING);                                /* mark the context as swapping & stopping */
36714 +    
36715 +    /* queue the halt operation to  remove all threads/dmas/cqs from the run queues */
36716 +    /*    and also flush through the context filter change */
36717 +    elan4_queue_haltop (dev, &uctx->uctx_haltop);
36718 +    
36719 +    while (! (uctx->uctx_status & UCTX_STOPPED))
36720 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);
36721 +
36722 +    /* all state has been removed from the elan - we can now "tidy" it up */
36723 +
36724 +    PRINTF0 (uctx, DBG_SWAP, "user_swapout: swapped out\n");
36725 +    
36726 +    uctx->uctx_status = (uctx->uctx_status & ~UCTX_SWAPPING) | UCTX_SWAPPED;
36727 +    
36728 +    kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
36729 +
36730 +    PRINTF1 (uctx, DBG_SWAP, "user_swapout: all done - status %x\n", uctx->uctx_status);
36731 +
36732 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36733 +}
36734 +
36735 +void
36736 +user_swapin (USER_CTXT *uctx, unsigned reason)
36737 +{
36738 +    unsigned long flags;
36739 +
36740 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36741 +
36742 +    ASSERT (uctx->uctx_status & UCTX_SWAPPED_REASONS);
36743 +
36744 +    PRINTF2 (uctx, DBG_SWAP, "user_swapin: status %x reason %x\n", uctx->uctx_status, reason);
36745 +
36746 +    while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING))                  /* wait until other threads have */
36747 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);         /* completed their swap operation */
36748 +
36749 +    ASSERT (uctx->uctx_status & (UCTX_SWAPPED | UCTX_STOPPED));
36750 +
36751 +    user_stop_nacking (uctx, reason);
36752 +
36753 +    if (! (uctx->uctx_status & UCTX_SWAPPED_REASONS))
36754 +    {
36755 +       uctx->uctx_status &= ~UCTX_SWAPPED;
36756 +
36757 +       /* no longer swapped out - wakeup anyone sleeping waiting for swapin */
36758 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
36759 +
36760 +       if (! (uctx->uctx_status & UCTX_STOPPED_REASONS))
36761 +       {
36762 +           uctx->uctx_status &= ~UCTX_STOPPED;
36763 +           user_signal_trap (uctx);
36764 +       }
36765 +    }
36766 +
36767 +    PRINTF1 (uctx, DBG_SWAP, "user_swapin: all done - status %x\n", uctx->uctx_status);
36768 +
36769 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36770 +}
36771 +
36772 +void
36773 +user_destroy_callback (void *arg, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
36774 +{
36775 +    USER_CTXT *uctx = (USER_CTXT *) arg;
36776 +
36777 +    PRINTF (uctx, DBG_VP, "user_destroy_callback: %s\n", map == NULL ? "cap destroyed" : "map destroyed");
36778 +}
36779 +
36780 +int
36781 +user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap)
36782 +{
36783 +    ELAN4_DEV       *dev = uctx->uctx_ctxt.ctxt_dev;
36784 +    USER_CTXT_ENTRY *cent;
36785 +    unsigned long flags;
36786 +    int ctype, res;
36787 +    
36788 +    if ((ctype = user_validate_cap (uctx, cap, ELAN_USER_ATTACH)) < 0)
36789 +       return ctype;
36790 +
36791 +    if ((ctype == ELAN_CAP_RMS) && (res = elan_attach_cap (cap, dev->dev_devinfo.dev_rail, uctx, user_destroy_callback)) != 0)
36792 +    {
36793 +       /* NOTE: elan_attach_cap returns +ve errnos */
36794 +       return -res;
36795 +    }
36796 +
36797 +    KMEM_ALLOC (cent, USER_CTXT_ENTRY *, sizeof (USER_CTXT_ENTRY), 1);
36798 +    if (cent == NULL)
36799 +    {
36800 +       if (ctype == ELAN_CAP_RMS)
36801 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
36802 +
36803 +       return -ENOMEM;
36804 +    }
36805 +
36806 +    KMEM_ALLOC (cent->cent_cap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1);
36807 +    if (cent->cent_cap == NULL)
36808 +    {
36809 +       if (ctype == ELAN_CAP_RMS)
36810 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
36811 +
36812 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
36813 +       return -ENOMEM;
36814 +    }
36815 +
36816 +    memcpy (cent->cent_cap, cap, ELAN_CAP_SIZE(cap));
36817 +
36818 +    if ((res = elan4_attach_filter (&uctx->uctx_ctxt, cap->cap_mycontext)) != 0)
36819 +    {
36820 +       if (ctype == ELAN_CAP_RMS)
36821 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
36822 +       
36823 +       KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cap));
36824 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
36825 +
36826 +       return res;
36827 +    }
36828 +
36829 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36830 +
36831 +    list_add_tail (&cent->cent_link, &uctx->uctx_cent_list);
36832 +
36833 +    if (! UCTX_NACKING (uctx))
36834 +       user_set_filter (uctx, E4_FILTER_STATS);
36835 +
36836 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36837 +
36838 +    return (0);
36839 +    
36840 +}
36841 +
36842 +void
36843 +user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap)
36844 +{
36845 +    ELAN4_DEV         *dev = uctx->uctx_ctxt.ctxt_dev;
36846 +    struct list_head  *entry;
36847 +    struct list_head  *next;
36848 +    struct list_head   list;
36849 +    unsigned long      flags;
36850 +
36851 +    INIT_LIST_HEAD (&list);
36852 +
36853 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36854 +    
36855 +    PRINTF (uctx, DBG_NETWORK_CTX, cap ? "user_detach: network context %d\n" : "user_detach: all network contexts\n", cap ? cap->cap_mycontext : 0);
36856 +
36857 +    list_for_each_safe (entry, next, &uctx->uctx_cent_list) {
36858 +       USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link);
36859 +
36860 +       if (cap == NULL || ELAN_CAP_MATCH (cap, cent->cent_cap))
36861 +       {
36862 +           PRINTF1 (uctx, DBG_NETWORK_CTX, "user_detach: detach from network context %d\n", cent->cent_cap->cap_mycontext);
36863 +           
36864 +           elan4_detach_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext);
36865 +
36866 +           list_del (&cent->cent_link);
36867 +           list_add_tail (&cent->cent_link, &list);
36868 +       }
36869 +    }
36870 +
36871 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36872 +
36873 +    while (! list_empty (&list))
36874 +    {
36875 +       USER_CTXT_ENTRY *cent = list_entry (list.next, USER_CTXT_ENTRY, cent_link);
36876 +
36877 +       list_del (&cent->cent_link);
36878 +
36879 +       if (user_validate_cap (uctx, cent->cent_cap, ELAN_USER_DETACH) == ELAN_CAP_RMS)
36880 +           elan_detach_cap (cent->cent_cap, dev->dev_devinfo.dev_rail); 
36881 +       
36882 +       KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cent->cent_cap));
36883 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
36884 +    }
36885 +}
36886 +
36887 +void
36888 +user_block_inputter (USER_CTXT *uctx, unsigned blocked)
36889 +{
36890 +    unsigned long flags;
36891 +    int isblocked;
36892 +
36893 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
36894 +    
36895 +    isblocked = (uctx->uctx_status & UCTX_USER_FILTERING);
36896 +
36897 +    if (blocked && !isblocked)
36898 +       user_start_nacking (uctx, UCTX_USER_FILTERING);
36899 +
36900 +    if (!blocked && isblocked)
36901 +       user_stop_nacking (uctx, UCTX_USER_FILTERING);
36902 +
36903 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
36904 +}
36905 +
36906 +static USER_VPSEG *
36907 +user_install_vpseg (USER_CTXT *uctx, unsigned process, unsigned entries)
36908 +{
36909 +    struct list_head *entry;
36910 +    USER_VPSEG       *seg;
36911 +
36912 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
36913 +
36914 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
36915 +       seg = list_entry (entry, USER_VPSEG, vps_link);
36916 +
36917 +       if (process <= (seg->vps_process + seg->vps_entries-1) && 
36918 +           (process + entries - 1) >= seg->vps_process)
36919 +           return ((USER_VPSEG *) NULL);
36920 +    }
36921 +
36922 +    KMEM_ZALLOC (seg, USER_VPSEG *, sizeof (USER_VPSEG), 1);
36923 +    
36924 +    if (seg == (USER_VPSEG *) NULL)
36925 +       return ((USER_VPSEG *) NULL);
36926 +
36927 +    seg->vps_process = process;
36928 +    seg->vps_entries = entries;
36929 +
36930 +    list_add_tail (&seg->vps_link, &uctx->uctx_vpseg_list);
36931 +
36932 +    return (seg);
36933 +}
36934 +
36935 +static void
36936 +user_remove_vpseg (USER_CTXT *uctx, USER_VPSEG *seg)
36937 +{
36938 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
36939 +
36940 +    list_del (&seg->vps_link);
36941 +    
36942 +    switch (seg->vps_type)
36943 +    {
36944 +    case USER_VPSEG_P2P:
36945 +       /* These pointers (union) are only valid for P2P segs */
36946 +       if (seg->vps_p2p_routes)
36947 +           KMEM_FREE (seg->vps_p2p_routes, sizeof (E4_VirtualProcessEntry) * seg->vps_entries);
36948 +       
36949 +       if (seg->vps_p2p_cap)
36950 +           KMEM_FREE (seg->vps_p2p_cap, ELAN_CAP_SIZE(seg->vps_p2p_cap));
36951 +
36952 +       break;
36953 +       
36954 +    case USER_VPSEG_BCAST:
36955 +       ;
36956 +    }
36957 +
36958 +    KMEM_FREE (seg, sizeof (USER_VPSEG));
36959 +}
36960 +
36961 +static USER_VPSEG *
36962 +user_find_vpseg (USER_CTXT *uctx, unsigned low, unsigned high)
36963 +{
36964 +    struct list_head *entry;
36965 +
36966 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
36967 +
36968 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
36969 +       USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link);
36970 +
36971 +       if (seg->vps_process <= low && (seg->vps_process + seg->vps_entries) > high)
36972 +           return (seg);
36973 +    }
36974 +
36975 +    return ((USER_VPSEG *) NULL);
36976 +}
36977 +
36978 +static ELAN_LOCATION 
36979 +user_process2location (USER_CTXT *uctx, USER_VPSEG *seg, unsigned process)
36980 +{
36981 +    ELAN_LOCATION location;
36982 +    int           nnodes, nctxs;
36983 +    int           nodeOff, ctxOff, vpOff;
36984 +
36985 +    location.loc_node    = ELAN_INVALID_NODE;
36986 +    location.loc_context = -1;
36987 +
36988 +    if (seg == NULL)
36989 +       seg = user_find_vpseg (uctx, process, process);
36990 +
36991 +    if (seg == NULL || (seg->vps_type != USER_VPSEG_P2P))
36992 +       return (location);
36993 +
36994 +    nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap);
36995 +    nctxs  = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap);
36996 +
36997 +    switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK)
36998 +    {
36999 +    case ELAN_CAP_TYPE_BLOCK:
37000 +       for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
37001 +       {
37002 +           for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
37003 +           {
37004 +               if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
37005 +               {
37006 +                   if (vpOff++ == (process - seg->vps_process))
37007 +                   { 
37008 +                       location.loc_node    = seg->vps_p2p_cap->cap_lownode + nodeOff;
37009 +                       location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff;
37010 +                       goto found;
37011 +                   }
37012 +               }
37013 +           }
37014 +       }
37015 +       break;
37016 +       
37017 +    case ELAN_CAP_TYPE_CYCLIC:
37018 +       for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
37019 +       {
37020 +           for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
37021 +           {
37022 +               if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
37023 +               {                                   
37024 +                   if (vpOff++ ==  (process - seg->vps_process))
37025 +                   { 
37026 +                       location.loc_node    = seg->vps_p2p_cap->cap_lownode + nodeOff;
37027 +                       location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff;
37028 +                       goto found;
37029 +                   }
37030 +               }
37031 +           }
37032 +       }
37033 +       break;  
37034 +    }
37035 +       
37036 + found:
37037 +    return (location);
37038 +}
37039 +
37040 +static unsigned 
37041 +user_location2process (USER_CTXT *uctx, ELAN_LOCATION location)
37042 +{
37043 +    unsigned int      process = ELAN_INVALID_PROCESS;
37044 +    struct list_head *entry;
37045 +    int               nnodes, nctxs;
37046 +    int               nodeOff, ctxOff, vpOff;
37047 +
37048 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37049 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
37050 +       USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link);
37051 +
37052 +       if (seg->vps_type != USER_VPSEG_P2P)
37053 +           continue;
37054 +
37055 +       if (location.loc_node >= seg->vps_p2p_cap->cap_lownode && location.loc_node <= seg->vps_p2p_cap->cap_highnode &&
37056 +           location.loc_context >= seg->vps_p2p_cap->cap_lowcontext && location.loc_context <= seg->vps_p2p_cap->cap_highcontext)
37057 +       {
37058 +           nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap);
37059 +           nctxs  = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap);
37060 +
37061 +           switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK)
37062 +           {
37063 +           case ELAN_CAP_TYPE_BLOCK:
37064 +               for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
37065 +               {
37066 +                   for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
37067 +                   {
37068 +                       if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
37069 +                       {
37070 +                           if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff &&
37071 +                               location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff)
37072 +                           {
37073 +                               process = seg->vps_process + vpOff;
37074 +                               goto found;
37075 +                           }
37076 +                           vpOff++;
37077 +                       }
37078 +                   }
37079 +               }
37080 +               break;
37081 +       
37082 +           case ELAN_CAP_TYPE_CYCLIC:
37083 +               for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
37084 +               {
37085 +                   for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
37086 +                   {
37087 +                       if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
37088 +                       {
37089 +                           if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff &&
37090 +                               location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff)
37091 +                           {
37092 +                               process = seg->vps_process + vpOff;
37093 +                               goto found;
37094 +                           }
37095 +                           vpOff++;
37096 +                       }
37097 +                   }
37098 +               }
37099 +               break;
37100 +           }
37101 +       }
37102 +    }
37103 + found:
37104 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37105 +
37106 +    return (process);
37107 +}
37108 +
37109 +static void
37110 +user_loadroute_vpseg (USER_CTXT *uctx, USER_VPSEG *seg, ELAN_POSITION *pos)
37111 +{
37112 +    ELAN4_DEV             *dev    = uctx->uctx_ctxt.ctxt_dev;
37113 +    ELAN_CAPABILITY       *cap    = seg->vps_p2p_cap;
37114 +    unsigned               nnodes = ELAN_CAP_NUM_NODES (cap);
37115 +    unsigned               nctxs  = ELAN_CAP_NUM_CONTEXTS (cap);
37116 +    E4_VirtualProcessEntry route;
37117 +    unsigned              nodeOff;
37118 +    unsigned              ctxOff;
37119 +    unsigned              vpOff;
37120 +
37121 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
37122 +    {
37123 +    case ELAN_CAP_TYPE_BLOCK:
37124 +       for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
37125 +       {
37126 +           for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
37127 +           {
37128 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
37129 +               {
37130 +                   if (seg->vps_p2p_routes != NULL)
37131 +                       route = seg->vps_p2p_routes[vpOff];
37132 +                   else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff,
37133 +                                                  cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0)
37134 +                   {
37135 +                       vpOff++;
37136 +                       continue;
37137 +                   }
37138 +
37139 +                   PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n",
37140 +                            seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff,
37141 +                            route.Values[0], route.Values[1]);
37142 +                   
37143 +                   elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route);
37144 +                                             
37145 +                   vpOff++;
37146 +               }
37147 +           }
37148 +       }
37149 +       break;
37150 +
37151 +    case ELAN_CAP_TYPE_CYCLIC:
37152 +       for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
37153 +       {
37154 +           for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
37155 +           {
37156 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
37157 +               {
37158 +                   if (seg->vps_p2p_routes != NULL)
37159 +                       route = seg->vps_p2p_routes[vpOff];
37160 +                   else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff,
37161 +                                                  cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0)
37162 +                   {
37163 +                       vpOff++;
37164 +                       continue;
37165 +                   }
37166 +
37167 +                   PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n",
37168 +                            seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff,
37169 +                            route.Values[0], route.Values[1]);
37170 +                   
37171 +                   elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route);
37172 +                                             
37173 +                   vpOff++;
37174 +               }
37175 +           }
37176 +       }
37177 +       break;
37178 +    }
37179 +}
37180 +
37181 +static int
37182 +user_loadroute_bcast (USER_CTXT *uctx, USER_VPSEG *seg)
37183 +{
37184 +    ELAN4_DEV             *dev = uctx->uctx_ctxt.ctxt_dev;
37185 +    ELAN_POSITION         *pos = &uctx->uctx_position;
37186 +    E4_VirtualProcessEntry route;
37187 +    USER_VPSEG            *aseg;
37188 +    int                    res;
37189 +    ELAN_LOCATION          low;
37190 +    ELAN_LOCATION          high;
37191 +
37192 +    if ((aseg = user_find_vpseg (uctx, seg->vps_bcast_lowvp, seg->vps_bcast_highvp)) == NULL || aseg->vps_type != USER_VPSEG_P2P)
37193 +       return (-EINVAL);
37194 +    
37195 +#ifdef use_elanmod
37196 +    if ((res = user_validate_cap (dev, aseg->vps_p2p_cap, ELAN_USER_BROADCAST)) < 0)
37197 +       return (res);
37198 +#endif
37199 +    
37200 +    low  = user_process2location (uctx, aseg, seg->vps_bcast_lowvp);
37201 +    high = user_process2location (uctx, aseg, seg->vps_bcast_highvp);
37202 +
37203 +    if (low.loc_context != high.loc_context)
37204 +       return (-EINVAL);
37205 +
37206 +    /* NOTE: if loopback can only broadcast to ourself - 
37207 +     *       if back-to-back can only broadcast to other node */
37208 +    if ((pos->pos_mode == ELAN_POS_MODE_LOOPBACK   && low.loc_node != high.loc_node && low.loc_node != pos->pos_nodeid) ||
37209 +       (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && low.loc_node != high.loc_node && low.loc_node == pos->pos_nodeid))
37210 +    {
37211 +       return (-EINVAL);
37212 +    }
37213 +    
37214 +    if ((res = elan4_generate_route (pos, &route, low.loc_context, low.loc_node, high.loc_node, user_bcast_route_options)) < 0)
37215 +       return (res);
37216 +
37217 +    PRINTF (uctx, DBG_VP, "user_loadroute_bcast: virtual process %d -> nodes %d.%d context %d [%016llx.%016llx]\n",
37218 +           seg->vps_process, low.loc_node, high.loc_node, low.loc_context, route.Values[0], route.Values[1]);
37219 +    
37220 +    elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process, &route);
37221 +    return (0);
37222 +}
37223 +
37224 +int
37225 +user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap)
37226 +{
37227 +    USER_VPSEG      *seg;
37228 +    ELAN_CAPABILITY *ncap;
37229 +    unsigned         entries;
37230 +
37231 +    if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) == 0)
37232 +       entries = bt_nbits (cap->cap_bitmap , ELAN_CAP_BITMAPSIZE(cap));
37233 +    else
37234 +       entries = ELAN_CAP_BITMAPSIZE(cap);
37235 +    
37236 +    if ((process + entries) > (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size))
37237 +       return (-EINVAL);
37238 +
37239 +    KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE (cap), 1);
37240 +
37241 +    if (ncap == NULL)
37242 +       return (-ENOMEM);
37243 +    
37244 +    memcpy (ncap, cap, ELAN_CAP_SIZE (cap));
37245 +
37246 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37247 +
37248 +    if ((seg = user_install_vpseg (uctx, process, entries)) == NULL)
37249 +    {
37250 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37251 +       return (-EINVAL);
37252 +    }
37253 +    
37254 +    seg->vps_type       = USER_VPSEG_P2P;
37255 +    seg->vps_p2p_cap    = ncap;
37256 +    seg->vps_p2p_routes = NULL;
37257 +
37258 +    user_loadroute_vpseg (uctx, seg, &uctx->uctx_position);
37259 +    
37260 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37261 +
37262 +    return (0);
37263 +}
37264 +
37265 +int
37266 +user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp)
37267 +{
37268 +    USER_VPSEG *seg;
37269 +    int         res;
37270 +
37271 +    if (lowvp > highvp || process >= (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size))
37272 +       return (-EINVAL);
37273 +
37274 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37275 +
37276 +    if ((seg = user_install_vpseg (uctx, process, 1)) == NULL)
37277 +    {
37278 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37279 +       return (-EINVAL);
37280 +    }
37281 +
37282 +    seg->vps_type         = USER_VPSEG_BCAST;
37283 +    seg->vps_bcast_lowvp  = lowvp;
37284 +    seg->vps_bcast_highvp = highvp;
37285 +
37286 +    if ((res = user_loadroute_bcast (uctx, seg)) < 0)
37287 +       user_remove_vpseg (uctx, seg);
37288 +
37289 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37290 +    return (res);
37291 +}
37292 +
37293 +int
37294 +user_removevp (USER_CTXT *uctx, unsigned process)
37295 +{
37296 +    USER_VPSEG *seg;
37297 +
37298 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37299 +    
37300 +    if (process == ELAN_INVALID_PROCESS)
37301 +       seg = list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link);
37302 +    else
37303 +       seg = user_find_vpseg (uctx, process, process);
37304 +
37305 +    if (seg == NULL)
37306 +    {
37307 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37308 +       return (-EINVAL);
37309 +    }
37310 +
37311 +    do {
37312 +       ELAN4_DEV    *dev = uctx->uctx_ctxt.ctxt_dev;
37313 +       int i;
37314 +
37315 +       for (i = 0; i < seg->vps_entries; i++)
37316 +           elan4_invalidate_route (dev, uctx->uctx_routetable, seg->vps_process + i);
37317 +
37318 +       user_remove_vpseg (uctx, seg);
37319 +
37320 +    } while (process == ELAN_INVALID_PROCESS && (seg = list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link)) != NULL);
37321 +
37322 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37323 +
37324 +    return (0);
37325 +}
37326 +
37327 +int
37328 +user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route)
37329 +{
37330 +    ELAN4_DEV    *dev = uctx->uctx_ctxt.ctxt_dev;
37331 +    USER_VPSEG   *seg;
37332 +    ELAN_LOCATION location;
37333 +
37334 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37335 +
37336 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
37337 +    {
37338 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37339 +       return (-EINVAL);
37340 +    }
37341 +
37342 +    /* check that the route supplied is valid and goes to the correct place */
37343 +    location = user_process2location (uctx, seg, process);
37344 +
37345 +    if (elan4_check_route (&uctx->uctx_position, location, route, 0) != 0)
37346 +    {
37347 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37348 +       return (-EINVAL);
37349 +    }
37350 +
37351 +    if (seg->vps_p2p_routes == NULL)
37352 +       KMEM_ZALLOC (seg->vps_p2p_routes, E4_VirtualProcessEntry *, sizeof (E4_VirtualProcessEntry) * seg->vps_entries, 1);
37353 +    
37354 +    if (seg->vps_p2p_routes == NULL)
37355 +    {
37356 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37357 +       return (-ENOMEM);
37358 +    }
37359 +    
37360 +    seg->vps_p2p_routes[process - seg->vps_process].Values[0] = route->Values[0];
37361 +    seg->vps_p2p_routes[process - seg->vps_process].Values[1] = ROUTE_CTXT_VALUE(location.loc_context) | (route->Values[1] & ~ROUTE_CTXT_MASK);
37362 +    
37363 +    PRINTF (uctx, DBG_ROUTE, "user_set_route: vp=%d -> %016llx%016llx\n", process, 
37364 +           seg->vps_p2p_routes[process - seg->vps_process].Values[1], seg->vps_p2p_routes[process - seg->vps_process].Values[0]);
37365 +
37366 +    elan4_write_route (dev, uctx->uctx_routetable, process, &seg->vps_p2p_routes[process - seg->vps_process]);
37367 +
37368 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37369 +
37370 +    return (0);
37371 +}
37372 +
37373 +int
37374 +user_reset_route (USER_CTXT *uctx, unsigned process)
37375 +{
37376 +    ELAN4_DEV             *dev = uctx->uctx_ctxt.ctxt_dev;
37377 +    E4_VirtualProcessEntry route;
37378 +    ELAN_LOCATION          location;
37379 +    USER_VPSEG            *seg;
37380 +
37381 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37382 +
37383 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
37384 +    {
37385 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37386 +       return (-EINVAL);
37387 +    }
37388 +
37389 +    if (seg->vps_p2p_routes != NULL)
37390 +    {
37391 +       seg->vps_p2p_routes[process - seg->vps_process].Values[0] = 0;
37392 +       seg->vps_p2p_routes[process - seg->vps_process].Values[1] = 0;
37393 +    }
37394 +    
37395 +    /* generate the default route to this location */
37396 +    location = user_process2location (uctx, seg, process);
37397 +
37398 +    PRINTF (uctx, DBG_ROUTE, "user_reset_route: vp=%d\n", process);
37399 +
37400 +    if (elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, 0) < 0)
37401 +       elan4_invalidate_route (dev, uctx->uctx_routetable, process);
37402 +    else
37403 +       elan4_write_route (dev, uctx->uctx_routetable, process, &route);
37404 +
37405 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37406 +
37407 +    return (0);
37408 +}
37409 +
37410 +int
37411 +user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route)
37412 +{
37413 +    ELAN4_DEV  *dev = uctx->uctx_ctxt.ctxt_dev;
37414 +    USER_VPSEG   *seg;
37415 +    
37416 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37417 +
37418 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
37419 +    {
37420 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37421 +       return (-EINVAL);
37422 +    }
37423 +
37424 +    elan4_read_route (dev, uctx->uctx_routetable, process, route);
37425 +
37426 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37427 +    return (0);
37428 +}
37429 +
37430 +int
37431 +user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error)
37432 +{
37433 +    ELAN4_DEV  *dev = uctx->uctx_ctxt.ctxt_dev;
37434 +    USER_VPSEG *seg;
37435 +    
37436 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37437 +
37438 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
37439 +    {
37440 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37441 +       return (-EINVAL);
37442 +    }
37443 +
37444 +    elan4_read_route (dev, uctx->uctx_routetable, process, route);
37445 +
37446 +    *error = elan4_check_route (&uctx->uctx_position, user_process2location (uctx, seg, process), route, 0);
37447 +
37448 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37449 +    return (0);
37450 +}
37451 +
37452 +int
37453 +user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg)
37454 +{
37455 +    USER_VPSEG   *seg;
37456 +    ELAN_LOCATION location;
37457 +    unsigned long flags;
37458 +    int                  res, found = 0;
37459 +    struct list_head *el;
37460 +
37461 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37462 +    /* determine the location of the virtual process */
37463 +    if ((seg = user_find_vpseg (uctx, vp, vp)) == NULL)
37464 +    {
37465 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d has no vpseg\n", vp);
37466 +
37467 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37468 +       return -EINVAL;
37469 +    }
37470 +
37471 +    switch (seg->vps_type)
37472 +    {
37473 +    case USER_VPSEG_P2P:
37474 +       location = user_process2location (uctx, seg, vp);
37475 +       break;
37476 +
37477 +    case USER_VPSEG_BCAST:
37478 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d is a bcast vp\n", vp);
37479 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37480 +       return -EINVAL;
37481 +    }
37482 +
37483 +    /*  check that we're attached to the network context */
37484 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37485 +    list_for_each (el , &uctx->uctx_cent_list) {
37486 +       USER_CTXT_ENTRY *cent = list_entry (el, USER_CTXT_ENTRY, cent_link);
37487 +       
37488 +       if (cent->cent_cap->cap_mycontext == nctx)
37489 +           found++;
37490 +    }
37491 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37492 +    
37493 +    if (! found)
37494 +    {
37495 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: nctx=%d not attached\n", nctx);
37496 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37497 +
37498 +       return -EINVAL;
37499 +    }
37500 +
37501 +    /* Update the fields which the user might have "faked" */
37502 +    msg->msg_context            = location.loc_context;
37503 +    msg->msg_sender.loc_node    = uctx->uctx_position.pos_nodeid;
37504 +    msg->msg_sender.loc_context = nctx;
37505 +
37506 +    res = elan4_neterr_sendmsg (uctx->uctx_ctxt.ctxt_dev, location.loc_node, retries, msg);
37507 +
37508 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37509 +
37510 +    return (res);
37511 +}
37512 +
37513 +
37514 +static int
37515 +user_resolvevp (USER_CTXT *uctx, unsigned process)
37516 +{
37517 +    int                    res = 0;
37518 +    USER_VPSEG            *seg;
37519 +    ELAN_LOCATION          location;
37520 +    E4_VirtualProcessEntry route;
37521 +
37522 +    PRINTF1 (uctx, DBG_VP, "user_resolvevp: process=%d\n", process);
37523 +
37524 +    kmutex_lock (&uctx->uctx_vpseg_lock);
37525 +
37526 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL)
37527 +    {
37528 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
37529 +       return (-EINVAL);
37530 +    }
37531 +
37532 +    switch (seg->vps_type)
37533 +    {
37534 +    case USER_VPSEG_P2P:
37535 +#ifdef use_elanmod
37536 +       if ((res = user_validate_cap (uctx, seg->vps_p2p_cap, ELAN_USER_P2P)) != 0)
37537 +           break;
37538 +#endif
37539 +
37540 +       location = user_process2location (uctx, seg, process);
37541 +
37542 +       PRINTF (uctx, DBG_VP, "user_resolvevp: vp=%d -> node=%d ctx=%d\n", process, location.loc_node, location.loc_context);
37543 +       
37544 +       if (seg->vps_p2p_routes != NULL && seg->vps_p2p_routes[process - seg->vps_process].Values[0] != 0)
37545 +           route = seg->vps_p2p_routes[process - seg->vps_process];
37546 +       else if ((res = elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, user_p2p_route_options)) < 0)
37547 +           break;
37548 +       
37549 +       elan4_write_route (uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, process, &route);
37550 +       break;
37551 +
37552 +    case USER_VPSEG_BCAST:
37553 +       res = user_loadroute_bcast (uctx, seg);
37554 +       break;
37555 +       
37556 +    default:
37557 +       res = -EINVAL;
37558 +       break;
37559 +    }
37560 +
37561 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
37562 +    return (res);
37563 +}
37564 +
37565 +static void
37566 +user_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
37567 +{
37568 +    USER_CTXT    *uctx = (USER_CTXT *) ctxt;
37569 +    unsigned long flags;
37570 +
37571 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37572 +
37573 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_eprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
37574 +    {
37575 +       PRINTF (uctx, DBG_EPROC, "user_eproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
37576 +
37577 +       uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR;
37578 +    }
37579 +    else
37580 +    {
37581 +       elan4_extract_eproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps), 0);
37582 +       
37583 +       DBGCMD (ctxt, DBG_EPROC, elan4_display_eproc_trap (ctxt, DBG_EPROC, "user_eproc_trap", RING_QUEUE_BACK(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps)));
37584 +       
37585 +       if (RING_QUEUE_ADD (uctx->uctx_eprocTrapQ))
37586 +           user_start_stopping (uctx, UCTX_EPROC_QUEUE_FULL);
37587 +    }
37588 +
37589 +    user_signal_trap (uctx);
37590 +
37591 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37592 +}
37593 +
37594 +static void
37595 +user_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
37596 +{
37597 +    USER_CTXT        *uctx = (USER_CTXT *) ctxt;
37598 +    USER_CQ          *ucq  = NULL;
37599 +    struct list_head *entry;
37600 +    unsigned long     flags;
37601 +
37602 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37603 +    
37604 +    list_for_each (entry, &uctx->uctx_cqlist) {
37605 +       ucq = list_entry (entry, USER_CQ, ucq_link);
37606 +
37607 +       if (elan4_cq2num(ucq->ucq_cq) == cqnum)
37608 +           break;
37609 +    }
37610 +
37611 +    ASSERT (ucq != NULL);
37612 +
37613 +    if (ucq->ucq_state != UCQ_RUNNING && CPROC_TrapType (status) == CommandProcInserterError)
37614 +    {
37615 +       PRINTF (ctxt, DBG_TRAP, "user_cproc_trap CommandProcInserterError\n");
37616 +       ucq->ucq_errored++;
37617 +    }
37618 +    else
37619 +    {
37620 +       ASSERT (ucq->ucq_state == UCQ_RUNNING);
37621 +
37622 +       elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &ucq->ucq_trap, cqnum);
37623 +
37624 +       DBGCMD (ctxt, DBG_CPROC, elan4_display_cproc_trap (ctxt, DBG_CPROC, "user_cproc_trap", &ucq->ucq_trap));
37625 +
37626 +       ucq->ucq_state = UCQ_TRAPPED;
37627 +       
37628 +    }
37629 +
37630 +    user_signal_trap (uctx);
37631 +       
37632 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37633 +}
37634 +
37635 +static void
37636 +user_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
37637 +{
37638 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
37639 +    unsigned long flags;
37640 +
37641 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37642 +
37643 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_dprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
37644 +    {
37645 +       PRINTF (uctx, DBG_DPROC, "user_dproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
37646 +
37647 +       uctx->uctx_status |= UCTX_DPROC_QUEUE_ERROR;
37648 +    }
37649 +    else
37650 +    {
37651 +       ELAN4_DPROC_TRAP *trap = RING_QUEUE_BACK (uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps);
37652 +       
37653 +       elan4_extract_dproc_trap (ctxt->ctxt_dev, status, trap, unit);
37654 +
37655 +       DBGCMD (ctxt, DBG_DPROC, elan4_display_dproc_trap (ctxt, DBG_DPROC, "user_dproc_trap", trap));
37656 +
37657 +       if (!DPROC_PrefetcherFault (status) && DPROC_TrapType(status) == DmaProcFailCountError && !RING_QUEUE_FULL (uctx->uctx_dmaQ))
37658 +       {
37659 +           trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count);
37660 +
37661 +           *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = trap->tr_desc;
37662 +    
37663 +           (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
37664 +       }
37665 +       else
37666 +       {
37667 +           if (RING_QUEUE_ADD (uctx->uctx_dprocTrapQ))
37668 +               user_start_stopping (uctx, UCTX_DPROC_QUEUE_FULL);
37669 +       }
37670 +    }
37671 +
37672 +    user_signal_trap (uctx);
37673 +
37674 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37675 +}
37676 +
37677 +static void
37678 +user_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
37679 +{
37680 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
37681 +    unsigned long flags;
37682 +
37683 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37684 +
37685 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_tprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
37686 +    {
37687 +       PRINTF (uctx, DBG_TPROC, "user_tproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
37688 +
37689 +       uctx->uctx_status |= UCTX_TPROC_QUEUE_ERROR;
37690 +    }
37691 +    else
37692 +    {
37693 +       elan4_extract_tproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps));
37694 +       
37695 +       DBGCMD (ctxt, DBG_TPROC, elan4_display_tproc_trap (ctxt, DBG_TPROC, "user_tproc_trap", RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps)));
37696 +       
37697 +       if (RING_QUEUE_ADD (uctx->uctx_tprocTrapQ))
37698 +           user_start_stopping (uctx, UCTX_TPROC_QUEUE_FULL);
37699 +    }
37700 +    user_signal_trap (uctx);
37701 +
37702 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37703 +}
37704 +
37705 +static void
37706 +user_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
37707 +{
37708 +    USER_CTXT       *uctx  = (USER_CTXT *) ctxt;
37709 +    USER_IPROC_TRAP *utrap = &uctx->uctx_iprocTrap[unit & 1];
37710 +    unsigned long    flags;
37711 +
37712 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37713 +
37714 +    ASSERT (utrap->ut_state == UTS_IPROC_RUNNING);
37715 +
37716 +    elan4_extract_iproc_trap (ctxt->ctxt_dev, status, &utrap->ut_trap, unit);
37717 +    DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "user_iproc_trap", &utrap->ut_trap));
37718 +
37719 +    utrap->ut_state = UTS_IPROC_TRAPPED;
37720 +
37721 +    user_start_nacking (uctx, unit ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
37722 +
37723 +    user_signal_trap (uctx);
37724 +
37725 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37726 +}
37727 +
37728 +static void
37729 +user_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
37730 +{
37731 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
37732 +    unsigned long flags;
37733 +
37734 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37735 +
37736 +    PRINTF1 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx\n", cookie);
37737 +
37738 +    switch (cookie)
37739 +    {
37740 +    case ELAN4_INT_COOKIE_DDCQ:
37741 +       uctx->uctx_ddcq_intr--;
37742 +
37743 +       user_signal_trap (uctx);
37744 +       break;
37745 +
37746 +    default:
37747 +       if (uctx->uctx_intcookie_table == NULL || intcookie_fire (uctx->uctx_intcookie_table, cookie) != 0)
37748 +       {
37749 +           PRINTF2 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx %s\n", cookie, uctx->uctx_intcookie_table ? "not found" : "no table");
37750 +           uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR;
37751 +           user_signal_trap (uctx);
37752 +       }
37753 +       break;
37754 +    }
37755 +
37756 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37757 +}
37758 +
37759 +static void
37760 +user_neterrmsg (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg)
37761 +{
37762 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
37763 +    unsigned long flags;
37764 +
37765 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37766 +    
37767 +    if (! RING_QUEUE_FULL (uctx->uctx_msgQ))
37768 +    {
37769 +       memcpy (RING_QUEUE_BACK (uctx->uctx_msgQ, uctx->uctx_msgs), msg, sizeof (ELAN4_NETERR_MSG));
37770 +
37771 +       (void) RING_QUEUE_ADD (uctx->uctx_msgQ);
37772 +    
37773 +       user_signal_trap (uctx);
37774 +    }
37775 +    
37776 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37777 +}
37778 +
37779 +ELAN4_TRAP_OPS user_trap_ops = 
37780 +{
37781 +    user_eproc_trap,
37782 +    user_cproc_trap,
37783 +    user_dproc_trap,
37784 +    user_tproc_trap,
37785 +    user_iproc_trap,
37786 +    user_interrupt,
37787 +    user_neterrmsg,
37788 +};
37789 +
37790 +static int
37791 +deliver_trap (ELAN4_USER_TRAP *utrapp, int type, unsigned proc, void *trap, ...)
37792 +{
37793 +    register int i, len;
37794 +    va_list ap;
37795 +
37796 +    PRINTF (NULL, DBG_TRAP, "deliver_trap: type=%d proc=%d\n", type, proc);
37797 +
37798 +    switch (proc)
37799 +    {
37800 +    case UTS_CPROC:      len = sizeof (ELAN4_CPROC_TRAP); break;
37801 +    case UTS_DPROC:      len = sizeof (ELAN4_DPROC_TRAP); break;
37802 +    case UTS_EPROC:      len = sizeof (ELAN4_EPROC_TRAP); break;
37803 +    case UTS_IPROC:      len = sizeof (ELAN4_IPROC_TRAP); break;
37804 +    case UTS_TPROC:      len = sizeof (ELAN4_TPROC_TRAP); break;
37805 +    case UTS_NETERR_MSG: len = sizeof (ELAN4_NETERR_MSG); break;
37806 +    default:             len = 0; break;
37807 +    }
37808 +
37809 +    if (put_user (type, &utrapp->ut_type) || put_user (proc, &utrapp->ut_proc) || copy_to_user (&utrapp->ut_trap, trap, len))
37810 +       return (UTS_EFAULT);
37811 +
37812 +    va_start (ap, trap);
37813 +    for (i = 0; i < sizeof (utrapp->ut_args)/sizeof (utrapp->ut_args[0]); i++)
37814 +       if (put_user (va_arg (ap, unsigned long), &utrapp->ut_args[i]))
37815 +           return (UTS_EFAULT);
37816 +    va_end (ap);
37817 +
37818 +    return (type);
37819 +}
37820 +
37821 +static int
37822 +user_pagefault (USER_CTXT *uctx, E4_FaultSave *farea)
37823 +{
37824 +    E4_Addr      addr = farea->FaultAddress;
37825 +    E4_uint32    fsr  = FaultSaveFSR(farea->FSRAndFaultContext);
37826 +    FAULT_SAVE  *entry;
37827 +    FAULT_SAVE **predp;
37828 +    int count;
37829 +
37830 +    PRINTF2 (uctx, DBG_FAULT, "user_pagefault: addr=%llx fsr %x\n", (unsigned long long) addr, fsr);
37831 +    
37832 +    if ((fsr & FSR_FaultForBadData) != 0)                      /* Memory ECC error during walk */
37833 +    {
37834 +       PRINTF0 (uctx, DBG_FAULT, "user_pagefault: ECC error during walk\n");
37835 +       return (-EFAULT);
37836 +    }
37837 +    
37838 +    if ((fsr & FSR_FaultForMaxChainCount) != 0)                        /* Have walked a chain of 1024 items */
37839 +    {
37840 +       PRINTF0 (uctx, DBG_FAULT, "user_pagefault: pte chain too long\n");
37841 +       return (-EFAULT);
37842 +    }
37843 +    
37844 +    if (uctx->uctx_num_fault_save)
37845 +    {
37846 +        spin_lock (&uctx->uctx_fault_lock);
37847 +        for( predp = &uctx->uctx_fault_list; (entry = *predp)->next != NULL; predp = &entry->next)
37848 +        {
37849 +           if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1)))
37850 +               break;
37851 +        }
37852 +
37853 +        *predp = entry->next;
37854 +        entry->next = uctx->uctx_fault_list;
37855 +        uctx->uctx_fault_list = entry;
37856 +
37857 +        if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1)))
37858 +        {
37859 +           if ((entry->count <<= 1) > max_fault_pages)
37860 +               entry->count = max_fault_pages;
37861 +        }
37862 +        else
37863 +           entry->count = min_fault_pages;
37864 +
37865 +        entry->addr = (addr & ~((E4_Addr) PAGE_SIZE-1))+(entry->count * PAGE_SIZE);
37866 +        count = entry->count;
37867 +        spin_unlock (&uctx->uctx_fault_lock);
37868 +
37869 +        if (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), count * PAGESIZE, fsr) == 0)
37870 +       return 0;
37871 +
37872 +       /* else pre-faulting has failed, try just this page */
37873 +    }
37874 +
37875 +    return (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), PAGE_SIZE, fsr));
37876 +
37877 +}
37878 +
37879 +static int
37880 +queue_dma_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_DMA *dma)
37881 +{
37882 +    unsigned long flags;
37883 +
37884 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37885 +
37886 +    if (RING_QUEUE_FULL (uctx->uctx_dmaQ))
37887 +    {
37888 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37889 +       
37890 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_DPROC_QUEUE_OVERFLOW));
37891 +    }
37892 +
37893 +    *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma;
37894 +    
37895 +    (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
37896 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37897 +
37898 +    return (UTS_FINISHED);
37899 +}
37900 +
37901 +static int
37902 +queue_thread_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_ThreadRegs *regs)
37903 +{
37904 +    unsigned long flags;
37905 +    
37906 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
37907 +
37908 +    if (RING_QUEUE_FULL (uctx->uctx_threadQ))
37909 +    {
37910 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37911 +
37912 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_TPROC_QUEUE_OVERFLOW));
37913 +    }
37914 +
37915 +    *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs;
37916 +    (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
37917 +    
37918 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
37919 +
37920 +    return (UTS_FINISHED);
37921 +}
37922 +
37923 +static int
37924 +fixup_eproc_trap (USER_CTXT *uctx, ELAN4_EPROC_TRAP *trap, int waitevent)
37925 +{
37926 +    E4_FaultSave *farea = &trap->tr_faultarea;
37927 +    E4_uint32     fsr   = FaultSaveFSR(farea->FSRAndFaultContext);
37928 +    E4_uint64     CountAndType;
37929 +    E4_uint64     CopySource;
37930 +    E4_uint64     CopyDest;
37931 +
37932 +    /*
37933 +     * Event processor can trap as follows :
37934 +     *   1) Event location read                (faddr == event location & Event Permission)
37935 +     *   2) Event location write       (faddr == event location & Event Permission)
37936 +     *   3) Copy Source read           Read Access
37937 +     *   4) Copy/Write dest write      other
37938 +     *
37939 +     *  NOTE - it is possible to see both 3) and 4) together - but only with physical errors.
37940 +     */
37941 +    if (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite)
37942 +    {
37943 +       /* 
37944 +        * We complete the copy/write by issuing a waitevent 0 of the approriate type.
37945 +        *   - NB mask off bottom bits of EventAddr in case of partial setevent
37946 +        */
37947 +       E4_uint64 EventAddr = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1);
37948 +
37949 +       if (! user_ddcq_check (uctx, 4))
37950 +           return (0);
37951 +       
37952 +       if ((trap->tr_event.ev_CountAndType & E4_EVENT_COPY_TYPE_MASK) == E4_EVENT_WRITE)
37953 +       {
37954 +           /* case 4) faulted on write word to destination */
37955 +
37956 +           CountAndType = trap->tr_event.ev_CountAndType & E4_EVENT_TYPE_MASK;
37957 +           
37958 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: write Event=%llx CountAndType=%llx\n", EventAddr, CountAndType);
37959 +           PRINTF (uctx, DBG_TRAP, "                  WritePtr=%llx WriteValue=%llx\n", 
37960 +                   trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue);
37961 +
37962 +           user_ddcq_waitevent (uctx, EventAddr, CountAndType, trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue);
37963 +       }
37964 +       else
37965 +       {
37966 +           /* case 3) or case 4) faulted on read/write of copy */
37967 +           if (AT_Perm (fsr) == AT_PermLocalDataRead)
37968 +           {
37969 +               CountAndType = (trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | EPROC_CopySize(trap->tr_status);
37970 +               CopySource   = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE;
37971 +               CopyDest     = trap->tr_event.ev_CopyDest;
37972 +           }
37973 +           else
37974 +           {
37975 +               CountAndType = ((trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | 
37976 +                               ((EPROC_CopySize(trap->tr_status) + EVENT_COPY_NDWORDS) & E4_EVENT_COPY_SIZE_MASK));
37977 +               CopySource   = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE;
37978 +               CopyDest     = trap->tr_event.ev_CopyDest - EVENT_COPY_BLOCK_SIZE;
37979 +           }
37980 +           
37981 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: copy Event=%llx CountAndType=%llx\n", EventAddr, CountAndType);
37982 +           PRINTF (uctx, DBG_TRAP, "                  CopySource=%llx CopyDest=%llx\n", CopySource, CopyDest);
37983 +
37984 +           user_ddcq_waitevent (uctx, EventAddr, CountAndType, CopySource, CopyDest);
37985 +       }
37986 +    }
37987 +    else
37988 +    {
37989 +       E4_uint64 EventAddr  = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1);
37990 +       E4_uint32 EventCount = trap->tr_eventaddr & (E4_EVENT_ALIGN-1);
37991 +
37992 +       /* case 1) or 2) - just reissue the event */
37993 +       if (! waitevent)
37994 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: setevent EventAddr=%llx EventCount=%x\n", EventAddr, EventCount);
37995 +       else
37996 +       {
37997 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: waitevent Event=%llx CountAndType=%llx\n", EventAddr, trap->tr_event.ev_CountAndType);
37998 +           PRINTF (uctx, DBG_TRAP, "                  Param[0]=%llx Param[1]=%llx\n",
37999 +                    trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
38000 +       }
38001 +
38002 +       if (! user_ddcq_check (uctx, waitevent ? 4 : 2))
38003 +           return (0);
38004 +       
38005 +       if (waitevent)
38006 +           user_ddcq_waitevent (uctx, EventAddr, trap->tr_event.ev_CountAndType, 
38007 +                                 trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
38008 +       else
38009 +           user_ddcq_seteventn (uctx, EventAddr, EventCount);
38010 +    }
38011 +
38012 +    return (1);
38013 +}
38014 +
38015 +
38016 +static int
38017 +resolve_eproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_EPROC_TRAP *trap)
38018 +{
38019 +    switch (EPROC_TrapType (trap->tr_status))
38020 +    {
38021 +    case EventProcNoFault:
38022 +       PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcNoFault\n");
38023 +
38024 +       return (UTS_FINISHED);
38025 +       
38026 +    case EventProcAddressAlignment:
38027 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_EPROC, trap));
38028 +
38029 +    case EventProcMemoryFault:
38030 +       PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcMemoryFault @ %llx\n", trap->tr_faultarea.FaultAddress);
38031 +
38032 +       if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
38033 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_EPROC, trap));
38034 +
38035 +       return (UTS_FINISHED);
38036 +       
38037 +    case EventProcCountWrapError:
38038 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_EPROC, trap));
38039 +
38040 +    default:
38041 +       printk ("resolve_eproc_trap: bad trap type %d\n", EPROC_TrapType (trap->tr_status));
38042 +       BUG();
38043 +    }
38044 +
38045 +    return (UTS_FINISHED);
38046 +}
38047 +
38048 +static int
38049 +resolve_cproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, USER_CQ *ucq)
38050 +{
38051 +    ELAN4_DEV        *dev    = uctx->uctx_ctxt.ctxt_dev;
38052 +    ELAN4_CPROC_TRAP *trap   = &ucq->ucq_trap;
38053 +    E4_uint64         command;
38054 +    int               res;
38055 +    int               chan;
38056 +
38057 +    ELAN_LOCATION location;
38058 +    int vp, node;
38059 +
38060 +    PRINTF2 (uctx, DBG_CPROC, "resolve_cproc_trap: cq %p is trapped - Status %lx\n", ucq, trap->tr_status);
38061 +    
38062 +    switch (CPROC_TrapType (trap->tr_status))
38063 +    {
38064 +    case CommandProcDmaQueueOverflow:
38065 +       PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcDmaQueueOverflow\n");
38066 +       /*
38067 +        * XXXX: should wait for the queue to become emptier if we're 
38068 +        *       responsible for it being very full
38069 +        */
38070 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
38071 +       break;
38072 +
38073 +    case CommandProcInterruptQueueOverflow:
38074 +       PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcInterruptQueueOverflow\n");
38075 +       /*
38076 +        * XXXX: should wait for the queue to become emptier if we're
38077 +        *       responsible for it being very full
38078 +        */
38079 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
38080 +       break;
38081 +       
38082 +    case CommandProcWaitTrap:
38083 +       PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcWaitTrap\n");
38084 +       
38085 +       if ((res = resolve_eproc_trap (uctx, utrapp, &trap->tr_eventtrap)) != UTS_FINISHED)
38086 +       {
38087 +           ucq->ucq_state = UCQ_STOPPED;
38088 +
38089 +           return (res);
38090 +       }
38091 +       
38092 +       if (fixup_eproc_trap (uctx, &trap->tr_eventtrap, 1) == 0)
38093 +           return UTS_RESCHEDULE;
38094 +
38095 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
38096 +       break;
38097 +       
38098 +    case CommandProcMemoryFault:
38099 +       PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcMemoryFault at %llx\n", trap->tr_faultarea.FaultAddress);
38100 +       if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
38101 +       {
38102 +           ucq->ucq_state = UCQ_STOPPED;
38103 +
38104 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
38105 +       }
38106 +       
38107 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
38108 +       break;
38109 +       
38110 +    case CommandProcRouteFetchFault:
38111 +       command = elan4_trapped_open_command (dev, ucq->ucq_cq);
38112 +       
38113 +       PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcRouteFetchFault to vp %d\n", (int) (command >> 32));
38114 +       
38115 +       if (user_resolvevp (uctx, (unsigned) (command >> 32)) != 0)
38116 +       {
38117 +           ucq->ucq_state = UCQ_STOPPED;
38118 +
38119 +           return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq), (long) (command >> 32)));
38120 +       }
38121 +
38122 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
38123 +       break;
38124 +       
38125 +    case CommandProcFailCountZero:
38126 +       PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcFailCountZero - reset failcount\n");
38127 +       
38128 +       /* Update CPROC timeout route statistics */
38129 +       for (chan = 0; chan <= 1; chan++)
38130 +       {
38131 +           /* Was there a timeout on this channel ? */
38132 +           if (PackValue(trap->tr_qdesc.CQ_AckBuffers, chan) == PackTimeout)
38133 +           {
38134 +               /* Find the last open command for that channel to extract the relevant vp */
38135 +               if ((vp = cproc_open_extract_vp(uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq, chan)) != -1)
38136 +               {
38137 +                   E4_VirtualProcessEntry route;
38138 +                   
38139 +                   kmutex_lock (&uctx->uctx_vpseg_lock);
38140 +                   location = user_process2location(uctx, NULL, vp);
38141 +                   elan4_read_route (uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, vp, &route);
38142 +                   kmutex_unlock (&uctx->uctx_vpseg_lock);
38143 +                   node = location.loc_node;
38144 +                   
38145 +                   kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
38146 +                   
38147 +                   if ((node >= 0) && (node < uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes))
38148 +                   {
38149 +                       uctx->uctx_ctxt.ctxt_dev->dev_cproc_timeout[node]++;
38150 +                       
38151 +                       elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_cproc_timeout_routes,
38152 +                                           &route, uctx->uctx_ctxt.ctxt_dev);
38153 +                   }
38154 +                   
38155 +                   kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
38156 +               }
38157 +           }
38158 +       }
38159 +           
38160 +       /* NOTE - we must not modify the ChannelNotCompleted bits - so modify */
38161 +       /*        the restart count with a part-word store */
38162 +       elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, user_cproc_retry_count);
38163 +
38164 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
38165 +       break;
38166 +
38167 +    case CommandProcAddressAlignment:
38168 +       ucq->ucq_state = UCQ_STOPPED;
38169 +
38170 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
38171 +
38172 +    case CommandProcPermissionTrap:
38173 +    {
38174 +       sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc));
38175 +       E4_uint64   control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
38176 +
38177 +       PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcPermissionTrap - %s\n",
38178 +               (control & CQ_PermissionMask) != ucq->ucq_cq->cq_perm ? "resume from stop" : "permission denied");
38179 +       
38180 +       if ((control & CQ_PermissionMask) == ucq->ucq_cq->cq_perm)
38181 +           return (deliver_trap (utrapp, UTS_PERMISSION_DENIED, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
38182 +
38183 +       elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, 0);
38184 +
38185 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
38186 +       break;
38187 +    }
38188 +    
38189 +    case CommandProcBadData:
38190 +       ucq->ucq_state = UCQ_STOPPED;
38191 +
38192 +       return (deliver_trap (utrapp, UTS_INVALID_COMMAND, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
38193 +
38194 +    default:
38195 +       ucq->ucq_state = UCQ_STOPPED;
38196 +
38197 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
38198 +    }
38199 +
38200 +    return (UTS_FINISHED);
38201 +}
38202 +
38203 +static int
38204 +resolve_dproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_DPROC_TRAP *trap)
38205 +{
38206 +    ELAN_LOCATION location;
38207 +    int node;
38208 +    E4_VirtualProcessEntry route;
38209 +
38210 +    if (DPROC_PrefetcherFault (trap->tr_status))
38211 +    {
38212 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: PrefetcherFault at %llx\n", trap->tr_prefetchFault.FaultAddress);
38213 +
38214 +       if (user_pagefault (uctx, &trap->tr_prefetchFault) != 0)
38215 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_DPROC, trap));
38216 +       
38217 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc));
38218 +    }
38219 +    
38220 +    switch (DPROC_TrapType (trap->tr_status))
38221 +    {
38222 +    case DmaProcRouteFetchFault:
38223 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcRouteFetchFault vp %d\n", trap->tr_desc.dma_vproc);
38224 +
38225 +       if (user_resolvevp (uctx, trap->tr_desc.dma_vproc) != 0)
38226 +           return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_DPROC, trap, trap->tr_desc.dma_vproc));
38227 +       
38228 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* immediate */));
38229 +       
38230 +    case DmaProcFailCountError:
38231 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcFailCountError - vp %d cookie %llx\n",
38232 +               trap->tr_desc.dma_vproc, trap->tr_desc.dma_cookie);
38233 +
38234 +       trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count);
38235 +
38236 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));
38237 +
38238 +    case DmaProcPacketAckError:
38239 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcPacketAckError - %d%s\n", DPROC_PacketAckValue (trap->tr_status), 
38240 +               DPROC_PacketTimeout (trap->tr_status) ? " timeout" : "");
38241 +
38242 +       kmutex_lock (&uctx->uctx_vpseg_lock);
38243 +       location = user_process2location(uctx, NULL, trap->tr_desc.dma_vproc);
38244 +       elan4_read_route(uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, trap->tr_desc.dma_vproc, &route);
38245 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
38246 +       node = location.loc_node;
38247 +
38248 +       /* Update dproc route timeout statistics */
38249 +       if ((node >= 0) && (node < uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes))
38250 +       {
38251 +           kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
38252 +           
38253 +           if ((route.Values[0] != 0) || (route.Values[1] != 0))
38254 +           {
38255 +               if (DPROC_PacketTimeout (trap->tr_status))
38256 +               {
38257 +                   uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout[node]++;
38258 +                   elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout_routes,
38259 +                                       &route, uctx->uctx_ctxt.ctxt_dev);
38260 +               }
38261 +               else
38262 +               {
38263 +                   uctx->uctx_ctxt.ctxt_dev->dev_ack_errors[node]++;
38264 +                   elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_ack_error_routes,
38265 +                                       &route, uctx->uctx_ctxt.ctxt_dev);
38266 +               }
38267 +           }
38268 +           
38269 +           kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
38270 +       }
38271 +
38272 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));
38273 +
38274 +    case DmaProcQueueOverflow:
38275 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcQueueOverflow\n");
38276 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));
38277 +       
38278 +    case DmaProcRunQueueReadFault:
38279 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_DPROC, trap));
38280 +       
38281 +    default:
38282 +       printk ("resolve_dproc_trap: unknown trap type : %d\n", DPROC_TrapType(trap->tr_status));
38283 +       BUG();
38284 +    }
38285 +    return UTS_FINISHED;
38286 +}
38287 +
38288 +int
38289 +resolve_tproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_TPROC_TRAP *trap)
38290 +{
38291 +    PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trap state = %lx\n", trap->tr_state);
38292 +
38293 +    if (trap->tr_state & TS_TrapForTooManyInstructions)
38294 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_TPROC, trap));
38295 +    
38296 +    if (trap->tr_state & TS_Unimplemented)
38297 +       return (deliver_trap (utrapp, UTS_UNIMP_INSTR, UTS_TPROC, trap));
38298 +    
38299 +    if (trap->tr_state & TS_DataAlignmentError)
38300 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_TPROC, trap));
38301 +    
38302 +    if ((trap->tr_state & TS_InstAccessException) && user_pagefault (uctx, &trap->tr_instFault) != 0)
38303 +       return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap));
38304 +    
38305 +    if ((trap->tr_state & TS_DataAccessException) && user_pagefault (uctx, &trap->tr_dataFault) != 0)
38306 +       return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap));
38307 +    
38308 +    /* If we're restarting from trap - then just need to re-issue it */
38309 +    if (trap->tr_pc == uctx->uctx_trestart_addr || (trap->tr_state & TS_TrappedFlag))
38310 +    {
38311 +       PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trapped in trap code PC=%llx SP=%llx\n", trap->tr_pc, trap->tr_regs[1]);
38312 +
38313 +       trap->tr_regs[0] = uctx->uctx_trestart_addr;
38314 +    }
38315 +    else
38316 +    {
38317 +       E4_uint64 *sp = (E4_uint64 *) user_elan2main (uctx, trap->tr_regs[1]);
38318 +       int        i, reload;
38319 +
38320 +       /* need to store the registers on the stack; see */
38321 +       /*  lib_tproc_trampoline_elan4_thread.S for stack layout */
38322 +#define TS_STACK_OFF(REG)      ((((REG)&7)) - (((REG)>>3)*8) - 8)
38323 +       for (reload = 0, i = 0; i < 64; i++)
38324 +       {
38325 +           if (trap->tr_dirty & ((E4_uint64) 1 << i))
38326 +           {
38327 +               PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: %%r%d  [%016llx] -> %p\n", i, trap->tr_regs[i], &sp[TS_STACK_OFF(i)]);
38328 +
38329 +               sulonglong ((u64 *) &sp[TS_STACK_OFF(i)], trap->tr_regs[i]);
38330 +               
38331 +               reload |= (1 << (i >> 3));
38332 +           }
38333 +       }
38334 +#undef TS_STACK_OFF
38335 +
38336 +       PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: pc %llx npc %llx\n", trap->tr_pc, trap->tr_npc);
38337 +       PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: CC %x reload %x\n", (int) (trap->tr_state >> TS_XCCshift), reload);
38338 +
38339 +       trap->tr_regs[0] = uctx->uctx_trestart_addr;
38340 +       trap->tr_regs[2] = trap->tr_pc;
38341 +       trap->tr_regs[3] = trap->tr_npc;
38342 +       trap->tr_regs[4] = (trap->tr_state >> TS_XCCshift) & TS_XCCmask;
38343 +       trap->tr_regs[5] = reload;
38344 +    }
38345 +
38346 +    return (queue_thread_for_retry (uctx, utrapp, (E4_ThreadRegs *) trap->tr_regs));
38347 +}
38348 +
38349 +static int
38350 +resolve_iproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int channel)
38351 +{
38352 +    USER_IPROC_TRAP  *utrap = &uctx->uctx_iprocTrap[channel];
38353 +    ELAN4_IPROC_TRAP *trap  = &utrap->ut_trap;
38354 +    unsigned long     flags;
38355 +
38356 +    elan4_inspect_iproc_trap (trap);
38357 +
38358 +    if (trap->tr_flags & TR_FLAG_TOOMANY_TRANS)
38359 +       return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));
38360 +
38361 +    ASSERT (trap->tr_trappedTrans >= 0 && trap->tr_trappedTrans < trap->tr_numTransactions);
38362 +
38363 +    switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType))
38364 +    {
38365 +    case InputMemoryFault:
38366 +       if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
38367 +       {
38368 +           utrap->ut_state = UTS_IPROC_STOPPED;
38369 +           
38370 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_IPROC, trap, channel));
38371 +       }
38372 +       break;
38373 +
38374 +    case InputDmaQueueOverflow:
38375 +    case InputEventEngineTrapped:
38376 +       /* nothing to do for these 2 - restarting will simulate the transactions */
38377 +       break;
38378 +
38379 +    case InputEopErrorOnWaitForEop:
38380 +    case InputEopErrorTrap:
38381 +       break;
38382 +
38383 +    case InputCrcErrorAfterPAckOk:
38384 +       PRINTF (DBG_DEVICE, DBG_IPROC, "InputCrcErrorAfterPAckOk: flags %x\n", trap->tr_flags);
38385 +
38386 +       ASSERT ((trap->tr_flags & TR_FLAG_ACK_SENT) && ((trap->tr_flags & (TR_FLAG_DMA_PACKET|TR_FLAG_BAD_TRANS)) ||
38387 +                                                       ((trap->tr_flags & TR_FLAG_EOP_ERROR) && trap->tr_identifyTrans == TR_TRANS_INVALID)));
38388 +       break;
38389 +
38390 +    case InputDiscardAfterAckOk:
38391 +       return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));
38392 +
38393 +    case InputAddressAlignment:
38394 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_IPROC, trap, channel));
38395 +
38396 +    case InputInvalidTransType:
38397 +       return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));
38398 +
38399 +    default:
38400 +       printk ("resolve_iproc_trap: unknown trap type %d\n", IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType));
38401 +       BUG();
38402 +       /* NOTREACHED */
38403 +    }
38404 +
38405 +    if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD))
38406 +    {
38407 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38408 +
38409 +       utrap->ut_state = UTS_IPROC_RUNNING;
38410 +
38411 +       user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
38412 +       
38413 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38414 +    }
38415 +    else if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID)))
38416 +    {
38417 +       /* 
38418 +        * TR_FLAG_DMA_PACKET   means a DMA packet has faulted.
38419 +        *
38420 +        * TR_FLAG_BAD_TRANS    means we have a transaction with a bad CRC after the transaction
38421 +        *                      which sent the ack - this implies it's an overlapped ack DMA packet
38422 +        *
38423 +        * TR_FLAG_EOP_ERROR    means we've received an EOP reset - if we hadn't seen an identify
38424 +        *                      transaction then it's a DMA packet.
38425 +        *
38426 +        * To ensure that the DMA processor works correctly the next packet must be NACKed to 
38427 +        * cause it to resend this one.
38428 +        */
38429 +       PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: %s during DMA packet\n",
38430 +               (trap->tr_flags & TR_FLAG_BAD_TRANS) ? "BadTransaction" : (trap->tr_flags & TR_FLAG_EOP_ERROR) ? "EopError" : "trap");
38431 +
38432 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38433 +
38434 +       if (trap->tr_flags & TR_FLAG_DMA_PACKET)
38435 +       {
38436 +           if (! (trap->tr_flags & TR_FLAG_BAD_TRANS))
38437 +               utrap->ut_state = UTS_IPROC_EXECUTE_PACKET;
38438 +           else
38439 +           {
38440 +               kcondvar_t waithere;
38441 +
38442 +               /* We must ensure that the next packet is always nacked, so
38443 +                * we wait here for an output timeout before dropping the 
38444 +                * context filter - we just pause here for 4 mS */
38445 +               kcondvar_init (&waithere);
38446 +               kcondvar_timedwait (&waithere, &uctx->uctx_spinlock, &flags, lbolt + (HZ/250) + 1);
38447 +               kcondvar_destroy (&waithere);
38448 +
38449 +               utrap->ut_state = UTS_IPROC_RUNNING;
38450 +               
38451 +               user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
38452 +           }
38453 +       }
38454 +       else
38455 +       {
38456 +           utrap->ut_state = UTS_IPROC_RUNNING;
38457 +
38458 +           user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
38459 +       }
38460 +
38461 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38462 +    }
38463 +    else if (trap->tr_flags & TR_FLAG_EOP_ERROR)
38464 +    {
38465 +       PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: EopError with identify\n");
38466 +
38467 +       utrap->ut_state = UTS_IPROC_NETWORK_ERROR;
38468 +    }
38469 +    else
38470 +    {
38471 +       PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: execute packet\n");
38472 +
38473 +       utrap->ut_state = UTS_IPROC_EXECUTE_PACKET;
38474 +    }
38475 +
38476 +    return UTS_FINISHED;
38477 +}
38478 +
38479 +
38480 +static int
38481 +resolve_cproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38482 +{
38483 +    struct list_head *entry;
38484 +    int res = UTS_FINISHED;
38485 +
38486 +    kmutex_lock (&uctx->uctx_cqlock);
38487 +    list_for_each (entry, &uctx->uctx_cqlist) {
38488 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
38489 +
38490 +       if (ucq->ucq_state == UCQ_TRAPPED)
38491 +       {
38492 +           res = resolve_cproc_trap (uctx, utrapp, ucq);
38493 +
38494 +           if (res != UTS_FINISHED)
38495 +               break;
38496 +       }
38497 +
38498 +       if (ucq->ucq_errored)
38499 +       {
38500 +           ucq->ucq_errored = 0;
38501 +           res = deliver_trap (utrapp, UTS_CPROC_ERROR, UTS_CPROC, &ucq->ucq_trap, elan4_cq2idx(ucq->ucq_cq));
38502 +           break;
38503 +       }
38504 +    }
38505 +    kmutex_unlock (&uctx->uctx_cqlock);
38506 +
38507 +    return (res);
38508 +}
38509 +
38510 +static int
38511 +resolve_eproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38512 +{
38513 +    unsigned long flags;
38514 +    int res;
38515 +
38516 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38517 +    while (! RING_QUEUE_EMPTY (uctx->uctx_eprocTrapQ))
38518 +    {
38519 +       ELAN4_EPROC_TRAP trap = *RING_QUEUE_FRONT (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps);
38520 +
38521 +       (void) RING_QUEUE_REMOVE (uctx->uctx_eprocTrapQ);
38522 +
38523 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38524 +
38525 +       if ((res = resolve_eproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
38526 +           return (res);
38527 +
38528 +       if (fixup_eproc_trap (uctx, &trap, 0) == 0)
38529 +       {
38530 +           PRINTF (uctx, DBG_EPROC, "resolve_eproc_trap: could not fixup eproc trap - requeue it\n");
38531 +
38532 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38533 +           if (RING_QUEUE_REALLY_FULL(uctx->uctx_eprocTrapQ))
38534 +               uctx->uctx_status |= UCTX_EPROC_QUEUE_OVERFLOW;
38535 +           else
38536 +           {
38537 +               *RING_QUEUE_FRONT(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps) = trap;
38538 +           
38539 +               (void) RING_QUEUE_ADD_FRONT(uctx->uctx_eprocTrapQ);
38540 +           }
38541 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38542 +
38543 +           return UTS_RESCHEDULE;
38544 +       }
38545 +       
38546 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38547 +    }
38548 +
38549 +    if (uctx->uctx_status & UCTX_EPROC_QUEUE_FULL)
38550 +       user_stop_stopping (uctx, UCTX_EPROC_QUEUE_FULL);
38551 +
38552 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38553 +    return (UTS_FINISHED);
38554 +}
38555 +           
38556 +static int
38557 +resolve_dproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38558 +{
38559 +    unsigned long flags;
38560 +    int res;
38561 +    
38562 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38563 +    while (! RING_QUEUE_EMPTY (uctx->uctx_dprocTrapQ))
38564 +    {
38565 +       ELAN4_DPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps);
38566 +       
38567 +       (void) RING_QUEUE_REMOVE (uctx->uctx_dprocTrapQ);
38568 +
38569 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38570 +
38571 +       if ((res = resolve_dproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
38572 +           return (res);
38573 +       
38574 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38575 +    }
38576 +
38577 +    if (uctx->uctx_status & UCTX_DPROC_QUEUE_FULL)
38578 +       user_stop_stopping (uctx, UCTX_DPROC_QUEUE_FULL);
38579 +    
38580 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38581 +    return (UTS_FINISHED);
38582 +}
38583 +
38584 +static int
38585 +resolve_tproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38586 +{
38587 +    unsigned long flags;
38588 +    int res;
38589 +    
38590 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38591 +    while (! RING_QUEUE_EMPTY (uctx->uctx_tprocTrapQ))
38592 +    {
38593 +       ELAN4_TPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps);
38594 +       
38595 +       (void) RING_QUEUE_REMOVE (uctx->uctx_tprocTrapQ);
38596 +
38597 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38598 +
38599 +       if ((res = resolve_tproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
38600 +           return (res);
38601 +       
38602 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38603 +    }
38604 +
38605 +    if (uctx->uctx_status & UCTX_TPROC_QUEUE_FULL)
38606 +       user_stop_stopping (uctx, UCTX_TPROC_QUEUE_FULL);
38607 +
38608 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38609 +    return (UTS_FINISHED);
38610 +}
38611 +
38612 +static int
38613 +resolve_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38614 +{
38615 +    unsigned long flags;
38616 +    int i, res;
38617 +
38618 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38619 +    for (i = 0; i < 2; i++)
38620 +       if (uctx->uctx_iprocTrap[i].ut_state == UTS_IPROC_TRAPPED)
38621 +       {
38622 +           uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_RESOLVING;
38623 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
38624 +           
38625 +           if ((res = resolve_iproc_trap (uctx, utrapp, i)) != UTS_FINISHED)
38626 +               return (res);
38627 +           
38628 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38629 +       }
38630 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38631 +    
38632 +    return (UTS_FINISHED);
38633 +}
38634 +
38635 +static int
38636 +resolve_all_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38637 +{
38638 +    int res;
38639 +
38640 +    if ((res = resolve_iproc_traps (uctx, utrapp)) != UTS_FINISHED ||
38641 +       (res = resolve_cproc_traps (uctx, utrapp)) != UTS_FINISHED ||
38642 +       (res = resolve_eproc_traps (uctx, utrapp)) != UTS_FINISHED ||
38643 +       (res = resolve_dproc_traps (uctx, utrapp)) != UTS_FINISHED ||
38644 +       (res = resolve_tproc_traps (uctx, utrapp)) != UTS_FINISHED)
38645 +       return (res);
38646 +
38647 +    if (uctx->uctx_status & UCTX_OVERFLOW_REASONS)
38648 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, uctx->uctx_status));
38649 +
38650 +    if (uctx->uctx_status & UCTX_ERROR_REASONS)
38651 +       return (deliver_trap (utrapp, UTS_QUEUE_ERROR, UTS_NOPROC, NULL, uctx->uctx_status));
38652 +
38653 +    return (UTS_FINISHED);
38654 +}
38655 +
38656 +static int
38657 +execute_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38658 +{
38659 +    unsigned long flags;
38660 +    int i;
38661 +
38662 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38663 +    for (i = 0; i < 2; i++)
38664 +       switch (uctx->uctx_iprocTrap[i].ut_state)
38665 +       {
38666 +       case UTS_IPROC_EXECUTE_PACKET:
38667 +           uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_EXECUTING;
38668 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
38669 +           
38670 +           return (deliver_trap (utrapp, UTS_EXECUTE_PACKET, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i));
38671 +
38672 +       case UTS_IPROC_NETWORK_ERROR:
38673 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
38674 +           
38675 +           return (deliver_trap (utrapp, UTS_NETWORK_ERROR_TRAP, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i));
38676 +       }
38677 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38678 +    
38679 +    return (UTS_FINISHED);
38680 +}
38681 +
38682 +static int
38683 +progress_neterr (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
38684 +{
38685 +    unsigned long flags;
38686 +
38687 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38688 +    if (! RING_QUEUE_EMPTY (uctx->uctx_msgQ))
38689 +    {
38690 +       ELAN4_NETERR_MSG msg = *RING_QUEUE_FRONT (uctx->uctx_msgQ, uctx->uctx_msgs);
38691 +       
38692 +       (void) RING_QUEUE_REMOVE (uctx->uctx_msgQ);
38693 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38694 +       
38695 +       return deliver_trap (utrapp, UTS_NETWORK_ERROR_MSG, UTS_NETERR_MSG, &msg, user_location2process (uctx, msg.msg_sender));
38696 +    }
38697 +    
38698 +    if (uctx->uctx_status & UCTX_NETERR_TIMER)
38699 +    {
38700 +       uctx->uctx_status &= ~UCTX_NETERR_TIMER;
38701 +
38702 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38703 +       
38704 +       return deliver_trap (utrapp, UTS_NETWORK_ERROR_TIMER, UTS_NOPROC, NULL);
38705 +    }
38706 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38707 +    
38708 +    return (UTS_FINISHED);
38709 +}
38710 +
38711 +static void
38712 +restart_command_queues (USER_CTXT *uctx)
38713 +{
38714 +    struct list_head *entry;
38715 +
38716 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
38717 +    
38718 +    list_for_each (entry, &uctx->uctx_cqlist) {
38719 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
38720 +       
38721 +       if (ucq->ucq_state == UCQ_NEEDS_RESTART)
38722 +       {
38723 +           ucq->ucq_state = UCQ_RUNNING;
38724 +           
38725 +           elan4_restartcq (uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq);
38726 +       }
38727 +    }
38728 +}
38729 +
38730 +static int
38731 +restart_dmas (USER_CTXT *uctx)
38732 +{
38733 +    PRINTF (uctx, DBG_TRAP, "restart_dmas: back=%d front=%d\n", uctx->uctx_dmaQ.q_back, uctx->uctx_dmaQ.q_front);
38734 +
38735 +    while (! RING_QUEUE_EMPTY (uctx->uctx_dmaQ))
38736 +    {
38737 +       if (! user_ddcq_check (uctx, 7))
38738 +           return (0);
38739 +
38740 +       user_ddcq_run_dma (uctx, RING_QUEUE_FRONT(uctx->uctx_dmaQ, uctx->uctx_dmas));
38741 +       
38742 +       (void) RING_QUEUE_REMOVE (uctx->uctx_dmaQ);
38743 +    }
38744 +
38745 +    return (1);
38746 +}
38747 +
38748 +static int
38749 +restart_threads (USER_CTXT *uctx)
38750 +{
38751 +    PRINTF (uctx, DBG_TRAP, "restart_threads: back=%d front=%d\n", uctx->uctx_threadQ.q_back, uctx->uctx_threadQ.q_front);
38752 +
38753 +    while (! RING_QUEUE_EMPTY (uctx->uctx_threadQ))
38754 +    {
38755 +       if (! user_ddcq_check (uctx, 7))
38756 +           return (0);
38757 +
38758 +       user_ddcq_run_thread (uctx, RING_QUEUE_FRONT(uctx->uctx_threadQ, uctx->uctx_threads));
38759 +       
38760 +       (void) RING_QUEUE_REMOVE (uctx->uctx_threadQ);
38761 +    }
38762 +
38763 +    return (1);
38764 +}
38765 +
38766 +int
38767 +user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr)
38768 +{
38769 +    PRINTF2 (uctx, DBG_RESUME, "user_resume_eproc_trap: addr=%llx -> %s\n", addr, user_ddcq_check(uctx, 2) ? "success" : "EAGAIN");
38770 +
38771 +    if (! user_ddcq_check (uctx, 2))
38772 +       return (-EAGAIN);
38773 +
38774 +    user_ddcq_setevent (uctx, addr);
38775 +
38776 +    return (0);
38777 +}
38778 +
38779 +int
38780 +user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx)
38781 +{
38782 +    struct list_head *entry;
38783 +    unsigned long flags;
38784 +
38785 +    PRINTF1 (uctx, DBG_RESUME, "user_resume_cproc_trap: indx=%d\n", indx);
38786 +
38787 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38788 +
38789 +    list_for_each (entry, &uctx->uctx_cqlist) {
38790 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
38791 +       
38792 +       if (elan4_cq2idx(ucq->ucq_cq) == indx && ucq->ucq_state == UCQ_STOPPED && !(ucq->ucq_flags & UCQ_SYSTEM))
38793 +       {
38794 +           ucq->ucq_state = UCQ_NEEDS_RESTART;
38795 +           
38796 +           user_signal_trap (uctx);
38797 +
38798 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38799 +           return (0);
38800 +       }
38801 +    }
38802 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38803 +
38804 +    return (-EINVAL);
38805 +}
38806 +
38807 +int
38808 +user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma)
38809 +{
38810 +    unsigned long flags;
38811 +    int res = 0;
38812 +
38813 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38814 +    if (RING_QUEUE_FULL (uctx->uctx_dmaQ))
38815 +       res = -ENOMEM;
38816 +    else
38817 +    {
38818 +       *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma;
38819 +       (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
38820 +
38821 +       user_signal_trap (uctx);
38822 +    }
38823 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38824 +    return (res);
38825 +}
38826 +
38827 +int
38828 +user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs)
38829 +{
38830 +    unsigned long flags;
38831 +    int res = 0;
38832 +
38833 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38834 +    if (RING_QUEUE_FULL (uctx->uctx_threadQ))
38835 +       res = -ENOMEM;
38836 +    else
38837 +    {
38838 +       *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs;
38839 +       (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
38840 +
38841 +       user_signal_trap (uctx);
38842 +    }
38843 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38844 +    return (res);
38845 +}
38846 +
38847 +int
38848 +user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans,
38849 +                       E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap)
38850 +{
38851 +    unsigned long flags;
38852 +    int res = 0;
38853 +
38854 +    if (channel >= 2)
38855 +       return (-EINVAL);
38856 +
38857 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38858 +    if (uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_STOPPED &&
38859 +       uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_EXECUTING &&
38860 +       uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_NETWORK_ERROR)
38861 +       res = -EINVAL;
38862 +    else
38863 +    {
38864 +       ELAN4_IPROC_TRAP *trap = &uctx->uctx_iprocTrap[channel].ut_trap;
38865 +
38866 +       if (trans < trap->tr_numTransactions)
38867 +       {
38868 +           PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> execute\n", trans);
38869 +
38870 +           uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_EXECUTE_PACKET;
38871 +           trap->tr_trappedTrans                  = trans;
38872 +           trap->tr_transactions[trans]           = *hdrp;
38873 +           trap->tr_dataBuffers[trans]            = *datap;
38874 +       }
38875 +       else
38876 +       {
38877 +           PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> running\n", trans);
38878 +
38879 +           uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_RUNNING;
38880 +       
38881 +           user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
38882 +       }
38883 +    }
38884 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38885 +    
38886 +    return (res);
38887 +}
38888 +
38889 +int
38890 +__categorise_command (E4_uint64 command, int *cmdSize)
38891 +{
38892 +    switch (command & 0x3)
38893 +    {
38894 +    case RUN_THREAD_CMD: *cmdSize = 7; break;
38895 +       
38896 +    default:
38897 +       switch (command & 0x7)
38898 +       {
38899 +       case WRITE_DWORD_CMD: *cmdSize = 2; break;
38900 +       case ADD_DWORD_CMD:   *cmdSize = 2; break;
38901 +           
38902 +       default:
38903 +           switch (command & 0xF)
38904 +           {
38905 +           case OPEN_STEN_PKT_CMD:
38906 +               *cmdSize = 1;
38907 +               return 1;
38908 +               
38909 +           case COPY64_CMD:    *cmdSize = 2; break;
38910 +           case GUARD_CMD:     *cmdSize = 1; break;
38911 +           case INTERRUPT_CMD: *cmdSize = 1; break;
38912 +           case RUN_DMA_CMD:   *cmdSize = 7; break;
38913 +               
38914 +           default:
38915 +               switch (command & 0x1f)
38916 +               {
38917 +               case SEND_TRANS_CMD:
38918 +                   *cmdSize = 2 + (((command >> 16) & TR_SIZE_MASK) >> TR_SIZE_SHIFT);
38919 +                   return 2;
38920 +                   
38921 +               case SET_EVENT_CMD:    *cmdSize = 1; break;
38922 +               case SET_EVENTN_CMD:   *cmdSize = 2; break;
38923 +               case WAIT_EVENT_CMD:   *cmdSize = 4; break;
38924 +
38925 +               default:
38926 +                   switch (command & 0x3f)
38927 +                   {
38928 +                   case NOP_CMD:            *cmdSize = 1; break;
38929 +                   case MAKE_EXT_CLEAN_CMD: *cmdSize = 1; break;
38930 +                   default:
38931 +                       return 3;
38932 +                   }
38933 +                   break;
38934 +               }
38935 +           }
38936 +       }
38937 +    }
38938 +
38939 +    return 0;
38940 +}
38941 +
38942 +int
38943 +__whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize)
38944 +{
38945 +    /* Move onto next command */
38946 +    while (cmdSize-- && (*commandPtr) != insertPtr)
38947 +       *commandPtr = ((*commandPtr) & ~(cqSize-1)) | (((*commandPtr) + sizeof (E4_uint64)) & (cqSize-1));
38948 +
38949 +    return cmdSize == -1;
38950 +}
38951 +
38952 +int
38953 +user_neterr_sten (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop)
38954 +{
38955 +    ELAN4_DEV        *dev   = uctx->uctx_ctxt.ctxt_dev;
38956 +    int                      found = 0;
38957 +    struct list_head *el;
38958 +
38959 +    user_swapout (uctx, UCTX_NETERR_FIXUP);
38960 +
38961 +    kmutex_lock (&uctx->uctx_cqlock);
38962 +    list_for_each (el, &uctx->uctx_cqlist) {
38963 +       USER_CQ *ucq = list_entry (el, USER_CQ, ucq_link);
38964 +       
38965 +       if ((ucq->ucq_cq->cq_perm & CQ_STENEnableBit) != 0)
38966 +       {
38967 +           sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc));
38968 +           E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
38969 +           sdramaddr_t   insertPtr    = (queuePtrs & CQ_PtrMask);
38970 +           sdramaddr_t   commandPtr   = CQ_CompletedPtr (queuePtrs);
38971 +           unsigned int  cqSize       = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
38972 +           E4_uint64     openCommand  = 0;
38973 +
38974 +           if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue))
38975 +           {
38976 +               E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue));
38977 +               
38978 +               for (; (oooMask & 1) != 0; oooMask >>= 1)
38979 +                   insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1));
38980 +           }
38981 +
38982 +           while (commandPtr != insertPtr)
38983 +           {
38984 +               E4_uint64    command = elan4_sdram_readq (dev, commandPtr);
38985 +               sdramaddr_t  identifyPtr;
38986 +               unsigned int cmdSize;
38987 +               
38988 +               switch (__categorise_command (command, &cmdSize))
38989 +               {
38990 +               case 0:
38991 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
38992 +                   break;
38993 +                   
38994 +               case 1:
38995 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d OPEN %llx\n", elan4_cq2num (ucq->ucq_cq), command);
38996 +                   
38997 +                   if ((command >> 32) == vp)
38998 +                       openCommand = command;
38999 +
39000 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
39001 +                   break;
39002 +                   
39003 +               case 2:
39004 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d SENDTRANS %llx\n", elan4_cq2num (ucq->ucq_cq), command);
39005 +                   
39006 +                   if (openCommand == 0)
39007 +                       (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
39008 +                   else
39009 +                   {
39010 +                       switch ((command >> 16) & (TR_OPCODE_MASK | TR_SIZE_MASK))
39011 +                       {
39012 +                       case TR_IDENTIFY  & (TR_OPCODE_MASK | TR_SIZE_MASK):
39013 +                       case TR_REMOTEDMA & (TR_OPCODE_MASK | TR_SIZE_MASK):
39014 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_IDENTIFY/TR_REMOTEDMA\n");
39015 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + sizeof (E4_uint64)) & (cqSize-1));
39016 +                           break;
39017 +                           
39018 +                       case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK):
39019 +                       case TR_INPUT_Q_COMMIT    & (TR_OPCODE_MASK | TR_SIZE_MASK):
39020 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_SETEVENT_IDENTIFY/TR_INPUT_Q_COMMIT\n");
39021 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 2*sizeof (E4_uint64)) & (cqSize-1));
39022 +                           break;
39023 +                           
39024 +                       case TR_ADDWORD & (TR_OPCODE_MASK | TR_SIZE_MASK):
39025 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_ADDWORD\n");
39026 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 3*sizeof (E4_uint64)) & (cqSize-1));
39027 +                           break;
39028 +                           
39029 +                       case TR_TESTANDWRITE & (TR_OPCODE_MASK | TR_SIZE_MASK):
39030 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_TESTANDWRITE\n");
39031 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 4*sizeof (E4_uint64)) & (cqSize-1));
39032 +                           break;
39033 +                           
39034 +                       default:
39035 +                           identifyPtr = 0;
39036 +                       }
39037 +                       
39038 +                       if (! __whole_command (&commandPtr, insertPtr, cqSize, cmdSize))
39039 +                       {
39040 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: not whole command\n");
39041 +                           openCommand = 0;
39042 +                       }
39043 +
39044 +                       else if (identifyPtr)
39045 +                       {
39046 +                           E4_uint64 tcookie = elan4_sdram_readq (dev, identifyPtr);
39047 +                           
39048 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie=%llx [%llx]\n", tcookie, cookie);
39049 +                           
39050 +                           if (tcookie == cookie)
39051 +                           {
39052 +                               unsigned int vchan = (openCommand >> 4) & 0x1f;
39053 +                               
39054 +                               PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie matches - vchan=%d\n", vchan);
39055 +                               
39056 +                               if (! waitforeop)
39057 +                               {
39058 +                                   /* Alter the CQ_AckBuffer for this channel to indicate an
39059 +                                    * ack was received */
39060 +                                   E4_uint64 value  = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers));
39061 +                                   E4_uint64 nvalue = ((value & ~((E4_uint64)0xf << ((vchan & 0xf) << 2))) |
39062 +                                                       ((E4_uint64) PackOk << ((vchan & 0xf) << 2)));
39063 +                                   
39064 +                                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: CQ_AckBuffers %llx -> %llx\n", value, nvalue);
39065 +                                   
39066 +                                   elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), nvalue);
39067 +                                   pioflush_sdram (dev);
39068 +                               }
39069 +                               
39070 +                               found++;
39071 +                           }
39072 +                           openCommand = 0;
39073 +                       }
39074 +                       
39075 +                       if ((command >> 16) & TR_LAST_AND_SEND_ACK)
39076 +                           openCommand = 0;
39077 +                   }
39078 +                   break;
39079 +                   
39080 +               case 3:
39081 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: invalid command %llx\n", command);
39082 +                   kmutex_unlock (&uctx->uctx_cqlock);
39083 +                   return -EINVAL;
39084 +               }
39085 +               
39086 +           }
39087 +       }
39088 +    }
39089 +    kmutex_unlock (&uctx->uctx_cqlock);
39090 +
39091 +    user_swapin (uctx, UCTX_NETERR_FIXUP);
39092 +
39093 +    return found;
39094 +}
39095 +
39096 +int
39097 +user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop)
39098 +{
39099 +    unsigned long flags;
39100 +    int found = 0;
39101 +    int idx;
39102 +
39103 +    user_swapout (uctx, UCTX_NETERR_FIXUP);
39104 +
39105 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39106 +    RING_QUEUE_ITERATE (uctx->uctx_dmaQ, idx) {
39107 +       E4_DMA *dma = &uctx->uctx_dmas[idx];
39108 +
39109 +       if (dma->dma_vproc == vp && dma->dma_cookie == cookie)
39110 +       {
39111 +           PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness");
39112 +
39113 +           if (! waitforeop) 
39114 +           {
39115 +               dma->dma_dstEvent = 0;
39116 +               dma->dma_typeSize = DMA_ShMemWrite | DMA_Context (dma->dma_typeSize);
39117 +           }
39118 +           found++;
39119 +       }
39120 +    }
39121 +
39122 +    RING_QUEUE_ITERATE (uctx->uctx_dprocTrapQ, idx) {
39123 +       ELAN4_DPROC_TRAP *trap = &uctx->uctx_dprocTraps[idx];
39124 +
39125 +       if (trap->tr_desc.dma_vproc == vp && trap->tr_desc.dma_cookie == cookie)
39126 +       {
39127 +           PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaTrapQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness");
39128 +
39129 +           if (! waitforeop) 
39130 +           {
39131 +               trap->tr_desc.dma_dstEvent = 0;
39132 +               trap->tr_desc.dma_typeSize = DMA_ShMemWrite | DMA_Context (trap->tr_desc.dma_typeSize);
39133 +           }
39134 +           found++;
39135 +       }
39136 +    }
39137 +
39138 +    /* The device driver command queue should be empty at this point ! */
39139 +    if (user_ddcq_flush (uctx) == 0)
39140 +       found = -EAGAIN;
39141 +
39142 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39143 +
39144 +    /* The device driver command queue should be empty at this point ! */
39145 +    if (user_ddcq_flush (uctx) == 0)
39146 +       found = -EAGAIN;
39147 +    
39148 +    user_swapin (uctx, UCTX_NETERR_FIXUP);
39149 +
39150 +    return found;
39151 +}
39152 +
39153 +int
39154 +user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks)
39155 +{
39156 +    unsigned long entered = jiffies;
39157 +    unsigned int  need_reenter = 0;
39158 +    unsigned long flags;
39159 +    int           res;
39160 +
39161 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39162 +
39163 +    PRINTF1 (uctx, DBG_TRAP, "user_trap_handler: entered state=%d\n", uctx->uctx_trap_state);
39164 +
39165 +    uctx->uctx_trap_count++;
39166 +    
39167 +    for (;;)
39168 +    {
39169 +       if (uctx->uctx_status & UCTX_SWAPPED_REASONS)
39170 +       {
39171 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting on swapped reasons\n");
39172 +           
39173 +           res = UTS_FINISHED;
39174 +           goto no_more_to_do;
39175 +       }
39176 +
39177 +       if ((long) (jiffies - entered) > HZ)
39178 +       {
39179 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting for reschedule\n");
39180 +           res = UTS_RESCHEDULE;
39181 +           goto no_more_to_do;
39182 +       }
39183 +       
39184 +       switch (uctx->uctx_trap_state)
39185 +       {
39186 +       case UCTX_TRAP_ACTIVE:
39187 +           uctx->uctx_trap_state = UCTX_TRAP_SLEEPING;
39188 +           
39189 +           if (nticks == 0 || need_reenter || kcondvar_timedwaitsig (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags, lbolt + nticks) != CV_RET_NORMAL)
39190 +           {
39191 +               PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting by kcondvar_timedwaitsig\n");
39192 +
39193 +               res = UTS_FINISHED;
39194 +               goto no_more_to_do;
39195 +           }
39196 +
39197 +           /* Have slept above, so resample entered */
39198 +           entered = jiffies;
39199 +           
39200 +           uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
39201 +           continue;
39202 +
39203 +       case UCTX_TRAP_IDLE:
39204 +       case UCTX_TRAP_SIGNALLED:
39205 +           uctx->uctx_trap_state = UCTX_TRAP_ACTIVE;
39206 +           break;
39207 +       }
39208 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39209 +
39210 +       PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: resolve traps - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
39211 +
39212 +       switch ((res = resolve_all_traps (uctx, utrapp)))
39213 +       {
39214 +       case UTS_FINISHED:
39215 +           break;
39216 +           
39217 +       case UTS_RESCHEDULE:
39218 +           need_reenter++;
39219 +           break;
39220 +
39221 +       default:
39222 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39223 +           goto no_more_to_do;
39224 +       }
39225 +
39226 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39227 +       if (! user_ddcq_flush (uctx))
39228 +       {
39229 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq not flushed - re-enter\n");
39230 +           need_reenter++;
39231 +           continue;
39232 +       }
39233 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39234 +
39235 +       if ((res = progress_neterr (uctx, utrapp)) != UTS_FINISHED)
39236 +       {
39237 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39238 +           goto no_more_to_do;
39239 +       }
39240 +
39241 +       if ((res = execute_iproc_traps (uctx, utrapp)) != UTS_FINISHED)
39242 +       {
39243 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39244 +           goto no_more_to_do;
39245 +       }
39246 +
39247 +       PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: restart items - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
39248 +
39249 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39250 +       if (UCTX_RUNNABLE (uctx))
39251 +       {
39252 +           restart_command_queues (uctx);
39253 +
39254 +           if (! restart_threads (uctx) || ! restart_dmas (uctx))
39255 +           {
39256 +               PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq full - re-enter\n");
39257 +               need_reenter++;
39258 +           }
39259 +       }
39260 +    }
39261 + no_more_to_do:
39262 +    uctx->uctx_trap_state = UCTX_TRAP_IDLE;
39263 +
39264 +    /*
39265 +     * Always ensure that the command queue is flushed with a flow control
39266 +     * write, so that on the next trap we (hopefully) find it empty and so
39267 +     * can immediately restart the context.   Also if we need to be re-enter
39268 +     * the trap handler and don't have an interrupt outstanding, then issue
39269 +     * one now.
39270 +     */
39271 +    user_ddcq_flush (uctx);
39272 +    if (need_reenter && uctx->uctx_ddcq_intr == 0)
39273 +    {
39274 +       uctx->uctx_ddcq_intr++;
39275 +       user_ddcq_intr (uctx);
39276 +    }
39277 +
39278 +    if (--uctx->uctx_trap_count == 0 && (uctx->uctx_status & UCTX_SWAPPING))
39279 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
39280 +
39281 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39282 +
39283 +    PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: finished state=%d res=%d\n", uctx->uctx_trap_state, res);
39284 +
39285 +    return (res == UTS_EFAULT ? -EFAULT : 0);
39286 +}
39287 +
39288 +USER_CQ *
39289 +user_alloccq (USER_CTXT *uctx, unsigned cqsize, unsigned perm, unsigned uflags)
39290 +{
39291 +    USER_CQ      *ucq;
39292 +    unsigned long flags;
39293 +
39294 +    KMEM_ZALLOC (ucq, USER_CQ *, sizeof (USER_CQ), 1);
39295 +
39296 +    if (ucq == (USER_CQ *) NULL)
39297 +       return ERR_PTR(-ENOMEM);
39298 +    
39299 +    /* NOTE - do not allow the user to create high-priority queues as we only flush through the low-priority run queues */
39300 +    if ((ucq->ucq_cq = elan4_alloccq (&uctx->uctx_ctxt, cqsize, perm, (uflags & UCQ_REORDER) ? CQ_Reorder : 0)) == NULL)
39301 +    {
39302 +       KMEM_FREE (ucq, sizeof (USER_CQ));
39303 +       
39304 +       PRINTF2 (uctx, DBG_CQ, "user_alloccq: failed elan4_allocq cqsize %d uflags %x\n", cqsize, uflags);
39305 +
39306 +       return ERR_PTR(-ENOMEM);
39307 +    }
39308 +    
39309 +    atomic_set (&ucq->ucq_ref, 1);
39310 +
39311 +    ucq->ucq_state = UCQ_RUNNING;
39312 +    ucq->ucq_flags = uflags;
39313 +    
39314 +    PRINTF3 (uctx, DBG_CQ, "user_alloccq: ucq=%p idx=%d cqnum=%d\n", ucq, elan4_cq2idx (ucq->ucq_cq), elan4_cq2num(ucq->ucq_cq));
39315 +
39316 +    /* chain it onto the context */
39317 +    kmutex_lock (&uctx->uctx_cqlock);
39318 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39319 +    list_add (&ucq->ucq_link, &uctx->uctx_cqlist);
39320 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39321 +    kmutex_unlock (&uctx->uctx_cqlock);
39322 +
39323 +    return (ucq);
39324 +}
39325 +
39326 +USER_CQ *
39327 +user_findcq (USER_CTXT *uctx, unsigned idx)
39328 +{
39329 +    struct list_head *entry;
39330 +    unsigned long flags;
39331 +
39332 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39333 +    list_for_each (entry, &uctx->uctx_cqlist) {
39334 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
39335 +
39336 +       if (elan4_cq2idx(ucq->ucq_cq) == idx)
39337 +       {
39338 +           atomic_inc (&ucq->ucq_ref);
39339 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39340 +           return (ucq);
39341 +       }
39342 +    }
39343 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39344 +
39345 +    return (NULL);
39346 +}
39347 +
39348 +void
39349 +user_dropcq (USER_CTXT *uctx, USER_CQ *ucq)
39350 +{
39351 +    unsigned long flags;
39352 +
39353 +    PRINTF2 (uctx, DBG_CQ, "user_dropcq: ucq=%p ref=%d\n", ucq, atomic_read (&ucq->ucq_ref));
39354 +
39355 +    kmutex_lock (&uctx->uctx_cqlock);
39356 +    if (! atomic_dec_and_test (&ucq->ucq_ref))
39357 +    {
39358 +       kmutex_unlock (&uctx->uctx_cqlock);
39359 +       return;
39360 +    }
39361 +
39362 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39363 +    list_del (&ucq->ucq_link);
39364 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39365 +
39366 +    kmutex_unlock (&uctx->uctx_cqlock);
39367 +
39368 +    elan4_freecq (&uctx->uctx_ctxt, ucq->ucq_cq);
39369 +
39370 +    KMEM_FREE (ucq, sizeof (USER_CQ));
39371 +}
39372 +
39373 +int
39374 +user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, 
39375 +                       unsigned ntproc_traps, unsigned nthreads, unsigned ndmas)
39376 +{
39377 +    ELAN4_DPROC_TRAP *dprocs;
39378 +    ELAN4_EPROC_TRAP *eprocs;
39379 +    ELAN4_TPROC_TRAP *tprocs;
39380 +    E4_DMA           *dmas;
39381 +    E4_ThreadRegs    *threads;
39382 +    ELAN4_NETERR_MSG *msgs;
39383 +    unsigned long     flags;
39384 +
39385 +    int nmsgs = NETERR_MSGS;
39386 +
39387 +    /* bounds check the values that have been passed in */
39388 +    if (ndproc_traps < 2 || ndproc_traps > 10000 ||
39389 +       ntproc_traps < 1 || ntproc_traps > 100   ||
39390 +       neproc_traps < 6 || neproc_traps > 10000 ||
39391 +       nthreads     < 2 || nthreads     > 10000 ||
39392 +       ndmas        < 2 || ndmas        > 10000)
39393 +       return -EINVAL;
39394 +
39395 +    if (uctx->uctx_dmas != NULL)
39396 +       return -EBUSY;
39397 +
39398 +    KMEM_ZALLOC (dprocs, ELAN4_DPROC_TRAP *, ndproc_traps * sizeof (ELAN4_DPROC_TRAP), 1);
39399 +    KMEM_ZALLOC (eprocs, ELAN4_EPROC_TRAP *, neproc_traps * sizeof (ELAN4_EPROC_TRAP), 1);
39400 +    KMEM_ZALLOC (tprocs, ELAN4_TPROC_TRAP *, ntproc_traps * sizeof (ELAN4_TPROC_TRAP), 1);
39401 +    KMEM_ZALLOC (threads, E4_ThreadRegs *, nthreads * sizeof (E4_ThreadRegs), 1);
39402 +    KMEM_ZALLOC (dmas, E4_DMA *, ndmas * sizeof (E4_DMA), 1);
39403 +    KMEM_ZALLOC (msgs, ELAN4_NETERR_MSG *, nmsgs * sizeof (ELAN4_NETERR_MSG), 1);
39404 +
39405 +    if (dprocs == NULL || eprocs == NULL || tprocs == NULL || dmas == NULL || threads == NULL || msgs == NULL)
39406 +    {
39407 +       if (dprocs != NULL) KMEM_FREE (dprocs, ndproc_traps * sizeof (ELAN4_DPROC_TRAP));
39408 +       if (eprocs != NULL) KMEM_FREE (eprocs, neproc_traps * sizeof (ELAN4_EPROC_TRAP));
39409 +       if (tprocs != NULL) KMEM_FREE (tprocs, ntproc_traps * sizeof (ELAN4_TPROC_TRAP));
39410 +       if (threads != NULL) KMEM_FREE (threads, nthreads * sizeof (E4_ThreadRegs));
39411 +       if (dmas != NULL) KMEM_FREE (dmas, ndmas * sizeof (E4_DMA));
39412 +       if (msgs != NULL) KMEM_FREE (msgs, nmsgs * sizeof (ELAN4_NETERR_MSG));
39413 +       
39414 +       return -ENOMEM;
39415 +    }
39416 +    
39417 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39418 +
39419 +    uctx->uctx_dprocTraps = dprocs;
39420 +    uctx->uctx_eprocTraps = eprocs;
39421 +    uctx->uctx_tprocTraps = tprocs;
39422 +    uctx->uctx_threads    = threads;
39423 +    uctx->uctx_dmas       = dmas;
39424 +    uctx->uctx_msgs       = msgs;
39425 +
39426 +    RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, ndproc_traps, 1 /* 1 for 2nd dma */);
39427 +    RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, ntproc_traps, 0);
39428 +    RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, neproc_traps, 5 /* 1 for command, 2 for dma, 2 for inputter */);
39429 +    RING_QUEUE_INIT (uctx->uctx_threadQ,    nthreads,     1);
39430 +    RING_QUEUE_INIT (uctx->uctx_dmaQ,       ndmas,        1);
39431 +    RING_QUEUE_INIT (uctx->uctx_msgQ,       nmsgs,        0);
39432 +
39433 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39434 +    
39435 +    return 0;
39436 +}
39437 +
39438 +USER_CTXT *
39439 +user_alloc (ELAN4_DEV *dev)
39440 +{
39441 +    USER_CTXT *uctx;
39442 +    int res;
39443 +    int i;
39444 +
39445 +    /* Allocate and initialise the context private data */
39446 +    KMEM_ZALLOC (uctx, USER_CTXT *, sizeof  (USER_CTXT), 1);
39447 +
39448 +    if (uctx == NULL)
39449 +       return ERR_PTR(-ENOMEM);
39450 +
39451 +    if (elan4_get_position (dev, &uctx->uctx_position) == ELAN_POS_UNKNOWN)
39452 +    {
39453 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
39454 +       return ERR_PTR(-EAGAIN);
39455 +    }
39456 +    
39457 +    if ((res = elan4_insertctxt (dev, &uctx->uctx_ctxt, &user_trap_ops)) != 0)
39458 +    {
39459 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
39460 +       return ERR_PTR(res);
39461 +    }
39462 +
39463 +    KMEM_GETPAGES (uctx->uctx_upage, ELAN4_USER_PAGE *, btopr (sizeof (ELAN4_USER_PAGE)), 1);
39464 +    if (uctx->uctx_upage == NULL)
39465 +    {
39466 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
39467 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
39468 +       return ERR_PTR(-ENOMEM);
39469 +    }
39470 +    
39471 +    if ((uctx->uctx_trampoline = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0)
39472 +    {
39473 +       KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
39474 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
39475 +
39476 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
39477 +       return ERR_PTR(-ENOMEM);
39478 +    }
39479 +    
39480 +    if ((uctx->uctx_routetable = elan4_alloc_routetable (dev, 4 /* 512 << 4 == 8192 entries */)) == NULL)
39481 +    {
39482 +       elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE);
39483 +       KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
39484 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
39485 +
39486 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
39487 +       return ERR_PTR(-ENOMEM);
39488 +    }
39489 +
39490 +    elan4_set_routetable (&uctx->uctx_ctxt, uctx->uctx_routetable);
39491 +
39492 +    /* initialise the trap and swap queues to be really full */
39493 +    RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, 0, 1);
39494 +    RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, 0, 1);
39495 +    RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, 0, 1);
39496 +    RING_QUEUE_INIT (uctx->uctx_threadQ, 0, 1);
39497 +    RING_QUEUE_INIT (uctx->uctx_dmaQ, 0, 1);
39498 +
39499 +    INIT_LIST_HEAD (&uctx->uctx_cent_list);
39500 +    INIT_LIST_HEAD (&uctx->uctx_vpseg_list);
39501 +    INIT_LIST_HEAD (&uctx->uctx_cqlist);
39502 +
39503 +    uctx->uctx_haltop.op_function = user_flush;
39504 +    uctx->uctx_haltop.op_arg      = uctx;
39505 +    uctx->uctx_haltop.op_mask     = INT_Halted|INT_Discarding;
39506 +
39507 +    uctx->uctx_dma_flushop.op_function = user_flush_dmas;
39508 +    uctx->uctx_dma_flushop.op_arg      = uctx;
39509 +
39510 +    kmutex_init (&uctx->uctx_vpseg_lock);
39511 +    kmutex_init (&uctx->uctx_cqlock);
39512 +    kmutex_init (&uctx->uctx_rgnmutex);
39513 +
39514 +    spin_lock_init (&uctx->uctx_spinlock);
39515 +    spin_lock_init (&uctx->uctx_rgnlock);
39516 +    spin_lock_init (&uctx->uctx_fault_lock);
39517 +
39518 +    kcondvar_init (&uctx->uctx_wait);
39519 +
39520 +    if ((uctx->uctx_ddcq = user_alloccq (uctx, CQ_Size1K, CQ_EnableAllBits, UCQ_SYSTEM)) == NULL)
39521 +    {
39522 +       user_free (uctx);
39523 +       return ERR_PTR(-ENOMEM);
39524 +    }
39525 +
39526 +    uctx->uctx_trap_count = 0;
39527 +    uctx->uctx_trap_state = UCTX_TRAP_IDLE;
39528 +    uctx->uctx_status     = 0 /* UCTX_DETACHED | UCTX_SWAPPED | UCTX_STOPPED */;
39529 +
39530 +    init_timer (&uctx->uctx_int_timer);
39531 +
39532 +    uctx->uctx_int_timer.function = user_signal_timer;
39533 +    uctx->uctx_int_timer.data     = (unsigned long) uctx;
39534 +    uctx->uctx_int_start          = jiffies;
39535 +    uctx->uctx_int_count          = 0;
39536 +    uctx->uctx_int_delay          = 0;
39537 +
39538 +    init_timer (&uctx->uctx_neterr_timer);
39539 +    uctx->uctx_neterr_timer.function = user_neterr_timer;
39540 +    uctx->uctx_neterr_timer.data     = (unsigned long) uctx;
39541 +
39542 +    uctx->uctx_upage->upage_ddcq_completed = 0;
39543 +    uctx->uctx_ddcq_completed              = 0;
39544 +    uctx->uctx_ddcq_insertcnt              = 0;
39545 +
39546 +    uctx->uctx_num_fault_save = num_fault_save;
39547 +    if (uctx->uctx_num_fault_save) 
39548 +    {  
39549 +       KMEM_ZALLOC (uctx->uctx_faults, FAULT_SAVE *, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save), 1);
39550 +        if ( uctx->uctx_faults == NULL) 
39551 +       {
39552 +           user_free (uctx);
39553 +           return ERR_PTR(-ENOMEM);
39554 +        }
39555 +    
39556 +        for (i = 0; i < uctx->uctx_num_fault_save; i++)
39557 +           uctx->uctx_faults[i].next = (i == (uctx->uctx_num_fault_save-1) ? NULL : &uctx->uctx_faults[i+1]);
39558 +
39559 +    }
39560 +    uctx->uctx_fault_list = uctx->uctx_faults;
39561 +
39562 +    return (uctx);
39563 +}
39564 +
39565 +void
39566 +user_free (USER_CTXT *uctx)
39567 +{
39568 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
39569 +
39570 +    user_swapout (uctx, UCTX_EXITING);
39571 +
39572 +    /* Detach from all input contexts */
39573 +    user_detach (uctx, NULL);
39574 +
39575 +    /* since we're single threaded here - (called from close()) */
39576 +    /* we don't need to hold the lock to drop the command queues */
39577 +    /* since they cannot be mapped into user space */
39578 +    while (! list_empty (&uctx->uctx_cqlist))
39579 +       user_dropcq (uctx, list_entry (uctx->uctx_cqlist.next, USER_CQ, ucq_link));
39580 +
39581 +    /* Free off all of vpseg_list */
39582 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39583 +    while (! list_empty (&uctx->uctx_vpseg_list))
39584 +       user_remove_vpseg(uctx, list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link));
39585 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39586 +    
39587 +    if (timer_pending (&uctx->uctx_int_timer))
39588 +       del_timer_sync (&uctx->uctx_int_timer);
39589 +    
39590 +    if (timer_pending (&uctx->uctx_neterr_timer))
39591 +       del_timer_sync (&uctx->uctx_neterr_timer);
39592 +
39593 +    if (uctx->uctx_dprocTraps)
39594 +       KMEM_FREE (uctx->uctx_dprocTraps, uctx->uctx_dprocTrapQ.q_size * sizeof (ELAN4_DPROC_TRAP));
39595 +    if (uctx->uctx_tprocTraps)
39596 +       KMEM_FREE (uctx->uctx_tprocTraps, uctx->uctx_tprocTrapQ.q_size * sizeof (ELAN4_TPROC_TRAP));
39597 +    if (uctx->uctx_eprocTraps)
39598 +       KMEM_FREE (uctx->uctx_eprocTraps, uctx->uctx_eprocTrapQ.q_size * sizeof (ELAN4_EPROC_TRAP));
39599 +    if (uctx->uctx_dmas)
39600 +       KMEM_FREE (uctx->uctx_dmas, uctx->uctx_dmaQ.q_size * sizeof (E4_DMA));
39601 +    if (uctx->uctx_msgs)
39602 +       KMEM_FREE (uctx->uctx_msgs, NETERR_MSGS * sizeof (ELAN4_NETERR_MSG));
39603 +    if (uctx->uctx_threads)
39604 +       KMEM_FREE (uctx->uctx_threads, uctx->uctx_threadQ.q_size * sizeof (E4_ThreadRegs));
39605 +    if (uctx->uctx_faults)
39606 +       KMEM_FREE (uctx->uctx_faults, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save));
39607 +
39608 +    if (uctx->uctx_intcookie_table)
39609 +       intcookie_free_table (uctx->uctx_intcookie_table);
39610 +
39611 +    elan4_set_routetable (&uctx->uctx_ctxt, NULL);
39612 +    elan4_free_routetable (dev, uctx->uctx_routetable);
39613 +
39614 +    /* Free off all USER_RGNs */
39615 +    user_freergns(uctx);
39616 +
39617 +    elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE);
39618 +
39619 +    /* Clear the PG_Reserved bit before free to avoid a memory leak */
39620 +    ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)));
39621 +    KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
39622 +
39623 +    elan4_removectxt (dev, &uctx->uctx_ctxt);
39624 +
39625 +    kcondvar_destroy (&uctx->uctx_wait);
39626 +
39627 +    spin_lock_destroy (&uctx->uctx_rgnlock);
39628 +    spin_lock_destroy (&uctx->uctx_spinlock);
39629 +
39630 +    kmutex_destroy (&uctx->uctx_rgnmutex);
39631 +    kmutex_destroy (&uctx->uctx_cqlock);
39632 +    kmutex_destroy (&uctx->uctx_vpseg_lock);
39633 +
39634 +    KMEM_FREE (uctx, sizeof (USER_CTXT));
39635 +}
39636 +
39637 +/*
39638 + * Local variables:
39639 + * c-file-style: "stroustrup"
39640 + * End:
39641 + */
39642 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/user_ddcq.c
39643 ===================================================================
39644 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/user_ddcq.c  2004-02-23 16:02:56.000000000 -0500
39645 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/user_ddcq.c       2005-07-28 14:52:52.847678888 -0400
39646 @@ -0,0 +1,226 @@
39647 +/*
39648 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
39649 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
39650 + * 
39651 + *    For licensing information please see the supplied COPYING file
39652 + *
39653 + */
39654 +
39655 +#ident "@(#)$Id: user_ddcq.c,v 1.15 2004/06/23 11:06:05 addy Exp $"
39656 +/*      $Source: /cvs/master/quadrics/elan4mod/user_ddcq.c,v $*/
39657 +
39658 +#include <qsnet/kernel.h>
39659 +
39660 +#include <elan4/debug.h>
39661 +#include <elan4/device.h>
39662 +#include <elan4/user.h>
39663 +#include <elan4/commands.h>
39664 +
39665 +#if PAGE_SIZE < CQ_CommandMappingSize
39666 +#  define ELAN4_COMMAND_QUEUE_MAPPING  PAGE_SIZE
39667 +#else
39668 +#  define ELAN4_COMMAND_QUEUE_MAPPING  CQ_CommandMappingSize
39669 +#endif
39670 +
39671 +/* The user device driver command queue is used for re-issuing 
39672 + * trapped items.  It is allocated as a 1K command queue, and
39673 + * we insert command flow writes every 256 words.
39674 + */
39675 +#define USER_CTRLFLOW_COUNT    256
39676 +
39677 +/* Flow control of the device driver command queue is handled by periodically 
39678 + * inserting dword writes into the command stream.   When you need to know 
39679 + * that the queue has been flushed, then you insert an extra control flow
39680 + * write into the command queue.   Should the queue not be flushed, but the
39681 + * trap handler be returning to user space, then it will also insert an 
39682 + * extra interrupt command to ensure that it is re-entered after the queue
39683 + * has been flushed.
39684 + *
39685 + * Note - we account the space for the interrupt command on each control 
39686 + * flow write so that we do not overflow the queue even if we end up 
39687 + * inserting an interrupt for every command flow write.  In general only
39688 + * a single interrupt should get inserted....
39689 + */
39690 +
39691 +#define user_ddcq_command_write(value,off) do { \
39692 +    PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_write: cmdptr=%x off=%d value=%llx\n", cmdptr, off, value);\
39693 +    writeq(value, cmdptr + (off << 3)); \
39694 +} while (0)
39695 +
39696 +#define user_ddcq_command_space(uctx)  \
39697 +    ((CQ_Size (uctx->uctx_ddcq->ucq_cq->cq_size)>>3) - ((uctx)->uctx_ddcq_insertcnt - (uctx)->uctx_upage->upage_ddcq_completed))
39698 +
39699 +#define user_ddcq_command_flow_write(uctx) do { \
39700 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
39701 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
39702 +\
39703 +    (uctx)->uctx_ddcq_completed = ((uctx)->uctx_ddcq_insertcnt += 3);\
39704 +\
39705 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_write: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
39706 +           (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
39707 +    user_ddcq_command_write (GUARD_CMD       | GUARD_ALL_CHANNELS,      0);\
39708 +    user_ddcq_command_write (WRITE_DWORD_CMD | (uctx)->uctx_upage_addr, 1);\
39709 +    user_ddcq_command_write ((uctx)->uctx_ddcq_completed,               2);\
39710 +} while (0)
39711 +
39712 +#define user_ddcq_command_flow_intr(uctx) do { \
39713 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
39714 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
39715 +\
39716 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_intr: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
39717 +           (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
39718 +    user_ddcq_command_write (INTERRUPT_CMD   | ELAN4_INT_COOKIE_DDCQ,   3);\
39719 +} while (0)
39720 +
39721 +#define user_ddcq_command_prologue(uctx, count) do { \
39722 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
39723 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
39724 +   PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_prologue: iptr=%llx cmdptr=%x\n", iptr, cmdptr);
39725 +
39726 +#define user_ddcq_command_epilogue(uctx, count, extra) \
39727 +   (uctx)->uctx_ddcq_insertcnt = iptr + (count);\
39728 +\
39729 +   PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_epilogue: iptr=%llx + %x + %x - completed %llx\n", iptr, count, extra, (uctx)->uctx_ddcq_completed);\
39730 +   if (((iptr) + (count) + (extra)) > ((uctx)->uctx_ddcq_completed + USER_CTRLFLOW_COUNT))\
39731 +       user_ddcq_command_flow_write(uctx); \
39732 +} while (0)
39733 +
39734 +int
39735 +user_ddcq_check (USER_CTXT *uctx, unsigned num)
39736 +{
39737 +    PRINTF (uctx, DBG_DDCQ, "user_check_ddcq: insert=%llx completed=%llx num=%d\n", 
39738 +           uctx->uctx_ddcq_insertcnt, uctx->uctx_upage->upage_ddcq_completed, num);
39739 +
39740 +    /* Ensure that there is enough space for the command we want to issue,
39741 +     * PLUS the guard/writeword for the control flow flush.
39742 +     * PLUS the interrupt command for rescheduling */
39743 +    if (user_ddcq_command_space (uctx) > (num + 4))
39744 +    {
39745 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: loads of space\n");
39746 +
39747 +       return (1);
39748 +    }
39749 +    
39750 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: not enough space - reschedule\n");
39751 +
39752 +    uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
39753 +    return (0);
39754 +}
39755 +
39756 +int
39757 +user_ddcq_flush (USER_CTXT *uctx)
39758 +{
39759 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
39760 +    USER_CQ   *ucq = uctx->uctx_ddcq;
39761 +
39762 +    switch (ucq->ucq_state)
39763 +    {
39764 +    case UCQ_TRAPPED:
39765 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: command queue is trapped\n");
39766 +       return (0);
39767 +       
39768 +    case UCQ_NEEDS_RESTART:
39769 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: restarting command queue\n");
39770 +
39771 +       if (UCTX_RUNNABLE (uctx))
39772 +       {
39773 +           ucq->ucq_state = UCQ_RUNNING;
39774 +           elan4_restartcq (dev, ucq->ucq_cq);
39775 +       }
39776 +       break;
39777 +    }
39778 +
39779 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: insertcnt=%llx completed=%llx [%llx]\n", 
39780 +           uctx->uctx_ddcq_insertcnt, uctx->uctx_ddcq_completed, uctx->uctx_upage->upage_ddcq_completed);
39781 +
39782 +    if (uctx->uctx_ddcq_completed != uctx->uctx_ddcq_insertcnt)
39783 +       user_ddcq_command_flow_write (uctx);
39784 +
39785 +    return (uctx->uctx_ddcq_completed == uctx->uctx_upage->upage_ddcq_completed);
39786 +}
39787 +
39788 +void
39789 +user_ddcq_intr (USER_CTXT *uctx)
39790 +{
39791 +    user_ddcq_command_flow_intr (uctx);
39792 +}
39793 +
39794 +void
39795 +user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma)
39796 +{
39797 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_dma: cookie=%llx vproc=%llx\n",  dma->dma_cookie, dma->dma_vproc);
39798 +
39799 +    user_ddcq_command_prologue(uctx, 7) {
39800 +
39801 +       user_ddcq_command_write ((dma->dma_typeSize & ~DMA_ContextMask) | RUN_DMA_CMD, 0);
39802 +       user_ddcq_command_write (dma->dma_cookie,                                      1);
39803 +       user_ddcq_command_write (dma->dma_vproc,                                       2);
39804 +       user_ddcq_command_write (dma->dma_srcAddr,                                     3);
39805 +       user_ddcq_command_write (dma->dma_dstAddr,                                     4);
39806 +       user_ddcq_command_write (dma->dma_srcEvent,                                    5);
39807 +       user_ddcq_command_write (dma->dma_dstEvent,                                    6);
39808 +
39809 +    } user_ddcq_command_epilogue (uctx, 7, 0);
39810 +}
39811 +
39812 +void
39813 +user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs)
39814 +{
39815 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_thread: PC=%llx SP=%llx\n", regs->Registers[0], regs->Registers[1]);
39816 +
39817 +    user_ddcq_command_prologue(uctx, 7) {
39818 +
39819 +       user_ddcq_command_write (regs->Registers[0] | RUN_THREAD_CMD, 0);
39820 +       user_ddcq_command_write (regs->Registers[1],                  1);
39821 +       user_ddcq_command_write (regs->Registers[2],                  2);
39822 +       user_ddcq_command_write (regs->Registers[3],                  3);
39823 +       user_ddcq_command_write (regs->Registers[4],                  4);
39824 +       user_ddcq_command_write (regs->Registers[5],                  5);
39825 +       user_ddcq_command_write (regs->Registers[6],                  6);
39826 +       
39827 +    } user_ddcq_command_epilogue (uctx, 7, 0);
39828 +}
39829 +
39830 +void
39831 +user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr)
39832 +{
39833 +    user_ddcq_command_prologue (uctx, 1) {
39834 +
39835 +       user_ddcq_command_write (SET_EVENT_CMD | addr, 0);
39836 +    
39837 +    } user_ddcq_command_epilogue (uctx, 1, 0);
39838 +}
39839 +
39840 +void
39841 +user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count)
39842 +{
39843 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_seteventn: addr=%llx count=%lx\n", addr, count);
39844 +
39845 +    user_ddcq_command_prologue (uctx, 2) {
39846 +
39847 +       user_ddcq_command_write (SET_EVENTN_CMD, 0);
39848 +       user_ddcq_command_write (addr | count,   1);
39849 +
39850 +    } user_ddcq_command_epilogue (uctx, 2, 0);
39851 +}
39852 +
39853 +void
39854 +user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1)
39855 +{
39856 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_waitevent: addr=%llx CountAndType=%llx Param=%llx,%llx\n", addr, CountAndType, Param0, Param1);
39857 +
39858 +    user_ddcq_command_prologue (uctx, 4) {
39859 +
39860 +       user_ddcq_command_write (WAIT_EVENT_CMD | addr, 0);
39861 +       user_ddcq_command_write (CountAndType,          1);
39862 +       user_ddcq_command_write (Param0,                2);
39863 +       user_ddcq_command_write (Param1,                3);
39864 +
39865 +    } user_ddcq_command_epilogue (uctx, 4, 0);
39866 +}
39867 +
39868 +/*
39869 + * Local variables:
39870 + * c-file-style: "stroustrup"
39871 + * End:
39872 + */
39873 Index: linux-2.6.5-7.191/drivers/net/qsnet/elan4/user_Linux.c
39874 ===================================================================
39875 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/elan4/user_Linux.c 2004-02-23 16:02:56.000000000 -0500
39876 +++ linux-2.6.5-7.191/drivers/net/qsnet/elan4/user_Linux.c      2005-07-28 14:52:52.847678888 -0400
39877 @@ -0,0 +1,377 @@
39878 +/*
39879 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
39880 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
39881 + * 
39882 + *    For licensing information please see the supplied COPYING file
39883 + *
39884 + */
39885 +
39886 +#ident "@(#)$Id: user_Linux.c,v 1.25.2.4 2005/01/18 14:36:10 david Exp $"
39887 +/*      $Source: /cvs/master/quadrics/elan4mod/user_Linux.c,v $*/
39888 +
39889 +#include <qsnet/kernel.h>
39890 +#include <qsnet/kpte.h>
39891 +
39892 +#include <linux/pci.h>
39893 +
39894 +#include <elan4/debug.h>
39895 +#include <elan4/device.h>
39896 +#include <elan4/user.h>
39897 +
39898 +static int
39899 +user_pteload (USER_CTXT *uctx, E4_Addr addr, physaddr_t phys, int perm)
39900 +{
39901 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
39902 +    E4_uint64  newpte = elan4mmu_phys2pte (dev, phys, perm);
39903 +    
39904 +    /*
39905 +     * On MPSAS we don't allocate a large enough context table, so 
39906 +     * if we see an address/context pair which would "alias" because
39907 +     * they differ in unchecked hash bits to a previous pteload, 
39908 +     * then we kill the application.
39909 +     */
39910 +    {
39911 +       unsigned hashval = (E4MMU_SHIFT_ADDR(addr, (dev->dev_pageshift[0]) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(uctx->uctx_ctxt.ctxt_num));
39912 +       
39913 +       if (dev->dev_rsvd_hashval[0] == 0xFFFFFFFF)
39914 +           dev->dev_rsvd_hashval[0] = hashval & dev->dev_rsvd_hashmask[0];
39915 +       
39916 +       if ((hashval & dev->dev_rsvd_hashmask[0]) != dev->dev_rsvd_hashval[0])
39917 +       {
39918 +           printk ("user_pteload: vaddr=%016llx ctxnum=%x -> [%x] overlaps %x - %x [hashidx=%x]\n", (unsigned long long) addr, 
39919 +                   uctx->uctx_ctxt.ctxt_num, hashval, hashval & dev->dev_rsvd_hashmask[0], dev->dev_rsvd_hashval[0],
39920 +                   E4MMU_HASH_INDEX (uctx->uctx_ctxt.ctxt_num, addr, dev->dev_pageshift[0], dev->dev_hashsize[0]-1));
39921 +           
39922 +           return -EFAULT;
39923 +       }
39924 +    }
39925 +
39926 +    if ((newpte & (PTE_PciNotLocal | PTE_CommandQueue)) == 0 && 
39927 +       ((addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (phys & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT))))
39928 +    {
39929 +       printk ("user_pteload: vaddr=%016llx incorrectly alias sdram at %lx\n", (unsigned long long) addr, 
39930 +               phys ^ pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
39931 +       return -EFAULT;
39932 +    }
39933 +
39934 +    if (newpte & PTE_PciNotLocal)
39935 +       PRINTF (uctx, DBG_FAULT, "user_pteload: addr=%llx -> pte=%llx (pci)\n", addr, newpte);
39936 +    else if (newpte & PTE_CommandQueue)
39937 +       PRINTF (uctx, DBG_FAULT, "user_pteload: addr=%llx -> pte=%llx (command)\n", addr, newpte);
39938 +    else
39939 +       PRINTF (uctx, DBG_FAULT, "user_pteload: addr=%llx -> pte=%llx (sdram)\n", addr, newpte);
39940 +
39941 +    elan4mmu_pteload (&uctx->uctx_ctxt, 0, addr, newpte);
39942 +
39943 +    return (0);
39944 +}
39945 +
39946 +int
39947 +user_load_range (USER_CTXT *uctx, E4_Addr eaddr, unsigned long nbytes, E4_uint32 fsr)
39948 +{
39949 +    ELAN4_DEV             *dev       = uctx->uctx_ctxt.ctxt_dev;
39950 +    struct mm_struct      *mm        = current->mm;
39951 +    int                    writeable = (AT_Perm(fsr) == AT_PermLocalDataWrite ||
39952 +                                       AT_Perm(fsr) == AT_PermRemoteWrite    ||
39953 +                                       AT_Perm(fsr) == AT_PermLocalEvent     ||
39954 +                                       AT_Perm(fsr) == AT_PermRemoteEvent);
39955 +    struct vm_area_struct *vma;
39956 +    int                    i, perm;
39957 +    unsigned long          len;
39958 +    unsigned long          maddr;
39959 +    physaddr_t            phys;
39960 +
39961 +    kmutex_lock (&uctx->uctx_rgnmutex);
39962 +
39963 +    while (nbytes > 0)
39964 +    {
39965 +       USER_RGN *rgn = user_rgnat_elan (uctx, eaddr);
39966 +
39967 +       if (rgn == NULL || ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, AT_Perm (fsr)))
39968 +       {
39969 +           PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx -> %s\n", eaddr, rgn == NULL ? "no mapping" : "no permission");
39970 +
39971 +           kmutex_unlock (&uctx->uctx_rgnmutex);
39972 +           return (rgn == NULL ? -EFAULT : -EPERM);
39973 +       }
39974 +
39975 +       if (writeable)
39976 +           perm = rgn->rgn_perm;
39977 +/* This is the correct code but it breaks the Eagle libraries (1.6.X) - backed out (addy 24.08.04)
39978 +       else if (AT_Perm(fsr) == AT_PermExecute && (rgn->rgn_perm & PERM_Mask) != PERM_LocExecute)
39979 +*/
39980 +       else if (AT_Perm(fsr) == AT_PermExecute)
39981 +           perm = PERM_LocRead | (rgn->rgn_perm & ~PERM_Mask);
39982 +       else
39983 +           perm = ELAN4_PERM_READONLY (rgn->rgn_perm & PERM_Mask) | (rgn->rgn_perm & ~PERM_Mask);
39984 +
39985 +       PRINTF (uctx, DBG_FAULT, "user_load_range: rgn=%p [%llx.%lx.%x]\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len);
39986 +
39987 +       len = ((rgn->rgn_ebase + rgn->rgn_len) - eaddr);
39988 +       if (len > nbytes)
39989 +           len = nbytes;
39990 +       nbytes -= len;
39991 +       
39992 +       maddr = rgn->rgn_mbase + (eaddr - rgn->rgn_ebase);
39993 +
39994 +       PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx->%llx -> %lx->%lx len=%x perm=%x\n", eaddr, 
39995 +               eaddr + len, maddr, maddr + len, len, perm);
39996 +
39997 +       down_read (&mm->mmap_sem);
39998 +       while (len > 0)
39999 +       {
40000 +           if ((vma = find_vma_intersection (mm, maddr, maddr + PAGE_SIZE)) == NULL ||
40001 +               (writeable && !(vma->vm_flags & VM_WRITE)))
40002 +           {
40003 +               PRINTF (DBG_USER, DBG_FAULT, "ctxt_pagefault: %s %lx\n", vma ? "no writeble at" : "no vma for", maddr);
40004 +               up_read (&mm->mmap_sem);
40005 +               kmutex_unlock (&uctx->uctx_rgnmutex);
40006 +               return (-EFAULT);
40007 +           }
40008 +
40009 +           spin_lock (&mm->page_table_lock);
40010 +           {
40011 +               pte_t *ptep_ptr;
40012 +               pte_t  ptep_value;
40013 +
40014 +               ptep_ptr = find_pte_map (mm, maddr);
40015 +               if (ptep_ptr) {
40016 +                   ptep_value = *ptep_ptr;
40017 +                   pte_unmap(ptep_ptr);
40018 +               }
40019 +
40020 +               PRINTF (uctx, DBG_FAULT, "user_load_range: %lx %s %s\n", maddr, writeable ? "writeable" : "readonly", 
40021 +                       !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " : 
40022 +                       writeable && !pte_write(ptep_value) ? "COW" : "OK");
40023 +               
40024 +               if (ptep_ptr == NULL || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value)) || !pte_read (ptep_value))
40025 +               {
40026 +                   spin_unlock (&mm->page_table_lock);
40027 +                   
40028 +                   make_pages_present(maddr, maddr + PAGE_SIZE);
40029 +                   
40030 +                   spin_lock (&mm->page_table_lock);
40031 +
40032 +                   ptep_ptr = find_pte_map (mm, maddr);
40033 +                   if (ptep_ptr) {
40034 +                       ptep_value = *ptep_ptr;
40035 +                       pte_unmap(ptep_ptr);
40036 +                   }
40037 +                   
40038 +                   if (ptep_ptr == NULL || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value)) || !pte_read (ptep_value))
40039 +                   {   
40040 +                       spin_unlock (&mm->page_table_lock);
40041 +                       up_read (&mm->mmap_sem);
40042 +                       kmutex_unlock (&uctx->uctx_rgnmutex);
40043 +                       return (-EFAULT);
40044 +                   }
40045 +               } 
40046 +               
40047 +               if (writeable)
40048 +                   pte_mkdirty(ptep_value);
40049 +               pte_mkyoung (ptep_value);
40050 +
40051 +               phys = pte_phys (ptep_value);
40052 +
40053 +               for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
40054 +               {
40055 +                   if (user_pteload (uctx, eaddr, phys, perm) < 0)
40056 +                   {
40057 +                       spin_unlock (&mm->page_table_lock);
40058 +                       up_read (&mm->mmap_sem);
40059 +                       kmutex_unlock (&uctx->uctx_rgnmutex);
40060 +                       return (-EFAULT);
40061 +                   }
40062 +                   
40063 +                   eaddr += (1 << dev->dev_pageshift[0]);
40064 +                   phys  += (1 << dev->dev_pageshift[0]);
40065 +               }
40066 +           }
40067 +           spin_unlock (&mm->page_table_lock);
40068 +               
40069 +           maddr += PAGE_SIZE;
40070 +           len   -= PAGE_SIZE;
40071 +       }
40072 +       up_read (&mm->mmap_sem);
40073 +    }
40074 +    kmutex_unlock (&uctx->uctx_rgnmutex);
40075 +
40076 +    PRINTF (uctx, DBG_FAULT, "user_load_range: alldone\n");
40077 +
40078 +    return (0);
40079 +}
40080 +
40081 +void
40082 +user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len)
40083 +{
40084 +    virtaddr_t             lim = addr + len - 1;
40085 +    struct vm_area_struct *vma;
40086 +
40087 +    down_read (&current->mm->mmap_sem);
40088 +
40089 +    if ((vma = find_vma (current->mm, addr)) != NULL)
40090 +    {
40091 +       do {
40092 +           unsigned long start = vma->vm_start;
40093 +           unsigned long end   = vma->vm_end;
40094 +
40095 +           if ((start-1) >= lim)
40096 +               break;
40097 +
40098 +           if (start < addr) start = addr;
40099 +           if ((end-1) > lim) end = lim+1;
40100 +               
40101 +           if (vma->vm_flags & VM_IO)
40102 +               continue;
40103 +
40104 +           user_unload_main (uctx, start, end - start);
40105 +
40106 +           make_pages_present (start, end);
40107 +
40108 +           user_update_main (uctx, current->mm, start, end - start);
40109 +
40110 +       } while ((vma = find_vma (current->mm, vma->vm_end)) != NULL);
40111 +    }
40112 +    up_read (&current->mm->mmap_sem);
40113 +}
40114 +
40115 +static void
40116 +user_update_range (USER_CTXT *uctx, int tbl, struct mm_struct *mm, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, int perm)
40117 +{
40118 +    ELAN4_DEV *dev    = uctx->uctx_ctxt.ctxt_dev;
40119 +    int        roperm = ELAN4_PERM_READONLY(perm & PERM_Mask) | (perm & ~PERM_Mask);
40120 +    int        nbytes;
40121 +
40122 +    while (len > 0)
40123 +    {
40124 +       pte_t *ptep_ptr;
40125 +       pte_t  ptep_value;
40126 +       
40127 +       ptep_ptr = find_pte_map (mm, maddr);
40128 +       if (ptep_ptr) {
40129 +           ptep_value = *ptep_ptr;
40130 +           pte_unmap(ptep_ptr);
40131 +       }
40132 +
40133 +       PRINTF (uctx, DBG_IOPROC, "user_update_range: %llx (%lx) %s\n", eaddr, maddr, 
40134 +               !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " : 
40135 +               !pte_write(ptep_value) ? "RO/COW" : "OK");
40136 +       
40137 +       if (ptep_ptr && !pte_none(ptep_value) && pte_present(ptep_value) && pte_read (ptep_value)) {
40138 +           physaddr_t phys_value = pte_phys(ptep_value);
40139 +           for (nbytes = 0; nbytes < PAGE_SIZE; nbytes += (1 << dev->dev_pageshift[0]))
40140 +           {
40141 +               user_pteload (uctx, eaddr, phys_value, pte_write (ptep_value) ? perm : roperm);
40142 +
40143 +               eaddr       += (1 << dev->dev_pageshift[0]);
40144 +               phys_value  += (1 << dev->dev_pageshift[0]);
40145 +           }
40146 +       }
40147 +
40148 +       maddr += PAGE_SIZE;
40149 +       len   -= PAGE_SIZE;
40150 +    }
40151 +}
40152 +
40153 +void
40154 +user_update_main (USER_CTXT *uctx, struct mm_struct *mm, virtaddr_t start, unsigned long len)
40155 +{
40156 +    USER_RGN     *rgn;
40157 +    unsigned long ssize;
40158 +    virtaddr_t    end = start + len - 1;
40159 +
40160 +    spin_lock (&uctx->uctx_rgnlock);
40161 +
40162 +    PRINTF (uctx, DBG_IOPROC, "user_update_main: start=%lx end=%lx\n", start, end);
40163 +
40164 +    for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext)
40165 +    {
40166 +       if (end < rgn->rgn_mbase)
40167 +           break;
40168 +       
40169 +       if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1)) 
40170 +       {
40171 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1);
40172 +
40173 +           user_update_range (uctx, 0 /* tbl */, mm, rgn->rgn_mbase, rgn->rgn_ebase, rgn->rgn_len, rgn->rgn_perm);
40174 +       }
40175 +       else if (start <= rgn->rgn_mbase)
40176 +       {
40177 +           ssize = end - rgn->rgn_mbase + 1;
40178 +
40179 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + ssize);
40180 +
40181 +           user_update_range (uctx, 0 /* tbl */, mm, rgn->rgn_mbase, rgn->rgn_ebase, ssize, rgn->rgn_perm);
40182 +       }
40183 +       else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
40184 +       {
40185 +           ssize = (rgn->rgn_mbase + rgn->rgn_len) - start;
40186 +
40187 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: end   %lx -> %lx\n", start, start + ssize);
40188 +
40189 +           user_update_range (uctx, 0 /* tbl */, mm, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize, rgn->rgn_perm);
40190 +       }
40191 +       else
40192 +       {
40193 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: middle %lx -> %lx\n", start, end);
40194 +
40195 +           user_update_range (uctx, 0 /* tbl */, mm, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), len, rgn->rgn_perm);
40196 +       }
40197 +    }
40198 +    spin_unlock (&uctx->uctx_rgnlock);
40199 +}
40200 +
40201 +void
40202 +user_unload_main (USER_CTXT *uctx, virtaddr_t start, unsigned long len)
40203 +{
40204 +    USER_RGN     *rgn;
40205 +    unsigned long ssize;
40206 +    virtaddr_t    end = start + len - 1;
40207 +
40208 +    spin_lock (&uctx->uctx_rgnlock);
40209 +
40210 +    PRINTF (uctx, DBG_IOPROC, "user_unload_main: start=%lx end=%lx\n", start, end);
40211 +
40212 +    for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext)
40213 +    {
40214 +       if (end < rgn->rgn_mbase)
40215 +           break;
40216 +       
40217 +       if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
40218 +       {
40219 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1);
40220 +
40221 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, rgn->rgn_len);
40222 +       }
40223 +       else if (start <= rgn->rgn_mbase)
40224 +       {
40225 +           ssize = end - rgn->rgn_mbase + 1;
40226 +
40227 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + ssize);
40228 +
40229 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, ssize);
40230 +       }
40231 +       else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
40232 +       {
40233 +           ssize = (rgn->rgn_mbase + rgn->rgn_len) - start;
40234 +           
40235 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: end   %lx -> %lx\n", start, start + ssize);
40236 +           
40237 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize);
40238 +       }
40239 +       else
40240 +       {
40241 +
40242 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: middle %lx -> %lx\n", start, end);
40243 +
40244 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), len);
40245 +       }
40246 +    }
40247 +    spin_unlock (&uctx->uctx_rgnlock);
40248 +}
40249 +
40250 +/*
40251 + * Local variables:
40252 + * c-file-style: "stroustrup"
40253 + * End:
40254 + */
40255 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/asm_elan4_thread.S
40256 ===================================================================
40257 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/asm_elan4_thread.S      2004-02-23 16:02:56.000000000 -0500
40258 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/asm_elan4_thread.S   2005-07-28 14:52:52.848678736 -0400
40259 @@ -0,0 +1,78 @@
40260 +/*
40261 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
40262 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
40263 + *
40264 + *    For licensing information please see the supplied COPYING file
40265 + *
40266 + */
40267 +
40268 +#ident "@(#)$Id: asm_elan4_thread.S,v 1.1 2003/09/23 13:55:11 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
40269 +/*      $Source: /cvs/master/quadrics/epmod/asm_elan4_thread.S,v $*/
40270 +
40271 +#include <elan4/events.h>
40272 +#include <elan4/commands.h>
40273 +
40274 +/*
40275 + * c_reschedule (E4_uint64 *commandport)
40276 + */            
40277 +       .global c_reschedule
40278 +c_reschedule:
40279 +       add             %sp, -128, %sp
40280 +       st64            %r16, [%sp]                     // preserve call preserved registers
40281 +       st64            %r24, [%sp + 64]                // - see CALL_USED_REGISTERS.
40282 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
40283 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
40284 +       nop                                             // BUG FIX: E4 RevA
40285 +       nop                                             // BUG FIX: E4 RevA
40286 +       
40287 +       mov             %r7, %r18                       // (%r2) return pc
40288 +1:     call            2f
40289 +        mov            %sp, %r17                       // (%r1) SP
40290 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
40291 +       mov             NOP_CMD, %r23                   // "nop" command
40292 +       st64suspend     %r16, [%r8]
40293 +3:     ld64            [%sp], %r16
40294 +       ld64            [%sp + 64], %r24                // restore call preserved register
40295 +       jmpl            %r2+8, %r0                      // and return
40296 +        add            %sp, 128, %sp
40297 +       
40298 +
40299 +/*
40300 + * c_waitevent (E4_uint64 *commandport, E4_Event *event, E4_uint64 count)
40301 + */
40302 +       .global c_waitevent
40303 +c_waitevent:
40304 +       add             %sp, -192, %sp
40305 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
40306 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
40307 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
40308 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
40309 +       nop                                             // BUG FIX: E4 RevA
40310 +       nop                                             // BUG FIX: E4 RevA
40311 +
40312 +       mov             %r7, %r18                       // (%r2) return pc
40313 +1:     call            2f
40314 +        mov            %sp, %r17                       // (%r1) SP
40315 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
40316 +       st32            %r16, [%sp]                     // event source block
40317 +       mov             MAKE_EXT_CLEAN_CMD, %r23        // "flush command queue desc" command
40318 +       st8             %r23, [%sp+56]                  // event source block
40319 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
40320 +       mov             %r23,%r23                       // BUG FIX: E4 RevA
40321 +       nop                                             // BUG FIX: E4 RevA
40322 +       nop                                             // BUG FIX: E4 RevA
40323 +       
40324 +
40325 +       or              %r9, WAIT_EVENT_CMD, %r16
40326 +       sll8            %r10, 32, %r17
40327 +       or              %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17
40328 +       mov             %sp, %r18
40329 +       mov             %r8, %r19
40330 +       
40331 +       st32suspend     %r16, [%r8]
40332 +       
40333 +3:     ld64            [%sp + 64], %r16                // restore call preserved register
40334 +       ld64            [%sp + 128], %r24
40335 +       jmpl            %r2+8, %r0                      // and return
40336 +        add            %sp, 192, %sp
40337 +
40338 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/assym_elan4.h
40339 ===================================================================
40340 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/assym_elan4.h   2004-02-23 16:02:56.000000000 -0500
40341 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/assym_elan4.h        2005-07-28 14:52:52.848678736 -0400
40342 @@ -0,0 +1,20 @@
40343 +/*
40344 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
40345 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
40346 + *
40347 + *    For licensing information please see the supplied COPYING file
40348 + *
40349 + */
40350 +
40351 +#ident "@(#)$Id: genassym_elan4.c,v 1.3 2004/04/25 11:26:07 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
40352 +/*      $Source: /cvs/master/quadrics/epmod/genassym_elan4.c,v $*/
40353 +
40354 +/* Generated by genassym_elan4 - do not modify */
40355 +
40356 +#define EP4_RCVR_THREAD_STALL  0
40357 +#define EP4_RCVR_PENDING_TAILP 128
40358 +#define EP4_RCVR_PENDING_HEAD  136
40359 +#define EP4_RCVR_DEBUG         176
40360 +#define EP4_RXD_NEXT           664
40361 +#define EP4_RXD_QUEUED         728
40362 +#define EP4_RXD_DEBUG          944
40363 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/cm.c
40364 ===================================================================
40365 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/cm.c    2004-02-23 16:02:56.000000000 -0500
40366 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/cm.c 2005-07-28 14:52:52.853677976 -0400
40367 @@ -0,0 +1,3000 @@
40368 +/*
40369 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
40370 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
40371 + *
40372 + *    For licensing information please see the supplied COPYING file
40373 + *
40374 + */
40375 +
40376 +#ident "@(#)$Id: cm.c,v 1.83.2.6 2005/01/13 12:37:57 mike Exp $"
40377 +/*      $Source: /cvs/master/quadrics/epmod/cm.c,v $ */
40378 +
40379 +#include <qsnet/kernel.h>
40380 +
40381 +#include <elan/kcomm.h>
40382 +
40383 +#include "kcomm_vp.h"
40384 +#include "debug.h"
40385 +#include "cm.h"
40386 +#include <elan/epsvc.h>
40387 +
40388 +#include <qsnet/procfs_linux.h>
40389 +
40390 +#if defined(LINUX)
40391 +#include "conf_linux.h"
40392 +#endif
40393 +
40394 +int BranchingRatios[CM_MAX_LEVELS];
40395 +
40396 +int MachineId      = -1;
40397 +int BrokenLevel    = -1;                       /* Simulates Broken Network */
40398 +int RejoinCheck    = 1;
40399 +int RejoinPanic    = 0;
40400 +
40401 +static int
40402 +SegmentNo (CM_RAIL *cmRail, u_int nodeid, u_int lvl)
40403 +{
40404 +    int i;
40405 +
40406 +    ASSERT (lvl < cmRail->NumLevels);
40407 +    
40408 +    for (i = 0; i < lvl; i++)
40409 +       nodeid /= cmRail->Levels[i].NumSegs;
40410 +    
40411 +    return (nodeid % cmRail->Levels[lvl].NumSegs);
40412 +}
40413 +
40414 +static int
40415 +ClusterIds (CM_RAIL *cmRail, int clvl, int *clmin, int *clmax)
40416 +{
40417 +    int clid  = cmRail->Rail->Position.pos_nodeid - cmRail->Levels[clvl].MinNodeId;
40418 +
40419 +    if (clvl == 0)
40420 +       *clmin = *clmax = clid;
40421 +    else
40422 +    {
40423 +       *clmin = cmRail->Levels[clvl - 1].MinNodeId - cmRail->Levels[clvl].MinNodeId;
40424 +       *clmax = *clmin + cmRail->Levels[clvl - 1].NumNodes - 1;
40425 +    }
40426 +    return (clid);
40427 +}
40428 +
40429 +#if defined(PER_CPU_TIMEOUT)
40430 +static void
40431 +__Schedule_Discovery (CM_RAIL *cmRail)         /* we urgently need to schedule discovery */
40432 +{
40433 +    cmRail->NextDiscoverTime = lbolt;
40434 +
40435 +    if (cmRail->NextRunTime == 0 || AFTER (cmRail->NextRunTime, cmRail->NextDiscoverTime))
40436 +       cmRail->NextRunTime = cmRail->NextDiscoverTime;
40437 +}
40438 +
40439 +static void
40440 +__Schedule_Heartbeat (CM_RAIL *cmRail)
40441 +{
40442 +    cmRail->NextHeartbeatTime = lbolt;
40443 +
40444 +    if (cmRail->NextRunTime == 0 || AFTER (cmRail->NextRunTime, cmRail->NextHeartbeatTime))
40445 +       cmRail->NextRunTime = cmRail->NextHeartbeatTime;
40446 +}
40447 +#else
40448 +
40449 +static void
40450 +__Schedule_Timer (CM_RAIL *cmRail, long tick)
40451 +{
40452 +    if (! timer_pending (&cmRail->HeartbeatTimer) || AFTER (cmRail->NextRunTime, tick))
40453 +    {
40454 +       cmRail->NextRunTime = tick;
40455 +
40456 +       mod_timer (&cmRail->HeartbeatTimer, tick);
40457 +    }
40458 +}
40459 +
40460 +static void
40461 +__Schedule_Discovery (CM_RAIL *cmRail)         /* we urgently need to schedule discovery */
40462 +{
40463 +    __Schedule_Timer (cmRail, cmRail->NextDiscoverTime = lbolt);
40464 +}
40465 +
40466 +static void
40467 +__Schedule_Heartbeat (CM_RAIL *cmRail)
40468 +{
40469 +    __Schedule_Timer (cmRail, cmRail->NextHeartbeatTime = lbolt);
40470 +}
40471 +#endif
40472 +
40473 +static int
40474 +MsgBusy (CM_RAIL *cmRail, int msgNumber)
40475 +{
40476 +    switch (ep_outputq_state (cmRail->Rail, cmRail->MsgQueue, msgNumber))
40477 +    {
40478 +    case EP_OUTPUTQ_BUSY:                      /* still busy */
40479 +       return 1;
40480 +       
40481 +    case EP_OUTPUTQ_FAILED:                    /* NACKed */
40482 +    {
40483 +#if defined(DEBUG_PRINTF)
40484 +       CM_MSG  *msg  = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
40485 +       uint8_t  type  = msg->Hdr.Type;
40486 +       uint16_t nmaps = msg->Hdr.NumMaps;
40487 +       int16_t  off   = msg->Payload.Statemaps[CM_MSG_MAP(0)].offset;
40488 +       
40489 +       CPRINTF4 (((type == CM_MSG_TYPE_DISCOVER_LEADER) || (type == CM_MSG_TYPE_DISCOVER_SUBORDINATE))  ? 6 : 3, /* we expect broadcasts to be NACKed */
40490 +                 "%s: msg %d type %d  failed%s\n", cmRail->Rail->Name, msgNumber, type, 
40491 +                 (type != CM_MSG_TYPE_HEARTBEAT) ? "" : nmaps == 0 ? ": null heartbeat" :
40492 +                 off == STATEMAP_RESET ? ": heartbeat with R statemaps" : ": heartbeat with statemaps");
40493 +#endif
40494 +       return 0;
40495 +    }
40496 +    
40497 +    case EP_OUTPUTQ_FINISHED:
40498 +       return 0;
40499 +
40500 +    default:
40501 +       panic ("MsgBusy - bad return code from ep_outputq_state\n");
40502 +       /* NOTREACHED */
40503 +    }
40504 +    return 0;
40505 +}
40506 +
40507 +static void
40508 +LaunchMessage (CM_RAIL *cmRail, int msgNumber, int vp, int qnum, int retries, int type, int lvl, int nmaps)
40509 +{
40510 +    CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
40511 +    CM_HDR *hdr = &msg->Hdr;
40512 +
40513 +    ASSERT (nmaps >= 0 && nmaps <= CM_MSG_MAXMAPS);
40514 +    ASSERT (SPINLOCK_HELD (&cmRail->Lock));
40515 +
40516 +    hdr->Version   = CM_MSG_VERSION;
40517 +    hdr->ParamHash = cmRail->ParamHash;
40518 +    hdr->Timestamp = cmRail->Timestamp;
40519 +    hdr->Checksum  = 0;
40520 +    hdr->NodeId    = cmRail->Rail->Position.pos_nodeid;
40521 +    hdr->MachineId = MachineId;
40522 +    hdr->NumMaps   = nmaps;
40523 +    hdr->Level     = lvl;
40524 +    hdr->Type      = type;
40525 +    hdr->Checksum  = CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps));
40526 +
40527 +    if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf)))                       /* Simulate broken network? */
40528 +       return;
40529 +
40530 +    if (ep_outputq_send (cmRail->Rail, cmRail->MsgQueue, msgNumber, 
40531 +                        CM_MSG_SIZE(nmaps), vp, qnum, retries));
40532 +       IncrStat (cmRail, LaunchMessageFail);
40533 +}
40534 +
40535 +static int
40536 +SendMessage (CM_RAIL *cmRail, int nodeId, int lvl, int type)
40537 +{
40538 +    int        msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
40539 +    int n         = CM_NUM_SPARE_MSG_BUFFERS;
40540 +    int retries;
40541 +
40542 +    ASSERT (type == CM_MSG_TYPE_IMCOMING ||    /* other types must use SendToSgmt */
40543 +           type == CM_MSG_TYPE_REJOIN);
40544 +   
40545 +    while (n-- > 0 && MsgBusy (cmRail, msgNumber))     /* search for idle "spare" buffer */
40546 +    {
40547 +       if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS)
40548 +           cmRail->NextSpareMsg = 0;
40549 +      
40550 +       msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
40551 +    }
40552 +
40553 +    if (n == 0)                                        /* all "spare" message buffers busy */
40554 +    {
40555 +       CPRINTF3 (3, "%s: all spare message buffers busy: trying to send type %d to %d\n",
40556 +                 cmRail->Rail->Name, type, nodeId);
40557 +       return (0);
40558 +    }
40559 +
40560 +    /* NB IMCOMING may be echoed by MANY nodes, so we don't (and musn't) have any retries */
40561 +    retries = (type == CM_MSG_TYPE_IMCOMING) ? 0 : CM_P2P_DMA_RETRIES;
40562 +   
40563 +    LaunchMessage (cmRail, msgNumber, EP_VP_NODE (nodeId), EP_SYSTEMQ_INTR, /* eager receive */
40564 +                  retries, type, lvl, 0);
40565 +   
40566 +    if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS) /* check this one last next time */
40567 +       cmRail->NextSpareMsg = 0;
40568 +
40569 +    return (1);
40570 +}
40571 +
40572 +static int
40573 +SendToSgmt (CM_RAIL *cmRail, CM_SGMT *sgmt, int type)
40574 +{    
40575 +    bitmap_t         seg;
40576 +    int              offset;
40577 +    int              nmaps;
40578 +    int              sidx;
40579 +    int              clvl;
40580 +    
40581 +    ASSERT (sgmt->Level <= cmRail->TopLevel);
40582 +
40583 +    if (MsgBusy (cmRail, sgmt->MsgNumber))             /* previous message still busy */
40584 +    {
40585 +       CPRINTF3 (3, "%s: node message buffer busy: trying to send type %d to %d\n",
40586 +                 cmRail->Rail->Name, type, sgmt->NodeId);
40587 +      
40588 +       return (0);
40589 +    }
40590 +
40591 +    switch (type)
40592 +    {
40593 +    case CM_MSG_TYPE_RESOLVE_LEADER:
40594 +    case CM_MSG_TYPE_DISCOVER_LEADER:
40595 +       ASSERT (sgmt->State == CM_SGMT_ABSENT);
40596 +       ASSERT (sgmt->Level == ((cmRail->Role == CM_ROLE_LEADER_CANDIDATE) ? cmRail->TopLevel : cmRail->TopLevel - 1));
40597 +       ASSERT (sgmt->Level < cmRail->NumLevels);
40598 +       ASSERT (sgmt->Sgmt == cmRail->Levels[sgmt->Level].MySgmt);
40599 +      
40600 +       /* broadcast to me and all my peers at this level (== my segment in the level above) */
40601 +       sidx = (sgmt->Level == cmRail->NumLevels - 1) ? 0 : cmRail->Levels[sgmt->Level + 1].MySgmt;
40602 +
40603 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level + 1, sidx), 
40604 +                      EP_SYSTEMQ_INTR, 0,              /* eager rx; no retries */
40605 +                      type, sgmt->Level, 0);
40606 +       return (1);
40607 +      
40608 +    case CM_MSG_TYPE_DISCOVER_SUBORDINATE:
40609 +       ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt);
40610 +       ASSERT (sgmt->State == CM_SGMT_WAITING);
40611 +       ASSERT (sgmt->Level > 0);                       /* broadcasting just to subtree */
40612 +      
40613 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level, sgmt->Sgmt), 
40614 +                      EP_SYSTEMQ_INTR, 0,              /* eager rx; no retries */
40615 +                      CM_MSG_TYPE_DISCOVER_SUBORDINATE, sgmt->Level, 0);
40616 +       return (1);
40617 +      
40618 +    case CM_MSG_TYPE_NOTIFY:
40619 +       ASSERT (sgmt->State == CM_SGMT_PRESENT);
40620 +      
40621 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId),
40622 +                      EP_SYSTEMQ_INTR, CM_P2P_DMA_RETRIES, /* eager rx; lots of retries */
40623 +                      CM_MSG_TYPE_NOTIFY, sgmt->Level, 0);
40624 +       return (1);
40625 +      
40626 +    case CM_MSG_TYPE_HEARTBEAT:
40627 +    {
40628 +       CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, sgmt->MsgNumber);
40629 +       CM_HDR *hdr = &msg->Hdr;
40630 +
40631 +       ASSERT (sgmt->State == CM_SGMT_PRESENT);
40632 +       
40633 +       hdr->AckSeq = sgmt->AckSeq;
40634 +   
40635 +       if (!sgmt->MsgAcked)                    /* Current message not acknowledged */
40636 +       {
40637 +           /* must have been something significant to require an ack */
40638 +           ASSERT (sgmt->SendMaps);
40639 +           ASSERT (sgmt->NumMaps > 0);
40640 +           
40641 +           CPRINTF3 (3, "%s: retrying heartbeat to %d (%d entries)\n", cmRail->Rail->Name, sgmt->NodeId, sgmt->NumMaps);
40642 +
40643 +           IncrStat (cmRail, RetryHeartbeat);
40644 +
40645 +           nmaps = sgmt->NumMaps;
40646 +       }
40647 +       else
40648 +       {
40649 +           nmaps = 0;
40650 +      
40651 +           if (sgmt->SendMaps)                 /* can send maps */
40652 +           {
40653 +               for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
40654 +               {
40655 +                   if (!sgmt->Maps[clvl].OutputMapValid)
40656 +                       continue;
40657 +                   
40658 +                   while ((offset = statemap_findchange (sgmt->Maps[clvl].OutputMap, &seg, 1)) >= 0)
40659 +                   {
40660 +                       CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)];
40661 +
40662 +                       sgmt->Maps[clvl].SentChanges = 1;
40663 +                       
40664 +                       map->level  = clvl;
40665 +                       map->offset = offset;
40666 +                       map->seg[0] = seg & 0xffff;
40667 +                       map->seg[1] = (seg >> 16) & 0xffff;
40668 +#if (BT_ULSHIFT == 6)
40669 +                       map->seg[2] = (seg >> 32) & 0xffff;
40670 +                       map->seg[3] = (seg >> 48) & 0xffff;
40671 +#elif (BT_ULSHIFT != 5)
40672 +#error "Bad value for BT_ULSHIFT"
40673 +#endif
40674 +                       if (++nmaps == CM_MSG_MAXMAPS)
40675 +                           goto msg_full;
40676 +                   }
40677 +
40678 +                   if (sgmt->Maps[clvl].SentChanges)
40679 +                   {
40680 +                       CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)];
40681 +
40682 +                       sgmt->Maps[clvl].SentChanges = 0;
40683 +
40684 +                       map->level  = clvl;
40685 +                       map->offset = STATEMAP_NOMORECHANGES;
40686 +                       
40687 +                       if (++nmaps == CM_MSG_MAXMAPS)
40688 +                           goto msg_full;
40689 +                   }
40690 +               }
40691 +           }
40692 +           
40693 +           ASSERT (nmaps < CM_MSG_MAXMAPS);
40694 +
40695 +       msg_full:
40696 +           sgmt->NumMaps = nmaps;              /* remember how many incase we retry */
40697 +
40698 +           if (nmaps == 0)                     /* no changes to send */
40699 +               hdr->Seq = sgmt->MsgSeq;        /* this one can be dropped */
40700 +           else
40701 +           {
40702 +               hdr->Seq = ++(sgmt->MsgSeq);    /* on to next message number */
40703 +               sgmt->MsgAcked = 0;             /* need this one to be acked before I can send another */
40704 +
40705 +               IncrStat (cmRail, MapChangesSent);
40706 +           }
40707 +       }
40708 +
40709 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId), 
40710 +                      EP_SYSTEMQ_POLLED,  CM_P2P_DMA_RETRIES, /* polled receive, lots of retries */
40711 +                      CM_MSG_TYPE_HEARTBEAT, sgmt->Level, nmaps);
40712 +
40713 +       IncrStat (cmRail, HeartbeatsSent);
40714 +
40715 +       return (1);
40716 +    }
40717 +
40718 +    default:                                   /* other types must use SendMessage */
40719 +       printk ("SendToSgmt: invalid type %d\n", type);
40720 +       ASSERT (0);
40721 +
40722 +       return (1);
40723 +    }
40724 +}
40725 +
40726 +static char *
40727 +GlobalStatusString (statemap_t *map, int idx)
40728 +{
40729 +    char *strings[] = {"....", "S...", "C...", "R...", 
40730 +                      ".s..", "Ss..", "Cs..", "Rs..", 
40731 +                      "..r.", "S.r.", "C.r.", "R.r.", 
40732 +                      ".sr.", "Ssr.", "Csr.", "Rsr.", 
40733 +                      "...R", "S..R", "C..R", "R..R", 
40734 +                      ".s.R", "Ss.R", "Cs.R", "Rs.R", 
40735 +                      "..rR", "S.rR", "C.rR", "R.rR", 
40736 +                      ".srR", "SsrR", "CsrR", "RsrR"};
40737 +    
40738 +    return (strings[statemap_getbits (map, idx * CM_GSTATUS_BITS, CM_GSTATUS_BITS)]);
40739 +}
40740 +
40741 +static char *
40742 +MapString (char *name, statemap_t *map, int nnodes, char *trailer)
40743 +{
40744 +    static char *space;
40745 +    int          i;
40746 +
40747 +    if (space == NULL)
40748 +       KMEM_ALLOC (space, char *, EP_MAX_NODES*(CM_GSTATUS_BITS+1), 0);
40749 +
40750 +    if (space == NULL)
40751 +       return ("<cannot allocate memory>");
40752 +    else
40753 +    {
40754 +       char *ptr = space;
40755 +
40756 +       sprintf (space, "%s ", name); ptr += strlen (ptr);
40757 +       for (i = 0; i < nnodes; i++, ptr += strlen (ptr))
40758 +           sprintf (ptr, "%s%s", i == 0 ? "" : ",", GlobalStatusString (map, i));
40759 +       sprintf (ptr, " %s", trailer);
40760 +       return (space);
40761 +    }
40762 +}
40763 +
40764 +void
40765 +DisplayMap (DisplayInfo *di, CM_RAIL *cmRail, char *name, statemap_t *map, int nnodes, char *trailer)
40766 +{
40767 +    char  linebuf[256];
40768 +    char *ptr = linebuf;
40769 +    int   i;
40770 +
40771 +#define NODES_PER_LINE 32
40772 +    for (i = 0; i < nnodes; i++)
40773 +    {
40774 +       if (ptr == linebuf)
40775 +       {
40776 +           sprintf (ptr, "%4d", i);
40777 +           ptr += strlen (ptr);
40778 +       }
40779 +       
40780 +       sprintf (ptr, ",%s", GlobalStatusString (map, i));
40781 +       ptr += strlen (ptr);
40782 +
40783 +       if ((i % NODES_PER_LINE) == (NODES_PER_LINE-1) || (i == (nnodes-1)))
40784 +       {
40785 +           (di->func)(di->arg, "%s: %s %s %s\n", cmRail->Rail->Name, name, linebuf, trailer);
40786 +           ptr = linebuf;
40787 +       }
40788 +    }
40789 +#undef NODES_PER_LINE
40790 +}
40791 +
40792 +void
40793 +DisplayNodeMaps (DisplayInfo *di, CM_RAIL *cmRail)
40794 +{
40795 +    int   lvl;
40796 +    int   clvl;
40797 +    char  mapname[128];
40798 +    
40799 +    (di->func)(di->arg, "%s: Node %d maps...\n", cmRail->Rail->Name, cmRail->Rail->Position.pos_nodeid);
40800 +
40801 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
40802 +    {
40803 +       int nnodes = cmRail->Levels[clvl].NumNodes;
40804 +
40805 +       (di->func)(di->arg, "%s: Cluster level %d: Connected %ld - %s%s\n", 
40806 +                  cmRail->Rail->Name, clvl, cmRail->Levels[clvl].Connected,
40807 +                  cmRail->Levels[clvl].Online     ? "Online" : "Offline",
40808 +                  cmRail->Levels[clvl].Restarting ? ", Restarting" : "");
40809 +
40810 +       for (lvl = 0; lvl < cmRail->TopLevel && lvl <= clvl; lvl++)
40811 +       {
40812 +           CM_LEVEL *level = &cmRail->Levels[lvl];
40813 +
40814 +           sprintf (mapname, "%10s%2d", "Level", lvl);
40815 +           DisplayMap (di, cmRail, mapname, level->SubordinateMap[clvl], nnodes,
40816 +                       level->SubordinateMapValid[clvl] ? "" : "(invalid)");
40817 +       }
40818 +
40819 +       sprintf (mapname, "%12s", "Local");
40820 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LocalMap, nnodes, "");
40821 +
40822 +       sprintf (mapname, "%12s", "Subtree");
40823 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].SubTreeMap, nnodes, 
40824 +                   cmRail->Levels[clvl].SubTreeMapValid ? "" : "(invalid)");
40825 +
40826 +       sprintf (mapname, "%12s", "Global");
40827 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].GlobalMap, nnodes, 
40828 +                   cmRail->Levels[clvl].GlobalMapValid ? "" : "(invalid)");
40829 +
40830 +       sprintf (mapname, "%12s", "LastGlobal");
40831 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LastGlobalMap, nnodes, "");
40832 +    }
40833 +}
40834 +
40835 +void
40836 +DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail)
40837 +{
40838 +    int   lvl;
40839 +    int   sidx;
40840 +    
40841 +    (di->func)(di->arg, "%s: Node %d segments...\n", cmRail->Rail->Name, cmRail->NodeId);
40842 +    
40843 +    for (lvl = 0; lvl <= cmRail->TopLevel && lvl < cmRail->NumLevels; lvl++)
40844 +    {
40845 +       (di->func)(di->arg, "   level %d: ", lvl);
40846 +       
40847 +       for (sidx = 0; sidx < ((lvl == cmRail->TopLevel) ? 1 : cmRail->Levels[lvl].NumSegs); sidx++)
40848 +       {
40849 +           CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx];
40850 +               
40851 +           if (sgmt->State == CM_SGMT_PRESENT)
40852 +               (di->func)(di->arg, "[%d, in: %d out: %d %s%s]", 
40853 +                       sgmt->NodeId,
40854 +                       sgmt->AckSeq,
40855 +                       sgmt->MsgSeq,
40856 +                       sgmt->MsgAcked ? "A" : "-",
40857 +                       sgmt->SendMaps ? "!" : "-");
40858 +           else
40859 +               (di->func)(di->arg, "[%s]", (sgmt->State == CM_SGMT_ABSENT ? "absent" :
40860 +                                sgmt->State == CM_SGMT_WAITING ? "waiting" :
40861 +                                sgmt->State == CM_SGMT_COMING ? "coming" : "UNKNOWN"));
40862 +       }
40863 +       (di->func)(di->arg, "\n");
40864 +    }
40865 +}
40866 +
40867 +
40868 +static void
40869 +StartConnecting (CM_RAIL *cmRail, CM_SGMT *sgmt, int NodeId, int Timestamp)
40870 +{
40871 +    int clvl;
40872 +
40873 +    CPRINTF4 (2, "%s: lvl %d subtree %d node %d -> connecting\n", cmRail->Rail->Name, sgmt->Level, sgmt->Sgmt, NodeId);
40874 +
40875 +    /* Only reconnect the same guy if he was reborn */
40876 +    ASSERT (sgmt->State != CM_SGMT_PRESENT ||
40877 +           (sgmt->NodeId == NodeId && sgmt->Timestamp != Timestamp));
40878 +   
40879 +    /* After we've connected to a new peer, we wait to receive
40880 +     * STATEMAP_RESET before we accumulate changes and we wait for a
40881 +     * complete map to be received before we propagate changes to other
40882 +     * nodes.
40883 +     *
40884 +     * If I'm the subordinate, I can start sending maps right away, since
40885 +     * the leader is ready for them already.  If I'm the leader, I hold off
40886 +     * sending maps until I've seen the subordinate's first heartbeat,
40887 +     * because the subordinate might miss my NOTIFY message, still think
40888 +     * she's a leader candidate and ignore my heartbeats.
40889 +     */
40890 +    sgmt->SendMaps = (sgmt->Level == cmRail->TopLevel); /* I can send maps to my leader (she NOTIFIED me) */
40891 +
40892 +    for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
40893 +    {
40894 +       statemap_reset (sgmt->Maps[clvl].CurrentInputMap);
40895 +       statemap_reset (sgmt->Maps[clvl].InputMap);
40896 +       statemap_reset (sgmt->Maps[clvl].OutputMap);
40897 +       
40898 +       sgmt->Maps[clvl].InputMapValid = 0;
40899 +       sgmt->Maps[clvl].OutputMapValid = 0;
40900 +       sgmt->Maps[clvl].SentChanges = 0;
40901 +
40902 +       if (sgmt->Level == cmRail->TopLevel)    /* connection to leader */
40903 +       {
40904 +           ASSERT (sgmt->Sgmt == 0);
40905 +           ASSERT (cmRail->Role == CM_ROLE_SUBORDINATE);
40906 +
40907 +           if (cmRail->Levels[clvl].SubTreeMapValid) /* already got a subtree map to send up */
40908 +           {
40909 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap);
40910 +               sgmt->Maps[clvl].OutputMapValid = 1;
40911 +
40912 +               statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
40913 +           }
40914 +       }
40915 +       else                                    /* connection to subordinate */
40916 +       {
40917 +           ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt);
40918 +
40919 +           if (cmRail->Levels[clvl].GlobalMapValid) /* already got a global map to broadcast */
40920 +           {
40921 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap);
40922 +               sgmt->Maps[clvl].OutputMapValid = 1;
40923 +           }
40924 +       }
40925 +    }
40926 +    
40927 +    /* Initialise sequence counters */
40928 +    sgmt->MsgSeq = sgmt->AckSeq = 0;
40929 +    sgmt->MsgAcked = 1;                        /* ready to send a new sequenced message */
40930 +   
40931 +    sgmt->State      = CM_SGMT_PRESENT;
40932 +    sgmt->NodeId     = NodeId;
40933 +    sgmt->UpdateTick = lbolt;
40934 +    sgmt->Timestamp  = Timestamp;
40935 +}
40936 +
40937 +static void
40938 +StartSubTreeDiscovery (CM_RAIL *cmRail, CM_SGMT *sgmt)
40939 +{
40940 +    sgmt->State = CM_SGMT_WAITING;
40941 +    sgmt->UpdateTick = lbolt;
40942 +    sgmt->WaitingTick = lbolt;
40943 +
40944 +    if (sgmt->Level > 0)
40945 +       __Schedule_Discovery (cmRail);
40946 +}
40947 +
40948 +void
40949 +StartSubordinateDiscovery (CM_RAIL *cmRail)
40950 +{
40951 +    int       i;
40952 +    int       lvl = cmRail->TopLevel - 1;
40953 +    CM_LEVEL *level = &cmRail->Levels[lvl];
40954 +
40955 +    ASSERT (lvl >= 0 && lvl < cmRail->NumLevels);
40956 +
40957 +    for (i = 0; i < level->NumSegs; i++)
40958 +    {
40959 +        CM_SGMT *sgmt = &level->Sgmts[i];
40960 +       
40961 +       if (i != level->MySgmt)         /* No-one should connect here */
40962 +           StartSubTreeDiscovery (cmRail, sgmt);
40963 +    }
40964 +}
40965 +
40966 +void
40967 +StartLeaderDiscovery (CM_RAIL *cmRail)
40968 +{
40969 +    int       i;
40970 +    int       clvl;
40971 +    CM_LEVEL *level = &cmRail->Levels[cmRail->TopLevel];
40972 +
40973 +    ASSERT (cmRail->TopLevel < cmRail->NumLevels);
40974 +
40975 +    for (clvl = cmRail->TopLevel; clvl < cmRail->NumLevels; clvl++)
40976 +    {
40977 +        cmRail->Levels[clvl].GlobalMapValid = 0;
40978 +       cmRail->Levels[clvl].SubTreeMapValid = 0;
40979 +        level->SubordinateMapValid[clvl] = 0;
40980 +    }
40981 +
40982 +    for (i = 0; i < level->NumSegs; i++)
40983 +    {
40984 +        CM_SGMT *sgmt = &level->Sgmts[i];
40985 +       
40986 +       sgmt->State = CM_SGMT_ABSENT;
40987 +    }
40988 +
40989 +    cmRail->DiscoverStartTick = lbolt;
40990 +    cmRail->Role = CM_ROLE_LEADER_CANDIDATE;
40991 +   
40992 +    __Schedule_Discovery (cmRail);
40993 +}
40994 +
40995 +static void
40996 +RaiseTopLevel (CM_RAIL *cmRail)
40997 +{
40998 +    ASSERT (cmRail->NumLevels != 0);
40999 +    ASSERT (cmRail->TopLevel < cmRail->NumLevels);
41000 +
41001 +    CPRINTF2 (2, "%s: RaiseTopLevel %d\n", cmRail->Rail->Name, cmRail->TopLevel + 1);
41002 +
41003 +    if (++cmRail->TopLevel == cmRail->NumLevels)       /* whole machine leader? */
41004 +       cmRail->Role = CM_ROLE_LEADER;
41005 +    else
41006 +       StartLeaderDiscovery (cmRail);          /* look for my leader */
41007 +
41008 +    StartSubordinateDiscovery (cmRail);                /* and any direct subordinates */
41009 +}
41010 +
41011 +static void
41012 +LowerTopLevel (CM_RAIL *cmRail, int lvl)
41013 +{
41014 +    ASSERT (cmRail->NumLevels != 0);
41015 +    ASSERT (lvl < cmRail->NumLevels);
41016 +
41017 +    CPRINTF2 (2, "%s: LowerTopLevel %d\n", cmRail->Rail->Name, lvl);
41018 +
41019 +    if (lvl == 0)
41020 +       cmRail->Timestamp = lbolt;
41021 +
41022 +    cmRail->TopLevel = lvl;
41023 +
41024 +    StartLeaderDiscovery (cmRail);             /* look for my leader */
41025 +}
41026 +
41027 +static int
41028 +IShouldLead (CM_RAIL *cmRail, CM_MSG *msg)
41029 +{
41030 +    /* NB, this function MUST be consistently calculated on any nodes, just
41031 +     * from the info supplied in the message.  Otherwise leadership
41032 +     * arbitration during concurrent discovery will fail.
41033 +     */
41034 +    return (cmRail->NodeId < msg->Hdr.NodeId);
41035 +}
41036 +
41037 +static int
41038 +SumCheck (CM_MSG *msg)
41039 +{
41040 +    CM_HDR   *hdr   = &msg->Hdr;
41041 +    uint16_t  sum   = hdr->Checksum;
41042 +    uint16_t  nmaps = hdr->NumMaps;
41043 +
41044 +    if (nmaps > CM_MSG_MAXMAPS) {
41045 +       printk ("SumCheck: nmaps %d > CM_MSG_MAXMAPS\n", nmaps);
41046 +       return 0;
41047 +    }
41048 +    
41049 +    if ((hdr->Type != CM_MSG_TYPE_HEARTBEAT) && nmaps != 0) {
41050 +       printk ("SumCheck: type(%d) not HEARTBEAT and nmaps(%d) != 0\n", hdr->Type, nmaps);
41051 +       return 0;
41052 +    }
41053 +
41054 +    hdr->Checksum = 0;
41055 +    
41056 +    if (CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)) != sum) {
41057 +       printk ("SumCheck: checksum failed %x %x\n", CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)), sum);
41058 +
41059 +       return 0;
41060 +    }
41061 +       
41062 +    return 1;
41063 +}
41064 +
41065 +static void
41066 +ProcessMessage (EP_RAIL *rail, void *arg, void *msgbuf)
41067 +{
41068 +    CM_RAIL       *cmRail = (CM_RAIL *) arg;
41069 +    CM_MSG         *msg    = (CM_MSG *) msgbuf;
41070 +    CM_HDR         *hdr    = &msg->Hdr;
41071 +    int             lvl;
41072 +    int             sidx;
41073 +    CM_LEVEL       *level;
41074 +    CM_SGMT        *sgmt;
41075 +    bitmap_t        seg;
41076 +    int             i;
41077 +    int            delay;
41078 +    static long    tlast;
41079 +    static int     count;
41080 +
41081 +    /* Poll the message Version field until the message has completely
41082 +     * arrived in main memory. */
41083 +    for (delay = 1; hdr->Version == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
41084 +       DELAY (delay);
41085 +
41086 +    /* Display a message every 60 seconds if we see an "old" format message */
41087 +    if (hdr->Version == EP_SYSTEMQ_UNRECEIVED && (((lbolt - tlast) > 60*HZ) ? (count = 0) : ++count) < 1)
41088 +    {
41089 +       printk ("%s: received old protocol message (type %d from node %d)\n", cmRail->Rail->Name, 
41090 +               ((uint8_t *) msg)[20], ((uint16_t *) msg)[4]);
41091 +
41092 +       tlast = lbolt;
41093 +       goto finished;
41094 +    }
41095 +
41096 +    if (hdr->Version != CM_MSG_VERSION || hdr->ParamHash != cmRail->ParamHash || hdr->MachineId != MachineId)
41097 +    {
41098 +       CPRINTF8 (1, "%s: invalid message : Version %08x (%08x) ParamHash %08x (%08x) MachineId %04x (%04x) Nodeid %d\n", cmRail->Rail->Name,
41099 +                 hdr->Version, CM_MSG_VERSION, hdr->ParamHash, cmRail->ParamHash, hdr->MachineId, MachineId, hdr->NodeId);
41100 +       goto finished;
41101 +    }
41102 +
41103 +    if (!SumCheck (msg))
41104 +    {
41105 +       printk ("%s: checksum failed on msg from %d?\n", cmRail->Rail->Name, hdr->NodeId);
41106 +       goto finished;
41107 +    }
41108 +    
41109 +    if (hdr->NodeId == cmRail->NodeId)         /* ignore my own broadcast */       
41110 +    {
41111 +       CPRINTF3 (6, "%s: node %d type %d: ignored (MESSAGE FROM ME)\n", 
41112 +                 cmRail->Rail->Name, hdr->NodeId, hdr->Type);
41113 +
41114 +       if (hdr->Type != CM_MSG_TYPE_DISCOVER_LEADER && hdr->Type != CM_MSG_TYPE_RESOLVE_LEADER)
41115 +           printk ("%s: node %d type %d: ignored (MESSAGE FROM ME)\n", 
41116 +                   cmRail->Rail->Name, hdr->NodeId, hdr->Type);
41117 +       goto finished;
41118 +    }
41119 +
41120 +    lvl = hdr->Level;
41121 +    level = &cmRail->Levels[lvl];
41122 +
41123 +    if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf)))                       /* Simulate broken network? */
41124 +       goto finished;
41125 +    
41126 +    if (lvl >= cmRail->NumLevels ||            /* from outer space  */
41127 +       hdr->NodeId < level->MinNodeId ||       /* from outside this level's subtree */
41128 +       hdr->NodeId >= level->MinNodeId + level->NumNodes)
41129 +    {
41130 +       printk ("%s: lvl %d node %d type %d: ignored (%s)\n", 
41131 +               cmRail->Rail->Name, lvl, hdr->NodeId, hdr->Type, 
41132 +               lvl >= cmRail->NumLevels ? "level too big for machine" : "outside subtree");
41133 +       goto finished;
41134 +    }
41135 +
41136 +    sidx = SegmentNo (cmRail, hdr->NodeId, lvl);
41137 +    sgmt = &level->Sgmts[sidx];
41138 +    
41139 +    switch (hdr->Type)
41140 +    {
41141 +    case CM_MSG_TYPE_RESOLVE_LEADER:
41142 +       if (lvl >= cmRail->TopLevel)
41143 +       {
41144 +           CPRINTF4 (6, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: ignored (above my level)\n", 
41145 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41146 +           break;
41147 +       }
41148 +
41149 +       /* someone else thinks they lead at the same level as me */
41150 +       CPRINTF4 (1, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", 
41151 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41152 +       
41153 +       printk ("%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", 
41154 +               cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41155 +       
41156 +
41157 +       SendMessage (cmRail, hdr->NodeId, lvl, CM_MSG_TYPE_REJOIN);
41158 +       break;
41159 +       
41160 +    case CM_MSG_TYPE_DISCOVER_LEADER:
41161 +       if (lvl > cmRail->TopLevel)
41162 +       {
41163 +           CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (above my level)\n", 
41164 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41165 +           break;
41166 +       }
41167 +
41168 +       if (sidx == level->MySgmt)              /* someone I led thinks they lead some of my subtrees */
41169 +       {
41170 +           CPRINTF4 (1, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", 
41171 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41172 +
41173 +           printk ("%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", 
41174 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41175 +
41176 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
41177 +           break;
41178 +       }       
41179 +
41180 +       if (lvl < cmRail->TopLevel)                     /* I'm the leader of this level */
41181 +       {
41182 +           if (sgmt->State == CM_SGMT_PRESENT &&       /* someone thinks someone I lead is dead */
41183 +               sgmt->NodeId != hdr->NodeId)
41184 +           {
41185 +               /* My subordinate's death could be noticed by one of her peers
41186 +                * before I do.  If she _is_ dead, I'll notice before long and
41187 +                * NOTIFY this discover.  If this discover completes before I
41188 +                * detect my subordinate's death, the discovering node will
41189 +                * try to take over from me, and then I'll RESET her.
41190 +                */
41191 +               CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (got established subordinate)\n", 
41192 +                         cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41193 +               return;
41194 +           }
41195 +
41196 +           if (sgmt->State != CM_SGMT_PRESENT || /* New connection */
41197 +               sgmt->Timestamp != hdr->Timestamp) /* new incarnation */
41198 +               StartConnecting (cmRail, sgmt, hdr->NodeId, hdr->Timestamp);
41199 +         
41200 +           CPRINTF4 (2, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !NOTIFY)\n", 
41201 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41202 +         
41203 +           SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_NOTIFY);
41204 +           break;
41205 +       }
41206 +
41207 +       ASSERT (lvl == cmRail->TopLevel);
41208 +
41209 +       if (cmRail->Role == CM_ROLE_SUBORDINATE)
41210 +       {
41211 +           /* I think my leader is alive, in which case she'll NOTIFY this
41212 +            * DISCOVER.  If she's dead, I'll start to become a leader
41213 +            * candidate and handle this appropriately.
41214 +            */
41215 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: ignored (I'm a subordinate)\n", 
41216 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
41217 +           break;
41218 +       }
41219 +       
41220 +       ASSERT (cmRail->Role == CM_ROLE_LEADER_CANDIDATE);
41221 +
41222 +       /* A peer at this level is bidding for leadership along with me */
41223 +       if (IShouldLead (cmRail, msg))
41224 +       {
41225 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: but I should lead\n", 
41226 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
41227 +
41228 +           /* So there _is_ someone there; She'll be seeing my DISCOVER
41229 +            * messages and extending her discovery period, so that when I
41230 +            * become leader, I'll NOTIFY her.  In the meantime I'll flag her
41231 +            * activity, so she remains WAITING.
41232 +            */
41233 +           sgmt->UpdateTick = lbolt;
41234 +           break;
41235 +       }
41236 +       
41237 +       /* Defer to sender... */
41238 +       CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: delaying me becoming leader\n", 
41239 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
41240 +       
41241 +       StartLeaderDiscovery (cmRail);
41242 +       break;
41243 +
41244 +    case CM_MSG_TYPE_DISCOVER_SUBORDINATE:
41245 +       if (lvl <= cmRail->TopLevel)
41246 +       {
41247 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (from my subtree)\n", 
41248 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
41249 +           break;
41250 +       }
41251 +       
41252 +       if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE)
41253 +       {
41254 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (I'm not looking for a leader)\n", 
41255 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
41256 +           break;
41257 +       }
41258 +       
41259 +       if (hdr->Level > cmRail->BroadcastLevel && AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT))
41260 +       {
41261 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (broadcast level too low)\n",
41262 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
41263 +           break;
41264 +       }
41265 +
41266 +       CPRINTF3 (2, "%s: lvl %d node %d DISCOVER_SUBORDINATE: !IMCOMING\n", 
41267 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
41268 +       
41269 +       SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_IMCOMING);
41270 +       break;
41271 +
41272 +    case CM_MSG_TYPE_IMCOMING:
41273 +       if (lvl > cmRail->TopLevel ||           /* from peer or node above me */
41274 +           sgmt->State == CM_SGMT_PRESENT ||   /* already got a subtree */
41275 +           sgmt->State == CM_SGMT_ABSENT)      /* already written off this subtree */
41276 +       {
41277 +           CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: ignored\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41278 +           break;
41279 +       }
41280 +
41281 +       CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: waiting...\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41282 +
41283 +       sgmt->State = CM_SGMT_COMING;
41284 +       sgmt->UpdateTick = lbolt;
41285 +       break;
41286 +       
41287 +    case CM_MSG_TYPE_NOTIFY:
41288 +       if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE || /* I'm not looking for a leader */
41289 +           lvl != cmRail->TopLevel)            /* at this level */
41290 +       {
41291 +           /* If this person really should be my leader, my existing leader
41292 +            * will time out, and I'll discover this one. */
41293 +           CPRINTF4 (2, "%s: lvl %d node %d NOTIFY: ignored (%s)\n", 
41294 +                     cmRail->Rail->Name, lvl, hdr->NodeId,
41295 +                     lvl < cmRail->TopLevel ? "already leader" : 
41296 +                     lvl > cmRail->TopLevel ? "lvl too high" : "already subordinate");
41297 +           break;
41298 +       }
41299 +
41300 +       CPRINTF3 (2, "%s: lvl %d node %d NOTIFY: becoming subordinate\n", 
41301 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
41302 +       
41303 +       cmRail->Role = CM_ROLE_SUBORDINATE;             /* Now I've found my level */
41304 +       StartConnecting (cmRail, &level->Sgmts[0], hdr->NodeId, hdr->Timestamp);
41305 +       break;
41306 +
41307 +    case CM_MSG_TYPE_HEARTBEAT:
41308 +       if (lvl > cmRail->TopLevel)
41309 +       {
41310 +           CPRINTF3 (2, "%s: lvl %d node %d H/BEAT: ignored (lvl too high)\n", 
41311 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
41312 +           break;
41313 +       }
41314 +
41315 +       if (lvl == cmRail->TopLevel)                    /* heartbeat from my leader */
41316 +       {
41317 +           if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE) /* but I've not got one */
41318 +           {
41319 +               /* I'm probably a new incarnation of myself; I'll keep doing
41320 +                * discovery until my previous existence's leader NOTIFY's me.
41321 +                * If I was this node's leader, she'll time me out (I'm not
41322 +                * sending heartbeats to her) and we'll fight it out for
41323 +                * leadership. */
41324 +               CPRINTF3 (2, "%s: lvl %d node %d H/BEAT ignored (no leader)\n", 
41325 +                         cmRail->Rail->Name, lvl, hdr->NodeId);
41326 +               break;
41327 +           }
41328 +           sidx = 0;
41329 +           sgmt = &level->Sgmts[0];
41330 +       }
41331 +      
41332 +       if (sgmt->State != CM_SGMT_PRESENT ||   /* not fully connected with this guy */
41333 +           sgmt->NodeId != hdr->NodeId ||      /* someone else impersonating my peer */
41334 +           sgmt->Timestamp != hdr->Timestamp)  /* new incarnation of my peer */
41335 +       {
41336 +           CPRINTF4 (1, "%s: lvl %d sidx %d node %d H/BEAT: !REJOIN\n", 
41337 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
41338 +         
41339 +           printk ("%s: lvl %d sidx %d node %d H/BEAT: !REJOIN %s\n",
41340 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId,
41341 +                   sgmt->State != CM_SGMT_PRESENT ? "not present" :
41342 +                   sgmt->NodeId != hdr->NodeId ? "someone else" : "new incarnation");
41343 +           
41344 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
41345 +           break;
41346 +       }
41347 +
41348 +       if (!((hdr->Seq == sgmt->AckSeq) ||     /* NOT duplicate message or */
41349 +             (hdr->Seq == (CM_SEQ)(sgmt->AckSeq + 1))) || /* expected message */
41350 +           !((hdr->AckSeq == sgmt->MsgSeq) ||  /* NOT expected ack or */
41351 +             (hdr->AckSeq == (CM_SEQ)(sgmt->MsgSeq - 1)))) /* duplicate ack */
41352 +       {
41353 +           CPRINTF9 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", 
41354 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, 
41355 +                     (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq);
41356 +        
41357 +           printk ("%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", 
41358 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, 
41359 +                   (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq);
41360 +        
41361 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
41362 +           break;
41363 +       }
41364 +
41365 +       IncrStat (cmRail, HeartbeatsRcvd);
41366 +
41367 +       sgmt->UpdateTick = lbolt;
41368 +       sgmt->SendMaps = 1;
41369 +
41370 +       if (sgmt->MsgSeq == hdr->AckSeq)                /* acking current message */
41371 +           sgmt->MsgAcked = 1;                 /* can send the next one */
41372 +
41373 +       if (hdr->Seq == sgmt->AckSeq)           /* discard duplicate (or NULL heartbeat) */
41374 +       {
41375 +           CPRINTF6 (6, "%s: lvl %d sidx %d node %d type %d: %s H/BEAT\n", 
41376 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type,
41377 +                     hdr->NumMaps == 0 ? "null" : "duplicate");
41378 +           break;
41379 +       }
41380 +
41381 +       CPRINTF7 (6, "%s: lvl %d sidx %d node %d type %d: seq %d maps %d H/BEAT\n", 
41382 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, hdr->Seq, hdr->NumMaps);
41383 +
41384 +       sgmt->AckSeq = hdr->Seq;                        /* ready to receive next one */
41385 +       
41386 +       for (i = 0; i < hdr->NumMaps; i++)
41387 +       {
41388 +           CM_STATEMAP_ENTRY *map  = &msg->Payload.Statemaps[CM_MSG_MAP(i)];
41389 +           int                clvl = map->level;
41390 +           
41391 +           if (clvl < 0)                       /* end of message */
41392 +               break;
41393 +
41394 +           if (clvl < sgmt->Level)             /* bad level */
41395 +           {
41396 +               CPRINTF6 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (bad clevel %d)\n", 
41397 +                         cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, clvl);
41398 +
41399 +               SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
41400 +               goto finished;
41401 +           }
41402 +
41403 +           if (map->offset == STATEMAP_NOMORECHANGES) /* end of atomic changes */
41404 +           {
41405 +               if (!sgmt->Maps[clvl].InputMapValid || /* not set InputMap yet */
41406 +                   statemap_changed (sgmt->Maps[clvl].CurrentInputMap)) /* previously applied changes */
41407 +               {
41408 +                   CPRINTF3 (4, "%s: received new clvl %d map from %d\n", cmRail->Rail->Name, clvl, sgmt->NodeId);
41409 +
41410 +                   statemap_setmap (sgmt->Maps[clvl].InputMap, sgmt->Maps[clvl].CurrentInputMap);
41411 +                   sgmt->Maps[clvl].InputMapValid = 1;
41412 +
41413 +                   statemap_clearchanges (sgmt->Maps[clvl].CurrentInputMap);
41414 +               }
41415 +               continue;
41416 +           }
41417 +           
41418 +           seg = ((bitmap_t)map->seg[0])
41419 +               | (((bitmap_t)map->seg[1]) << 16)
41420 +#if (BT_ULSHIFT == 6)
41421 +               | (((bitmap_t)map->seg[2]) << 32)
41422 +               | (((bitmap_t)map->seg[3]) << 48)
41423 +#elif (BT_ULSHIFT != 5)
41424 +#error "Bad value for BT_ULSHIFT"
41425 +#endif
41426 +               ;
41427 +           statemap_setseg (sgmt->Maps[clvl].CurrentInputMap, map->offset, seg);
41428 +       }
41429 +       break;
41430 +
41431 +    case CM_MSG_TYPE_REJOIN:
41432 +       CPRINTF5 (1, "%s: lvl %d sidx %d node %d type %d: REJOIN\n",
41433 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type);
41434 +       printk ("%s: lvl %d sidx %d node %d type %d: REJOIN\n", 
41435 +               cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type);
41436 +
41437 +       LowerTopLevel (cmRail, 0);
41438 +
41439 +       IncrStat (cmRail, RejoinRequest);
41440 +       break;
41441 +
41442 +    default:
41443 +       printk ("%s: lvl=%d unknown message type %d\n", cmRail->Rail->Name, lvl, hdr->Type);
41444 +       break;
41445 +    }
41446 + finished:
41447 +    hdr->Version = EP_SYSTEMQ_UNRECEIVED;
41448 +}
41449 +
41450 +static void
41451 +PollInputQueues (CM_RAIL *cmRail)
41452 +{
41453 +    ep_poll_inputq (cmRail->Rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail);
41454 +    ep_poll_inputq (cmRail->Rail, cmRail->PolledQueue, 0, ProcessMessage, cmRail);
41455 +}
41456 +
41457 +static void
41458 +IntrQueueCallback (EP_RAIL *rail, void *arg)
41459 +{
41460 +    CM_RAIL *cmRail = (CM_RAIL *) arg;
41461 +    unsigned long flags;
41462 +
41463 +    /* If the lock is held, then don't bother spinning for it, 
41464 +     * since the messages will be received at this, or the
41465 +     * next heartbeat */
41466 +    local_irq_save (flags);
41467 +    if (spin_trylock (&cmRail->Lock))
41468 +    {
41469 +       if (AFTER (lbolt, cmRail->NextRunTime + MSEC2TICKS(CM_TIMER_SCHEDULE_TIMEOUT)))
41470 +           printk ("%s: heartbeat timer stuck - scheduled\n", cmRail->Rail->Name);
41471 +       else
41472 +           ep_poll_inputq (rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail);
41473 +       spin_unlock (&cmRail->Lock);
41474 +    }
41475 +    local_irq_restore (flags);
41476 +}
41477 +
41478 +char *
41479 +sprintClPeers (char *str, CM_RAIL *cmRail, int clvl)
41480 +{
41481 +   int clLo     = cmRail->Levels[clvl].MinNodeId;
41482 +   int clHi     = clLo + cmRail->Levels[clvl].NumNodes - 1;
41483 +   int subClLo  = (clvl == 0) ? cmRail->NodeId : cmRail->Levels[clvl - 1].MinNodeId;
41484 +   int subClHi  = subClLo + ((clvl == 0) ? 0 : cmRail->Levels[clvl - 1].NumNodes - 1);
41485 +   
41486 +   if (subClHi == clHi)
41487 +      sprintf (str, "[%d-%d]", clLo, subClLo - 1);
41488 +   else if (subClLo == clLo)
41489 +      sprintf (str, "[%d-%d]", subClHi + 1, clHi);
41490 +   else
41491 +      sprintf (str, "[%d-%d][%d-%d]", clLo, subClLo - 1, subClHi + 1, clHi);
41492 +
41493 +   return (str);
41494 +}
41495 +
41496 +static void
41497 +RestartComms (CM_RAIL *cmRail, int clvl)
41498 +{
41499 +    int             base;
41500 +    int             nodeId;
41501 +    int             lstat;
41502 +    int             numClNodes;
41503 +    int             subClMin;
41504 +    int             subClMax;
41505 +    int             myClId;
41506 +    int             thisClId;
41507 +    
41508 +    myClId     = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
41509 +    base       = myClId * CM_GSTATUS_BITS;
41510 +    numClNodes = cmRail->Levels[clvl].NumNodes;
41511 +
41512 +    statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
41513 +                     CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
41514 +    cmRail->Levels[clvl].Restarting = 1;
41515 +
41516 +    if (cmRail->Levels[clvl].Online)
41517 +    {
41518 +       cmRail->Levels[clvl].Online = 0;
41519 +       
41520 +       for (thisClId = 0; thisClId < numClNodes; thisClId++)
41521 +       {
41522 +           if (thisClId == subClMin)   /* skip sub-cluster; it's just someone in this cluster */
41523 +           {                           /* that wants me to restart */
41524 +               thisClId = subClMax;
41525 +               continue;
41526 +           }
41527 +           
41528 +           nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
41529 +           base   = thisClId * CM_GSTATUS_BITS;
41530 +           lstat  = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
41531 +           
41532 +           if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN)
41533 +           {
41534 +               switch (ep_disconnect_node (cmRail->Rail, nodeId))
41535 +               {
41536 +               case EP_NODE_CONNECTING:
41537 +                   /* gstat must == RUNNING */
41538 +                   cmRail->Levels[clvl].Connected--;
41539 +                   break;
41540 +               case EP_NODE_DISCONNECTED:
41541 +                   /* CLOSING || STARTING || (lstat & RESTART) */
41542 +                   break;
41543 +               }
41544 +           }
41545 +       }
41546 +    }
41547 +}
41548 +
41549 +static void
41550 +UpdateGlobalStatus (CM_RAIL *cmRail)
41551 +{
41552 +    char            clNodeStr[32];                             /* [%d-%d][%d-%d] */
41553 +    int             nodeId;
41554 +    int             offset;
41555 +    int             base;
41556 +    bitmap_t        gstat;
41557 +    bitmap_t        lgstat;
41558 +    bitmap_t        lstat;
41559 +    int             clvl;
41560 +    int             numClNodes;
41561 +    int             subClMin;
41562 +    int             subClMax;
41563 +    int             myClId;
41564 +    int             thisClId;
41565 +    int             lastClId;
41566 +
41567 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
41568 +    {
41569 +       if (!cmRail->Levels[clvl].GlobalMapValid || /* not got the global map yet */
41570 +           !statemap_changed (cmRail->Levels[clvl].GlobalMap)) /* no changes to respond to */
41571 +       {
41572 +           CPRINTF2 (6, "%s: Got invalid or unchanged clvl %d global map\n", cmRail->Rail->Name, clvl);
41573 +           continue;
41574 +       }
41575 +       
41576 +       CPRINTF2 (5, "%s: Got valid changed clvl %d global map\n", cmRail->Rail->Name, clvl);
41577 +       
41578 +       lastClId = -1;
41579 +       myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
41580 +       numClNodes = cmRail->Levels[clvl].NumNodes;
41581 +       
41582 +       while ((offset = statemap_findchange (cmRail->Levels[clvl].GlobalMap, &gstat, 1)) >= 0)
41583 +       {
41584 +           /*
41585 +            * Check every node that this segment covers - however
41586 +            * if the last node we checked in the previous segmemt
41587 +            * is also the first node in this segment, then skip
41588 +            * it.
41589 +            */
41590 +           if ((thisClId = (offset/CM_GSTATUS_BITS)) == lastClId)
41591 +               thisClId++;
41592 +           lastClId = (offset + BT_NBIPUL - 1)/CM_GSTATUS_BITS;
41593 +           
41594 +           /* check each node that might have changed */
41595 +           for ( ; thisClId <= lastClId && thisClId < numClNodes; thisClId++)
41596 +           {
41597 +               base = thisClId * CM_GSTATUS_BITS;
41598 +               nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
41599 +
41600 +               if (thisClId >= subClMin && thisClId <= subClMax) /* skip sub-cluster */
41601 +                   continue;
41602 +
41603 +               /* This isn't me; I need to sense what this node is driving
41604 +                * (just the starting and running bits) and respond
41605 +                * appropriately...
41606 +                */
41607 +               lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
41608 +               gstat  = statemap_getbits (cmRail->Levels[clvl].GlobalMap,     base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
41609 +
41610 +               if (lgstat == gstat)            /* no change in peer state */
41611 +                   continue;
41612 +
41613 +               CPRINTF5 (3, "%s: Node %d: lgstat %s, gstat %s, lstat %s\n", cmRail->Rail->Name, nodeId,
41614 +                         GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
41615 +                         GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
41616 +                         GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
41617 +
41618 +               /* What I'm currently driving as my acknowledgement */
41619 +               lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
41620 +
41621 +               switch (gstat)
41622 +               {
41623 +               case CM_GSTATUS_STARTING:
41624 +                   if ((lgstat == CM_GSTATUS_ABSENT || lgstat == CM_GSTATUS_CLOSING) && lstat == CM_GSTATUS_MAY_START)
41625 +                   {
41626 +                       CPRINTF2 (1, "%s: ===================node %d STARTING\n", cmRail->Rail->Name, nodeId);
41627 +                       
41628 +                       ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED);
41629 +
41630 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41631 +                       continue;
41632 +                   }
41633 +                   break;
41634 +                   
41635 +               case CM_GSTATUS_RUNNING:
41636 +                   if ((lgstat == CM_GSTATUS_ABSENT   && lstat == CM_GSTATUS_MAY_START) ||
41637 +                       (lgstat == CM_GSTATUS_STARTING && lstat == CM_GSTATUS_MAY_RUN))
41638 +                   {
41639 +                       CPRINTF3 (1, "%s: ===================node %d%s RUNNING\n", cmRail->Rail->Name, nodeId,
41640 +                                 lgstat == CM_GSTATUS_ABSENT ? " Already" : "");
41641 +
41642 +                       ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED);
41643 +
41644 +                       if (cmRail->Levels[clvl].Online)
41645 +                       {
41646 +                           ep_connect_node (cmRail->Rail, nodeId);
41647 +
41648 +                           cmRail->Levels[clvl].Connected++;
41649 +                       }
41650 +
41651 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41652 +                       continue;
41653 +                   }
41654 +                   break;
41655 +                   
41656 +               case CM_GSTATUS_CLOSING:
41657 +                   CPRINTF4 (1, "%s: ===================node %d CLOSING%s%s\n", cmRail->Rail->Name, nodeId,
41658 +                             (lstat & CM_GSTATUS_RESTART) ? " for Restart" : "",
41659 +                             cmRail->Levels[clvl].Online ? "" : " (offline)");
41660 +
41661 +                   if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN)
41662 +                   {
41663 +                       switch (ep_disconnect_node (cmRail->Rail, nodeId))
41664 +                       {
41665 +                       case EP_NODE_CONNECTING:
41666 +                           cmRail->Levels[clvl].Connected--;
41667 +                           /* DROPTHROUGH */
41668 +                       case EP_NODE_DISCONNECTED:
41669 +                           lstat = CM_GSTATUS_MAY_START;
41670 +                           break;
41671 +                       }
41672 +                   }
41673 +
41674 +                   if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_START) /* clear restart if we've disconnected */
41675 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
41676 +                   continue;
41677 +                   
41678 +               default:
41679 +                   break;
41680 +               }
41681 +
41682 +               /* "unexpected" state change forces me to ask her to restart */
41683 +               if (! (lstat & CM_GSTATUS_RESTART))             /* not requesting restart already */
41684 +               {
41685 +                   CPRINTF5 (1, "%s: ===================node %d %s, old %s new %s\n", cmRail->Rail->Name, nodeId,
41686 +                             (gstat == CM_GSTATUS_ABSENT)  ? "ABSENT" : "REQUEST RESTART", 
41687 +                             GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
41688 +                             GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId));
41689 +
41690 +                   /* request restart */
41691 +                   if (cmRail->Levels[clvl].Online && lstat == CM_GSTATUS_MAY_RUN)
41692 +                   {
41693 +                       switch (ep_disconnect_node (cmRail->Rail, nodeId))
41694 +                       {
41695 +                       case EP_NODE_CONNECTING:
41696 +                           cmRail->Levels[clvl].Connected--;
41697 +                           /* DROPTHROUGH */
41698 +                       case EP_NODE_DISCONNECTED:
41699 +                           lstat = CM_GSTATUS_MAY_START;
41700 +                           break;
41701 +                       }
41702 +                   }
41703 +
41704 +                   statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
41705 +                   continue;
41706 +               }
41707 +
41708 +               continue;
41709 +           }
41710 +       }
41711 +    
41712 +       /* Now check myself - see what everyone else thinks I'm doing */
41713 +       base   = myClId * CM_GSTATUS_BITS;
41714 +       lstat  = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
41715 +       gstat  = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS);
41716 +       lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS);
41717 +
41718 +       if (lgstat == gstat)                    /* my state in this cluster hasn't changed */
41719 +       {
41720 +           CPRINTF3 (6, "%s: my clvl %d global status unchanged from %s\n", cmRail->Rail->Name,
41721 +                     clvl, GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId));
41722 +           goto all_done;
41723 +       }
41724 +
41725 +       if ((gstat & CM_GSTATUS_RESTART) != 0)  /* someone wants me to restart */
41726 +       {
41727 +           if ((lstat & CM_GSTATUS_STATUS_MASK) == CM_GSTATUS_CLOSING) /* I'm already restarting */
41728 +               goto all_done;
41729 +           
41730 +           CPRINTF2 (1, "%s: ===================RESTART REQUEST from %s\n", cmRail->Rail->Name,
41731 +                     sprintClPeers (clNodeStr, cmRail, clvl));
41732 +           
41733 +           printk ("%s: Restart Request from %s\n", cmRail->Rail->Name,
41734 +                   sprintClPeers (clNodeStr, cmRail, clvl));
41735 +           
41736 +           RestartComms (cmRail, clvl);
41737 +           goto all_done;
41738 +       }
41739 +       
41740 +       CPRINTF6 (5, "%s: clvl %d: lgstat %s gstat %s, lstat %s%s\n", cmRail->Rail->Name, clvl,
41741 +                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, myClId),
41742 +                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId),
41743 +                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, myClId),
41744 +                 (gstat != lstat) ? " (IGNORED)" : "");
41745 +                       
41746 +       if (gstat != lstat)                     /* not everyone agrees with me */
41747 +           goto all_done;
41748 +
41749 +       switch (lstat)
41750 +       {
41751 +       default:
41752 +           ASSERT (0);                         /* I never drive this */
41753 +           
41754 +       case CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START: /* I can restart now (have seen restart go away) */
41755 +           ASSERT (!cmRail->Levels[clvl].Online);
41756 +           
41757 +           CPRINTF2 (1,"%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name,
41758 +                     sprintClPeers (clNodeStr, cmRail, clvl));
41759 +           printk ("%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name,
41760 +                   sprintClPeers (clNodeStr, cmRail, clvl));
41761 +           
41762 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
41763 +                             CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41764 +           goto all_done;
41765 +           
41766 +       case CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN:
41767 +           ASSERT (!cmRail->Levels[clvl].Online);
41768 +           
41769 +           CPRINTF2 (1, "%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name,
41770 +                     sprintClPeers (clNodeStr, cmRail, clvl));
41771 +           printk ("%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name,
41772 +                   sprintClPeers (clNodeStr, cmRail, clvl));
41773 +           
41774 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
41775 +                             CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41776 +           goto all_done;
41777 +           
41778 +       case CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN:
41779 +           if (! cmRail->Levels[clvl].Online)
41780 +           {
41781 +               CPRINTF2 (1, "%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name,
41782 +                         sprintClPeers (clNodeStr, cmRail, clvl));
41783 +               printk ("%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name,
41784 +                       sprintClPeers (clNodeStr, cmRail, clvl));
41785 +               
41786 +               cmRail->Levels[clvl].Online = 1;
41787 +               
41788 +               for (thisClId = 0; thisClId < numClNodes; thisClId++)
41789 +               {
41790 +                   if (thisClId == subClMin)   /* skip sub-cluster */
41791 +                   {
41792 +                       thisClId = subClMax;
41793 +                       continue;
41794 +                   }
41795 +                   
41796 +                   nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
41797 +                   
41798 +                   base  = thisClId * CM_GSTATUS_BITS;
41799 +                   lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
41800 +                   gstat = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
41801 +                   
41802 +                   /* Only connect to her if I see her as running and I'm not requesting her 
41803 +                    * to restart - this means that I was offline when I saw her transition
41804 +                    * to running and haven't seen her in a "bad" state since. */
41805 +                   if (gstat == CM_GSTATUS_RUNNING && ! (lstat & CM_GSTATUS_RESTART))
41806 +                   {
41807 +                       CPRINTF5 (1, "%s: node %d lgstat %s gstat %s, lstat %s -> CONNECT\n", cmRail->Rail->Name, nodeId,
41808 +                                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
41809 +                                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
41810 +                                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
41811 +                       
41812 +                       if (lstat == CM_GSTATUS_MAY_START)
41813 +                           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
41814 +
41815 +                       ep_connect_node (cmRail->Rail, nodeId);
41816 +
41817 +                       cmRail->Levels[clvl].Connected++;
41818 +                   }
41819 +               }
41820 +           }
41821 +           goto all_done;
41822 +       }
41823 +
41824 +    all_done:
41825 +       statemap_setmap (cmRail->Levels[clvl].LastGlobalMap, cmRail->Levels[clvl].GlobalMap);
41826 +    }
41827 +}
41828 +
41829 +static void
41830 +ReduceGlobalMap (CM_RAIL *cmRail, int clvl)
41831 +{
41832 +    int       lvl;
41833 +    int       sidx;
41834 +    int       recompute;
41835 +    CM_LEVEL *level;
41836 +    int       cTopLevel;
41837 +    int       cRole;
41838 +
41839 +    if (clvl < cmRail->TopLevel)
41840 +    {
41841 +       cTopLevel = clvl + 1;
41842 +       cRole = CM_ROLE_LEADER;
41843 +    }
41844 +    else
41845 +    {
41846 +       cTopLevel = cmRail->TopLevel;
41847 +       cRole = cmRail->Role;
41848 +    }
41849 +    
41850 +    /* Update cmRail->Levels[*].SubordinateMap[clvl] for all subordinate levels */
41851 +    for (lvl = 0; lvl < cTopLevel; lvl++)
41852 +    {
41853 +       level = &cmRail->Levels[lvl];
41854 +
41855 +       /* We need to recompute this level's statemap if...
41856 +        * . Previous level's statemap has changes to propagate OR
41857 +        * . This level's statemap has not been computed yet OR
41858 +        * . A subordinate at this level has sent me a change.
41859 +        * Note that we can only do this if all subordinates from this
41860 +        * level down are present with valid statemaps, or absent (i.e. not
41861 +        * timing out).
41862 +        */
41863 +
41864 +       ASSERT (lvl == 0 || cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]);
41865 +
41866 +       recompute = !level->SubordinateMapValid[clvl] ||
41867 +                   (lvl > 0 && statemap_changed (cmRail->Levels[lvl - 1].SubordinateMap[clvl]));
41868 +         
41869 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
41870 +       {
41871 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
41872 +
41873 +           if (!(sgmt->State == CM_SGMT_ABSENT || /* absent nodes contribute zeros */
41874 +                 (sgmt->State == CM_SGMT_PRESENT && /* present nodes MUST have received a map to contribute */
41875 +                  sgmt->Maps[clvl].InputMapValid)))
41876 +           {
41877 +               CPRINTF5 (5, "%s: waiting for clvl %d lvl %d seg %d node %d\n", cmRail->Rail->Name,
41878 +                         clvl, lvl, sidx, sgmt->NodeId);
41879 +
41880 +               /* Gotta wait for this guy, so we can't compute this level,
41881 +                * or any higher levels. */
41882 +               return;
41883 +           }
41884 +
41885 +           if (statemap_changed (sgmt->Maps[clvl].InputMap))
41886 +           {
41887 +               ASSERT (sgmt->Maps[clvl].InputMapValid);
41888 +
41889 +               recompute = 1;
41890 +
41891 +               CPRINTF7 (5, "%s: %s clvl %d map from @ %d %d (%d) - %s\n",
41892 +                         cmRail->Rail->Name, sgmt->State == CM_SGMT_ABSENT ? "newly absent" : "got new",
41893 +                         clvl, lvl, sidx, sgmt->NodeId, 
41894 +                         MapString ("Input", sgmt->Maps[clvl].InputMap, cmRail->Levels[clvl].NumNodes, ""));
41895 +           }
41896 +       }
41897 +
41898 +       if (recompute)
41899 +       {
41900 +           if (lvl == 0)
41901 +               statemap_reset (cmRail->Levels[clvl].TmpMap);
41902 +           else
41903 +           {
41904 +               ASSERT (cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]);
41905 +               
41906 +               statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[lvl - 1].SubordinateMap[clvl]);
41907 +               statemap_clearchanges (cmRail->Levels[lvl - 1].SubordinateMap[clvl]);
41908 +           }
41909 +        
41910 +           for (sidx = 0; sidx < level->NumSegs; sidx++)
41911 +           {
41912 +               CM_SGMT *sgmt = &level->Sgmts[sidx];
41913 +               
41914 +               if (sgmt->State != CM_SGMT_ABSENT)      /* absent nodes contribute zeroes */
41915 +               {
41916 +                   ASSERT (sgmt->State == CM_SGMT_PRESENT);
41917 +                   ASSERT (sgmt->Maps[clvl].InputMapValid);
41918 +                   statemap_ormap (cmRail->Levels[clvl].TmpMap, sgmt->Maps[clvl].InputMap);
41919 +               }
41920 +               statemap_clearchanges (sgmt->Maps[clvl].InputMap);
41921 +           }
41922 +
41923 +           statemap_setmap (level->SubordinateMap[clvl], cmRail->Levels[clvl].TmpMap);
41924 +           level->SubordinateMapValid[clvl] = 1;
41925 +
41926 +           CPRINTF4 (5, "%s: recompute clvl %d level %d statemap - %s\n", cmRail->Rail->Name, clvl, lvl,
41927 +                     MapString ("level", level->SubordinateMap[clvl], cmRail->Levels[clvl].NumNodes, ""));
41928 +       }
41929 +    }
41930 +
41931 +    if (cRole == CM_ROLE_LEADER_CANDIDATE)     /* don't know this cluster's leader yet */
41932 +       return;
41933 +
41934 +    ASSERT (cTopLevel == 0 || cmRail->Levels[cTopLevel - 1].SubordinateMapValid[clvl]);
41935 +
41936 +    /* Update SubTreeMap */
41937 +    
41938 +    if (!cmRail->Levels[clvl].SubTreeMapValid || 
41939 +       statemap_changed (cmRail->Levels[clvl].LocalMap) ||
41940 +       (cTopLevel > 0 && statemap_changed (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl])))
41941 +    {
41942 +       statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[clvl].LocalMap);
41943 +       statemap_clearchanges (cmRail->Levels[clvl].LocalMap);
41944 +
41945 +       if (cTopLevel > 0)
41946 +       {
41947 +           statemap_ormap (cmRail->Levels[clvl].TmpMap, cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]);
41948 +           statemap_clearchanges (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]);
41949 +       }
41950 +
41951 +       statemap_setmap (cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].TmpMap);
41952 +       cmRail->Levels[clvl].SubTreeMapValid = 1;
41953 +
41954 +       CPRINTF3 (5, "%s: recompute clvl %d subtree map - %s\n", cmRail->Rail->Name, clvl,
41955 +                 MapString ("subtree", cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].NumNodes, ""));
41956 +    }
41957 +
41958 +    if (cRole == CM_ROLE_SUBORDINATE)          /* got a leader (Not me) */
41959 +    {                                          /* => send SubTreeMap to her */
41960 +       CM_SGMT *leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0];
41961 +
41962 +       ASSERT (leader->State == CM_SGMT_PRESENT);
41963 +       ASSERT (cmRail->Levels[clvl].SubTreeMapValid);
41964 +
41965 +       if (!leader->Maps[clvl].OutputMapValid ||
41966 +           statemap_changed (cmRail->Levels[clvl].SubTreeMap))
41967 +       {
41968 +           statemap_setmap (leader->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap);
41969 +           leader->Maps[clvl].OutputMapValid = 1;
41970 +
41971 +           statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
41972 +
41973 +           CPRINTF3 (5, "%s: sending clvl %d subtree map to leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId);
41974 +       }
41975 +    }
41976 +}
41977 +
41978 +void
41979 +BroadcastGlobalMap (CM_RAIL *cmRail, int clvl)
41980 +{
41981 +    int       lvl;
41982 +    int       sidx;
41983 +    CM_LEVEL *level;
41984 +    CM_SGMT  *leader;
41985 +    int       cTopLevel;
41986 +    int       cRole;
41987 +
41988 +    if (clvl < cmRail->TopLevel)
41989 +    {
41990 +       cTopLevel = clvl + 1;
41991 +       cRole = CM_ROLE_LEADER;
41992 +    }
41993 +    else
41994 +    {
41995 +       cTopLevel = cmRail->TopLevel;
41996 +       cRole = cmRail->Role;
41997 +    }
41998 +
41999 +    switch (cRole)
42000 +    {
42001 +    default:
42002 +       ASSERT (0);
42003 +       
42004 +    case CM_ROLE_LEADER_CANDIDATE:             /* don't know this cluster's leader yet */
42005 +       return;
42006 +
42007 +    case CM_ROLE_LEADER:                       /* cluster leader: */
42008 +       ASSERT (clvl < cmRail->TopLevel);               /* set GlobalMap from SubTreeMap */
42009 +       
42010 +       if (!cmRail->Levels[clvl].SubTreeMapValid)      /* can't set global map */
42011 +           return;
42012 +
42013 +       if (cmRail->Levels[clvl].GlobalMapValid &&      /* already set global map */
42014 +           !statemap_changed (cmRail->Levels[clvl].SubTreeMap)) /* no changes to propagate */
42015 +           return;
42016 +       
42017 +       statemap_setmap (cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].SubTreeMap);
42018 +       cmRail->Levels[clvl].GlobalMapValid = 1;
42019 +       statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
42020 +
42021 +       CPRINTF2 (5, "%s: whole cluster %d leader setting global map\n", cmRail->Rail->Name, clvl);
42022 +
42023 +       UpdateGlobalStatus (cmRail);
42024 +       break;
42025 +       
42026 +    case CM_ROLE_SUBORDINATE:                  /* cluster subordinate: */
42027 +       ASSERT (clvl >= cmRail->TopLevel);              /* receive GlobalMap from leader */
42028 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
42029 +       
42030 +       leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0];
42031 +       ASSERT (leader->State == CM_SGMT_PRESENT);
42032 +
42033 +       if (!leader->Maps[clvl].InputMapValid)  /* can't set global map */
42034 +           return;
42035 +       
42036 +       if (cmRail->Levels[clvl].GlobalMapValid &&      /* already set global map */
42037 +           !statemap_changed (leader->Maps[clvl].InputMap)) /* no changes to propagate */
42038 +           return;
42039 +
42040 +       statemap_setmap (cmRail->Levels[clvl].GlobalMap, leader->Maps[clvl].InputMap);
42041 +       cmRail->Levels[clvl].GlobalMapValid = 1;
42042 +       statemap_clearchanges (leader->Maps[clvl].InputMap);
42043 +
42044 +       CPRINTF3 (5, "%s: getting clvl %d global map from leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId);
42045 +
42046 +       UpdateGlobalStatus (cmRail);
42047 +       break;
42048 +    }
42049 +
42050 +    CPRINTF3 (5, "%s: clvl %d %s\n", cmRail->Rail->Name, clvl,
42051 +             MapString ("global", cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].NumNodes, ""));
42052 +    
42053 +    /* Broadcast global map to all subordinates */
42054 +    for (lvl = 0; lvl < cTopLevel; lvl++)
42055 +    {
42056 +       level = &cmRail->Levels[lvl];
42057 +       
42058 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
42059 +       {
42060 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
42061 +           
42062 +           if (sgmt->State == CM_SGMT_PRESENT)
42063 +           {
42064 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap);
42065 +               sgmt->Maps[clvl].OutputMapValid = 1;
42066 +               
42067 +               CPRINTF5 (5, "%s: sending clvl %d global map to subordinate %d %d (%d)\n", 
42068 +                         cmRail->Rail->Name, clvl, lvl, sidx, sgmt->NodeId);
42069 +           }
42070 +       }
42071 +    }
42072 +}
42073 +
42074 +static void
42075 +CheckPeerPulse (CM_RAIL *cmRail, CM_SGMT *sgmt)
42076 +{
42077 +    int clvl, sendRejoin;
42078 +    
42079 +    switch (sgmt->State)
42080 +    {
42081 +    case CM_SGMT_ABSENT:
42082 +       break;
42083 +
42084 +    case CM_SGMT_WAITING:                      /* waiting for a subtree */
42085 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT)))
42086 +           break;
42087 +      
42088 +       CPRINTF3 (2, "%s: lvl %d subtree %d contains no live nodes\n", cmRail->Rail->Name, 
42089 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
42090 +
42091 +       sgmt->State = CM_SGMT_ABSENT;
42092 +       for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
42093 +       {
42094 +           statemap_zero (sgmt->Maps[clvl].InputMap);          /* need to start propagating zeros (flags change) */
42095 +           sgmt->Maps[clvl].InputMapValid = 1;         /* and must indicate that the map is now valid */
42096 +       }
42097 +       break;
42098 +
42099 +    case CM_SGMT_COMING:                               /* lost/waiting subtree sent me IMCOMING */
42100 +       ASSERT (sgmt->Level > 0);                       /* we only do subtree discovery below our own level */
42101 +
42102 +       if (AFTER (lbolt, sgmt->WaitingTick + MSEC2TICKS(CM_WAITING_TIMEOUT)))
42103 +       {
42104 +           CPRINTF3 (1, "%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name,
42105 +                     sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
42106 +           printk ("%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name,
42107 +                   sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
42108 +
42109 +           sgmt->State = CM_SGMT_ABSENT;
42110 +           for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
42111 +           {
42112 +               statemap_zero (sgmt->Maps[clvl].InputMap);              /* need to start propagating zeros (flags change) */
42113 +               sgmt->Maps[clvl].InputMapValid = 1;             /* and must indicate that the map is now valid */
42114 +           }
42115 +           break;
42116 +       }
42117 +
42118 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT)))
42119 +           break;
42120 +
42121 +       CPRINTF3 (2, "%s: lvl %d subtree %d hasn't connected yet\n", cmRail->Rail->Name,
42122 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
42123 +
42124 +       sgmt->State = CM_SGMT_WAITING;
42125 +       sgmt->UpdateTick = lbolt;
42126 +
42127 +       if (sgmt->Level > 0)
42128 +           __Schedule_Discovery (cmRail);
42129 +       break;
42130 +      
42131 +    case CM_SGMT_PRESENT:
42132 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_HEARTBEAT_TIMEOUT)))
42133 +           break;
42134 +
42135 +       if (sgmt->Level == cmRail->TopLevel)            /* leader died */
42136 +       {
42137 +           sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0);
42138 +
42139 +           CPRINTF4 (1, "%s: leader (%d) node %d JUST DIED%s\n", 
42140 +                     cmRail->Rail->Name, sgmt->Level, sgmt->NodeId,
42141 +                     sendRejoin ? ": !REJOIN" : "");
42142 +           
42143 +           printk ("%s: lvl %d leader (%d) JUST DIED%s\n", 
42144 +                   cmRail->Rail->Name, sgmt->Level, sgmt->NodeId,
42145 +                   sendRejoin ? ": !REJOIN" : "");
42146 +       
42147 +           if (sendRejoin)
42148 +           {
42149 +               /* she's not sent us any heartbeats even though she responded to a discover
42150 +                * so tell her to rejoin the tree at the bottom, this will mean that she 
42151 +                * has to run the heartbeat timer before being able to rejoin the tree. */
42152 +               SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN);
42153 +           }
42154 +
42155 +           StartLeaderDiscovery (cmRail);
42156 +           break;
42157 +       }
42158 +
42159 +       sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0);
42160 +
42161 +       CPRINTF5 (2, "%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, 
42162 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId,
42163 +                 sendRejoin ? ": !REJOIN" : "");
42164 +       printk ("%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, 
42165 +               sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId,
42166 +               sendRejoin ? ": !REJOIN" : "");
42167 +
42168 +       if (sendRejoin)
42169 +       {
42170 +           /* she's not sent us any heartbeats even though she responded to a discover
42171 +            * so tell her to rejoin the tree at the bottom, this will mean that she 
42172 +            * has to run the heartbeat timer before being able to rejoin the tree. */
42173 +           SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN);
42174 +       }
42175 +
42176 +       StartSubTreeDiscovery (cmRail, sgmt);
42177 +       break;
42178 +        
42179 +    default:
42180 +       ASSERT (0);
42181 +    }
42182 +}
42183 +
42184 +static void
42185 +CheckPeerPulses (CM_RAIL *cmRail)
42186 +{
42187 +    int lvl;
42188 +    int sidx;
42189 +   
42190 +    /* check children are alive */
42191 +    for (lvl = 0; lvl < cmRail->TopLevel; lvl++)
42192 +       for (sidx = 0; sidx < cmRail->Levels[lvl].NumSegs; sidx++)
42193 +           CheckPeerPulse (cmRail, &cmRail->Levels[lvl].Sgmts[sidx]);
42194 +
42195 +    /* check leader is alive */
42196 +    if (cmRail->Role == CM_ROLE_SUBORDINATE)
42197 +    {
42198 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
42199 +       ASSERT (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT);
42200 +      
42201 +       CheckPeerPulse (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0]);
42202 +    }
42203 +}
42204 +
42205 +static void
42206 +SendHeartbeats (CM_RAIL *cmRail)
42207 +{
42208 +    int lvl;
42209 +
42210 +    /* Send heartbeats to my children */
42211 +    for (lvl = 0; lvl < cmRail->TopLevel; lvl++)
42212 +    {
42213 +       CM_LEVEL *level = &cmRail->Levels[lvl];
42214 +       int       sidx;
42215 +       
42216 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
42217 +       {
42218 +           CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx];
42219 +
42220 +           if (sgmt->State == CM_SGMT_PRESENT)
42221 +               SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_HEARTBEAT);
42222 +       }
42223 +    }
42224 +
42225 +    /* Send heartbeat to my leader */
42226 +    if (cmRail->Role == CM_ROLE_SUBORDINATE)
42227 +    {
42228 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
42229 +       SendToSgmt (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0], CM_MSG_TYPE_HEARTBEAT);
42230 +    }
42231 +}
42232 +
42233 +static int
42234 +BroadcastDiscover (CM_RAIL *cmRail)
42235 +{
42236 +    int       sidx;
42237 +    int              lvl;
42238 +    int       msgType;
42239 +    CM_LEVEL *level;
42240 +    int       urgent;
42241 +
42242 +    ASSERT (cmRail->TopLevel <= cmRail->NumLevels);
42243 +    ASSERT ((cmRail->Role == CM_ROLE_LEADER) ? (cmRail->TopLevel == cmRail->NumLevels) :
42244 +           (cmRail->Role == CM_ROLE_SUBORDINATE) ? (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT) :
42245 +           (cmRail->Role == CM_ROLE_LEADER_CANDIDATE));
42246 +
42247 +    if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE)      /* got a leader/lead whole machine */
42248 +    {
42249 +       urgent = 0;                             /* non-urgent leader discovery */
42250 +       lvl = cmRail->TopLevel - 1;             /* on nodes I lead (resolves leader conflicts) */
42251 +       msgType = CM_MSG_TYPE_RESOLVE_LEADER;
42252 +    }
42253 +    else
42254 +    {
42255 +       urgent = 1;                             /* urgent leader discovery */
42256 +       lvl = cmRail->TopLevel;                 /* on nodes I'd like to lead */
42257 +       msgType = CM_MSG_TYPE_DISCOVER_LEADER;
42258 +    }
42259 +
42260 +    if (lvl >= 0)
42261 +    {
42262 +       if (lvl > cmRail->BroadcastLevel)
42263 +       {
42264 +           /* Unable to broadcast at this level in the spanning tree, so we 
42265 +            * just continue doing discovery until we are able to broadcast */
42266 +           CPRINTF4 (6, "%s: broadcast level %d too low to discover %d at level %d\n",
42267 +                     cmRail->Rail->Name, cmRail->BroadcastLevel, msgType, lvl);
42268 +
42269 +           cmRail->DiscoverStartTick = lbolt;
42270 +       }
42271 +       else
42272 +       {
42273 +           level = &cmRail->Levels[lvl];
42274 +           SendToSgmt (cmRail, &level->Sgmts[level->MySgmt], msgType);
42275 +       }
42276 +    }
42277 +    
42278 +    while (lvl > 0)
42279 +    {
42280 +       level = &cmRail->Levels[lvl];
42281 +      
42282 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
42283 +       {
42284 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
42285 +        
42286 +           if (sgmt->State == CM_SGMT_WAITING)
42287 +           {
42288 +               ASSERT (sidx != level->MySgmt);
42289 +               /* Do subordinate discovery.  Existing subordinates will
42290 +                * ignore it, but leader candidates will send IMCOMING.
42291 +                * This is always urgent since we'll assume a subtree is
42292 +                * absent if I don't get IMCOMING within the timeout.
42293 +                */
42294 +               SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_DISCOVER_SUBORDINATE);
42295 +               urgent = 1;
42296 +           }
42297 +       }
42298 +       lvl--;
42299 +    }
42300 +   
42301 +    return (urgent);
42302 +}
42303 +
42304 +static void
42305 +CheckBroadcast (CM_RAIL *cmRail)
42306 +{
42307 +    int  clvl;
42308 +
42309 +    for (clvl = cmRail->NumLevels-1; clvl >= 0 && cmRail->Rail->SwitchBroadcastLevel < cmRail->Levels[clvl].SwitchLevel; clvl--)
42310 +       ;
42311 +
42312 +    if (cmRail->OfflineReasons || cmRail->Rail->System->Shutdown)
42313 +       clvl = -1;
42314 +
42315 +    /* if the level at which we can broadcast drops, then we must rejoin the
42316 +     * spanning tree at the highest level for which broadcast is good. */
42317 +    if (cmRail->BroadcastLevel > clvl && clvl < (int)(cmRail->Role == CM_ROLE_LEADER ? cmRail->TopLevel - 1 : cmRail->TopLevel))
42318 +    {
42319 +       printk ("%s: REJOINING at level %d because %s\n", cmRail->Rail->Name, clvl+1, 
42320 +               (cmRail->OfflineReasons & CM_OFFLINE_MANAGER) ? "of manager thread" :
42321 +               (cmRail->OfflineReasons & CM_OFFLINE_PROCFS)  ? "force offline"  : 
42322 +               cmRail->Rail->System->Shutdown ? "system shutdown" : "broadcast level changed");
42323 +       LowerTopLevel (cmRail, clvl+1);
42324 +    }
42325 +    
42326 +    if (cmRail->BroadcastLevel != clvl)
42327 +    {
42328 +       cmRail->BroadcastLevel     = clvl;
42329 +       cmRail->BroadcastLevelTick = lbolt;
42330 +    }
42331 +
42332 +    /* schedule the update thread, to withdraw from comms with 
42333 +     * nodes "outside" of the valid broadcastable range. */
42334 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42335 +    {
42336 +       if (cmRail->BroadcastLevel < clvl)
42337 +       {
42338 +           if (AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT) && 
42339 +               !(cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST))
42340 +           {
42341 +               printk ("%s: Withdraw at Level %d\n", cmRail->Rail->Name, clvl);
42342 +               cmRail->Levels[clvl].OfflineReasons |= CM_OFFLINE_BROADCAST;
42343 +           }
42344 +       }
42345 +       else
42346 +       {
42347 +           if (cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST)
42348 +           {
42349 +               printk ("%s: Rejoin at Level %d\n", cmRail->Rail->Name, clvl);
42350 +               cmRail->Levels[clvl].OfflineReasons &= ~CM_OFFLINE_BROADCAST;
42351 +           }
42352 +       }
42353 +    }
42354 +       
42355 +}
42356 +
42357 +static void
42358 +CheckManager (CM_RAIL *cmRail)
42359 +{
42360 +    long time,  state = ep_kthread_state (&cmRail->Rail->System->ManagerThread, &time);
42361 +
42362 +    if (state == KT_STATE_RUNNING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_RUNNING_TIMEOUT)))
42363 +       state = KT_STATE_SLEEPING;
42364 +    if (state != KT_STATE_SLEEPING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT)))
42365 +       state = KT_STATE_SLEEPING;
42366 +
42367 +    if ((cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state == KT_STATE_SLEEPING)
42368 +    {
42369 +       printk ("%s: manager thread unstuck\n", cmRail->Rail->Name);
42370 +
42371 +       cmRail->OfflineReasons &= ~CM_OFFLINE_MANAGER;
42372 +    }
42373 +
42374 +    if (!(cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state != KT_STATE_SLEEPING)
42375 +    {
42376 +       printk ("%s: manager thread stuck - %s\n", cmRail->Rail->Name,
42377 +               state == KT_STATE_SCHEDULED ? "scheduled" : 
42378 +               state == KT_STATE_RUNNING ? "running" : 
42379 +               state == KT_STATE_STALLED ? "stalled" : "unknown");
42380 +
42381 +       cmRail->OfflineReasons |= CM_OFFLINE_MANAGER;
42382 +    }
42383 +}
42384 +
42385 +static void
42386 +CheckOfflineReasons (CM_RAIL *cmRail, int clvl)
42387 +{
42388 +    int subClMin, subClMax, myClId;
42389 +    char clNodeStr[32];                                /* [%d-%d][%d-%d] */
42390 +
42391 +    if (cmRail->Levels[clvl].OfflineReasons)
42392 +    {
42393 +       if (cmRail->Levels[clvl].Online)
42394 +       {
42395 +           printk ("%s: Withdraw from %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl));
42396 +           
42397 +           RestartComms (cmRail, clvl);
42398 +       }
42399 +    }
42400 +    else
42401 +    {
42402 +       if (cmRail->Levels[clvl].Restarting && cmRail->Levels[clvl].Connected == 0)
42403 +       {
42404 +           printk ("%s: Rejoin with %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl));
42405 +
42406 +           myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
42407 +           
42408 +           ASSERT (statemap_getbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS, CM_GSTATUS_BITS) == 
42409 +                   (CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART));
42410 +    
42411 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS,
42412 +                             CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
42413 +
42414 +           cmRail->Levels[clvl].Restarting = 0;
42415 +       }
42416 +    }
42417 +}
42418 +
42419 +void
42420 +DoHeartbeatWork (CM_RAIL *cmRail)
42421 +{
42422 +    long now = lbolt;
42423 +    int  clvl;
42424 +
42425 +    if ((RejoinCheck || RejoinPanic) &&
42426 +       AFTER (now, cmRail->NextRunTime + MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT))) /* If I've been unresponsive for too long */
42427 +    {
42428 +       /* I'd better reconnect to the network because I've not been playing the game */
42429 +       CPRINTF4 (1, "%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now,  cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT));
42430 +       printk ("%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now,  cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT));
42431 +       
42432 +       LowerTopLevel (cmRail, 0);
42433 +       
42434 +       IncrStat (cmRail, RejoinTooSlow);
42435 +       
42436 +       if (RejoinPanic)
42437 +           panic ("ep: REJOINING because I was too slow (heartbeat)\n");
42438 +    }
42439 +    
42440 +    PollInputQueues (cmRail);
42441 +    
42442 +    if (cmRail->NextDiscoverTime && ! BEFORE (now, cmRail->NextDiscoverTime))
42443 +    {
42444 +       if (BroadcastDiscover (cmRail))         /* urgent discovery required? */
42445 +           cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_URGENT_DISCOVER_INTERVAL);
42446 +       else
42447 +           cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_PERIODIC_DISCOVER_INTERVAL);
42448 +       
42449 +       if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE && AFTER (now, cmRail->DiscoverStartTick + MSEC2TICKS (CM_DISCOVER_TIMEOUT)))
42450 +           RaiseTopLevel (cmRail);
42451 +    }
42452 +    
42453 +    if (cmRail->NextHeartbeatTime && ! BEFORE (now, cmRail->NextHeartbeatTime))
42454 +    {
42455 +       CheckPosition (cmRail->Rail);
42456 +       CheckPeerPulses (cmRail);
42457 +       CheckBroadcast (cmRail);
42458 +       CheckManager (cmRail);
42459 +       
42460 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42461 +       {
42462 +           CheckOfflineReasons (cmRail, clvl);
42463 +           ReduceGlobalMap (cmRail, clvl);
42464 +           BroadcastGlobalMap (cmRail, clvl);
42465 +       }
42466 +       
42467 +       SendHeartbeats (cmRail);
42468 +       
42469 +       /* Compute the next heartbeat time, but "drift" it towards the last
42470 +        * periodic discovery time we saw from the whole machine leader */
42471 +       cmRail->NextHeartbeatTime = now + MSEC2TICKS (CM_HEARTBEAT_INTERVAL);
42472 +    }
42473 +
42474 +    if (cmRail->NextDiscoverTime && AFTER (cmRail->NextHeartbeatTime, cmRail->NextDiscoverTime))
42475 +       cmRail->NextRunTime = cmRail->NextDiscoverTime;
42476 +    else 
42477 +       cmRail->NextRunTime = cmRail->NextHeartbeatTime;
42478 +}
42479 +
42480 +#define CM_SVC_INDICATOR_OFFSET(CMRAIL,CLVL,IND,NODEID)     ( ( CMRAIL->Levels[CLVL].NumNodes * CM_GSTATUS_BITS ) \
42481 +                                                               + ( CMRAIL->Levels[CLVL].NumNodes * IND ) \
42482 +                                                               + ( NODEID - CMRAIL->Levels[CLVL].MinNodeId ) )
42483 +int
42484 +cm_svc_indicator_set (EP_RAIL *rail, int svc_indicator)
42485 +{
42486 +    CM_RAIL *cmRail = rail->ClusterRail;
42487 +    unsigned long flags;
42488 +    int           clvl;
42489 +
42490 +    EPRINTF2 (DBG_SVC,"cm_svc_indicator_set: rail %p ind %d\n", rail, svc_indicator);
42491 +
42492 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
42493 +    {
42494 +       EPRINTF1 (DBG_SVC,"cm_svc_indicator_set: service indicator %d not registered\n", svc_indicator);
42495 +       return (-1);
42496 +    }
42497 +
42498 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
42499 +       return (-2);
42500 +    
42501 +    spin_lock_irqsave (&cmRail->Lock, flags);
42502 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)  {
42503 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 1, 1); 
42504 +       EPRINTF3 (DBG_SVC,"cm_svc_indicator_set: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId));
42505 +    }
42506 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42507 +
42508 +    return (0);
42509 +}
42510 +
42511 +int
42512 +cm_svc_indicator_clear (EP_RAIL *rail, int svc_indicator)
42513 +{
42514 +    CM_RAIL *cmRail = rail->ClusterRail;
42515 +    unsigned long flags;
42516 +    int           clvl;
42517 +
42518 +    EPRINTF2 (DBG_SVC, "cm_svc_indicator_clear: rail %p ind %d\n", rail, svc_indicator);
42519 +
42520 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
42521 +    {
42522 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_clear: service indicator %d not registered\n", svc_indicator);
42523 +       return (-1);
42524 +    }
42525 +
42526 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
42527 +       return (-2);
42528 +
42529 +    spin_lock_irqsave (&cmRail->Lock, flags);
42530 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)  {
42531 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 0, 1); 
42532 +       EPRINTF3 (DBG_SVC, "cm_svc_indicator_clear: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId));
42533 +    }
42534 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42535 +
42536 +    return (0);
42537 +}
42538 +
42539 +int
42540 +cm_svc_indicator_is_set (EP_RAIL *rail, int svc_indicator, int nodeId)
42541 +{
42542 +    CM_RAIL *cmRail = rail->ClusterRail;
42543 +    unsigned long flags;
42544 +    int           clvl;
42545 +    bitmap_t      bits;
42546 +
42547 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: rail %p ind %d nodeId %d (me=%d)\n", rail, svc_indicator, nodeId, cmRail->NodeId);
42548 +
42549 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
42550 +    {
42551 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: service indicator %d not registered\n", svc_indicator);
42552 +       return (0);
42553 +    }
42554 +
42555 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
42556 +       return (0);
42557 +    
42558 +    spin_lock_irqsave (&cmRail->Lock, flags);
42559 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42560 +       if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
42561 +           break;
42562 +
42563 +    if ( clvl == cmRail->NumLevels) { 
42564 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: node out of range %d \n", nodeId); 
42565 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
42566 +       return (0);
42567 +    }
42568 +
42569 +    if ( cmRail->NodeId == nodeId ) 
42570 +       bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
42571 +    else
42572 +       bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
42573 +
42574 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: clvl %d nodeId %d offset %d %x\n", clvl, nodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), bits);
42575 +
42576 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42577 +
42578 +    return  ( (bits == 0) ? (0) : (1) );
42579 +}
42580 +
42581 +int
42582 +cm_svc_indicator_bitmap (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes)
42583 +{
42584 +    /* or in the bit map */  
42585 +    CM_RAIL      *cmRail = rail->ClusterRail;
42586 +    int           nodeId, clvl;
42587 +    bitmap_t      bits;
42588 +    unsigned long flags;
42589 +    int           clip_out_low, clip_out_high;
42590 +    int           curr_low, curr_high;
42591 +    int           check_low, check_high;
42592 +
42593 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_bitmap: rail %p ind %d low %d high %d\n", rail, svc_indicator, low, (low + nnodes));
42594 +
42595 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
42596 +    {
42597 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_bitmap: service indicator %d not registered\n", svc_indicator);
42598 +       return (-1);
42599 +    }
42600 +
42601 +    if (rail->State != EP_RAIL_STATE_RUNNING) 
42602 +       return (-2);
42603 +
42604 +    spin_lock_irqsave (&cmRail->Lock, flags);
42605 +    
42606 +    clip_out_low = clip_out_high = -1; /* all in */
42607 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++) {
42608 +
42609 +       /* curr_high/low is the range of the current lvl */
42610 +       curr_low  = cmRail->Levels[clvl].MinNodeId;
42611 +       curr_high = cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes;
42612 +
42613 +       /* find out how much of low high is in this range and only check that part */
42614 +       check_low  = ( low  < curr_low)  ? curr_low  : low; 
42615 +       check_high = ( (low + nnodes) > curr_high) ? curr_high : (low + nnodes);
42616 +
42617 +       EPRINTF6 (DBG_SVC, "cm_svc_indicator_bitmap: curr(%d,%d) check(%d,%d) clip(%d,%d)\n", curr_low, curr_high, check_low, check_high, clip_out_low, clip_out_high);
42618 +
42619 +       for(nodeId = check_low; nodeId < check_high; nodeId++) {
42620 +
42621 +           if (  (clip_out_low <= nodeId) && (nodeId <= clip_out_high))
42622 +               nodeId = clip_out_high; /* step over the clipped out section */
42623 +           else {
42624 +
42625 +               if ( cmRail->NodeId == nodeId ) 
42626 +                   bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
42627 +               else
42628 +                   bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
42629 +               
42630 +               if ( bits ) {
42631 +                   EPRINTF2 (DBG_SVC, "cm_svc_indicator_bitmap: its set nodeId %d (clvl %d)\n", nodeId, clvl);
42632 +                   BT_SET ( bitmap , nodeId - low );
42633 +               }
42634 +           }
42635 +       }
42636 +
42637 +       /* widen the clip out range */
42638 +       clip_out_low  = curr_low;
42639 +       clip_out_high = curr_high -1; 
42640 +    }
42641 +
42642 +    spin_unlock_irqrestore (&cmRail->Lock, flags);      
42643 +
42644 +    return (0);
42645 +}
42646 +
42647 +#if defined(PER_CPU_TIMEOUT)
42648 +static void
42649 +cm_percpu_timeout (void *arg)
42650 +{
42651 +    CM_RAIL          *cmRail = (CM_RAIL *) arg;
42652 +    CM_TIMEOUT_DATA *hbd     = &cmRail->HeartbeatTimeoutsData[current_cpu()];
42653 +    long             now     = lbolt;
42654 +    unsigned         delay   = now - hbd->ScheduledAt;
42655 +    unsigned long    flags;
42656 +
42657 +    if (delay > hbd->WorstDelay)
42658 +       hbd->WorstDelay = delay;
42659 +    if (hbd->BestDelay == 0 || delay < hbd->BestDelay)
42660 +       hbd->BestDelay = delay;
42661 +
42662 +    if (cmRail->HeartbeatTimeoutsShouldStop)
42663 +    {
42664 +       spin_lock_irqsave (&cmRail->Lock, flags);
42665 +       cmRail->HeartbeatTimeoutsStopped |= (1 << current_cpu());
42666 +       kcondvar_wakeupall (&cmRail->HeartbeatTimeoutsWait, &cmRail->Lock);
42667 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
42668 +       return;
42669 +    }
42670 +
42671 +    if (cmRail->NextRunTime == 0 || AFTER (cmRail->NextRunTime, lbolt))
42672 +       hbd->EarlyCount++;
42673 +    else if (cmRail->HeartbeatTimeoutRunning)
42674 +       hbd->MissedCount++;
42675 +    else
42676 +    {
42677 +       local_irq_save (flags);
42678 +       
42679 +       if (! spin_trylock (&cmRail->HeartbeatTimeoutsLock))
42680 +           hbd->WastedCount++;
42681 +       else
42682 +       {
42683 +           cmRail->HeartbeatTimeoutRunning = 1;
42684 +           hbd->WorkCount++;
42685 +
42686 +           spin_lock (&cmRail->Lock);
42687 +
42688 +           if ((delay = (lbolt - cmRail->NextRunTime)) > hbd->WorstHearbeatDelay)
42689 +               hbd->WorstHearbeatDelay = delay;
42690 +           if ((delay = (lbolt - now)) > hbd->WorstLockDelay)
42691 +               hbd->WorstLockDelay = delay;
42692 +
42693 +           DoHeartbeatWork (cmRail);
42694 +
42695 +           spin_unlock (&cmRail->Lock);
42696 +           spin_unlock (&cmRail->HeartbeatTimeoutsLock);
42697 +
42698 +           cmRail->HeartbeatTimeoutRunning = 0;
42699 +       }
42700 +       local_irq_restore (flags);
42701 +    }
42702 +
42703 +    hbd->ScheduledAt = lbolt + MSEC2TICKS (CM_PERCPU_TIMEOUT_INTERVAL);
42704 +    timeout_cpu (cm_percpu_timeout, cmRail, MSEC2TICKS (CM_PERCPU_TIMEOUT_INTERVAL), CALLOUT_TYPE|CALLOUT_NOMALLOC);
42705 +}
42706 +
42707 +static void
42708 +StartPerCpuTimeouts (CM_RAIL *cmRail)
42709 +{
42710 +    register int c;
42711 +
42712 +    spin_lock_init (&cmRail->HeartbeatTimeoutsLock);
42713 +
42714 +    KMEM_ZALLOC (cmRail->HeartbeatTimeoutsData, CM_TIMEOUT_DATA *, ncpus * sizeof (CM_TIMEOUT_DATA), 1);
42715 +
42716 +    for (c = 0; c < cpus_in_box; c++)
42717 +    {
42718 +       if (cpu_to_processor (c))
42719 +       {       
42720 +           if (current_cpu() != c)
42721 +           {
42722 +               thread_bind (current_thread(), cpu_to_processor(c));
42723 +               mpsleep (current_thread(), 0, "StartPerCpuTimeouts", 1, NULL, 0);
42724 +
42725 +               if (current_cpu() != c)
42726 +                   panic ("ep: StartPerCpuTimeouts - failed to switch cpu\n");
42727 +           }
42728 +           
42729 +           cmRail->HeartbeatTimeoutsStarted |= (1 << c);
42730 +           cmRail->HeartbeatTimeoutsData[c].ScheduledAt = lbolt + c;
42731 +
42732 +           timeout_cpu (cm_percpu_timeout, cmRail, c, CALLOUT_TYPE|CALLOUT_NOMALLOC);
42733 +       }
42734 +    }
42735 +
42736 +    thread_bind(current_thread(), NULL);
42737 +}
42738 +
42739 +static void
42740 +StopPerCpuTimeouts (CM_RAIL *cmRail)
42741 +{
42742 +    register int c;
42743 +    unsigned long flags;
42744 +
42745 +    cmRail->HeartbeatTimeoutsShouldStop = 1;
42746 +
42747 +    for (c = 0; c < cpus_in_box; c++)
42748 +    {
42749 +       if (cmRail->HeartbeatTimeoutsStarted & (1 << c))
42750 +       {
42751 +           printk ("%s: stopping cpu_timeout on cpu %d\n", cmRail->Rail->Name, c);
42752 +
42753 +           if (untimeout_cpu (cm_percpu_timeout, cmRail, c, CALLOUT_TYPE|CALLOUT_NOMALLOC, NULL))
42754 +               cmRail->HeartbeatTimeoutsStopped |= (1 << c);
42755 +       }
42756 +    }
42757 +    thread_bind(current_thread(), NULL);
42758 +
42759 +    spin_lock_irqsave (&cmRail->Lock, flags);
42760 +    while (cmRail->HeartbeatTimeoutsStopped != cmRail->HeartbeatTimeoutsStarted)
42761 +       kcondvar_wait (&cmRail->HeartbeatTimeoutsWait, &cmRail->Lock, &flags);
42762 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42763 +
42764 +    cmRail->HeartbeatTimeoutsStarted    = 0;
42765 +    cmRail->HeartbeatTimeoutsStopped    = 0;
42766 +    cmRail->HeartbeatTimeoutsShouldStop = 0;
42767 +
42768 +    KMEM_FREE (cmRail->HeartbeatTimeoutsData, ncpus * sizeof (CM_TIMEOUT_DATA));
42769 +
42770 +    spin_lock_destroy (&cmRail->HeartbeatTimeoutsLock);
42771 +}
42772 +
42773 +#else
42774 +
42775 +static void
42776 +cm_heartbeat_timer (unsigned long arg)
42777 +{
42778 +    CM_RAIL *cmRail = (CM_RAIL *) arg;
42779 +    unsigned long flags;
42780 +
42781 +    spin_lock_irqsave (&cmRail->Lock, flags);
42782 +
42783 +    ASSERT (cmRail->Rail->State == EP_RAIL_STATE_RUNNING);
42784 +
42785 +    DoHeartbeatWork (cmRail);
42786 +    
42787 +    __Schedule_Timer (cmRail, cmRail->NextRunTime);
42788 +
42789 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42790 +}
42791 +
42792 +#endif /* defined(PER_CPU_TIMEOUT) */
42793 +
42794 +
42795 +
42796 +void
42797 +DisplayRailDo (DisplayInfo *di, EP_RAIL *rail)
42798 +{
42799 +    CM_RAIL *cmRail = rail->ClusterRail;
42800 +    unsigned long flags;
42801 +    int  i, j;
42802 +
42803 +    if (rail->State != EP_RAIL_STATE_RUNNING)
42804 +       return;
42805 +
42806 +    spin_lock_irqsave (&cmRail->Lock, flags);
42807 +
42808 +    (di->func)(di->arg, "NodeId=%d NodeLevel=%d NumLevels=%d NumNodes=%d\n", 
42809 +           cmRail->NodeId, cmRail->TopLevel, cmRail->NumLevels, cmRail->Rail->Position.pos_nodes);
42810 +    
42811 +    (di->func)(di->arg, "[");
42812 +
42813 +    for (i = 0; i < cmRail->NumLevels; i++)
42814 +    {
42815 +       if (i > 0)
42816 +           (di->func)(di->arg, ",");
42817 +       
42818 +       if (i < cmRail->TopLevel)
42819 +       {
42820 +           (di->func)(di->arg, "L ");
42821 +         
42822 +           for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
42823 +               switch (cmRail->Levels[i].Sgmts[j].State)
42824 +               {
42825 +               case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[j].NodeId); break;
42826 +               case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break;
42827 +               case CM_SGMT_COMING:  (di->func)(di->arg, "c%4s", ""); break;
42828 +               case CM_SGMT_ABSENT:  (di->func)(di->arg, ".%4s", ""); break;
42829 +               default:              (di->func)(di->arg, "?%4s", ""); break;
42830 +               }
42831 +       }
42832 +       else
42833 +           switch (cmRail->Role)
42834 +           {
42835 +           case CM_ROLE_LEADER_CANDIDATE:      
42836 +               (di->func)(di->arg,"l "); 
42837 +               for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
42838 +                   (di->func)(di->arg,"     ");
42839 +               break;
42840 +         
42841 +           case CM_ROLE_SUBORDINATE:       
42842 +               switch (cmRail->Levels[i].Sgmts[0].State)
42843 +               {
42844 +               case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[0].NodeId); break;
42845 +               case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break;
42846 +               case CM_SGMT_COMING:  (di->func)(di->arg, "c%4s", ""); break;
42847 +               case CM_SGMT_ABSENT:  (di->func)(di->arg, ".%4s", ""); break;
42848 +               default:              (di->func)(di->arg, "?%4s", ""); break;
42849 +               }
42850 +               for (j = 1; j < cmRail->Levels[i].NumSegs; j++)
42851 +                   (di->func)(di->arg, "     ");
42852 +               break;
42853 +         
42854 +           default:
42855 +               (di->func)(di->arg, "####");
42856 +               break;
42857 +           }
42858 +    }
42859 +    (di->func)(di->arg, "]\n");
42860 +
42861 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
42862 +}
42863 +
42864 +void
42865 +DisplayRail (EP_RAIL *rail) 
42866 +{
42867 +    if (rail->State == EP_RAIL_STATE_RUNNING)
42868 +       DisplayRailDo (&di_ep_debug, rail);
42869 +}
42870 +
42871 +void
42872 +DisplayStatus (EP_RAIL *rail)
42873 +{
42874 +    if (rail->State == EP_RAIL_STATE_RUNNING)
42875 +    {
42876 +       CM_RAIL *cmRail = rail->ClusterRail;
42877 +       unsigned long flags;
42878 +       
42879 +       spin_lock_irqsave (&cmRail->Lock, flags);
42880 +       
42881 +       DisplayNodeMaps (&di_ep_debug, cmRail);
42882 +       
42883 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
42884 +    }
42885 +}
42886 +
42887 +void
42888 +DisplaySegs (EP_RAIL *rail)
42889 +{
42890 +    if (rail->State == EP_RAIL_STATE_RUNNING)
42891 +    {
42892 +       CM_RAIL *cmRail = rail->ClusterRail;
42893 +       unsigned long flags;
42894 +       
42895 +       spin_lock_irqsave (&cmRail->Lock, flags);
42896 +       
42897 +       DisplayNodeSgmts (&di_ep_debug, cmRail);
42898 +       
42899 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
42900 +    }
42901 +}
42902 +
42903 +static void
42904 +LoadBroadcastRoute (CM_RAIL *cmRail, int lvl, int sidx)
42905 +{
42906 +    EP_RAIL *rail  = cmRail->Rail;
42907 +    int      nsegs = cmRail->Levels[0].NumSegs;
42908 +    int      vp    = EP_VP_BCAST(lvl, sidx);
42909 +    int      nodes = 1;
42910 +    int      baseNode;
42911 +    int      i;
42912 +
42913 +    ASSERT (lvl > 0 && lvl <= cmRail->NumLevels);
42914 +    ASSERT (sidx == 0 || lvl < cmRail->NumLevels);
42915 +
42916 +    ASSERT (vp >= EP_VP_BCAST_BASE && vp < EP_VP_BCAST_BASE + EP_VP_BCAST_COUNT);
42917 +
42918 +    for (i = 1; i <= lvl; i++)
42919 +    {
42920 +       nodes *= nsegs;
42921 +       nsegs = (i == cmRail->NumLevels) ? 1 : cmRail->Levels[i].NumSegs;
42922 +    }
42923 +
42924 +    baseNode = ((cmRail->NodeId / (nodes * nsegs)) * nsegs + sidx) * nodes;
42925 +
42926 +    CPRINTF5 (2, "%s: broadcast vp lvl %d sidx %d [%d,%d]\n", 
42927 +             cmRail->Rail->Name, lvl, sidx, baseNode, baseNode + nodes - 1);
42928 +    
42929 +    rail->Operations.LoadSystemRoute (rail, vp, baseNode, baseNode + nodes - 1);
42930 +}
42931 +
42932 +static void
42933 +LoadRouteTable (CM_RAIL *cmRail)
42934 +{
42935 +    EP_RAIL *rail = cmRail->Rail;
42936 +    int      i, j;
42937 +   
42938 +   if (cmRail->NumNodes > EP_MAX_NODES)
42939 +   {
42940 +       printk ("More nodes (%d) than point-to-point virtual process table entries (%d)\n", cmRail->NumNodes, EP_MAX_NODES);
42941 +       panic ("LoadRouteTable\n");
42942 +   }
42943 +
42944 +   for (i = 0; i < cmRail->NumNodes; i++)
42945 +       rail->Operations.LoadSystemRoute (rail, EP_VP_NODE(i), i, i);
42946 +
42947 +   /* Generate broadcast routes for subtrees */
42948 +   for (i = 1; i < cmRail->NumLevels; i++)
42949 +      for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
42950 +         LoadBroadcastRoute (cmRail, i, j);
42951 +
42952 +   /* Generate broadcast route for whole machine */
42953 +   LoadBroadcastRoute (cmRail, cmRail->NumLevels, 0);
42954 +
42955 +   /* Finally invalidate all the data routes */
42956 +   for (i = 0; i < cmRail->NumNodes; i++)
42957 +       rail->Operations.UnloadNodeRoute (cmRail->Rail, i);
42958 +}
42959 +
42960 +void
42961 +cm_node_disconnected (EP_RAIL *rail, unsigned nodeId)
42962 +{
42963 +    CM_RAIL *cmRail = rail->ClusterRail;
42964 +    int      base, lstat, lgstat;
42965 +    int             clvl, subClMin, subClMax;
42966 +    int      thisClId, myClId;
42967 +    unsigned long flags;
42968 +
42969 +    ASSERT (nodeId != cmRail->NodeId);
42970 +
42971 +    spin_lock_irqsave (&cmRail->Lock, flags);
42972 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42973 +       if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
42974 +           break;
42975 +
42976 +    myClId   = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
42977 +    thisClId = nodeId - cmRail->Levels[clvl].MinNodeId;
42978 +    base     = thisClId * CM_GSTATUS_BITS;
42979 +    lstat    = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
42980 +    lgstat   = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
42981 +
42982 +    ASSERT ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN);
42983 +
42984 +    CPRINTF7 (2, "%s: cm_node_disconnected: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s -> %sMAY_START\n",
42985 +             cmRail->Rail->Name, nodeId, clvl,
42986 +             GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
42987 +             GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
42988 +             GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId),
42989 +             ((lgstat != CM_GSTATUS_CLOSING) && (lstat & CM_GSTATUS_RESTART)) ? "RESTART|" : "");
42990 +    
42991 +    switch (lgstat)
42992 +    {
42993 +    case CM_GSTATUS_CLOSING:
42994 +       /* delayed ack of closing - set MAY_START and clear RESTART */
42995 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
42996 +       break;
42997 +    case CM_GSTATUS_STARTING:
42998 +    case CM_GSTATUS_RUNNING:
42999 +       IASSERT (! cmRail->Levels[clvl].Online || lstat & CM_GSTATUS_RESTART);
43000 +       break;
43001 +    case CM_GSTATUS_ABSENT:
43002 +       IASSERT (lstat & CM_GSTATUS_RESTART);
43003 +    }
43004 +
43005 +    cmRail->Levels[clvl].Connected--;
43006 +
43007 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
43008 +}
43009 +
43010 +void
43011 +cm_restart_node (EP_RAIL *rail, unsigned nodeId)
43012 +{
43013 +    CM_RAIL *cmRail = rail->ClusterRail;
43014 +    int      base, lstat, lgstat;
43015 +    int             clvl, subClMin, subClMax;
43016 +    int      thisClId, myClId;
43017 +    unsigned long flags;
43018 +
43019 +    spin_lock_irqsave (&cmRail->Lock, flags);
43020 +    if (nodeId == rail->Position.pos_nodeid)
43021 +    {
43022 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
43023 +           RestartComms (cmRail, clvl);
43024 +    }
43025 +    else
43026 +    {
43027 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
43028 +           if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
43029 +               break;
43030 +       
43031 +       myClId   = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
43032 +       thisClId = nodeId - cmRail->Levels[clvl].MinNodeId;
43033 +       base     = thisClId * CM_GSTATUS_BITS;
43034 +       lstat    = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
43035 +       lgstat   = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap,  base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
43036 +
43037 +       CPRINTF6 (2, "%s: cm_restart_node: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s\n",
43038 +                 cmRail->Rail->Name, nodeId, clvl,
43039 +                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
43040 +                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
43041 +                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
43042 +       
43043 +       if (lgstat != CM_GSTATUS_CLOSING)
43044 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
43045 +    }
43046 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
43047 +}
43048 +
43049 +void
43050 +cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason)
43051 +{
43052 +    CM_RAIL *cmRail = rail->ClusterRail;
43053 +    unsigned long flags;
43054 +
43055 +    spin_lock_irqsave (&cmRail->Lock, flags);
43056 +    if (offline)
43057 +       cmRail->OfflineReasons |= reason;
43058 +    else
43059 +       cmRail->OfflineReasons &= ~reason;
43060 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
43061 +}
43062 +
43063 +static void
43064 +cm_remove_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail)
43065 +{
43066 +    CM_SUBSYS  *sys    = (CM_SUBSYS *) subsys;
43067 +    CM_RAIL    *cmRail = sys->Rails[rail->Number];
43068 +    int i, lvl, clvl;
43069 +
43070 +    cm_procfs_rail_fini (cmRail);
43071 +
43072 +    sys->Rails[rail->Number] = NULL;
43073 +    rail->ClusterRail        = NULL;
43074 +
43075 +#if defined(PER_CPU_TIMEOUT)
43076 +    StopPerCpuTimeouts (cmRail);
43077 +#else
43078 +    del_timer_sync (&cmRail->HeartbeatTimer);
43079 +#endif
43080 +    cmRail->NextRunTime      = 0;
43081 +    cmRail->NextDiscoverTime = 0;
43082 +    cmRail->NextHeartbeatTime = 0;
43083 +    
43084 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
43085 +    {
43086 +       for (lvl = 0; lvl <= clvl; lvl++)
43087 +       {
43088 +           CM_LEVEL *level = &cmRail->Levels[lvl];
43089 +           
43090 +           statemap_destroy (level->SubordinateMap[clvl]);
43091 +           
43092 +           for (i = 0; i < level->NumSegs; i++)
43093 +           {
43094 +               statemap_destroy (level->Sgmts[i].Maps[clvl].CurrentInputMap);
43095 +               statemap_destroy (level->Sgmts[i].Maps[clvl].InputMap);
43096 +               statemap_destroy (level->Sgmts[i].Maps[clvl].OutputMap);
43097 +           }
43098 +       }
43099 +       
43100 +       cmRail->Levels[clvl].Online = 0;
43101 +       
43102 +       statemap_destroy (cmRail->Levels[clvl].TmpMap);
43103 +       statemap_destroy (cmRail->Levels[clvl].GlobalMap);
43104 +       statemap_destroy (cmRail->Levels[clvl].LastGlobalMap);
43105 +       statemap_destroy (cmRail->Levels[clvl].SubTreeMap);
43106 +       statemap_destroy (cmRail->Levels[clvl].LocalMap);
43107 +    }
43108 +    
43109 +    spin_lock_destroy (&cmRail->Lock);
43110 +
43111 +    ep_free_inputq (cmRail->Rail, cmRail->PolledQueue);
43112 +    ep_free_inputq (cmRail->Rail, cmRail->IntrQueue);
43113 +    ep_free_outputq (cmRail->Rail, cmRail->MsgQueue);
43114 +
43115 +    KMEM_FREE (cmRail, sizeof (CM_RAIL));
43116 +}
43117 +
43118 +static int
43119 +cm_add_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail)
43120 +{
43121 +    CM_SUBSYS     *sys = (CM_SUBSYS *) subsys;
43122 +    ELAN_POSITION *pos = &rail->Position;
43123 +    CM_RAIL       *cmRail;
43124 +    int lvl, n, nn, clvl, span, i;
43125 +    unsigned long flags;
43126 +
43127 +    KMEM_ZALLOC (cmRail, CM_RAIL *, sizeof (CM_RAIL), 1);
43128 +
43129 +    if (cmRail == NULL)
43130 +       return (ENOMEM);
43131 +    
43132 +    cmRail->Rail     = rail;
43133 +    cmRail->NodeId   = pos->pos_nodeid;
43134 +    cmRail->NumNodes = pos->pos_nodes;
43135 +
43136 +    spin_lock_init (&cmRail->Lock);
43137 +
43138 +    if ((cmRail->IntrQueue   = ep_alloc_inputq (rail, EP_SYSTEMQ_INTR,   sizeof (CM_MSG), CM_INPUTQ_ENTRIES, IntrQueueCallback, cmRail)) == NULL ||
43139 +       (cmRail->PolledQueue = ep_alloc_inputq (rail, EP_SYSTEMQ_POLLED, sizeof (CM_MSG), CM_INPUTQ_ENTRIES, NULL, 0)) == NULL ||
43140 +       (cmRail->MsgQueue    = ep_alloc_outputq (rail, sizeof (CM_MSG), CM_NUM_MSG_BUFFERS)) == NULL)
43141 +    {
43142 +       goto failed;
43143 +    }
43144 +
43145 +    /* point to first "spare" message buffer */
43146 +    cmRail->NextSpareMsg = 0;
43147 +
43148 +    /* Compute the branching ratios from the switch arity */
43149 +    for (lvl = 0; lvl < CM_MAX_LEVELS; lvl++)
43150 +       BranchingRatios[lvl] = (lvl < pos->pos_levels) ? pos->pos_arity[pos->pos_levels - lvl - 1] : 4;
43151 +    
43152 +    /* now determine the number of levels of hierarchy we have */
43153 +    /* and how many nodes per level there are */
43154 +    for (lvl = 0, nn = 1, n = pos->pos_nodes; 
43155 +        n > 1; 
43156 +        nn *= BranchingRatios[lvl], n = n / BranchingRatios[lvl], lvl++)
43157 +    {
43158 +       int       nSegs = (n > BranchingRatios[lvl]) ? BranchingRatios[lvl] : n;
43159 +       int       nNodes = nn * nSegs;
43160 +       CM_LEVEL *level = &cmRail->Levels[lvl];
43161 +
43162 +       for (clvl = 0, span = pos->pos_arity[pos->pos_levels - clvl - 1]; 
43163 +            span < nNodes && clvl < pos->pos_levels - 1;
43164 +            clvl++, span *= pos->pos_arity[pos->pos_levels - clvl - 1])
43165 +           ;
43166 +       
43167 +       level->SwitchLevel = clvl;
43168 +       level->MinNodeId = (pos->pos_nodeid / nNodes) * nNodes;
43169 +       level->NumNodes = nNodes;
43170 +       level->NumSegs = nSegs;
43171 +    }
43172 +    
43173 +    cmRail->NumLevels      = lvl;
43174 +    cmRail->BroadcastLevel = lvl-1;
43175 +
43176 +    CPRINTF4 (2, "%s: NodeId=%d NumNodes=%d NumLevels=%d\n", 
43177 +             rail->Name, pos->pos_nodeid, pos->pos_nodes, cmRail->NumLevels);
43178 +
43179 +    LoadRouteTable (cmRail);
43180 +    
43181 +    /* Init SGMT constants */
43182 +    for (lvl = 0; lvl < cmRail->NumLevels; lvl++)
43183 +    {
43184 +       CM_LEVEL *level = &cmRail->Levels[lvl];
43185 +
43186 +       level->MySgmt = SegmentNo (cmRail, cmRail->NodeId, lvl);
43187 +       
43188 +       for (i = 0; i < CM_SGMTS_PER_LEVEL; i++)
43189 +       {
43190 +           CM_SGMT *sgmt = &level->Sgmts[i];
43191 +         
43192 +           sgmt->MsgNumber = lvl * CM_SGMTS_PER_LEVEL + i;
43193 +           sgmt->Level = lvl;
43194 +           sgmt->Sgmt = i;
43195 +       }
43196 +    }
43197 +
43198 +    /* Init maps for each cluster level */
43199 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
43200 +    {
43201 +       int nNodes = cmRail->Levels[clvl].NumNodes;
43202 +       int mapBits = (nNodes * CM_GSTATUS_BITS) + (nNodes * EP_SVC_NUM_INDICATORS);
43203 +       int clmin;
43204 +       int clmax;
43205 +       int clid = ClusterIds (cmRail, clvl, &clmin, &clmax);
43206 +
43207 +       for (lvl = 0; lvl <= clvl; lvl++)
43208 +       {
43209 +           CM_LEVEL *level = &cmRail->Levels[lvl];
43210 +
43211 +           level->SubordinateMap[clvl] = statemap_create (mapBits);
43212 +
43213 +           for (i = 0; i < level->NumSegs; i++)
43214 +           {
43215 +               level->Sgmts[i].Maps[clvl].CurrentInputMap = statemap_create (mapBits);
43216 +               level->Sgmts[i].Maps[clvl].InputMap        = statemap_create (mapBits);
43217 +               level->Sgmts[i].Maps[clvl].OutputMap       = statemap_create (mapBits);
43218 +           }
43219 +       }
43220 +       
43221 +       cmRail->Levels[clvl].Online = 0;
43222 +
43223 +       cmRail->Levels[clvl].TmpMap        = statemap_create (mapBits);
43224 +       cmRail->Levels[clvl].GlobalMap     = statemap_create (mapBits);
43225 +       cmRail->Levels[clvl].LastGlobalMap = statemap_create (mapBits);
43226 +       cmRail->Levels[clvl].SubTreeMap    = statemap_create (mapBits);
43227 +       cmRail->Levels[clvl].LocalMap      = statemap_create (mapBits);
43228 +
43229 +       /* Flag everyone outside my next lower cluster as sensed offline... */
43230 +       for (i = 0; i < clmin; i++)
43231 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
43232 +       
43233 +       for (i = clmax + 1; i < nNodes; i++)
43234 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
43235 +       
43236 +       /* ...and set my own state */
43237 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, clid * CM_GSTATUS_BITS,
43238 +                         CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
43239 +    }
43240 +    
43241 +    /* compute parameter hash to add to messages */
43242 +    cmRail->ParamHash = EP_PROTOCOL_VERSION;
43243 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_PERIODIC_DISCOVER_INTERVAL;
43244 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_URGENT_DISCOVER_INTERVAL;
43245 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_INTERVAL;
43246 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_DMA_RETRIES;
43247 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_MSG_RETRIES;
43248 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_BCAST_MSG_RETRIES;
43249 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_TIMER_SCHEDULE_TIMEOUT;
43250 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_TIMEOUT;
43251 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_DISCOVER_TIMEOUT;
43252 +    cmRail->ParamHash = cmRail->ParamHash * 127 + BT_NBIPUL;
43253 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_GSTATUS_BITS;
43254 +    cmRail->ParamHash = cmRail->ParamHash * 127 + EP_SVC_NUM_INDICATORS;
43255 +    cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumLevels;
43256 +    cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumNodes;
43257 +    for (i = 0; i < cmRail->NumLevels; i++)
43258 +       cmRail->ParamHash = cmRail->ParamHash * 127 + BranchingRatios[i];
43259 +    
43260 +#if defined(PER_CPU_TIMEOUT)
43261 +    StartPerCpuTimeouts (cmRail);
43262 +#endif
43263 +
43264 +    spin_lock_irqsave (&cmRail->Lock, flags);
43265 +
43266 +#if !defined(PER_CPU_TIMEOUT)
43267 +    /* Initialise the timer, but don't add it yet, since
43268 +     * __Schedule_Heartbeat() will do this. */
43269 +
43270 +    init_timer (&cmRail->HeartbeatTimer);
43271 +    
43272 +    cmRail->HeartbeatTimer.function = cm_heartbeat_timer;
43273 +    cmRail->HeartbeatTimer.data     = (unsigned long) cmRail;
43274 +    cmRail->HeartbeatTimer.expires  = lbolt + hz;
43275 +#endif
43276 +
43277 +    /* start sending heartbeats */
43278 +    __Schedule_Heartbeat (cmRail);
43279 +
43280 +    /* start discovering who else is out there */
43281 +    LowerTopLevel (cmRail, 0);
43282 +
43283 +    /* connect to myself straight away - I know I'm here */
43284 +    ep_connect_node (rail, cmRail->NodeId);
43285 +    
43286 +    /* add to all rails */
43287 +    sys->Rails[rail->Number] = cmRail;
43288 +    rail->ClusterRail = (void *) cmRail;
43289 +
43290 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
43291 +
43292 +    /* Enable the input queues */
43293 +    ep_enable_inputq (rail, cmRail->PolledQueue);
43294 +    ep_enable_inputq (rail, cmRail->IntrQueue);
43295 +
43296 +    /* Create the procfs entries */
43297 +    cm_procfs_rail_init (cmRail);
43298 +
43299 +    return 0;
43300 +
43301 + failed:
43302 +    cm_remove_rail (subsys, epsys, rail);
43303 +    return -ENOMEM;
43304 +}
43305 +
43306 +static void
43307 +cm_fini (EP_SUBSYS *subsys, EP_SYS *epsys)
43308 +{
43309 +    CM_SUBSYS *sys = (CM_SUBSYS *) subsys;
43310 +
43311 +    cm_procfs_fini(sys);
43312 +    
43313 +    KMEM_FREE (sys, sizeof (CM_SUBSYS));
43314 +}
43315 +
43316 +int
43317 +cm_init (EP_SYS *sys)
43318 +{
43319 +    CM_SUBSYS *subsys;
43320 +
43321 +    KMEM_ZALLOC (subsys, CM_SUBSYS *, sizeof (CM_SUBSYS), 1);
43322 +
43323 +    if (subsys == NULL)
43324 +       return (ENOMEM);
43325 +
43326 +    subsys->Subsys.Sys        = sys;
43327 +    subsys->Subsys.Name              = "cm";
43328 +    subsys->Subsys.Destroy    = cm_fini;
43329 +    subsys->Subsys.AddRail    = cm_add_rail;
43330 +    subsys->Subsys.RemoveRail = cm_remove_rail;
43331 +
43332 +    ep_subsys_add (sys, &subsys->Subsys);
43333 +
43334 +    cm_procfs_init (subsys);
43335 +
43336 +    /*
43337 +     * Initialise the machineid if it wasn't specified by
43338 +     * the modules.conf file - otherwise truncate it to 
43339 +     * 16 bits.
43340 +     */
43341 +    if (MachineId != -1)
43342 +       MachineId = (uint16_t) MachineId;
43343 +    else
43344 +    {
43345 +#if defined(LINUX_ALPHA)
43346 +       MachineId = (uint16_t)((5 << 12) | HZ);
43347 +#elif defined(LINUX_SPARC)
43348 +       MachineId = (uint16_t)((4 << 12) | HZ);
43349 +#elif defined(LINUX_I386)
43350 +       MachineId = (uint16_t)((3 << 12) | HZ);
43351 +#elif defined( LINUX_IA64)
43352 +       MachineId = (uint16_t)((2 << 12) | HZ);
43353 +#elif defined(LINUX_X86_64)
43354 +       MachineId = (uint16_t)((1 << 12) | HZ);
43355 +#else
43356 +       MachineId = (uint16_t)((0 << 12) | HZ);
43357 +#endif
43358 +    }
43359 +
43360 +    return (0);
43361 +}
43362 +
43363 +/*
43364 + * Local variables:
43365 + * c-file-style: "stroustrup"
43366 + * End:
43367 + */
43368 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/cm.h
43369 ===================================================================
43370 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/cm.h    2004-02-23 16:02:56.000000000 -0500
43371 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/cm.h 2005-07-28 14:52:52.854677824 -0400
43372 @@ -0,0 +1,412 @@
43373 +/*
43374 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
43375 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
43376 + *
43377 + *    For licensing information please see the supplied COPYING file
43378 + *
43379 + */
43380 +
43381 +#ifndef __ELAN_CM_H
43382 +#define __ELAN_CM_H
43383 +
43384 +#ident "@(#)$Id: cm.h,v 1.14.2.1 2004/11/12 10:54:50 mike Exp $"
43385 +/*      $Source: /cvs/master/quadrics/epmod/cm.h,v $*/
43386 +
43387 +#include <elan/statemap.h>
43388 +
43389 +#if defined(DIGITAL_UNIX)
43390 +/*
43391 + * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible
43392 + * for handling all PCI interrupts and "funneled" operations.  When a kernel thread
43393 + * is made runnable, the scheduler will choose which cpu it will run on at that time,
43394 + * and will only execute a higher priority thread from another cpu's run queue when 
43395 + * it becomes totally idle (apparently also including user processes).  Also the 
43396 + * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
43397 + * at "preemptable" places - so again have no guarantee on when they will execute if
43398 + * they happen to be queued on a "hogged" cpu. The combination of these mean that the Tru64
43399 + * is incapable of scheduling a high priority kernel thread within a deterministic time
43400 + * of when it should have become runnable - wonderful.
43401 + *
43402 + * Hence the solution Compaq have proposed it to schedule a timeout onto all of the
43403 + * cpu's timeouts lists at the maximum frequency that we could want to execute code,
43404 + * then to handle the scheduling of work between these ourselves.  With a bit of luck
43405 + * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
43406 + * to do our important work.
43407 + *
43408 + * However ..... this still is not reliable, since timeouts under Tru64 are still 
43409 + * only run when the currently running kernel thread "co-operates" by calling one
43410 + * of a number of functions which is permitted to run the "lwc"s AND is not holding
43411 + * any spinlocks AND is running at IPL 0.   However Compaq are unable to provide
43412 + * any upper limit on the time between the "lwc"'s being run and so it is possible
43413 + * for all 4 cpus to not run them for an unbounded time.
43414 + *
43415 + * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to 
43416 + * hardclock() to "solve" this problem for Memory Channel.  However, since it
43417 + * is called within the clock interrupt it is not permissible to acquire any
43418 + * spinlocks, nor to run for "too long".  This means that it is not possible to
43419 + * call the heartbeat algorithm from this hook.  
43420 + *
43421 + * Our solution to these limitations is to use the hook to cause an elan interrupt 
43422 + * to be delivered, by issuing a mis-aligned SetEvent command - this causes the device 
43423 + * to trap and ep_cprocTrap() can then run the heartbeat code.  However there is a lock 
43424 + * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to 
43425 + * use a trylock and if we fail, then hope that when the interrupt is delivered again
43426 + * some time later we will succeed.
43427 + *
43428 + * However this only works if the kernel is able to respond to the Elan interrupt,
43429 + * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
43430 + * not been taken for more than an CM_TIMER_SCHEDULE_TIMEOUT interval.
43431 + *
43432 + * In fact this is exactly the mechanism that other operating systems use to
43433 + * execute timeouts, since the hardclock interrupt posts a low priority 
43434 + * "soft interrupt" which "pre-empts" the currently running thread and then
43435 + * executes the timeouts. To block timeouts you use splsoftclock() the same as 
43436 + * in Tru64.
43437 + */
43438 +#define PER_CPU_TIMEOUT                        TRUE
43439 +#endif
43440 +
43441 +
43442 +#define CM_SGMTS_PER_LEVEL             8                       /* maximum nodes in each segment */
43443 +#define CM_MAX_LEVELS                  6                       /* maximum depth of tree */
43444 +
43445 +/* message buffers/dmas/events etc */
43446 +#define CM_NUM_NODE_MSG_BUFFERS                (CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL) /* subordinates and leader */
43447 +#define CM_NUM_SPARE_MSG_BUFFERS       8                       /* spare msg buffers for non-connected nodes */
43448 +#define CM_NUM_MSG_BUFFERS             (CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
43449 +
43450 +#define CM_INPUTQ_ENTRIES              128                     /* # entries in input queue */
43451 +
43452 +#define CM_PERIODIC_DISCOVER_INTERVAL  (5000)          /* 5s (infrequent resolution of established leader conflicts) */
43453 +#define CM_URGENT_DISCOVER_INTERVAL    (50)            /* 0.05s (more frequently than heartbeats 'cause they don't retry) */
43454 +#define CM_HEARTBEAT_INTERVAL          (125)           /* 0.125s */
43455 +#define CM_TIMER_SCHEDULE_TIMEOUT      (4000)          /* 4s     Maximum time before a timer that's scheduled to run gets to run (eg blocked in interrupt handlers etc) */
43456 +#define CM_THREAD_SCHEDULE_TIMEOUT     (30000)         /* 30s    Maximum time before a thread that's scheduled to run gets to run */
43457 +#define CM_THREAD_RUNNING_TIMEOUT      (30000)         /* 30s    Don't expect the manager thread to be running longer than this */
43458 +
43459 +#ifdef PER_CPU_TIMEOUT
43460 +#define CM_PERCPU_TIMEOUT_INTERVAL     (50)            /* 0.05s (must be less than all above intervals) */
43461 +#define CM_PACEMAKER_INTERVAL          (500)           /* 0.05s */
43462 +
43463 +#define CM_HEARTBEAT_OVERDUE           (250)           /* 0.25s Maximum time a timeout can be overdue before taking extreme action */
43464 +#endif
43465 +
43466 +#define CM_P2P_DMA_RETRIES             31
43467 +
43468 +/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
43469 + * attempts to send one to be successfully received */
43470 +#define CM_P2P_MSG_RETRIES             8
43471 +
43472 +/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
43473 + * to send one to be successfully received. */
43474 +#define CM_BCAST_MSG_RETRIES           40
43475 +
43476 +/* Heartbeat timeout allows for a node stalling and still getting its
43477 + * heartbeat. The 2 is to allow for unsynchronised polling times. */
43478 +#define CM_HEARTBEAT_TIMEOUT           (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
43479 +
43480 +/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
43481 + * who don't see discovery are considered dead by their leader.  This
43482 + * ensures that by the time a node "discovers" it is a leader of a segment,
43483 + * the previous leader of that segment will have been deemed to be dead by
43484 + * the parent segment's leader */
43485 +#define CM_DISCOVER_TIMEOUT            (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
43486 +
43487 +#define CM_WAITING_TIMEOUT             (CM_DISCOVER_TIMEOUT * 100)
43488 +
43489 +/*
43490 + * Convert all timeouts specified in mS into "ticks"
43491 + */
43492 +#define MSEC2TICKS(MSEC)               (((MSEC)*HZ)/1000)
43493 +
43494 +
43495 +/* statemap entry */
43496 +typedef struct cm_state_entry
43497 +{
43498 +    int16_t           level;                   /* cluster level to apply to */
43499 +    int16_t          offset;                   /* from statemap_findchange() */
43500 +    uint16_t          seg[BT_NBIPUL/16];       /* ditto */
43501 +} CM_STATEMAP_ENTRY;
43502 +
43503 +/* offset is >= 0 for a change to apply and */
43504 +#define STATEMAP_NOMORECHANGES (-1)            /* end of a set of updates */
43505 +#define STATEMAP_RESET         (-2)            /* reset the target map */
43506 +#define STATEMAP_NOOP          (-3)            /* null token */
43507 +
43508 +/* CM message format */
43509 +typedef int8_t CM_SEQ;                         /* heartbeat sequence numbers; at least 2 bits, signed */
43510 +
43511 +/*
43512 + * The message header is received into the last 64 byte block of 
43513 + * the input queue and the Version *MUST* be the last word of the 
43514 + * block to ensure that we can see that the whole of the message
43515 + * has reached main memory after we've seen the input queue pointer
43516 + * have been updated.
43517 + */
43518 +typedef struct ep_cm_hdr
43519 +{
43520 +    uint32_t          Pad0;
43521 +    uint32_t          Pad1;
43522 +
43523 +    uint8_t           Type;
43524 +    uint8_t           Level;
43525 +    CM_SEQ            Seq;                     /* precision at least 2 bits each*/
43526 +    CM_SEQ            AckSeq;
43527 +    
43528 +    uint16_t          NumMaps;
43529 +    uint16_t          MachineId;
43530 +
43531 +    uint16_t          NodeId;
43532 +    uint16_t          Checksum;
43533 +
43534 +    uint32_t           Timestamp;
43535 +    uint32_t           ParamHash;
43536 +    uint32_t          Version;
43537 +} CM_HDR;
43538 +
43539 +#define CM_HDR_SIZE        sizeof (CM_HDR)
43540 +
43541 +typedef struct cm_msg
43542 +{
43543 +    union {
43544 +       CM_STATEMAP_ENTRY   Statemaps[1];               /* piggy-backed statemap updates start here */
43545 +       uint8_t             Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
43546 +    } Payload;
43547 +    
43548 +    CM_HDR                 Hdr;
43549 +} CM_MSG;
43550 +
43551 +/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
43552 +#define CM_MSG_MAXMAPS         (offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
43553 +#define CM_MSG_MAP(mapno)      (CM_MSG_MAXMAPS - (mapno) - 1)
43554 +
43555 +/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
43556 +#define CM_MSG_BASE(nmaps)     (nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
43557 +#define CM_MSG_SIZE(nmaps)     (sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
43558 +
43559 +#define CM_MSG_VERSION                         0xcad00005
43560 +#define CM_MSG_TYPE_RESOLVE_LEADER             0
43561 +#define CM_MSG_TYPE_DISCOVER_LEADER            1
43562 +#define CM_MSG_TYPE_NOTIFY                     2
43563 +#define CM_MSG_TYPE_DISCOVER_SUBORDINATE       3
43564 +#define CM_MSG_TYPE_IMCOMING                   4
43565 +#define CM_MSG_TYPE_HEARTBEAT                  5
43566 +#define CM_MSG_TYPE_REJOIN                     6
43567 +
43568 +/* CM machine segment */
43569 +typedef struct cm_sgmtMaps
43570 +{
43571 +    u_char       InputMapValid;                        /* Input map has been set */
43572 +    u_char       OutputMapValid;               /* Output map has been set */
43573 +    u_char       SentChanges;                  /* got an outstanding STATEMAP_NOMORECHANGES to send */
43574 +    statemap_t  *OutputMap;                    /* state to send */
43575 +    statemap_t  *InputMap;                     /* state received */
43576 +    statemap_t  *CurrentInputMap;              /* state being received */
43577 +} CM_SGMTMAPS;
43578 +
43579 +typedef struct cm_sgmt
43580 +{
43581 +   u_char       State;
43582 +   u_char       SendMaps;
43583 +   u_char       MsgAcked;
43584 +   CM_SEQ      MsgSeq;
43585 +   CM_SEQ      AckSeq;
43586 +   u_int       NodeId;
43587 +   long                UpdateTick;
43588 +   long                WaitingTick;
43589 +   uint32_t    Timestamp;
43590 +   CM_SGMTMAPS  Maps[CM_MAX_LEVELS];           /* Maps[i] == state for cluster level i */
43591 +   u_short      MsgNumber;                     /* msg buffer to use */
43592 +   u_short     NumMaps;                        /* # maps in message buffer */
43593 +   u_short      Level;
43594 +   u_short      Sgmt;
43595 +} CM_SGMT;
43596 +
43597 +#define CM_SGMT_ABSENT         0               /* no one there at all */
43598 +#define CM_SGMT_WAITING                1               /* waiting for subtree to connect */
43599 +#define CM_SGMT_COMING         2               /* expecting a subtree to reconnect */
43600 +#define CM_SGMT_PRESENT                3               /* connected */
43601 +
43602 +typedef struct cm_level
43603 +{
43604 +    int               SwitchLevel;
43605 +    u_int             MinNodeId;
43606 +    u_int              NumNodes;
43607 +    u_int              NumSegs;
43608 +    u_int              MySgmt;
43609 +   
43610 +    /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
43611 +    u_char             SubordinateMapValid[CM_MAX_LEVELS];
43612 +    statemap_t        *SubordinateMap[CM_MAX_LEVELS];
43613 +
43614 +    /* maps/flags for this cluster level */
43615 +    u_int              Online:1;                               /* I've gone online (seen myself running) */
43616 +    u_int             Restarting:1;                            /* driving my own restart bit */
43617 +    u_char            OfflineReasons;                          /* forced offline by broadcast */
43618 +
43619 +    u_char             GlobalMapValid;
43620 +    u_char             SubTreeMapValid;
43621 +    u_long            Connected;
43622 +
43623 +    statemap_t        *LocalMap;               /* state bits I drive */
43624 +    statemap_t        *SubTreeMap;             /* OR of my and my subtree states */
43625 +    statemap_t        *GlobalMap;              /* OR of all node states */
43626 +    statemap_t        *LastGlobalMap;          /* last map I saw */
43627 +    statemap_t        *TmpMap;                 /* scratchpad */
43628 +
43629 +    CM_SGMT           Sgmts[CM_SGMTS_PER_LEVEL];
43630 +} CM_LEVEL;
43631 +
43632 +#define CM_ROLE_LEADER_CANDIDATE       0
43633 +#define CM_ROLE_LEADER                 1
43634 +#define CM_ROLE_SUBORDINATE            2
43635 +
43636 +/* global status bits */
43637 +#define CM_GSTATUS_STATUS_MASK         0x03    /* bits nodes drive to broadcast their status */
43638 +#define CM_GSTATUS_ABSENT              0x00    /* Off the network */
43639 +#define CM_GSTATUS_STARTING            0x01    /* I'm waiting for everyone to see me online */
43640 +#define CM_GSTATUS_RUNNING              0x03   /* up and running */
43641 +#define CM_GSTATUS_CLOSING             0x02    /* I'm waiting for everyone to see me offline */
43642 +
43643 +#define CM_GSTATUS_ACK_MASK            0x0c    /* bits node drive to ack other status */
43644 +#define CM_GSTATUS_MAY_START           0x04    /* Everyone thinks I may not start */
43645 +#define CM_GSTATUS_MAY_RUN             0x08    /* Everyone thinks I may not run */
43646 +
43647 +#define CM_GSTATUS_RESTART             0x10    /* Someone thinks I should restart */
43648 +#define CM_GSTATUS_BITS                        5
43649 +
43650 +#define CM_GSTATUS_BASE(node)          ((node) * CM_GSTATUS_BITS)
43651 +
43652 +#if defined(PER_CPU_TIMEOUT)
43653 +typedef struct cm_timeout_data
43654 +{
43655 +    long               ScheduledAt;                            /* lbolt timeout was scheduled to run at */
43656 +
43657 +    unsigned long       EarlyCount;                            /* # times run early than NextRun */
43658 +    unsigned long      MissedCount;                            /* # times run on time - but someone else was running it */
43659 +    unsigned long       WastedCount;                           /* # times we failed to get the spinlock */
43660 +    unsigned long      WorkCount;                              /* # times we're the one running */
43661 +
43662 +    unsigned long      WorstDelay;                             /* worst scheduling delay */
43663 +    unsigned long      BestDelay;                              /* best scheduling delay */
43664 +
43665 +    unsigned long      WorstLockDelay;                         /* worst delay before getting rail->Lock */
43666 +
43667 +    unsigned long      WorstHearbeatDelay;                     /* worst delay before calling DoHeartbeatWork */
43668 +} CM_TIMEOUT_DATA;
43669 +#endif
43670 +
43671 +typedef struct cm_rail
43672 +{
43673 +    EP_RAIL          *Rail;                                    /* rail we're associated with */
43674 +    struct list_head   Link;                                   /*   and linked on the CM_SUBSYS */
43675 +
43676 +    uint32_t          ParamHash;                               /* hash of critical parameters */
43677 +    uint32_t           Timestamp;
43678 +    long              DiscoverStartTick;                       /* when discovery started */
43679 +
43680 +    unsigned int       NodeId;                                 /* my node id */
43681 +    unsigned int       NumNodes;                               /*   and number of nodes */
43682 +    unsigned int       NumLevels;                              /* number of levels computed from machine size */
43683 +    int                       BroadcastLevel;
43684 +    long              BroadcastLevelTick;
43685 +    unsigned int       TopLevel;                               /* level at which I'm not a leader */
43686 +    unsigned char      Role;                                   /* state at TopLevel */
43687 +
43688 +    EP_INPUTQ        *PolledQueue;                             /* polled input queue */
43689 +    EP_INPUTQ        *IntrQueue;                               /* intr input queue */
43690 +    EP_OUTPUTQ       *MsgQueue;                                /* message  */
43691 +    unsigned int       NextSpareMsg;                           /* next "spare" message buffer to use */
43692 +
43693 +    EP_CM_RAIL_STATS   Stats;                                  /* statistics */
43694 +
43695 +    kmutex_t          Mutex;
43696 +    spinlock_t        Lock;
43697 +    
43698 +    long              NextHeartbeatTime;                       /* next time to check/send heartbeats */
43699 +    long              NextDiscoverTime;                        /* next time to progress discovery  */
43700 +    long              NextRunTime;                             /* the earlier of the above two or intr requires inputq poll*/
43701 +
43702 +    unsigned int       OfflineReasons;                         /* forced offline by procfs/manager thread stuck */
43703 +
43704 +#if defined(PER_CPU_TIMEOUT)
43705 +    spinlock_t        HeartbeatTimeoutsLock;                   /* spinlock to sequentialise per-cpu timeouts */
43706 +    long              HeartbeatTimeoutsStarted;                /* bitmap of which timeouts have started */
43707 +    long              HeartbeatTimeoutsStopped;                /* bitmap of which timeouts have stopped */
43708 +    long              HeartbeatTimeoutsShouldStop;             /* flag to indicate timeouts should stop */
43709 +    kcondvar_t        HeartbeatTimeoutsWait;                   /* place to sleep waiting for timeouts to stop */
43710 +    long              HeartbeatTimeoutRunning;                 /* someone is running the timeout - don't try for the lock */
43711 +
43712 +    long              HeartbeatTimeoutOverdue;                 /* heartbeat seen as overdue - interrupt requested */
43713 +
43714 +    CM_TIMEOUT_DATA   *HeartbeatTimeoutsData;                  /* per timeout data */
43715 +#else
43716 +    struct timer_list  HeartbeatTimer;                         /* timer for heartbeat/discovery */
43717 +#endif
43718 +
43719 +    CM_LEVEL           Levels[CM_MAX_LEVELS];
43720 +} CM_RAIL;
43721 +
43722 +/* OfflineReasons (both per-rail and per-level) */
43723 +#define CM_OFFLINE_BROADCAST           (1 << 0)
43724 +#define CM_OFFLINE_PROCFS              (1 << 1)
43725 +#define CM_OFFLINE_MANAGER             (1 << 2)
43726 +
43727 +typedef struct cm_subsys
43728 +{
43729 +    EP_SUBSYS          Subsys;
43730 +    CM_RAIL            *Rails[EP_MAX_RAILS];
43731 +} CM_SUBSYS;
43732 +
43733 +extern int  MachineId;
43734 +
43735 +extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
43736 +extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
43737 +extern void cm_restart_comms (CM_RAIL *cmRail);
43738 +extern int  cm_init (EP_SYS *sys);
43739 +
43740 +extern void DisplayRail(EP_RAIL *rail);
43741 +extern void DisplaySegs (EP_RAIL *rail);
43742 +extern void DisplayStatus (EP_RAIL *rail);
43743 +
43744 +typedef struct proc_private
43745 +{
43746 +    struct nodeset_private *pr_next;
43747 +    EP_RAIL                *pr_rail;
43748 +    char                  *pr_data;
43749 +    int                     pr_data_len;
43750 +    unsigned               pr_off;
43751 +    unsigned               pr_len;
43752 +    DisplayInfo             pr_di;
43753 +} PROC_PRIVATE;
43754 +
43755 +extern void    proc_character_fill (long mode, char *fmt, ...);
43756 +extern int     proc_release (struct inode *inode, struct file *file);
43757 +extern ssize_t proc_read (struct file *file, char *buf, size_t count, loff_t *ppos);
43758 +
43759 +
43760 +extern void DisplayNodeMaps  (DisplayInfo *di, CM_RAIL *cmRail);
43761 +extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
43762 +extern void DisplayRailDo    (DisplayInfo *di, EP_RAIL *rail);
43763 +
43764 +extern int    cm_read_cluster(EP_RAIL *rail,char *page);
43765 +extern void   cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason);
43766 +
43767 +extern int    cm_svc_indicator_set      (EP_RAIL *rail, int svc_indicator);
43768 +extern int    cm_svc_indicator_clear    (EP_RAIL *rail, int svc_indicator);
43769 +extern int    cm_svc_indicator_is_set   (EP_RAIL *rail, int svc_indicator, int nodeId);
43770 +extern int    cm_svc_indicator_bitmap   (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
43771 +
43772 +/* cm_procfs.c */
43773 +extern void   cm_procfs_init (CM_SUBSYS *subsys);
43774 +extern void   cm_procfs_fini (CM_SUBSYS *subsys);
43775 +extern void   cm_procfs_rail_init (CM_RAIL *rail);
43776 +extern void   cm_procfs_rail_fini (CM_RAIL *rail);
43777 +
43778 +/*
43779 + * Local variables:
43780 + * c-file-style: "stroustrup"
43781 + * End:
43782 + */
43783 +#endif /* __ELAN_CM_H */
43784 +
43785 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/cm_procfs.c
43786 ===================================================================
43787 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/cm_procfs.c     2004-02-23 16:02:56.000000000 -0500
43788 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/cm_procfs.c  2005-07-28 14:52:52.855677672 -0400
43789 @@ -0,0 +1,254 @@
43790 +/*
43791 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
43792 + *    Copyright (c) 2002-2005 by Quadrics Ltd.
43793 + *
43794 + *    For licensing information please see the supplied COPYING file
43795 + *
43796 + */
43797 +
43798 +#ident "@(#)$Id: cm_procfs.c,v 1.5 2004/05/14 09:23:13 daniel Exp $"
43799 +/*      $Source: /cvs/master/quadrics/epmod/cm_procfs.c,v $ */
43800 +
43801 +#include <qsnet/kernel.h>
43802 +
43803 +#include <elan/kcomm.h>
43804 +
43805 +#include "kcomm_vp.h"
43806 +#include "debug.h"
43807 +#include "cm.h"
43808 +#include <elan/epsvc.h>
43809 +
43810 +#include <qsnet/procfs_linux.h>
43811 +
43812 +extern char *sprintClPeers (char *str, CM_RAIL *cmRail, int clvl);
43813 +
43814 +static int
43815 +proc_read_cluster(char *page, char **start, off_t off,
43816 +               int count, int *eof, void *data)
43817 +{
43818 +    CM_RAIL *cmRail = (CM_RAIL *) data;
43819 +    char    *p      = page;
43820 +
43821 +    page[0] = 0;
43822 +
43823 +    if (cmRail->Rail->State != EP_RAIL_STATE_RUNNING)
43824 +       p += sprintf(p, "<not running>\n");
43825 +    else
43826 +    {
43827 +       CM_LEVEL *cmLevel;
43828 +       unsigned long flags;
43829 +       int  i, j;
43830 +       char clNodeStr[32]; /* [%d-%d][%d-%d] */
43831 +       char seperate_with;
43832 +
43833 +       struct { int val; char *name; } bitvals[] = {
43834 +           {CM_OFFLINE_BROADCAST, "Broadcast"},
43835 +           {CM_OFFLINE_PROCFS,    "Offline"},
43836 +           {CM_OFFLINE_MANAGER,   "Manager"}};
43837 +       
43838 +       spin_lock_irqsave (&cmRail->Lock, flags);
43839 +       
43840 +       for (i = 0; i < cmRail->NumLevels; i++)
43841 +       {
43842 +           cmLevel = &cmRail->Levels[i];
43843 +           
43844 +           p += sprintf(p, "%23s %7s ", sprintClPeers (clNodeStr, cmRail, i), cmLevel->Online?"Online":"Offline");
43845 +           
43846 +           if ((cmLevel->Online ) | ( cmLevel->Connected > 0))
43847 +               p += sprintf(p, "Connected=%lu ", cmLevel->Connected);
43848 +           
43849 +           seperate_with = '<';
43850 +           
43851 +           if ( cmLevel->Restarting ) {
43852 +               p += sprintf(p, "%cRestarting", seperate_with);
43853 +               seperate_with = ',';
43854 +           }
43855 +           
43856 +           if ( ! (cmLevel->GlobalMapValid & cmLevel->SubTreeMapValid )) {
43857 +               p += sprintf(p, "%cMap Not Valid", seperate_with);
43858 +               seperate_with = ',';
43859 +           }
43860 +           
43861 +           if ( cmLevel->OfflineReasons ) {
43862 +               for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++)
43863 +                   if (cmLevel->OfflineReasons & bitvals[j].val) {
43864 +                       p += sprintf(p, "%c%s", seperate_with, bitvals[j].name);
43865 +                       seperate_with = ',';
43866 +                   }
43867 +           }
43868 +           if ( cmRail->OfflineReasons ) {
43869 +               for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++)
43870 +                   if (cmRail->OfflineReasons & bitvals[j].val) {
43871 +                       p += sprintf(p, "%c%s", seperate_with, bitvals[j].name);
43872 +                       seperate_with = ',';
43873 +                   }
43874 +           }
43875 +           
43876 +           if ( seperate_with != '<' ) 
43877 +               p += sprintf(p,">\n");
43878 +           else
43879 +               p += sprintf(p,"\n");
43880 +       }
43881 +       
43882 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
43883 +    }
43884 +
43885 +    return qsnet_proc_calc_metrics (page, start, off, count, eof, p - page);
43886 +}
43887 +
43888 +static struct rail_info
43889 +{
43890 +    char *name;
43891 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
43892 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
43893 +} rail_info[] = {
43894 +    {"cluster", proc_read_cluster, NULL},
43895 +};
43896 +
43897 +struct proc_dir_entry *svc_indicators_root;
43898 +
43899 +typedef struct svc_indicator_data
43900 +{
43901 +    int       svc_indicator;
43902 +    EP_RAIL  *rail;
43903 +} SVC_INDICATOR_DATA;
43904 +
43905 +static SVC_INDICATOR_DATA svc_indicator_data[EP_SVC_NUM_INDICATORS][EP_MAX_RAILS];
43906 +static char              *svc_indicator_names[EP_SVC_NUM_INDICATORS] = EP_SVC_NAMES;
43907 +
43908 +static int
43909 +proc_read_svc_indicator_rail_bitmap (char *page, char **start, off_t off,
43910 +                                    int count, int *eof, void *data)
43911 +{
43912 +    SVC_INDICATOR_DATA  *svc_data = (SVC_INDICATOR_DATA  *)data;
43913 +    unsigned int        nnodes   = ep_numnodes (ep_system());
43914 +    bitmap_t           *bitmap;
43915 +
43916 +    KMEM_ZALLOC (bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1);
43917 +
43918 +    cm_svc_indicator_bitmap (svc_data->rail, svc_data->svc_indicator, bitmap, 0, nnodes);
43919 +
43920 +    ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes);
43921 +    
43922 +    KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)));
43923 +    
43924 +    strcat (page, "\n");
43925 +
43926 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
43927 +}
43928 +
43929 +static int
43930 +proc_read_svc_indicator_bitmap(char *page, char **start, off_t off,
43931 +                              int count, int *eof, void *data)
43932 +{
43933 +    unsigned int         num      = (unsigned long) data;
43934 +    EP_SYS              *sys      = ep_system();
43935 +    unsigned int         nnodes   = ep_numnodes (sys);
43936 +    bitmap_t            *bitmap;
43937 +
43938 +    KMEM_ALLOC(bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1);
43939 +     
43940 +    ep_svc_indicator_bitmap (sys, num, bitmap, 0, nnodes);
43941 +
43942 +    ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes);
43943 +    
43944 +    KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)));
43945 +    
43946 +    strcat (page, "\n");
43947 +
43948 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
43949 +}
43950 +
43951 +void
43952 +cm_procfs_rail_init (CM_RAIL *cmRail)
43953 +{
43954 +    EP_RAIL *rail = cmRail->Rail;
43955 +    struct proc_dir_entry *p;
43956 +    int i;
43957 +    
43958 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
43959 +    {
43960 +       if ((p = create_proc_entry (rail_info[i].name, 0, cmRail->Rail->ProcDir)) != NULL)
43961 +       {
43962 +           p->read_proc  = rail_info[i].read_func;
43963 +           p->write_proc = rail_info[i].write_func;
43964 +           p->data       = cmRail;
43965 +           p->owner      = THIS_MODULE;
43966 +       }
43967 +    }
43968 +
43969 +    if ((rail->SvcIndicatorDir = proc_mkdir ("svc_indicators", cmRail->Rail->ProcDir)) != NULL)
43970 +    {
43971 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
43972 +       {
43973 +           if ((p = create_proc_entry (svc_indicator_names[i], 0, rail->SvcIndicatorDir)) != NULL)
43974 +           {
43975 +               svc_indicator_data[i][rail->Number].svc_indicator = i;
43976 +               svc_indicator_data[i][rail->Number].rail          = rail; 
43977 +               
43978 +               p->write_proc = NULL;
43979 +               p->read_proc  = proc_read_svc_indicator_rail_bitmap;
43980 +               p->data       = (void *)&svc_indicator_data[i][rail->Number];
43981 +               p->owner      = THIS_MODULE;
43982 +           }
43983 +       }
43984 +    }
43985 +}
43986 +
43987 +void
43988 +cm_procfs_rail_fini (CM_RAIL *cmRail)
43989 +{
43990 +    EP_RAIL *rail = cmRail->Rail;
43991 +    int i;
43992 +
43993 +    if (rail->SvcIndicatorDir)
43994 +    {
43995 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
43996 +           remove_proc_entry (svc_indicator_names[i], rail->SvcIndicatorDir);
43997 +
43998 +       remove_proc_entry ("svc_indicators", cmRail->Rail->ProcDir);
43999 +    }
44000 +
44001 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
44002 +       remove_proc_entry (rail_info[i].name, cmRail->Rail->ProcDir);
44003 +}
44004 +
44005 +void
44006 +cm_procfs_init (CM_SUBSYS *subsys)
44007 +{
44008 +    struct proc_dir_entry *p;
44009 +    int i;
44010 +
44011 +    qsnet_proc_register_hex (ep_config_root, "machine_id",      &MachineId,      0);
44012 +
44013 +    if ((svc_indicators_root = proc_mkdir("svc_indicators", ep_procfs_root)) != NULL)
44014 +    {
44015 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
44016 +       {
44017 +           if ((p = create_proc_entry (svc_indicator_names[i], 0, svc_indicators_root)) != NULL)
44018 +           {
44019 +               p->write_proc = NULL;
44020 +               p->read_proc  = proc_read_svc_indicator_bitmap;
44021 +               p->data       = (void *)(long) i;
44022 +               p->owner      = THIS_MODULE;
44023 +           }
44024 +       }
44025 +       
44026 +    }
44027 +}
44028 +
44029 +void
44030 +cm_procfs_fini (CM_SUBSYS *subsys)
44031 +{
44032 +    int i;
44033 +
44034 +    if (svc_indicators_root)
44035 +    {
44036 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
44037 +           remove_proc_entry (svc_indicator_names[i], svc_indicators_root);
44038 +       
44039 +       remove_proc_entry ("svc_indicators",   ep_procfs_root);
44040 +    }
44041 +
44042 +    remove_proc_entry ("machine_id",      ep_config_root);
44043 +}
44044 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/commands_elan4.c
44045 ===================================================================
44046 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/commands_elan4.c        2004-02-23 16:02:56.000000000 -0500
44047 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/commands_elan4.c     2005-07-28 14:52:52.855677672 -0400
44048 @@ -0,0 +1,173 @@
44049 +/*
44050 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44051 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44052 + *
44053 + *    For licensing information please see the supplied COPYING file
44054 + *
44055 + */
44056 +
44057 +#ident "@(#)$Id: commands_elan4.c,v 1.2 2003/10/23 15:07:53 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
44058 +/*      $Source: /cvs/master/quadrics/epmod/commands_elan4.c,v $*/
44059 +
44060 +#include <qsnet/kernel.h>
44061 +
44062 +#include <elan/kcomm.h>
44063 +
44064 +#include "kcomm_vp.h"
44065 +#include "kcomm_elan4.h"
44066 +#include "debug.h"
44067 +
44068 +#include <elan4/trtype.h>
44069 +
44070 +static __inline__ void
44071 +elan4_command_write (ELAN4_CQ *cq, E4_uint64 val, unsigned off)
44072 +{
44073 +    writeq (val, cq->cq_mapping + offsetof (E4_CommandPort, Command[off]));
44074 +}
44075 +
44076 +void
44077 +elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag)
44078 +{
44079 +    elan4_command_write (cq, tag | NOP_CMD, 0);
44080 +}
44081 +
44082 +void
44083 +elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data)
44084 +{
44085 +    elan4_command_write (cq, addr | WRITE_DWORD_CMD, 0);
44086 +    elan4_command_write (cq, data, 1);
44087 +}
44088 +
44089 +void
44090 +elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data)
44091 +{
44092 +    elan4_command_write (cq, addr | ADD_DWORD_CMD, 0);
44093 +    elan4_command_write (cq, data,                 1);
44094 +}
44095 +
44096 +void
44097 +elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype)
44098 +{
44099 +    elan4_command_write (cq, from | (datatype << COPY64_DATA_TYPE_SHIFT) | COPY64_CMD, 0);
44100 +    elan4_command_write (cq, to   | (datatype << COPY64_DATA_TYPE_SHIFT),              1);
44101 +}
44102 +
44103 +void
44104 +elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie)
44105 +{
44106 +    elan4_command_write (cq, (cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, 0);
44107 +}
44108 +
44109 +
44110 +void 
44111 +elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs)
44112 +{
44113 +    elan4_command_write (cq, regs->Registers[0] | RUN_THREAD_CMD, 0);
44114 +    elan4_command_write (cq, regs->Registers[1],                  1);
44115 +    elan4_command_write (cq, regs->Registers[2],                  2);
44116 +    elan4_command_write (cq, regs->Registers[3],                  3);
44117 +    elan4_command_write (cq, regs->Registers[4],                  4);
44118 +    elan4_command_write (cq, regs->Registers[5],                  5);
44119 +    elan4_command_write (cq, regs->Registers[6],                  6);
44120 +}
44121 +
44122 +void
44123 +elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma)
44124 +{
44125 +    E4_uint64 *dmaptr = (E4_uint64 *) dma;
44126 +
44127 +    elan4_command_write (cq, dmaptr[0] | RUN_DMA_CMD, 0);
44128 +    elan4_command_write (cq, dmaptr[1],               1);
44129 +    elan4_command_write (cq, dmaptr[2],               2);
44130 +    elan4_command_write (cq, dmaptr[3],               3);
44131 +    elan4_command_write (cq, dmaptr[4],               4);
44132 +    elan4_command_write (cq, dmaptr[5],               5);
44133 +    elan4_command_write (cq, dmaptr[6],               6);
44134 +}
44135 +
44136 +void
44137 +elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event)
44138 +{
44139 +    elan4_command_write (cq, event | SET_EVENT_CMD, 0);
44140 +}
44141 +
44142 +void
44143 +elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count)
44144 +{
44145 +    elan4_command_write (cq, SET_EVENTN_CMD,0);
44146 +    elan4_command_write (cq, event | count, 1);
44147 +}
44148 +    
44149 +void
44150 +elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1)
44151 +{
44152 +    elan4_command_write (cq, event | WAIT_EVENT_CMD, 0);
44153 +    elan4_command_write (cq, candt,                  1);
44154 +    elan4_command_write (cq, param0,                 2);
44155 +    elan4_command_write (cq, param1,                 3);
44156 +}
44157 +
44158 +void
44159 +elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command)
44160 +{
44161 +    elan4_command_write (cq, command | OPEN_STEN_PKT_CMD, 0);
44162 +}
44163 +
44164 +void
44165 +elan4_guard (ELAN4_CQ *cq, E4_uint64 command)
44166 +{
44167 +    elan4_command_write (cq, command | GUARD_CMD, 0);
44168 +}
44169 +
44170 +void
44171 +elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr)
44172 +{
44173 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
44174 +    elan4_command_write (cq, addr,                               1);
44175 +}
44176 +
44177 +void
44178 +elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0)
44179 +{
44180 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
44181 +    elan4_command_write (cq, addr,                               1);
44182 +    elan4_command_write (cq, p0,                                 2);
44183 +}
44184 +
44185 +void
44186 +elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1)
44187 +{
44188 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
44189 +    elan4_command_write (cq, addr,                               1);
44190 +    elan4_command_write (cq, p0,                                 2);
44191 +    elan4_command_write (cq, p1,                                 3);
44192 +}
44193 +
44194 +void
44195 +elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...)
44196 +{
44197 +    E4_uint32    ndword = ((trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT);
44198 +    va_list      ap;
44199 +    register int i;
44200 +
44201 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
44202 +    elan4_command_write (cq, addr,                               1);
44203 +    
44204 +    va_start (ap, addr);
44205 +    for (i = 2; i < ndword+2; i++) 
44206 +       elan4_command_write (cq, va_arg (ap, E4_uint64), i);
44207 +    va_end (ap);
44208 +}
44209 +
44210 +void
44211 +elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr)
44212 +{
44213 +    E4_uint32    ndword = ((trtype &TR_SIZE_MASK) >> TR_SIZE_SHIFT);
44214 +    register int i;
44215 +
44216 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
44217 +    elan4_command_write (cq, addr,                            1);
44218 +    for (i = 2; i < ndword+2; i++)
44219 +       elan4_command_write (cq, *ptr++, i);
44220 +}
44221 +
44222 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/conf_linux.c
44223 ===================================================================
44224 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/conf_linux.c    2004-02-23 16:02:56.000000000 -0500
44225 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/conf_linux.c 2005-07-28 14:52:52.856677520 -0400
44226 @@ -0,0 +1,309 @@
44227 +/*
44228 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44229 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44230 + *
44231 + *    For licensing information please see the supplied COPYING file
44232 + *
44233 + */
44234 +
44235 +#ident "@(#)$Id: conf_linux.c,v 1.37.2.3 2005/01/18 14:47:35 david Exp $"
44236 +/*      $Source: /cvs/master/quadrics/epmod/conf_linux.c,v $ */
44237 +
44238 +#include <qsnet/kernel.h>
44239 +#include <qsnet/autoconf.h>
44240 +
44241 +#include <elan/kcomm.h>
44242 +#include <elan/epsvc.h>
44243 +#include <elan/epcomms.h>
44244 +
44245 +#include "cm.h"
44246 +
44247 +#include "conf_linux.h"
44248 +
44249 +#include <linux/init.h>
44250 +#include <linux/module.h>
44251 +#include <linux/reboot.h>
44252 +#include <linux/notifier.h>
44253 +
44254 +/* Module parameters */
44255 +unsigned int epdebug        = 0;
44256 +unsigned int epdebug_console = 0;
44257 +unsigned int epdebug_cmlevel = 0;
44258 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44259 +unsigned int epdebug_check_sum = 0;
44260 +#endif
44261 +int         disabled        = 0;
44262 +int          sdram_assert    = 0;
44263 +int          assfail_mode    = 0;
44264 +int         txd_stabilise   = 7;
44265 +int         portals_envelopes = 0;
44266 +
44267 +/* External module parameters */
44268 +extern int     MaxSwitchLevels;
44269 +extern int      RejoinCheck;
44270 +extern int      RejoinPanic;
44271 +extern int      PositionCheck;
44272 +extern int      MachineId;
44273 +
44274 +/* Module globals */
44275 +EP_SYS          epsys;
44276 +
44277 +#ifdef MODULE
44278 +MODULE_AUTHOR("Quadrics Ltd");
44279 +MODULE_DESCRIPTION("Elan Kernel Comms");
44280 +
44281 +MODULE_LICENSE("GPL");
44282 +
44283 +MODULE_PARM(epdebug,         "i");
44284 +MODULE_PARM(epdebug_console, "i");
44285 +MODULE_PARM(epdebug_cmlevel, "i");
44286 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44287 +MODULE_PARM(epdebug_check_sum, "i");
44288 +#endif
44289 +MODULE_PARM(disabled,        "i");
44290 +
44291 +MODULE_PARM(MachineId,       "i");
44292 +MODULE_PARM(RejoinPanic,     "i");
44293 +MODULE_PARM(RejoinCheck,     "i");
44294 +MODULE_PARM(PositionCheck,   "i");
44295 +MODULE_PARM(MaxSwitchLevels, "i");
44296 +
44297 +MODULE_PARM(sdram_assert,    "i");
44298 +MODULE_PARM(assfail_mode,    "i");
44299 +MODULE_PARM(txd_stabilise,   "i");
44300 +MODULE_PARM(portals_envelopes,"i");
44301 +
44302 +/* epcomms.c large message service functions */
44303 +EXPORT_SYMBOL(ep_alloc_xmtr);
44304 +EXPORT_SYMBOL(ep_free_xmtr);
44305 +EXPORT_SYMBOL(ep_transmit_message);
44306 +EXPORT_SYMBOL(ep_multicast_message);
44307 +EXPORT_SYMBOL(ep_transmit_rpc);
44308 +
44309 +EXPORT_SYMBOL(ep_alloc_rcvr);
44310 +EXPORT_SYMBOL(ep_free_rcvr);
44311 +EXPORT_SYMBOL(ep_queue_receive);
44312 +EXPORT_SYMBOL(ep_requeue_receive);
44313 +EXPORT_SYMBOL(ep_rpc_put);
44314 +EXPORT_SYMBOL(ep_rpc_get);
44315 +EXPORT_SYMBOL(ep_complete_rpc);
44316 +EXPORT_SYMBOL(ep_complete_receive);
44317 +
44318 +EXPORT_SYMBOL(ep_poll_transmits);
44319 +EXPORT_SYMBOL(ep_enable_txcallbacks);
44320 +EXPORT_SYMBOL(ep_disable_txcallbacks);
44321 +
44322 +/* epcomms.c functions for accessing fields of rxds/txds */
44323 +EXPORT_SYMBOL(ep_rxd_arg);
44324 +EXPORT_SYMBOL(ep_rxd_len);
44325 +EXPORT_SYMBOL(ep_rxd_isrpc);
44326 +EXPORT_SYMBOL(ep_rxd_envelope);
44327 +EXPORT_SYMBOL(ep_rxd_payload);
44328 +EXPORT_SYMBOL(ep_rxd_node);
44329 +EXPORT_SYMBOL(ep_rxd_status);
44330 +EXPORT_SYMBOL(ep_rxd_statusblk);
44331 +EXPORT_SYMBOL(ep_txd_node);
44332 +EXPORT_SYMBOL(ep_txd_statusblk);
44333 +
44334 +/* kmap.c, nmh.c - handling mapping of pages into network memory */
44335 +EXPORT_SYMBOL(ep_dvma_reserve);
44336 +EXPORT_SYMBOL(ep_dvma_release);
44337 +EXPORT_SYMBOL(ep_dvma_load);
44338 +EXPORT_SYMBOL(ep_dvma_unload);
44339 +EXPORT_SYMBOL(ep_nmd_subset);
44340 +EXPORT_SYMBOL(ep_nmd_merge);
44341 +
44342 +EXPORT_SYMBOL(ep_system);
44343 +
44344 +/* kcomm.c */
44345 +EXPORT_SYMBOL(ep_nodeid);
44346 +EXPORT_SYMBOL(ep_numnodes);
44347 +EXPORT_SYMBOL(ep_waitfor_nodeid);
44348 +
44349 +/* railhints.c */
44350 +EXPORT_SYMBOL(ep_pickRail);
44351 +EXPORT_SYMBOL(ep_xmtr_bcastrail);
44352 +EXPORT_SYMBOL(ep_xmtr_prefrail);
44353 +EXPORT_SYMBOL(ep_xmtr_availrails);
44354 +EXPORT_SYMBOL(ep_xmtr_noderails);
44355 +EXPORT_SYMBOL(ep_rcvr_prefrail);
44356 +EXPORT_SYMBOL(ep_rcvr_availrails);
44357 +EXPORT_SYMBOL(ep_rxd_railmask);
44358 +
44359 +EXPORT_SYMBOL(ep_svc_indicator_bitmap);
44360 +EXPORT_SYMBOL(ep_svc_indicator_is_set);
44361 +EXPORT_SYMBOL(ep_svc_indicator_clear);
44362 +EXPORT_SYMBOL(ep_svc_indicator_set);
44363 +
44364 +/* cm.c */
44365 +EXPORT_SYMBOL(cm_svc_indicator_clear);
44366 +EXPORT_SYMBOL(cm_svc_indicator_set);
44367 +EXPORT_SYMBOL(cm_svc_indicator_is_set);
44368 +EXPORT_SYMBOL(cm_svc_indicator_bitmap);
44369 +
44370 +#endif
44371 +
44372 +EP_SYS *
44373 +ep_system()
44374 +{
44375 +    return (&epsys);
44376 +}
44377 +
44378 +void
44379 +ep_mod_inc_usecount()
44380 +{
44381 +    MOD_INC_USE_COUNT;
44382 +} 
44383 +
44384 +void
44385 +ep_mod_dec_usecount()
44386 +{
44387 +    MOD_DEC_USE_COUNT;
44388 +}
44389 +
44390 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
44391 +
44392 +#include <linux/dump.h>
44393 +
44394 +static int
44395 +ep_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
44396 +{
44397 +    if (event == DUMP_BEGIN)
44398 +       ep_shutdown (&epsys);
44399 +
44400 +    return (NOTIFY_DONE);
44401 +}
44402 +static struct notifier_block ep_dump_notifier = 
44403 +{
44404 +    notifier_call:     ep_dump_event,
44405 +    priority:          0,
44406 +};
44407 +
44408 +#endif
44409 +
44410 +static int
44411 +ep_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
44412 +{
44413 +    if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
44414 +       ep_shutdown (&epsys);
44415 +
44416 +    return (NOTIFY_DONE);
44417 +}
44418 +
44419 +static struct notifier_block ep_reboot_notifier = 
44420 +{
44421 +    notifier_call:     ep_reboot_event,
44422 +    priority:          0,
44423 +};
44424 +
44425 +static int
44426 +ep_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
44427 +{
44428 +    ep_shutdown (&epsys);
44429 +
44430 +    return (NOTIFY_DONE);
44431 +}
44432 +
44433 +static struct notifier_block ep_panic_notifier = 
44434 +{
44435 +    notifier_call:     ep_panic_event,
44436 +    priority:          0,
44437 +};
44438 +
44439 +/*
44440 + * Module configuration. 
44441 + */
44442 +#ifdef MODULE
44443 +static int __init ep_init(void)
44444 +#else
44445 +__initfunc(int ep_init(void))
44446 +#endif
44447 +{
44448 +    register int rmask = 0;
44449 +
44450 +    ep_procfs_init ();
44451 +
44452 +    ep_sys_init (&epsys);
44453 +
44454 +#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE)
44455 +    rmask = ep4_create_rails (&epsys, disabled);
44456 +#endif
44457 +    
44458 +    /* If we've brought up an elan4 rail, then disable all elan3 rails. */
44459 +    if ((rmask & ~disabled) != 0)
44460 +       disabled = ~rmask;
44461 +
44462 +#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE)
44463 +    rmask = ep3_create_rails (&epsys, disabled);
44464 +#endif
44465 +
44466 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
44467 +    register_dump_notifier (&ep_dump_notifier);
44468 +#endif
44469 +    register_reboot_notifier (&ep_reboot_notifier);
44470 +
44471 +#if !defined(NO_PANIC_NOTIFIER)
44472 +    notifier_chain_register (&panic_notifier_list, &ep_panic_notifier);
44473 +#endif
44474 +
44475 +    return (0);
44476 +}
44477 +
44478 +/*
44479 + * Module removal.
44480 + */
44481 +#ifdef MODULE
44482 +static void
44483 +__exit ep_exit(void)
44484 +{
44485 +    register int i;
44486 +
44487 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
44488 +    unregister_dump_notifier (&ep_dump_notifier);
44489 +#endif
44490 +    unregister_reboot_notifier (&ep_reboot_notifier);
44491 +
44492 +#if !defined(NO_PANIC_NOTIFIER)
44493 +    notifier_chain_unregister (&panic_notifier_list, &ep_panic_notifier);
44494 +#endif
44495 +
44496 +    for (i = 0; i < EP_MAX_RAILS; i++)
44497 +    {
44498 +       if (epsys.Rails[i])
44499 +       {
44500 +           switch (epsys.Rails[i]->State)
44501 +           {
44502 +           case EP_RAIL_STATE_UNINITIALISED:
44503 +               break;
44504 +
44505 +           case EP_RAIL_STATE_STARTED:
44506 +           case EP_RAIL_STATE_RUNNING:
44507 +           case EP_RAIL_STATE_INCOMPATIBLE:
44508 +               /* remove per-rail CM proc entries */
44509 +               ep_stop_rail (epsys.Rails[i]);
44510 +               break;
44511 +           }
44512 +
44513 +           /* remove EP proc rail entries after per-rail CM entries */
44514 +           ep_procfs_rail_fini (epsys.Rails[i]);
44515 +           ep_destroy_rail (epsys.Rails[i]);
44516 +       }
44517 +    }
44518 +
44519 +    ep_sys_fini (&epsys);
44520 +
44521 +    ep_procfs_fini ();
44522 +}
44523 +
44524 +/* Declare the module init and exit functions */
44525 +module_init(ep_init);
44526 +module_exit(ep_exit);
44527 +
44528 +#endif
44529 +
44530 +
44531 +/*
44532 + * Local variables:
44533 + * c-file-style: "stroustrup"
44534 + * End:
44535 + */
44536 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/conf_linux.h
44537 ===================================================================
44538 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/conf_linux.h    2004-02-23 16:02:56.000000000 -0500
44539 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/conf_linux.h 2005-07-28 14:52:52.856677520 -0400
44540 @@ -0,0 +1,29 @@
44541 +/*
44542 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44543 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44544 + *
44545 + *    For licensing information please see the supplied COPYING file
44546 + *
44547 + */
44548 +
44549 +#ident "@(#)$Id: conf_linux.h,v 1.6 2003/10/02 14:16:07 mike Exp $"
44550 +/*      $Source: /cvs/master/quadrics/epmod/conf_linux.h,v $*/
44551 +
44552 +#ifndef __ELAN_CONF_LINUX_H
44553 +#define __ELAN_CONF_LINUX_H
44554 +
44555 +extern void ep_procfs_init(void);
44556 +extern void ep_procfs_fini(void);
44557 +extern void ep_procfs_rail_init(EP_RAIL *rail);
44558 +extern void ep_procfs_rail_fini(EP_RAIL *rail);
44559 +
44560 +extern void ep_procfs_svc_indicator_create(int svc_indicator, char *name);
44561 +extern void ep_procfs_svc_indicator_remove(int svc_indicator, char *name);
44562 +
44563 +#endif /* __ELAN_CONF_LINUX_H */
44564 +
44565 +/*
44566 + * Local variables:
44567 + * c-file-style: "stroustrup"
44568 + * End:
44569 + */
44570 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/debug.c
44571 ===================================================================
44572 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/debug.c 2004-02-23 16:02:56.000000000 -0500
44573 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/debug.c      2005-07-28 14:52:52.857677368 -0400
44574 @@ -0,0 +1,145 @@
44575 +/*
44576 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44577 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44578 + *
44579 + *    For licensing information please see the supplied COPYING file
44580 + *
44581 + */
44582 +
44583 +#ident "@(#)$Id: debug.c,v 1.28.2.1 2004/11/12 10:54:50 mike Exp $"
44584 +/*      $Source: /cvs/master/quadrics/epmod/debug.c,v $*/
44585 +
44586 +#include <qsnet/kernel.h>
44587 +
44588 +#include <elan/kcomm.h>
44589 +
44590 +#include "debug.h"
44591 +
44592 +DisplayInfo di_ep_debug = {ep_debugf, DBG_DEBUG};
44593 +
44594 +/*
44595 + * Generate a partial bitmap string, for the bitmap from offset "off" for "count" bits,
44596 + * to allow for displaying of subsets, treat entry 0 of the bitmap as having value "base".
44597 + */
44598 +int
44599 +ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int off, int nbits)
44600 +{
44601 +    char entry[12];                                            /* space for N-N */
44602 +    register int i, j, len;
44603 +    register int notstart = off;
44604 +    register int notfirst = 0;
44605 +    char *p = str;
44606 +    
44607 +    for (i = off; i < nbits; i++)
44608 +    {
44609 +       if (BT_TEST (bitmap, i))
44610 +       {
44611 +           for (j = i+1; j < nbits; j++)
44612 +               if (! BT_TEST (bitmap, j))
44613 +                   break;
44614 +
44615 +           if (j == (i+1))
44616 +               len = (int)sprintf (entry, "%d", base + i);
44617 +           else
44618 +               len = (int)sprintf (entry, "%d-%d", base + i, base + j-1);
44619 +           
44620 +           /* NOTE the 2 is for: one for comma, one for (possible) closing bracket */
44621 +           if ((p - str) <= (nbytes - (len+3)))
44622 +               p += (int)sprintf (p, "%c%s", notfirst++ ? ',' : notstart ? ' ' : '[', entry);
44623 +           else
44624 +           {
44625 +               /* no more space on this line, so move onto next */
44626 +               sprintf (p, "%c", notfirst++ ? ',' : '[');
44627 +
44628 +               return (i);
44629 +           }
44630 +
44631 +           i = j;
44632 +       }
44633 +    }
44634 +    
44635 +    if (!notfirst)
44636 +       sprintf (str, "<empty>");
44637 +    else
44638 +       strcpy (p, "]");
44639 +
44640 +    return (-1);
44641 +}
44642 +
44643 +void
44644 +ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits)
44645 +{
44646 +    /* Tru64 kernel printf() truncates lines at 128 bytes - the man pages for printf (9)
44647 +     * do not mention this restriction, nor that it does not terminate the line with a
44648 +     * carriage return, this  is pretty naff. 
44649 +     * Linux has a similar limit though is much more generous at 1024 - and you can just 
44650 +     * look at the code to see why this has been done.
44651 +     *
44652 +     * Our nodeset information could well be longer than 128 characters,  so we're going to 
44653 +     * have to split it into a number of lines. */
44654 +
44655 +#define LINEBUF_SIZE           128
44656 +    char *p, linebuf[LINEBUF_SIZE+1];                          /* +1 for null termination */
44657 +    int i, noff, off = 0;
44658 +
44659 +    do {
44660 +       if (off == 0)
44661 +           p = linebuf + (int)sprintf (linebuf, "%s: %s ", prefix, tag);
44662 +       else
44663 +       {
44664 +           p = linebuf + (int)sprintf (linebuf, "%s:  ", prefix);
44665 +           for (i = 0; tag[i] != '\0'; i++)
44666 +               *p++ = ' ';
44667 +       }
44668 +
44669 +       noff = ep_sprintf_bitmap (p, &linebuf[LINEBUF_SIZE-1]-p, bitmap, base, off, nbits);
44670 +
44671 +       printk ("%s\n", linebuf);
44672 +
44673 +    } while ((off = noff) != -1);
44674 +
44675 +#undef LINEBUF_SIZE
44676 +}
44677 +
44678 +void
44679 +ep_debugf (long mode, char *fmt, ...)
44680 +{
44681 +   va_list ap;
44682 +   char prefix[32];
44683 +   
44684 +   va_start (ap, fmt);
44685 +#if defined(LINUX)
44686 +   sprintf (prefix, "[%08d.%04d] ", (int) lbolt, current->pid);
44687 +#else
44688 +   sprintf (prefix, "[%08d.----] ", (int) lbolt);
44689 +#endif
44690 +   qsnet_vdebugf ((mode & epdebug_console ? QSNET_DEBUG_CONSOLE: 0) | QSNET_DEBUG_BUFFER, prefix, fmt, ap);
44691 +   va_end (ap);
44692 +}
44693 +
44694 +int
44695 +ep_assfail (EP_RAIL *rail, const char *ex, const char *func, const char *file, const int line)
44696 +{
44697 +    qsnet_debugf (QSNET_DEBUG_BUFFER, "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
44698 +    
44699 +    printk (KERN_EMERG "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
44700 +    
44701 +    if (panicstr)
44702 +       return (0);
44703 +    
44704 +    if (assfail_mode & 1)                              /* return to BUG() */
44705 +       return 1;
44706 +    
44707 +    if (assfail_mode & 2)
44708 +       panic ("ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
44709 +    if (assfail_mode & 4)
44710 +       epdebug = 0;
44711 +    
44712 +    return 0;
44713 +}
44714 +
44715 +/*
44716 + * Local variables:
44717 + * c-file-style: "stroustrup"
44718 + * End:
44719 + */
44720 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/debug_elan4.c
44721 ===================================================================
44722 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/debug_elan4.c   2004-02-23 16:02:56.000000000 -0500
44723 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/debug_elan4.c        2005-07-28 14:52:52.857677368 -0400
44724 @@ -0,0 +1,59 @@
44725 +/*
44726 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44727 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44728 + *
44729 + *    For licensing information please see the supplied COPYING file
44730 + *
44731 + */
44732 +
44733 +#ident "@(#)$Id: debug_elan4.c,v 1.1 2004/05/19 10:21:04 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
44734 +/*      $Source: /cvs/master/quadrics/epmod/debug_elan4.c,v $*/
44735 +
44736 +#include <qsnet/kernel.h>
44737 +
44738 +#include <elan/kcomm.h>
44739 +
44740 +#include "kcomm_vp.h"
44741 +#include "kcomm_elan4.h"
44742 +#include "conf_linux.h"
44743 +#include "debug.h"
44744 +
44745 +static void
44746 +ep4_display_ecqs (EP4_RAIL *rail)
44747 +{
44748 +    struct list_head *el;
44749 +    unsigned long flags;
44750 +    int i;
44751 +
44752 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
44753 +    for (i = 0; i <EP4_NUM_ECQ; i++)
44754 +    {
44755 +       list_for_each (el, &rail->r_ecq_list[i]) {
44756 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
44757 +
44758 +           ep_debugf (DBG_DEBUG, "ECQ: type %d: avail %d cqnum %d\n", i, ecq->ecq_avail, elan4_cq2num (ecq->ecq_cq));
44759 +       }
44760 +    }
44761 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
44762 +}
44763 +
44764 +void
44765 +ep4_debug_rail (EP_RAIL *r)
44766 +{
44767 +    EP4_RAIL *rail = (EP4_RAIL *) r;
44768 +    EP_SYS   *sys  = rail->r_generic.System;
44769 +
44770 +    ep_debugf (DBG_DEBUG, "ep%d: is elan4 %d rev %c\n", rail->r_generic.Number,
44771 +              rail->r_generic.Devinfo.dev_instance, 'a' + rail->r_generic.Devinfo.dev_revision_id);
44772 +
44773 +    ep4_display_ecqs (rail);
44774 +
44775 +    ep_display_alloc (&sys->Allocator);
44776 +    ep_display_rmap (sys->Allocator.ResourceMap);
44777 +
44778 +    ep_display_alloc (&rail->r_generic.ElanAllocator);
44779 +    ep_display_alloc (&rail->r_generic.MainAllocator);
44780 +
44781 +    ep_display_rmap (rail->r_generic.ElanAllocator.ResourceMap);
44782 +}
44783 +
44784 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/debug.h
44785 ===================================================================
44786 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/debug.h 2004-02-23 16:02:56.000000000 -0500
44787 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/debug.h      2005-07-28 14:52:52.857677368 -0400
44788 @@ -0,0 +1,109 @@
44789 +/*
44790 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44791 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44792 + *
44793 + *    For licensing information please see the supplied COPYING file
44794 + *
44795 + */
44796 +
44797 +#ifndef _ELAN3_EPDEBUG_H
44798 +#define _ELAN3_EPDEBUG_H
44799 +
44800 +#ident "$Id: debug.h,v 1.18.2.1 2004/11/12 10:54:50 mike Exp $"
44801 +/*      $Source: /cvs/master/quadrics/epmod/debug.h,v $ */
44802 +
44803 +extern unsigned int epdebug;
44804 +extern unsigned int epdebug_console;
44805 +extern unsigned int epdebug_cmlevel;
44806 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
44807 +extern unsigned int epdebug_check_sum;
44808 +#endif
44809 +#define DBG_CONFIG             0x00000001                      /* Module configuration */
44810 +#define DBG_PROBE              0x00000002
44811 +#define DBG_ROUTETABLE         0x00000004
44812 +#define DBG_STATEMAP           0x00000008
44813 +
44814 +#define DBG_CM                 0x00000020
44815 +#define DBG_XMTR               0x00000040
44816 +#define DBG_RCVR               0x00000080
44817 +#define DBG_FORWARD            0x00000100
44818 +#define DBG_DISCON             0x00000200
44819 +#define DBG_EPTRAP             0x00000400
44820 +#define DBG_COMMAND            0x00000800
44821 +#define DBG_RETRY              0x00001000
44822 +#define DBG_DEBUG              0x00002000
44823 +#define DBG_NETWORK_ERROR      0x00004000
44824 +#define DBG_MSGSYS             0x00008000
44825 +#define DBG_MANAGER            0x00010000
44826 +#define DBG_KMAP               0x00020000
44827 +#define DBG_FAILOVER           0x00040000
44828 +#define DBG_MAPNMD             0x00080000
44829 +#define DBG_KMSG               0x00100000
44830 +#define DBG_SVC                 0x00200000
44831 +#define DBG_STABILISE          0x00400000
44832 +
44833 +#if defined(DEBUG_PRINTF)
44834 +
44835 +#  define EPRINTF0(m,fmt)                      ((epdebug&(m)) ? ep_debugf(m,fmt)                     : (void)0)
44836 +#  define EPRINTF1(m,fmt,a)                    ((epdebug&(m)) ? ep_debugf(m,fmt,a)                   : (void)0)
44837 +#  define EPRINTF2(m,fmt,a,b)                  ((epdebug&(m)) ? ep_debugf(m,fmt,a,b)                 : (void)0)
44838 +#  define EPRINTF3(m,fmt,a,b,c)                        ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c)               : (void)0)
44839 +#  define EPRINTF4(m,fmt,a,b,c,d)              ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d)             : (void)0)
44840 +#  define EPRINTF5(m,fmt,a,b,c,d,e)            ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e)           : (void)0)
44841 +#  define EPRINTF6(m,fmt,a,b,c,d,e,f)          ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f)         : (void)0)
44842 +#  define EPRINTF7(m,fmt,a,b,c,d,e,f,g)                ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g)       : (void)0)
44843 +#  define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h)      ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h)     : (void)0)
44844 +#  define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i)    ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i)   : (void)0)
44845 +#  define EPRINTF10(m,fmt,a,b,c,d,e,f,g,h,i,j) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i,j) : (void)0)
44846 +
44847 +#  define CPRINTF0(lvl,fmt)                    (((lvl) <= epdebug_cmlevel) ? EPRINTF0(DBG_CM,fmt)                   : (void)0)
44848 +#  define CPRINTF1(lvl,fmt,a)                  (((lvl) <= epdebug_cmlevel) ? EPRINTF1(DBG_CM,fmt,a)                 : (void)0)
44849 +#  define CPRINTF2(lvl,fmt,a,b)                        (((lvl) <= epdebug_cmlevel) ? EPRINTF2(DBG_CM,fmt,a,b)               : (void)0)
44850 +#  define CPRINTF3(lvl,fmt,a,b,c)              (((lvl) <= epdebug_cmlevel) ? EPRINTF3(DBG_CM,fmt,a,b,c)             : (void)0)
44851 +#  define CPRINTF4(lvl,fmt,a,b,c,d)            (((lvl) <= epdebug_cmlevel) ? EPRINTF4(DBG_CM,fmt,a,b,c,d)           : (void)0)
44852 +#  define CPRINTF5(lvl,fmt,a,b,c,d,e)          (((lvl) <= epdebug_cmlevel) ? EPRINTF5(DBG_CM,fmt,a,b,c,d,e)         : (void)0)
44853 +#  define CPRINTF6(lvl,fmt,a,b,c,d,e,f)                (((lvl) <= epdebug_cmlevel) ? EPRINTF6(DBG_CM,fmt,a,b,c,d,e,f)       : (void)0)
44854 +#  define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g)      (((lvl) <= epdebug_cmlevel) ? EPRINTF7(DBG_CM,fmt,a,b,c,d,e,f,g)     : (void)0)
44855 +#  define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h)    (((lvl) <= epdebug_cmlevel) ? EPRINTF8(DBG_CM,fmt,a,b,c,d,e,f,g,h)   : (void)0)
44856 +#  define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i)  (((lvl) <= epdebug_cmlevel) ? EPRINTF9(DBG_CM,fmt,a,b,c,d,e,f,g,h,i) : (void)0)
44857 +
44858 +#if defined __GNUC__
44859 +extern void ep_debugf (long mode, char *fmt, ...) __attribute__ ((format (printf,2,3)));
44860 +#else
44861 +extern void ep_debugf (long mode, char *fmt, ...);
44862 +#endif
44863 +
44864 +#else
44865 +
44866 +#  define EPRINTF0(m,fmt)                      (0)
44867 +#  define EPRINTF1(m,fmt,a)                    (0)
44868 +#  define EPRINTF2(m,fmt,a,b)                  (0)
44869 +#  define EPRINTF3(m,fmt,a,b,c)                        (0)
44870 +#  define EPRINTF4(m,fmt,a,b,c,d)              (0)
44871 +#  define EPRINTF5(m,fmt,a,b,c,d,e)            (0)
44872 +#  define EPRINTF6(m,fmt,a,b,c,d,e,f)          (0)
44873 +#  define EPRINTF7(m,fmt,a,b,c,d,e,f,g)                (0)
44874 +#  define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h)      (0)
44875 +#  define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i)    (0)
44876 +#  define EPRINTF10(m,fmt,a,b,c,d,e,f,g,h,i,j) (0)
44877 +
44878 +#  define CPRINTF0(lvl,fmt)                    (0)
44879 +#  define CPRINTF1(lvl,fmt,a)                  (0)
44880 +#  define CPRINTF2(lvl,fmt,a,b)                        (0)
44881 +#  define CPRINTF3(lvl,fmt,a,b,c)              (0)
44882 +#  define CPRINTF4(lvl,fmt,a,b,c,d)            (0)
44883 +#  define CPRINTF5(lvl,fmt,a,b,c,d,e)          (0)
44884 +#  define CPRINTF6(lvl,fmt,a,b,c,d,e,f)                (0)
44885 +#  define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g)      (0)
44886 +#  define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h)    (0)
44887 +#  define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i)  (0)
44888 +
44889 +#endif /* DEBUG_PRINTF */
44890 +
44891 +/*
44892 + * Local variables:
44893 + * c-file-style: "stroustrup"
44894 + * End:
44895 + */
44896 +#endif /* _ELAN3_EPDEBUG_H */
44897 +
44898 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S
44899 ===================================================================
44900 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S      2004-02-23 16:02:56.000000000 -0500
44901 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S   2005-07-28 14:52:52.858677216 -0400
44902 @@ -0,0 +1,133 @@
44903 +/*
44904 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
44905 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
44906 + *
44907 + *    For licensing information please see the supplied COPYING file
44908 + *
44909 + */
44910 +
44911 +#ident "@(#)$Id: epcomms_asm_elan4_thread.S,v 1.5 2004/04/25 11:25:43 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
44912 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_asm_elan4_thread.S,v $*/
44913 +
44914 +#include <elan4/events.h>
44915 +#include <elan4/commands.h>
44916 +
44917 +#include "assym_elan4.h"
44918 +
44919 +/* XXXXX - registers.h */
44920 +#define E4_MAIN_INT_SHIFT              14
44921 +
44922 +/*
44923 + * c_waitevent_interrupt (E4_uint64 *commandport, E4_Event *event, E4_uint64 count, E4_uint64 intcookie)
44924 + */
44925 +       .global c_waitevent_interrupt
44926 +c_waitevent_interrupt:
44927 +       add             %sp, -192, %sp
44928 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
44929 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
44930 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
44931 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
44932 +       nop                                             // BUG FIX: E4 RevA
44933 +       nop                                             // BUG FIX: E4 RevA
44934 +
44935 +       mov             %r7, %r18                       // (%r2) return pc
44936 +1:     call            2f
44937 +        mov            %sp, %r17                       // (%r1) SP
44938 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
44939 +       st32            %r16, [%sp]                     // event source block
44940 +       mov             MAKE_EXT_CLEAN_CMD, %r23
44941 +       st8             %r23, [%sp+56]                  // event source block
44942 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
44943 +       mov             %r23,%r23                       // BUG FIX: E4 RevA
44944 +       nop                                             // BUG FIX: E4 RevA
44945 +       nop                                             // BUG FIX: E4 RevA
44946 +       
44947 +       or              %r9, WAIT_EVENT_CMD, %r16                                               ! WAIT_EVENT_CMD | event
44948 +       sll8            %r10, 32, %r17
44949 +       or              %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17  !   ev_CountAndType
44950 +       mov             %sp, %r18                                                               !   ev_Source
44951 +       mov             %r8, %r19                                                               !   ev_Dest
44952 +       sll8            %r11, E4_MAIN_INT_SHIFT, %r20
44953 +       or              %r20, INTERRUPT_CMD, %r20                                               ! INTERRUPT_CMD | (cookie << E4_MAIN_INT_SHIFT)
44954 +       mov             NOP_CMD, %r21
44955 +       mov             NOP_CMD, %r22
44956 +       mov             NOP_CMD, %r23
44957 +
44958 +       st64suspend     %r16, [%r8]
44959 +       
44960 +3:     ld64            [%sp + 64], %r16                // restore call preserved register
44961 +       ld64            [%sp + 128], %r24
44962 +       jmpl            %r2+8, %r0                      // and return
44963 +        add            %sp, 192, %sp
44964 +
44965 +
44966 +#define EP4_RCVR_PENDING_STALLED               1               /* indicates thread has stalled for no descriptor (rcvr_pending_head) */
44967 +
44968 +#define RXD_DEBUG(VAL,RXD,TMP) \
44969 +       mov     VAL, TMP; \
44970 +       st8     TMP, [RXD + EP4_RXD_DEBUG]
44971 +
44972 +       
44973 +       /*
44974 +        * %r2  - rcvr elan
44975 +        * %r3  - rxd elan
44976 +        */
44977 +       .global c_queue_rxd
44978 +c_queue_rxd:
44979 +       RXD_DEBUG(1, %r3, %r23)
44980 +       
44981 +       ld16    [%r2 + EP4_RCVR_PENDING_TAILP], %r18    /* r18 == tailp, r19 = head */
44982 +       add     %r3, EP4_RXD_NEXT, %r4
44983 +       
44984 +       st8     %r0, [%r3 + EP4_RXD_NEXT]               /* rxd->rxd_next = NULL */
44985 +       st8     %r4, [%r2 + EP4_RCVR_PENDING_TAILP]     /* tailp = &rxd->rxd_next */
44986 +       st8     %r3, [%r18]                             /* *tailp = rxd */
44987 +
44988 +       cmp     %r19, EP4_RCVR_PENDING_STALLED          /* thread stalled ? */
44989 +       beq     1f
44990 +        mov    %r18, %r16                              /* must have used %r16, %r19, %r23 */
44991 +       mov     %r3, %r23
44992 +
44993 +       RXD_DEBUG(2, %r3, %r23)
44994 +       
44995 +       st8suspend %r16, [%r3 + EP4_RXD_QUEUED]         /* no - mark as queued - all done */
44996 +
44997 +1:     st8     %r16, [%r3 + EP4_RXD_QUEUED]            /* mark as queued */
44998 +
44999 +       RXD_DEBUG(3, %r3, %r23)
45000 +
45001 +       mov     %r3, %r8                                /* return rxd from c_stall_thread */
45002 +       ba      .epcomms_resume_thread                  /* resume the thread */
45003 +        ld64   [%r2 + EP4_RCVR_THREAD_STALL], %r0
45004 +
45005 +       /*
45006 +        *  c_stall_thread (EP4_RCVR_ELAN *rcvrElan)
45007 +        */
45008 +       .global c_stall_thread
45009 +c_stall_thread:
45010 +       add             %sp, -192, %sp
45011 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
45012 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
45013 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
45014 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
45015 +       nop                                             // BUG FIX: E4 RevA
45016 +       nop                                             // BUG FIX: E4 RevA
45017 +
45018 +       mov             EP4_RCVR_PENDING_STALLED, %r9   // Mark rcvr as stalled
45019 +       st8             %r9, [%r8 + EP4_RCVR_PENDING_HEAD]
45020 +
45021 +       // XXXX _ TBD should generate interrupt
45022 +
45023 +       mov             %r1, %r17                       // SP 
45024 +       mov             %r7, %r23                       // return pc
45025 +
45026 +       st64suspend     %r16, [%r8 + EP4_RCVR_THREAD_STALL]
45027 +       
45028 +.epcomms_resume_thread:
45029 +       /* %r8 == rxdElan */
45030 +       
45031 +       ld64            [%sp + 64], %r16                // restore call preserved register
45032 +       ld64            [%sp + 128], %r24
45033 +       jmpl            %r7+8, %r0                      // and return
45034 +        add            %sp, 192, %sp
45035 +
45036 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms.c
45037 ===================================================================
45038 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms.c       2004-02-23 16:02:56.000000000 -0500
45039 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms.c    2005-07-28 14:52:52.859677064 -0400
45040 @@ -0,0 +1,484 @@
45041 +/*
45042 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45043 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45044 + *
45045 + *    For licensing information please see the supplied COPYING file
45046 + *
45047 + */
45048 +
45049 +#ident "@(#)$Id: epcomms.c,v 1.71.2.6 2004/11/30 12:02:16 mike Exp $"
45050 +/*      $Source: /cvs/master/quadrics/epmod/epcomms.c,v $ */
45051 +
45052 +#include <qsnet/kernel.h>
45053 +#include <qsnet/kthread.h>
45054 +#include <qsnet/autoconf.h>
45055 +
45056 +#include <elan/kcomm.h>
45057 +#include <elan/epsvc.h>
45058 +#include <elan/epcomms.h>
45059 +#include "cm.h"
45060 +#include "debug.h"
45061 +
45062 +static void
45063 +ep_comms_thread (void *arg)
45064 +{
45065 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) arg;
45066 +    struct list_head *el;
45067 +
45068 +    kernel_thread_init ("ep_comms");
45069 +
45070 +    /* since ep_alloc_xmtr() has incremented the module use count,
45071 +     * we would be preventing the module from being unloaded, so
45072 +     * we decrement the use count since this thread must terminate
45073 +     * during unload of the module.
45074 +     */
45075 +    ep_mod_dec_usecount();
45076 +
45077 +    for (;;)
45078 +    {
45079 +       long nextRunTime = 0;
45080 +
45081 +       /* NOTE - subsys->Lock serializes us against flush/relocations
45082 +        *        caused by rail nodeset transitions.
45083 +        */
45084 +       kmutex_lock (&subsys->Lock);
45085 +       list_for_each (el, &subsys->Transmitters) {
45086 +           nextRunTime = ep_check_xmtr (list_entry (el, EP_XMTR, Link), nextRunTime);
45087 +       }
45088 +
45089 +       list_for_each (el, &subsys->Receivers) {
45090 +           nextRunTime = ep_check_rcvr (list_entry (el, EP_RCVR, Link), nextRunTime);
45091 +       }
45092 +       kmutex_unlock (&subsys->Lock);
45093 +
45094 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
45095 +       ep_csum_rxds (subsys);  
45096 +#endif
45097 +       nextRunTime = ep_forward_rxds (subsys, nextRunTime);
45098 +
45099 +       if (ep_kthread_sleep (&subsys->Thread, nextRunTime) < 0)
45100 +           break;
45101 +    }
45102 +
45103 +    ep_mod_inc_usecount();
45104 +
45105 +    ep_kthread_stopped (&subsys->Thread);
45106 +    kernel_thread_exit();
45107 +}
45108 +
45109 +int
45110 +ep_comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail)
45111 +{
45112 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) s;
45113 +    EP_COMMS_RAIL    *commsRail;
45114 +    struct list_head *el;
45115 +
45116 +    printk ("%s: vendorid=%x deviceid=%x\n", rail->Name, rail->Devinfo.dev_vendor_id, rail->Devinfo.dev_device_id);
45117 +
45118 +    switch (rail->Devinfo.dev_device_id)
45119 +    {
45120 +#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE)
45121 +    case PCI_DEVICE_ID_ELAN3:
45122 +       commsRail = ep3comms_add_rail (s, sys, rail);
45123 +       break;
45124 +#endif
45125 +#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE)
45126 +    case PCI_DEVICE_ID_ELAN4:
45127 +       commsRail = ep4comms_add_rail (s, sys, rail);
45128 +       break;
45129 +#endif
45130 +    default:
45131 +       return 0;
45132 +    }
45133 +
45134 +    if (commsRail == NULL)
45135 +       return 1;
45136 +
45137 +    commsRail->Rail   = rail;
45138 +    commsRail->Subsys = subsys;
45139 +
45140 +    kmutex_lock (&subsys->Lock);
45141 +    list_add_tail (&commsRail->Link, &subsys->Rails);
45142 +    
45143 +    list_for_each (el, &subsys->Receivers) {
45144 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45145 +
45146 +       EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail);
45147 +    }
45148 +       
45149 +    list_for_each (el, &subsys->Transmitters) {
45150 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45151 +
45152 +       EP_RAIL_OP (commsRail, Xmtr.AddRail) (xmtr, commsRail);
45153 +    }
45154 +
45155 +    kmutex_unlock (&subsys->Lock);
45156 +
45157 +    return 0;
45158 +}
45159 +
45160 +void
45161 +ep_comms_del_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail)
45162 +{
45163 +    EP_COMMS_SUBSYS  *subsys    = (EP_COMMS_SUBSYS *) s;
45164 +    EP_COMMS_RAIL    *commsRail = NULL;
45165 +    struct list_head *el;
45166 +
45167 +    kmutex_lock (&subsys->Lock);
45168 +    /* find out rail entry and remove from system list */
45169 +    list_for_each (el, &subsys->Rails) {
45170 +       if ((commsRail = list_entry (el, EP_COMMS_RAIL, Link))->Rail == rail)
45171 +           break;
45172 +    }
45173 +
45174 +    list_del (&commsRail->Link);
45175 +    
45176 +    list_for_each (el, &subsys->Receivers) {
45177 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45178 +
45179 +       EP_RAIL_OP(commsRail, Rcvr.DelRail) (rcvr, commsRail);
45180 +    }
45181 +       
45182 +    list_for_each (el, &subsys->Transmitters) {
45183 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45184 +
45185 +       EP_RAIL_OP(commsRail,Xmtr.DelRail) (xmtr, commsRail);
45186 +    }
45187 +
45188 +    kmutex_unlock (&subsys->Lock);
45189 +
45190 +    EP_RAIL_OP (commsRail, DelRail) (commsRail);
45191 +}
45192 +
45193 +void
45194 +ep_comms_fini (EP_SUBSYS *s, EP_SYS *sys)
45195 +{
45196 +    EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) s;
45197 +
45198 +    ep_kthread_stop (&subsys->Thread);
45199 +    ep_kthread_destroy (&subsys->Thread);
45200 +
45201 +    if (subsys->ForwardXmtr)
45202 +       ep_free_xmtr (subsys->ForwardXmtr);
45203 +
45204 +    spin_lock_destroy (&subsys->ForwardDescLock);
45205 +
45206 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
45207 +    spin_lock_destroy (&subsys->CheckSumDescLock);
45208 +#endif
45209 +
45210 +    kmutex_destroy (&subsys->Lock);
45211 +
45212 +    KMEM_FREE (subsys, sizeof (EP_COMMS_SUBSYS));
45213 +}
45214 +
45215 +int
45216 +ep_comms_init (EP_SYS *sys)
45217 +{
45218 +    EP_COMMS_SUBSYS *subsys;
45219 +
45220 +    KMEM_ZALLOC (subsys, EP_COMMS_SUBSYS *, sizeof (EP_COMMS_SUBSYS), 1);
45221 +
45222 +    if (subsys == NULL)
45223 +       return (ENOMEM);
45224 +
45225 +    INIT_LIST_HEAD (&subsys->Rails);
45226 +    INIT_LIST_HEAD (&subsys->Receivers);
45227 +    INIT_LIST_HEAD (&subsys->Transmitters);
45228 +    INIT_LIST_HEAD (&subsys->ForwardDescList);
45229 +
45230 +    kmutex_init (&subsys->Lock);
45231 +    spin_lock_init (&subsys->ForwardDescLock);
45232 +
45233 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
45234 +    INIT_LIST_HEAD (&subsys->CheckSumDescList);
45235 +    spin_lock_init (&subsys->CheckSumDescLock);
45236 +#endif
45237 +
45238 +    subsys->Subsys.Sys        = sys;
45239 +    subsys->Subsys.Name              = "epcomms";
45240 +    subsys->Subsys.Destroy    = ep_comms_fini;
45241 +    subsys->Subsys.AddRail    = ep_comms_add_rail;
45242 +    subsys->Subsys.RemoveRail = ep_comms_del_rail;
45243 +
45244 +    ep_subsys_add (sys, &subsys->Subsys);
45245 +    ep_kthread_init (&subsys->Thread);
45246 +
45247 +    if ((subsys->ForwardXmtr = ep_alloc_xmtr (subsys->Subsys.Sys)) == NULL)
45248 +       goto failed;
45249 +
45250 +    if (kernel_thread_create (ep_comms_thread, subsys) == NULL)
45251 +       goto failed;
45252 +    ep_kthread_started (&subsys->Thread);
45253 +
45254 +    return (0);
45255 +
45256 + failed:
45257 +    ep_subsys_del (sys, &subsys->Subsys);
45258 +    ep_comms_fini (&subsys->Subsys, sys);
45259 +
45260 +    return (ENOMEM);
45261 +}
45262 +
45263 +void
45264 +ep_comms_display (EP_SYS *sys, char *how)
45265 +{
45266 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME);
45267 +    struct list_head *el;
45268 +
45269 +    if (how == NULL || !strncmp (how, "rail", 4))
45270 +    {
45271 +       kmutex_lock (&subsys->Lock);
45272 +       list_for_each (el, &subsys->Rails) {
45273 +           EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
45274 +
45275 +           EP_RAIL_OP(commsRail, DisplayRail) (commsRail);
45276 +       }
45277 +       kmutex_unlock (&subsys->Lock);
45278 +    }
45279 +           
45280 +    if (how == NULL || !strncmp (how, "xmtr", 4))
45281 +       list_for_each (el, &subsys->Transmitters)
45282 +           ep_display_xmtr (&di_ep_debug, list_entry (el, EP_XMTR, Link));
45283 +
45284 +    if (how == NULL || !strncmp (how, "rcvr", 4)) 
45285 +       list_for_each (el, &subsys->Receivers)
45286 +           ep_display_rcvr (&di_ep_debug, list_entry (el, EP_RCVR, Link), (how && how[4] == ',') ? 1 : 0);
45287 +}
45288 +
45289 +int
45290 +ep_svc_indicator_set (EP_SYS *epsys, int svc_indicator) 
45291 +{
45292 +    EP_COMMS_SUBSYS  *subsys;
45293 +    struct list_head *el;
45294 +
45295 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d \n",svc_indicator);
45296 +
45297 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
45298 +       return (EP_EINVAL);
45299 +
45300 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
45301 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_set: ep_subsys_find failed\n");
45302 +       return (EP_EINVAL);
45303 +    }
45304 +
45305 +
45306 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
45307 +    list_for_each (el, &subsys->Rails) { 
45308 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
45309 +
45310 +       cm_svc_indicator_set(commsRail->Rail, svc_indicator);
45311 +    }
45312 +    kmutex_unlock (&subsys->Lock);
45313 +
45314 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d success\n",svc_indicator);
45315 +    return (EP_SUCCESS);
45316 +}
45317 +
45318 +int
45319 +ep_svc_indicator_clear (EP_SYS *epsys, int svc_indicator) 
45320 +{
45321 +    EP_COMMS_SUBSYS  *subsys;
45322 +    struct list_head *el;
45323 +
45324 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d \n",svc_indicator);
45325 +
45326 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
45327 +       return (EP_EINVAL);
45328 +
45329 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
45330 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_clear: ep_subsys_find failed\n");
45331 +       return (EP_EINVAL);
45332 +    }
45333 +
45334 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
45335 +    list_for_each (el, &subsys->Rails) { 
45336 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
45337 +
45338 +       cm_svc_indicator_clear(commsRail->Rail, svc_indicator);
45339 +    }
45340 +    kmutex_unlock (&subsys->Lock);
45341 +
45342 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d success\n",svc_indicator);
45343 +    return (EP_SUCCESS);
45344 +}
45345 +
45346 +int 
45347 +ep_svc_indicator_is_set (EP_SYS *epsys, int svc_indicator, int nodeId) 
45348 +{
45349 +    EP_COMMS_SUBSYS  *subsys;
45350 +    struct list_head *el;
45351 +    int               set = 0;
45352 +
45353 +    EPRINTF2 (DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d \n", svc_indicator, nodeId);
45354 +
45355 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
45356 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_is_set: ep_subsys_find failed\n");
45357 +       return (0);
45358 +    }
45359 +
45360 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
45361 +    list_for_each (el, &subsys->Rails) { 
45362 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
45363 +
45364 +       set |= cm_svc_indicator_is_set(commsRail->Rail, svc_indicator, nodeId);
45365 +    }
45366 +    kmutex_unlock (&subsys->Lock);
45367 +
45368 +    EPRINTF3 (DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d returning %d\n", svc_indicator, nodeId, set);
45369 +    return set;
45370 +}
45371 +
45372 +int
45373 +ep_svc_indicator_bitmap (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) 
45374 +{
45375 +    EP_COMMS_SUBSYS  *subsys;
45376 +    struct list_head *el;
45377 +
45378 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_bitmap: svc %d\n", svc_indicator);
45379 +
45380 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
45381 +       return (-1);
45382 +
45383 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
45384 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_bitmap: ep_subsys_find failed\n");
45385 +       return (-2);
45386 +    }
45387 +
45388 +    /* clear bitmap */
45389 +    bt_zero (bitmap, nnodes);
45390 +
45391 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
45392 +    list_for_each (el, &subsys->Rails) { 
45393 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
45394 +
45395 +       /* this will or in each bit map */
45396 +       cm_svc_indicator_bitmap (commsRail->Rail, svc_indicator, bitmap, low, nnodes);
45397 +    }
45398 +    kmutex_unlock (&subsys->Lock);
45399 +
45400 +    return (0);
45401 +}
45402 +
45403 +int
45404 +ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) 
45405 +{
45406 +    int i;
45407 +
45408 +    EPRINTF1 (DBG_SVC,"ep_xmtr_svc_indicator_bitmap: svc %d\n", svc_indicator);
45409 +
45410 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
45411 +       return (-1);
45412 +
45413 +    /* clear bitmap */
45414 +    bt_zero (bitmap, nnodes);
45415 +
45416 +    for (i = 0; i < EP_MAX_RAILS; i++)
45417 +    {
45418 +       if (xmtr->RailMask & (1 << i) )
45419 +       {
45420 +           /* this will or in each bit map */
45421 +           cm_svc_indicator_bitmap (xmtr->Rails[i]->CommsRail->Rail, svc_indicator, bitmap, low, nnodes);
45422 +       }
45423 +    }
45424 +
45425 +    return (0);
45426 +}
45427 +
45428 +EP_RAILMASK
45429 +ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId)
45430 +{
45431 +    EP_COMMS_SUBSYS  *subsys;
45432 +    struct list_head *el;
45433 +    EP_RAILMASK       rmask=0;
45434 +
45435 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL)
45436 +       return (rmask);
45437 +
45438 +    kmutex_lock (&subsys->Lock); /* walking rails list and reading info from Rail */
45439 +    list_for_each (el, &subsys->Rails) { 
45440 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
45441 +
45442 +       if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId))
45443 +            rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
45444 +    }
45445 +    kmutex_unlock (&subsys->Lock);
45446 +
45447 +    return (rmask);
45448 +}
45449 +
45450 +EP_RAILMASK
45451 +ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId)
45452 +{
45453 +    EP_RAILMASK    rmask=0;
45454 +    EP_COMMS_RAIL *commsRail;
45455 +    int            i;
45456 +
45457 +    for (i = 0; i < EP_MAX_RAILS; i++)
45458 +    {
45459 +       if (xmtr->RailMask & (1 << i) )
45460 +       {
45461 +           commsRail = xmtr->Rails[i]->CommsRail;
45462 +
45463 +           if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId))
45464 +               rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
45465 +       }
45466 +    }   
45467 +    
45468 +    EPRINTF3 (DBG_SVC, "ep_xmtr_svc_indicator_railmask: svc %d node %d mask 0x%x\n",  svc_indicator, nodeId, rmask);
45469 +
45470 +    return (rmask);
45471 +}
45472 +
45473 +EP_RAILMASK
45474 +ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE service)
45475 +{
45476 +    EP_COMMS_SUBSYS  *subsys;
45477 +    EP_RAILMASK       rmask=0;
45478 +    struct list_head *el;
45479 +    
45480 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL)
45481 +       return (rmask);
45482 +    
45483 +    kmutex_lock (&subsys->Lock);
45484 +    list_for_each (el, &subsys->Receivers) {
45485 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45486 +
45487 +       if (rcvr->Service == service)
45488 +           rmask |= rcvr->RailMask; 
45489 +    }
45490 +    kmutex_unlock(&subsys->Lock);
45491 +
45492 +    return (rmask);
45493 +}
45494 +
45495 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
45496 +uint32_t
45497 +ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags)
45498 +{
45499 +    EP_NMH   *nmh;
45500 +    int       i;
45501 +    uint16_t  check_data = 0;
45502 +    uint16_t  check_env  = 0;
45503 +
45504 +    for (i = 0; i < nFrags; i++) {
45505 +       /* find the nmh for this frag */
45506 +       nmh = ep_nmh_find (&sys->MappingTable, &nmd[i]);
45507 +
45508 +       ASSERT( nmh != NULL);
45509 +
45510 +       /* add the next frag to the check sum */
45511 +       check_data = nmh->nmh_ops->op_calc_check_sum (sys, nmh, &nmd[i], check_data);
45512 +    }
45513 +
45514 +    check_env = rolling_check_sum ((char *) env, offsetof(EP_ENVELOPE, CheckSum), 0);
45515 +
45516 +    return (EP_ENVELOPE_CHECK_SUM | ( (check_env & 0x7FFF) << 16) | (check_data & 0xFFFF));
45517 +}
45518 +#endif
45519 +
45520 +/*
45521 + * Local variables:
45522 + * c-file-style: "stroustrup"
45523 + * End:
45524 + */
45525 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan3.c
45526 ===================================================================
45527 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms_elan3.c 2004-02-23 16:02:56.000000000 -0500
45528 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan3.c      2005-07-28 14:52:52.859677064 -0400
45529 @@ -0,0 +1,191 @@
45530 +/*
45531 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45532 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45533 + *
45534 + *    For licensing information please see the supplied COPYING file
45535 + *
45536 + */
45537 +
45538 +#ident "@(#)$Id: epcomms_elan3.c,v 1.60 2004/08/03 11:34:34 david Exp $"
45539 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3.c,v $ */
45540 +
45541 +#include <qsnet/kernel.h>
45542 +
45543 +#include <elan/kcomm.h>
45544 +#include <elan/epsvc.h>
45545 +#include <elan/epcomms.h>
45546 +
45547 +#include "kcomm_elan3.h"
45548 +#include "epcomms_elan3.h"
45549 +
45550 +void
45551 +ep3comms_flush_callback (void *arg, statemap_t *map)
45552 +{
45553 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
45554 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
45555 +    struct list_head *el;
45556 +
45557 +    kmutex_lock (&subsys->Lock);
45558 +    list_for_each (el, &subsys->Transmitters) {
45559 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45560 +       
45561 +       if (xmtr->Rails[commsRail->Rail->Number])
45562 +           ep3xmtr_flush_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
45563 +    }
45564 +
45565 +    list_for_each (el, &subsys->Receivers) {
45566 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45567 +       
45568 +       if (rcvr->Rails[commsRail->Rail->Number])
45569 +           ep3rcvr_flush_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
45570 +    }
45571 +    kmutex_unlock (&subsys->Lock);
45572 +}
45573 +
45574 +void
45575 +ep3comms_failover_callback (void *arg, statemap_t *map)
45576 +{
45577 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
45578 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
45579 +    struct list_head *el;
45580 +
45581 +    kmutex_lock (&subsys->Lock);
45582 +    list_for_each (el, &subsys->Transmitters) {
45583 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45584 +       
45585 +       if (xmtr->Rails[commsRail->Rail->Number])
45586 +           ep3xmtr_failover_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
45587 +    }
45588 +
45589 +    list_for_each (el, &subsys->Receivers) {
45590 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45591 +       
45592 +       if (rcvr->Rails[commsRail->Rail->Number])
45593 +           ep3rcvr_failover_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
45594 +    }
45595 +    kmutex_unlock (&subsys->Lock);
45596 +}
45597 +
45598 +void
45599 +ep3comms_disconnect_callback (void *arg, statemap_t *map)
45600 +{
45601 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
45602 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
45603 +    struct list_head *el;
45604 +
45605 +    kmutex_lock (&subsys->Lock);
45606 +    list_for_each (el, &subsys->Transmitters) {
45607 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
45608 +       
45609 +       if (xmtr->Rails[commsRail->Rail->Number])
45610 +           ep3xmtr_disconnect_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
45611 +    }
45612 +
45613 +    list_for_each (el, &subsys->Receivers) {
45614 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
45615 +       
45616 +       if (rcvr->Rails[commsRail->Rail->Number])
45617 +           ep3rcvr_disconnect_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
45618 +    }
45619 +    kmutex_unlock (&subsys->Lock);
45620 +}
45621 +
45622 +EP_COMMS_RAIL *
45623 +ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r)
45624 +{
45625 +    EP3_RAIL         *rail   = (EP3_RAIL *) r;
45626 +    ELAN3_DEV        *dev    = rail->Device;
45627 +    EP3_COMMS_RAIL   *commsRail;
45628 +    EP3_InputQueue    qdesc;
45629 +    int i;
45630 +
45631 +    KMEM_ZALLOC (commsRail, EP3_COMMS_RAIL *, sizeof (EP3_COMMS_RAIL), TRUE);
45632 +
45633 +    if (commsRail == NULL)
45634 +       return NULL;
45635 +    
45636 +    commsRail->Generic.Ops.DelRail          = ep3comms_del_rail;
45637 +    commsRail->Generic.Ops.DisplayRail      = ep3comms_display_rail;
45638 +    commsRail->Generic.Ops.Rcvr.AddRail     = ep3rcvr_add_rail;
45639 +    commsRail->Generic.Ops.Rcvr.DelRail     = ep3rcvr_del_rail;
45640 +    commsRail->Generic.Ops.Rcvr.Check       = ep3rcvr_check;
45641 +    commsRail->Generic.Ops.Rcvr.QueueRxd    = ep3rcvr_queue_rxd;
45642 +    commsRail->Generic.Ops.Rcvr.RpcPut      = ep3rcvr_rpc_put;
45643 +    commsRail->Generic.Ops.Rcvr.RpcGet      = ep3rcvr_rpc_get;
45644 +    commsRail->Generic.Ops.Rcvr.RpcComplete = ep3rcvr_rpc_complete;
45645 +
45646 +    commsRail->Generic.Ops.Rcvr.StealRxd    = ep3rcvr_steal_rxd;
45647 +
45648 +    commsRail->Generic.Ops.Rcvr.FillOutRailStats = ep3rcvr_fillout_rail_stats;
45649 +
45650 +    commsRail->Generic.Ops.Rcvr.DisplayRcvr = ep3rcvr_display_rcvr;
45651 +    commsRail->Generic.Ops.Rcvr.DisplayRxd  = ep3rcvr_display_rxd;
45652 +
45653 +    commsRail->Generic.Ops.Xmtr.AddRail     = ep3xmtr_add_rail;
45654 +    commsRail->Generic.Ops.Xmtr.DelRail     = ep3xmtr_del_rail;
45655 +    commsRail->Generic.Ops.Xmtr.Check       = ep3xmtr_check;
45656 +    commsRail->Generic.Ops.Xmtr.BindTxd     = ep3xmtr_bind_txd;
45657 +    commsRail->Generic.Ops.Xmtr.UnbindTxd   = ep3xmtr_unbind_txd;
45658 +    commsRail->Generic.Ops.Xmtr.PollTxd     = ep3xmtr_poll_txd;
45659 +    commsRail->Generic.Ops.Xmtr.CheckTxdState = ep3xmtr_check_txd_state;
45660 +
45661 +    commsRail->Generic.Ops.Xmtr.DisplayXmtr = ep3xmtr_display_xmtr;
45662 +    commsRail->Generic.Ops.Xmtr.DisplayTxd  = ep3xmtr_display_txd;
45663 +
45664 +    commsRail->Generic.Ops.Xmtr.FillOutRailStats = ep3xmtr_fillout_rail_stats;
45665 +
45666 +    /* Allocate the input queues at their fixed elan address */
45667 +    if (! (commsRail->QueueDescs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * sizeof (EP3_InputQueue), PAGESIZE), EP_PERM_ALL, 0)))
45668 +    {
45669 +       KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL));
45670 +       return NULL;
45671 +    }
45672 +
45673 +    qdesc.q_state          = E3_QUEUE_FULL;
45674 +    qdesc.q_base           = 0;
45675 +    qdesc.q_top            = 0;
45676 +    qdesc.q_fptr           = 0;
45677 +    qdesc.q_bptr           = 0;
45678 +    qdesc.q_size           = 0;
45679 +    qdesc.q_event.ev_Count = 0;
45680 +    qdesc.q_event.ev_Type  = 0;
45681 +
45682 +    /* Initialise all queue entries to be full */
45683 +    for (i = 0; i < EP_MSG_NSVC; i++)
45684 +       elan3_sdram_copyl_to_sdram (dev, &qdesc, commsRail->QueueDescs + (i * sizeof (EP3_InputQueue)), sizeof (EP3_InputQueue));
45685 +
45686 +    ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback,      commsRail);
45687 +    ep_register_callback (r, EP_CB_FLUSH_FLUSHING,  ep3comms_flush_callback,      commsRail);
45688 +    ep_register_callback (r, EP_CB_FAILOVER,        ep3comms_failover_callback,   commsRail);
45689 +    ep_register_callback (r, EP_CB_DISCONNECTING,   ep3comms_disconnect_callback, commsRail);
45690 +
45691 +    return (EP_COMMS_RAIL *) commsRail;
45692 +}
45693 +
45694 +void
45695 +ep3comms_del_rail (EP_COMMS_RAIL *r)
45696 +{
45697 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) r;
45698 +    EP_RAIL        *rail      = commsRail->Generic.Rail;
45699 +
45700 +    ep_remove_callback (rail, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback,      commsRail);
45701 +    ep_remove_callback (rail, EP_CB_FLUSH_FLUSHING,  ep3comms_flush_callback,      commsRail);
45702 +    ep_remove_callback (rail, EP_CB_FAILOVER,        ep3comms_failover_callback,   commsRail);
45703 +    ep_remove_callback (rail, EP_CB_DISCONNECTING,   ep3comms_disconnect_callback, commsRail);
45704 +
45705 +    ep_free_memory_elan (rail, EP_EPCOMMS_QUEUE_BASE);
45706 +
45707 +    KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL));
45708 +}
45709 +
45710 +void
45711 +ep3comms_display_rail (EP_COMMS_RAIL *r)
45712 +{
45713 +    
45714 +}
45715 +
45716 +/*
45717 + * Local variables:
45718 + * c-file-style: "stroustrup"
45719 + * End:
45720 + */
45721 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan3.h
45722 ===================================================================
45723 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms_elan3.h 2004-02-23 16:02:56.000000000 -0500
45724 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan3.h      2005-07-28 14:52:52.860676912 -0400
45725 @@ -0,0 +1,330 @@
45726 +/*
45727 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45728 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45729 + *
45730 + *    For licensing information please see the supplied COPYING file
45731 + *
45732 + */
45733 +
45734 +#ifndef __EPCOMMS_ELAN3_H
45735 +#define __EPCOMMS_ELAN3_H
45736 +
45737 +#ident "@(#)$Id: epcomms_elan3.h,v 1.27.2.1 2004/11/12 10:54:51 mike Exp $"
45738 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3.h,v $ */
45739 +
45740 +#define EP3_DMAFAILCOUNT               3
45741 +
45742 +
45743 +/* Main/Elan spinlock */
45744 +typedef struct ep3_spinlock_elan 
45745 +{
45746 +    volatile E3_uint32 sl_lock;                /* main wants a lock */
45747 +    volatile E3_uint32 sl_seq;                 /* thread owns this word */
45748 +    /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */
45749 +    E3_uint64          sl_pad[14];             /* pad to 64-bytes */
45750 +} EP3_SPINLOCK_ELAN;
45751 +
45752 +/* Declare this as a main memory cache block for efficiency */
45753 +typedef struct ep3_spinlock_main {
45754 +    volatile E3_uint32 sl_seq;                 /* copy of seq number updated by Elan */
45755 +    volatile E3_uint32 sl_pad[15];             /* pad to 64-bytes */
45756 +} EP3_SPINLOCK_MAIN;
45757 +
45758 +#if defined (__ELAN3__)
45759 +
45760 +extern void ep3_spinblock (EP3_SPINLOCK_ELAN *, EP3_SPINLOCK_MAIN *);
45761 +
45762 +#define EP3_SPINENTER(SLE,SL) \
45763 +do {\
45764 +       (SLE)->sl_seq++; \
45765 +       if ((SLE)->sl_lock) \
45766 +               ep3_spinblock(SLE, SL);\
45767 +} while (0)
45768 +
45769 +#define EP3_SPINEXIT(SLE,SL) \
45770 +do {\
45771 +       (SL)->sl_seq = (SLE)->sl_seq;\
45772 +} while (0)
45773 +
45774 +#else
45775 +
45776 +#define EP3_SPINENTER(DEV,SLE,SL) do { \
45777 +    E3_uint32 seq; \
45778 +\
45779 +    mb();\
45780 +    elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);\
45781 +    mb();\
45782 +    seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\
45783 +    while (seq != (SL)->sl_seq)\
45784 +    {\
45785 +       while ((SL)->sl_seq == (seq - 1))\
45786 +       {\
45787 +           mb();\
45788 +\
45789 +           DELAY (1); \
45790 +       }\
45791 +       seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\
45792 +    }\
45793 +} while (0)
45794 +
45795 +#define EP3_SPINEXIT(DEV,SLE,SL) do { \
45796 +       wmb(); \
45797 +       elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);\
45798 +       mmiob(); \
45799 +} while (0)
45800 +
45801 +#endif /* ! __ELAN3__ */
45802 +
45803 +/* per-rail elan memory portion receive descriptor */
45804 +typedef struct ep3_rxd_rail_elan
45805 +{
45806 +    E3_DMA             Dmas[EP_MAXFRAG+1];                     /* Dma's for fetching data/putting data & status blk */
45807 +    E3_Event           ChainEvent[EP_MAXFRAG];                 /* Events to chain dmas */
45808 +    E3_BlockCopyEvent  DataEvent;                              /* message received block event */
45809 +    E3_BlockCopyEvent  DoneEvent;                              /* RPC status block event */
45810 +    
45811 +    EP_NMD             Data;                                   /* Network mapping handle for receive data */
45812 +
45813 +    E3_Addr            RxdMain;                                /* pointer to main memory portion */
45814 +
45815 +    E3_Addr            Next;                                   /* linked list when on pending list (elan address) */
45816 +
45817 +    E3_uint64          MainAddr;                               /* kernel address of ep_rxd_main */
45818 +} EP3_RXD_RAIL_ELAN;
45819 +
45820 +#define EP3_RXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_RXD_RAIL_ELAN), E3_DMA_ALIGN)
45821 +
45822 +/* per-rail main memory portion of receive descriptor */
45823 +typedef struct ep3_rxd_rail_main
45824 +{
45825 +    E3_uint32          DataEvent;                              /* dest for data event */
45826 +    E3_uint32          DoneEvent;                              /* dest for done event */
45827 +} EP3_RXD_RAIL_MAIN;
45828 +
45829 +#define EP3_RXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_RXD_RAIL_MAIN), sizeof (E3_uint32))
45830 +
45831 +#if !defined(__ELAN3__)
45832 +/* Kernel memory portion of per-rail receive descriptor */
45833 +typedef struct ep3_rxd_rail
45834 +{
45835 +    EP_RXD_RAIL                Generic;                                /* generic rxd rail */
45836 +
45837 +    EP3_COOKIE         DataCookie;                             /* Event cookie */
45838 +    EP3_COOKIE         DoneCookie;                             /* Event cookie */
45839 +    EP3_COOKIE         ChainCookie[EP_MAXFRAG];                /* Event cookie */
45840 +
45841 +    sdramaddr_t                RxdElan;                                /* per-rail elan receive descriptor */
45842 +    E3_Addr            RxdElanAddr;                            /*   and elan address */
45843 +
45844 +    EP3_RXD_RAIL_MAIN  *RxdMain;                               /* per-rail main receive descriptor */
45845 +    E3_Addr            RxdMainAddr;                            /*   and elan address */
45846 +
45847 +    EP_BACKOFF         Backoff;                                /* dma backoff */
45848 +} EP3_RXD_RAIL;
45849 +
45850 +#define EP3_NUM_RXD_PER_BLOCK  16
45851 +
45852 +typedef struct ep3_rxd_rail_block
45853 +{
45854 +    struct list_head   Link;
45855 +    
45856 +    EP3_RXD_RAIL        Rxd[EP3_NUM_RXD_PER_BLOCK];
45857 +} EP3_RXD_RAIL_BLOCK;
45858 +
45859 +#endif /* ! __ELAN3__ */
45860 +
45861 +typedef struct ep3_rcvr_rail_elan                              /* Elan memory service structure */
45862 +{
45863 +    EP3_SPINLOCK_ELAN  ThreadLock;                             /* elan memory portion of spin lock */
45864 +    EP3_SPINLOCK_ELAN  PendingLock;                            /* spin lock for pending rx list */
45865 +
45866 +    E3_Addr           PendingDescs;                            /* list of pending receive descriptors */
45867 +    E3_uint32          ThreadShouldHalt;                        /* marks that the thread should halt */
45868 +
45869 +    E3_uint64         MainAddr;                                /* kernel address of ep_rcvr (for StallThreadForNoDescs)*/
45870 +} EP3_RCVR_RAIL_ELAN;
45871 +
45872 +typedef struct ep3_rcvr_rail_main                              /* Main memory service structure */
45873 +{
45874 +    EP3_SPINLOCK_MAIN  ThreadLock;                             /* main memory portion of spin lock */
45875 +    EP3_SPINLOCK_MAIN  PendingLock;                            /* spinlock for pending rx list */
45876 +
45877 +    volatile unsigned   PendingDescsTailp;                     /* next pointer of last receive descriptor on pending list */
45878 +} EP3_RCVR_RAIL_MAIN;
45879 +
45880 +#if !defined(__ELAN3__)
45881 +
45882 +typedef struct ep3_rcvr_rail_stats
45883 +{
45884 +    unsigned long some_stat;
45885 +} EP3_RCVR_RAIL_STATS;
45886 +
45887 +typedef struct ep3_rcvr_rail
45888 +{
45889 +    EP_RCVR_RAIL       Generic;                                /* generic portion */
45890 +    
45891 +    EP3_RCVR_RAIL_MAIN *RcvrMain;
45892 +    E3_Addr            RcvrMainAddr;
45893 +    sdramaddr_t         RcvrElan;
45894 +    E3_Addr             RcvrElanAddr;
45895 +
45896 +    sdramaddr_t                InputQueueBase;                         /* base of receive queue */
45897 +    E3_Addr            InputQueueAddr;                         /* elan address of receive queue */
45898 +
45899 +    E3_Addr            ThreadStack;                            /* Thread processor stack */
45900 +    E3_Addr            ThreadWaiting;                          /* Elan thread is waiting as no receive descriptors pending (sp stored here ) */
45901 +    E3_Addr            ThreadHalted;                           /* Elan thread is waiting as it was requested to halt */
45902 +
45903 +    struct list_head   FreeDescList;                           /* freelist of per-rail receive descriptors */
45904 +    unsigned int       FreeDescCount;                          /*   and number on free list */
45905 +    unsigned int        TotalDescCount;                                /*   total number created */
45906 +    spinlock_t         FreeDescLock;                           /*   and lock for free list */
45907 +    struct list_head    DescBlockList;                         /* list of receive descriptor blocks */
45908 +
45909 +    unsigned int        FreeDescWaiting;                       /* waiting for descriptors to be freed */
45910 +    kcondvar_t         FreeDescSleep;                          /*   and sleep here */
45911 +
45912 +    unsigned int       CleanupWaiting;                         /* waiting for cleanup */
45913 +    kcondvar_t         CleanupSleep;                           /*   and sleep here */
45914 +
45915 +    EP3_RCVR_RAIL_STATS stats;                                  /* elan3 specific rcvr_rail stats */
45916 +} EP3_RCVR_RAIL;
45917 +
45918 +#endif /* ! __ELAN3__ */
45919 +
45920 +/* per-rail portion of transmit descriptor */
45921 +typedef struct ep3_txd_rail_elan
45922 +{
45923 +    EP_ENVELOPE               Envelope;                                /* message envelope */
45924 +    EP_PAYLOAD        Payload;                                 /* message payload */
45925 +
45926 +    E3_BlockCopyEvent EnveEvent;                               /* envelope event */
45927 +    E3_BlockCopyEvent DataEvent;                               /* data transfer event */
45928 +    E3_BlockCopyEvent DoneEvent;                               /* rpc done event */
45929 +} EP3_TXD_RAIL_ELAN;
45930 +
45931 +#define EP3_TXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_TXD_RAIL_ELAN), E3_BLK_ALIGN)
45932 +
45933 +typedef struct ep3_txd_rail_main
45934 +{
45935 +    E3_uint32         EnveEvent;                               /* dest for envelope event */
45936 +    E3_uint32         DataEvent;                               /* dest for data transfer event */
45937 +    E3_uint32        DoneEvent;                                /* dest for rpc done event */
45938 +} EP3_TXD_RAIL_MAIN;
45939 +
45940 +#define EP3_TXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_TXD_RAIL_MAIN), E3_BLK_ALIGN)
45941 +
45942 +#if !defined(__ELAN3__)
45943 +
45944 +typedef struct ep3_txd_rail
45945 +{
45946 +    EP_TXD_RAIL               Generic;                                 /* generic txd rail */
45947 +
45948 +    EP3_COOKIE        EnveCookie;                              /* Event cookies */
45949 +    EP3_COOKIE        DataCookie;
45950 +    EP3_COOKIE        DoneCookie;
45951 +
45952 +    sdramaddr_t               TxdElan;                                 /* Elan TX descriptor */
45953 +    E3_Addr           TxdElanAddr;                             /*  and elan address */
45954 +
45955 +    EP3_TXD_RAIL_MAIN *TxdMain;                                        /* Elan Main memory tx descriptor */
45956 +    E3_Addr           TxdMainAddr;                             /*  and elan address */
45957 +
45958 +    EP_BACKOFF        Backoff;                                 /* dma backoff */
45959 +} EP3_TXD_RAIL;
45960 +
45961 +
45962 +#define EP3_NUM_TXD_PER_BLOCK  16
45963 +
45964 +typedef struct ep3_txd_rail_block
45965 +{
45966 +    struct list_head   Link;
45967 +    
45968 +    EP3_TXD_RAIL       Txd[EP3_NUM_TXD_PER_BLOCK];
45969 +} EP3_TXD_RAIL_BLOCK;
45970 +
45971 +typedef struct ep3_xmtr_rail_stats
45972 +{
45973 +    unsigned long some_stat;
45974 +} EP3_XMTR_RAIL_STATS;
45975 +
45976 +typedef struct ep3_xmtr_rail
45977 +{
45978 +    EP_XMTR_RAIL       Generic;                                /* generic portion */
45979 +
45980 +    struct list_head   FreeDescList;                           /* freelist of per-rail receive descriptors */
45981 +    unsigned int       FreeDescCount;                          /*   and number on free list */
45982 +    unsigned int        TotalDescCount;
45983 +    spinlock_t         FreeDescLock;                           /*   and lock for free list */
45984 +    struct list_head    DescBlockList;                         /* list of receive descriptor blocks */
45985 +
45986 +    unsigned int        FreeDescWaiting;                       /* waiting for descriptors to be freed */
45987 +    kcondvar_t          FreeDescSleep;                         /*   and sleep here */
45988 +
45989 +    EP3_XMTR_RAIL_STATS stats;                                  /* elan3 specific xmtr rail stats */
45990 +} EP3_XMTR_RAIL;
45991 +
45992 +typedef struct ep3_comms_rail
45993 +{
45994 +    EP_COMMS_RAIL      Generic;                                /* generic comms rail */
45995 +    sdramaddr_t                QueueDescs;                             /* input queue descriptors */
45996 +} EP3_COMMS_RAIL;
45997 +
45998 +/* epcommsTx_elan3.c */
45999 +extern void           ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
46000 +extern void           ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
46001 +extern void           ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
46002 +
46003 +/* epcommsRx_elan3.c */
46004 +extern void          CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdMainAddr, E3_uint32 PAckVal);
46005 +extern void           StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp);
46006 +extern void           StallThreadForHalted  (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp);
46007 +
46008 +extern void           ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
46009 +extern void           ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
46010 +extern void           ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
46011 +
46012 +/* epcomms_elan3.c */
46013 +extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r);
46014 +extern void           ep3comms_del_rail (EP_COMMS_RAIL *r);
46015 +extern void           ep3comms_display_rail (EP_COMMS_RAIL *r);
46016 +
46017 +/* epcommsTx_elan3.c */
46018 +extern int            ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
46019 +extern void           ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int phase);
46020 +extern int            ep3xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
46021 +extern long           ep3xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
46022 +extern void           ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
46023 +extern void           ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
46024 +extern int            ep3xmtr_check_txd_state(EP_TXD *txd);
46025 +
46026 +extern void           ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
46027 +extern void           ep3xmtr_display_txd  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
46028 +
46029 +extern void           ep3xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
46030 +
46031 +/* epcommsRx_elan3.c */
46032 +extern int           ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
46033 +extern void          ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
46034 +extern void          ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
46035 +extern void          ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
46036 +
46037 +extern EP_RXD       *ep3rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail);
46038 +
46039 +extern long          ep3rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
46040 +extern void           ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
46041 +extern void           ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
46042 +
46043 +extern void           ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
46044 +extern void           ep3rcvr_display_rxd  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
46045 +
46046 +extern void           ep3rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
46047 +
46048 +#endif /* !defined(__ELAN3__) */
46049 +
46050 +/*
46051 + * Local variables:
46052 + * c-file-style: "stroustrup"
46053 + * End:
46054 + */
46055 +#endif /* __EPCOMMS_ELAN3_H */
46056 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan3_thread.c
46057 ===================================================================
46058 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms_elan3_thread.c  2004-02-23 16:02:56.000000000 -0500
46059 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan3_thread.c       2005-07-28 14:52:52.861676760 -0400
46060 @@ -0,0 +1,296 @@
46061 +/*
46062 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46063 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46064 + *
46065 + *    For licensing information please see the supplied COPYING file
46066 + *
46067 + */
46068 +
46069 +#ident "@(#)$Id: epcomms_elan3_thread.c,v 1.4 2004/01/20 11:03:15 david Exp $"
46070 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3_thread.c,v $ */
46071 +
46072 +//#include <qsnet/types.h>
46073 +
46074 +typedef char               int8_t;
46075 +typedef unsigned char      uint8_t;
46076 +typedef short              int16_t;
46077 +typedef unsigned short     uint16_t;
46078 +typedef int                int32_t;
46079 +typedef unsigned int       uint32_t;
46080 +typedef long long          int64_t;
46081 +typedef unsigned long long uint64_t;
46082 +
46083 +#include <elan3/e3types.h>
46084 +#include <elan3/events.h>
46085 +#include <elan3/elanregs.h>
46086 +#include <elan3/intrinsics.h>
46087 +
46088 +#include <elan/nmh.h>
46089 +#include <elan/kcomm.h>
46090 +#include <elan/epcomms.h>
46091 +
46092 +#include "kcomm_vp.h"
46093 +#include "kcomm_elan3.h"
46094 +#include "epcomms_elan3.h"
46095 +
46096 +#ifndef offsetof
46097 +#define offsetof(s, m)                 (unsigned long)(&(((s *)0)->m))
46098 +#endif
46099 +
46100 +EP3_RAIL_ELAN *rail;
46101 +EP3_RCVR_RAIL_ELAN *r;
46102 +EP3_RCVR_RAIL_MAIN *rm;
46103 +
46104 +void
46105 +ep3comms_rcvr (EP3_RAIL_ELAN *rail, EP3_RCVR_RAIL_ELAN *rcvrElan, EP3_RCVR_RAIL_MAIN *rcvrMain, 
46106 +             EP3_InputQueue *q, unsigned int *cookies)
46107 +{
46108 +    int           count = 1;
46109 +    E3_Addr       nfptr = q->q_fptr + q->q_size;
46110 +    E3_uint32     tmp;
46111 +    int           i;
46112 +    E3_Addr       buffer;
46113 +    int                  len;
46114 +    E3_DMA       *dma;
46115 +    E3_Event     *event;
46116 +
46117 +    /* clear the queue state to allow envelopes to arrive */
46118 +    q->q_state = 0;
46119 +
46120 +    for (;;)
46121 +    {
46122 +       if (! rcvrElan->ThreadShouldHalt)
46123 +           c_waitevent ((E3_Event *) &q->q_event, count);                                              /* HALT POINT */
46124 +
46125 +       if (rcvrElan->ThreadShouldHalt && nfptr == q->q_bptr)
46126 +       {
46127 +           asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan));
46128 +           asm volatile ("ta %0"        : /* no outputs */ : "i" (EP3_UNIMP_THREAD_HALTED));           /* HALT POINT */
46129 +           continue;
46130 +       }
46131 +
46132 +       count = 0;
46133 +       do {
46134 +           /* Process the message at nfptr */
46135 +           EP_ENVELOPE      *env = (EP_ENVELOPE *) nfptr;
46136 +           EP3_RXD_RAIL_ELAN *rxd;
46137 +           int ack;
46138 +           
46139 +           EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);                                        /* HALT POINT */
46140 +           
46141 +           while ((rxd = (EP3_RXD_RAIL_ELAN *)rcvrElan->PendingDescs) == 0)
46142 +           {
46143 +               /* no receive descriptors, so trap to the kernel to wait
46144 +                * for receive descriptor to be queued, we pass the rcvr
46145 +                * in %g1, so that the trap handler can restart us. */
46146 +               EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
46147 +               asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan));
46148 +               asm volatile ("ta %0"        : /* no outputs */ : "i" (EP3_UNIMP_TRAP_NO_DESCS));       /* HALT POINT */
46149 +               EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);                            /* HALT POINT */
46150 +           }
46151 +
46152 +           if (env->Version != EP_ENVELOPE_VERSION)
46153 +           {
46154 +               /* This envelope has been cancelled - so just consume it */
46155 +               EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
46156 +               goto consume_envelope;
46157 +           }
46158 +
46159 +           dma   = rxd->Dmas;
46160 +           event = rxd->ChainEvent;
46161 +
46162 +           if (EP_IS_MULTICAST(env->Attr))
46163 +           {
46164 +               dma->dma_type            = E3_DMA_TYPE (DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
46165 +               dma->dma_size            = BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t);
46166 +               dma->dma_source          = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, Bitmap);
46167 +               dma->dma_dest            = (E3_Addr) &((EP_RXD_MAIN *) rxd->RxdMain)->Bitmap;
46168 +               dma->dma_destEvent       = (E3_Addr) event;
46169 +               dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
46170 +               dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
46171 +               dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
46172 +               
46173 +               event->ev_Count = 1;
46174 +
46175 +               dma++; event++;
46176 +           }
46177 +
46178 +           if (env->nFrags == 0)
46179 +           {
46180 +               /* Generate a "get" DMA to accept the envelope and fire the rx handler */
46181 +               dma->dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
46182 +               dma->dma_size            = 0;
46183 +               dma->dma_destEvent       = (E3_Addr) &rxd->DataEvent;
46184 +               dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
46185 +               dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
46186 +               dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
46187 +               len = 0;
46188 +           }
46189 +           else
46190 +           {
46191 +               /* Generate the DMA chain to fetch the data */
46192 +               for (i = 0, buffer = rxd->Data.nmd_addr, len = 0; i < env->nFrags; i++, dma++, event++)
46193 +               {
46194 +                   dma->dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
46195 +                   dma->dma_size            = env->Frags[i].nmd_len;
46196 +                   dma->dma_source          = env->Frags[i].nmd_addr;
46197 +                   dma->dma_dest            = buffer;
46198 +                   dma->dma_destEvent       = (E3_Addr) event;
46199 +                   dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
46200 +                   dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
46201 +                   dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
46202 +                   
46203 +                   event->ev_Count = 1;
46204 +                   
46205 +                   buffer += dma->dma_size;
46206 +                   len    += dma->dma_size;
46207 +               }
46208 +               
46209 +               /* Point the last dma at the done event */
46210 +               (--dma)->dma_destEvent = (E3_Addr) &rxd->DataEvent;
46211 +               
46212 +               if (rxd->Data.nmd_len < len)
46213 +               {
46214 +                   /* The receive descriptor was too small for the message */
46215 +                   /* complete the message anyway,  but don't transfer any */
46216 +                   /* data,  we set the length to EP_MSG_TOO_BIG */
46217 +                   for (i = 0, dma = rxd->Dmas; i < env->nFrags; i++, dma++)
46218 +                       dma->dma_size = 0;
46219 +                   
46220 +                   len = EP_MSG_TOO_BIG;
46221 +               }
46222 +           }
46223 +           
46224 +           /* Store the received message length in the rxdElan for CompleteEnvelope */
46225 +           rxd->Data.nmd_len = len;
46226 +
46227 +           /* Initialise %g1 with the  "rxd" so the trap handler can
46228 +            * complete the envelope processing if we trap while sending the
46229 +            * packet */
46230 +           asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rxd));
46231 +
46232 +           /* Generate a packet to start the data transfer */
46233 +           c_open (EP_VP_DATA (env->NodeId));
46234 +           c_sendtrans2 (TR_THREADIDENTIFY, rxd->Dmas->dma_destCookieVProc, 0, 0);
46235 +           c_sendmem (TR_SENDACK | TR_REMOTEDMA, 0, rxd->Dmas); 
46236 +           ack = c_close();
46237 +           
46238 +           /*
46239 +            * If we trapped for an output timeout, then the trap handler will have
46240 +            * completed processing this envelope and cleared the spinlock, so we just
46241 +            * need to update the queue descriptor.
46242 +            */
46243 +           if (ack == EP3_PAckStolen)
46244 +               goto consume_envelope;
46245 +           
46246 +           if (ack != E3_PAckOk)
46247 +           {
46248 +               /* our packet got nacked, so trap into the kernel so that
46249 +                * it can complete processing of this envelope.
46250 +                */
46251 +               asm volatile ("ta %0" : /* no outputs */ : "i" (EP3_UNIMP_TRAP_PACKET_NACKED));         /* HALT POINT */
46252 +               goto consume_envelope;
46253 +           }
46254 +
46255 +           /* remove the RXD from the pending list */
46256 +           EP3_SPINENTER (&rcvrElan->PendingLock, &rcvrMain->PendingLock);
46257 +           if ((rcvrElan->PendingDescs = rxd->Next) == 0)
46258 +               rcvrMain->PendingDescsTailp = 0;
46259 +           EP3_SPINEXIT (&rcvrElan->PendingLock, &rcvrMain->PendingLock);
46260 +
46261 +           /* Copy the envelope information - as 5 64 byte chunks.
46262 +            * We force the parameters in g5, g6 so that they aren't
46263 +            * trashed by the loadblk32 into the locals/ins
46264 +            */
46265 +           if (EP_HAS_PAYLOAD(env->Attr))
46266 +           { 
46267 +               register void *src asm ("g5") = (void *) env;
46268 +               register void *dst asm ("g6") = (void *)  &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope;
46269 +
46270 +               asm volatile (
46271 +                   "and     %%sp,63,%%g7               ! Calculate stack alignment\n"
46272 +                   "add     %%g7,64,%%g7               ! Space to save the registers\n"
46273 +                   "sub     %%sp,%%g7,%%sp             ! align stack\n" 
46274 +                   "stblock64 %%l0,[%%sp]              ! save the locals and ins\n"
46275 +
46276 +                   "ldblock64 [%0 + 0],%%l0            ! load 64-byte block into locals/ins\n"         /* copy envelope */
46277 +                   "stblock64 %%l0,[%1 + 0]            ! store 64-byte block from local/ins\n"
46278 +                   "ldblock64 [%0 + 64],%%l0           ! load 64-byte block into locals/ins\n"
46279 +                   "stblock64 %%l0,[%1 + 64]           ! store 64-byte block from local/ins\n"
46280 +
46281 +                   "ldblock64 [%0 + 128],%%l0          ! load 64-byte block into locals/ins\n"         /* copy payload */
46282 +                   "stblock64 %%l0,[%1 + 128]          ! store 64-byte block from local/ins\n"
46283 +                   "ldblock64 [%0 + 192],%%l0          ! load 64-byte block into locals/ins\n"
46284 +                   "stblock64 %%l0,[%1 + 192]          ! store 64-byte block from local/ins\n"
46285 +
46286 +                   "ldblock64 [%%sp],%%l0              ! restore locals and ins\n"
46287 +                   "add     %%sp,%%g7,%%sp             ! restore stack pointer\n"
46288 +                   : /* outputs */
46289 +                   : /* inputs */ "r" (src), "r" (dst)
46290 +                   : /* clobbered */ "g5", "g6", "g7" );
46291 +           }
46292 +           else
46293 +           { 
46294 +               register void *src asm ("g5") = (void *) env;
46295 +               register void *dst asm ("g6") = (void *)  &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope;
46296 +
46297 +               asm volatile (
46298 +                   "and     %%sp,63,%%g7               ! Calculate stack alignment\n"
46299 +                   "add     %%g7,64,%%g7               ! Space to save the registers\n"
46300 +                   "sub     %%sp,%%g7,%%sp             ! align stack\n" 
46301 +                   "stblock64 %%l0,[%%sp]              ! save the locals and ins\n"
46302 +
46303 +                   "ldblock64 [%0 + 0],%%l0            ! load 64-byte block into locals/ins\n"
46304 +                   "stblock64 %%l0,[%1 + 0]            ! store 64-byte block from local/ins\n"
46305 +                   "ldblock64 [%0 + 64],%%l0           ! load 64-byte block into locals/ins\n"
46306 +                   "stblock64 %%l0,[%1 + 64]           ! store 64-byte block from local/ins\n"
46307 +
46308 +                   "ldblock64 [%%sp],%%l0              ! restore locals and ins\n"
46309 +                   "add     %%sp,%%g7,%%sp             ! restore stack pointer\n"
46310 +                   : /* outputs */
46311 +                   : /* inputs */ "r" (src), "r" (dst)
46312 +                   : /* clobbered */ "g5", "g6", "g7" );
46313 +           }
46314 +
46315 +           /* Store the message length to indicate that I've finished */
46316 +           ((EP_RXD_MAIN *) rxd->RxdMain)->Len = rxd->Data.nmd_len;                                    /* PCI write  */
46317 +           
46318 +           EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
46319 +
46320 +       consume_envelope:
46321 +           /* Sample the queue full bit *BEFORE* moving the fptr.
46322 +            * Then only clear it if it was full before, otherwise,
46323 +            * as soon as the fptr is moved on the queue could fill 
46324 +            * up, and so clearing it could mark a full queue as 
46325 +            * empty.
46326 +            *
46327 +            * While the full bit is set, the queue is in a 'steady
46328 +            * state', so it is safe to set the q_state
46329 +            * 
46330 +            */
46331 +           if (((tmp = q->q_state) & E3_QUEUE_FULL) == 0)
46332 +               q->q_fptr = nfptr;                              /* update queue */
46333 +           else
46334 +           {
46335 +               q->q_fptr = nfptr;                              /* update queue */
46336 +               q->q_state = tmp &~E3_QUEUE_FULL;               /* and clear full flag */
46337 +           }
46338 +
46339 +           count++;                                            /* bump message count */
46340 +           if (nfptr == q->q_top)                              /* queue wrap */
46341 +               nfptr = q->q_base;
46342 +           else
46343 +               nfptr += q->q_size;
46344 +
46345 +           c_break_busywait();                                 /* be nice              HALT POINT */
46346 +
46347 +       } while (nfptr != q->q_bptr);                           /* loop until Fptr == Bptr */
46348 +    }
46349 +}
46350 +
46351 +
46352 +/*
46353 + * Local variables:
46354 + * c-file-style: "stroustrup"
46355 + * End:
46356 + */
46357 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan4.c
46358 ===================================================================
46359 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms_elan4.c 2004-02-23 16:02:56.000000000 -0500
46360 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan4.c      2005-07-28 14:52:52.862676608 -0400
46361 @@ -0,0 +1,392 @@
46362 +/*
46363 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46364 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46365 + *
46366 + *    For licensing information please see the supplied COPYING file
46367 + *
46368 + */
46369 +
46370 +#ident "@(#)$Id: epcomms_elan4.c,v 1.11.2.1 2004/10/28 11:53:28 david Exp $"
46371 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4.c,v $ */
46372 +
46373 +#include <qsnet/kernel.h>
46374 +
46375 +#include <elan/kcomm.h>
46376 +#include <elan/epsvc.h>
46377 +#include <elan/epcomms.h>
46378 +
46379 +#include "debug.h"
46380 +#include "kcomm_elan4.h"
46381 +#include "epcomms_elan4.h"
46382 +
46383 +static void
46384 +ep4comms_flush_interrupt (EP4_RAIL *rail, void *arg)
46385 +{
46386 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) arg;
46387 +    unsigned long  flags;
46388 +
46389 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
46390 +    commsRail->r_flush_count = 0;
46391 +    kcondvar_wakeupall (&commsRail->r_flush_sleep, &commsRail->r_flush_lock);
46392 +    spin_unlock_irqrestore  (&commsRail->r_flush_lock, flags);
46393 +}
46394 +
46395 +void
46396 +ep4comms_flush_start (EP4_COMMS_RAIL *commsRail)
46397 +{
46398 +    kmutex_lock (&commsRail->r_flush_mutex);
46399 +}
46400 +
46401 +void
46402 +ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail)
46403 +{
46404 +    unsigned long flags;
46405 +
46406 +    ep4_wait_event_cmd (commsRail->r_flush_mcq, 
46407 +                       commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event),
46408 +                       E4_EVENT_INIT_VALUE (-32 * commsRail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),
46409 +                       commsRail->r_flush_ecq->ecq_addr, 
46410 +                       INTERRUPT_CMD | (commsRail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT));
46411 +
46412 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
46413 +    while (commsRail->r_flush_count != 0)
46414 +       kcondvar_wait (&commsRail->r_flush_sleep, &commsRail->r_flush_lock, &flags);
46415 +    spin_unlock_irqrestore (&commsRail->r_flush_lock, flags);
46416 +    
46417 +    kmutex_unlock (&commsRail->r_flush_mutex);
46418 +}
46419 +
46420 +void
46421 +ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq)
46422 +{
46423 +    unsigned long flags;
46424 +
46425 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
46426 +
46427 +    elan4_set_event_cmd (cq, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event));
46428 +    
46429 +    commsRail->r_flush_count++;
46430 +    
46431 +    spin_unlock_irqrestore (&commsRail->r_flush_lock, flags);
46432 +}
46433 +
46434 +void
46435 +ep4comms_flush_callback (void *arg, statemap_t *map)
46436 +{
46437 +    EP4_COMMS_RAIL   *commsRail = (EP4_COMMS_RAIL *) arg;
46438 +    EP_COMMS_SUBSYS  *subsys    = commsRail->r_generic.Subsys;
46439 +    EP4_RAIL        *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
46440 +    unsigned int     rnum       = rail->r_generic.Number;
46441 +    struct list_head *el;
46442 +
46443 +    /*
46444 +     * We stall the retry thread from CB_FLUSH_FILTERING until
46445 +     * we've finished CB_FLUSH_FLUSHING to ensure that sten 
46446 +     * packets can not be being retried while we flush them
46447 +     * through.
46448 +     */
46449 +    switch (rail->r_generic.CallbackStep)
46450 +    {
46451 +    case EP_CB_FLUSH_FILTERING:
46452 +       ep_kthread_stall (&rail->r_retry_thread);
46453 +
46454 +       ep4comms_flush_start (commsRail);
46455 +       break;
46456 +
46457 +    case EP_CB_FLUSH_FLUSHING:
46458 +       break;
46459 +    }
46460 +
46461 +    kmutex_lock (&subsys->Lock);
46462 +    list_for_each (el, &subsys->Transmitters) {
46463 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
46464 +       
46465 +       if (xmtr->Rails[rnum])
46466 +           ep4xmtr_flush_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
46467 +    }
46468 +
46469 +    list_for_each (el, &subsys->Receivers) {
46470 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
46471 +       
46472 +       if (rcvr->Rails[rnum])
46473 +           ep4rcvr_flush_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
46474 +    }
46475 +    kmutex_unlock (&subsys->Lock);
46476 +
46477 +    switch (rail->r_generic.CallbackStep)
46478 +    {
46479 +    case EP_CB_FLUSH_FILTERING:
46480 +       ep4comms_flush_wait (commsRail);
46481 +       break;
46482 +
46483 +    case EP_CB_FLUSH_FLUSHING:
46484 +       ep_kthread_resume (&rail->r_retry_thread);
46485 +       break;
46486 +    }
46487 +}
46488 +
46489 +void
46490 +ep4comms_failover_callback (void *arg, statemap_t *map)
46491 +{
46492 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
46493 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
46494 +    unsigned int     rnum       = commsRail->Rail->Number;
46495 +    struct list_head *el;
46496 +
46497 +    kmutex_lock (&subsys->Lock);
46498 +    list_for_each (el, &subsys->Transmitters) {
46499 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
46500 +       
46501 +       if (xmtr->Rails[rnum])
46502 +           ep4xmtr_failover_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
46503 +    }
46504 +
46505 +    list_for_each (el, &subsys->Receivers) {
46506 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
46507 +       
46508 +       if (rcvr->Rails[rnum])
46509 +           ep4rcvr_failover_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
46510 +    }
46511 +    kmutex_unlock (&subsys->Lock);
46512 +}
46513 +
46514 +void
46515 +ep4comms_disconnect_callback (void *arg, statemap_t *map)
46516 +{
46517 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
46518 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
46519 +    unsigned int     rnum       = commsRail->Rail->Number;
46520 +    struct list_head *el;
46521 +
46522 +    kmutex_lock (&subsys->Lock);
46523 +    list_for_each (el, &subsys->Transmitters) {
46524 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
46525 +       
46526 +       if (xmtr->Rails[rnum])
46527 +           ep4xmtr_disconnect_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
46528 +    }
46529 +
46530 +    list_for_each (el, &subsys->Receivers) {
46531 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
46532 +       
46533 +       if (rcvr->Rails[rnum])
46534 +           ep4rcvr_disconnect_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
46535 +    }
46536 +    kmutex_unlock (&subsys->Lock);
46537 +}
46538 +
46539 +void
46540 +ep4comms_neterr_callback (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
46541 +{
46542 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
46543 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
46544 +    unsigned int     rnum       = commsRail->Rail->Number;
46545 +    struct list_head *el;
46546 +    
46547 +    /* First - stall the retry thread, so that it will no longer restart 
46548 +     *         any sten packets from the retry lists */
46549 +    ep_kthread_stall (&rail->r_retry_thread);
46550 +
46551 +    ep4comms_flush_start ((EP4_COMMS_RAIL *) commsRail);
46552 +
46553 +    /* Second - flush through all command queues for xmtrs and rcvrs */
46554 +    kmutex_lock (&subsys->Lock);
46555 +    list_for_each (el, &subsys->Transmitters) {
46556 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
46557 +       
46558 +       if (xmtr->Rails[rnum])
46559 +           ep4xmtr_neterr_flush (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies);
46560 +    }
46561 +    
46562 +    list_for_each (el, &subsys->Receivers) {
46563 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
46564 +       
46565 +       if (rcvr->Rails[rnum])
46566 +           ep4rcvr_neterr_flush (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies);
46567 +    }
46568 +    kmutex_unlock (&subsys->Lock);
46569 +
46570 +    /* Third - wait for flush to complete */
46571 +    ep4comms_flush_wait ((EP4_COMMS_RAIL *) commsRail);
46572 +    
46573 +    /* Fourth - flush through all command queues */
46574 +    ep4_flush_ecqs (rail);
46575 +    
46576 +    /* Fifth - search all the retry lists for the network error cookies */
46577 +    kmutex_lock (&subsys->Lock);
46578 +    list_for_each (el, &subsys->Transmitters) {
46579 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
46580 +       
46581 +       if (xmtr->Rails[rnum])
46582 +           ep4xmtr_neterr_check (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies);
46583 +    }
46584 +
46585 +    list_for_each (el, &subsys->Receivers) {
46586 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
46587 +       
46588 +       if (rcvr->Rails[rnum])
46589 +           ep4rcvr_neterr_check (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies);
46590 +    }
46591 +    kmutex_unlock (&subsys->Lock);
46592 +
46593 +    ep_kthread_resume (&rail->r_retry_thread);
46594 +}
46595 +
46596 +
46597 +EP_COMMS_RAIL *
46598 +ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r)
46599 +{
46600 +    EP4_RAIL       *rail = (EP4_RAIL *)r;
46601 +    ELAN4_DEV      *dev  = rail->r_ctxt.ctxt_dev;
46602 +    EP4_COMMS_RAIL *commsRail;
46603 +    E4_InputQueue   qdesc;
46604 +    int i;
46605 +
46606 +    KMEM_ZALLOC (commsRail, EP4_COMMS_RAIL *,sizeof (EP4_COMMS_RAIL), 1);
46607 +
46608 +    if (commsRail == NULL)
46609 +       return NULL;
46610 +    
46611 +    commsRail->r_generic.Ops.DelRail          = ep4comms_del_rail;
46612 +    commsRail->r_generic.Ops.DisplayRail      = ep4comms_display_rail;
46613 +    commsRail->r_generic.Ops.Rcvr.AddRail     = ep4rcvr_add_rail;
46614 +    commsRail->r_generic.Ops.Rcvr.DelRail     = ep4rcvr_del_rail;
46615 +    commsRail->r_generic.Ops.Rcvr.Check       = ep4rcvr_check;
46616 +    commsRail->r_generic.Ops.Rcvr.QueueRxd    = ep4rcvr_queue_rxd;
46617 +    commsRail->r_generic.Ops.Rcvr.RpcPut      = ep4rcvr_rpc_put;
46618 +    commsRail->r_generic.Ops.Rcvr.RpcGet      = ep4rcvr_rpc_get;
46619 +    commsRail->r_generic.Ops.Rcvr.RpcComplete = ep4rcvr_rpc_complete;
46620 +
46621 +    commsRail->r_generic.Ops.Rcvr.StealRxd    = ep4rcvr_steal_rxd;
46622 +
46623 +    commsRail->r_generic.Ops.Rcvr.DisplayRcvr = ep4rcvr_display_rcvr;
46624 +    commsRail->r_generic.Ops.Rcvr.DisplayRxd  = ep4rcvr_display_rxd;
46625 +
46626 +    commsRail->r_generic.Ops.Rcvr.FillOutRailStats = ep4rcvr_fillout_rail_stats;
46627 +
46628 +    commsRail->r_generic.Ops.Xmtr.AddRail     = ep4xmtr_add_rail;
46629 +    commsRail->r_generic.Ops.Xmtr.DelRail     = ep4xmtr_del_rail;
46630 +    commsRail->r_generic.Ops.Xmtr.Check       = ep4xmtr_check;
46631 +    commsRail->r_generic.Ops.Xmtr.BindTxd     = ep4xmtr_bind_txd;
46632 +    commsRail->r_generic.Ops.Xmtr.UnbindTxd   = ep4xmtr_unbind_txd;
46633 +    commsRail->r_generic.Ops.Xmtr.PollTxd     = ep4xmtr_poll_txd;
46634 +    commsRail->r_generic.Ops.Xmtr.CheckTxdState = ep4xmtr_check_txd_state;
46635 +
46636 +    commsRail->r_generic.Ops.Xmtr.DisplayXmtr = ep4xmtr_display_xmtr;
46637 +    commsRail->r_generic.Ops.Xmtr.DisplayTxd  = ep4xmtr_display_txd;
46638 +
46639 +    commsRail->r_generic.Ops.Xmtr.FillOutRailStats = ep4xmtr_fillout_rail_stats;
46640 +
46641 +    /* Allocate command queue space for flushing (1 dword for interrupt + 4 dwords for waitevent) */
46642 +    if ((commsRail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == NULL)
46643 +    {
46644 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
46645 +       return NULL;
46646 +    }
46647 +
46648 +    if ((commsRail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == NULL)
46649 +    {
46650 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
46651 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
46652 +       return NULL;
46653 +    }
46654 +
46655 +    /* Allocate and initialise the elan memory part */
46656 +    if ((commsRail->r_elan = ep_alloc_elan (r, EP4_COMMS_RAIL_ELAN_SIZE, 0, &commsRail->r_elan_addr)) == (sdramaddr_t) 0)
46657 +    {
46658 +       ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
46659 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
46660 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
46661 +       return NULL;
46662 +    }
46663 +
46664 +    ep4_register_intcookie (rail, &commsRail->r_flush_intcookie, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event),
46665 +                           ep4comms_flush_interrupt, commsRail);
46666 +
46667 +    elan4_sdram_writeq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType),
46668 +                       E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
46669 +
46670 +
46671 +    /* Allocate and initialise all the queue descriptors as "full" with no event */
46672 +    if ((commsRail->r_descs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * EP_QUEUE_DESC_SIZE, SDRAM_PAGE_SIZE), EP_PERM_ALL, 0)) == (sdramaddr_t) 0)
46673 +    {
46674 +       ep_free_elan (r, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE);
46675 +       ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
46676 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
46677 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
46678 +       return NULL;
46679 +    }
46680 +
46681 +    qdesc.q_bptr    = 0;
46682 +    qdesc.q_fptr    = 8;
46683 +    qdesc.q_control = E4_InputQueueControl (qdesc.q_bptr,qdesc.q_fptr, 8);
46684 +    qdesc.q_event   = 0;
46685 +
46686 +    for (i = 0; i < EP_MSG_NSVC; i++)
46687 +       elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qdesc, commsRail->r_descs + (i * EP_QUEUE_DESC_SIZE),
46688 +                                   sizeof (E4_InputQueue));
46689 +
46690 +    kmutex_init (&commsRail->r_flush_mutex);
46691 +    spin_lock_init (&commsRail->r_flush_lock);
46692 +    kcondvar_init (&commsRail->r_flush_sleep);
46693 +
46694 +    ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback,      commsRail);
46695 +    ep_register_callback (r, EP_CB_FLUSH_FLUSHING,  ep4comms_flush_callback,      commsRail);
46696 +    ep_register_callback (r, EP_CB_FAILOVER,        ep4comms_failover_callback,   commsRail);
46697 +    ep_register_callback (r, EP_CB_DISCONNECTING,   ep4comms_disconnect_callback, commsRail);
46698 +
46699 +    commsRail->r_neterr_ops.op_func = ep4comms_neterr_callback;
46700 +    commsRail->r_neterr_ops.op_arg  = commsRail;
46701 +    
46702 +    ep4_add_neterr_ops (rail, &commsRail->r_neterr_ops);
46703 +
46704 +    return (EP_COMMS_RAIL *) commsRail;
46705 +}
46706 +
46707 +void
46708 +ep4comms_del_rail (EP_COMMS_RAIL *r)
46709 +{
46710 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r;
46711 +    EP4_RAIL       *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
46712 +
46713 +    ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback,      commsRail);
46714 +    ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FLUSHING,  ep4comms_flush_callback,      commsRail);
46715 +    ep_remove_callback (&rail->r_generic, EP_CB_FAILOVER,        ep4comms_failover_callback,   commsRail);
46716 +    ep_remove_callback (&rail->r_generic, EP_CB_DISCONNECTING,   ep4comms_disconnect_callback, commsRail);
46717 +
46718 +    kcondvar_destroy (&commsRail->r_flush_sleep);
46719 +    spin_lock_destroy (&commsRail->r_flush_lock);
46720 +    kmutex_destroy (&commsRail->r_flush_mutex);
46721 +
46722 +    ep_free_memory_elan (&rail->r_generic, EP_EPCOMMS_QUEUE_BASE);
46723 +    ep_free_elan (&rail->r_generic, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE);
46724 +
46725 +    ep4_deregister_intcookie (rail, &commsRail->r_flush_intcookie);
46726 +
46727 +    ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
46728 +    ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
46729 +
46730 +    KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
46731 +}
46732 +
46733 +void
46734 +ep4comms_display_rail (EP_COMMS_RAIL *r)
46735 +{
46736 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r;
46737 +    EP4_RAIL       *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
46738 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
46739 +    
46740 +    ep4_display_rail (rail);
46741 +
46742 +    ep_debugf (DBG_DEBUG, "   flush count=%d mcq=%p ecq=%p event %llx.%llx.%llx\n", 
46743 +              commsRail->r_flush_count, commsRail->r_flush_mcq, commsRail->r_flush_ecq,
46744 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType)),
46745 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WritePtr)),
46746 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WriteValue)));
46747 +}
46748 +
46749 +/*
46750 + * Local variables:
46751 + * c-file-style: "stroustrup"
46752 + * End:
46753 + */
46754 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan4.h
46755 ===================================================================
46756 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms_elan4.h 2004-02-23 16:02:56.000000000 -0500
46757 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan4.h      2005-07-28 14:52:52.863676456 -0400
46758 @@ -0,0 +1,470 @@
46759 +/*
46760 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46761 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46762 + *
46763 + *    For licensing information please see the supplied COPYING file
46764 + *
46765 + */
46766 +
46767 +#ifndef __EPCOMMS_ELAN4_H
46768 +#define __EPCOMMS_ELAN4_H
46769 +
46770 +#ident "@(#)$Id: epcomms_elan4.h,v 1.13.2.1 2004/11/12 10:54:51 mike Exp $"
46771 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4.h,v $ */
46772 +
46773 +
46774 +#include <elan4/types.h>
46775 +
46776 +/*
46777 + * Elan4 spinlocks are a pair of 64 bit words, one in elan sdram and one in main memory
46778 + * the sdram word holds the thread sequence number in the bottom 32 bits and the main
46779 + * lock in the top 32 bits.  The main memory word holds the sequence number only in
46780 + * it's bottom 32 bits */
46781 +
46782 +typedef volatile E4_uint64 EP4_SPINLOCK_MAIN;
46783 +typedef volatile E4_uint64 EP4_SPINLOCK_ELAN;
46784 +
46785 +#define EP4_SPINLOCK_SEQ       0
46786 +#define EP4_SPINLOCK_MLOCK     4
46787 +
46788 +#if defined(__elan4__)
46789 +
46790 +#define EP4_SPINENTER(CPORT,SLE,SLM) \
46791 +do { \
46792 +    register long tmp; \
46793 +\
46794 +    asm volatile ("ld4         [%1], %0\n" \
46795 +                 "inc          %0\n" \
46796 +                 "st4          %0, [%1]\n" \
46797 +                 "ld4          [%1 + 4], %0\n" \
46798 +                 "srl8,byte    %0, 4, %0\n" \
46799 +                 : /* outputs */ "=r" (tmp)  \
46800 +                 : /* inputs */ "r" (SLE), "r" (SLM)); \
46801 +\
46802 +    if (tmp) \
46803 +       ep4_spinblock (CPORT,SLE, SLM); \
46804 +} while (0)
46805 +
46806 +extern void ep4_spinblock(E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm);
46807 +
46808 +#define EP4_SPINEXIT(CPORT,SLE,SLM) \
46809 +do { \
46810 +    register long tmp; \
46811 +\
46812 +    asm volatile ("ld4         [%1], %0\n" \
46813 +                 "st4          %0, [%2]\n" \
46814 +                 : /* outputs */ "=r" (tmp) \
46815 +                 : /* inputs */ "r" (SLE), "r" (SLM)); \
46816 +} while (0)
46817 +
46818 +#else
46819 +
46820 +#define EP4_SPINENTER(DEV,SLE,SLM) \
46821 +do { \
46822 +    uint32_t seq; \
46823 +\
46824 +    mb(); \
46825 +    elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 1); \
46826 +    mb(); \
46827 +    while ((seq = elan4_sdram_readl (DEV, (SLE) + EP4_SPINLOCK_SEQ)) != *((uint32_t *) (SLM))) \
46828 +    { \
46829 +       while (*((uint32_t *) (SLM)) == (seq - 1)) \
46830 +       { \
46831 +           mb(); \
46832 +           DELAY(1); \
46833 +       } \
46834 +    } \
46835 +} while (0)
46836 +
46837 +#define EP4_SPINEXIT(DEV,SLE,SLM) \
46838 +do { \
46839 +    wmb(); \
46840 +    elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 0); \
46841 +} while (0)
46842 +
46843 +#endif /* !defined(__elan4__) */
46844 +
46845 +#define EP4_STEN_RETRYCOUNT    16
46846 +#define EP4_DMA_RETRYCOUNT     16
46847 +
46848 +typedef struct ep4_intr_cmd
46849 +{
46850 +    E4_uint64          c_write_cmd;
46851 +    E4_uint64          c_write_value;
46852 +    E4_uint64          c_intr_cmd;
46853 +} EP4_INTR_CMD;
46854 +
46855 +#define        EP4_INTR_CMD_NDWORDS    (sizeof (EP4_INTR_CMD) / 8)
46856 +
46857 +typedef struct ep4_rxd_sten_cmd
46858 +{
46859 +    E4_uint64          c_open;
46860 +
46861 +    E4_uint64          c_trans;
46862 +    E4_uint64          c_cookie;
46863 +    E4_uint64          c_dma_typeSize;
46864 +    E4_uint64          c_dma_cookie;
46865 +    E4_uint64          c_dma_vproc;
46866 +    E4_uint64          c_dma_srcAddr;
46867 +    E4_uint64          c_dma_dstAddr;
46868 +    E4_uint64          c_dma_srcEvent;
46869 +    E4_uint64          c_dma_dstEvent;
46870 +
46871 +    E4_uint64          c_ok_guard;
46872 +    E4_uint64          c_ok_write_cmd;
46873 +    E4_uint64          c_ok_write_value;
46874 +    
46875 +    E4_uint64          c_fail_guard;
46876 +    E4_uint64          c_fail_setevent;
46877 +
46878 +    E4_uint64          c_nop_cmd;
46879 +} EP4_RXD_STEN_CMD;
46880 +
46881 +#define EP4_RXD_STEN_CMD_NDWORDS       (sizeof (EP4_RXD_STEN_CMD) / 8)
46882 +
46883 +typedef struct ep4_rxd_dma_cmd
46884 +{
46885 +    E4_uint64          c_dma_typeSize;
46886 +    E4_uint64          c_dma_cookie;
46887 +    E4_uint64          c_dma_vproc;
46888 +    E4_uint64          c_dma_srcAddr;
46889 +    E4_uint64          c_dma_dstAddr;
46890 +    E4_uint64          c_dma_srcEvent;
46891 +    E4_uint64          c_dma_dstEvent;
46892 +    E4_uint64          c_nop_cmd;
46893 +} EP4_RXD_DMA_CMD;
46894 +
46895 +#define EP4_RXD_DMA_CMD_NDWORDS                (sizeof (EP4_RXD_DMA_CMD) / 8)
46896 +#define EP4_RXD_START_CMD_NDWORDS      (sizeof (E4_ThreadRegs) / 8)
46897 +
46898 +typedef struct ep4_rxd_rail_elan
46899 +{
46900 +    EP4_RXD_STEN_CMD    rxd_sten[EP_MAXFRAG+1];
46901 +
46902 +    EP4_INTR_CMD       rxd_done_cmd;                           /* command stream issued by done event (aligned to 64 bytes) */
46903 +    E4_Addr            rxd_next;                               /* linked list when on pending list (pad to 32 bytes)*/
46904 +    E4_Event32         rxd_failed;                             /* event set when sten packet fails */
46905 +
46906 +    EP4_INTR_CMD        rxd_failed_cmd;                                /* command stream issued by fail event (aligned to 64 bytes) */
46907 +    E4_uint64          rxd_queued;                             /* rxd queuing thread has executed (pad to 32 bytes)*/
46908 +
46909 +    E4_Event32         rxd_start;                              /* event to set to fire off an event chain (used as chain[0]) */
46910 +    E4_Event32         rxd_chain[EP_MAXFRAG];                  /* chained events (aligned to 32 bytes) */
46911 +    E4_Event32         rxd_done;                               /* event to fire done command stream causing interrupt (used as chain[EP_MAXFRAG]) */
46912 +
46913 +    E4_Addr            rxd_rxd;                                /* elan address of EP4_RXD_MAIN */
46914 +    E4_Addr            rxd_main;                               /* elan address of EP4_RXD_RAIL_MAIN */
46915 +    E4_uint64          rxd_debug;                              /* thread debug value */
46916 +
46917 +    EP_NMD             rxd_buffer;                             /* Network mapping descriptor for receive data */
46918 +} EP4_RXD_RAIL_ELAN;
46919 +
46920 +#define EP4_RXD_RAIL_ELAN_SIZE roundup(sizeof (EP4_RXD_RAIL_ELAN), 64)
46921 +
46922 +typedef struct ep4_rxd_rail_main
46923 +{
46924 +    E4_uint64          rxd_sent[EP_MAXFRAG+1];                 /* sten packet sent */
46925 +    E4_uint64          rxd_failed;                             /* sten packet failed */
46926 +    E4_uint64          rxd_done;                               /* operation complete */
46927 +
46928 +    E4_Addr            rxd_scq;                                /* command port for scq */
46929 +} EP4_RXD_RAIL_MAIN;
46930 +
46931 +#define EP4_RXD_RAIL_MAIN_SIZE roundup(sizeof (EP4_RXD_RAIL_MAIN), 8)
46932 +
46933 +#if !defined(__elan4__)
46934 +typedef struct ep4_rxd_rail
46935 +{
46936 +    EP_RXD_RAIL                rxd_generic;
46937 +
46938 +    struct list_head    rxd_retry_link;
46939 +    unsigned long       rxd_retry_time;
46940 +
46941 +    EP4_INTCOOKIE      rxd_intcookie;
46942 +
46943 +    sdramaddr_t                rxd_elan;
46944 +    EP_ADDR            rxd_elan_addr;
46945 +    
46946 +    EP4_RXD_RAIL_MAIN  *rxd_main;
46947 +    EP_ADDR            rxd_main_addr;
46948 +
46949 +    EP4_ECQ           *rxd_ecq;                                /* cq with 128 bytes targeted by event */
46950 +    EP4_ECQ           *rxd_scq;                                /* cq with 8 bytes targeted by main/thread store */
46951 +} EP4_RXD_RAIL;
46952 +
46953 +#define EP4_NUM_RXD_PER_BLOCK  16
46954 +
46955 +typedef struct ep4_rxd_rail_block
46956 +{
46957 +    struct list_head   blk_link;
46958 +    EP4_RXD_RAIL       blk_rxds[EP4_NUM_RXD_PER_BLOCK];
46959 +} EP4_RXD_RAIL_BLOCK;
46960 +
46961 +#endif /* !defined(__elan4__) */
46962 +
46963 +typedef struct ep4_rcvr_rail_elan
46964 +{
46965 +    E4_uint64          rcvr_thread_stall[8];                   /* place for thread to stall */
46966 +    E4_Event32         rcvr_qevent;                            /* Input queue event */
46967 +    E4_Event32         rcvr_thread_halt;                       /* place for thread to halt */
46968 +
46969 +    volatile E4_Addr    rcvr_pending_tailp;                    /* list of pending rxd's (elan addr) */
46970 +    volatile E4_Addr   rcvr_pending_head;                      /*   -- this pair aligned to 16 bytes */
46971 +
46972 +    EP4_SPINLOCK_ELAN  rcvr_thread_lock;                       /* spinlock for thread processing loop */
46973 +
46974 +    E4_uint64          rcvr_stall_intcookie;                   /* interrupt cookie to use when requested to halt */
46975 +
46976 +    E4_uint64          rcvr_qbase;                             /* base of input queue */
46977 +    E4_uint64          rcvr_qlast;                             /* last item in input queue */
46978 +
46979 +    E4_uint64          rcvr_debug;                             /* thread debug value */
46980 +} EP4_RCVR_RAIL_ELAN;
46981 +
46982 +typedef struct ep4_rcvr_rail_main
46983 +{
46984 +    EP4_SPINLOCK_MAIN   rcvr_thread_lock;                      /* spinlock for thread processing loop */
46985 +} EP4_RCVR_RAIL_MAIN;
46986 +
46987 +#if !defined(__elan4__)
46988 +
46989 +typedef struct ep4_rcvr_rail_stats
46990 +{
46991 +    unsigned long some_stat;
46992 +} EP4_RCVR_RAIL_STATS;
46993 +
46994 +typedef struct ep4_rcvr_rail
46995 +{
46996 +    EP_RCVR_RAIL       rcvr_generic;                           /* generic portion */
46997 +    
46998 +    sdramaddr_t                rcvr_elan;
46999 +    EP_ADDR            rcvr_elan_addr;
47000 +
47001 +    EP4_RCVR_RAIL_MAIN *rcvr_main;
47002 +    EP_ADDR            rcvr_main_addr;
47003 +
47004 +    sdramaddr_t                rcvr_slots;                             /* input queue slots */
47005 +    EP_ADDR            rcvr_slots_addr;                        /*   and elan address */
47006 +
47007 +    EP_ADDR            rcvr_stack;                             /* stack for thread */
47008 +
47009 +    EP4_ECQ           *rcvr_ecq;                               /* command queue space for thread STEN packets */
47010 +    EP4_ECQ           *rcvr_resched;                           /* command queue space to reschedule the thread */
47011 +
47012 +    struct list_head    rcvr_freelist;                         /* freelist of per-rail receive descriptors */
47013 +    unsigned int        rcvr_freecount;                                /*   and number on free list */
47014 +    unsigned int        rcvr_totalcount;                               /*   total number created */
47015 +    spinlock_t          rcvr_freelock;                         /*   and lock for free list */
47016 +    struct list_head    rcvr_blocklist;                                /* list of receive descriptor blocks */
47017 +
47018 +    unsigned int        rcvr_freewaiting;                      /* waiting for descriptors to be freed */
47019 +    kcondvar_t         rcvr_freesleep;                         /*   and sleep here */
47020 +
47021 +    EP4_INTCOOKIE      rcvr_stall_intcookie;                   /* interrupt cookie for thread halt */
47022 +    unsigned char      rcvr_thread_halted;                     /* thread has been halted */
47023 +    unsigned char       rcvr_cleanup_waiting;                  /* waiting for cleanup */
47024 +    kcondvar_t          rcvr_cleanup_sleep;                    /*   and sleep here */
47025 +
47026 +    EP4_RETRY_OPS      rcvr_retryops;
47027 +
47028 +    struct list_head    rcvr_retrylist;                                /* list of txd's to retry envelopes for */
47029 +    struct list_head    rcvr_polllist;                         /* list of txd's to poll for completion */
47030 +    spinlock_t          rcvr_retrylock;
47031 +    
47032 +    EP4_RCVR_RAIL_STATS rcvr_stats;                             /* elan4 specific rcvr_rail stats */
47033 +
47034 +} EP4_RCVR_RAIL;
47035 +
47036 +#endif /* !defined(__elan4__) */
47037 +
47038 +typedef struct ep4_txd_rail_elan
47039 +{
47040 +    EP4_INTR_CMD        txd_env_cmd;                           /* command stream for envelope event (64 byte aligned) */
47041 +    E4_uint64          txd_pad0;                               /*  pad to 32 bytes */
47042 +    E4_Event32         txd_env;                                /* event set when STEN packet fails */
47043 +
47044 +    EP4_INTR_CMD       txd_done_cmd;                           /* command stream for done event (64 byte aligned) */
47045 +    E4_uint64          txd_pad1;                               /*  pad to 32 bytes */
47046 +    E4_Event32         txd_done;                               /* event set when transmit complete */
47047 +
47048 +    E4_Event32         txd_data;                               /* event set when xmit completes (=> phase becomes passive) */
47049 +} EP4_TXD_RAIL_ELAN;
47050 +
47051 +#define EP4_TXD_RAIL_ELAN_SIZE         roundup(sizeof(EP4_TXD_RAIL_ELAN), 64)
47052 +
47053 +typedef struct ep4_txd_rail_main
47054 +{
47055 +    E4_uint64          txd_env;
47056 +    E4_uint64          txd_data;
47057 +    E4_uint64          txd_done;
47058 +} EP4_TXD_RAIL_MAIN;
47059 +
47060 +#define EP4_TXD_RAIL_MAIN_SIZE         roundup(sizeof(EP4_TXD_RAIL_MAIN), 8)
47061 +
47062 +#if !defined (__elan4__)
47063 +typedef struct ep4_txd_rail
47064 +{
47065 +    EP_TXD_RAIL                txd_generic;
47066 +
47067 +    struct list_head    txd_retry_link;
47068 +    unsigned long      txd_retry_time;
47069 +
47070 +    EP4_INTCOOKIE      txd_intcookie;
47071 +
47072 +    sdramaddr_t                txd_elan;
47073 +    EP_ADDR            txd_elan_addr;
47074 +    
47075 +    EP4_TXD_RAIL_MAIN  *txd_main;
47076 +    EP_ADDR            txd_main_addr;
47077 +
47078 +    EP4_ECQ           *txd_ecq;
47079 +
47080 +    E4_uint64          txd_cookie;
47081 +} EP4_TXD_RAIL;
47082 +
47083 +#define EP4_NUM_TXD_PER_BLOCK  21
47084 +
47085 +typedef struct ep4_txd_rail_block
47086 +{
47087 +    struct list_head   blk_link;
47088 +    EP4_TXD_RAIL       blk_txds[EP4_NUM_TXD_PER_BLOCK];
47089 +} EP4_TXD_RAIL_BLOCK;
47090 +
47091 +typedef struct ep4_xmtr_rail_main
47092 +{
47093 +    E4_int64           xmtr_flowcnt;
47094 +} EP4_XMTR_RAIL_MAIN;
47095 +
47096 +typedef struct ep4_xmtr_rail_stats
47097 +{
47098 +    unsigned long some_stat;
47099 +} EP4_XMTR_RAIL_STATS;
47100 +
47101 +#define EP4_TXD_LIST_POLL      0
47102 +#define EP4_TXD_LIST_STALLED   1
47103 +#define EP4_TXD_LIST_RETRY     2
47104 +#define EP4_TXD_NUM_LISTS      3
47105 +typedef struct ep4_xmtr_rail
47106 +{
47107 +    EP_XMTR_RAIL       xmtr_generic;
47108 +
47109 +    EP4_XMTR_RAIL_MAIN *xmtr_main;
47110 +    EP_ADDR            xmtr_main_addr;
47111 +
47112 +    struct list_head    xmtr_freelist;
47113 +    unsigned int        xmtr_freecount;
47114 +    unsigned int        xmtr_totalcount;
47115 +    spinlock_t          xmtr_freelock;
47116 +    struct list_head    xmtr_blocklist;
47117 +    unsigned int        xmtr_freewaiting;
47118 +    kcondvar_t         xmtr_freesleep;
47119 +
47120 +    EP4_INTCOOKIE      xmtr_intcookie;                         /* interrupt cookie for "polled" descriptors */
47121 +
47122 +    ELAN4_CQ           *xmtr_cq;
47123 +    E4_int64           xmtr_flowcnt;
47124 +
47125 +    EP4_RETRY_OPS      xmtr_retryops;
47126 +
47127 +    struct list_head    xmtr_retrylist[EP4_TXD_NUM_LISTS];     /* list of txd's to retry envelopes for */
47128 +    struct list_head    xmtr_polllist;                         /* list of txd's to poll for completion */
47129 +    spinlock_t          xmtr_retrylock;
47130 +
47131 +    EP4_XMTR_RAIL_STATS stats;                                  /* elan4 specific xmtr rail stats */
47132 +} EP4_XMTR_RAIL;
47133 +
47134 +#define EP4_XMTR_CQSIZE                CQ_Size64K                              /* size of command queue for xmtr */
47135 +#define EP4_XMTR_FLOWCNT       (CQ_Size(EP4_XMTR_CQSIZE) / 512)        /* # of STEN packets which can fit in */
47136 +
47137 +typedef struct ep4_comms_rail_elan
47138 +{
47139 +    E4_Event32         r_flush_event;
47140 +} EP4_COMMS_RAIL_ELAN;
47141 +
47142 +#define EP4_COMMS_RAIL_ELAN_SIZE       roundup(sizeof (EP4_COMMS_RAIL_ELAN), 32)
47143 +
47144 +typedef struct ep4_comms_rail
47145 +{
47146 +    EP_COMMS_RAIL      r_generic;                              /* generic comms rail */
47147 +    sdramaddr_t                r_descs;                                /* input queue descriptors */
47148 +
47149 +    sdramaddr_t                r_elan;                                 /* elan portion */
47150 +    EP_ADDR            r_elan_addr;
47151 +
47152 +    kmutex_t           r_flush_mutex;                          /* sequentialise flush usage */
47153 +    EP4_INTCOOKIE       r_flush_intcookie;                     /* interrupt cookie to generate */
47154 +
47155 +    kcondvar_t         r_flush_sleep;                          /* place to sleep waiting */
47156 +    spinlock_t         r_flush_lock;                           /*   and spinlock to use */
47157 +
47158 +    unsigned int       r_flush_count;                          /* # setevents issued */
47159 +    EP4_ECQ           *r_flush_ecq;                            /* command queue for interrupt */
47160 +    EP4_ECQ           *r_flush_mcq;                            /* command queue to issue waitevent */
47161 +
47162 +    EP4_NETERR_OPS      r_neterr_ops;                          /* network error fixup ops */
47163 +} EP4_COMMS_RAIL;
47164 +
47165 +/* epcommsTx_elan4.c */
47166 +extern void           ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
47167 +extern void           ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
47168 +extern void           ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
47169 +
47170 +extern void          ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
47171 +extern void          ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
47172 +
47173 +/* epcommsRx_elan4.c */
47174 +extern void           ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
47175 +extern void           ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
47176 +extern void           ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
47177 +
47178 +extern void          ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
47179 +extern void          ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
47180 +
47181 +/* epcomms_elan4.c */
47182 +extern void           ep4comms_flush_start (EP4_COMMS_RAIL *commsRail);
47183 +extern void           ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail);
47184 +extern void           ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq);
47185 +
47186 +extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r);
47187 +extern void           ep4comms_del_rail (EP_COMMS_RAIL *r);
47188 +extern void          ep4comms_display_rail (EP_COMMS_RAIL *r);
47189 +
47190 +/* epcommsTx_elan4.c */
47191 +extern int            ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
47192 +extern void           ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase);
47193 +extern int            ep4xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
47194 +extern long           ep4xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
47195 +extern void           ep4xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
47196 +extern void           ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
47197 +extern int            ep4xmtr_check_txd_state(EP_TXD *txd);
47198 +
47199 +extern void           ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
47200 +extern void           ep4xmtr_display_txd  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
47201 +
47202 +extern void           ep4xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
47203 +
47204 +/* epcommsRx_elan4.c */
47205 +extern int           ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
47206 +extern void          ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
47207 +extern void          ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
47208 +extern void          ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
47209 +
47210 +extern EP_RXD       *ep4rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail);
47211 +
47212 +extern long          ep4rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
47213 +extern void           ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
47214 +extern void           ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
47215 +
47216 +extern void           ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
47217 +extern void           ep4rcvr_display_rxd  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
47218 +
47219 +extern void           ep4rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
47220 +
47221 +#endif /* !defined(__elan4__) */
47222 +
47223 +/*
47224 + * Local variables:
47225 + * c-file-style: "stroustrup"
47226 + * End:
47227 + */
47228 +#endif /* __EPCOMMS_ELAN4_H */
47229 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan4_thread.c
47230 ===================================================================
47231 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcomms_elan4_thread.c  2004-02-23 16:02:56.000000000 -0500
47232 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcomms_elan4_thread.c       2005-07-28 14:52:52.864676304 -0400
47233 @@ -0,0 +1,346 @@
47234 +/*
47235 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
47236 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
47237 + *
47238 + *    For licensing information please see the supplied COPYING file
47239 + *
47240 + */
47241 +
47242 +#ident "@(#)$Id: epcomms_elan4_thread.c,v 1.10.8.2 2004/09/28 10:36:51 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
47243 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4_thread.c,v $*/
47244 +
47245 +//#include <qsnet/types.h>
47246 +
47247 +typedef char           int8_t;
47248 +typedef unsigned char  uint8_t;
47249 +typedef short          int16_t;
47250 +typedef unsigned short uint16_t;
47251 +typedef int            int32_t;
47252 +typedef unsigned int   uint32_t;
47253 +typedef long           int64_t;
47254 +typedef unsigned long  uint64_t;
47255 +
47256 +#include <elan/nmh.h>
47257 +#include <elan/kcomm.h>
47258 +#include <elan/epcomms.h>
47259 +
47260 +#include <elan4/registers.h>
47261 +
47262 +#include "kcomm_vp.h"
47263 +#include "kcomm_elan4.h"
47264 +#include "epcomms_elan4.h"
47265 +
47266 +#include <elan4/trtype.h>
47267 +
47268 +/* assembler in epcomms_asm_elan4_thread.S */
47269 +extern void               c_waitevent_interrupt (E4_uint64 *cport, E4_Event32 *event, E4_uint64 count, E4_uint64 intcookie);
47270 +extern EP4_RXD_RAIL_ELAN *c_stall_thread (EP4_RCVR_RAIL_ELAN *rcvrRail);
47271 +
47272 +#define R32_to_R47             "%r32", "%r33", "%r34", "%r35", "%r36", "%r37", "%r38", "%r39", \
47273 +                               "%r40", "%r41", "%r42", "%r43", "%r44", "%r45", "%r46", "%r47"
47274 +#define R48_to_R63             "%r48", "%r49", "%r50", "%r51", "%r52", "%r53", "%r54", "%r55", \
47275 +                               "%r56", "%r57", "%r58", "%r59", "%r60", "%r61", "%r62", "%r63"
47276 +
47277 +/* proto types for code in asm_elan4_thread.S */
47278 +extern void c_waitevent (E4_uint64 *commandport, E4_Addr event, E4_uint64 count);
47279 +extern void c_reschedule(E4_uint64 *commandport);
47280 +
47281 +static inline unsigned long
47282 +c_load_u16(unsigned short *ptr)
47283 +{
47284 +    unsigned long value;
47285 +
47286 +    asm volatile ("ld2         [%1], %%r2\n"
47287 +                 "srl8,byte    %%r2, %1, %0\n"
47288 +                 "sll8         %0, 48, %0\n"
47289 +                 "srl8         %0, 48, %0\n"
47290 +                 : /* outputs */ "=r" (value) 
47291 +                 : /* inputs */ "r" (ptr)
47292 +                 : /* clobbered */ "%r2");
47293 +    return value;
47294 +}
47295 +
47296 +static inline unsigned long
47297 +c_load_u32(unsigned int *ptr)
47298 +{
47299 +    unsigned long value;
47300 +
47301 +    asm volatile ("ld4         [%1], %%r2\n"
47302 +                 "srl8,byte    %%r2, %1, %0\n"
47303 +                 "sll8         %0, 32, %0\n"
47304 +                 "srl8         %0, 32, %0\n"
47305 +                 : /* outputs */ "=r" (value) 
47306 +                 : /* inputs */ "r" (ptr)
47307 +                 : /* clobbered */ "%r2");
47308 +    return value;
47309 +}
47310 +
47311 +static inline void
47312 +c_store_u32(unsigned int *ptr, unsigned long value)
47313 +{
47314 +    asm volatile ("sll8,byte   %0, %1, %%r2\n"
47315 +                 "st4          %%r2, [%1]\n"
47316 +                 : /* no outputs */ 
47317 +                 : /* inputs */ "r" (value), "r" (ptr)
47318 +                 : /* clobbered */ "%r2");
47319 +}
47320 +
47321 +/* Reschedule the current Elan thread to the back of the run queue 
47322 + * if there is another one ready to run */
47323 +static inline void
47324 +c_yield (E4_uint64 *commandport)
47325 +{
47326 +    unsigned long rval;
47327 +
47328 +    asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */);
47329 +
47330 +    if (rval  & ICC_SIGNED_BIT)
47331 +       c_reschedule(commandport);
47332 +}
47333 +
47334 +/* Reschedule the current thread if we're in danger of exceeding the 
47335 + * thread instruction count */
47336 +static inline void
47337 +c_insn_check(E4_uint64 *commandport)
47338 +{
47339 +    unsigned long rval;
47340 +
47341 +    asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */);
47342 +
47343 +    if (rval & ICC_ZERO_BIT)
47344 +       c_reschedule(commandport);
47345 +}
47346 +
47347 +void
47348 +ep4_spinblock (E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm)
47349 +{
47350 +    do {
47351 +       unsigned long val = *sle & 0xfffffffff;
47352 +
47353 +       *slm = val;                                     /* Release my lock */
47354 +       
47355 +       while (*sle >> 32)                              /* Wait until the main */
47356 +           c_yield(cport);                             /* releases the lock */
47357 +       
47358 +       c_store_u32 ((unsigned int *) sle, val + 1);    /* and try and relock */
47359 +    } while (*sle >> 32);
47360 +}
47361 +
47362 +#define RESCHED_AFTER_PKTS     ((CQ_Size(CQ_Size64K) / 128) - 1)
47363 +
47364 +void
47365 +ep4comms_rcvr (EP4_RAIL_ELAN *rail, EP4_RCVR_RAIL_ELAN *rcvrElan, EP4_RCVR_RAIL_MAIN *rcvrMain,
47366 +              E4_InputQueue *inputq, E4_uint64 *cport, E4_uint64 *resched)
47367 +{
47368 +    long count = 1;
47369 +    long fptr  = inputq->q_fptr;
47370 +
47371 +    for (;;)
47372 +    {
47373 +       c_waitevent (cport, inputq->q_event, -count << 5);
47374 +
47375 +       count = 0;
47376 +
47377 +       while (fptr != inputq->q_bptr)
47378 +       {
47379 +           EP_ENVELOPE        *env      = (EP_ENVELOPE *) fptr;
47380 +           unsigned long       nodeid   = c_load_u32 (&env->NodeId);
47381 +           unsigned long       opencmd  = OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(nodeid));
47382 +           unsigned long       vproc    = EP_VP_DATA(rail->r_nodeid);
47383 +           EP_ATTRIBUTE        attr     = c_load_u32 (&env->Attr);
47384 +           unsigned long       txdRail  = c_load_u32 (&env->TxdRail);
47385 +           unsigned long       nFrags   = c_load_u32 (&env->nFrags);
47386 +           E4_uint64           cookie   = rail->r_cookies[nodeid];
47387 +           unsigned long       srcevent = (EP_IS_RPC(attr) ? txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_data) :
47388 +                                           txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done));
47389 +           EP4_RXD_RAIL_ELAN  *rxdElan;
47390 +           EP4_RXD_RAIL_MAIN  *rxdMain;
47391 +           EP_RXD_MAIN        *rxd;
47392 +           EP4_RXD_STEN_CMD   *sten;
47393 +           E4_Event32         *event;
47394 +           unsigned long       first;
47395 +           unsigned long       buffer;
47396 +           unsigned long       len;
47397 +           unsigned long       i;
47398 +
47399 +           EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
47400 +
47401 +           if ((rxdElan = (EP4_RXD_RAIL_ELAN *) rcvrElan->rcvr_pending_head) == 0)
47402 +           {
47403 +               EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
47404 +
47405 +               rxdElan = c_stall_thread (rcvrElan);
47406 +
47407 +               EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
47408 +           }
47409 +           
47410 +           if (c_load_u32 (&env->Version) != EP_ENVELOPE_VERSION)              /* envelope has been cancelled */
47411 +           {
47412 +               EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
47413 +               goto consume_envelope;
47414 +           }
47415 +
47416 +           rxd     = (EP_RXD_MAIN *) rxdElan->rxd_rxd;
47417 +           rxdMain = (EP4_RXD_RAIL_MAIN *) rxdElan->rxd_main;
47418 +           first   = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(attr) ? 1 : 0) + (nFrags == 0 ? 1 : nFrags));
47419 +           sten    = &rxdElan->rxd_sten[first];
47420 +           event   = &rxdElan->rxd_chain[first];
47421 +
47422 +           if (EP_IS_MULTICAST(attr))                          /* need to fetch broadcast bitmap */
47423 +           {
47424 +               sten->c_open          = opencmd;
47425 +               sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
47426 +               sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
47427 +               sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t), DMA_DataTypeWord, 0, EP4_DMA_RETRYCOUNT);
47428 +               sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
47429 +               sten->c_dma_vproc     = vproc;
47430 +               sten->c_dma_srcAddr   = c_load_u32 (&env->TxdMain.nmd_addr) + offsetof(EP_TXD_MAIN, Bitmap);
47431 +               sten->c_dma_dstAddr   = (E4_Addr) &rxd->Bitmap;
47432 +               sten->c_dma_srcEvent  = srcevent;
47433 +               sten->c_dma_dstEvent  = (E4_Addr) event;
47434 +
47435 +               event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS);
47436 +
47437 +               cookie += (EP4_COOKIE_INC << 1);
47438 +
47439 +               sten++; event++;
47440 +           }
47441 +
47442 +           if (nFrags == 0)
47443 +           {
47444 +               /* Generate an empty "get" DMA to accept the envelope and fire the rx handler */
47445 +               sten->c_open          = opencmd;
47446 +               sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
47447 +               sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
47448 +               sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
47449 +               sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
47450 +               sten->c_dma_vproc     = vproc;
47451 +               sten->c_dma_srcEvent  = srcevent;
47452 +               sten->c_dma_dstEvent  = (E4_Addr) event;
47453 +
47454 +               event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS);
47455 +
47456 +               len = 0;
47457 +
47458 +               cookie += (EP4_COOKIE_INC << 1);
47459 +           }
47460 +           else
47461 +           {
47462 +               /* Generate the DMA chain to fetch the data */
47463 +               for (i = 0, buffer = c_load_u32 (&rxdElan->rxd_buffer.nmd_addr), len = 0; i < nFrags; i++)
47464 +               {
47465 +                   unsigned long fragLen = c_load_u32 (&env->Frags[i].nmd_len);
47466 +
47467 +                   sten->c_open          = opencmd;
47468 +                   sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
47469 +                   sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
47470 +                   sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(fragLen, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
47471 +                   sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
47472 +                   sten->c_dma_vproc     = vproc;
47473 +                   sten->c_dma_srcAddr   = c_load_u32 (&env->Frags[i].nmd_addr);
47474 +                   sten->c_dma_dstAddr   = buffer;
47475 +                   sten->c_dma_srcEvent  = srcevent;
47476 +                   sten->c_dma_dstEvent  = (E4_Addr) event;
47477 +                   
47478 +                   event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS);
47479 +                   
47480 +                   buffer += fragLen;
47481 +                   len    += fragLen;
47482 +
47483 +                   cookie += (EP4_COOKIE_INC << 1);
47484 +
47485 +                   sten++; event++;
47486 +               }
47487 +               
47488 +               (--event)->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS);
47489 +
47490 +               if (c_load_u32 (&rxdElan->rxd_buffer.nmd_len) < len)
47491 +               {
47492 +                   /* The receive descriptor was too small for the message */
47493 +                   /* complete the message anyway,  but don't transfer any */
47494 +                   /* data,  we set the length to EP_MSG_TOO_BIG */
47495 +                   for (i = first, sten = &rxdElan->rxd_sten[first]; i <= EP_MAXFRAG; i++, sten++)
47496 +                       sten->c_dma_typeSize = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
47497 +                   
47498 +                   len = EP_MSG_TOO_BIG;
47499 +               }
47500 +           }
47501 +           
47502 +           /* Stuff the first STEN packet into the command queue, there's always enough space, 
47503 +            * since we will insert a waitevent at least once for the queue size */
47504 +           asm volatile ("ld64         [%0], %%r32\n"
47505 +                         "ld64         [%0 + 64], %%r48\n"
47506 +                         "st64         %%r32, [%1]\n"
47507 +                         "st64         %%r48, [%1]\n"
47508 +                         : /* no outputs */
47509 +                         : /* inputs */ "r" (&rxdElan->rxd_sten[first]), "r" (cport)
47510 +                         : /* clobbered */ R32_to_R47, R48_to_R63);
47511 +
47512 +           /* remove the RXD from the pending list */
47513 +           if ((rcvrElan->rcvr_pending_head = rxdElan->rxd_next) == 0)
47514 +               rcvrElan->rcvr_pending_tailp = (E4_Addr)&rcvrElan->rcvr_pending_head;
47515 +
47516 +           /* mark as not queued */
47517 +           rxdElan->rxd_queued = 0;
47518 +
47519 +           /* copy down the envelope */
47520 +           if (EP_HAS_PAYLOAD(attr))
47521 +               asm volatile ("ld64     [%0],    %%r32\n"
47522 +                             "ld64     [%0+64], %%r48\n"
47523 +                             "st64     %%r32, [%1]\n"
47524 +                             "ld64     [%0+128], %%r32\n"
47525 +                             "st64     %%r48, [%1+64]\n"
47526 +                             "ld64     [%0+192], %%r48\n"
47527 +                             "st64     %%r32, [%1 + 128]\n"
47528 +                             "st64     %%r48, [%1 + 192]\n"
47529 +                             : /* no outputs */
47530 +                             : /* inputs */    "r" (env), "r" (&rxd->Envelope)
47531 +                             : /* clobbered */ R32_to_R47, R48_to_R63);
47532
47533 +           else
47534 +               asm volatile ("ld64     [%0],    %%r32\n"
47535 +                             "ld64     [%0+64], %%r48\n"
47536 +                             "st64     %%r32, [%1]\n"
47537 +                             "st64     %%r48, [%1+64]\n"
47538 +                             : /* no outputs */
47539 +                             : /* inputs */    "r" (env), "r" (&rxd->Envelope)
47540 +                             : /* clobbered */ R32_to_R47, R48_to_R63);
47541 +
47542 +           /* Store the message length to indicate that I've finished */
47543 +           c_store_u32 (&rxd->Len, len);
47544 +           
47545 +           /* Finally update the network error cookie */
47546 +           rail->r_cookies[nodeid] = cookie;
47547 +
47548 +           EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
47549 +
47550 +       consume_envelope:
47551 +           if (fptr != rcvrElan->rcvr_qlast)
47552 +               fptr += EP_INPUTQ_SIZE;
47553 +           else
47554 +               fptr = rcvrElan->rcvr_qbase;
47555 +
47556 +           if (! rcvrElan->rcvr_stall_intcookie)
47557 +               inputq->q_fptr = fptr;
47558 +
47559 +           if (++count >= RESCHED_AFTER_PKTS)
47560 +               break;
47561 +
47562 +           c_insn_check (cport);
47563 +       }
47564 +       
47565 +       if (rcvrElan->rcvr_stall_intcookie)
47566 +       {
47567 +           c_waitevent_interrupt (cport, &rcvrElan->rcvr_thread_halt, -(1 << 5), rcvrElan->rcvr_stall_intcookie);
47568 +           inputq->q_fptr = fptr;
47569 +
47570 +           count++;                                            /* one extra as we were given an extra set to wake us up */
47571 +       }
47572 +    }
47573 +}
47574 +
47575 +/*
47576 + * Local variables:
47577 + * c-file-style: "stroustrup"
47578 + * End:
47579 + */
47580 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsFwd.c
47581 ===================================================================
47582 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcommsFwd.c    2004-02-23 16:02:56.000000000 -0500
47583 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsFwd.c 2005-07-28 14:52:52.864676304 -0400
47584 @@ -0,0 +1,310 @@
47585 +/*
47586 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
47587 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
47588 + *
47589 + *    For licensing information please see the supplied COPYING file
47590 + *
47591 + */
47592 +
47593 +#ident "@(#)$Id: epcommsFwd.c,v 1.12 2004/08/16 12:21:15 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
47594 +/*      $Source: /cvs/master/quadrics/epmod/epcommsFwd.c,v $*/
47595 +
47596 +#include <qsnet/kernel.h>
47597 +
47598 +#include <elan/kcomm.h>
47599 +#include <elan/epsvc.h>
47600 +#include <elan/epcomms.h>
47601 +
47602 +#include "debug.h"
47603 +
47604 +unsigned int epcomms_forward_limit = 8;
47605 +
47606 +static void
47607 +GenerateTree (unsigned nodeId, unsigned lowId, unsigned  highId, bitmap_t *bitmap, 
47608 +             unsigned *parentp, unsigned *childrenp, int *nchildrenp)
47609 +{
47610 +    int i;
47611 +    int count;
47612 +    int branch;
47613 +    int nSub;
47614 +    int branchIndex;
47615 +    int parent;
47616 +    int nBranch;
47617 +    int rem;
47618 +    int self;
47619 +    int branchRatio;
47620 +    int node;
47621 +    int x, y, z;
47622 +
47623 +
47624 +#ifdef DEBUG_PRINTF
47625 +    {
47626 +#define OVERFLOW "...]"
47627 +#define LINESZ  128
47628 +       char space[LINESZ+1];
47629 +
47630 +       if (ep_sprintf_bitmap (space, LINESZ-strlen(OVERFLOW), bitmap, 0, 0, (highId - lowId)+1) != -1)
47631 +           strcat (space, OVERFLOW);
47632 +
47633 +       EPRINTF3 (DBG_FORWARD, "GenerateTree; elan node low=%d node high=%d bitmap=%s\n", lowId, highId, space);
47634 +#undef OVERFLOW
47635 +#undef LINESZ
47636 +    }
47637 +#endif
47638 +
47639 +    /* Count the number of nodes in the partition */
47640 +    /* and work out which one I am */
47641 +    for (count = 0, self = ELAN_INVALID_NODE, i = lowId; i <= highId; i++)
47642 +    {
47643 +       if (BT_TEST (bitmap, i-lowId))
47644 +       {
47645 +           if (i == nodeId)
47646 +               self = count;
47647 +           count++;
47648 +       }
47649 +    }
47650 +
47651 +    EPRINTF2 (DBG_FORWARD, "GenerateTree: count=%d self=%d\n", count, self);
47652 +
47653 +    if (count == 0 || self == ELAN_INVALID_NODE)
47654 +    {
47655 +       *parentp    = ELAN_INVALID_NODE;
47656 +       *nchildrenp = 0;
47657 +       return;
47658 +    }
47659 +
47660 +    /* search for position in tree */
47661 +    branchRatio = EP_TREE_ARITY;               /* branching ratio */
47662 +    branch      = 0;                           /* start with process 0 */
47663 +    nSub        = count;                       /* and whole tree */
47664 +    branchIndex = -1;                          /* my branch # in parent */
47665 +    parent      = -1;                          /* my parent's group index # */
47666 +    
47667 +    while (branch != self)                     /* descend process tree */
47668 +    {                                          /* until I find myself */
47669 +       parent = branch;
47670 +       branch++;                               /* parent + 1 = first born */
47671 +       nSub--;                                 /* set # descendents */
47672 +       
47673 +       rem  = nSub % branchRatio;
47674 +       nSub = nSub / branchRatio + 1;
47675 +       x = rem * nSub;
47676 +       y = self - branch;
47677 +       
47678 +       if (y < x)                              /* my first 'rem' branches have */
47679 +       {                                       /* 1 more descendent... */
47680 +           branchIndex = y / nSub;
47681 +           branch += branchIndex * nSub;
47682 +       }
47683 +       else                                    /* than the rest of my branches */
47684 +       {
47685 +           nSub--;
47686 +           z = (y - x) / nSub;
47687 +           branchIndex = rem + z;
47688 +           branch += x + z * nSub;
47689 +       }
47690 +    }
47691 +
47692 +    branch++;                                  /* my first born */
47693 +    nSub--;                                    /* total # of my descendents */
47694 +    /* leaves + their parents may have # children < branchRatio */
47695 +    nBranch = (nSub < branchRatio) ? nSub : branchRatio;       
47696 +
47697 +    EPRINTF2 (DBG_FORWARD, "GenerateTree: parent=%d nBranch=%d\n", parent, nBranch);
47698 +
47699 +    /* Now calculate the real elan id's of the parent and my children */
47700 +    if (parent == -1)
47701 +       *parentp = ELAN_INVALID_NODE;
47702 +    else
47703 +    {
47704 +       for (i = lowId, node = 0; i <= highId; i++)
47705 +       {
47706 +           if (BT_TEST(bitmap, i-lowId))
47707 +               if (node++ == parent)
47708 +                   break;
47709 +       }
47710 +       *parentp = i;
47711 +    }
47712 +
47713 +    for (i = lowId, branchIndex = 0, node = 0; branchIndex < nBranch && i <= highId; i++)
47714 +    {
47715 +       if (BT_TEST(bitmap, i-lowId))
47716 +       {
47717 +           if (node == branch)
47718 +           {
47719 +               branch = branch + nSub / branchRatio + ((branchIndex < (nSub % branchRatio)) ? 1 : 0);
47720 +
47721 +               childrenp[branchIndex++] = i;
47722 +           }
47723 +           node++;
47724 +       }
47725 +    }
47726 +
47727 +    *nchildrenp = branchIndex;
47728 +}
47729 +
47730 +static void
47731 +ForwardTxDone (EP_TXD *txd, void *arg, EP_STATUS status)
47732 +{
47733 +    EP_FWD_DESC     *desc   = (EP_FWD_DESC *) arg;
47734 +    EP_RXD          *rxd    = desc->Rxd;
47735 +    EP_COMMS_SUBSYS *subsys = rxd->Rcvr->Subsys;
47736 +    unsigned long    flags;
47737 +
47738 +    /* XXXX: if transmit fails, could step to next node in this subtree ? */
47739 +
47740 +    spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47741 +
47742 +    if (--desc->NumChildren > 0)
47743 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47744 +    else
47745 +    {
47746 +       rxd->Rcvr->ForwardRxdCount--;
47747 +
47748 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47749 +
47750 +       KMEM_FREE (desc, sizeof (EP_FWD_DESC));
47751 +
47752 +       rxd->Handler (rxd);
47753 +    }
47754 +}
47755 +
47756 +long
47757 +ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime)
47758 +{
47759 +    unsigned long flags;
47760 +    int i, res;
47761 +
47762 +    spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47763 +    while (! list_empty (&subsys->ForwardDescList)) 
47764 +    {
47765 +       EP_RXD      *rxd     = (EP_RXD *) list_entry (subsys->ForwardDescList.next, EP_RXD, Link);
47766 +       EP_RXD_MAIN *rxdMain = rxd->RxdMain;
47767 +       EP_ENVELOPE *env     = &rxdMain->Envelope;
47768 +       EP_FWD_DESC *desc;
47769 +
47770 +       EPRINTF2 (DBG_FORWARD, "ep: forwarding rxd %p to range %x\n", rxd, env->Range);
47771 +
47772 +       list_del (&rxd->Link);
47773 +
47774 +       rxd->Rcvr->ForwardRxdCount++;
47775 +
47776 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47777 +
47778 +       KMEM_ALLOC (desc, EP_FWD_DESC *, sizeof (EP_FWD_DESC), 1);
47779 +
47780 +       if (desc == NULL)
47781 +       {
47782 +           spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47783 +           rxd->Rcvr->ForwardRxdCount--;
47784 +           spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47785 +
47786 +           rxd->Handler (rxd);
47787 +       }
47788 +       else
47789 +       {
47790 +           /* compute the spanning tree for this message */
47791 +           unsigned int destLo = EP_RANGE_LOW (env->Range);
47792 +           unsigned int destHi = EP_RANGE_HIGH (env->Range);
47793 +           unsigned int parent;
47794 +
47795 +           GenerateTree (subsys->Subsys.Sys->Position.pos_nodeid, destLo, destHi, rxdMain->Bitmap, &parent, desc->Children, &desc->NumChildren);
47796 +           
47797 +           if (desc->NumChildren == 0 || (epcomms_forward_limit && (rxd->Rcvr->ForwardRxdCount >= epcomms_forward_limit)))
47798 +           {
47799 +               EPRINTF5 (DBG_FORWARD, "ep; don't forward rxd %p to /%d (%d children/ %d forwarding (%d))\n",
47800 +                         rxd, rxd->Rcvr->Service, desc->NumChildren, rxd->Rcvr->ForwardRxdCount, epcomms_forward_limit);
47801 +
47802 +               spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47803 +               rxd->Rcvr->ForwardRxdCount--;
47804 +               spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47805 +
47806 +               KMEM_FREE (desc, sizeof (EP_FWD_DESC));
47807 +               
47808 +               rxd->Handler (rxd);
47809 +           }
47810 +           else
47811 +           {
47812 +               ep_nmd_subset (&desc->Data, &rxd->Data, 0, ep_rxd_len (rxd));
47813 +               desc->Rxd = rxd;
47814 +
47815 +               /* NOTE - cannot access 'desc' after last call to multicast, since it could complete
47816 +                *        and free the desc before we access it again.  Hence the reverse loop. */
47817 +               for (i = desc->NumChildren-1; i >= 0; i--)
47818 +               {
47819 +                   ASSERT (desc->Children[i] < subsys->Subsys.Sys->Position.pos_nodes);
47820 +
47821 +                   EPRINTF3 (DBG_FORWARD, "ep: forwarding rxd %p to node %d/%d\n", rxd, desc->Children[i], rxd->Rcvr->Service);
47822 +
47823 +                   if ((res = ep_multicast_forward (subsys->ForwardXmtr, desc->Children[i], rxd->Rcvr->Service, 0, 
47824 +                                                    ForwardTxDone, desc, env, EP_HAS_PAYLOAD(env->Attr) ? &rxdMain->Payload : NULL,  
47825 +                                                    rxdMain->Bitmap, &desc->Data, 1)) != EP_SUCCESS)
47826 +                   {
47827 +                       ep_debugf (DBG_FORWARD, "ep: ep_multicast_forward failed\n");
47828 +                       ForwardTxDone (NULL, desc, res);
47829 +                   }
47830 +               }
47831 +               
47832 +           }
47833 +       }
47834 +
47835 +       spin_lock_irqsave (&subsys->ForwardDescLock, flags);
47836 +    }
47837 +    spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
47838 +
47839 +    return (nextRunTime);
47840 +}
47841 +
47842 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47843 +void
47844 +ep_csum_rxds (EP_COMMS_SUBSYS *subsys)
47845 +{
47846 +    unsigned long flags;
47847
47848 +    spin_lock_irqsave (&subsys->CheckSumDescLock, flags);
47849 +    while (! list_empty (&subsys->CheckSumDescList)) 
47850 +    {
47851 +       EP_RXD      *rxd = (EP_RXD *) list_entry (subsys->CheckSumDescList.next, EP_RXD, CheckSumLink);
47852 +       EP_ENVELOPE *env = &rxd->RxdMain->Envelope;
47853 +
47854 +       list_del_init (&rxd->CheckSumLink);
47855 +       spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags);
47856 +
47857 +       if (env->CheckSum) {
47858 +           EP_NMD nmd;
47859 +           uint32_t csum; 
47860 +
47861 +           ep_nmd_subset ( &nmd, &rxd->Data, 0, ep_rxd_len (rxd));
47862 +
47863 +           csum = ep_calc_check_sum(subsys->Subsys.Sys, env, &nmd, 1);
47864 +           if ( env->CheckSum  != csum ) {
47865 +               int f;
47866 +       
47867 +
47868 +               printk("Check Sum Error: env(0x%x,0x%x) data(0x%x,0x%x)\n", ((csum >> 16) & 0x7FFF), ((env->CheckSum >> 16) & 0x7FFF), 
47869 +                      (csum & 0xFFFF),  (env->CheckSum & 0xFFFF));
47870 +               printk("Check Sum Error: Sent : NodeId %u Range 0x%x Service %u Version 0x%x Attr 0x%x\n", env->NodeId, env->Range, rxd->Rcvr->Service, env->Version, env->Attr);
47871 +               printk("Check Sum Error: Sent : Xid Generation 0x%x Handle 0x%x Unique 0x%llx\n", env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);
47872 +               printk("Check Sum Error: Sent : TxdRail 0x%x TxdMain nmd_addr 0x%x  nmd_len %u  nmd_attr 0x%x\n",  env->TxdRail, env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr ); 
47873 +               printk("Check Sum Error: Sent : nFrags %d \n", env->nFrags);
47874 +               for(f=0;f<env->nFrags;f++)
47875 +                   printk("Check Sum Error: Sent (%d): nmd_addr 0x%x   nmd_len %u   nmd_attr 0x%x\n", f,
47876 +                          env->Frags[f].nmd_addr, env->Frags[f].nmd_len, env->Frags[f].nmd_attr);
47877 +               printk("Check Sum Error: Recv : nmd_addr 0x%x   nmd_len %u   nmd_attr 0x%x\n",
47878 +                      nmd.nmd_addr, nmd.nmd_len, nmd.nmd_attr);
47879 +
47880 +           }
47881 +       }
47882 +       ep_rxd_received_now(rxd);
47883 +
47884 +       spin_lock_irqsave (&subsys->CheckSumDescLock, flags);
47885 +    }
47886 +    spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags);
47887 +}
47888 +#endif
47889 +
47890 +/*
47891 + * Local variables:
47892 + * c-file-style: "stroustrup"
47893 + * End:
47894 + */
47895 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsRx.c
47896 ===================================================================
47897 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcommsRx.c     2004-02-23 16:02:56.000000000 -0500
47898 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsRx.c  2005-07-28 14:52:52.866676000 -0400
47899 @@ -0,0 +1,1205 @@
47900 +/*
47901 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
47902 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
47903 + *
47904 + *    For licensing information please see the supplied COPYING file
47905 + *
47906 + */
47907 +
47908 +#ident "@(#)$Id: epcommsRx.c,v 1.27.2.5 2004/11/30 12:02:16 mike Exp $ $Name: QSNETMODULES-4-31_20050321 $"
47909 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx.c,v $*/
47910 +
47911 +#include <qsnet/kernel.h>
47912 +#include <elan/kcomm.h>
47913 +#include <elan/epsvc.h>
47914 +#include <elan/epcomms.h>
47915 +
47916 +#include "debug.h"
47917 +
47918 +unsigned int ep_rxd_lowat = 5;
47919 +
47920 +static int
47921 +AllocateRxdBlock (EP_RCVR *rcvr, EP_ATTRIBUTE attr, EP_RXD **rxdp)
47922 +{
47923 +    EP_RXD_BLOCK *blk;
47924 +    EP_RXD       *rxd;
47925 +    EP_RXD_MAIN  *pRxdMain;
47926 +    int                  i;
47927 +    unsigned long flags;
47928 +
47929 +    KMEM_ZALLOC (blk, EP_RXD_BLOCK *, sizeof (EP_RXD_BLOCK), ! (attr & EP_NO_SLEEP));
47930 +
47931 +    if (blk == NULL)
47932 +       return (ENOMEM);
47933 +
47934 +    if ((pRxdMain = ep_shared_alloc_main (rcvr->Subsys->Subsys.Sys, EP_RXD_MAIN_SIZE * EP_NUM_RXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0)
47935 +    {
47936 +       KMEM_FREE (blk, sizeof (EP_RXD_BLOCK));
47937 +       return (ENOMEM);
47938 +    }
47939 +    
47940 +    for (rxd = &blk->Rxd[0], i = 0; i < EP_NUM_RXD_PER_BLOCK; i++, rxd++)
47941 +    {
47942 +       rxd->Rcvr        = rcvr;
47943 +       rxd->RxdMain     = pRxdMain;
47944 +
47945 +       ep_nmd_subset (&rxd->NmdMain, &blk->NmdMain, (i * EP_RXD_MAIN_SIZE), EP_RXD_MAIN_SIZE);
47946 +
47947 +       /* move onto next descriptor */
47948 +       pRxdMain = (EP_RXD_MAIN *) ((unsigned long) pRxdMain + EP_RXD_MAIN_SIZE);
47949 +    }
47950 +
47951 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
47952 +
47953 +    list_add  (&blk->Link, &rcvr->DescBlockList);
47954 +
47955 +    rcvr->TotalDescCount += EP_NUM_RXD_PER_BLOCK;
47956 +
47957 +    for (i = rxdp ? 1 : 0; i < EP_NUM_RXD_PER_BLOCK; i++)
47958 +    {
47959 +       
47960 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47961 +       INIT_LIST_HEAD (&blk->Rxd[i].CheckSumLink);
47962 +#endif
47963 +
47964 +       list_add (&blk->Rxd[i].Link, &rcvr->FreeDescList);
47965 +       
47966 +       rcvr->FreeDescCount++;
47967 +
47968 +       if (rcvr->FreeDescWanted)
47969 +       {
47970 +           rcvr->FreeDescWanted--;
47971 +           kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock);
47972 +       }
47973 +    }
47974 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
47975 +    
47976 +    if (rxdp)
47977 +    {
47978 +
47979 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47980 +       INIT_LIST_HEAD (&blk->Rxd[0].CheckSumLink);
47981 +#endif
47982 +              
47983 +       *rxdp = &blk->Rxd[0];
47984 +    }
47985 +    return (ESUCCESS);
47986 +}
47987 +
47988 +static void
47989 +FreeRxdBlock (EP_RCVR *rcvr, EP_RXD_BLOCK *blk)
47990 +{
47991 +    unsigned long flags;
47992 +
47993 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
47994 +
47995 +    list_del (&blk->Link);
47996 +
47997 +    rcvr->TotalDescCount -= EP_NUM_RXD_PER_BLOCK;
47998 +    rcvr->FreeDescCount -= EP_NUM_RXD_PER_BLOCK;
47999 +
48000 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
48001 +
48002 +    ep_shared_free_main (rcvr->Subsys->Subsys.Sys, &blk->NmdMain);
48003 +    KMEM_FREE (blk, sizeof (EP_RXD_BLOCK));
48004 +}
48005 +
48006 +static EP_RXD *
48007 +GetRxd (EP_RCVR *rcvr, EP_ATTRIBUTE attr)
48008 +{
48009 +    EP_RXD *rxd;
48010 +    unsigned long flags;
48011 +    int low_on_rxds;
48012 +
48013 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
48014 +
48015 +    while (list_empty (&rcvr->FreeDescList))
48016 +    {
48017 +       if (! (attr & EP_NO_ALLOC))
48018 +       {
48019 +           spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
48020 +
48021 +           if (AllocateRxdBlock (rcvr, attr, &rxd) == ESUCCESS)
48022 +               return (rxd);
48023 +
48024 +           spin_lock_irqsave (&rcvr->FreeDescLock, flags);
48025 +       }
48026 +
48027 +       if (attr & EP_NO_SLEEP)
48028 +       {
48029 +           IncrStat (rcvr->Subsys, NoFreeRxds);
48030 +           spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
48031 +
48032 +           ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48033 +           return (NULL);
48034 +       }
48035 +
48036 +       rcvr->FreeDescWanted++;
48037 +       kcondvar_wait (&rcvr->FreeDescSleep, &rcvr->FreeDescLock, &flags);
48038 +    }
48039 +
48040 +    rxd = list_entry (rcvr->FreeDescList.next, EP_RXD, Link);
48041 +
48042 +    list_del (&rxd->Link);
48043 +
48044 +    /* Wakeup the descriptor primer thread if there's not many left */
48045 +    low_on_rxds = (--rcvr->FreeDescCount < ep_rxd_lowat);
48046 +
48047 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
48048 +
48049 +    if (low_on_rxds)
48050 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48051 +
48052 +    return (rxd);
48053 +}
48054 +
48055 +static void
48056 +FreeRxd (EP_RCVR *rcvr, EP_RXD *rxd)
48057 +{
48058 +    unsigned long flags;
48059 +
48060 +    ASSERT (EP_XID_INVALID(rxd->MsgXid));
48061 +
48062 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
48063 +
48064 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
48065 +    ASSERT(list_empty(&rxd->CheckSumLink));
48066 +#endif
48067 +   
48068 +    list_add (&rxd->Link, &rcvr->FreeDescList);
48069 +
48070 +    rcvr->FreeDescCount++;
48071 +
48072 +    if (rcvr->FreeDescWanted)                                  /* someone waiting for a receive */
48073 +    {                                                          /* descriptor, so wake them up */
48074 +       rcvr->FreeDescWanted--;
48075 +       kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock);
48076 +    }
48077 +    
48078 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
48079 +}
48080 +
48081 +int
48082 +ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr)
48083 +{
48084 +    EP_RCVR_RAIL *rcvrRail;
48085 +    EP_RXD       *rxd;
48086 +    int           rnum;
48087 +    unsigned long flags;
48088 +
48089 +    if ((rxd = GetRxd (rcvr, attr)) == NULL)
48090 +       return (ENOMEM);
48091 +
48092 +    rxd->Handler      = handler;
48093 +    rxd->Arg          = arg;
48094 +    rxd->Data         = *nmd;
48095 +    rxd->RxdMain->Len = EP_RXD_PENDING;
48096 +    
48097 +    spin_lock_irqsave (&rcvr->Lock, flags);
48098 +
48099 +    list_add_tail (&rxd->Link, &rcvr->ActiveDescList);
48100 +    
48101 +    if (EP_IS_PREFRAIL_SET(attr))
48102 +       rnum = EP_ATTR2PREFRAIL(attr);
48103 +    else 
48104 +       rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd));
48105 +
48106 +    if (rnum < 0 || !(EP_NMD_RAILMASK(nmd) & EP_RAIL2RAILMASK(rnum) & rcvr->RailMask))
48107 +       rcvrRail = NULL;
48108 +    else
48109 +       rcvrRail = rcvr->Rails[rnum];
48110 +
48111 +    EPRINTF7 (DBG_RCVR,"ep_queue_receive: rxd=%p svc %d nmd=%08x,%d,%x rnum=%d rcvrRail=%p\n",
48112 +             rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, rnum, rcvrRail);
48113 +
48114 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
48115 +
48116 +    if (rcvrRail == NULL || !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail))
48117 +    {
48118 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
48119 +
48120 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48121 +    }
48122 +
48123 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48124 +
48125 +    return (ESUCCESS);
48126 +}
48127 +
48128 +void
48129 +ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr)
48130 +{
48131 +    EP_RCVR      *rcvr = rxd->Rcvr;
48132 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
48133 +    int           rnum = ep_pickRail(EP_NMD_RAILMASK(&rxd->Data));
48134 +    EP_RCVR_RAIL *rcvrRail;
48135 +    unsigned long flags;
48136 +
48137 +    ASSERT (rxd->RxdRail == NULL);
48138 +
48139 +    EPRINTF5 (DBG_RCVR,"ep_requeue_receive: rxd=%p svc %d nmd=%08x,%d,%x\n", 
48140 +             rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
48141 +
48142 +    rxd->Handler      = handler;
48143 +    rxd->Arg          = arg;
48144 +    rxd->Data         = *nmd;
48145 +    rxd->RxdMain->Len = EP_RXD_PENDING;
48146 +    
48147 +    spin_lock_irqsave (&rcvr->Lock, flags);
48148 +    
48149 +    list_add_tail (&rxd->Link, &rcvr->ActiveDescList);
48150 +
48151 +    /*
48152 +     * Rail selection: if they've asked for a particular rail, then use it, otherwise if
48153 +     *                 the rail it was last received on is mapped for the nmd and is available
48154 +     *                 then use that one, otherwise pick one that is mapped by the nmd.
48155 +     */
48156 +    if (EP_IS_PREFRAIL_SET(attr))
48157 +       rnum = EP_ATTR2PREFRAIL(attr);
48158 +    
48159 +    if (rnum < 0 || ! (EP_RAIL2RAILMASK (rnum) & EP_NMD_RAILMASK(nmd) & ep_rcvr_availrails (rcvr)))
48160 +       rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd));
48161 +
48162 +    if (rnum < 0)
48163 +       rcvrRail = NULL;
48164 +    else
48165 +    {
48166 +       rcvrRail = rcvr->Rails[rnum];
48167 +
48168 +       if (! (EP_NMD_RAILMASK(&rxd->Data) & EP_RAIL2RAILMASK(rnum)) && ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) < 0)
48169 +           rcvrRail = NULL;
48170 +    }
48171 +
48172 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
48173 +
48174 +    if (rcvrRail == NULL || !EP_RCVR_OP(rcvrRail, QueueRxd) (rxd, rcvrRail))
48175 +    {
48176 +       EPRINTF1 (DBG_RCVR, "ep_requeue_receive: rcvrRail=%p - setting unbound\n", rcvrRail);
48177 +
48178 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
48179 +
48180 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48181 +    }
48182 +
48183 +    if (rcvr->CleanupWaiting)
48184 +       kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
48185 +    rcvr->CleanupWaiting = 0;
48186 +
48187 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48188 +}
48189 +
48190 +void
48191 +
48192 +ep_complete_receive (EP_RXD *rxd)
48193 +{
48194 +    EP_RCVR *rcvr = rxd->Rcvr;
48195 +    unsigned long flags;
48196 +
48197 +    ASSERT (rxd->RxdRail == NULL && rxd->State == EP_RXD_COMPLETED);
48198 +
48199 +    FreeRxd (rcvr, rxd);
48200 +
48201 +    /* if we're waiting for cleanup, then wake them up */
48202 +    spin_lock_irqsave (&rcvr->Lock, flags);
48203 +    if (rcvr->CleanupWaiting)
48204 +       kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
48205 +    rcvr->CleanupWaiting = 0;
48206 +    spin_unlock_irqrestore (&rcvr->Lock, flags);   
48207 +}
48208 +
48209 +int
48210 +ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *local, EP_NMD *remote, int nFrags)
48211 +{
48212 +    EP_RCVR      *rcvr = rxd->Rcvr;
48213 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
48214 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
48215 +    unsigned long flags;
48216 +
48217 +    spin_lock_irqsave (&rcvr->Lock, flags);
48218 +    
48219 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
48220 +    {
48221 +       EPRINTF2 (DBG_RCVR, "ep_rpc_put: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
48222 +       
48223 +       /* rxd no longer on active list - just free it */
48224 +       /* off and return an error */
48225 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
48226 +       
48227 +       return EP_CONN_RESET;
48228 +    }
48229 +    else
48230 +    {
48231 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
48232 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
48233 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
48234 +       EP_RAIL          *rail      = commsRail->Rail;
48235 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
48236 +       int               i;
48237 +       
48238 +       /* Attempt to ensure that the local nmds are mapped */
48239 +       for (i = 0; i < nFrags; i++)
48240 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
48241 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
48242 +    
48243 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
48244 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
48245 +       {
48246 +           rxd->State = EP_RXD_PUT_ACTIVE;
48247 +
48248 +           EP_RCVR_OP(rcvrRail, RpcPut) (rxd, local, remote, nFrags);
48249 +       }
48250 +       else
48251 +       {
48252 +           /* RPC completion cannot progress - either node is no longer connected on this 
48253 +            * rail or some of the source/destination NMDs are not mapped on this rail.
48254 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
48255 +           EPRINTF4 (DBG_RCVR, "%s: ep_rpc_put: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd,
48256 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
48257 +
48258 +           rxd->State = EP_RXD_PUT_STALLED;
48259 +
48260 +           if (nodeRail->State == EP_NODE_CONNECTED)
48261 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48262 +       }
48263 +
48264 +       /* install the handler */
48265 +       rxd->Handler = handler;
48266 +       rxd->Arg     = arg;
48267 +       
48268 +       /* store the arguments */
48269 +       rxd->nFrags = nFrags;
48270 +       for (i = 0; i < nFrags; i++)
48271 +       {
48272 +           rxd->Local[i]  = local[i];
48273 +           rxd->Remote[i] = remote[i];
48274 +       }
48275 +    }
48276 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48277 +
48278 +    return EP_SUCCESS;
48279 +}
48280 +
48281 +int
48282 +ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *remote, EP_NMD *local, int nFrags)
48283 +{
48284 +    EP_RCVR      *rcvr = rxd->Rcvr;
48285 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
48286 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
48287 +    unsigned long flags;
48288 +
48289 +    spin_lock_irqsave (&rcvr->Lock, flags);
48290 +    
48291 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
48292 +    {
48293 +       EPRINTF2 (DBG_RCVR, "ep_rpc_get: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
48294 +       
48295 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
48296 +
48297 +       return EP_CONN_RESET;
48298 +    }
48299 +    else
48300 +    {
48301 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
48302 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
48303 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
48304 +       EP_RAIL          *rail      = commsRail->Rail;
48305 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
48306 +       int               i;
48307 +       
48308 +       /* Attempt to ensure that the local nmds are mapped */
48309 +       for (i = 0; i < nFrags; i++)
48310 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
48311 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
48312 +
48313 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
48314 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
48315 +       {
48316 +           rxd->State = EP_RXD_GET_ACTIVE;
48317 +
48318 +           EP_RCVR_OP (rcvrRail, RpcGet) (rxd, local, remote, nFrags);
48319 +       }
48320 +       else
48321 +       {
48322 +           /* RPC completion cannot progress - either node is no longer connected on this 
48323 +            * rail or some of the source/destination NMDs are not mapped on this rail.
48324 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
48325 +           EPRINTF4 (DBG_RCVR, "%s: ep_rpc_get: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, 
48326 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
48327 +           
48328 +           rxd->State = EP_RXD_GET_STALLED;
48329 +
48330 +           if (nodeRail->State == EP_NODE_CONNECTED)
48331 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48332 +       }
48333 +
48334 +       /* install the handler */
48335 +       rxd->Handler = handler;
48336 +       rxd->Arg     = arg;
48337 +       
48338 +       /* store the arguments */
48339 +       rxd->nFrags = nFrags;
48340 +       for (i = 0; i < nFrags; i++)
48341 +       {
48342 +           rxd->Local[i]  = local[i];
48343 +           rxd->Remote[i] = remote[i];
48344 +       }
48345 +    }
48346 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48347 +    
48348 +    return EP_SUCCESS;
48349 +}
48350 +
48351 +int
48352 +ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, EP_NMD *local, EP_NMD *remote, int nFrags)
48353 +{
48354 +    EP_RCVR      *rcvr = rxd->Rcvr;
48355 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
48356 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
48357 +    unsigned long flags;
48358 +
48359 +    spin_lock_irqsave (&rcvr->Lock, flags);
48360 +
48361 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
48362 +    {
48363 +       EPRINTF2 (DBG_RCVR, "ep_complete_rpc: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
48364 +       
48365 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
48366 +       return EP_CONN_RESET;
48367 +    }
48368 +    else
48369 +    {
48370 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
48371 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
48372 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
48373 +       EP_RAIL          *rail      = commsRail->Rail;
48374 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
48375 +       int               i;
48376 +
48377 +       if (blk == NULL)
48378 +           bzero (&rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK));
48379 +       else
48380 +           bcopy (blk, &rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK));
48381 +
48382 +       /* Attempt to ensure that the local nmds are mapped */
48383 +       for (i = 0; i < nFrags; i++)
48384 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
48385 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
48386 +
48387 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
48388 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
48389 +       {
48390 +           rxd->State = EP_RXD_COMPLETE_ACTIVE;
48391 +
48392 +           EP_RCVR_OP (rcvrRail, RpcComplete) (rxd, local, remote, nFrags);
48393 +       }
48394 +       else
48395 +       {
48396 +           /* RPC completion cannot progress - either node is no longer connected on this 
48397 +            * rail or some of the source/destination NMDs are not mapped on this rail.
48398 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
48399 +           EPRINTF4 (DBG_RCVR, "%s: ep_complete_rpc: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, 
48400 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
48401 +
48402 +           rxd->State = EP_RXD_COMPLETE_STALLED;
48403 +
48404 +           if (nodeRail->State == EP_NODE_CONNECTED)
48405 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
48406 +       }
48407 +
48408 +       /* install the handler */
48409 +       rxd->Handler = handler;
48410 +       rxd->Arg     = arg;
48411 +       
48412 +       /* store the arguments */
48413 +       rxd->nFrags = nFrags;
48414 +       for (i = 0; i < nFrags; i++)
48415 +       {
48416 +           rxd->Local[i]  = local[i];
48417 +           rxd->Remote[i] = remote[i];
48418 +       }
48419 +    }
48420 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48421 +       
48422 +    return (ESUCCESS);
48423 +}
48424 +
48425 +/* functions for accessing fields of rxds */
48426 +void            *ep_rxd_arg(EP_RXD *rxd)               { return (rxd->Arg); }
48427 +int              ep_rxd_len(EP_RXD *rxd)               { return (rxd->RxdMain->Len); }
48428 +EP_STATUS       ep_rxd_status(EP_RXD *rxd)             { return (rxd->RxdMain->Len < 0 ? rxd->RxdMain->Len : EP_SUCCESS); }
48429 +int              ep_rxd_isrpc(EP_RXD *rxd)             { return (EP_IS_RPC(rxd->RxdMain->Envelope.Attr) != 0); }
48430 +EP_ENVELOPE     *ep_rxd_envelope(EP_RXD *rxd)          { return (&rxd->RxdMain->Envelope); }
48431 +EP_PAYLOAD      *ep_rxd_payload(EP_RXD *rxd)           { return (EP_HAS_PAYLOAD(rxd->RxdMain->Envelope.Attr) ? &rxd->RxdMain->Payload : NULL); }
48432 +int              ep_rxd_node(EP_RXD *rxd)              { return (rxd->RxdMain->Envelope.NodeId); }
48433 +EP_STATUSBLK    *ep_rxd_statusblk(EP_RXD *rxd)                 { return (&rxd->RxdMain->StatusBlk); }
48434 +EP_RAILMASK      ep_rxd_railmask(EP_RXD *rxd)          { return (rxd->Data.nmd_attr); }
48435 +
48436 +static void
48437 +ProcessNmdMapResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
48438 +{
48439 +    EP_RXD_RAIL  *rxdRail  = rxd->RxdRail;
48440 +    EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
48441 +    EP_RAIL      *rail     = rcvrRail->CommsRail->Rail;
48442 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[rxd->RxdMain->Envelope.NodeId];
48443 +    int           i;
48444 +
48445 +    ASSERT (msg->Body.MapNmd.nFrags == rxd->nFrags);
48446 +    
48447 +    for (i = 0; i < rxd->nFrags; i++)
48448 +       rxd->Remote[i] = msg->Body.MapNmd.Nmd[i];
48449 +    
48450 +    if (nodeRail->State == EP_NODE_CONNECTED &&        /* node is still connected on this rail */
48451 +       (ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* NMDs are now valid for this rail */
48452 +    {
48453 +       switch (rxd->State)
48454 +       {
48455 +       case EP_RXD_PUT_STALLED:
48456 +           rxd->State = EP_RXD_PUT_ACTIVE;
48457 +
48458 +           EP_RCVR_OP(rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48459 +           break;
48460 +
48461 +       case EP_RXD_GET_STALLED:
48462 +           rxd->State = EP_RXD_GET_ACTIVE;
48463 +
48464 +           EP_RCVR_OP(rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48465 +           break;
48466 +           
48467 +       case EP_RXD_COMPLETE_STALLED:
48468 +           rxd->State = EP_RXD_COMPLETE_ACTIVE;
48469 +
48470 +           EP_RCVR_OP(rcvrRail, RpcComplete) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48471 +           break;
48472 +
48473 +       default:
48474 +           panic ("ProcessNmdMapResponse: XID match but rxd in invalid state\n");
48475 +           break;
48476 +       }
48477 +
48478 +       rxd->NextRunTime = 0;
48479 +    }
48480 +    else
48481 +       ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr, rxd);
48482 +}
48483 +
48484 +static void
48485 +ProcessFailoverResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
48486 +{
48487 +    /* XXXX - TBD */
48488 +#ifdef NOTYET
48489 +    EP_COMMS_SUBSYS *subsys   = rcvr->Subsys;
48490 +    EP_RXD_RAIL     *rxdRail  = rxd->RxdRail;
48491 +    EP_RCVR_RAIL    *rcvrRail = rxdRail->RcvrRail;
48492 +    EP_RAIL         *rail     = rcvrRail->CommsRail->Rail;
48493 +    EP_RCVR_RAIL    *nRcvrRail;
48494 +    EP_RXD_RAIL     *nRxdRail;
48495 +
48496 +    ASSERT (rxd->RxdMain->Envelope.Attr & EP_RPC);
48497 +
48498 +    EPRINTF6 (DBG_RCVR, "ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p Xid=%016llx state %x.%x - txd on rail %d\n", rcvr, rxd, 
48499 +             rxd->MsgXid.Unique, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, msg->Body.FailoverTxd.Rail);
48500 +
48501 +    if ((nRcvrRail = rcvr->Rails[msg->Body.FailoverTxd.Rail]) == NULL ||
48502 +       (nRcvrRail->Rcvr->RailMask & EP_RAIL2RAILMASK (rail->Number)) == NULL)
48503 +    {
48504 +       ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr,rxd);
48505 +       return;
48506 +    }
48507 +
48508 +
48509 +    nRxdRail = EP_RCVR_OP (nrcvrRail, GetRxd) (rcvr, nRcvrRail);
48510 +
48511 +
48512 +    /* If the RPC was in progress, then rollback and mark it as flagged, 
48513 +     * this will then get treated as though the NMDs were not mapped
48514 +     * for the rail when the user initiated the operation.
48515 +     */
48516 +    switch (rxdRail->RxdMain->DataEvent)
48517 +    {
48518 +    case EP_EVENT_ACTIVE|EP_RXD_PHASE_PUT:
48519 +    case EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT:
48520 +       ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
48521 +               rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
48522 +       
48523 +       nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT;
48524 +       nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
48525 +       break;
48526 +
48527 +    case EP_EVENT_ACTIVE|EP_RXD_PHASE_GET:
48528 +    case EP_EVENT_FLAGGED|EP_RXD_PHASE_GET:
48529 +       ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
48530 +               rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
48531 +       
48532 +       nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_GET;
48533 +       nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
48534 +       break;
48535 +
48536 +    case EP_EVENT_PRIVATE:
48537 +       switch (rxdRail->RxdMain->DoneEvent)
48538 +       {
48539 +       case EP_EVENT_ACTIVE|EP_RXD_PHASE_COMPLETE:
48540 +       case EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE:
48541 +           nRxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
48542 +           nRxdRail->RxdMain->DoneEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE;
48543 +           break;
48544 +
48545 +       case EP_EVENT_PENDING:
48546 +           break;
48547 +
48548 +       default:
48549 +           panic ("ep_rcvr_xid_msg_handler: rxd in invalid state\n");
48550 +       }
48551 +       break;
48552 +
48553 +    default:
48554 +       panic ("ep_rcvr_xid_msg_handler: rxd in invalid staten");
48555 +    }
48556 +    
48557 +    UnbindRxdFromRail (rxd, rxdRail);
48558 +
48559 +    /* Mark rxdRail as no longer active */
48560 +    rxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
48561 +    rxdRail->RxdMain->DoneEvent = EP_EVENT_PRIVATE;
48562 +
48563 +    sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
48564 +    sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
48565 +    
48566 +    FreeRxdRail (rcvrRail, rxdRail);
48567 +
48568 +    BindRxdToRail (rxd, nRxdRail);
48569 +
48570 +    ep_kthread_schedule (&subsys->Thread, lbolt);
48571 +#endif
48572 +}
48573 +
48574 +void
48575 +ep_rcvr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg)
48576 +{
48577 +    EP_RCVR          *rcvr = (EP_RCVR *) arg;
48578 +    struct list_head *el;
48579 +    unsigned long     flags;
48580 +
48581 +    spin_lock_irqsave (&rcvr->Lock, flags);
48582 +    list_for_each (el, &rcvr->ActiveDescList) {
48583 +       EP_RXD *rxd = list_entry (el,EP_RXD, Link);
48584 +
48585 +       if (EP_XIDS_MATCH (msg->Hdr.Xid, rxd->MsgXid))
48586 +       {
48587 +           EP_INVALIDATE_XID (rxd->MsgXid);
48588 +
48589 +           switch (msg->Hdr.Type)
48590 +           {
48591 +           case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE:
48592 +               ProcessNmdMapResponse (rcvr, rxd, msg);
48593 +               break;
48594 +
48595 +           case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE:
48596 +               ProcessFailoverResponse (rcvr, rxd, msg);
48597 +               break;
48598 +
48599 +           default:
48600 +               panic ("ep_rcvr_xid_msg_handler: XID match but invalid message type\n");
48601 +           }
48602 +       }
48603 +    }
48604 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48605 +}
48606 +
48607 +
48608 +EP_RCVR *
48609 +ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvs)
48610 +{
48611 +    EP_COMMS_SUBSYS  *subsys;
48612 +    EP_RCVR          *rcvr;
48613 +    struct list_head *el;
48614 +    extern int portals_envelopes;
48615 +
48616 +    if (portals_envelopes && (svc == EP_MSG_SVC_PORTALS_SMALL || svc == EP_MSG_SVC_PORTALS_LARGE))
48617 +    {
48618 +       printk ("ep: use %d envelopes rather than %d for portals %s message service\n", sys->Position.pos_nodes * 16, nenvs,
48619 +               svc == EP_MSG_SVC_PORTALS_SMALL ? "small" : "large");
48620 +
48621 +       nenvs = portals_envelopes;
48622 +    }
48623 +
48624 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL)
48625 +       return (NULL);
48626 +
48627 +    KMEM_ZALLOC (rcvr, EP_RCVR *, sizeof (EP_RCVR), 1);
48628 +
48629 +    if (rcvr == NULL)
48630 +       return (NULL);
48631 +
48632 +    rcvr->Subsys            = subsys;
48633 +    rcvr->Service           = svc;
48634 +    rcvr->InputQueueEntries = nenvs;
48635 +    rcvr->FreeDescCount     = 0;
48636 +    rcvr->TotalDescCount    = 0;
48637 +    rcvr->ForwardRxdCount   = 0;
48638 +
48639 +    spin_lock_init (&rcvr->Lock);
48640 +    INIT_LIST_HEAD (&rcvr->ActiveDescList);
48641 +
48642 +    kcondvar_init (&rcvr->CleanupSleep);
48643 +    kcondvar_init (&rcvr->FreeDescSleep);
48644 +    spin_lock_init (&rcvr->FreeDescLock);
48645 +    INIT_LIST_HEAD (&rcvr->FreeDescList);
48646 +    INIT_LIST_HEAD (&rcvr->DescBlockList);
48647 +
48648 +    ep_xid_cache_init (sys, &rcvr->XidCache);
48649 +
48650 +    rcvr->XidCache.MessageHandler = ep_rcvr_xid_msg_handler;
48651 +    rcvr->XidCache.Arg            = rcvr;
48652 +
48653 +    kmutex_lock (&subsys->Lock);
48654 +    /* See if this service is already in use */
48655 +    list_for_each (el, &subsys->Receivers) {
48656 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
48657 +
48658 +       if (rcvr->Service == svc)
48659 +       {
48660 +           KMEM_FREE (rcvr, sizeof (EP_RCVR));
48661 +           kmutex_unlock (&subsys->Lock);   
48662 +           return NULL;
48663 +       }
48664 +    }
48665 +    
48666 +    
48667 +    list_add_tail (&rcvr->Link, &subsys->Receivers);
48668 +
48669 +    ep_procfs_rcvr_add(rcvr);
48670 +
48671 +    /* Now add all rails which are already started */
48672 +    list_for_each (el, &subsys->Rails) { 
48673 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
48674 +
48675 +       EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail);
48676 +    }
48677 +    kmutex_unlock (&subsys->Lock);   
48678 +
48679 +    ep_mod_inc_usecount();
48680 +
48681 +    return (rcvr);
48682 +}
48683 +
48684 +void
48685 +ep_free_rcvr (EP_RCVR *rcvr)
48686 +{
48687 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
48688 +    EP_SYS           *sys    = subsys->Subsys.Sys;
48689 +    struct list_head  list;
48690 +    struct list_head *el,*nel;
48691 +    unsigned long flags;
48692 +    
48693 +    kmutex_lock (&subsys->Lock);
48694 +    list_for_each (el, &subsys->Rails) { 
48695 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
48696 +       
48697 +       EP_RAIL_OP (commsRail, Rcvr.DelRail) (rcvr, commsRail);
48698 +    }
48699 +
48700 +    ep_procfs_rcvr_del(rcvr);
48701 +
48702 +    list_del (&rcvr->Link);
48703 +    kmutex_unlock (&subsys->Lock);
48704 +
48705 +    INIT_LIST_HEAD (&list);
48706 +
48707 +    /* abort all rxds - should not be bound to a rail */
48708 +    spin_lock_irqsave (&rcvr->Lock, flags);   
48709 +    for (;;)
48710 +    {
48711 +       if (! list_empty (&rcvr->ActiveDescList))
48712 +       {
48713 +           list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
48714 +               EP_RXD *rxd = list_entry (el, EP_RXD, Link);
48715 +               
48716 +               ASSERT (rxd->RxdRail == NULL);
48717 +               ASSERT (rxd->RxdMain->Len == EP_RXD_PENDING);
48718 +               
48719 +               rxd->State = EP_RXD_COMPLETED;
48720 +               rxd->RxdMain->Len = EP_SHUTDOWN;
48721 +               
48722 +               list_del (&rxd->Link);
48723 +               list_add_tail (&rxd->Link, &list);
48724 +           }
48725 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
48726 +           
48727 +           while (! list_empty (&list))
48728 +           {
48729 +               EP_RXD *rxd = list_entry (list.next, EP_RXD, Link);
48730 +               
48731 +               list_del (&rxd->Link);
48732 +               
48733 +               if (rxd->Handler) 
48734 +                   rxd->Handler (rxd);
48735 +           }
48736 +           spin_lock_irqsave (&rcvr->Lock, flags);   
48737 +           continue;
48738 +       }
48739 +
48740 +       if (rcvr->FreeDescCount == rcvr->TotalDescCount)
48741 +           break;
48742 +
48743 +       rcvr->CleanupWaiting++;
48744 +       kcondvar_wait (&rcvr->CleanupSleep, &rcvr->Lock, &flags);
48745 +    }
48746 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48747 +
48748 +    /* must all be in free list */
48749 +    ASSERT( rcvr->FreeDescCount ==  rcvr->TotalDescCount);
48750 +
48751 +    while (! list_empty(& rcvr->DescBlockList) )
48752 +       FreeRxdBlock (rcvr, list_entry (rcvr->DescBlockList.next, EP_RXD_BLOCK, Link));
48753 +
48754 +    /* had better be all gone now */
48755 +    ASSERT((rcvr->FreeDescCount == 0) && (rcvr->TotalDescCount == 0));
48756 +
48757 +    ep_xid_cache_destroy (sys, &rcvr->XidCache);
48758
48759 +    spin_lock_destroy (&rcvr->Lock);
48760 +    KMEM_FREE (rcvr, sizeof (EP_RCVR));
48761 +
48762 +    ep_mod_dec_usecount();
48763 +}
48764 +
48765 +EP_RXD *
48766 +StealRxdFromOtherRail (EP_RCVR *rcvr)
48767 +{
48768 +    EP_RXD          *rxd;
48769 +    int               i;
48770 +       
48771 +    /* looking at the rcvr railmask to find a rail to try to steal rxd from */
48772 +    for (i = 0; i < EP_MAX_RAILS; i++) 
48773 +       if (rcvr->RailMask & (1 << i) ) 
48774 +           if ((rxd = EP_RCVR_OP (rcvr->Rails[i], StealRxd) (rcvr->Rails[i])) != NULL)
48775 +               return rxd;
48776 +
48777 +    return NULL;
48778 +}
48779 +
48780 +long
48781 +CheckUnboundRxd (EP_RCVR *rcvr, EP_RXD *rxd, long nextRunTime)
48782 +{
48783 +    EP_SYS       *sys = rcvr->Subsys->Subsys.Sys;
48784 +    EP_RCVR_RAIL *rcvrRail;
48785 +    int           rnum;
48786 +    
48787 +    if ((rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(&rxd->Data))) < 0)
48788 +       rnum = ep_rcvr_prefrail (rcvr, ep_rcvr_availrails (rcvr));
48789 +    
48790 +    if ( rnum < 0 )    {
48791 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
48792 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
48793 +       
48794 +       return (nextRunTime);
48795 +    }
48796 +
48797 +    ASSERT ( rnum >= 0 );
48798 +
48799 +    rcvrRail = rcvr->Rails[rnum];
48800 +
48801 +    ASSERT ( rcvrRail != NULL);
48802 +
48803 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
48804 +
48805 +    if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rnum)) &&                   /* not mapped already and */
48806 +        ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) == 0) ||            /* failed mapping, or */
48807 +       !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail))                               /* failed to queue */
48808 +    {
48809 +       ASSERT (rxd->RxdRail == NULL);
48810 +
48811 +       EPRINTF4 (DBG_RCVR,"CheckUnboundRxd: rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n", rcvr, rxd, rnum, rcvrRail);
48812 +
48813 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
48814 +       
48815 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
48816 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
48817 +    }
48818 +
48819 +    return (nextRunTime);
48820 +}
48821 +
48822 +int
48823 +CheckRxdNmdsMapped (EP_RCVR *rcvr, EP_RXD *rxd)
48824 +{
48825 +    EP_RXD_RAIL *rxdRail = rxd->RxdRail;
48826 +    EP_RXD_MAIN *rxdMain = rxd->RxdMain;
48827 +    EP_ENVELOPE *env     = &rxdMain->Envelope;
48828 +    EP_SYS      *sys     = rcvr->Subsys->Subsys.Sys;
48829 +    EP_RAIL     *rail    = rxdRail->RcvrRail->CommsRail->Rail;
48830 +    int                 i;
48831 +
48832 +    /* Try and map the local NMDs before checking to see if we can proceed */
48833 +    if (! (ep_nmd2railmask (rxd->Local, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))
48834 +    {
48835 +       EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Local NMDs not mapped\n", rail->Name, rcvr, rxd);
48836 +       
48837 +       for (i = 0; i < rxd->nFrags; i++)
48838 +           if (! (EP_NMD_RAILMASK(&rxd->Local[i]) & EP_RAIL2RAILMASK(rail->Number)))
48839 +               if (ep_nmd_map_rails (sys, &rxd->Local[i], EP_RAIL2RAILMASK(rail->Number)))
48840 +                   rxd->NextRunTime = lbolt + RESOURCE_RETRY_TIME;
48841 +    }
48842 +    
48843 +    /* Try and map remote NMDs if they are not valid for this rail */
48844 +    if (! (ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))
48845 +    {
48846 +       EP_MANAGER_MSG_BODY msgBody;
48847 +
48848 +       EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Remote NMDs not mapped\n", rail->Name, rcvr, rxd);
48849 +
48850 +       if (EP_XID_INVALID(rxd->MsgXid))
48851 +           rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
48852 +
48853 +       msgBody.MapNmd.nFrags   = rxd->nFrags;
48854 +       msgBody.MapNmd.Railmask = EP_RAIL2RAILMASK (rail->Number);
48855 +       for (i = 0; i < rxd->nFrags; i++)
48856 +           msgBody.MapNmd.Nmd[i] = rxd->Remote[i];
48857 +
48858 +       if (ep_send_message (rail, env->NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST, rxd->MsgXid, &msgBody) == 0)
48859 +           rxd->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
48860 +       else
48861 +           rxd->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
48862 +
48863 +       return 0;
48864 +    }
48865 +
48866 +    if ((ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)) != 0)
48867 +    {
48868 +       rxd->NextRunTime = 0;
48869 +       return 1;
48870 +    }
48871 +
48872 +    return 0;
48873 +}
48874 +
48875 +long
48876 +ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime)
48877 +{
48878 +    struct list_head *el, *nel;
48879 +    unsigned long     flags;
48880 +    int               i;
48881 +
48882 +    /* Check to see if we're low on rxds */
48883 +    if (rcvr->FreeDescCount < ep_rxd_lowat)
48884 +       AllocateRxdBlock (rcvr, 0, NULL);
48885 +
48886 +    for (i = 0; i < EP_MAX_RAILS; i++) 
48887 +       if (rcvr->RailMask & (1 << i) )
48888 +           nextRunTime = EP_RCVR_OP (rcvr->Rails[i], Check) (rcvr->Rails[i], nextRunTime);
48889 +
48890 +    /* See if we have any rxd's which need to be handled */
48891 +    spin_lock_irqsave (&rcvr->Lock, flags);
48892 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
48893 +       EP_RXD      *rxd     = list_entry (el, EP_RXD, Link);
48894 +       EP_RXD_MAIN *rxdMain = rxd->RxdMain;
48895 +       EP_ENVELOPE *env     = &rxdMain->Envelope;
48896 +       EP_RXD_RAIL *rxdRail = rxd->RxdRail;
48897 +
48898 +       if (rxdRail == NULL)
48899 +           nextRunTime = CheckUnboundRxd (rcvr, rxd, nextRunTime);
48900 +       else
48901 +       {
48902 +           EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
48903 +           EP_RAIL      *rail     = rcvrRail->CommsRail->Rail;
48904 +
48905 +           if (rxd->RxdMain->Len == EP_RXD_PENDING ||                          /* envelope not received yet */
48906 +               rail->Nodes[env->NodeId].State != EP_NODE_CONNECTED)            /* will be failing over */
48907 +               continue;
48908 +
48909 +           switch (rxd->State)
48910 +           {
48911 +           case EP_RXD_PUT_STALLED:
48912 +               if (CheckRxdNmdsMapped (rcvr, rxd))
48913 +               {
48914 +                   rxd->State = EP_RXD_PUT_ACTIVE;
48915 +
48916 +                   EP_RCVR_OP (rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48917 +               }
48918 +               break;
48919 +
48920 +           case EP_RXD_GET_STALLED:
48921 +               if (CheckRxdNmdsMapped (rcvr, rxd))
48922 +               {
48923 +                   rxd->State = EP_RXD_GET_ACTIVE;
48924 +
48925 +                   EP_RCVR_OP (rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48926 +               }
48927 +               break;
48928 +           
48929 +           case EP_RXD_COMPLETE_STALLED:
48930 +               if (CheckRxdNmdsMapped (rcvr, rxd))
48931 +               {
48932 +                   rxd->State = EP_RXD_COMPLETE_ACTIVE;
48933 +
48934 +                   EP_RCVR_OP (rcvrRail, RpcComplete)(rxd, rxd->Local, rxd->Remote, rxd->nFrags);
48935 +               }
48936 +               break;
48937 +           }
48938 +               
48939 +           if (rxd->NextRunTime && (nextRunTime == 0 || AFTER (nextRunTime, rxd->NextRunTime)))
48940 +               nextRunTime = rxd->NextRunTime;
48941 +       }
48942 +    }
48943 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
48944 +    
48945 +    return (nextRunTime);
48946 +}
48947 +
48948 +void
48949 +ep_display_rxd (DisplayInfo *di, EP_RXD *rxd)
48950 +{
48951 +    EP_RXD_MAIN *rxdMain  = rxd->RxdMain;
48952 +    EP_ENVELOPE *env      = &rxdMain->Envelope;
48953 +    EP_RXD_RAIL *rxdRail  = rxd->RxdRail;
48954 +    
48955 +    (di->func)(di->arg, "  RXD: %p State=%x RxdMain=%p(%x.%x.%x) Data=%x.%x.%x %s\n", rxd,
48956 +              rxd->State, rxd->RxdMain, rxd->NmdMain.nmd_addr, rxd->NmdMain.nmd_len,
48957 +              rxd->NmdMain.nmd_attr, rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr,
48958 +              rxd->RxdMain->Len == EP_RXD_PENDING ? "Pending" : "Active");
48959 +    (di->func)(di->arg, "      NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d XID=%08x.%08x.%016llx\n",
48960 +              env->NodeId,  EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail, env->TxdMain.nmd_addr,
48961 +              env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags, env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);;
48962 +    (di->func)(di->arg, "      Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr);
48963 +    (di->func)(di->arg, "      Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr);
48964 +    (di->func)(di->arg, "      Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr);
48965 +    (di->func)(di->arg, "      Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr);
48966 +
48967 +    if (rxdRail) EP_RCVR_OP (rxdRail->RcvrRail, DisplayRxd) (di, rxdRail);
48968 +}
48969 +
48970 +void
48971 +ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full)
48972 +{
48973 +    int               freeCount    = 0;
48974 +    int                      activeCount  = 0;
48975 +    int                      pendingCount = 0;
48976 +    int                      railCounts[EP_MAX_RAILS];
48977 +    struct list_head *el;
48978 +    int               i;
48979 +    unsigned long     flags;
48980 +
48981 +    for (i = 0; i <EP_MAX_RAILS; i++)
48982 +       railCounts[i] = 0;
48983 +
48984 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
48985 +    list_for_each (el, &rcvr->FreeDescList)
48986 +       freeCount++;
48987 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
48988 +
48989 +    spin_lock_irqsave (&rcvr->Lock, flags);
48990 +    list_for_each (el, &rcvr->ActiveDescList) {
48991 +       EP_RXD      *rxd     = list_entry (el, EP_RXD, Link);
48992 +       EP_RXD_RAIL *rxdRail = rxd->RxdRail;
48993 +
48994 +       if (rxd->RxdMain->Len == EP_RXD_PENDING)
48995 +           pendingCount++;
48996 +       else
48997 +           activeCount++;
48998 +
48999 +       if (rxdRail)
49000 +           railCounts[rxdRail->RcvrRail->CommsRail->Rail->Number]++;
49001 +    }
49002 +
49003 +    (di->func)(di->arg, "RCVR: rcvr=%p number=%d\n", rcvr, rcvr->Service);
49004 +    (di->func)(di->arg, "      RXDS Free=%d (%d) Pending=%d Active=%d Rails=%d.%d.%d.%d\n",
49005 +              freeCount, rcvr->FreeDescCount, pendingCount, activeCount, railCounts[0], railCounts[1],
49006 +              railCounts[2], railCounts[3]);
49007 +
49008 +    for (i = 0; i < EP_MAX_RAILS; i++)
49009 +       if (rcvr->Rails[i] != NULL)
49010 +           EP_RCVR_OP (rcvr->Rails[i], DisplayRcvr) (di, rcvr->Rails[i]);
49011 +
49012 +    list_for_each (el, &rcvr->ActiveDescList) {
49013 +       EP_RXD *rxd = list_entry (el, EP_RXD, Link);
49014 +
49015 +       if (rxd->RxdMain->Len != EP_RXD_PENDING || full)
49016 +           ep_display_rxd (di, rxd);
49017 +    }
49018 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49019 +}
49020 +
49021 +void
49022 +ep_rxd_received_now(EP_RXD *rxd)
49023 +{
49024 +    EP_ENVELOPE   *env  = &rxd->RxdMain->Envelope;
49025 +    EP_RCVR       *rcvr = rxd->Rcvr;
49026 +    unsigned long  flags;
49027 +
49028 +    INC_STAT(rcvr->stats,rx);
49029 +    ADD_STAT(rcvr->stats,rx_len, rxd->RxdMain->Len);
49030 +
49031 +    if (rxd->RxdMain->Len < 0 || !EP_IS_MULTICAST(env->Attr))
49032 +    {
49033 +       rxd->Handler (rxd);
49034 +    }
49035 +    else
49036 +    {
49037 +       EPRINTF5 (DBG_RCVR, "ep_rxd_received: forward rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, 
49038 +                 rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd));
49039 +
49040 +       spin_lock_irqsave (&rcvr->Subsys->ForwardDescLock, flags);
49041 +       list_add_tail (&rxd->Link, &rcvr->Subsys->ForwardDescList);
49042 +       spin_unlock_irqrestore (&rcvr->Subsys->ForwardDescLock, flags);
49043 +       
49044 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
49045 +    } 
49046 +}
49047 +
49048 +#if defined(CONFIG_EP_NO_CHECK_SUM)
49049 +void
49050 +ep_rxd_received(EP_RXD *rxd) 
49051 +{
49052 +   ep_rxd_received_now(rxd);
49053 +}
49054 +
49055 +#else
49056 +
49057 +void
49058 +ep_rxd_received(EP_RXD *rxd) 
49059 +{
49060 +  EP_ENVELOPE   *env  = &rxd->RxdMain->Envelope;
49061 +
49062 +  if (env->CheckSum) 
49063 +      ep_rxd_queue_csum(rxd);
49064 +  else 
49065 +      ep_rxd_received_now(rxd);
49066 +}
49067 +
49068 +void
49069 +ep_rxd_queue_csum(EP_RXD *rxd)
49070 +{
49071 +    EP_RCVR       *rcvr = rxd->Rcvr;
49072 +    unsigned long flags;
49073 +
49074 +    EPRINTF5 (DBG_RCVR, "ep_rxd_queue_csum: rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, 
49075 +             rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd));
49076 +    
49077 +    spin_lock_irqsave (&rcvr->Subsys->CheckSumDescLock, flags);
49078 +    list_add_tail (&rxd->CheckSumLink, &rcvr->Subsys->CheckSumDescList);
49079 +    spin_unlock_irqrestore (&rcvr->Subsys->CheckSumDescLock, flags);
49080 +    
49081 +    ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
49082 +}
49083 +#endif
49084 +
49085 +void
49086 +ep_rcvr_fillout_stats(EP_RCVR *rcvr, char *str)
49087 +{
49088 +    sprintf(str+strlen(str),"Rx     %lu  %lu /sec\n",       GET_STAT_TOTAL(rcvr->stats,rx),      GET_STAT_PER_SEC(rcvr->stats,rx) );
49089 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr->stats,rx_len) / (1024*1024),  GET_STAT_PER_SEC(rcvr->stats,rx_len) / (1024*1024));
49090 +}
49091 +
49092 +void
49093 +ep_rcvr_rail_fillout_stats(EP_RCVR_RAIL *rcvr_rail, char *str)
49094 +{
49095 +    sprintf(str+strlen(str),"Rx     %lu  %lu /sec\n",       GET_STAT_TOTAL(rcvr_rail->stats,rx),      GET_STAT_PER_SEC(rcvr_rail->stats,rx) );
49096 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr_rail->stats,rx_len) / (1024*1024),  GET_STAT_PER_SEC(rcvr_rail->stats,rx_len) / (1024*1024));
49097 +}
49098 +
49099 +
49100 +/*
49101 + * Local variables:
49102 + * c-file-style: "stroustrup"
49103 + * End:
49104 + */
49105 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsRx_elan3.c
49106 ===================================================================
49107 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcommsRx_elan3.c       2004-02-23 16:02:56.000000000 -0500
49108 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsRx_elan3.c    2005-07-28 14:52:52.870675392 -0400
49109 @@ -0,0 +1,1776 @@
49110 +/*
49111 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
49112 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
49113 + *
49114 + *    For licensing information please see the supplied COPYING file
49115 + *
49116 + */
49117 +
49118 +#ident "@(#)$Id: epcommsRx_elan3.c,v 1.19.2.4 2005/03/10 15:24:08 mike Exp $"
49119 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx_elan3.c,v $ */
49120 +
49121 +#include <qsnet/kernel.h>
49122 +
49123 +#include <elan/kcomm.h>
49124 +#include <elan/epsvc.h>
49125 +#include <elan/epcomms.h>
49126 +
49127 +#include "kcomm_vp.h"
49128 +#include "kcomm_elan3.h"
49129 +#include "epcomms_elan3.h"
49130 +#include "debug.h"
49131 +
49132 +#define RCVR_TO_RAIL(rcvrRail)         ((EP3_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail)
49133 +#define RCVR_TO_DEV(rcvrRail)          (RCVR_TO_RAIL(rcvrRail)->Device)
49134 +#define RCVR_TO_SUBSYS(rcvrRail)       (((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys)
49135 +
49136 +static void RxDataEvent (EP3_RAIL *rail, void *arg);
49137 +static void RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
49138 +static void RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
49139 +
49140 +static EP3_COOKIE_OPS RxDataCookieOps = 
49141 +{
49142 +    RxDataEvent,
49143 +    RxDataRetry,
49144 +    NULL, /* DmaCancelled */
49145 +    RxDataVerify,
49146 +};
49147 +
49148 +static void RxDoneEvent (EP3_RAIL *rail, void *arg);
49149 +static void RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
49150 +static void RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
49151 +
49152 +static EP3_COOKIE_OPS RxDoneCookieOps = 
49153 +{
49154 +    RxDoneEvent,
49155 +    RxDoneRetry,
49156 +    NULL, /* DmaCancelled */
49157 +    RxDoneVerify,
49158 +};
49159 +
49160 +static int
49161 +AllocateRxdRailBlock (EP3_RCVR_RAIL *rcvrRail)
49162 +{
49163 +    EP3_RAIL         *rail = RCVR_TO_RAIL(rcvrRail);
49164 +    ELAN3_DEV         *dev  = rail->Device;
49165 +    EP3_RXD_RAIL_BLOCK *blk;
49166 +    EP3_RXD_RAIL       *rxdRail;
49167 +    sdramaddr_t        pRxdElan;
49168 +    EP3_RXD_RAIL_MAIN  *pRxdMain;
49169 +    E3_Addr           pRxdElanAddr;
49170 +    E3_Addr           pRxdMainAddr;
49171 +    E3_BlockCopyEvent  event;
49172 +    int                       i, j;
49173 +    unsigned long      flags;
49174 +
49175 +    KMEM_ZALLOC (blk, EP3_RXD_RAIL_BLOCK *, sizeof (EP3_RXD_RAIL_BLOCK), 1);
49176 +    if (blk == NULL)
49177 +       return 0;
49178 +
49179 +    if ((pRxdElan = ep_alloc_elan (&rail->Generic, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK, 0, &pRxdElanAddr)) == (sdramaddr_t) 0)
49180 +    {
49181 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
49182 +       return 0;
49183 +    }
49184 +
49185 +    if ((pRxdMain = ep_alloc_main (&rail->Generic, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK, 0, &pRxdMainAddr)) == (sdramaddr_t) 0)
49186 +    {
49187 +       ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
49188 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
49189 +       return 0;
49190 +    }
49191 +    
49192 +    if (ReserveDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK, 0) != ESUCCESS)
49193 +    {
49194 +       ep_free_main (&rail->Generic, pRxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK);
49195 +       ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
49196 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
49197 +       return 0;
49198 +    }
49199 +
49200 +    for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++)
49201 +    {
49202 +       rxdRail->Generic.RcvrRail = (EP_RCVR_RAIL *) rcvrRail;
49203 +       rxdRail->RxdElan          = pRxdElan;
49204 +       rxdRail->RxdElanAddr      = pRxdElanAddr;
49205 +       rxdRail->RxdMain          = pRxdMain;
49206 +       rxdRail->RxdMainAddr      = pRxdMainAddr;
49207 +
49208 +       elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain),  0);
49209 +       elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next),     0);
49210 +       elan3_sdram_writeq (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr), (long) rxdRail);
49211 +
49212 +       for (j = 0; j < EP_MAXFRAG; j++)
49213 +       {
49214 +           RegisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j], pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), &RxDataCookieOps, (void *) rxdRail);
49215 +
49216 +           event.ev_Type  = EV_TYPE_DMA | (pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[j+1]));
49217 +           event.ev_Count = 0;
49218 +
49219 +           elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), sizeof (E3_BlockCopyEvent));
49220 +       }
49221 +       
49222 +       RegisterCookie (&rail->CookieTable, &rxdRail->DataCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), &RxDataCookieOps, (void *) rxdRail);
49223 +       RegisterCookie (&rail->CookieTable, &rxdRail->DoneCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), &RxDoneCookieOps, (void *) rxdRail);
49224 +
49225 +       EP3_INIT_COPY_EVENT (event, rxdRail->DataCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DataEvent), 1);
49226 +       elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent));
49227 +
49228 +       EP3_INIT_COPY_EVENT (event, rxdRail->DoneCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DoneEvent), 1);
49229 +       elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent));
49230 +       
49231 +       pRxdMain->DataEvent = EP3_EVENT_FREE;
49232 +       pRxdMain->DoneEvent = EP3_EVENT_FREE;
49233 +
49234 +       /* move onto next descriptor */
49235 +       pRxdElan     += EP3_RXD_RAIL_ELAN_SIZE;
49236 +       pRxdElanAddr += EP3_RXD_RAIL_ELAN_SIZE;
49237 +       pRxdMain      = (EP3_RXD_RAIL_MAIN *) ((unsigned long) pRxdMain + EP3_RXD_RAIL_MAIN_SIZE);
49238 +       pRxdMainAddr += EP3_RXD_RAIL_MAIN_SIZE;
49239 +    }
49240 +
49241 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
49242 +
49243 +    list_add  (&blk->Link, &rcvrRail->DescBlockList);
49244 +    rcvrRail->TotalDescCount += EP3_NUM_RXD_PER_BLOCK;
49245 +    rcvrRail->FreeDescCount  += EP3_NUM_RXD_PER_BLOCK;
49246 +
49247 +    for (i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++)
49248 +       list_add (&blk->Rxd[i].Generic.Link, &rcvrRail->FreeDescList);
49249 +
49250 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
49251 +    
49252 +    return 1;
49253 +}
49254 +
49255 +static void
49256 +FreeRxdRailBlock (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL_BLOCK *blk)
49257 +{
49258 +    EP3_RAIL     *rail = RCVR_TO_RAIL(rcvrRail);
49259 +    EP3_RXD_RAIL *rxdRail;
49260 +    unsigned long flags;
49261 +    int i, j;
49262 +
49263 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
49264 +
49265 +    list_del (&blk->Link);
49266 +
49267 +    rcvrRail->TotalDescCount -= EP3_NUM_RXD_PER_BLOCK;
49268 +
49269 +    for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++)
49270 +    {
49271 +
49272 +       rcvrRail->FreeDescCount--;
49273 +
49274 +       list_del (&rxdRail->Generic.Link);
49275 +       
49276 +       for (j = 0; j < EP_MAXFRAG; j++)
49277 +           DeregisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j]);
49278 +       
49279 +       DeregisterCookie (&rail->CookieTable, &rxdRail->DataCookie);
49280 +       DeregisterCookie (&rail->CookieTable, &rxdRail->DoneCookie);
49281 +    }
49282 +
49283 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
49284 +
49285 +    ReleaseDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK);
49286 +
49287 +    ep_free_main (&rail->Generic, blk->Rxd[0].RxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK);
49288 +    ep_free_elan (&rail->Generic, blk->Rxd[0].RxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
49289 +
49290 +    KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
49291 +}
49292 +
49293 +static EP3_RXD_RAIL *
49294 +GetRxdRail (EP3_RCVR_RAIL *rcvrRail)
49295 +{
49296 +    EP3_RXD_RAIL *rxdRail;
49297 +    unsigned long flags;
49298 +    int low_on_rxds;
49299 +
49300 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
49301 +
49302 +    if (list_empty (&rcvrRail->FreeDescList))
49303 +       rxdRail = NULL;
49304 +    else
49305 +    {
49306 +       rxdRail = list_entry (rcvrRail->FreeDescList.next, EP3_RXD_RAIL, Generic.Link);
49307 +
49308 +       list_del (&rxdRail->Generic.Link);
49309 +
49310 +       rcvrRail->FreeDescCount--;
49311 +    }
49312 +
49313 +    /* Wakeup the descriptor primer thread if there's not many left */
49314 +    low_on_rxds = (rcvrRail->FreeDescCount < ep_rxd_lowat);
49315 +
49316 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
49317 +
49318 +    if (low_on_rxds)
49319 +       ep_kthread_schedule (&RCVR_TO_SUBSYS(rcvrRail)->Thread, lbolt);
49320 +
49321 +    return (rxdRail);
49322 +}
49323 +
49324 +static void
49325 +FreeRxdRail (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL *rxdRail)
49326 +{
49327 +    unsigned long flags;
49328 +
49329 +#if defined(DEBUG_ASSERT)
49330 +    {
49331 +       EP_RAIL  *rail = (EP_RAIL *) RCVR_TO_RAIL(rcvrRail);
49332 +       ELAN3_DEV *dev = RCVR_TO_DEV (rcvrRail);
49333 +
49334 +       EP_ASSERT (rail, rxdRail->Generic.RcvrRail == &rcvrRail->Generic);
49335 +       
49336 +       EP_ASSERT (rail, rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE);
49337 +       EP_ASSERT (rail, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE);
49338 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
49339 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
49340 +
49341 +       rxdRail->RxdMain->DataEvent = EP3_EVENT_FREE;
49342 +       rxdRail->RxdMain->DoneEvent = EP3_EVENT_FREE;
49343 +    }
49344 +#endif
49345 +
49346 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
49347 +    
49348 +    list_add (&rxdRail->Generic.Link, &rcvrRail->FreeDescList);
49349 +
49350 +    rcvrRail->FreeDescCount++;
49351 +
49352 +    if (rcvrRail->FreeDescWaiting)
49353 +    {
49354 +       rcvrRail->FreeDescWaiting--;
49355 +       kcondvar_wakeupall (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock);
49356 +    }
49357 +
49358 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
49359 +}
49360 +
49361 +static void
49362 +BindRxdToRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail)
49363 +{
49364 +    EP3_RAIL *rail = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail);
49365 +
49366 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
49367 +
49368 +    EPRINTF3 (DBG_RCVR, "%s: BindRxdToRail: rxd=%p rxdRail=%p\n",  rail->Generic.Name, rxd, rxdRail);
49369 +
49370 +    elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain), rxd->NmdMain.nmd_addr);                        /* PCI write */
49371 +
49372 +    rxd->RxdRail         = &rxdRail->Generic;
49373 +    rxdRail->Generic.Rxd = rxd;
49374 +}
49375 +
49376 +static void
49377 +UnbindRxdFromRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail)
49378 +{
49379 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49380 +
49381 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
49382 +    ASSERT (rxd->RxdRail == &rxdRail->Generic && rxdRail->Generic.Rxd == rxd);
49383 +    
49384 +    EPRINTF3 (DBG_RCVR, "%s: UnbindRxdFromRail: rxd=%p rxdRail=%p\n",  RCVR_TO_RAIL(rxdRail->Generic.RcvrRail)->Generic.Name, rxd, rxdRail);
49385 +
49386 +    rxd->RxdRail         = NULL;
49387 +    rxdRail->Generic.Rxd = NULL;
49388 +
49389 +    if (rcvrRail->CleanupWaiting)
49390 +       kcondvar_wakeupall (&rcvrRail->CleanupSleep, &rxd->Rcvr->Lock);
49391 +    rcvrRail->CleanupWaiting = 0;
49392 +}
49393 +
49394 +static void
49395 +LockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
49396 +{
49397 +    EP_COMMS_RAIL     *commsRail   = rcvrRail->Generic.CommsRail;
49398 +    EP3_RAIL          *rail        = RCVR_TO_RAIL(rcvrRail);
49399 +    ELAN3_DEV        *dev         = rail->Device;
49400 +    sdramaddr_t        sle         = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
49401 +    EP3_SPINLOCK_MAIN *sl          = &rcvrRail->RcvrMain->ThreadLock;
49402 +    E3_uint32          RestartBits = 0;
49403 +    int                delay       = 1;
49404 +    E3_uint32          seq;
49405 +    E3_uint32          reg;
49406 +
49407 +    ASSERT (SPINLOCK_HELD (&rcvrRail->Generic.Rcvr->Lock));
49408 +
49409 +    mb();
49410 +    elan3_sdram_writel (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);
49411 +    mb();
49412 +    seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
49413 +    while (seq != sl->sl_seq)
49414 +    {
49415 +       while (sl->sl_seq == (seq - 1))
49416 +       {
49417 +           mb();
49418 +
49419 +           if ((read_reg32 (dev, Exts.InterruptReg) & (INT_TProc | INT_TProcHalted)) != 0 && spin_trylock (&dev->IntrLock))
49420 +           {
49421 +               reg=read_reg32 (dev, Exts.InterruptReg);
49422 +               ELAN_REG_REC(reg);
49423 +
49424 +               if ((reg & (INT_TProc | INT_TProcHalted)) != 0&& 
49425 +                   elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq)) != sl->sl_seq)
49426 +               {
49427 +                   EPRINTF1 (DBG_RCVR, "%s: LockRcvrThread - thread trapped\n", rail->Generic.Name);
49428 +
49429 +                   /* The thread processor has *really* trapped, and the spinlock is still held.
49430 +                    * thus it must have trapped due to a network error - we need to complete the
49431 +                    * actions required for this envelope, since we may be spin-locking the receiver
49432 +                    * to search the dma retry lists for a particular dma.  So must ensure that
49433 +                    * if the thread had trapped then the dma has been queued onto the retry list
49434 +                    * *before* we inspect them.
49435 +                    */
49436 +                   IncrStat (commsRail, LockRcvrTrapped);
49437 +
49438 +                   /* We're going to generate a spurious interrupt here - since we will
49439 +                    * handle the thread processor trap directly */
49440 +                   ELAN_REG_REC(reg);
49441 +                   if (HandleTProcTrap (dev, &RestartBits))
49442 +                   {
49443 +                       /* NOTE - this is not an assert, since the "store" to unlock the lock could
49444 +                        *        be held up on the PCI interface, whilst the thread processor has
49445 +                        *        gone on and switched to a new thread, which has then trapped, and
49446 +                        *        our read of the InterruptReg can overtake the unlock write.
49447 +                        *
49448 +                        * ASSERT (dev->ThreadTrap->Registers[REG_GLOBALS + (1^WordEndianFlip)] == 
49449 +                        *         elan3_sdram_readl (dev, rcvr->RcvrElan + offsetof (EP_RCVR_ELAN, PendingRxDescsElan)));
49450 +                        */
49451 +
49452 +                       PULSE_SCHED_STATUS (dev, RestartBits);
49453 +
49454 +                       DeliverTProcTrap (dev, dev->ThreadTrap, INT_TProc);
49455 +                   }
49456 +               }
49457 +               spin_unlock (&dev->IntrLock);
49458 +           }
49459 +           
49460 +           DELAY (delay); delay++;
49461 +       }
49462 +       seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
49463 +    }
49464 +}
49465 +
49466 +static void
49467 +UnlockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
49468 +{
49469 +    EP3_RAIL   *rail = RCVR_TO_RAIL(rcvrRail);
49470 +    sdramaddr_t sle  = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
49471 +
49472 +    mb();
49473 +    elan3_sdram_writel (rail->Device, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);
49474 +    mmiob(); 
49475 +}
49476 +
49477 +void
49478 +CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdElanAddr, E3_uint32 PAckVal)
49479 +{
49480 +    ELAN3_DEV         *dev       = rail->Device;
49481 +    sdramaddr_t        rxdElan   = ep_elan2sdram (&rail->Generic, rxdElanAddr);
49482 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
49483 +    EP_RXD_MAIN       *rxdMain   = rxdRail->Generic.Rxd->RxdMain;
49484 +    EP_ENVELOPE       *env       = &rxdMain->Envelope;
49485 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49486 +    EP_COMMS_RAIL     *commsRail = rcvrRail->Generic.CommsRail;
49487 +    EP_RCVR           *rcvr      = rcvrRail->Generic.Rcvr;
49488 +    sdramaddr_t        queue     = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue);
49489 +    sdramaddr_t        sle       = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
49490 +    EP3_SPINLOCK_MAIN *sl        = &rcvrRail->RcvrMain->ThreadLock;
49491 +    int               nodeId;
49492 +    EP_NODE_RAIL     *nodeRail;
49493 +    E3_DMA_BE         dma;
49494 +    E3_Addr           nfptr;
49495 +    E3_Addr          next;
49496 +
49497 +    ASSERT (commsRail->Rail == &rail->Generic);
49498 +    ASSERT (rxdElanAddr == elan3_sdram_readl (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs)));
49499 +
49500 +    IncrStat (commsRail, CompleteEnvelope);
49501 +
49502 +    /* We don't need to acquire the NodeLock here (however we might be holding it),
49503 +     * since this can only get called while the node is connected, or disconnecting.
49504 +     * If the node is disconnecting, then we can get called from FlushDisconnecting()
49505 +     * while holding the NodeLock - after we cannot get called again until the node 
49506 +     * has reconnected from scratch.
49507 +     */
49508 +    /* Copy the envelope information */
49509 +    nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr));
49510 +
49511 +    if (nfptr == elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)))
49512 +       nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
49513 +    else
49514 +       nfptr += elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size));
49515 +
49516 +    /* Copy the envelope and payload (unconditionally) */
49517 +    elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr), env, EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE);
49518 +
49519 +    ASSERT (env->Version == EP_ENVELOPE_VERSION);
49520 +
49521 +    /* Copy the received message length */
49522 +    rxdMain->Len = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len));
49523 +       
49524 +    /* Remove the RXD from the pending desc list */
49525 +    if ((next = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
49526 +       rcvrRail->RcvrMain->PendingDescsTailp = 0;
49527 +    elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
49528 +
49529 +    /* Copy the DMA descriptor to queue on the appropriate retry list */
49530 +    elan3_sdram_copyq_from_sdram (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]), &dma, sizeof (E3_DMA));        /* PCI read block */
49531 +    
49532 +    EP_ASSERT (&rail->Generic, dma.s.dma_direction == DMA_READ);;
49533 +
49534 +#if defined(DEBUG_ASSERT) && defined(DEBUG_SDRAM_ASSERT)
49535 +    /* NOTE: not an assertion, since the thread packet could have successfully
49536 +     *       transferred the "put" dma to the far side - which could then have
49537 +     *       completed - but the far side will see a network error which will
49538 +     *       cause the virtual circuit to be dropped by the far side and this 
49539 +     *       DMA will be removed */
49540 +    if (rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
49541 +       elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
49542 +    {
49543 +       printk ("CompleteEnvelope: suspicious dma : Node=%d DataBlock=%d Event=%d\n", 
49544 +               env->NodeId, rxdRail->RxdMain->DataEvent, 
49545 +               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
49546 +    }
49547 +#endif
49548 +    
49549 +    EPRINTF6 (DBG_RCVR, "%s: CompleteEnvelope: rxd=%p NodeId=%d Xid=%llx Cookies=%08x,%08x\n", commsRail->Rail->Name,
49550 +             rxdRail, env->NodeId, (long long) env->Xid.Unique, dma.s.dma_srcCookieVProc, dma.s.dma_destCookieVProc);
49551 +
49552 +    /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the DMA descriptor will
49553 +     * be read from the EP_RETRY_DMA rather than the original DMA - this can then get reused 
49554 +     * and an incorrect DMA descriptor sent */
49555 +    dma.s.dma_source    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
49556 +    dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
49557 +    
49558 +    nodeId   = EP_VP_TO_NODE(dma.s.dma_srcVProc);
49559 +    nodeRail = &rail->Generic.Nodes[nodeId];
49560 +
49561 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
49562 +
49563 +    if (PAckVal != E3_PAckOk)
49564 +    {
49565 +       if (nodeRail->State == EP_NODE_CONNECTED)
49566 +           QueueDmaForRetry (rail, &dma, EP_RETRY_LOW_PRI_RETRY);
49567 +       else
49568 +           QueueDmaOnStalledList (rail, &dma);
49569 +    }
49570 +
49571 +    /* Finally forcefully drop the spinlock for the thread */
49572 +    sl->sl_seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
49573 +
49574 +    wmb();
49575 +}
49576 +
49577 +void
49578 +StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
49579 +{
49580 +    ELAN3_DEV      *dev       = rail->Device;
49581 +    sdramaddr_t    rcvrElan   = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
49582 +    EP3_RCVR_RAIL  *rcvrRail  = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq (dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
49583 +    EP_RCVR        *rcvr      = rcvrRail->Generic.Rcvr;
49584 +    EP_COMMS_RAIL  *commsRail = rcvrRail->Generic.CommsRail;
49585 +
49586 +    EPRINTF3 (DBG_RCVR, "%s: StallThreadForNoDescs - rcvrRail=%p sp=%x\n", commsRail->Rail->Name, rcvrRail, sp);
49587 +    
49588 +    IncrStat (commsRail, StallThread);
49589 +
49590 +    /* NOTE: spin lock not required as thread is trapped */
49591 +    
49592 +    if (rcvrRail->RcvrMain->PendingDescsTailp != 0)
49593 +    {
49594 +       EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - pending descriptors, wakeup thread\n", commsRail->Rail->Name);
49595 +       
49596 +       /*
49597 +        * A receive buffer was queued after the thread had decided to go to 
49598 +        * sleep, but before the event interrupt occurred.  Just restart the
49599 +        * thread to consume the envelope.
49600 +        */
49601 +       IssueRunThread (rail, sp);
49602 +    }
49603 +    else
49604 +    {
49605 +       EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - set ThreadWaiting\n", commsRail->Rail->Name);
49606 +       
49607 +       IncrStat (commsRail, ThrdWaiting);
49608 +
49609 +       /* Mark the rcvr as waiting for a rxd, and schedule a call of ep_check_rcvr
49610 +        * to attempt to "steal" a descriptor from a different rail */
49611 +       rcvrRail->ThreadWaiting = sp;
49612 +
49613 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
49614 +    }
49615 +}
49616 +
49617 +void
49618 +StallThreadForHalted (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
49619 +{
49620 +    ELAN3_DEV     *dev       = rail->Device;
49621 +    sdramaddr_t    rcvrElan  = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
49622 +    EP3_RCVR_RAIL *rcvrRail  = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq (dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
49623 +    EP_RCVR       *rcvr      = rcvrRail->Generic.Rcvr;
49624 +    unsigned long  flags     = 0;
49625 +
49626 +    spin_lock_irqsave (&rcvr->Lock, flags);
49627 +
49628 +    rcvrRail->ThreadHalted = sp;
49629 +
49630 +    EPRINTF2 (DBG_EPTRAP, "%s: StallThreadForHalted: sp=%08x\n", rail->Generic.Name, sp);
49631 +
49632 +    if (rcvrRail->CleanupWaiting)
49633 +       kcondvar_wakeupone (&rcvrRail->CleanupSleep, &rcvr->Lock);
49634 +    rcvrRail->CleanupWaiting = 0;
49635 +
49636 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49637 +}
49638 +/*
49639 + * RxDataEvent: arg == EP3_RXD_RAIL
49640 + *   Called on completion of receiving data.
49641 + */
49642 +static void
49643 +RxDataEvent (EP3_RAIL *rail, void *arg)
49644 +{
49645 +    EP3_RXD_RAIL  *rxdRail  = (EP3_RXD_RAIL *) arg;
49646 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49647 +    EP_RXD        *rxd      = rxdRail->Generic.Rxd;
49648 +    EP_ENVELOPE   *env      = &rxd->RxdMain->Envelope;
49649 +    EP_RCVR       *rcvr     = rxd->Rcvr;
49650 +    ELAN3_DEV    *dev      = rail->Device;
49651 +    unsigned long flags;
49652 +    int delay = 1;
49653 +
49654 +    spin_lock_irqsave (&rcvr->Lock, flags);
49655 +    for (;;)
49656 +    {
49657 +       if (EP3_EVENT_FIRED (rxdRail->DataCookie, rxdRail->RxdMain->DataEvent))
49658 +           break;
49659 +
49660 +       if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), rxdRail->DataCookie, rxdRail->RxdMain->DataEvent))
49661 +       {
49662 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
49663 +               panic ("RxDataEvent: events set but block copy not completed\n");
49664 +           DELAY(delay);
49665 +           delay <<= 1;
49666 +       }
49667 +       else
49668 +       {
49669 +           printk ("%s: RxDataEvent: rxd %p not complete [%x,%x,%x]\n", rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent,
49670 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)),
49671 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type)));
49672 +           
49673 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
49674 +           return;
49675 +       }
49676 +       mb();
49677 +    }
49678 +
49679 +    /*
49680 +     * Note, since the thread will have sent the "get" dma before copying the
49681 +     * envelope, we must check that it has completed doing this,  if not then
49682 +     * it might be that the thread trapped due to a network error, so we must
49683 +     * spinlock against the thread 
49684 +     */
49685 +    if (rxd->RxdMain->Len == EP_RXD_PENDING)
49686 +    {
49687 +       LockRcvrThread (rcvrRail);
49688 +       UnlockRcvrThread (rcvrRail);
49689 +
49690 +       ASSERT (env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING);
49691 +    }
49692 +
49693 +    EPRINTF7 (DBG_RCVR, "%s: RxDataEvent: rxd=%p rxdRail=%p completed from elan node %d [XID=%llx] Length %d State %x\n", 
49694 +             rail->Generic.Name, rxd, rxdRail, env->NodeId, (long long) env->Xid.Unique, rxd->RxdMain->Len, rxd->State);
49695 +
49696 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_PUT_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE);
49697 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));      /* PCI read */
49698 +    EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE);
49699 +
49700 +    rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
49701 +    rxd->Data.nmd_attr          = EP_RAIL2RAILMASK (rail->Generic.Number);
49702 +
49703 +    if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr))
49704 +       rxd->State = EP_RXD_RPC_IN_PROGRESS;
49705 +    else
49706 +    {
49707 +       rxd->State = EP_RXD_COMPLETED;
49708 +
49709 +       /* remove from active list */
49710 +       list_del (&rxd->Link);
49711 +
49712 +       UnbindRxdFromRail (rxd, rxdRail);
49713 +       FreeRxdRail (rcvrRail, rxdRail);
49714 +    }
49715 +
49716 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49717 +    ep_rxd_received (rxd);
49718 +
49719 +}
49720 +
49721 +/* 
49722 + * RxDataRetry: arg == EP3_RXD_RAIL
49723 + *   Called on retry of "get" dma of large transmit data
49724 + *   and rpc_get/rpc_put and "put" of datavec of rpc completion.
49725 + */
49726 +static void
49727 +RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
49728 +{
49729 +    EP3_RXD_RAIL  *rxdRail  = (EP3_RXD_RAIL *) arg;
49730 +    EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail;
49731 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49732 +
49733 +#if defined(DEBUG_ASSERT)
49734 +    RxDataVerify (rail, arg, dma);
49735 +#endif
49736 +
49737 +    IncrStat (commsRail, RxDataRetry);
49738 +
49739 +    EPRINTF4 (DBG_RCVR, "%s: RxDataRetry: rcvr %p rxd %p [XID=%llx]\n", rail->Generic.Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique);
49740 +
49741 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DATA));
49742 +}
49743 +
49744 +static void
49745 +RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
49746 +{
49747 +#if defined(DEBUG_ASSERT)
49748 +    EP3_RXD_RAIL   *rxdRail  = (EP3_RXD_RAIL *) arg;
49749 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49750 +
49751 +    if (dma->s.dma_direction == DMA_WRITE)
49752 +    {
49753 +       EP_ASSERT (&rail->Generic, 
49754 +                  (rxd->State == EP_RXD_RECEIVE_ACTIVE  && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
49755 +                  (rxd->State == EP_RXD_PUT_ACTIVE      && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
49756 +                  (rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_ACTIVE));
49757 +       EP_ASSERT (&rail->Generic, SDRAM_ASSERT (rxd->State == EP_RXD_COMPLETE_ACTIVE ?
49758 +                                                elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1:            /* PCI read */
49759 +                                                elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 1));          /* PCI read */
49760 +    }
49761 +    else
49762 +    {
49763 +       EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_READ_REQUEUE);
49764 +       
49765 +#if defined(DEBUG_SDRAM_ASSERT)
49766 +       /* NOTE: not an assertion, since the "get" DMA can still be running if
49767 +        *       it's packet got a network error - and then the "put" from the
49768 +        *       far side has completed - however the virtual circuit should
49769 +        *       then be dropped by the far side and this DMA will be removed */
49770 +       if (EP_VP_TO_NODE(dma->s.dma_srcVProc) != ep_rxd_node(rxd) || 
49771 +           (rxd->State != EP_RXD_RECEIVE_ACTIVE && rxd->State != EP_RXD_GET_ACTIVE) ||
49772 +           rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
49773 +           elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
49774 +       {
49775 +           EPRINTF6 (DBG_RCVR, "%s: RxDataRetry: suspicious dma : VProc=%d NodeId=%d State=%d DataBlock=%x Event=%d\n",  
49776 +                     rail->Generic.Name, EP_VP_TO_NODE(dma->s.dma_srcVProc), ep_rxd_node(rxd), rxd->State, rxdRail->RxdMain->DataEvent, 
49777 +                     elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
49778 +       }
49779 +#endif /* defined(DEBUG_SDRAM_ASSERT) */
49780 +    }
49781 +#endif /* DEBUG_ASSERT */
49782 +}
49783 +
49784 +/*
49785 + * RxDoneEvent: arg == EP_RXD
49786 + *   Called on completion of large receive.
49787 + */
49788 +static void
49789 +RxDoneEvent (EP3_RAIL *rail, void *arg)
49790 +{
49791 +    EP3_RXD_RAIL  *rxdRail   = (EP3_RXD_RAIL *) arg;
49792 +    EP3_RCVR_RAIL *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49793 +    EP_COMMS_RAIL *commsRail = rcvrRail->Generic.CommsRail;
49794 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49795 +    EP_RCVR       *rcvr      = rxd->Rcvr;
49796 +    ELAN3_DEV    *dev       = rail->Device;
49797 +    int            delay     = 1;
49798 +    unsigned long  flags;
49799 +
49800 +    spin_lock_irqsave (&rcvr->Lock, flags);
49801 +    for (;;)
49802 +    {
49803 +       if (EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))
49804 +           break;
49805 +       
49806 +       if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))
49807 +       {
49808 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
49809 +               panic ("RxDoneEvent: events set but block copy not completed\n");
49810 +           DELAY(delay);
49811 +           delay <<= 1;
49812 +       }
49813 +       else
49814 +       {
49815 +           printk ("RxDoneEvent: rxd %p not complete [%x,%x.%x]\n", rxd, rxdRail->RxdMain->DoneEvent,
49816 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)),
49817 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type)));
49818 +           
49819 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
49820 +           return;
49821 +       }
49822 +       mb();
49823 +    }
49824 +
49825 +    EPRINTF4 (DBG_RCVR, "%s: RxDoneEvent: rxd %p completed from elan node %d [XID=%llx]\n", 
49826 +             commsRail->Rail->Name, rxd, rxd->RxdMain->Envelope.NodeId, (long long) rxd->RxdMain->Envelope.Xid.Unique);
49827 +    
49828 +    IncrStat (commsRail, RxDoneEvent);
49829 +
49830 +    EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DataEvent  == EP3_EVENT_PRIVATE);
49831 +    EP_ASSERT (&rail->Generic, EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent));
49832 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));      /* PCI read */
49833 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));      /* PCI read */
49834 +
49835 +    /* mark rxd as private  */
49836 +    rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
49837 +
49838 +    /* remove from active list */
49839 +    list_del (&rxd->Link);
49840 +
49841 +    UnbindRxdFromRail (rxd, rxdRail);
49842 +    FreeRxdRail (rcvrRail, rxdRail);
49843 +       
49844 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
49845 +
49846 +    rxd->Handler (rxd);
49847 +}
49848 +
49849 +/* 
49850 + * RxDoneRetry: arg == EP_RXD
49851 + *   Called on retry of "put" of RPC completion status block
49852 + */
49853 +static void
49854 +RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
49855 +{
49856 +    EP3_RXD_RAIL  *rxdRail   = (EP3_RXD_RAIL *) arg;
49857 +    EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail;
49858 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
49859 +
49860 +#if defined(DEBUG_ASSERT)
49861 +    RxDoneVerify (rail, arg, dma);
49862 +#endif
49863 +
49864 +    IncrStat (commsRail, RxDoneRetry);
49865 +
49866 +    EPRINTF4 (DBG_RCVR, "%s: RxDoneRetry: rcvr %p rxd %p [XID=%llx]\n", commsRail->Rail->Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique);
49867 +
49868 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DONE));
49869 +}
49870 +
49871 +static void
49872 +RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
49873 +{
49874 +#if defined(DEBUG_ASSERT)
49875 +    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg;
49876 +    EP_RXD       *rxd     = rxdRail->Generic.Rxd;
49877 +
49878 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == ep_rxd_node(rxd));
49879 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DoneEvent  == EP3_EVENT_ACTIVE);
49880 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1));     /* PCI read */
49881 +#endif /* defined(DEBUG_ASSERT) */
49882 +}
49883 +
49884 +int
49885 +ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r)
49886 +{
49887 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r;
49888 +    EP3_RAIL      *rail     = RCVR_TO_RAIL(rcvrRail);
49889 +    ELAN3_DEV     *dev      = rail->Device;
49890 +    EP3_RXD_RAIL  *rxdRail;
49891 +
49892 +    ASSERT ( SPINLOCK_HELD(&rxd->Rcvr->Lock));
49893 +
49894 +    if ((rxdRail = GetRxdRail (rcvrRail)) == NULL)
49895 +       return 0;
49896 +
49897 +    /* Flush the Elan TLB if mappings have changed */
49898 +    ep_perrail_dvma_sync (&rail->Generic);
49899 +
49900 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr), rxd->Data.nmd_addr);              /* PCI write */
49901 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len),  rxd->Data.nmd_len);               /* PCI write */
49902 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_attr), rxd->Data.nmd_attr);              /* PCI write */
49903 +
49904 +    /* Bind the rxdRail and rxd together */
49905 +    BindRxdToRail (rxd, rxdRail);
49906 +    
49907 +    /* Mark as active */
49908 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);
49909 +
49910 +    rxdRail->RxdMain->DataEvent  = EP3_EVENT_ACTIVE;
49911 +    rxdRail->RxdMain->DoneEvent  = EP3_EVENT_PRIVATE;
49912 +
49913 +    /* Interlock with StallThreadForNoDescs */
49914 +    spin_lock (&dev->IntrLock);
49915 +
49916 +    EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p\n", rail->Generic.Name, rxd->Rcvr, rxd, rxdRail);
49917 +
49918 +    EP3_SPINENTER (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock);
49919 +
49920 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next), 0);                                        /* PCI write */
49921 +    if (rcvrRail->RcvrMain->PendingDescsTailp == 0)
49922 +       elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), rxdRail->RxdElanAddr);       /* PCI write */
49923 +    else
49924 +       elan3_sdram_writel (dev, rcvrRail->RcvrMain->PendingDescsTailp, rxdRail->RxdElanAddr);                          /* PCI write */
49925 +    rcvrRail->RcvrMain->PendingDescsTailp = rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next);
49926 +    
49927 +    EP3_SPINEXIT (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock);
49928 +
49929 +    /* If the thread has paused because it was woken up with no receive buffer */
49930 +    /* ready, then wake it up to process the one we've just added */
49931 +    if (rcvrRail->ThreadWaiting)
49932 +    {
49933 +       EPRINTF1 (DBG_RCVR, "%s: DoReceive: ThreadWaiting - restart thread\n", rail->Generic.Name);
49934 +
49935 +       IssueRunThread (rail, rcvrRail->ThreadWaiting);
49936 +
49937 +       rcvrRail->ThreadWaiting = (E3_Addr) 0;
49938 +    }
49939 +
49940 +    spin_unlock (&dev->IntrLock);
49941 +
49942 +    return 1;
49943 +}
49944 +
49945 +void
49946 +ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
49947 +{
49948 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
49949 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
49950 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
49951 +    ELAN3_DEV         *dev       = rail->Device;
49952 +
49953 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
49954 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
49955 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
49956 +    E3_DMA_BE         dmabe;
49957 +    int                       i, len;
49958 +
49959 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_PUT_ACTIVE);
49960 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
49961 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
49962 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
49963 +
49964 +    /* Flush the Elan TLB if mappings have changed */
49965 +    ep_perrail_dvma_sync (&rail->Generic);
49966 +    
49967 +    /* Generate the DMA chain to put the data in two loops to burst
49968 +     * the data across the PCI bus */
49969 +    for (len = 0, i = (nFrags-1), local += (nFrags-1), remote += (nFrags-1); i >= 0;   len += local->nmd_len, i--, local--, remote--)
49970 +    {
49971 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
49972 +       dmabe.s.dma_size            = local->nmd_len;
49973 +       dmabe.s.dma_source          = local->nmd_addr;
49974 +       dmabe.s.dma_dest            = remote->nmd_addr;
49975 +       dmabe.s.dma_destEvent       = (E3_Addr) 0;
49976 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId);
49977 +       if (i == (nFrags-1))
49978 +           dmabe.s.dma_srcEvent    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent);
49979 +       else
49980 +           dmabe.s.dma_srcEvent    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]);
49981 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
49982 +       
49983 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
49984 +                 (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, dmabe.s.dma_srcCookieVProc);
49985 +       
49986 +       if (i != 0)
49987 +           elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */
49988 +    }
49989 +    
49990 +    for (i = 0; i < nFrags; i++)
49991 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
49992 +    
49993 +    /* Initialise the data event */
49994 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);                   /* PCI write */
49995 +    rxdMain->DataEvent = EP3_EVENT_ACTIVE;
49996 +   
49997 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
49998 +
49999 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
50000 +    {
50001 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
50002 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_put: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
50003 +       
50004 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
50005 +    }
50006 +    
50007 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
50008 +}
50009 +
50010 +void
50011 +ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
50012 +{
50013 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
50014 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
50015 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
50016 +    ELAN3_DEV         *dev       = rail->Device;
50017 +
50018 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
50019 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
50020 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
50021 +    E3_DMA_BE         dmabe;
50022 +    int                       i, len;
50023 +
50024 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_GET_ACTIVE);
50025 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
50026 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
50027 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
50028 +       
50029 +    /* Flush the Elan TLB if mappings have changed */
50030 +    ep_perrail_dvma_sync (&rail->Generic);
50031 +    
50032 +    /* Generate the DMA chain to get the data in two loops to burst
50033 +     * the data across the PCI bus */
50034 +    for (len = 0, i = (nFrags-1), remote += (nFrags-1), local += (nFrags-1); i >= 0;   len += remote->nmd_len, i--, remote--, local--)
50035 +    {
50036 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
50037 +       dmabe.s.dma_size            = remote->nmd_len;
50038 +       dmabe.s.dma_source          = remote->nmd_addr;
50039 +       dmabe.s.dma_dest            = local->nmd_addr;
50040 +       if (i == (nFrags-1))
50041 +           dmabe.s.dma_destEvent   = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent);
50042 +       else
50043 +           dmabe.s.dma_destEvent   = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]);
50044 +       dmabe.s.dma_destCookieVProc = LocalCookie (rail, env->NodeId);
50045 +       dmabe.s.dma_srcEvent        = (E3_Addr) 0;
50046 +       dmabe.s.dma_srcCookieVProc  = RemoteCookie (rail, env->NodeId);
50047 +       
50048 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_get rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
50049 +                 (long long) env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len, dmabe.s.dma_destCookieVProc, 
50050 +                 dmabe.s.dma_srcCookieVProc);
50051 +       
50052 +       /* 
50053 +        * Always copy down the dma descriptor, since we issue it as a READ_REQUEUE
50054 +        * dma, and the elan will fetch the descriptor to send out of the link from
50055 +        * the rxdElan->Dmas[i] location,  before issueing the DMA chain we modify
50056 +        * the dma_source.
50057 +        */
50058 +       elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */
50059 +    }
50060 +    
50061 +    for (i = 0; i < nFrags; i++)
50062 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
50063 +    
50064 +    /* Initialise the data event */
50065 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);                   /* PCI write */
50066 +    rxdMain->DataEvent  = EP3_EVENT_ACTIVE;
50067 +    
50068 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
50069 +
50070 +    /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the DMA descriptor will
50071 +     * be read from the EP_RETRY_DMA rather than the orignal DMA - this can then get reused 
50072 +     * and an incorrect DMA descriptor sent */
50073 +    dmabe.s.dma_source    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
50074 +    dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
50075 +    
50076 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
50077 +    {
50078 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
50079 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_get: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
50080 +       
50081 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
50082 +    }
50083 +
50084 +    BucketStat (rxd->Rcvr->Subsys, RPCGet, len);
50085 +}
50086 +       
50087 +void
50088 +ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
50089 +{
50090 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
50091 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
50092 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
50093 +    ELAN3_DEV         *dev       = rail->Device;
50094 +
50095 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
50096 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
50097 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
50098 +    E3_DMA_BE         dmabe;
50099 +    int                       i, len;
50100 +    
50101 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE);
50102 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
50103 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
50104 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
50105 +
50106 +    /* Flush the Elan TLB if mappings have changed */
50107 +    ep_perrail_dvma_sync (&rail->Generic);
50108 +    
50109 +    /* Initialise the status block dma */
50110 +    dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
50111 +    dmabe.s.dma_size            = sizeof (EP_STATUSBLK);
50112 +    dmabe.s.dma_source          = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk);
50113 +    dmabe.s.dma_dest            = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk);
50114 +    dmabe.s.dma_destEvent       = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent);
50115 +    dmabe.s.dma_destCookieVProc = EP_VP_DATA(env->NodeId);
50116 +    dmabe.s.dma_srcEvent        = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent);
50117 +    dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
50118 +    
50119 +    EPRINTF8 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
50120 +             (long long) env->Xid.Unique, dmabe.s.dma_source, dmabe.s.dma_dest, dmabe.s.dma_size, dmabe.s.dma_destCookieVProc, 
50121 +             dmabe.s.dma_srcCookieVProc);
50122 +
50123 +    for (len = 0, i = EP_MAXFRAG, remote += (nFrags-1), local += (nFrags-1); i > EP_MAXFRAG-nFrags; len += local->nmd_len, i--, local--, remote--)
50124 +    {
50125 +       /* copy down previous dma */
50126 +       elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]),  sizeof (E3_DMA));    /* PCI write block */
50127 +       
50128 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
50129 +       dmabe.s.dma_size            = local->nmd_len;
50130 +       dmabe.s.dma_source          = local->nmd_addr;
50131 +       dmabe.s.dma_dest            = remote->nmd_addr;
50132 +       dmabe.s.dma_destEvent       = (E3_Addr) 0;
50133 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId);
50134 +       dmabe.s.dma_srcEvent        = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i-1]);
50135 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
50136 +       
50137 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
50138 +                 (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, 
50139 +                 dmabe.s.dma_srcCookieVProc);
50140 +    }
50141 +    
50142 +    for (i = EP_MAXFRAG-nFrags; i < EP_MAXFRAG; i++)
50143 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
50144 +    
50145 +    /* Initialise the done event */
50146 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 1);                                   /* PCI write */
50147 +    rxdMain->DoneEvent  = EP3_EVENT_ACTIVE;
50148 +
50149 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
50150 +
50151 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
50152 +    {
50153 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
50154 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
50155 +       
50156 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
50157 +    }
50158 +
50159 +    BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len);
50160 +}
50161 +       
50162 +void
50163 +ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
50164 +{
50165 +    EP3_RAIL          *rail   = (EP3_RAIL *) commsRail->Rail;
50166 +    sdramaddr_t        qdescs = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs;
50167 +    EP3_RCVR_RAIL     *rcvrRail;
50168 +    EP3_InputQueue     qdesc;
50169 +    sdramaddr_t        stack;
50170 +    unsigned long      flags;
50171 +
50172 +    KMEM_ZALLOC (rcvrRail, EP3_RCVR_RAIL *, sizeof (EP3_RCVR_RAIL), TRUE);
50173 +
50174 +    kcondvar_init (&rcvrRail->CleanupSleep);
50175 +    spin_lock_init (&rcvrRail->FreeDescLock);
50176 +    INIT_LIST_HEAD (&rcvrRail->FreeDescList);
50177 +    INIT_LIST_HEAD (&rcvrRail->DescBlockList);
50178 +
50179 +    rcvrRail->Generic.CommsRail = commsRail;
50180 +    rcvrRail->Generic.Rcvr      = rcvr;
50181 +
50182 +    rcvrRail->RcvrMain       = ep_alloc_main (&rail->Generic, sizeof (EP3_RCVR_RAIL_MAIN), 0, &rcvrRail->RcvrMainAddr);
50183 +    rcvrRail->RcvrElan       = ep_alloc_elan (&rail->Generic, sizeof (EP3_RCVR_RAIL_ELAN), 0, &rcvrRail->RcvrElanAddr);
50184 +    rcvrRail->InputQueueBase = ep_alloc_elan (&rail->Generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->InputQueueAddr);
50185 +    stack                    = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rcvrRail->ThreadStack);
50186 +
50187 +    rcvrRail->TotalDescCount = 0;
50188 +    rcvrRail->FreeDescCount  = 0;
50189 +
50190 +    /* Initialise the main/elan spin lock */
50191 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_lock), 0);
50192 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_seq),  0);
50193 +
50194 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_lock), 0);
50195 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_seq), 0);
50196 +    
50197 +    /* Initialise the receive lists */
50198 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), 0);
50199 +    
50200 +    /* Initialise the ThreadShould Halt */
50201 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), 0);
50202 +
50203 +    /* Initialise pointer to the ep_rcvr_rail */
50204 +    elan3_sdram_writeq (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr), (unsigned long) rcvrRail);
50205 +
50206 +    /* Initialise elan visible main memory */
50207 +    rcvrRail->RcvrMain->ThreadLock.sl_seq  = 0;
50208 +    rcvrRail->RcvrMain->PendingLock.sl_seq = 0;
50209 +    rcvrRail->RcvrMain->PendingDescsTailp  = 0;
50210 +
50211 +    /* initialise and copy down the input queue descriptor */
50212 +    qdesc.q_state          = E3_QUEUE_FULL;
50213 +    qdesc.q_base           = rcvrRail->InputQueueAddr;
50214 +    qdesc.q_top            = rcvrRail->InputQueueAddr + (rcvr->InputQueueEntries-1) * EP_INPUTQ_SIZE;
50215 +    qdesc.q_fptr           = rcvrRail->InputQueueAddr;
50216 +    qdesc.q_bptr           = rcvrRail->InputQueueAddr + EP_INPUTQ_SIZE;
50217 +    qdesc.q_size           = EP_INPUTQ_SIZE;
50218 +    qdesc.q_event.ev_Count = 0;
50219 +    qdesc.q_event.ev_Type  = 0;
50220 +
50221 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, qdescs + rcvr->Service * sizeof (EP3_InputQueue), sizeof (EP3_InputQueue));
50222 +
50223 +    spin_lock_irqsave (&rcvr->Lock, flags);
50224 +    rcvr->Rails[rail->Generic.Number] = &rcvrRail->Generic;
50225 +    rcvr->RailMask |= EP_RAIL2RAILMASK (rail->Generic.Number);
50226 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50227 +
50228 +    /* initialise and run the Elan thread to process the queue */
50229 +    IssueRunThread (rail, ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "ep3comms_rcvr"),
50230 +                                          rcvrRail->ThreadStack, stack, EP3_STACK_SIZE, 5,
50231 +                                          rail->RailElanAddr, rcvrRail->RcvrElanAddr, rcvrRail->RcvrMainAddr,
50232 +                                          EP_MSGQ_ADDR(rcvr->Service),
50233 +                                          rail->ElanCookies));
50234 +}
50235 +
50236 +void
50237 +ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
50238 +{
50239 +    EP3_RAIL         *rail     = (EP3_RAIL *) commsRail->Rail;
50240 +    EP3_RCVR_RAIL    *rcvrRail = (EP3_RCVR_RAIL *) rcvr->Rails[rail->Generic.Number];  
50241 +    unsigned long     flags;
50242 +    struct list_head *el, *nel;
50243 +
50244 +    EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: removing rail\n", rail->Generic.Name);
50245 +
50246 +    /* flag the rail as no longer available */
50247 +    spin_lock_irqsave (&rcvr->Lock, flags);
50248 +    rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number);
50249 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50250 +    
50251 +    /* mark the input queue descriptor as full */
50252 +    SetQueueLocked(rail, ((EP3_COMMS_RAIL *)commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue));
50253 +
50254 +    /* need to halt the thread first         */
50255 +    /*   set ThreadShouldHalt in elan memory */
50256 +    /*   then trigger the event              */
50257 +    /*   and wait on haltWait                */
50258 +    elan3_sdram_writel  (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), TRUE);
50259 +
50260 +    IssueSetevent (rail,  EP_MSGQ_ADDR(rcvr->Service) + offsetof(EP3_InputQueue, q_event));
50261 +
50262 +    spin_lock_irqsave (&rcvr->Lock, flags);
50263 +
50264 +    while (rcvrRail->ThreadHalted == 0)
50265 +    {
50266 +       rcvrRail->CleanupWaiting++;
50267 +       kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
50268 +    }
50269 +
50270 +    /* at this point the thread is halted and it has no envelopes */
50271
50272 +    /* we need to wait until all the rxd's in the list that are 
50273 +     * bound to the rail we are removing are not pending 
50274 +     */
50275 +    for (;;)
50276 +    {
50277 +       int mustWait = 0;
50278 +       
50279 +       list_for_each (el, &rcvr->ActiveDescList) {
50280 +           EP_RXD       *rxd     = list_entry (el,EP_RXD, Link);
50281 +           EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
50282 +
50283 +           if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING)
50284 +           {
50285 +               mustWait++;
50286 +               break;
50287 +           }
50288 +       }
50289 +       
50290 +       if (! mustWait)
50291 +           break;
50292 +
50293 +       EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: waiting for active rxd's to be returned\n", rail->Generic.Name);
50294 +
50295 +       rcvrRail->CleanupWaiting++;
50296 +       kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
50297 +    }
50298 +
50299 +    /* at this point all rxd's in the list that are bound to the deleting rail are not pending */
50300 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
50301 +       EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
50302 +       EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
50303 +       
50304 +       if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail))
50305 +       {
50306 +           /* here we need to unbind the remaining rxd's */
50307 +           rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
50308 +           rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
50309
50310 +           elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);  /* PCI write */
50311 +           elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);  /* PCI write */
50312 +
50313 +           UnbindRxdFromRail (rxd, rxdRail);
50314 +           FreeRxdRail(rcvrRail,  rxdRail );
50315 +       }
50316 +    }
50317 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50318 +    
50319 +    /* wait for all rxd's for this rail to become free */
50320 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
50321 +    while (rcvrRail->FreeDescCount != rcvrRail->TotalDescCount)
50322 +    {
50323 +       rcvrRail->FreeDescWaiting++;
50324 +       kcondvar_wait (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock, &flags);
50325 +    }
50326 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
50327 +
50328 +    /* can now remove the rail as it can no longer be used */
50329 +    spin_lock_irqsave (&rcvr->Lock, flags);
50330 +    rcvr->Rails[rail->Generic.Number] = NULL;
50331 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50332 +
50333 +    /* all the rxd's accociated with DescBlocks must be in the FreeDescList */
50334 +    ASSERT (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount);
50335 +
50336 +    /* run through the DescBlockList deleting them */
50337 +    while (!list_empty (&rcvrRail->DescBlockList))
50338 +       FreeRxdRailBlock (rcvrRail, list_entry(rcvrRail->DescBlockList.next, EP3_RXD_RAIL_BLOCK , Link));
50339 +
50340 +    /* it had better be empty after that */
50341 +    ASSERT ((rcvrRail->TotalDescCount == 0) && (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount));
50342 +    
50343 +    ep_free_elan (&rail->Generic, rcvrRail->ThreadStack, EP3_STACK_SIZE);
50344 +    ep_free_elan (&rail->Generic, rcvrRail->InputQueueAddr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries);
50345 +    ep_free_elan (&rail->Generic, rcvrRail->RcvrElanAddr, sizeof (EP3_RCVR_RAIL_ELAN));
50346 +    ep_free_main (&rail->Generic, rcvrRail->RcvrMainAddr, sizeof (EP3_RCVR_RAIL_MAIN));
50347 +
50348 +    KMEM_FREE (rcvrRail, sizeof (EP3_RCVR_RAIL));
50349 +}
50350 +
50351 +EP_RXD *
50352 +ep3rcvr_steal_rxd (EP_RCVR_RAIL *r)
50353 +{
50354 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r;
50355 +    EP3_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
50356 +    EP_RCVR       *rcvr     = rcvrRail->Generic.Rcvr;
50357 +    E3_Addr        rxdElanAddr;
50358 +    unsigned long flags;
50359 +
50360 +    spin_lock_irqsave (&rcvr->Lock, flags);
50361 +
50362 +    LockRcvrThread (rcvrRail);
50363 +    if ((rxdElanAddr = elan3_sdram_readl (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs))) != 0)
50364 +    {
50365 +       sdramaddr_t  rxdElan  = ep_elan2sdram (&rail->Generic, rxdElanAddr);
50366 +       EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
50367 +       EP_RXD      *rxd      = rxdRail->Generic.Rxd;
50368 +       sdramaddr_t  next;
50369 +       
50370 +       EPRINTF2 (DBG_RCVR, "%s: StealRxdFromOtherRail stealing rxd %p\n", rail->Generic.Name, rail);
50371 +       
50372 +       /* Remove the RXD from the pending desc list */
50373 +       if ((next = elan3_sdram_readl (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
50374 +           rcvrRail->RcvrMain->PendingDescsTailp = 0;
50375 +       elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
50376 +       UnlockRcvrThread (rcvrRail);
50377 +       
50378 +       UnbindRxdFromRail (rxd, rxdRail);
50379 +       
50380 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50381 +       
50382 +       /* Mark rxdRail as no longer active */
50383 +       rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
50384 +       rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
50385 +       elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
50386 +       elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
50387 +       
50388 +       FreeRxdRail (rcvrRail, rxdRail);
50389 +
50390 +       return rxd;
50391 +    }
50392 +
50393 +    UnlockRcvrThread (rcvrRail);
50394 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50395 +
50396 +    return NULL;
50397 +}
50398 +
50399 +long
50400 +ep3rcvr_check (EP_RCVR_RAIL *r, long nextRunTime)
50401 +{
50402 +    EP3_RCVR_RAIL    *rcvrRail = (EP3_RCVR_RAIL *) r;
50403 +    EP3_RAIL         *rail     = RCVR_TO_RAIL (rcvrRail);
50404 +    EP_RCVR          *rcvr     = rcvrRail->Generic.Rcvr;
50405 +    EP_COMMS_SUBSYS *subsys    = rcvr->Subsys;
50406 +    EP_SYS           *sys       = subsys->Subsys.Sys;
50407 +    EP_RXD           *rxd;
50408 +    unsigned long     flags;
50409 +
50410 +    if (rcvrRail->FreeDescCount < ep_rxd_lowat && !AllocateRxdRailBlock (rcvrRail))
50411 +    {
50412 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow rxd rail pool\n", rail->Generic.Name);
50413 +               
50414 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
50415 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
50416 +    }
50417 +    
50418 +    if (rcvrRail->ThreadWaiting && (rxd = StealRxdFromOtherRail (rcvr)) != NULL)
50419 +    {
50420 +       /* Map the receive buffer into this rail as well */
50421 +       EPRINTF4 (DBG_RCVR, "%s: mapping rxd->Data (%08x.%08x.%08x) into this rails\n",
50422 +                 rail->Generic.Name, rxd->Data.nmd_addr,rxd->Data.nmd_len, rxd->Data.nmd_attr);
50423 +
50424 +       spin_lock_irqsave (&rcvr->Lock, flags);
50425 +       if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rail->Generic.Number)) &&                /* not already mapped and */
50426 +            ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rail->Generic.Number)) == 0) ||        /* failed to map it */
50427 +           ep3rcvr_queue_rxd (rxd, &rcvrRail->Generic))                                                /* or failed to queue it */
50428 +       {
50429 +           EPRINTF5 (DBG_RCVR,"%s: stolen rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n", 
50430 +                     rail->Generic.Name, rcvr, rxd, rail->Generic.Number, rcvrRail);
50431 +               
50432 +           if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
50433 +               nextRunTime = lbolt + RESOURCE_RETRY_TIME;
50434 +       }
50435 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50436 +    }
50437 +    
50438 +    return nextRunTime;
50439 +}
50440 +
50441 +static void
50442 +ep3rcvr_flush_filtering (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
50443 +{
50444 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
50445 +    EP3_RAIL       *rail      = (EP3_RAIL *) commsRail->Generic.Rail;
50446 +    ELAN3_DEV      *dev       = rail->Device;
50447 +    sdramaddr_t    qdesc      = commsRail->QueueDescs + rcvr->Service*sizeof (EP3_InputQueue);
50448 +    E3_Addr        qTop       = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_top));
50449 +    E3_Addr        qBase      = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_base));
50450 +    E3_Addr        qSize      = elan3_sdram_readl (dev,qdesc + offsetof (EP3_InputQueue, q_size));
50451 +    E3_uint32      nfptr, qbptr;
50452 +    unsigned long  flags;
50453 +    
50454 +    spin_lock_irqsave (&rcvr->Lock, flags);
50455 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
50456 +
50457 +    nfptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_fptr));
50458 +    qbptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_bptr));
50459 +    
50460 +    if (nfptr == qTop)
50461 +       nfptr = qBase;
50462 +    else
50463 +       nfptr += qSize;
50464 +    
50465 +    while (nfptr != qbptr)
50466 +    {
50467 +       unsigned nodeId = elan3_sdram_readl (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) + 
50468 +                                      offsetof (EP_ENVELOPE, NodeId));
50469 +       
50470 +       EPRINTF3 (DBG_DISCON, "%s: ep3rcvr_flush_filtering: nodeId=%d State=%d\n", rail->Generic.Name, nodeId, rail->Generic.Nodes[nodeId].State);
50471 +       
50472 +       if (rail->Generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE)
50473 +           elan3_sdram_writel (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) + 
50474 +                         offsetof (EP_ENVELOPE, Version), 0);
50475 +       
50476 +       if (nfptr == qTop)
50477 +           nfptr = qBase;
50478 +       else
50479 +           nfptr += qSize;
50480 +    }
50481 +    
50482 +    UnlockRcvrThread (rcvrRail);                                                                               /* PCI unlock */
50483 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50484 +}
50485 +
50486 +static void
50487 +ep3rcvr_flush_flushing (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
50488 +{
50489 +    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
50490 +    struct list_head *el, *nel;
50491 +    unsigned long     flags;
50492 +
50493 +    spin_lock_irqsave (&rcvr->Lock, flags);
50494 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
50495 +    
50496 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
50497 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
50498 +       EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
50499 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
50500 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
50501 +
50502 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
50503 +           continue;
50504 +       
50505 +       EPRINTF6 (DBG_DISCON, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p state %x.%x elan node %d\n", rail->Generic.Name,
50506 +                 rcvr, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, env->NodeId);
50507 +       
50508 +       switch (rxd->State)
50509 +       {
50510 +       case EP_RXD_FREE:
50511 +           printk ("ep3rcvr_flush_flushing: rxd state is free but bound to a fail\n");
50512 +           break;
50513 +
50514 +       case EP_RXD_RECEIVE_ACTIVE:
50515 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                /* incomplete message receive */
50516 +           {
50517 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n", 
50518 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
50519 +               
50520 +               nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
50521 +               continue;
50522 +           }
50523 +           break;
50524 +           
50525 +       default:
50526 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
50527 +
50528 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))    /* incomplete RPC */
50529 +           {
50530 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n", 
50531 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
50532 +               
50533 +               EP_INVALIDATE_XID (rxd->MsgXid);                        /* Ignore any previous NMD map responses */
50534 +               
50535 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
50536 +               continue;
50537 +           }
50538 +           break;
50539 +
50540 +       case EP_RXD_BEEN_ABORTED:
50541 +           printk ("ep3rcvr_flush_flushing: rxd state is aborted but bound to a fail\n");
50542 +           break;
50543 +       }
50544 +
50545 +       EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n", 
50546 +                 rail->Generic.Name, rcvr, rxd, env->NodeId);
50547 +    }    
50548 +
50549 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
50550 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50551 +}
50552 +
50553 +void
50554 +ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
50555 +{
50556 +    EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
50557 +
50558 +    switch (rail->Generic.CallbackStep)
50559 +    {
50560 +    case EP_CB_FLUSH_FILTERING:
50561 +       ep3rcvr_flush_filtering (rcvr, rcvrRail);
50562 +       break;
50563 +
50564 +    case EP_CB_FLUSH_FLUSHING:
50565 +       ep3rcvr_flush_flushing (rcvr, rcvrRail);
50566 +       break;
50567 +    }
50568 +}
50569 +
50570 +void
50571 +ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
50572 +{
50573 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
50574 +    EP3_RAIL         *rail   = RCVR_TO_RAIL (rcvrRail);
50575 +    ELAN3_DEV        *dev    = rail->Device;
50576 +    struct list_head *el, *nel;
50577 +    unsigned long     flags;
50578 +#ifdef SUPPORT_RAIL_FAILOVER
50579 +    EP_SYS           *sys    = subsys->Subsys.Sys;
50580 +#endif
50581 +   
50582 +    spin_lock_irqsave (&rcvr->Lock, flags);
50583 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
50584 +    
50585 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
50586 +       EP_RXD             *rxd      = list_entry (el, EP_RXD, Link);
50587 +       EP3_RXD_RAIL       *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
50588 +       EP_ENVELOPE        *env      = &rxd->RxdMain->Envelope;
50589 +       EP_NODE_RAIL       *nodeRail = &rail->Generic.Nodes[env->NodeId];
50590 +#ifdef SUPPORT_RAIL_FAILOVER
50591 +       EP_MANAGER_MSG_BODY msgBody;
50592 +       EP_NODE            *node     = &sys->Nodes[env->NodeId];
50593 +#endif
50594 +       
50595 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED)
50596 +           continue;
50597 +
50598 +       EPRINTF6 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p elan node %d state %x.%x\n", rail->Generic.Name, rcvr, rxd, env->NodeId,
50599 +                 rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent);
50600 +
50601 +       switch (rxd->State)
50602 +       {
50603 +       case EP_RXD_FREE:
50604 +           printk ("ep4rcvr_failover_callback: rxd state is free but bound to a fail\n");
50605 +           break;
50606 +
50607 +       case EP_RXD_RECEIVE_ACTIVE:
50608 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                /* incomplete message receive */
50609 +           {
50610 +               EPRINTF4 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
50611 +               
50612 +               UnbindRxdFromRail (rxd, rxdRail);
50613 +               
50614 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
50615 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
50616 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
50617 +               
50618 +               /* clear the data event - the done event should already be zero */
50619 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
50620 +               
50621 +               FreeRxdRail (rcvrRail, rxdRail);
50622 +               
50623 +               /* epcomms thread will requeue on different rail */
50624 +               ep_kthread_schedule (&subsys->Thread, lbolt);
50625 +               continue;
50626 +           }
50627 +           break;
50628 +
50629 +       default:
50630 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
50631 +
50632 +#ifdef SUPPORT_RAIL_FAILOVER
50633 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent) && !(EP_IS_NO_FAILOVER(env->Attr)))         /* incomplete RPC, which can be failed over  */
50634 +           {
50635 +               EPRINTF7 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p State %x.%x Xid %llxx MsgXid %llxx nodeId %d - failover\n", 
50636 +                         rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, 
50637 +                         (long long) env->Xid.Unique, (long long) rxd->MsgXid.Unique, env->NodeId);
50638 +               
50639 +               if (EP_XID_INVALID(rxd->MsgXid))
50640 +                   rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
50641 +               
50642 +               /* XXXX maybe only send the message if the node failover retry is now ? */
50643 +               msgBody.Failover.Xid      = env->Xid;
50644 +               msgBody.Failover.Railmask = node->ConnectedRails;
50645 +               
50646 +               ep_send_message (&rail->Generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody);
50647 +               
50648 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
50649 +               continue;
50650 +           }
50651 +#endif
50652 +           break;
50653 +
50654 +       case EP_RXD_BEEN_ABORTED:
50655 +           printk ("ep3rcvr_failover_callback: rxd state is aborted but bound to a rail\n");
50656 +           break;
50657 +       }
50658 +
50659 +       EPRINTF3 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->Generic.Name, rxd, env->NodeId);
50660 +    }
50661 +    
50662 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
50663 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50664 +}
50665 +
50666 +void
50667 +ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
50668 +{
50669 +    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
50670 +    ELAN3_DEV        *dev = rail->Device;
50671 +    struct list_head *el, *nel;
50672 +    struct list_head  rxdList;
50673 +    unsigned long     flags;
50674 +
50675 +    INIT_LIST_HEAD (&rxdList);
50676 +    
50677 +    spin_lock_irqsave (&rcvr->Lock, flags);
50678 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
50679 +    
50680 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
50681 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
50682 +       EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
50683 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
50684 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
50685 +       
50686 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
50687 +           continue;
50688 +
50689 +       EPRINTF4 (DBG_DISCON, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p elan node %d\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
50690 +
50691 +       switch (rxd->State)
50692 +       {
50693 +       case EP_RXD_FREE:
50694 +           printk ("ep3rcvr_disconnect_callback: rxd state is free but bound to a fail\n");
50695 +           break;
50696 +
50697 +       case EP_RXD_RECEIVE_ACTIVE:
50698 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                        /* incomplete message receive */
50699 +           {
50700 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
50701 +               
50702 +               UnbindRxdFromRail (rxd, rxdRail);
50703 +               
50704 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
50705 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
50706 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
50707 +               
50708 +               /* clear the data event - the done event should already be zero */
50709 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
50710 +               
50711 +               FreeRxdRail (rcvrRail, rxdRail);
50712 +
50713 +               /* remark it as pending if it was partially received */
50714 +               rxd->RxdMain->Len = EP_RXD_PENDING;
50715 +               
50716 +               /* epcomms thread will requeue on different rail */
50717 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50718 +               continue;
50719 +           }
50720 +           break;
50721 +
50722 +       default:
50723 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
50724 +
50725 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))    /* incomplete RPC */
50726 +           {
50727 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - not able to failover\n",
50728 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
50729 +           
50730 +               /* Mark as no longer active */
50731 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
50732 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
50733 +               
50734 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
50735 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);       /* PCI write */
50736 +               
50737 +               UnbindRxdFromRail (rxd, rxdRail);
50738 +               FreeRxdRail (rcvrRail, rxdRail);
50739 +
50740 +               /* Ignore any previous NMD/failover responses */
50741 +               EP_INVALIDATE_XID (rxd->MsgXid);
50742 +               
50743 +               /* Remove from active list */
50744 +               list_del (&rxd->Link);
50745 +               
50746 +               if (rxd->State == EP_RXD_RPC_IN_PROGRESS)                               /* ownder by user .... */
50747 +                   rxd->State = EP_RXD_BEEN_ABORTED;
50748 +               else                                                                    /* queue for completion */
50749 +               {
50750 +                   rxd->RxdMain->Len = EP_CONN_RESET;                                  /* ensure ep_rxd_status() fails */
50751 +                   list_add_tail (&rxd->Link, &rxdList);
50752 +               }
50753 +               continue;
50754 +           }
50755 +           break;
50756 +
50757 +       case EP_RXD_BEEN_ABORTED:
50758 +           printk ("ep4rcvr_failover_callback: rxd state is aborted but bound to a fail\n");
50759 +           break;
50760 +       }
50761 +           
50762 +       EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
50763 +                 rail->Generic.Name, rcvr, rxd, env->NodeId);
50764 +    }
50765 +    
50766 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
50767 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50768 +
50769 +    while (! list_empty (&rxdList)) 
50770 +    {
50771 +       EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link);
50772 +
50773 +       list_del (&rxd->Link);
50774 +
50775 +       rxd->Handler (rxd);
50776 +    }
50777 +}
50778 +
50779 +void
50780 +ep3rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r)
50781 +{
50782 +    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) r;
50783 +    sdramaddr_t   rxdElan = rxdRail->RxdElan;
50784 +    EP3_RAIL     *rail    = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail);
50785 +    ELAN3_DEV    *dev     = rail->Device;
50786 +
50787 +    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
50788 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Count)),
50789 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Type)),
50790 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Count)),
50791 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Type)));
50792 +    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
50793 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Count)),
50794 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Type)),
50795 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Count)),
50796 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Type)));
50797 +    (di->func)(di->arg, "      DataEvent=%x.%x DoneEvent=%x.%x\n",
50798 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)),
50799 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type)),
50800 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)),
50801 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type)));
50802 +    (di->func)(di->arg, "      Data=%x Len=%x\n",
50803 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr)),
50804 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len)));
50805 +}
50806 +
50807 +void
50808 +ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r)
50809 +{
50810 +    EP3_RCVR_RAIL  *rcvrRail  = (EP3_RCVR_RAIL *) r;
50811 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
50812 +    EP3_RAIL       *rail      = RCVR_TO_RAIL (rcvrRail);
50813 +    ELAN3_DEV      *dev       = rail->Device;
50814 +    sdramaddr_t     queue     = commsRail->QueueDescs + rcvrRail->Generic.Rcvr->Service * sizeof (EP3_InputQueue);
50815 +    E3_Addr         qbase      = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
50816 +    E3_Addr         qtop       = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top));
50817 +    E3_uint32       qsize      = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size));
50818 +    int             freeCount  = 0;
50819 +    int             blockCount = 0;
50820 +    unsigned long   flags;
50821 +    struct list_head *el;
50822 +
50823 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
50824 +    list_for_each (el, &rcvrRail->FreeDescList)
50825 +       freeCount++;
50826 +    list_for_each (el, &rcvrRail->DescBlockList)
50827 +       blockCount++;
50828 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
50829 +
50830 +    (di->func)(di->arg, "                 Rail %d FreeDesc %d (%d) Total %d Blocks %d %s\n",
50831 +              rail->Generic.Number, rcvrRail->FreeDescCount, freeCount, rcvrRail->TotalDescCount, blockCount, 
50832 +              rcvrRail->ThreadWaiting ? "ThreadWaiting" : "");
50833 +    
50834 +    (di->func)(di->arg, "                 InputQueue state=%x bptr=%x size=%x top=%x base=%x fptr=%x\n",
50835 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_state)),
50836 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_bptr)),
50837 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size)),
50838 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)),
50839 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base)),
50840 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr)));
50841 +    (di->func)(di->arg, "                            event=%x.%x [%x.%x] wevent=%x.%x\n",
50842 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Type)),
50843 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Count)),
50844 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Source)),
50845 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Dest)),
50846 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wevent)),
50847 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wcount)));
50848 +    
50849 +    LockRcvrThread (rcvrRail);
50850 +    {
50851 +       E3_Addr     nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr));
50852 +       EP_ENVELOPE env;
50853 +       
50854 +       if (nfptr == qtop)
50855 +           nfptr = qbase;
50856 +       else
50857 +           nfptr += qsize;
50858 +
50859 +       while (nfptr != elan3_sdram_readl (dev, queue + offsetof (E3_Queue, q_bptr)))
50860 +       {
50861 +           elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr),
50862 +                                         &env, sizeof (EP_ENVELOPE));
50863 +           
50864 +           (di->func)(di->arg, "                 ENVELOPE Version=%x Attr=%x Xid=%08x.%08x.%016llx\n",
50865 +                      env.Version, env.Attr, env.Xid.Generation, env.Xid.Handle, (long long) env.Xid.Unique);
50866 +           (di->func)(di->arg, "                          NodeId=%x Range=%x TxdRail=%x TxdMain=%x.%x.%x\n",
50867 +                      env.NodeId, env.Range, env.TxdRail, env.TxdMain.nmd_addr,
50868 +                      env.TxdMain.nmd_len, env.TxdMain.nmd_attr);
50869 +           
50870 +           
50871 +           if (nfptr == qtop)
50872 +               nfptr = qbase;
50873 +           else
50874 +               nfptr += qsize;
50875 +       }
50876 +    }
50877 +    UnlockRcvrThread (rcvrRail);
50878 +}
50879 +
50880 +void
50881 +ep3rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) {
50882 +    /* no stats here yet */
50883 +    /* EP3_RCVR_RAIL * ep4rcvr_rail = (EP3_RCVR_RAIL *) rcvr_rail; */
50884 +}
50885 +
50886 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsRx_elan4.c
50887 ===================================================================
50888 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcommsRx_elan4.c       2004-02-23 16:02:56.000000000 -0500
50889 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsRx_elan4.c    2005-07-28 14:52:52.874674784 -0400
50890 @@ -0,0 +1,1758 @@
50891 +/*
50892 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
50893 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
50894 + *
50895 + *    For licensing information please see the supplied COPYING file
50896 + *
50897 + */
50898 +
50899 +#ident "@(#)$Id: epcommsRx_elan4.c,v 1.30.2.3 2005/03/10 15:24:09 mike Exp $"
50900 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx_elan4.c,v $ */
50901 +
50902 +#include <qsnet/kernel.h>
50903 +
50904 +#include <elan/kcomm.h>
50905 +#include <elan/epsvc.h>
50906 +#include <elan/epcomms.h>
50907 +
50908 +#include "debug.h"
50909 +#include "kcomm_vp.h"
50910 +#include "kcomm_elan4.h"
50911 +#include "epcomms_elan4.h"
50912 +
50913 +#include <elan4/trtype.h>
50914 +
50915 +#define RCVR_TO_COMMS(rcvrRail)                ((EP4_COMMS_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail)
50916 +#define RCVR_TO_RAIL(rcvrRail)         ((EP4_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail)
50917 +#define RCVR_TO_DEV(rcvrRail)          (RCVR_TO_RAIL(rcvrRail)->r_ctxt.ctxt_dev)
50918 +#define RCVR_TO_SUBSYS(rcvrRail)       (((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys)
50919 +
50920 +#define RXD_TO_RCVR(txdRail)           ((EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail)
50921 +#define RXD_TO_RAIL(txdRail)           RCVR_TO_RAIL(RXD_TO_RCVR(rxdRail))
50922 +
50923 +static void rxd_interrupt (EP4_RAIL *rail, void *arg);
50924 +
50925 +static __inline__ void 
50926 +__ep4_rxd_assert_free (EP4_RXD_RAIL *rxdRail, const char *file, const int line)
50927 +{
50928 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
50929 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
50930 +    register int i, failed = 0;
50931 +    
50932 +    for (i = 0; i <= EP_MAXFRAG; i++)
50933 +       if (((rxdRail)->rxd_main->rxd_sent[i] != EP4_STATE_FREE)) 
50934 +           failed |= (1 << i);
50935 +    
50936 +    if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_FREE))
50937 +       failed |= (1 << 5);
50938 +    if (((rxdRail)->rxd_main->rxd_done   != EP4_STATE_FREE)) 
50939 +       failed |= (1 << 6);
50940 +    
50941 +    if (sdram_assert)
50942 +    {
50943 +       if (((elan4_sdram_readq (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)) >> 32) != 0)) 
50944 +           failed |= (1 << 7);
50945 +       for (i = 0; i < EP_MAXFRAG; i++)
50946 +           if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)) >> 32) != 0)) 
50947 +               failed |= (1 << (8 + i));
50948 +       if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0)) 
50949 +           failed |= (1 << 12);
50950 +       if (((int)(elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32)) 
50951 +           failed |= (1 << 13);
50952 +    }
50953 +
50954 +    if (failed)
50955 +    {
50956 +       printk ("__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
50957 +
50958 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
50959 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
50960 +
50961 +       for (i = 0; i <= EP_MAXFRAG; i++)
50962 +           (rxdRail)->rxd_main->rxd_sent[i] = EP4_STATE_FREE;
50963 +
50964 +       (rxdRail)->rxd_main->rxd_failed = EP4_STATE_FREE;
50965 +       (rxdRail)->rxd_main->rxd_done   = EP4_STATE_FREE;
50966 +
50967 +       if (sdram_assert)
50968 +       {
50969 +           elan4_sdram_writew (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev,
50970 +                               (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType) + 4, 0);
50971 +
50972 +           for (i = 0; i < EP_MAXFRAG; i++)
50973 +               elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType) + 4, 0);
50974 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0);
50975 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32);
50976 +       }
50977 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_free");
50978 +    }
50979 +}
50980 +
50981 +static __inline__ void
50982 +__ep4_rxd_assert_pending(EP4_RXD_RAIL *rxdRail, const char *file, const int line)
50983 +{ 
50984 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rcvrRail);
50985 +    register int failed = 0;
50986 +
50987 +    failed |= ((rxdRail)->rxd_main->rxd_done != EP4_STATE_ACTIVE);
50988 +
50989 +    if (failed)
50990 +    {
50991 +       printk ("__ep4_rxd_assert_pending: %s - %d\n", file, line);
50992 +
50993 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_pending: %s - %d\n", file, line);
50994 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
50995 +
50996 +       (rxdRail)->rxd_main->rxd_done = EP4_STATE_ACTIVE;
50997 +
50998 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_pending");
50999 +    }
51000 +}
51001 +
51002 +static __inline__ void
51003 +__ep4_rxd_assert_private(EP4_RXD_RAIL *rxdRail, const char *file, const int line)
51004 +{
51005 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
51006 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
51007 +    register int failed = 0;
51008 +
51009 +    if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_ACTIVE)) failed |= (1 << 0);
51010 +    if (((rxdRail)->rxd_main->rxd_done != EP4_STATE_PRIVATE))  failed |= (1 << 1);
51011 +    
51012 +    if (sdram_assert)
51013 +    {
51014 +       if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0))           failed |= (1 << 2);
51015 +       if (((int) (elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32)) failed |= (1 << 3);
51016 +    }
51017 +
51018 +    if (failed)
51019 +    {
51020 +       printk ("__ep4_rxd_assert_private: %s - %d\n", file, line);
51021 +
51022 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_private: %s - %d\n", file, line);
51023 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
51024 +
51025 +       (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
51026 +       (rxdRail)->rxd_main->rxd_done   = EP4_STATE_PRIVATE;
51027 +
51028 +       if (sdram_assert)
51029 +       {
51030 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0);
51031 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32);
51032 +       }
51033 +
51034 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_private");
51035 +    }
51036 +}
51037 +
51038 +static __inline__ void
51039 +__ep4_rxd_private_to_free (EP4_RXD_RAIL *rxdRail)
51040 +{
51041 +    register int i;
51042 +
51043 +    for (i = 0; i <= EP_MAXFRAG; i++)
51044 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_FREE;
51045
51046 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_FREE;
51047 +    rxdRail->rxd_main->rxd_done   = EP4_STATE_FREE;
51048 +}
51049 +
51050 +static __inline__ void
51051 +__ep4_rxd_force_private (EP4_RXD_RAIL *rxdRail)
51052 +{
51053 +    EP4_RAIL  *rail = RXD_TO_RAIL(rxdRail);
51054 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
51055 +
51056 +    (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
51057 +    (rxdRail)->rxd_main->rxd_done = EP4_STATE_PRIVATE;
51058 +
51059 +    if (sdram_assert) 
51060 +       elan4_sdram_writeq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
51061 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51062 +}
51063 +
51064 +#define EP4_RXD_ASSERT_FREE(rxdRail)           __ep4_rxd_assert_free(rxdRail, __FILE__, __LINE__)
51065 +#define EP4_RXD_ASSERT_PENDING(rxdRail)                __ep4_rxd_assert_pending(rxdRail, __FILE__, __LINE__)
51066 +#define EP4_RXD_ASSERT_PRIVATE(rxdRail)                __ep4_rxd_assert_private(rxdRail, __FILE__, __LINE__)
51067 +#define EP4_RXD_PRIVATE_TO_FREE(rxdRail)       __ep4_rxd_private_to_free(rxdRail)
51068 +#define EP4_RXD_FORCE_PRIVATE(rxdRail)         __ep4_rxd_force_private(rxdRail)
51069 +
51070 +static int
51071 +alloc_rxd_block (EP4_RCVR_RAIL *rcvrRail)
51072 +{
51073 +    EP4_RAIL           *rail = RCVR_TO_RAIL (rcvrRail);
51074 +    ELAN4_DEV          *dev  = rail->r_ctxt.ctxt_dev;
51075 +    EP4_RXD_RAIL_BLOCK *blk;
51076 +    EP4_RXD_RAIL_MAIN  *rxdMain;
51077 +    EP_ADDR            rxdMainAddr;
51078 +    sdramaddr_t                rxdElan;
51079 +    EP_ADDR            rxdElanAddr;
51080 +    EP4_RXD_RAIL       *rxdRail;
51081 +    unsigned long       flags;
51082 +    int                 i, j;
51083 +
51084 +    KMEM_ZALLOC (blk, EP4_RXD_RAIL_BLOCK *, sizeof (EP4_RXD_RAIL_BLOCK), 1);
51085 +
51086 +    if (blk == NULL)
51087 +       return 0;
51088 +
51089 +    if ((rxdElan = ep_alloc_elan (&rail->r_generic, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdElanAddr)) == (sdramaddr_t) 0)
51090 +    {
51091 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
51092 +       return 0;
51093 +    }
51094 +
51095 +    if ((rxdMain = ep_alloc_main (&rail->r_generic, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdMainAddr)) == (EP4_RXD_RAIL_MAIN *) NULL)
51096 +    {
51097 +       ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
51098 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
51099 +       return 0;
51100 +    }
51101 +
51102 +    if (ep4_reserve_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK, 0) != 0)
51103 +    {
51104 +       ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
51105 +       ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
51106 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
51107 +
51108 +       return 0;
51109 +    }
51110 +
51111 +    for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++)
51112 +    {
51113 +       rxdRail->rxd_generic.RcvrRail = &rcvrRail->rcvr_generic;
51114 +       rxdRail->rxd_elan             = rxdElan;
51115 +       rxdRail->rxd_elan_addr        = rxdElanAddr;
51116 +       rxdRail->rxd_main             = rxdMain;
51117 +       rxdRail->rxd_main_addr        = rxdMainAddr;
51118 +
51119 +       /* reserve 128 bytes of "event" cq space for the chained STEN packets */
51120 +       if ((rxdRail->rxd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_RXD_STEN_CMD_NDWORDS)) == NULL)
51121 +           goto failed;
51122 +
51123 +       /* allocate a single word of "setevent" command space */
51124 +       if ((rxdRail->rxd_scq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
51125 +       {
51126 +           ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
51127 +           goto failed;
51128 +       }
51129 +
51130 +       /* initialise the completion events */
51131 +       for (j = 0; j <= EP_MAXFRAG; j++)
51132 +           rxdMain->rxd_sent[i] = EP4_STATE_FREE;
51133 +
51134 +       rxdMain->rxd_done   = EP4_STATE_FREE;
51135 +       rxdMain->rxd_failed = EP4_STATE_FREE;
51136 +
51137 +       /* initialise the scq for the thread */
51138 +       rxdMain->rxd_scq = rxdRail->rxd_scq->ecq_addr;
51139 +
51140 +       /* initialise the "start" event to copy the first STEN packet into the command queue */
51141 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType),
51142 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS));
51143 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopySource),
51144 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]));
51145 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopyDest),
51146 +                           rxdRail->rxd_ecq->ecq_addr);
51147 +
51148 +       /* initialise the "chain" events to copy the next STEN packet into the command queue */
51149 +       for (j = 0; j < EP_MAXFRAG; j++)
51150 +       {
51151 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CountAndType),
51152 +                               E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
51153 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopySource),
51154 +                               rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j+1]));
51155 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopyDest),
51156 +                               rxdRail->rxd_ecq->ecq_addr);
51157 +       }
51158 +
51159 +       /* initialise the portions of the sten packets which don't change */
51160 +       for (j = 0; j < EP_MAXFRAG+1; j++)
51161 +       {
51162 +           if (j < EP_MAXFRAG)
51163 +               elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_dma_dstEvent),
51164 +                                   rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j]));
51165 +           else
51166 +               elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_dma_dstEvent),
51167 +                                   rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done));
51168 +
51169 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_guard),
51170 +                               GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
51171 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_cmd),
51172 +                               WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_sent[j])));
51173 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_value),
51174 +                               EP4_STATE_FINISHED);
51175 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_guard),
51176 +                               GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
51177 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_setevent),
51178 +                               SET_EVENT_CMD | (rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed)));
51179 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_nop_cmd),
51180 +                               NOP_CMD);
51181 +       }
51182 +
51183 +       /* register a main interrupt cookie */
51184 +       ep4_register_intcookie (rail, &rxdRail->rxd_intcookie, rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
51185 +                               rxd_interrupt, rxdRail);
51186 +
51187 +       /* initialise the command stream for the done event */
51188 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_cmd),
51189 +                           WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_done)));
51190 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_value),
51191 +                           EP4_STATE_FINISHED);
51192 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_intr_cmd),
51193 +                           INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT));
51194 +
51195 +       /* initialise the command stream for the fail event */
51196 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_cmd),
51197 +                           WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_failed)));
51198 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_value),
51199 +                           EP4_STATE_FAILED);
51200 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_intr_cmd),
51201 +                           INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT));
51202 +
51203 +       /* initialise the done and fail events */
51204 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
51205 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51206 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopySource),
51207 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd));
51208 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopyDest),
51209 +                           rxdRail->rxd_ecq->ecq_addr);
51210 +
51211 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType),
51212 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
51213 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopySource),
51214 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd));
51215 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopyDest),
51216 +                           rxdRail->rxd_ecq->ecq_addr);
51217 +       
51218 +       /* initialise the pointer to the main memory portion */
51219 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main), 
51220 +                           rxdMainAddr);
51221 +
51222 +       /* move onto next descriptor */
51223 +       rxdElan     += EP4_RXD_RAIL_ELAN_SIZE;
51224 +       rxdElanAddr += EP4_RXD_RAIL_ELAN_SIZE;
51225 +       rxdMain      = (EP4_RXD_RAIL_MAIN *) ((unsigned long) rxdMain + EP4_RXD_RAIL_MAIN_SIZE);
51226 +       rxdMainAddr += EP4_RXD_RAIL_MAIN_SIZE;
51227 +    }
51228 +
51229 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
51230 +
51231 +    list_add  (&blk->blk_link, &rcvrRail->rcvr_blocklist);
51232 +
51233 +    rcvrRail->rcvr_totalcount += EP4_NUM_RXD_PER_BLOCK;
51234 +    rcvrRail->rcvr_freecount  += EP4_NUM_RXD_PER_BLOCK;
51235 +
51236 +    for (i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++)
51237 +       list_add (&blk->blk_rxds[i].rxd_generic.Link, &rcvrRail->rcvr_freelist);
51238 +
51239 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
51240 +
51241 +    return 1;
51242 +
51243 + failed:
51244 +    while (--i >= 0)
51245 +    {
51246 +       rxdRail--;
51247 +
51248 +       ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
51249 +       ep4_put_ecq (rail, rxdRail->rxd_scq, 1);
51250 +
51251 +       ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie);
51252 +    }
51253 +
51254 +    ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK);
51255 +    
51256 +    ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
51257 +    ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
51258 +    KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
51259 +
51260 +    return 0;
51261 +}
51262 +
51263 +
51264 +static void
51265 +free_rxd_block (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL_BLOCK *blk)
51266 +{
51267 +    EP4_RAIL     *rail = RCVR_TO_RAIL (rcvrRail);
51268 +    EP4_RXD_RAIL *rxdRail;
51269 +    unsigned long flags;
51270 +    int           i;
51271 +
51272 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
51273 +
51274 +    list_del (&blk->blk_link);
51275 +
51276 +    rcvrRail->rcvr_totalcount -= EP4_NUM_RXD_PER_BLOCK;
51277 +
51278 +    for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++)
51279 +    {
51280 +       rcvrRail->rcvr_freecount--;
51281 +
51282 +       ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
51283 +       ep4_put_ecq (rail, rxdRail->rxd_scq, 1);
51284 +
51285 +       ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie);
51286 +
51287 +       list_del (&rxdRail->rxd_generic.Link);
51288 +    }
51289 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
51290 +
51291 +    ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK);
51292 +
51293 +    ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
51294 +    ep_free_elan (&rail->r_generic, blk->blk_rxds[0].rxd_elan_addr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
51295 +
51296 +    KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
51297 +}
51298 +
51299 +static EP4_RXD_RAIL *
51300 +get_rxd_rail (EP4_RCVR_RAIL *rcvrRail)
51301 +{
51302 +    EP_COMMS_SUBSYS  *subsys = RCVR_TO_SUBSYS(rcvrRail);
51303 +    EP4_RXD_RAIL     *rxdRail;
51304 +    unsigned long flags;
51305 +    int low_on_rxds;
51306 +
51307 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
51308 +
51309 +    if (list_empty (&rcvrRail->rcvr_freelist))
51310 +       rxdRail = NULL;
51311 +    else
51312 +    {
51313 +       rxdRail = list_entry (rcvrRail->rcvr_freelist.next, EP4_RXD_RAIL, rxd_generic.Link);
51314 +
51315 +       EP4_RXD_ASSERT_FREE(rxdRail);
51316 +
51317 +       list_del (&rxdRail->rxd_generic.Link);
51318 +
51319 +       rcvrRail->rcvr_freecount--;
51320 +    }
51321 +    /* Wakeup the descriptor primer thread if there's not many left */
51322 +    low_on_rxds = (rcvrRail->rcvr_freecount < ep_rxd_lowat);
51323 +
51324 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
51325 +
51326 +    if (low_on_rxds)
51327 +       ep_kthread_schedule (&subsys->Thread, lbolt);
51328 +
51329 +    return (rxdRail);
51330 +}
51331 +
51332 +static void
51333 +free_rxd_rail (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL *rxdRail)
51334 +{
51335 +    unsigned long flags;
51336 +
51337 +    EP4_RXD_ASSERT_FREE(rxdRail);
51338 +
51339 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
51340 +    
51341 +    list_add (&rxdRail->rxd_generic.Link, &rcvrRail->rcvr_freelist);
51342 +
51343 +    rcvrRail->rcvr_freecount++;
51344 +
51345 +    if (rcvrRail->rcvr_freewaiting)
51346 +    {
51347 +       rcvrRail->rcvr_freewaiting--;
51348 +       kcondvar_wakeupall (&rcvrRail->rcvr_freesleep, &rcvrRail->rcvr_freelock);
51349 +    }
51350 +
51351 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
51352 +}
51353 +
51354 +static void
51355 +bind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail)
51356 +{
51357 +    EP4_RAIL *rail = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail);
51358 +
51359 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
51360 +
51361 +    EPRINTF3 (DBG_RCVR, "%s: bind_rxd_rail: rxd=%p rxdRail=%p\n",  rail->r_generic.Name, rxd, rxdRail);
51362 +
51363 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_rxd), rxd->NmdMain.nmd_addr);                      /* PCI write */
51364 +
51365 +    rxd->RxdRail             = &rxdRail->rxd_generic;
51366 +    rxdRail->rxd_generic.Rxd = rxd;
51367 +}
51368 +
51369 +static void
51370 +unbind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail)
51371 +{
51372 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
51373 +    
51374 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
51375 +    ASSERT (rxd->RxdRail == &rxdRail->rxd_generic && rxdRail->rxd_generic.Rxd == rxd);
51376 +
51377 +    EP4_RXD_ASSERT_PRIVATE (rxdRail);
51378 +
51379 +    EPRINTF3 (DBG_RCVR, "%s: unbind_rxd_rail: rxd=%p rxdRail=%p\n",  RCVR_TO_RAIL(rcvrRail)->r_generic.Name, rxd, rxdRail);
51380 +
51381 +    rxd->RxdRail             = NULL;
51382 +    rxdRail->rxd_generic.Rxd = NULL;
51383 +
51384 +    if (rcvrRail->rcvr_cleanup_waiting)
51385 +       kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rxd->Rcvr->Lock);
51386 +    rcvrRail->rcvr_cleanup_waiting = 0;
51387 +
51388 +    EP4_RXD_PRIVATE_TO_FREE (rxdRail);
51389 +}
51390 +
51391 +
51392 +static void
51393 +rcvr_stall_interrupt (EP4_RAIL *rail, void *arg)
51394 +{
51395 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg;
51396 +    EP_RCVR       *rcvr     = rcvrRail->rcvr_generic.Rcvr;
51397 +    unsigned long  flags;
51398 +
51399 +    spin_lock_irqsave (&rcvr->Lock, flags);
51400 +    
51401 +    EPRINTF1 (DBG_RCVR, "rcvr_stall_interrupt: rcvrRail %p thread halted\n", rcvrRail);
51402 +
51403 +    rcvrRail->rcvr_thread_halted = 1;
51404 +
51405 +    kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock);
51406 +
51407 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51408 +}
51409 +
51410 +static void
51411 +rcvr_stall_haltop (ELAN4_DEV *dev, void *arg)
51412 +{
51413 +    EP4_RCVR_RAIL  *rcvrRail  = (EP4_RCVR_RAIL *) arg;
51414 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
51415 +    EP_RCVR        *rcvr      = rcvrRail->rcvr_generic.Rcvr;
51416 +    sdramaddr_t     qdesc     = ((EP4_COMMS_RAIL *) commsRail)->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
51417 +    E4_uint64       qbptr     = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
51418 +
51419 +    /* Mark the queue as full by writing the fptr */
51420 +    if (qbptr == (rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)))
51421 +       elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), rcvrRail->rcvr_slots_addr);
51422 +    else
51423 +       elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), qbptr + EP_INPUTQ_SIZE);
51424 +
51425 +    /* Notify the thread that it should stall after processing any outstanding envelopes */
51426 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie),
51427 +                       rcvrRail->rcvr_stall_intcookie.int_val);
51428 +
51429 +    /* Issue a swtevent to the queue event to wake the thread up */
51430 +    ep4_set_event_cmd (rcvrRail->rcvr_resched, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent));
51431 +}
51432 +
51433 +static void
51434 +rxd_interrupt (EP4_RAIL *rail, void *arg)
51435 +{
51436 +    EP4_RXD_RAIL      *rxdRail  = (EP4_RXD_RAIL *) arg;
51437 +    EP4_RCVR_RAIL     *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
51438 +    EP_RCVR           *rcvr     = rcvrRail->rcvr_generic.Rcvr;
51439 +    EP4_RXD_RAIL_MAIN *rxdMain  = rxdRail->rxd_main;
51440 +    unsigned long      delay    = 1;
51441 +    EP_RXD            *rxd;
51442 +    EP_ENVELOPE       *env;
51443 +    unsigned long      flags;
51444 +
51445 +    spin_lock_irqsave (&rcvr->Lock, flags);
51446 +
51447 +    for (;;)
51448 +    {
51449 +       if (rxdMain->rxd_done == EP4_STATE_FINISHED || rxdMain->rxd_failed == EP4_STATE_FAILED)
51450 +           break;
51451 +
51452 +       /* The write to rxd_done could be held up in the PCI bridge even though
51453 +        * we've seen the interrupt cookie.  Unlike elan3, there is no possibility
51454 +        * of spurious interrupts since we flush the command queues on node 
51455 +        * disconnection and the txcallback mechanism */
51456 +       mb();
51457 +
51458 +       if (delay > EP4_EVENT_FIRING_TLIMIT)
51459 +       {
51460 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
51461 +
51462 +           EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "rxd_interrupt - not finished\n");
51463 +           return;
51464 +       }
51465 +       DELAY(delay);
51466 +       delay <<= 1;
51467 +    }
51468 +
51469 +    if (rxdMain->rxd_done != EP4_STATE_FINISHED)
51470 +    {
51471 +       EPRINTF8 (DBG_RETRY, "%s: rxd_interrupt: rxdRail %p retry: done=%d failed=%d NodeId=%d XID=%08x.%08x.%016llx\n",
51472 +                 rail->r_generic.Name, rxdRail, (int)rxdMain->rxd_done, (int)rxdMain->rxd_failed, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.NodeId,
51473 +                 rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Generation, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Handle, 
51474 +                 rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Unique);
51475 +    
51476 +       spin_lock (&rcvrRail->rcvr_retrylock);
51477 +
51478 +       rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;                        /* XXXX backoff ? */
51479 +
51480 +       list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist);
51481 +
51482 +       ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time);
51483 +       spin_unlock (&rcvrRail->rcvr_retrylock);
51484 +
51485 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
51486 +       return;
51487 +    }
51488 +    
51489 +    rxd = rxdRail->rxd_generic.Rxd;
51490 +    env = &rxd->RxdMain->Envelope;
51491 +
51492 +    /*
51493 +     * Note, since the thread will have sent the remote dma packet before copying 
51494 +     * the envelope, we must check that it has completed doing this,  we do this
51495 +     * by acquiring the spinlock against the thread which it only drops once it's
51496 +     * completed.
51497 +     */
51498 +    if (rxd->RxdMain->Len == EP_RXD_PENDING)
51499 +    {
51500 +       EP4_SPINENTER (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
51501 +                      &rcvrRail->rcvr_main->rcvr_thread_lock);
51502 +       
51503 +       EP4_SPINEXIT (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
51504 +                     &rcvrRail->rcvr_main->rcvr_thread_lock);
51505 +       
51506 +       ASSERT (env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING);
51507 +    }
51508 +
51509 +    EPRINTF8 (DBG_RCVR, "%s: rxd_interrupt: rxd %p finished from %d XID %08x.%08x.%016llx len %d attr %x\n", rail->r_generic.Name, 
51510 +             rxd, rxd->RxdMain->Envelope.NodeId, rxd->RxdMain->Envelope.Xid.Generation, rxd->RxdMain->Envelope.Xid.Handle, 
51511 +             rxd->RxdMain->Envelope.Xid.Unique,  rxd->RxdMain->Len, rxd->RxdMain->Envelope.Attr);
51512 +
51513 +    rxdMain->rxd_done  = EP4_STATE_PRIVATE;
51514 +    rxd->Data.nmd_attr = EP_RAIL2RAILMASK (rail->r_generic.Number);
51515 +
51516 +    switch (rxd->State)
51517 +    {
51518 +    case EP_RXD_RECEIVE_ACTIVE:
51519 +       if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr))
51520 +           rxd->State = EP_RXD_RPC_IN_PROGRESS;
51521 +       else
51522 +       {
51523 +           rxd->State = EP_RXD_COMPLETED;
51524 +
51525 +           /* remove from active list */
51526 +           list_del (&rxd->Link);
51527 +
51528 +           unbind_rxd_rail (rxd, rxdRail);
51529 +           free_rxd_rail (rcvrRail, rxdRail);
51530 +       }
51531 +
51532 +       if (rxd->RxdMain->Len >= 0) {
51533 +           INC_STAT(rcvrRail->rcvr_generic.stats,rx);
51534 +           ADD_STAT(rcvrRail->rcvr_generic.stats,rx_len,rxd->RxdMain->Len);
51535 +           INC_STAT(rail->r_generic.Stats,rx);
51536 +           ADD_STAT(rail->r_generic.Stats,rx_len,rxd->RxdMain->Len);
51537 +       }
51538 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
51539 +       ep_rxd_received (rxd);
51540 +
51541 +       break;
51542 +
51543 +    case EP_RXD_PUT_ACTIVE:
51544 +    case EP_RXD_GET_ACTIVE:
51545 +       rxd->State = EP_RXD_RPC_IN_PROGRESS;
51546 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
51547 +       
51548 +       rxd->Handler (rxd);
51549 +       break;
51550 +
51551 +    case EP_RXD_COMPLETE_ACTIVE:
51552 +       rxd->State = EP_RXD_COMPLETED;
51553 +
51554 +       /* remove from active list */
51555 +       list_del (&rxd->Link);
51556 +
51557 +       unbind_rxd_rail (rxd, rxdRail);
51558 +       free_rxd_rail (rcvrRail, rxdRail);
51559 +
51560 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
51561 +
51562 +       rxd->Handler(rxd);
51563 +       break;
51564 +
51565 +    default:
51566 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
51567 +
51568 +       printk ("%s: rxd_interrupt: rxd %p in invalid state %d\n", rail->r_generic.Name, rxd, rxd->State);
51569 +       /* NOTREACHED */
51570 +    }
51571 +}
51572 +
51573 +static void
51574 +ep4rcvr_flush_filtering (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
51575 +{
51576 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
51577 +    EP4_RAIL       *rail      = RCVR_TO_RAIL(rcvrRail);
51578 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
51579 +    sdramaddr_t    qdesc      = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
51580 +    E4_Addr        qbase      = rcvrRail->rcvr_slots_addr;
51581 +    E4_Addr        qlast      = qbase + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1);
51582 +    E4_uint64      qfptr, qbptr;
51583 +    unsigned long  flags;
51584 +    
51585 +    spin_lock_irqsave (&rcvr->Lock, flags);
51586 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51587 +    
51588 +    /* zip down the input queue and invalidate any envelope we find to a node which is locally passivated */
51589 +    qfptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr));
51590 +    qbptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
51591 +
51592 +    while (qfptr != qbptr)
51593 +    {
51594 +       unsigned int nodeId = elan4_sdram_readl (dev, rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, NodeId));
51595 +
51596 +       EPRINTF3 (DBG_DISCON, "%s: ep4rcvr_flush_filtering: nodeId=%d State=%d\n", rail->r_generic.Name, nodeId, rail->r_generic.Nodes[nodeId].State);
51597 +       
51598 +       if (rail->r_generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE)
51599 +           elan4_sdram_writel (dev,  rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, Version), 0);
51600 +       
51601 +       if (qfptr != qlast)
51602 +           qfptr += EP_INPUTQ_SIZE;
51603 +       else
51604 +           qfptr = qbase;
51605 +    }
51606 +
51607 +    /* Insert a setevent command into the thread's command queue
51608 +     * to ensure that all sten packets have completed */
51609 +    elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS);
51610 +    ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq);
51611 +    
51612 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51613 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51614 +}
51615 +
51616 +static void
51617 +ep4rcvr_flush_flushing (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
51618 +{
51619 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
51620 +    ELAN4_DEV       *dev  = rail->r_ctxt.ctxt_dev;
51621 +    struct list_head *el, *nel;
51622 +    struct list_head  rxdList;
51623 +    unsigned long     flags;
51624 +
51625 +    INIT_LIST_HEAD (&rxdList);
51626 +    
51627 +    /* remove any sten packets which are retrying to nodes which are being passivated */
51628 +    spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags);
51629 +    list_for_each_safe (el, nel, &rcvrRail->rcvr_retrylist) {
51630 +       EP4_RXD_RAIL *rxdRail  = list_entry (el, EP4_RXD_RAIL, rxd_retry_link);
51631 +       EP_ENVELOPE  *env      = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope;
51632 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
51633 +
51634 +       if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
51635 +       {
51636 +           EPRINTF2 (DBG_XMTR, "%s; ep4rcvr_flush_flushing: removing rxdRail %p from retry list\n", rail->r_generic.Name, rxdRail);
51637 +           
51638 +           list_del (&rxdRail->rxd_retry_link);
51639 +       }
51640 +    }
51641 +    spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags);
51642 +
51643 +    spin_lock_irqsave (&rcvr->Lock, flags);
51644 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51645 +    
51646 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
51647 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
51648 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51649 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
51650 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
51651 +
51652 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL (rxdRail, rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
51653 +           continue;
51654 +       
51655 +       EPRINTF5 (DBG_DISCON, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p state %d elan node %d\n", 
51656 +                 rail->r_generic.Name, rcvr, rxd, (int)rxdRail->rxd_main->rxd_done, env->NodeId);
51657 +       
51658 +       switch (rxd->State)
51659 +       {
51660 +       case EP_RXD_FREE:
51661 +           printk ("ep4rcvr_flush_flushing: rxd state is free but bound to a fail\n");
51662 +           break;
51663 +
51664 +       case EP_RXD_RECEIVE_ACTIVE:
51665 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete message receive */
51666 +           {
51667 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n", 
51668 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId);
51669 +               
51670 +               nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
51671 +               continue;
51672 +           }
51673 +           break;
51674 +           
51675 +       default:
51676 +           EP4_ASSERT (rail, EP_IS_RPC(env->Attr));
51677 +
51678 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete RPC */
51679 +           {
51680 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n", 
51681 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId);
51682 +               
51683 +               EP_INVALIDATE_XID (rxd->MsgXid);                        /* Ignore any previous NMD map responses */
51684 +               
51685 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
51686 +               continue;
51687 +           }
51688 +           break;
51689 +
51690 +       case EP_RXD_BEEN_ABORTED:
51691 +           printk ("ep4rcvr_flush_flushing: rxd state is aborted but bound to a fail\n");
51692 +           break;
51693 +       }
51694 +
51695 +       EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n", 
51696 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
51697 +    }    
51698 +
51699 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51700 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51701 +}
51702 +
51703 +void
51704 +ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
51705 +{
51706 +    EP4_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
51707 +
51708 +    switch (rail->r_generic.CallbackStep)
51709 +    {
51710 +    case EP_CB_FLUSH_FILTERING:
51711 +       ep4rcvr_flush_filtering (rcvr, rcvrRail);
51712 +       break;
51713 +
51714 +    case EP_CB_FLUSH_FLUSHING:
51715 +       ep4rcvr_flush_flushing (rcvr, rcvrRail);
51716 +       break;
51717 +    }
51718 +}
51719 +
51720 +void
51721 +ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
51722 +{
51723 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
51724 +    EP4_RAIL         *rail   = RCVR_TO_RAIL (rcvrRail);
51725 +    ELAN4_DEV       *dev    = rail->r_ctxt.ctxt_dev;
51726 +    struct list_head *el, *nel;
51727 +    unsigned long     flags;
51728 +#if SUPPORT_RAIL_FAILOVER
51729 +    EP_SYS           *sys    = subsys->Subsys.Sys;
51730 +#endif
51731 +    
51732 +    spin_lock_irqsave (&rcvr->Lock, flags);
51733 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51734 +    
51735 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
51736 +       EP_RXD             *rxd      = list_entry (el, EP_RXD, Link);
51737 +       EP4_RXD_RAIL       *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51738 +       EP_ENVELOPE        *env      = &rxd->RxdMain->Envelope;
51739 +       EP_NODE_RAIL       *nodeRail = &rail->r_generic.Nodes[env->NodeId];
51740 +#if SUPPORT_RAIL_FAILOVER
51741 +       EP_NODE            *node     = &sys->Nodes[env->NodeId];
51742 +       EP_MANAGER_MSG_BODY msgBody;
51743 +#endif
51744 +       
51745 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED)
51746 +           continue;
51747 +
51748 +       EPRINTF5 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p elan node %d state %d\n", 
51749 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId, (int)rxdRail->rxd_main->rxd_done);
51750 +
51751 +       switch (rxd->State)
51752 +       {
51753 +       case EP_RXD_FREE:
51754 +           printk ("ep4rcvr_failover_callback: rxd state is free but bound to a rail\n");
51755 +           break;
51756 +
51757 +       case EP_RXD_RECEIVE_ACTIVE:
51758 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                        /* incomplete message receive */
51759 +           {
51760 +               EPRINTF4 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId);
51761 +
51762 +               EP4_RXD_FORCE_PRIVATE(rxdRail);
51763 +               
51764 +               unbind_rxd_rail (rxd, rxdRail);
51765 +
51766 +               free_rxd_rail (rcvrRail, rxdRail);
51767 +           
51768 +               /* epcomms thread will requeue on different rail */
51769 +               ep_kthread_schedule (&subsys->Thread, lbolt);
51770 +               continue;
51771 +           }
51772 +           break;
51773 +
51774 +       default:
51775 +           EP4_ASSERT (rail, EP_IS_RPC(env->Attr));
51776 +
51777 +#if SUPPORT_RAIL_FAILOVER
51778 +           /* XXXX - no rail failover for now .... */
51779 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE && !EP_IS_NO_FAILOVER(env->Attr))       /* incomplete RPC, which can be failed over */
51780 +           {
51781 +               EPRINTF6 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p State %d Xid %llxx MsgXid %llxx nodeId %d - failover\n", 
51782 +                         rail->r_generic.Name, rxd, rxd->State, env->Xid.Unique, rxd->MsgXid.Unique, env->NodeId);
51783 +               
51784 +               if (EP_XID_INVALID(rxd->MsgXid))
51785 +                   rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
51786 +               
51787 +               /* XXXX maybe only send the message if the node failover retry is now ? */
51788 +               msgBody.Failover.Xid      = env->Xid;
51789 +               msgBody.Failover.Railmask = node->ConnectedRails;
51790 +               
51791 +               ep_send_message (&rail->r_generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody);
51792 +               
51793 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
51794 +               continue;
51795 +           }
51796 +#endif
51797 +           break;
51798 +
51799 +       case EP_RXD_BEEN_ABORTED:
51800 +           printk ("ep4rcvr_failover_callback: rxd state is aborted but bound to a fail\n");
51801 +           break;
51802 +       }
51803 +       EPRINTF3 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->r_generic.Name, rxd, env->NodeId);
51804 +    }
51805 +    
51806 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51807 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51808 +}
51809 +
51810 +void
51811 +ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
51812 +{
51813 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
51814 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
51815 +    struct list_head *el, *nel;
51816 +    struct list_head  rxdList;
51817 +    unsigned long     flags;
51818 +
51819 +    INIT_LIST_HEAD (&rxdList);
51820 +    
51821 +    spin_lock_irqsave (&rcvr->Lock, flags);
51822 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51823 +    
51824 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
51825 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
51826 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51827 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
51828 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
51829 +       
51830 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
51831 +           continue;
51832 +
51833 +       EPRINTF5 (DBG_DISCON, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p elan node %d state %x\n", rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State);
51834 +
51835 +       switch (rxd->State)
51836 +       {
51837 +       case EP_RXD_FREE:
51838 +           printk ("ep4rcvr_disconnect_callback: rxd state is free but bound to a rail\n");
51839 +           break;
51840 +
51841 +       case EP_RXD_RECEIVE_ACTIVE:
51842 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete message receive */
51843 +           {
51844 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId);
51845 +
51846 +               EP4_RXD_FORCE_PRIVATE (rxdRail);
51847 +               
51848 +               unbind_rxd_rail (rxd, rxdRail);
51849 +               free_rxd_rail (rcvrRail, rxdRail);
51850 +               
51851 +               /* remark it as pending if it was partially received */
51852 +               rxd->RxdMain->Len = EP_RXD_PENDING;
51853 +               
51854 +               /* epcomms thread will requeue on different rail */
51855 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
51856 +               continue;
51857 +           }
51858 +           break;
51859 +
51860 +       default:
51861 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE || rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE)            /* incomplete RPC */
51862 +           {
51863 +               EPRINTF5 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d state %x - not able to failover\n",
51864 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State);
51865 +           
51866 +               EP4_RXD_FORCE_PRIVATE (rxdRail);
51867 +
51868 +               unbind_rxd_rail (rxd, rxdRail);
51869 +               free_rxd_rail (rcvrRail, rxdRail);
51870 +
51871 +               /* Ignore any previous NMD/failover responses */
51872 +               EP_INVALIDATE_XID (rxd->MsgXid);
51873 +               
51874 +               /* Remove from active list */
51875 +               list_del (&rxd->Link);
51876 +               
51877 +               if (rxd->State == EP_RXD_RPC_IN_PROGRESS)                               /* owned by user .... */
51878 +                   rxd->State = EP_RXD_BEEN_ABORTED;
51879 +               else                                                                    /* queue for completion */
51880 +               {
51881 +                   rxd->RxdMain->Len = EP_CONN_RESET;                                  /* ensure ep_rxd_status() fails */
51882 +                   list_add_tail (&rxd->Link, &rxdList);
51883 +               }
51884 +               continue;
51885 +           }
51886 +           break;
51887 +
51888 +       case EP_RXD_BEEN_ABORTED:
51889 +           printk ("ep4rcvr_disconnect_callback: rxd state is aborted but bound to a rail\n");
51890 +           break;
51891 +       }
51892 +
51893 +       printk ("%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
51894 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
51895 +       EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
51896 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
51897 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
51898 +    }
51899 +    
51900 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51901 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51902 +
51903 +    while (! list_empty (&rxdList)) 
51904 +    {
51905 +       EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link);
51906 +
51907 +       list_del (&rxd->Link);
51908 +
51909 +       rxd->Handler (rxd);
51910 +    }
51911 +}
51912 +
51913 +void
51914 +ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
51915 +{
51916 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
51917 +    EP4_RAIL       *rail      = RCVR_TO_RAIL (rcvrRail);
51918 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
51919 +    unsigned long   flags;
51920 +
51921 +    spin_lock_irqsave (&rcvr->Lock, flags);
51922 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51923 +
51924 +    /* Insert a setevent command into the thread's command queue
51925 +     * to ensure that all sten packets have completed */
51926 +    elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS);
51927 +    ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq);
51928 +    
51929 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51930 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51931 +}
51932 +
51933 +void
51934 +ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
51935 +{
51936 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
51937 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
51938 +    struct list_head *el;
51939 +    unsigned long     flags;
51940 +
51941 +    spin_lock_irqsave (&rcvr->Lock, flags);
51942 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51943 +    
51944 +    list_for_each (el, &rcvr->ActiveDescList) {
51945 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
51946 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
51947 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
51948 +
51949 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || env->NodeId != nodeId)
51950 +           continue;
51951 +
51952 +       if (rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE)
51953 +       {
51954 +           EP_NETERR_COOKIE cookie;
51955 +           unsigned int     first, this;
51956 +
51957 +           if (rxd->State == EP_RXD_RECEIVE_ACTIVE)
51958 +               first = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(env->Attr) ? 1 : 0) + (env->nFrags == 0 ? 1 : env->nFrags));
51959 +           else
51960 +               first = (EP_MAXFRAG+1) - rxd->nFrags;
51961 +
51962 +           for (this = first; this < (EP_MAXFRAG+1); this++)
51963 +               if (rxdRail->rxd_main->rxd_sent[this] == EP4_STATE_ACTIVE)
51964 +                   break;
51965 +           
51966 +           if (this > first)
51967 +           {
51968 +               /* Look at the last completed STEN packet and if its neterr cookie matches, then change
51969 +                * the rxd to look the same as if the sten packet had failed and then schedule it for retry */
51970 +               cookie = elan4_sdram_readq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[--this].c_cookie));
51971 +               
51972 +               if (cookie == cookies[0] || cookie == cookies[1])
51973 +               {
51974 +                   EPRINTF5 (DBG_NETWORK_ERROR, "%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d\n",
51975 +                             rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this);
51976 +                   
51977 +                   printk ("%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d : time %ld\n",
51978 +                           rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this, rxdRail->rxd_retry_time);
51979 +                   
51980 +                   rxdRail->rxd_main->rxd_sent[this] = EP4_STATE_ACTIVE;
51981 +                   rxdRail->rxd_main->rxd_failed     = EP4_STATE_FAILED;
51982 +                   
51983 +                   spin_lock (&rcvrRail->rcvr_retrylock);
51984 +                   
51985 +                   ASSERT (rxdRail->rxd_retry_time == 0);
51986 +
51987 +                   rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;
51988 +                       
51989 +                   list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist);
51990 +                       
51991 +                   ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time);
51992 +                   
51993 +                   spin_unlock (&rcvrRail->rcvr_retrylock);
51994 +               }
51995 +           }
51996 +       }
51997 +    }
51998 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
51999 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52000 +}
52001 +
52002 +int
52003 +ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r)
52004 +{
52005 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r;
52006 +    EP4_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
52007 +    ELAN4_DEV     *dev      = rail->r_ctxt.ctxt_dev;
52008 +    EP4_RXD_RAIL  *rxdRail;
52009 +    register int   i;
52010 +
52011 +    ASSERT (SPINLOCK_HELD(&rxd->Rcvr->Lock));
52012 +
52013 +    if ((rxdRail = get_rxd_rail (rcvrRail)) == NULL)
52014 +       return 0;
52015 +    
52016 +    /* Flush the Elan TLB if mappings have changed */
52017 +    ep_perrail_dvma_sync (&rail->r_generic);
52018 +
52019 +    EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p buffer %x len %x\n", 
52020 +             rail->r_generic.Name, rxd->Rcvr, rxd, rxdRail, rxd->Data.nmd_addr, rxd->Data.nmd_len);
52021 +
52022 +    /* bind the rxdRail and rxd together */
52023 +    bind_rxd_rail (rxd, rxdRail);
52024 +
52025 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_addr), rxd->Data.nmd_addr);       /* PCI write */
52026 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_len),  rxd->Data.nmd_len);                /* PCI write */
52027 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_attr), rxd->Data.nmd_attr);       /* PCI write */
52028 +
52029 +    /* Mark as active */
52030 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), 
52031 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52032 +    
52033 +    for (i = 0; i <= EP_MAXFRAG; i++)
52034 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
52035 +
52036 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
52037 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
52038 +
52039 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x00, /* %r0 */
52040 +                       ep_symbol (&rail->r_threadcode, "c_queue_rxd"));
52041 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x10, /* %r2 */
52042 +                       rcvrRail->rcvr_elan_addr);
52043 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x18, /* %r3 */
52044 +                       rxdRail->rxd_elan_addr);
52045 +
52046 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType),
52047 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS));
52048 +
52049 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_start));
52050 +
52051 +    return 1;
52052 +}
52053 +
52054 +void
52055 +ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
52056 +{
52057 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
52058 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
52059 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
52060 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
52061 +    sdramaddr_t     rxdElan   = rxdRail->rxd_elan;
52062 +    EP_ENVELOPE    *env       = &rxd->RxdMain->Envelope;
52063 +    unsigned long   first     = (EP_MAXFRAG+1) - nFrags;
52064 +    EP4_RXD_DMA_CMD cmd;
52065 +    register int    i, len;
52066 +
52067 +    EP4_ASSERT (rail, rxd->State == EP_RXD_PUT_ACTIVE);
52068 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
52069 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
52070 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52071 +
52072 +    /* Flush the Elan TLB if mappings have changed */
52073 +    ep_perrail_dvma_sync (&rail->r_generic);
52074 +
52075 +    /* Generate the DMA chain to put the data */
52076 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
52077 +    {
52078 +       cmd.c_dma_typeSize     = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
52079 +       cmd.c_dma_cookie       = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
52080 +       cmd.c_dma_vproc        = EP_VP_DATA(env->NodeId);
52081 +       cmd.c_dma_srcAddr      = local->nmd_addr;
52082 +       cmd.c_dma_dstAddr      = remote->nmd_addr;
52083 +       if (i == (nFrags-1))
52084 +           cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done);
52085 +       else
52086 +           cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]);
52087 +       cmd.c_dma_dstEvent     = 0;
52088 +       cmd.c_nop_cmd          = NOP_CMD;
52089 +
52090 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
52091 +                 rail->r_generic.Name, rxd, env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len);
52092 +       
52093 +       elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD));
52094 +    }
52095 +
52096 +    /* Initialise the event chain */
52097 +    for (i = 0; i < nFrags-1; i++)
52098 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
52099 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
52100 +
52101 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
52102 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52103 +
52104 +    for (i = 0; i <= EP_MAXFRAG; i++)
52105 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
52106 +
52107 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
52108 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
52109 +
52110 +    /* Initialise the previous event to start the whole chain off */
52111 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
52112 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
52113 +
52114 +    ASSERT (rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
52115 +
52116 +    /* finally issue the setevent to start the whole chain */
52117 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
52118 +
52119 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
52120 +}    
52121 +
52122 +void
52123 +ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
52124 +{
52125 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
52126 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
52127 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
52128 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
52129 +    sdramaddr_t      rxdElan  = rxdRail->rxd_elan;
52130 +    EP_ENVELOPE     *env      = &rxd->RxdMain->Envelope;
52131 +    unsigned long    first    = (EP_MAXFRAG+1) - nFrags;
52132 +    register int    i, len;
52133 +
52134 +    EP4_ASSERT (rail, rxd->State == EP_RXD_GET_ACTIVE);
52135 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
52136 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
52137 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52138 +
52139 +    /* Flush the Elan TLB if mappings have changed */
52140 +    ep_perrail_dvma_sync (&rail->r_generic);
52141 +
52142 +    /* Generate the DMA chain to put the data */
52143 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
52144 +    {
52145 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_get rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
52146 +                 rail->r_generic.Name, rxd, env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len);
52147 +       
52148 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_open),
52149 +                           OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(env->NodeId)));
52150 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_trans),
52151 +                           SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16));
52152 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_cookie),
52153 +                           ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_STEN);
52154 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_typeSize),
52155 +                           E4_DMA_TYPE_SIZE (local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT));
52156 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_cookie),
52157 +                           ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA);
52158 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_vproc),
52159 +                           EP_VP_DATA (rail->r_generic.Position.pos_nodeid));
52160 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcAddr),
52161 +                           remote->nmd_addr);
52162 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstAddr),
52163 +                           local->nmd_addr);
52164 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcEvent),
52165 +                           0);
52166 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstEvent),
52167 +                           i == (nFrags-1) ? rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done) : 
52168 +                                             rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]));
52169 +    }
52170 +
52171 +    /* Initialise the event chain */
52172 +    for (i = 0; i < nFrags-1; i++)
52173 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
52174 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
52175 +
52176 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
52177 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52178 +
52179 +    for (i = 0; i <= EP_MAXFRAG; i++)
52180 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
52181 +
52182 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
52183 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
52184 +
52185 +    /* Initialise the previous event to start the whole chain off */
52186 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
52187 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
52188 +
52189 +    ASSERT (rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
52190 +
52191 +    /* finally issue the setevent to start the whole chain */
52192 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
52193 +
52194 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
52195 +}
52196 +
52197 +void
52198 +ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
52199 +{
52200 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
52201 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
52202 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
52203 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
52204 +    sdramaddr_t     rxdElan   = rxdRail->rxd_elan;
52205 +    EP_ENVELOPE    *env       = &rxd->RxdMain->Envelope;
52206 +    unsigned long   first     = (EP_MAXFRAG+1) - nFrags - 1;
52207 +    EP4_RXD_DMA_CMD cmd;
52208 +    register int    i, len;
52209 +
52210 +    EP4_ASSERT (rail, rxd->State == EP_RXD_COMPLETE_ACTIVE);
52211 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
52212 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
52213 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52214 +
52215 +    /* Flush the Elan TLB if mappings have changed */
52216 +    ep_perrail_dvma_sync (&rail->r_generic);
52217 +
52218 +    /* Generate the DMA chain to put the data */
52219 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
52220 +    {
52221 +       cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
52222 +       cmd.c_dma_cookie   = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
52223 +       cmd.c_dma_vproc    = EP_VP_DATA(env->NodeId);
52224 +       cmd.c_dma_srcAddr  = local->nmd_addr;
52225 +       cmd.c_dma_dstAddr  = remote->nmd_addr;
52226 +       cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]);
52227 +       cmd.c_dma_dstEvent = 0;
52228 +       cmd.c_nop_cmd      = NOP_CMD;
52229 +
52230 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
52231 +                 rail->r_generic.Name, rxd, env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len);
52232 +
52233 +       elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD));
52234 +    }
52235 +    
52236 +    /* Initialise the status block dma */
52237 +    cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(EP_STATUSBLK_SIZE, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
52238 +    cmd.c_dma_cookie   = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
52239 +    cmd.c_dma_vproc    = EP_VP_DATA(env->NodeId);
52240 +    cmd.c_dma_srcAddr  = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk);
52241 +    cmd.c_dma_dstAddr  = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk);
52242 +    cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done);
52243 +    cmd.c_dma_dstEvent = env->TxdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done);
52244 +    cmd.c_nop_cmd      = NOP_CMD;
52245 +
52246 +    EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%x\n", 
52247 +             rail->r_generic.Name, rxd, env->Xid.Unique, (int) cmd.c_dma_srcAddr, (int) cmd.c_dma_dstAddr, EP_STATUSBLK_SIZE);
52248 +
52249 +    elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[EP_MAXFRAG]), sizeof (EP4_RXD_DMA_CMD));
52250 +
52251 +    /* Initialise the event chain */
52252 +    for (i = 0; i < nFrags; i++)
52253 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
52254 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
52255 +
52256 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
52257 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52258 +
52259 +    for (i = 0; i <= EP_MAXFRAG; i++)
52260 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
52261 +
52262 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
52263 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
52264 +
52265 +    /* Initialise the previous event to start the whole chain off */
52266 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
52267 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
52268 +
52269 +    ASSERT (rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
52270 +
52271 +    /* finally issue the setevent to start the whole chain */
52272 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
52273 +
52274 +    BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len);
52275 +}
52276 +
52277 +EP_RXD *
52278 +ep4rcvr_steal_rxd (EP_RCVR_RAIL *r)
52279 +{
52280 +    /* XXXX - TBD */
52281 +    return NULL;
52282 +}
52283 +
52284 +long
52285 +ep4rcvr_check (EP_RCVR_RAIL *r, long nextRunTime)
52286 +{
52287 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r;
52288 +    EP4_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
52289 +
52290 +    if (rcvrRail->rcvr_freecount < ep_rxd_lowat && !alloc_rxd_block (rcvrRail))
52291 +    {
52292 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow rxd rail pool\n", rail->r_generic.Name);
52293 +               
52294 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
52295 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
52296 +    }
52297 +    
52298 +    return nextRunTime;
52299 +}
52300 +
52301 +unsigned long
52302 +ep4rcvr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
52303 +{
52304 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg;
52305 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
52306 +    unsigned long  flags;
52307 +
52308 +    spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags);
52309 +    while (! list_empty (&rcvrRail->rcvr_retrylist))
52310 +    {
52311 +       EP4_RXD_RAIL *rxdRail = list_entry (rcvrRail->rcvr_retrylist.next, EP4_RXD_RAIL, rxd_retry_link);
52312 +       EP_ENVELOPE  *env     = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope;
52313 +       unsigned int  first   = (EP_MAXFRAG+1) - ((env->Attr & EP_MULTICAST ? 1 : 0) + (env->nFrags == 0 ? 1 : env->nFrags));
52314 +       
52315 +       if (BEFORE (lbolt, rxdRail->rxd_retry_time))
52316 +       {
52317 +           if (nextRunTime == 0 || AFTER (nextRunTime, rxdRail->rxd_retry_time))
52318 +               nextRunTime = rxdRail->rxd_retry_time;
52319 +
52320 +           break;
52321 +       }
52322 +
52323 +       list_del (&rxdRail->rxd_retry_link);
52324 +       rxdRail->rxd_retry_time = 0;
52325 +
52326 +       /* determine which sten packet to resubmit */
52327 +       for (; first < (EP_MAXFRAG+1); first++)
52328 +           if (rxdRail->rxd_main->rxd_sent[first] == EP4_STATE_ACTIVE)
52329 +               break;
52330 +
52331 +       EPRINTF3 (DBG_RETRY, "%s: ep4rcvr_retry: rxdRail %p, reissuing sten[%d]\n", rail->r_generic.Name, rxdRail, first);
52332 +
52333 +       /* re-initialise the fail event */
52334 +       elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType),
52335 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52336 +
52337 +       rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
52338 +
52339 +       /* re-initialise the chain event to resubmit this sten packet */
52340 +       elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first-1].ev_CountAndType),
52341 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
52342 +       
52343 +       /* finally issue the setevent to start the chain again */
52344 +       ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
52345 +    }
52346 +    spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags);
52347 +    
52348 +    return nextRunTime;
52349 +}
52350 +
52351 +void
52352 +ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
52353 +{
52354 +    EP4_RAIL          *rail   = (EP4_RAIL *) commsRail->Rail;
52355 +    ELAN4_DEV         *dev    = rail->r_ctxt.ctxt_dev;
52356 +    sdramaddr_t        qdescs = ((EP4_COMMS_RAIL *) commsRail)->r_descs;
52357 +    EP4_RCVR_RAIL     *rcvrRail;
52358 +    E4_InputQueue      qdesc;
52359 +    E4_ThreadRegs      tregs;
52360 +    sdramaddr_t        stack;
52361 +    unsigned long      flags;
52362 +
52363 +    KMEM_ZALLOC (rcvrRail, EP4_RCVR_RAIL *, sizeof (EP4_RCVR_RAIL), 1);
52364 +
52365 +    spin_lock_init (&rcvrRail->rcvr_freelock);
52366 +    INIT_LIST_HEAD (&rcvrRail->rcvr_freelist);
52367 +    INIT_LIST_HEAD (&rcvrRail->rcvr_blocklist);
52368 +
52369 +    kcondvar_init (&rcvrRail->rcvr_cleanup_sleep);
52370 +    kcondvar_init (&rcvrRail->rcvr_freesleep);
52371 +
52372 +    INIT_LIST_HEAD (&rcvrRail->rcvr_retrylist);
52373 +    spin_lock_init (&rcvrRail->rcvr_retrylock);
52374 +
52375 +    rcvrRail->rcvr_generic.CommsRail = commsRail;
52376 +    rcvrRail->rcvr_generic.Rcvr      = rcvr;
52377 +
52378 +    rcvrRail->rcvr_main  = ep_alloc_main (&rail->r_generic, sizeof (EP4_RCVR_RAIL_MAIN), 0, &rcvrRail->rcvr_main_addr);
52379 +    rcvrRail->rcvr_elan  = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RCVR_RAIL_ELAN), 0, &rcvrRail->rcvr_elan_addr);
52380 +    rcvrRail->rcvr_slots = ep_alloc_elan (&rail->r_generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->rcvr_slots_addr);
52381 +    stack                = ep_alloc_elan (&rail->r_generic, EP4_STACK_SIZE, 0, &rcvrRail->rcvr_stack);
52382 +
52383 +    /* allocate a command queue for the thread to use, plus space for it to wait/reschedule */
52384 +    rcvrRail->rcvr_ecq     = ep4_alloc_ecq (rail, CQ_Size64K);
52385 +    rcvrRail->rcvr_resched = ep4_get_ecq (rail, EP4_ECQ_ATOMIC, 8);
52386 +
52387 +    ep4_register_intcookie (rail, &rcvrRail->rcvr_stall_intcookie, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie),
52388 +                           rcvr_stall_interrupt, rcvrRail);
52389 +
52390 +    /* Initialise the elan portion */
52391 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent.ev_CountAndType), 0);
52392 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_halt.ev_CountAndType), 0);
52393 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), 0);
52394 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp),
52395 +                       rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head));
52396 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head), 0);
52397 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie), 0);
52398 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qbase), rcvrRail->rcvr_slots_addr);
52399 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qlast), 
52400 +                       rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1));
52401 +
52402 +    /* Initialise the main memory portion */
52403 +    rcvrRail->rcvr_main->rcvr_thread_lock = 0;
52404 +
52405 +    /* Install our retry handler */
52406 +    rcvrRail->rcvr_retryops.op_func = ep4rcvr_retry;
52407 +    rcvrRail->rcvr_retryops.op_arg  = rcvrRail;
52408 +
52409 +    ep4_add_retry_ops (rail, &rcvrRail->rcvr_retryops);
52410 +
52411 +    /* Update the queue descriptor */
52412 +    qdesc.q_bptr    = rcvrRail->rcvr_slots_addr;
52413 +    qdesc.q_fptr    = rcvrRail->rcvr_slots_addr;
52414 +    qdesc.q_control = E4_InputQueueControl (rcvrRail->rcvr_slots_addr, rcvrRail->rcvr_slots_addr + (EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)), EP_INPUTQ_SIZE);
52415 +    qdesc.q_event   = rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent);
52416 +
52417 +    ep4_write_qdesc (rail, qdescs + (rcvr->Service * EP_QUEUE_DESC_SIZE), &qdesc);
52418 +
52419 +    spin_lock_irqsave (&rcvr->Lock, flags);
52420 +    rcvr->Rails[rail->r_generic.Number] = &rcvrRail->rcvr_generic;
52421 +    rcvr->RailMask |= EP_RAIL2RAILMASK (rail->r_generic.Number);
52422 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52423 +
52424 +    {
52425 +       sdramaddr_t stackTop     = stack + EP4_STACK_SIZE;
52426 +       E4_Addr     stackTopAddr = rcvrRail->rcvr_stack + EP4_STACK_SIZE;
52427 +
52428 +       ep4_init_thread (rail, &tregs, stackTop, stackTopAddr, ep_symbol (&rail->r_threadcode, "ep4comms_rcvr"), 6, 
52429 +                        (E4_uint64) rail->r_elan_addr, (E4_uint64) rcvrRail->rcvr_elan_addr, (E4_uint64) rcvrRail->rcvr_main_addr,
52430 +                        (E4_uint64) EP_MSGQ_ADDR(rcvr->Service), (E4_uint64) rcvrRail->rcvr_ecq->ecq_addr, (E4_uint64) rcvrRail->rcvr_resched->ecq_addr);
52431 +    }
52432 +    
52433 +    /* Issue the command to the threads private command queue */
52434 +    elan4_run_thread_cmd (rcvrRail->rcvr_ecq->ecq_cq, &tregs);
52435 +
52436 +    ep_procfs_rcvr_add_rail(&(rcvrRail->rcvr_generic));
52437 +}
52438 +
52439 +void
52440 +ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
52441 +{
52442 +    EP4_RAIL         *rail     = (EP4_RAIL *) commsRail->Rail;
52443 +    EP4_RCVR_RAIL    *rcvrRail = (EP4_RCVR_RAIL *) rcvr->Rails[rail->r_generic.Number];  
52444 +    ELAN4_HALTOP      haltop;
52445 +    struct list_head *el, *nel;
52446 +    unsigned long     flags;
52447 +
52448 +    ep_procfs_rcvr_del_rail(&(rcvrRail->rcvr_generic));
52449 +
52450 +    /* Run a halt operation to mark the input queue as full and
52451 +     * request the thread to halt */
52452 +    haltop.op_mask     = INT_DiscardingHighPri | INT_TProcHalted;
52453 +    haltop.op_function = rcvr_stall_haltop;
52454 +    haltop.op_arg      = rcvrRail;
52455 +
52456 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &haltop);
52457 +
52458 +    /* Wait for the thread to tell us it's processed the input queue */
52459 +    spin_lock_irqsave (&rcvr->Lock, flags);
52460 +    while (! rcvrRail->rcvr_thread_halted)
52461 +       kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags);
52462 +    rcvrRail->rcvr_thread_halted = 0;
52463 +
52464 +    /* flag the rail as no longer available */
52465 +    rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number);
52466 +
52467 +    /* wait for all active communications to terminate */
52468 +    for (;;)
52469 +    {
52470 +       int mustWait = 0;
52471 +
52472 +       list_for_each (el, &rcvr->ActiveDescList) {
52473 +           EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
52474 +           EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail;
52475 +           
52476 +           if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING)
52477 +           {
52478 +               mustWait++;
52479 +               break;
52480 +           }
52481 +       }
52482 +
52483 +       if (! mustWait)
52484 +           break;
52485 +
52486 +       rcvrRail->rcvr_cleanup_waiting++;
52487 +       kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags);
52488 +    }
52489 +
52490 +    /* at this point all rxd's in the list that are bound to the deleting rail are pending */
52491 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
52492 +       EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
52493 +       EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail;
52494 +
52495 +       if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail))
52496 +       {
52497 +           EP4_RXD_ASSERT_PENDING (rxdRail);
52498 +           EP4_RXD_FORCE_PRIVATE (rxdRail);
52499 +
52500 +           unbind_rxd_rail (rxd, rxdRail);
52501 +           free_rxd_rail (rcvrRail, rxdRail);
52502 +       }
52503 +    }
52504 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52505 +
52506 +    /* wait for all rxd's for this rail to become free */
52507 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
52508 +    while (rcvrRail->rcvr_freecount != rcvrRail->rcvr_totalcount)
52509 +    {
52510 +       rcvrRail->rcvr_freewaiting++;
52511 +       kcondvar_wait (&rcvrRail->rcvr_freesleep, &rcvrRail->rcvr_freelock, &flags);
52512 +    }
52513 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
52514 +
52515 +    /* can now remove the rail as it can no longer be used */
52516 +    spin_lock_irqsave (&rcvr->Lock, flags);
52517 +    rcvr->Rails[rail->r_generic.Number] = NULL;
52518 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52519 +
52520 +    /* all the rxd's associated with DescBlocks must be in the FreeDescList */
52521 +    ASSERT (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount);
52522 +
52523 +    /* run through the DescBlockList deleting them */
52524 +    while (!list_empty (&rcvrRail->rcvr_blocklist))
52525 +       free_rxd_block (rcvrRail, list_entry(rcvrRail->rcvr_blocklist.next, EP4_RXD_RAIL_BLOCK , blk_link));
52526 +
52527 +    /* it had better be empty after that */
52528 +    ASSERT ((rcvrRail->rcvr_totalcount == 0) && (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount));
52529 +
52530 +    ep4_remove_retry_ops (rail, &rcvrRail->rcvr_retryops);
52531 +
52532 +    ep4_deregister_intcookie (rail, &rcvrRail->rcvr_stall_intcookie);
52533 +
52534 +    ep4_put_ecq (rail, rcvrRail->rcvr_resched, 8);
52535 +    ep4_free_ecq (rail, rcvrRail->rcvr_ecq);
52536 +
52537 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_stack, EP4_STACK_SIZE);
52538 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_slots_addr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries);
52539 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_elan_addr, sizeof (EP4_RCVR_RAIL_ELAN));
52540 +    ep_free_main (&rail->r_generic, rcvrRail->rcvr_main_addr, sizeof (EP4_RCVR_RAIL_MAIN));
52541 +
52542 +    KMEM_FREE (rcvrRail, sizeof (EP4_RCVR_RAIL));
52543 +}
52544 +
52545 +void
52546 +ep4rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r)
52547 +{
52548 +    EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) r;
52549 +    sdramaddr_t   rxdElan = rxdRail->rxd_elan;
52550 +    EP4_RAIL     *rail    = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail);
52551 +    ELAN4_DEV    *dev     = rail->r_ctxt.ctxt_dev;
52552 +    int i;
52553 +
52554 +    (di->func)(di->arg, "    Rail %d rxd %p elan %lx(%x) main %p(%x) ecq %d scq %d debug %llx\n", rail->r_generic.Number,
52555 +              rxdRail, rxdRail->rxd_elan, rxdRail->rxd_elan_addr, rxdRail->rxd_main, rxdRail->rxd_main_addr,
52556 +              elan4_cq2num(rxdRail->rxd_ecq->ecq_cq), elan4_cq2num(rxdRail->rxd_scq->ecq_cq),
52557 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_debug)));
52558 +    (di->func)(di->arg, "          start    %016llx %016llx %016llx [%016llx %016llx]\n",
52559 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)),
52560 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[0])),
52561 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[1])),
52562 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_cookie)),
52563 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_dma_cookie)));
52564 +              
52565 +    for (i = 0; i < EP_MAXFRAG; i++)
52566 +       (di->func)(di->arg, "          chain[%d] %016llx %016llx %016llx [%016llx %016llx]\n", i,
52567 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)),
52568 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[0])),
52569 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[1])),
52570 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_cookie)),
52571 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_dma_cookie)));
52572 +    (di->func)(di->arg, "          done    %016llx %016llx %016llx -> %016llx\n",
52573 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)),
52574 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[0])),
52575 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[1])),
52576 +              rxdRail->rxd_main->rxd_done);
52577 +    (di->func)(di->arg, "          fail    %016llx %016llx %016llx -> %016llx\n",
52578 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)),
52579 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[0])),
52580 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[1])),
52581 +              rxdRail->rxd_main->rxd_failed);
52582 +    (di->func)(di->arg, "          next %016llx queued %016llx main %016llx\n",
52583 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_next)),
52584 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_queued)),
52585 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main)));
52586 +    (di->func)(di->arg, "          sent %016llx %016llx %016llx %016llx %016llx\n",
52587 +              rxdRail->rxd_main->rxd_sent[0], rxdRail->rxd_main->rxd_sent[1], rxdRail->rxd_main->rxd_sent[2],
52588 +              rxdRail->rxd_main->rxd_sent[3], rxdRail->rxd_main->rxd_sent[4]);
52589 +}
52590 +
52591 +void
52592 +ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r)
52593 +{
52594 +    EP_RCVR          *rcvr       = r->Rcvr;
52595 +    EP4_RCVR_RAIL    *rcvrRail   = (EP4_RCVR_RAIL *) r;
52596 +    EP4_COMMS_RAIL   *commsRail  = RCVR_TO_COMMS(rcvrRail);
52597 +    EP4_RAIL         *rail       = RCVR_TO_RAIL (rcvrRail);
52598 +    ELAN4_DEV        *dev        = rail->r_ctxt.ctxt_dev;
52599 +    sdramaddr_t       rcvrElan   = rcvrRail->rcvr_elan;
52600 +    sdramaddr_t       qdesc      = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
52601 +    sdramaddr_t       event      = rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent);
52602 +    unsigned int      freeCount  = 0;
52603 +    unsigned int      blockCount = 0;
52604 +    struct list_head *el;
52605 +    unsigned long     flags;
52606 +    
52607 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
52608 +    list_for_each (el, &rcvrRail->rcvr_freelist)
52609 +       freeCount++;
52610 +    list_for_each (el, &rcvrRail->rcvr_blocklist)
52611 +       blockCount++;
52612 +    spin_unlock_irqrestore(&rcvrRail->rcvr_freelock, flags);
52613 +
52614 +    (di->func)(di->arg, "      Rail %d elan %lx(%x) main %p(%x) ecq %d resched %d debug %llx\n",
52615 +              rail->r_generic.Number, rcvrRail->rcvr_elan, rcvrRail->rcvr_elan_addr,
52616 +              rcvrRail->rcvr_main, rcvrRail->rcvr_main_addr, elan4_cq2num(rcvrRail->rcvr_ecq->ecq_cq),
52617 +              elan4_cq2num (rcvrRail->rcvr_resched->ecq_cq),
52618 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_debug)));
52619 +    (di->func)(di->arg, "        free %d (%d) total %d blocks %d\n",
52620 +              rcvrRail->rcvr_freecount, freeCount, rcvrRail->rcvr_totalcount, blockCount);
52621 +    (di->func)(di->arg, "        spinlock %016llx %016llx\n", rcvrRail->rcvr_main->rcvr_thread_lock,
52622 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock)));
52623 +    (di->func)(di->arg, "        queue: bptr %016llx fptr %016llx control %016llx (base %lx %x)\n",
52624 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr)),
52625 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr)),
52626 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_control)),
52627 +              rcvrRail->rcvr_slots, rcvrRail->rcvr_slots_addr);
52628 +    (di->func)(di->arg, "        event %016llx %016llx %016llx\n",
52629 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_CountAndType)),
52630 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[0])),
52631 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[1])));
52632 +    (di->func)(di->arg, "        pending_tailp %016llx pending_head %016llx\n", 
52633 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp)),
52634 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head)));
52635 +}
52636 +
52637 +void
52638 +ep4rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) {
52639 +    /* no stats here yet */
52640 +    /* EP4_RCVR_RAIL * ep4rcvr_rail = (EP4_RCVR_RAIL *) rcvr_rail; */
52641 +}
52642 +
52643 +
52644 +/*
52645 + * Local variables:
52646 + * c-file-style: "stroustrup"
52647 + * End:
52648 + */
52649 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsTx.c
52650 ===================================================================
52651 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcommsTx.c     2004-02-23 16:02:56.000000000 -0500
52652 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsTx.c  2005-07-28 14:52:52.876674480 -0400
52653 @@ -0,0 +1,919 @@
52654 +/*
52655 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
52656 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
52657 + *
52658 + *    For licensing information please see the supplied COPYING file
52659 + *
52660 + */
52661 +
52662 +#ident "@(#)$Id: epcommsTx.c,v 1.25.2.5 2004/12/09 10:02:42 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
52663 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx.c,v $*/
52664 +
52665 +#include <qsnet/kernel.h>
52666 +
52667 +#include <elan/kcomm.h>
52668 +#include <elan/epsvc.h>
52669 +#include <elan/epcomms.h>
52670 +
52671 +#include "cm.h"
52672 +#include "debug.h"
52673 +
52674 +unsigned int ep_txd_lowat = 5;
52675 +
52676 +static int
52677 +AllocateTxdBlock (EP_XMTR *xmtr, EP_ATTRIBUTE attr, EP_TXD **txdp)
52678 +{
52679 +    EP_TXD_BLOCK *blk;
52680 +    EP_TXD       *txd;
52681 +    EP_TXD_MAIN  *pTxdMain;
52682 +    int                  i;
52683 +    unsigned long flags;
52684 +
52685 +    EPRINTF1 (DBG_XMTR, "AllocateTxdBlock: xmtr=%p\n", xmtr);
52686 +
52687 +    KMEM_ZALLOC (blk, EP_TXD_BLOCK *, sizeof (EP_TXD_BLOCK), ! (attr & EP_NO_SLEEP));
52688 +
52689 +    if (blk == NULL)
52690 +       return -ENOMEM;
52691 +
52692 +    if ((pTxdMain = ep_shared_alloc_main (xmtr->Subsys->Subsys.Sys, EP_TXD_MAIN_SIZE * EP_NUM_TXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0)
52693 +    {
52694 +       KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
52695 +       return -ENOMEM;
52696 +    }
52697 +
52698 +    for (txd = &blk->Txd[0], i = 0; i < EP_NUM_TXD_PER_BLOCK; i++, txd++)
52699 +    {
52700 +       txd->Xmtr     = xmtr;
52701 +       txd->TxdMain = pTxdMain;
52702 +
52703 +       ep_nmd_subset (&txd->NmdMain, &blk->NmdMain, (i * EP_TXD_MAIN_SIZE), EP_TXD_MAIN_SIZE);
52704 +
52705 +       /* move onto next descriptor */
52706 +       pTxdMain = (EP_TXD_MAIN *) ((unsigned long) pTxdMain + EP_TXD_MAIN_SIZE);
52707 +    }
52708 +
52709 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52710 +
52711 +    list_add  (&blk->Link, &xmtr->DescBlockList);
52712 +    xmtr->TotalDescCount += EP_NUM_TXD_PER_BLOCK;
52713 +
52714 +    for (i = txdp ? 1 : 0; i < EP_NUM_TXD_PER_BLOCK; i++)
52715 +    {
52716 +       list_add (&blk->Txd[i].Link, &xmtr->FreeDescList);
52717 +
52718 +       xmtr->FreeDescCount++;
52719 +
52720 +       if (xmtr->FreeDescWanted)
52721 +       {
52722 +           xmtr->FreeDescWanted--;
52723 +           kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
52724 +       }
52725 +    }
52726 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52727 +    
52728 +    if (txdp)
52729 +       *txdp = &blk->Txd[0];
52730 +
52731 +    return 0;
52732 +}
52733 +
52734 +static void
52735 +FreeTxdBlock (EP_XMTR *xmtr, EP_TXD_BLOCK *blk)
52736 +{
52737 +    unsigned long flags;
52738 +
52739 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52740 +    list_del (&blk->Link);
52741 +
52742 +    xmtr->TotalDescCount -= EP_NUM_RXD_PER_BLOCK;
52743 +    xmtr->FreeDescCount -= EP_NUM_RXD_PER_BLOCK;
52744 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52745 +
52746 +    ep_shared_free_main (xmtr->Subsys->Subsys.Sys, &blk->NmdMain);
52747 +    KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
52748 +}
52749 +
52750 +static EP_TXD *
52751 +GetTxd (EP_XMTR *xmtr, EP_ATTRIBUTE attr)
52752 +{
52753 +    EP_COMMS_SUBSYS *subsys = xmtr->Subsys;
52754 +    EP_TXD          *txd;
52755 +    int low_on_txds;
52756 +    unsigned long flags;
52757 +
52758 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52759 +
52760 +    while (list_empty (&xmtr->FreeDescList))
52761 +    {
52762 +       if (! (attr & EP_NO_ALLOC))
52763 +       {
52764 +           spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52765 +
52766 +           if (AllocateTxdBlock (xmtr, attr, &txd) == ESUCCESS)
52767 +               return (txd);
52768 +
52769 +           spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52770 +       }
52771 +
52772 +       if (attr & EP_NO_SLEEP)
52773 +       {
52774 +           spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52775 +
52776 +           return (NULL);
52777 +       }
52778 +
52779 +       xmtr->FreeDescWanted++;
52780 +       kcondvar_wait (&xmtr->FreeDescSleep, &xmtr->FreeDescLock, &flags);
52781 +    }
52782 +
52783 +    txd = list_entry (xmtr->FreeDescList.next, EP_TXD, Link);
52784 +
52785 +    list_del (&txd->Link);
52786 +
52787 +    /* Wakeup the descriptor primer thread if there's not many left */
52788 +    low_on_txds = (--xmtr->FreeDescCount < ep_txd_lowat);
52789 +
52790 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52791 +
52792 +    if (low_on_txds)
52793 +       ep_kthread_schedule (&subsys->Thread, lbolt);
52794 +
52795 +    return (txd);
52796 +}
52797 +
52798 +void
52799 +FreeTxd (EP_XMTR *xmtr, EP_TXD *txd)
52800 +{
52801 +    unsigned long flags;
52802 +
52803 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
52804 +    
52805 +    list_add (&txd->Link, &xmtr->FreeDescList);
52806 +
52807 +    xmtr->FreeDescCount++;
52808 +
52809 +    if (xmtr->FreeDescWanted)                                  /* someone waiting for a receive */
52810 +    {                                                          /* descriptor, so wake them up */
52811 +       xmtr->FreeDescWanted--;
52812 +       kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
52813 +    }
52814 +    
52815 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
52816 +}
52817 +
52818 +int
52819 +TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail)
52820 +{
52821 +    EP_TXD      *txd  = txdRail->Txd;
52822 +    EP_XMTR     *xmtr = txd->Xmtr;
52823 +    EP_ATTRIBUTE attr = txd->Envelope.Attr;
52824 +    int                 stabilise;
52825 +    extern int   txd_stabilise;
52826 +
52827 +    switch (EP_ATTR2TYPE (attr)) 
52828 +    {
52829 +    case EP_TYPE_SVC_INDICATOR:                                /* is the rail in the current service indicator rail mask */
52830 +       if ((txd_stabilise & 4) == 0)
52831 +           return 0;
52832 +
52833 +       stabilise = (ep_xmtr_svc_indicator_railmask (xmtr, EP_ATTR2DATA (attr), txd->NodeId) & EP_RAIL2RAILMASK (rail->Number)) == 0;
52834 +       break;
52835 +
52836 +    case EP_TYPE_TIMEOUT:
52837 +       if ((txd_stabilise & 2) == 0)
52838 +           return 0;
52839 +
52840 +       stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_ATTR2DATA(attr));
52841 +       break;
52842 +
52843 +    default:
52844 +       if ((txd_stabilise & 1) == 0)
52845 +           return 0;
52846 +
52847 +       stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_DEFAULT_TIMEOUT);
52848 +       break;
52849 +    }
52850 +
52851 +    if (stabilise)
52852 +    {
52853 +       txd->Envelope.Attr = EP_SET_TXD_STABALISING(txd->Envelope.Attr);
52854 +       txd->RetryTime     = lbolt;
52855 +
52856 +       ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);    
52857 +    }
52858 +
52859 +    return stabilise;
52860 +}
52861 +
52862 +void ep_xmtr_txd_stat(EP_XMTR *xmtr, EP_TXD *txd) 
52863 +{
52864 +    int f;
52865 +    unsigned long size;
52866 +    EP_TXD_RAIL *txdRail = txd->TxdRail;
52867 +
52868 +    size = 0;
52869 +    for (f=0; f < txd->Envelope.nFrags; f++)
52870 +       size += txd->Envelope.Frags[f].nmd_len;
52871 +
52872 +    INC_STAT(xmtr->stats,tx);
52873 +    ADD_STAT(xmtr->stats,tx_len, size);  
52874 +    
52875 +    if ((txdRail != NULL) && (txdRail->XmtrRail != NULL)){
52876 +       INC_STAT(txdRail->XmtrRail->stats,tx);
52877 +       ADD_STAT(txdRail->XmtrRail->stats,tx_len, size); 
52878 +       
52879 +       if ((txdRail->XmtrRail->CommsRail != NULL) && ( txdRail->XmtrRail->CommsRail->Rail != NULL)) {
52880 +           INC_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx);
52881 +           ADD_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx_len, size);
52882 +       }
52883 +    }
52884 +}
52885 +
52886 +static int
52887 +PollActiveTransmitList (EP_XMTR *xmtr, int flag)
52888 +{
52889 +    struct list_head *el, *nel;
52890 +    struct list_head list;
52891 +    unsigned long flags;
52892 +    int count;
52893 +
52894 +    INIT_LIST_HEAD (&list);
52895 +
52896 +    spin_lock_irqsave (&xmtr->Lock, flags);
52897 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
52898 +       EP_TXD      *txd     = list_entry (el, EP_TXD, Link);
52899 +       EP_TXD_RAIL *txdRail = txd->TxdRail;
52900 +       
52901 +       if (txdRail == NULL)
52902 +           continue;
52903 +
52904 +       ASSERT (txdRail->Txd == txd);
52905 +       
52906 +       if (EP_XMTR_OP (txdRail->XmtrRail,PollTxd) (txdRail->XmtrRail, txdRail, flags))
52907 +       {
52908 +           list_del (&txd->Link);                              /* remove from active transmit list */
52909 +           list_add_tail (&txd->Link, &list);                  /* and add to list to call handlers */
52910 +       }
52911 +    }
52912 +    
52913 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
52914 +
52915 +    for (count = 0; !list_empty (&list); count++)
52916 +    {
52917 +       EP_TXD *txd = list_entry (list.next, EP_TXD, Link);
52918 +
52919 +       list_del (&txd->Link);
52920 +
52921 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
52922 +
52923 +       FreeTxd (xmtr, txd);
52924 +    }
52925 +    return (count);
52926 +}
52927 +
52928 +static inline void
52929 +DoTransmit (EP_XMTR *xmtr, EP_TXD *txd)
52930 +{
52931 +    EP_RAILMASK   nmdRailMask = ep_nmd2railmask (txd->Envelope.Frags, txd->Envelope.nFrags);
52932 +    EP_XMTR_RAIL *xmtrRail;
52933 +    unsigned long flags;
52934 +    int rnum;
52935 +
52936 +    spin_lock_irqsave (&xmtr->Lock, flags);
52937 +
52938 +    if (EP_IS_SVC_INDICATOR(txd->Envelope.Attr))
52939 +       nmdRailMask = nmdRailMask & ep_xmtr_svc_indicator_railmask(xmtr, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId);
52940 +
52941 +    if (EP_IS_PREFRAIL_SET(txd->Envelope.Attr))
52942 +       rnum = EP_ATTR2PREFRAIL(txd->Envelope.Attr);
52943 +    else 
52944 +       rnum = ep_xmtr_prefrail (xmtr, nmdRailMask, txd->NodeId);
52945 +    
52946 +    if (rnum < 0 || !(nmdRailMask & EP_RAIL2RAILMASK(rnum)))
52947 +       xmtrRail = NULL;
52948 +    else
52949 +       xmtrRail = xmtr->Rails[rnum];
52950 +    
52951 +    /* Allocate the XID while holding the xmtr->Lock from our XID cache */
52952 +    txd->Envelope.Xid = ep_xid_cache_alloc (xmtr->Subsys->Subsys.Sys, &xmtr->XidCache);
52953 +    
52954 +    EPRINTF7 (DBG_XMTR, "ep: transmit txd %p to %d/%d: Xid %llx nFrags %d [%08x.%d]\n",
52955 +             txd, txd->NodeId, txd->Service, (long long) txd->Envelope.Xid.Unique, 
52956 +             txd->Envelope.nFrags, txd->Envelope.Frags[0].nmd_addr, txd->Envelope.Frags[0].nmd_len);
52957 +
52958 +    /* Store time transmit started to timeout if not received */
52959 +    txd->TimeStamp = lbolt;
52960 +    
52961 +    /* Initialise the retry backoff */
52962 +    txd->Backoff.type = EP_BACKOFF_FREE;
52963 +
52964 +    list_add_tail (&txd->Link, &xmtr->ActiveDescList);
52965 +
52966 +    if (xmtrRail == NULL || !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE))
52967 +       ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);
52968 +    
52969 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
52970 +
52971 +    if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
52972 +       PollActiveTransmitList (xmtr, POLL_TX_LIST);
52973 +}
52974 +
52975 +EP_STATUS
52976 +ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
52977 +                    EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
52978 +{
52979 +    EP_TXD       *txd;
52980 +    int           i, len;
52981 +
52982 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
52983 +       return (EP_EINVAL);
52984 +
52985 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
52986 +       return (EP_ENOMEM);
52987 +
52988 +    txd->Handler = handler;
52989 +    txd->Arg     = arg;
52990 +    txd->Service = service;
52991 +    txd->NodeId  = (unsigned short) dest;
52992 +
52993 +    /* Initialise the envelope */
52994 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
52995 +    txd->Envelope.Attr      = EP_CLEAR_LOCAL_ATTR(attr);
52996 +    txd->Envelope.Range     = EP_RANGE (dest, dest);
52997 +    txd->Envelope.TxdMain   = txd->NmdMain;
52998 +    txd->Envelope.nFrags    = nFrags;
52999 +
53000 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
53001 +       txd->Envelope.Frags[i] = nmd[i];
53002 +
53003 +    if (payload)
53004 +    {
53005 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
53006 +
53007 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
53008 +    }
53009 +
53010 +    DoTransmit (xmtr, txd);
53011 +
53012 +    BucketStat (xmtr->Subsys, DataXmit, len);
53013 +
53014 +    return (EP_SUCCESS);
53015 +}
53016 +
53017 +EP_STATUS
53018 +ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, EP_SERVICE service, 
53019 +                    EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
53020 +{
53021 +    EP_SYS       *sys = xmtr->Subsys->Subsys.Sys;
53022 +    EP_TXD       *txd;
53023 +    int           nnodes;
53024 +    int           i, len;
53025 +    unsigned long flags;    
53026 +
53027 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
53028 +       return (EP_EINVAL);
53029 +
53030 +    if (destLo == -1) 
53031 +       destLo = sys->Position.pos_nodeid & ~(EP_MAX_NODES-1);
53032 +
53033 +    if (destHi == -1 && (destHi = ((sys->Position.pos_nodeid + EP_MAX_NODES) & ~(EP_MAX_NODES-1)) - 1) >= sys->Position.pos_nodes)
53034 +       destHi = sys->Position.pos_nodes-1;
53035 +
53036 +    nnodes = (destHi-destLo+1);
53037 +
53038 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
53039 +       return (EP_ENOMEM);
53040 +
53041 +    txd->Handler = handler;
53042 +    txd->Arg     = arg;
53043 +    txd->Service = service;
53044 +
53045 +    /* Initialise the envelope */
53046 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
53047 +    txd->Envelope.Attr      = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr));
53048 +    txd->Envelope.Range     = EP_RANGE (destLo, destHi);
53049 +    txd->Envelope.TxdMain   = txd->NmdMain;
53050 +    txd->Envelope.nFrags    = nFrags;
53051 +
53052 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
53053 +       txd->Envelope.Frags[i] = nmd[i];
53054 +
53055 +    if (payload)
53056 +    {
53057 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
53058 +
53059 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
53060 +    }
53061 +
53062 +    spin_lock_irqsave (&sys->NodeLock, flags);
53063 +    if (EP_IS_SVC_INDICATOR(attr)) 
53064 +       ep_xmtr_svc_indicator_bitmap(xmtr, EP_ATTR2DATA(attr), txd->TxdMain->Bitmap, destLo, nnodes);
53065 +    else
53066 +       bt_subset (statemap_tobitmap(sys->NodeSet), txd->TxdMain->Bitmap, destLo, nnodes);
53067 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
53068 +
53069 +    if (bitmap != NULL)                                                                        /* bitmap supplied, so intersect it with */
53070 +       bt_intersect (txd->TxdMain->Bitmap, bitmap, nnodes);                            /* the current node set map */
53071 +    
53072 +    if ((attr & EP_NOT_MYSELF) && destLo <= sys->Position.pos_nodeid && sys->Position.pos_nodeid <= destHi)
53073 +       BT_CLEAR (txd->TxdMain->Bitmap, (sys->Position.pos_nodeid-destLo));                     /* clear myself if not wanted */
53074 +
53075 +    if ((i = bt_lowbit (txd->TxdMain->Bitmap, nnodes)) < 0)
53076 +    {
53077 +       FreeTxd (xmtr, txd);
53078 +       return (EP_NODE_DOWN);
53079 +    }
53080 +
53081 +    txd->NodeId = (unsigned short) i;
53082 +
53083 +    DoTransmit (xmtr, txd);
53084 +
53085 +    BucketStat (xmtr->Subsys, McastXmit, len);
53086 +
53087 +    return (EP_SUCCESS);
53088 +}
53089 +
53090 +EP_STATUS
53091 +ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr,
53092 +                EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
53093 +{
53094 +    EP_TXD       *txd;
53095 +    int           i, len;
53096 +
53097 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
53098 +       return (EP_EINVAL);
53099 +
53100 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
53101 +       return (EP_ENOMEM);
53102 +
53103 +    txd->Handler = handler;
53104 +    txd->Arg     = arg;
53105 +    txd->Service = service;
53106 +    txd->NodeId  = dest;
53107 +
53108 +    /* Initialise the envelope */
53109 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
53110 +    txd->Envelope.Attr      = EP_SET_RPC(EP_CLEAR_LOCAL_ATTR(attr));    
53111 +    txd->Envelope.Range     = EP_RANGE (dest, dest);
53112 +    txd->Envelope.TxdMain   = txd->NmdMain;
53113 +    txd->Envelope.nFrags    = nFrags;
53114 +     
53115 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
53116 +       txd->Envelope.Frags[i] = nmd[i];
53117 +
53118 +    if (payload)
53119 +    {
53120 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
53121 +
53122 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
53123 +    }
53124 +
53125 +    DoTransmit (xmtr, txd);
53126 +
53127 +    BucketStat (xmtr->Subsys, RPCXmit, len);
53128 +
53129 +    return (EP_SUCCESS);
53130 +}
53131 +
53132 +EP_STATUS
53133 +ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg,
53134 +                     EP_ENVELOPE *env,  EP_PAYLOAD *payload, bitmap_t *bitmap, EP_NMD *nmd, int nFrags)
53135 +{
53136 +    EP_TXD       *txd;
53137 +    int           i, len;
53138 +
53139 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
53140 +       return (EP_EINVAL);
53141 +
53142 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
53143 +       return (EP_ENOMEM);
53144 +
53145 +    txd->Handler = handler;
53146 +    txd->Arg     = arg;
53147 +    txd->Service = service;
53148 +    txd->NodeId  = (unsigned short) dest;
53149 +
53150 +    /* Initialise the envelope */
53151 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
53152 +    txd->Envelope.Attr      = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr));
53153 +    txd->Envelope.Range     = env->Range;
53154 +    txd->Envelope.TxdMain   = txd->NmdMain;
53155 +    txd->Envelope.nFrags    = nFrags;
53156 +
53157 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
53158 +       txd->Envelope.Frags[i] = nmd[i];
53159 +
53160 +    bt_copy (bitmap, txd->TxdMain->Bitmap, EP_RANGE_HIGH(env->Range) - EP_RANGE_LOW(env->Range) + 1);
53161 +
53162 +    if (payload)
53163 +    {
53164 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
53165 +
53166 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
53167 +    }
53168 +
53169 +    DoTransmit (xmtr, txd);
53170 +
53171 +    BucketStat (xmtr->Subsys, McastXmit, len);
53172 +
53173 +    return (EP_SUCCESS);
53174 +}
53175 +
53176 +int
53177 +ep_poll_transmits (EP_XMTR *xmtr)
53178 +{
53179 +    return (PollActiveTransmitList (xmtr, POLL_TX_LIST));
53180 +}
53181 +
53182 +int
53183 +ep_enable_txcallbacks (EP_XMTR *xmtr)
53184 +{
53185 +    return (PollActiveTransmitList (xmtr, ENABLE_TX_CALLBACK));
53186 +}
53187 +
53188 +int
53189 +ep_disable_txcallbacks (EP_XMTR *xmtr)
53190 +{
53191 +    return (PollActiveTransmitList (xmtr, DISABLE_TX_CALLBACK));
53192 +}
53193 +
53194 +/* functions for accessing fields of txds */
53195 +int              ep_txd_node(EP_TXD *txd)              { return (txd->NodeId); }
53196 +EP_STATUSBLK    *ep_txd_statusblk(EP_TXD *txd)                 { return (&txd->TxdMain->StatusBlk); }
53197 +
53198 +void
53199 +ep_xmtr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg)
53200 +{
53201 +    EP_XMTR          *xmtr = (EP_XMTR *) arg;
53202 +    EP_SYS           *sys  = xmtr->Subsys->Subsys.Sys;
53203 +    struct list_head *el,*nel;
53204 +    unsigned long     flags;
53205 +
53206 +    switch (msg->Hdr.Type)
53207 +    {
53208 +    case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST:
53209 +       spin_lock_irqsave (&xmtr->Lock, flags);
53210 +       list_for_each (el, &xmtr->ActiveDescList) {
53211 +           EP_TXD      *txd     = list_entry (el, EP_TXD, Link);
53212 +           EP_TXD_RAIL *txdRail = txd->TxdRail;
53213 +
53214 +           if (txdRail != NULL && EP_XIDS_MATCH (msg->Body.Failover.Xid, txd->Envelope.Xid))
53215 +           {
53216 +               EP_XMTR_RAIL       *xmtrRail = txdRail->XmtrRail;
53217 +               EP_RAIL            *rail     = xmtrRail->CommsRail->Rail;
53218 +               EP_MANAGER_MSG_BODY msgBody;
53219 +               int                 rnum;
53220 +
53221 +               if (! (msg->Body.Failover.Railmask & EP_RAIL2RAILMASK (rail->Number)))
53222 +               {
53223 +                   /* Need to failover this txd to a different rail, select a rail from
53224 +                    * the set that she has asked us to use and which is connected to her
53225 +                    * on this transmitter.   If there are no such rails, then in all probability
53226 +                    * we're offline on all common rails and eventually she will see we have no
53227 +                    * rails in common and abort the receive. */
53228 +                   if ((rnum = ep_xmtr_prefrail (xmtr, msg->Body.Failover.Railmask, txd->NodeId)) < 0)
53229 +                       ep_debugf (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST but can't determine rail (%04x,%04x,%d,%04x)\n",
53230 +                                  rail->Name, msg->Body.Failover.Railmask, xmtr->RailMask, txd->NodeId, sys->Nodes[txd->NodeId].ConnectedRails);
53231 +                   else
53232 +                   {
53233 +                       EP_XMTR_RAIL *nXmtrRail = xmtr->Rails[rnum];
53234 +
53235 +                       EPRINTF4 (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST txd=%p XID=%llx-> rail %d\n", rail->Name, txd, (long long) txd->Envelope.Xid.Unique, rnum);
53236 +
53237 +                       /* Bind the txd rail onto the new rail - it doesn't matter if we fail
53238 +                        * as it will remain bound to the original rail */
53239 +                       (void) EP_XMTR_OP (nXmtrRail, BindTxd) (txd, nXmtrRail, EP_TXD_PHASE_PASSIVE);
53240 +                   }
53241 +               }
53242 +
53243 +               /* Send a failover response including an envelope update */
53244 +               msgBody.FailoverTxd.Rail     = rail->Number;
53245 +               msgBody.FailoverTxd.Xid      = txd->Envelope.Xid;
53246 +               msgBody.FailoverTxd.TxdRail  = txd->Envelope.TxdRail;
53247 +
53248 +               ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE, msg->Hdr.Xid, &msgBody);
53249 +           }
53250 +       }
53251 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
53252 +       break;
53253 +
53254 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: {
53255 +       int         txd_has_not_sent_envelope = 0;
53256 +       EP_TXD      *txd            = NULL;
53257 +       EP_TXD_RAIL *txdRail        = NULL;
53258 +
53259 +       if (msg->Body.NodeState.NetworkErrorState != 0)
53260 +           ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt + MESSAGE_RETRY_TIME);
53261 +       else
53262 +       {
53263 +           spin_lock_irqsave (&xmtr->Lock, flags);
53264 +           list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
53265 +               
53266 +               txd     = list_entry (el, EP_TXD, Link);
53267 +               txdRail = txd->TxdRail;
53268 +               
53269 +               if (txdRail != NULL && EP_XIDS_MATCH (msg->Hdr.Xid, txd->Envelope.Xid)) {
53270 +                   txd_has_not_sent_envelope = EP_XMTR_OP(txdRail->XmtrRail,CheckTxdState)(txd);
53271 +                   break;
53272 +               }
53273 +           }
53274 +           
53275 +           if (txd_has_not_sent_envelope) {
53276 +               EPRINTF2 (DBG_STABILISE, "ep_xmtr_xid_msg_handler: GET_NODE_STATE_RESPONSE txd=%p XID=%llx not sent envelope\n",
53277 +                         txd, (long long) txd->Envelope.Xid.Unique);
53278 +
53279 +               /* at this point it has finished stabalising */
53280 +               txd->Envelope.Attr = EP_CLEAR_TXD_STABALISING(txd->Envelope.Attr);
53281 +
53282 +               /* store railmask into txd if not a service indicator or timeout */
53283 +               if (EP_IS_NO_TYPE(txd->Envelope.Attr))
53284 +                   txd->Envelope.Attr = EP_SET_DATA(txd->Envelope.Attr, EP_TYPE_RAILMASK, msg->Body.NodeState.Railmask);
53285 +
53286 +               spin_unlock_irqrestore (&xmtr->Lock, flags);
53287 +               
53288 +               /* TXD is now no longer bound to a rail , so let ep_check_xmtr() handle it */
53289 +               ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);
53290 +           }
53291 +           else
53292 +               spin_unlock_irqrestore (&xmtr->Lock, flags);    
53293 +       }
53294 +       break;
53295 +    }
53296 +    default:
53297 +       panic ("ep_xmtr_xid_msg_handler: XID match but invalid message type\n");
53298 +    }
53299 +}
53300 +
53301 +EP_XMTR *
53302 +ep_alloc_xmtr (EP_SYS *sys)
53303 +{
53304 +    EP_COMMS_SUBSYS   *subsys;
53305 +    EP_XMTR          *xmtr;
53306 +    struct list_head *el;
53307 +
53308 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL)
53309 +       return (NULL);
53310 +
53311 +    KMEM_ZALLOC (xmtr, EP_XMTR *, sizeof (EP_XMTR), 1);
53312 +
53313 +    if (xmtr == NULL)
53314 +       return (NULL);
53315 +    
53316 +    xmtr->Subsys = subsys;
53317 +
53318 +    spin_lock_init (&xmtr->Lock);
53319 +    INIT_LIST_HEAD (&xmtr->ActiveDescList);
53320 +    
53321 +    kcondvar_init (&xmtr->FreeDescSleep);
53322 +    spin_lock_init (&xmtr->FreeDescLock);
53323 +    INIT_LIST_HEAD (&xmtr->FreeDescList);
53324 +    INIT_LIST_HEAD (&xmtr->DescBlockList);
53325 +
53326 +    ep_xid_cache_init (sys, &xmtr->XidCache);
53327 +
53328 +    xmtr->XidCache.MessageHandler = ep_xmtr_xid_msg_handler;
53329 +    xmtr->XidCache.Arg            = xmtr;
53330 +
53331 +    kmutex_lock (&subsys->Lock);
53332 +    list_add_tail (&xmtr->Link, &subsys->Transmitters);
53333 +
53334 +    ep_procfs_xmtr_add(xmtr);
53335 +
53336 +    /* Now add all rails which are already started */
53337 +    list_for_each (el, &subsys->Rails) { 
53338 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
53339 +
53340 +       EP_RAIL_OP(commsRail, Xmtr.AddRail) (xmtr, commsRail);
53341 +    }
53342 +    kmutex_unlock (&subsys->Lock);
53343 +
53344 +    ep_mod_inc_usecount();
53345 +
53346 +    return (xmtr);
53347 +}
53348 +
53349 +void
53350 +ep_free_xmtr (EP_XMTR *xmtr)
53351 +{
53352 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
53353 +    EP_SYS           *sys    = subsys->Subsys.Sys;
53354 +    struct list_head *el;
53355 +    
53356 +    kmutex_lock (&subsys->Lock);
53357 +    list_for_each (el, &subsys->Rails) { 
53358 +       EP_COMMS_RAIL *rail = list_entry (el, EP_COMMS_RAIL, Link);
53359 +
53360 +       EP_RAIL_OP(rail,Xmtr.DelRail) (xmtr, rail);
53361 +    }
53362 +
53363 +    list_del (&xmtr->Link);
53364 +    kmutex_unlock (&subsys->Lock);
53365 +
53366 +    /* all the desc's must be free */
53367 +    ASSERT(xmtr->FreeDescCount == xmtr->TotalDescCount);
53368 +
53369 +    /* delete the descs */
53370 +    while (!list_empty (&xmtr->DescBlockList))
53371 +       FreeTxdBlock( xmtr, list_entry(xmtr->DescBlockList.next, EP_TXD_BLOCK , Link));
53372 +
53373 +    /* they had better all be gone now */
53374 +    ASSERT((xmtr->FreeDescCount == 0) && (xmtr->TotalDescCount == 0));
53375 +
53376 +    ep_procfs_xmtr_del(xmtr);
53377 +
53378 +    ep_xid_cache_destroy (sys, &xmtr->XidCache);
53379 +
53380 +    spin_lock_destroy (&xmtr->Lock);
53381 +    KMEM_FREE (xmtr, sizeof (EP_XMTR));
53382 +
53383 +    ep_mod_dec_usecount();
53384 +}
53385 +
53386 +long
53387 +ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime)
53388 +{
53389 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
53390 +    EP_SYS           *sys    = subsys->Subsys.Sys;
53391 +    struct list_head *el, *nel;
53392 +    struct list_head  txdList;
53393 +    unsigned long       flags;
53394 +    int                 timed_out=0;
53395 +    int                 i;
53396 +    EP_MANAGER_MSG_BODY body;
53397 +
53398 +    INIT_LIST_HEAD (&txdList);
53399 +
53400 +    /* See if we have any txd's which need to be bound to a rail */
53401 +    spin_lock_irqsave (&xmtr->Lock, flags);
53402 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
53403 +       EP_TXD      *txd      = list_entry (el, EP_TXD, Link);
53404 +       EP_NODE     *node     = &sys->Nodes[txd->NodeId];
53405 +       EP_RAILMASK nodeRails = node->ConnectedRails & xmtr->RailMask;
53406 +       EP_ENVELOPE *env      = &txd->Envelope;
53407 +
53408 +       if (EP_IS_TXD_STABALISING(txd->Envelope.Attr)) 
53409 +       {
53410 +           ASSERT(txd->TxdRail != NULL);
53411 +
53412 +           if (AFTER (lbolt, txd->RetryTime))
53413 +           {
53414 +               EPRINTF6 (DBG_STABILISE, "ep_check_xmtr txd=%p txdRail=%p send get node state to %d Xid=%08x.%08x.%016llx\n",
53415 +                         txd, txd->TxdRail, txd->NodeId, env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);
53416 +               
53417 +               body.Service = txd->Service;
53418 +               if (ep_send_message ( txd->TxdRail->XmtrRail->CommsRail->Rail, txd->NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE, env->Xid, &body) == 0)
53419 +                   txd->RetryTime = lbolt + (MESSAGE_RETRY_TIME << ep_backoff (&txd->Backoff, EP_BACKOFF_STABILISE));
53420 +               else
53421 +                   txd->RetryTime = lbolt + MSGBUSY_RETRY_TIME;
53422 +           }
53423 +
53424 +           ep_kthread_schedule (&subsys->Thread, txd->RetryTime);
53425 +           continue;
53426 +       }
53427 +
53428 +       if (txd->TxdRail != NULL)
53429 +           continue;
53430 +
53431 +       switch (EP_ATTR2TYPE(txd->Envelope.Attr)) 
53432 +       {
53433 +       case EP_TYPE_SVC_INDICATOR: 
53434 +       {
53435 +           EP_RAILMASK       rmask=0;
53436 +           struct list_head *tmp;
53437 +
53438 +           list_for_each (tmp, &subsys->Rails) { 
53439 +               EP_COMMS_RAIL *commsRail = list_entry (tmp, EP_COMMS_RAIL, Link);
53440 +               if ( cm_svc_indicator_is_set(commsRail->Rail, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId))
53441 +                   rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
53442 +           } 
53443 +           nodeRails &= rmask;
53444 +           break;
53445 +       }
53446 +       case EP_TYPE_TIMEOUT:
53447 +           timed_out = AFTER(lbolt, txd->TimeStamp + EP_ATTR2DATA(txd->Envelope.Attr)) ? (1) : (0);
53448 +           break;
53449 +       case EP_TYPE_RAILMASK:
53450 +           nodeRails &= EP_ATTR2DATA(txd->Envelope.Attr);
53451 +           break;
53452 +       default:
53453 +           timed_out = AFTER(lbolt, txd->TimeStamp +  EP_DEFAULT_TIMEOUT) ? (1) : (0);
53454 +           break;
53455 +       }
53456 +
53457 +       if (nodeRails == 0 || timed_out || (EP_IS_NO_FAILOVER(env->Attr) && EP_IS_PREFRAIL_SET(env->Attr) && 
53458 +                                           (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))) == 0))
53459 +       {
53460 +           EPRINTF5 (timed_out ? DBG_STABILISE : DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx to %d no rails connected or cannot failover (nodeRails=0x%x,timed_out=%d\n", 
53461 +                     txd, (long long) env->Xid.Unique, txd->NodeId, nodeRails, timed_out);
53462 +
53463 +           list_del  (&txd->Link);
53464 +           list_add_tail (&txd->Link, &txdList);
53465 +       }
53466 +       else
53467 +       {
53468 +           EP_XMTR_RAIL *xmtrRail;
53469 +           int i, len, rnum;
53470 +
53471 +           if (EP_IS_PREFRAIL_SET(env->Attr) && (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))))
53472 +               rnum = EP_ATTR2PREFRAIL(env->Attr);
53473 +           else
53474 +               rnum = ep_pickRail (nodeRails);
53475 +
53476 +           EPRINTF3 (DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx mapping NMDs onto rail %d \n", txd, (long long) env->Xid.Unique, rnum);
53477 +
53478 +           for (i = len = 0; i < env->nFrags; i++, len += env->Frags[i].nmd_len)
53479 +               ep_nmd_map_rails (sys, &env->Frags[i], nodeRails);
53480 +
53481 +           if ((xmtrRail = xmtr->Rails[rnum]) == NULL || 
53482 +               !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE))
53483 +               ep_kthread_schedule (&subsys->Thread, lbolt + RESOURCE_RETRY_TIME);
53484 +       }
53485 +    }
53486 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53487 +
53488 +    while (! list_empty (&txdList))
53489 +    {
53490 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
53491 +       list_del (&txd->Link);
53492 +
53493 +       txd->Handler (txd, txd->Arg, EP_NODE_DOWN);
53494 +       FreeTxd (xmtr, txd);
53495 +    }
53496 +
53497 +    /* Check to see if we're low on txds */
53498 +    if (xmtr->FreeDescCount < ep_txd_lowat)
53499 +       AllocateTxdBlock (xmtr, 0, NULL);
53500 +    
53501 +    /* Then check each rail */
53502 +    for (i = 0; i < EP_MAX_RAILS; i++) 
53503 +       if (xmtr->RailMask & (1 << i) ) 
53504 +           nextRunTime = EP_XMTR_OP (xmtr->Rails[i],Check) (xmtr->Rails[i], nextRunTime);
53505 +    return (nextRunTime);
53506 +}
53507 +
53508 +void
53509 +ep_display_txd (DisplayInfo *di, EP_TXD *txd)
53510 +{
53511 +    EP_ENVELOPE *env     = &txd->Envelope;
53512 +    EP_TXD_RAIL *txdRail = txd->TxdRail;
53513 +
53514 +    (di->func)(di->arg, "TXD: %p Version=%x Attr=%x Xid=%08x.%08x.%016llx\n", txd, 
53515 +              env->Version, env->Attr, env->Xid.Generation, env->Xid.Handle, (long long) env->Xid.Unique);
53516 +    (di->func)(di->arg,  "     NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d\n",
53517 +              env->NodeId, EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail,
53518 +              env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags);
53519 +    (di->func)(di->arg,  "       Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr);
53520 +    (di->func)(di->arg,  "       Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr);
53521 +    (di->func)(di->arg,  "       Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr);
53522 +    (di->func)(di->arg,  "       Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr);
53523 +
53524 +    if (txdRail != NULL) EP_XMTR_OP (txdRail->XmtrRail, DisplayTxd) (di, txdRail);
53525 +}
53526 +
53527 +void
53528 +ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr)
53529 +{
53530 +    int               freeCount   = 0;
53531 +    int               activeCount = 0;
53532 +    struct list_head *el;
53533 +    int               i;
53534 +    unsigned long     flags;
53535 +
53536 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
53537 +    list_for_each (el, &xmtr->FreeDescList)
53538 +       freeCount++;
53539 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
53540 +
53541 +    spin_lock_irqsave (&xmtr->Lock, flags);
53542 +    list_for_each (el, &xmtr->ActiveDescList)
53543 +       activeCount++;
53544 +    
53545 +    (di->func)(di->arg, "ep_display_xmtr: xmtr=%p Free=%d Active=%d\n", xmtr, freeCount, activeCount);
53546 +    for (i = 0; i < EP_MAX_RAILS; i++)
53547 +       if (xmtr->Rails[i]) EP_XMTR_OP (xmtr->Rails[i], DisplayXmtr) (di, xmtr->Rails[i]);
53548 +
53549 +    list_for_each (el,&xmtr->ActiveDescList)
53550 +       ep_display_txd (di, list_entry (el, EP_TXD, Link));
53551 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53552 +}
53553 +
53554 +void
53555 +ep_xmtr_fillout_stats(EP_XMTR *xmtr, char *str)
53556 +{
53557 +    sprintf(str+strlen(str),"Tx     %lu  %lu /sec\n",       GET_STAT_TOTAL(xmtr->stats,tx),      GET_STAT_PER_SEC(xmtr->stats,tx) );
53558 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr->stats,tx_len) / (1024*1024),  GET_STAT_PER_SEC(xmtr->stats,tx_len) / (1024*1024));
53559 +}
53560 +
53561 +void
53562 +ep_xmtr_rail_fillout_stats(EP_XMTR_RAIL *xmtr_rail, char *str)
53563 +{
53564 +    sprintf(str+strlen(str),"Tx     %lu  %lu /sec\n",       GET_STAT_TOTAL(xmtr_rail->stats,tx),      GET_STAT_PER_SEC(xmtr_rail->stats,tx) );
53565 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr_rail->stats,tx_len) / (1024*1024),  GET_STAT_PER_SEC(xmtr_rail->stats,tx_len) / (1024*1024));
53566 +}
53567 +
53568 +/*
53569 + * Local variables:
53570 + * c-file-style: "stroustrup"
53571 + * End:
53572 + */
53573 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsTx_elan3.c
53574 ===================================================================
53575 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcommsTx_elan3.c       2004-02-23 16:02:56.000000000 -0500
53576 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsTx_elan3.c    2005-07-28 14:52:52.878674176 -0400
53577 @@ -0,0 +1,1173 @@
53578 +/*
53579 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
53580 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
53581 + *
53582 + *    For licensing information please see the supplied COPYING file
53583 + *
53584 + */
53585 +
53586 +#ident "@(#)$Id: epcommsTx_elan3.c,v 1.17.2.2 2004/11/12 10:54:51 mike Exp $"
53587 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx_elan3.c,v $ */
53588 +
53589 +#include <qsnet/kernel.h>
53590 +
53591 +#include <elan/kcomm.h>
53592 +#include <elan/epsvc.h>
53593 +#include <elan/epcomms.h>
53594 +
53595 +#include "kcomm_vp.h"
53596 +#include "kcomm_elan3.h"
53597 +#include "epcomms_elan3.h"
53598 +#include "debug.h"
53599 +
53600 +#define XMTR_TO_RAIL(xmtrRail)         ((EP3_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail)
53601 +#define XMTR_TO_DEV(xmtrRail)          (XMTR_TO_RAIL(xmtrRail)->Device)
53602 +#define XMTR_TO_SUBSYS(xmtrRail)       (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys)
53603 +
53604 +static void TxEnveEvent (EP3_RAIL *rail, void *arg);
53605 +static void TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
53606 +static void TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
53607 +
53608 +static EP3_COOKIE_OPS EnveCookieOps =
53609 +{
53610 +    TxEnveEvent,
53611 +    TxEnveRetry,
53612 +    NULL, /* DmaCancelled */
53613 +    TxEnveVerify
53614 +};
53615 +
53616 +static void TxDataEvent (EP3_RAIL *rail, void *arg);
53617 +static void TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
53618 +static void TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
53619 +
53620 +static EP3_COOKIE_OPS DataCookieOps =
53621 +{
53622 +    TxDataEvent,
53623 +    TxDataRetry,
53624 +    NULL, /* DmaCancelled */
53625 +    TxDataVerify
53626 +};
53627 +
53628 +static void TxDoneEvent (EP3_RAIL *dev, void *arg);
53629 +static void TxDoneRetry (EP3_RAIL *dev, void *arg, E3_DMA_BE *dma, int status);
53630 +static void TxDoneVerify (EP3_RAIL  *dev, void *arg, E3_DMA_BE *dma);
53631 +
53632 +static EP3_COOKIE_OPS DoneCookieOps = 
53633 +{
53634 +    TxDoneEvent,
53635 +    TxDoneRetry,
53636 +    NULL, /* DmaCancelled */
53637 +    TxDoneVerify,
53638 +} ;
53639 +
53640 +static int
53641 +AllocateTxdRailBlock (EP3_XMTR_RAIL *xmtrRail)
53642 +{
53643 +    EP3_RAIL          *rail = XMTR_TO_RAIL (xmtrRail);
53644 +    ELAN3_DEV         *dev = rail->Device;
53645 +    EP3_TXD_RAIL_BLOCK *blk;
53646 +    EP3_TXD_RAIL       *txdRail;
53647 +    sdramaddr_t        pTxdElan;
53648 +    EP3_TXD_RAIL_MAIN  *pTxdMain;
53649 +    E3_Addr           pTxdElanAddr;
53650 +    E3_Addr           pTxdMainAddr;
53651 +    E3_BlockCopyEvent  event;
53652 +    int                       i;
53653 +    unsigned long      flags;
53654 +
53655 +    KMEM_ZALLOC (blk, EP3_TXD_RAIL_BLOCK *, sizeof (EP3_TXD_RAIL_BLOCK), 1);
53656 +
53657 +    if (blk == NULL)
53658 +       return 0;
53659 +
53660 +    if ((pTxdElan = ep_alloc_elan (&rail->Generic, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdElanAddr)) == (sdramaddr_t) 0)
53661 +    {
53662 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
53663 +       return 0;
53664 +    }
53665 +
53666 +    if ((pTxdMain = ep_alloc_main (&rail->Generic, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdMainAddr)) == (EP3_TXD_RAIL_MAIN *) NULL)
53667 +    {
53668 +       ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
53669 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
53670 +       return 0;
53671 +    }
53672 +    
53673 +    if (ReserveDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK, 0) != ESUCCESS)
53674 +    {
53675 +       ep_free_main (&rail->Generic, pTxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK);
53676 +       ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
53677 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
53678 +       return 0;
53679 +    }
53680 +
53681 +    for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++)
53682 +    {
53683 +       txdRail->Generic.XmtrRail = &xmtrRail->Generic;
53684 +       txdRail->TxdElan          = pTxdElan;
53685 +       txdRail->TxdElanAddr      = pTxdElanAddr;
53686 +       txdRail->TxdMain          = pTxdMain;
53687 +       txdRail->TxdMainAddr      = pTxdMainAddr;
53688 +
53689 +       RegisterCookie (&rail->CookieTable, &txdRail->EnveCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), &EnveCookieOps, (void *) txdRail);
53690 +       RegisterCookie (&rail->CookieTable, &txdRail->DataCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), &DataCookieOps, (void *) txdRail);
53691 +       RegisterCookie (&rail->CookieTable, &txdRail->DoneCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), &DoneCookieOps, (void *) txdRail);
53692 +
53693 +       EP3_INIT_COPY_EVENT (event, txdRail->EnveCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, EnveEvent), 0);
53694 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), sizeof (E3_BlockCopyEvent));
53695 +
53696 +       EP3_INIT_COPY_EVENT (event, txdRail->DataCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DataEvent), 0);
53697 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent));
53698 +
53699 +       EP3_INIT_COPY_EVENT (event, txdRail->DoneCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DoneEvent), 0);
53700 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent));
53701 +       
53702 +       pTxdMain->EnveEvent = EP3_EVENT_FREE;
53703 +       pTxdMain->DataEvent = EP3_EVENT_FREE;
53704 +       pTxdMain->DoneEvent = EP3_EVENT_FREE;
53705 +
53706 +       /* move onto next descriptor */
53707 +       pTxdElan     += EP3_TXD_RAIL_ELAN_SIZE;
53708 +       pTxdElanAddr += EP3_TXD_RAIL_ELAN_SIZE;
53709 +       pTxdMain      = (EP3_TXD_RAIL_MAIN *) ((unsigned long) pTxdMain + EP3_TXD_RAIL_MAIN_SIZE);
53710 +       pTxdMainAddr += EP3_TXD_RAIL_MAIN_SIZE;
53711 +    }
53712 +
53713 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53714 +
53715 +    list_add  (&blk->Link, &xmtrRail->DescBlockList);
53716 +    xmtrRail->TotalDescCount += EP3_NUM_TXD_PER_BLOCK;
53717 +    xmtrRail->FreeDescCount  += EP3_NUM_TXD_PER_BLOCK;
53718 +
53719 +    for (i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++)
53720 +       list_add (&blk->Txd[i].Generic.Link, &xmtrRail->FreeDescList);
53721 +
53722 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53723 +    
53724 +    return 1;
53725 +}
53726 +
53727 +static void
53728 +FreeTxdRailBlock (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL_BLOCK *blk)
53729 +{
53730 +    EP3_RAIL     *rail = XMTR_TO_RAIL(xmtrRail);
53731 +    EP3_TXD_RAIL *txdRail;
53732 +    unsigned long flags;
53733 +    int i;
53734 +
53735 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53736 +
53737 +    list_del (&blk->Link);
53738 +    
53739 +    xmtrRail->TotalDescCount -= EP3_NUM_TXD_PER_BLOCK;
53740 +    
53741 +    for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++)
53742 +    {
53743 +       xmtrRail->FreeDescCount--;
53744 +       
53745 +       list_del (&txdRail->Generic.Link);
53746 +       
53747 +       DeregisterCookie (&rail->CookieTable, &txdRail->EnveCookie);
53748 +       DeregisterCookie (&rail->CookieTable, &txdRail->DataCookie);
53749 +       DeregisterCookie (&rail->CookieTable, &txdRail->DoneCookie);
53750 +    }
53751 +
53752 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53753 +
53754 +    ReleaseDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK);
53755 +
53756 +    ep_free_main (&rail->Generic, blk->Txd[0].TxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK);
53757 +    ep_free_elan (&rail->Generic, blk->Txd[0].TxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
53758 +    KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
53759 +}
53760 +
53761 +static EP3_TXD_RAIL *
53762 +GetTxdRail (EP3_XMTR_RAIL *xmtrRail)
53763 +{
53764 +    EP_COMMS_SUBSYS  *subsys = xmtrRail->Generic.Xmtr->Subsys;
53765 +    EP3_TXD_RAIL     *txdRail;
53766 +    int low_on_txds;
53767 +    unsigned long flags;
53768 +
53769 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53770 +
53771 +    if (list_empty (&xmtrRail->FreeDescList))
53772 +       txdRail = NULL;
53773 +    else
53774 +    {
53775 +       txdRail = list_entry (xmtrRail->FreeDescList.next, EP3_TXD_RAIL, Generic.Link);
53776 +
53777 +#if defined(DEBUG)
53778 +       {
53779 +           EP_RAIL   *rail = xmtrRail->Generic.CommsRail->Rail;
53780 +           ELAN3_DEV *dev  = ((EP3_RAIL *) rail)->Device;
53781 +           
53782 +           EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_FREE);
53783 +           EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_FREE);
53784 +           EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_FREE);
53785 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
53786 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
53787 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
53788 +       }
53789 +#endif
53790 +       
53791 +       list_del (&txdRail->Generic.Link);
53792 +
53793 +       xmtrRail->FreeDescCount--;
53794 +    }
 53795 +    /* Wake up the descriptor primer thread if there aren't many left */
53796 +    low_on_txds = (xmtrRail->FreeDescCount < ep_txd_lowat);
53797 +
53798 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53799 +
53800 +    if (low_on_txds)
53801 +       ep_kthread_schedule (&subsys->Thread, lbolt);
53802 +
53803 +    return (txdRail);
53804 +}
53805 +
53806 +static void
53807 +FreeTxdRail (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL *txdRail)
53808 +{
53809 +    unsigned long flags;
53810 +
53811 +#if defined(DEBUG_ASSERT)
53812 +    {
53813 +       EP_RAIL   *rail = xmtrRail->Generic.CommsRail->Rail;
53814 +       ELAN3_DEV *dev  = ((EP3_RAIL *) rail)->Device;
53815 +
53816 +       EP_ASSERT (rail, txdRail->Generic.XmtrRail == &xmtrRail->Generic);
53817 +       
53818 +       EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_PRIVATE);
53819 +       EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_PRIVATE);
53820 +       EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_PRIVATE);
53821 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
53822 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
53823 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
53824 +       
53825 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_FREE;
53826 +       txdRail->TxdMain->DataEvent = EP3_EVENT_FREE;
53827 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_FREE;
53828 +    }
53829 +#endif
53830 +
53831 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
53832 +    
53833 +    list_add (&txdRail->Generic.Link, &xmtrRail->FreeDescList);
53834 +
53835 +    xmtrRail->FreeDescCount++;
53836 +
53837 +    if (xmtrRail->FreeDescWaiting)
53838 +    {
53839 +       xmtrRail->FreeDescWaiting--;
53840 +       kcondvar_wakeupall (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock);
53841 +    }
53842 +
53843 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
53844 +}
53845 +
53846 +static void
53847 +BindTxdToRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
53848 +{
53849 +    ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock));
53850 +
53851 +    EPRINTF6 (DBG_XMTR, "%s: BindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
53852 +             XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, 
53853 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique);
53854 +
53855 +    txd->TxdRail = &txdRail->Generic;
53856 +    txdRail->Generic.Txd = txd;
53857 +}
53858 +
53859 +static void
53860 +UnbindTxdFromRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
53861 +{
53862 +    ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock));
53863 +    ASSERT (txd->TxdRail == &txdRail->Generic && txdRail->Generic.Txd == txd);
53864 +
53865 +    EPRINTF6 (DBG_XMTR, "%s: UnbindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
53866 +             XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, 
53867 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique);
53868 +    txd->TxdRail = NULL;
53869 +    txdRail->Generic.Txd = NULL; 
53870 +}
53871 +
53872 +/*
53873 + * TxEnveEvent: arg == EP_TXD
53874 + *    Called when envelope delivered
53875 + */
53876 +static void
53877 +TxEnveEvent (EP3_RAIL *rail, void *arg)
53878 +{
53879 +    panic ("TxEnveEvent");
53880 +}
53881 +
53882 +/*
53883 + * TxEnveRetry: arg == EP3_TXD_RAIL
53884 + *    Called on retry of dma of large message envelope.
53885 + */
53886 +static void
53887 +TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
53888 +{
53889 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) arg;
53890 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53891 +    
53892 +    EPRINTF3 (DBG_XMTR, "%s: TxEnveRetry: xmtr %p txd %p\n",  rail->Generic.Name, xmtrRail, txdRail);
53893 +    
53894 +    EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE);
53895 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */
53896 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId);
53897 +
53898 +    if (! TxdShouldStabalise (&txdRail->Generic, &rail->Generic))
53899 +       QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_ENVELOPE));
53900 +    else
53901 +       QueueDmaForRetry (rail, dma, EP_RETRY_STABALISING);     /* place dma on stabilising list for neterr fixup */
53902 +}
53903 +
53904 +static void
53905 +TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
53906 +{
53907 +    EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg;
53908 +    
53909 +    EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE);
53910 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */
53911 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId);
53912 +}
53913 +
53914 +/*
53915 + * TxDataEvent: arg == EP3_TXD
53916 + *    Called on completion of a large transmit.
53917 + */
53918 +static void
53919 +TxDataEvent (EP3_RAIL *rail, void *arg)
53920 +{
53921 +    EP3_TXD_RAIL      *txdRail  = (EP3_TXD_RAIL *) arg;
53922 +    EP3_XMTR_RAIL     *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53923 +    EP_XMTR          *xmtr     = xmtrRail->Generic.Xmtr;
53924 +    EP3_TXD_RAIL_MAIN *txdMain  = txdRail->TxdMain;
53925 +    sdramaddr_t        txdElan  = txdRail->TxdElan;
53926 +    int                delay    = 1;
53927 +    EP_TXD            *txd;
53928 +    unsigned long      flags;
53929 +
53930 +    spin_lock_irqsave (&xmtr->Lock, flags);
53931 +    for (;;)
53932 +    {
53933 +       if (EP3_EVENT_FIRED (txdRail->DataCookie, txdMain->DataEvent))
53934 +           break;
53935 +
53936 +       if (EP3_EVENT_FIRING (rail->Device, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdMain->DataEvent))                /* PCI read */
53937 +       {
53938 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
53939 +               panic ("TxDataEvent: events set but block copy not completed\n");
53940 +           DELAY(delay);
53941 +           delay <<= 1;
53942 +       }
53943 +       else
53944 +       {
53945 +           EPRINTF3 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p previously collecting by polling\n", 
53946 +                     rail->Generic.Name, xmtrRail, txdRail);
53947 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
53948 +           return;
53949 +       }
53950 +       mb();
53951 +    }
53952 +
53953 +    if ((txd = txdRail->Generic.Txd) == NULL ||                        /* If there is no txd, or if the descriptor is marked */
53954 +       !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr)) ||       /* as no interrupt, or been reused as an RPC, */
53955 +       (EP_IS_RPC(txd->Envelope.Attr)))                        /* then we were either called as a result of a previous */
53956 +    {                                                          /* tx which was completed by polling or as a result */
53957 +       spin_unlock_irqrestore (&xmtr->Lock, flags);            /* of a EnableTxCallBack/DisableTxCallback */
53958 +
53959 +       EPRINTF4 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p recyled (%x)\n", 
53960 +                 rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0);
53961 +       return;
53962 +    }
53963 +
53964 +    ASSERT (EP3_EVENT_FIRED (txdRail->EnveCookie, txdMain->EnveEvent));
53965 +
53966 +    EPRINTF5 (DBG_XMTR, "%s: TxDataEvent : xmtrRail=%p txdRail=%p tx=%p XID=%llx\n", 
53967 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
53968 +    
53969 +    ep_xmtr_txd_stat(xmtr,txd);
53970 +    
53971 +    /* remove from active transmit lists */
53972 +    list_del (&txd->Link);
53973 +
53974 +    UnbindTxdFromRail (txd, txdRail);
53975 +    
53976 +    /* clear the done flags for next time round */
53977 +    txdMain->EnveEvent = EP3_EVENT_PRIVATE;
53978 +    txdMain->DataEvent = EP3_EVENT_PRIVATE;
53979 +    txdMain->DoneEvent = EP3_EVENT_PRIVATE;
53980 +    
53981 +    FreeTxdRail (xmtrRail, txdRail);
53982 +
53983 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
53984 +    
53985 +    txd->Handler (txd, txd->Arg, EP_SUCCESS);
53986 +    
53987 +    FreeTxd (xmtr, txd);
53988 +}
53989 +
53990 +/*
53991 + * TxDataRetry: arg == EP3_TXD
53992 + *    Called on retry of remote "put" dma of large transmit data.
53993 + */
53994 +static void
53995 +TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
53996 +{
53997 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) arg;
53998 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
53999 +    EP_TXD        *txd      = txdRail->Generic.Txd;
54000 +
54001 +    EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && 
54002 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) ||  /* PCI read */
54003 +                               (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
54004 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0))));  /* PCI read */
54005 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId);
54006 +
54007 +    EPRINTF5 (DBG_XMTR, "%s: TxDataRetry: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", 
54008 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
54009 +    
54010 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_DATA));
54011 +}
54012 +
54013 +static void
54014 +TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
54015 +{
54016 +    EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg;
54017 +    EP_TXD       *txd     = txdRail->Generic.Txd;
54018 +
54019 +    EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && 
54020 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) ||  /* PCI read */
54021 +                               (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
54022 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0))));  /* PCI read */
54023 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId);
54024 +}
54025 +
54026 +/*
54027 + * TxDoneEvent: arg == EP3_TXD
54028 + *    Called on completion of a RPC.
54029 + */
54030 +static void
54031 +TxDoneEvent (EP3_RAIL *rail, void *arg)
54032 +{
54033 +    EP3_TXD_RAIL      *txdRail  = (EP3_TXD_RAIL *) arg;
54034 +    EP3_XMTR_RAIL     *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
54035 +    EP_XMTR          *xmtr     = xmtrRail->Generic.Xmtr;
54036 +    int                delay   = 1;
54037 +    EP_TXD           *txd;
54038 +    unsigned long      flags;
54039 +
54040 +    spin_lock_irqsave (&xmtr->Lock, flags);
54041 +
54042 +    for (;;)
54043 +    {
54044 +       if (EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && 
54045 +           EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
54046 +           break;
54047 +       
54048 +       if (EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && 
54049 +           EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdRail->TxdMain->DataEvent))
54050 +       {
54051 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
54052 +               panic ("TxDoneEvent: events set but block copy not completed\n");
54053 +           DELAY(delay);
54054 +           delay <<= 1;
54055 +       }
54056 +       else
54057 +       {
54058 +           EPRINTF3 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txdRail %p previously collecting by polling\n", 
54059 +                     rail->Generic.Name, xmtr, txdRail);
54060 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
54061 +           return;
54062 +       }
54063 +       mb();
54064 +    }
54065 +
54066 +    if ((txd = txdRail->Generic.Txd) == NULL ||                                                 /* If there is no txd, or if the descriptor is marked */
 54067 +       !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr) || EP_IS_RPC(txd->Envelope.Attr))) /* marked as no interrupt, or been reused as a transmit, */
54068 +    {                                                                                   /* then we were either called as a result of a previous */
54069 +       spin_unlock_irqrestore (&xmtr->Lock, flags);                                     /* tx which was completed by polling or as a result */
54070 +                                                                                        /* of a EnableTxCallBack/DisableTxCallback */
54071 +
54072 +       EPRINTF4 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txd %p recyled (%x)\n", 
54073 +                 rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0);
54074 +       return; 
54075 +    }
54076 +
54077 +    EPRINTF5 (DBG_XMTR, "%s: TxDoneEvent: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", 
54078 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
54079 +
54080 +    ep_xmtr_txd_stat(xmtr,txd);
54081 +
54082 +    /* remove from active transmit list */
54083 +    list_del (&txd->Link);
54084 +    
54085 +    UnbindTxdFromRail (txd, txdRail);
54086 +    
54087 +    /* clear the done flags for next time round */
54088 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54089 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54090 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54091 +    
54092 +    FreeTxdRail (xmtrRail, txdRail);
54093 +
54094 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54095 +           
54096 +    if (txd->Handler)
54097 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
54098 +       
54099 +    FreeTxd (xmtr, txd);
54100 +}
54101 +
54102 +/*
54103 + * TxDoneRetry: arg == EP3_TXD
54104 + */
54105 +static void
54106 +TxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
54107 +{
54108 +    panic ("TxDoneRetry");
54109 +}
54110 +
54111 +static void
54112 +TxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
54113 +{
54114 +    panic ("TxDoneVerify");
54115 +}
54116 +
54117 +static void
54118 +EnableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
54119 +{
54120 +    ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device;
54121 +
54122 +    EPRINTF3 (DBG_XMTR, "%s: EnableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail);
54123 +
54124 +    txd->Envelope.Attr = EP_SET_INTERRUPT_ENABLED(txd->Envelope.Attr);
54125 +               
54126 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY);
54127 +       
54128 +    if (EP_IS_RPC(txd->Envelope.Attr))
54129 +    {
54130 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY);
54131 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type),  EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DoneCookie.Cookie);
54132 +    }
54133 +    else
54134 +    {
54135 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DataCookie.Cookie);
54136 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY);
54137 +    }
54138 +}
54139 +
54140 +static void
54141 +DisableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
54142 +{
54143 +    ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device;
54144 +
54145 +    EPRINTF3 (DBG_XMTR, "%s: DisableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail);
54146 +
54147 +    txd->Envelope.Attr = EP_CLEAR_INTERRUPT_ENABLED(txd->Envelope.Attr);
54148 +
54149 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY);
54150 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY);
54151 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY);
54152 +}
54153 +
54154 +static void
54155 +InitialiseTxdRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail, int phase)
54156 +{
54157 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
54158 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
54159 +
54160 +    /* Flush the Elan TLB if mappings have changed */
54161 +    ep_perrail_dvma_sync (&rail->Generic);
54162 +
54163 +    /* Initialise the per-rail fields in the envelope */
54164 +    txd->Envelope.TxdRail = txdRail->TxdElanAddr;
54165 +    txd->Envelope.NodeId  = rail->Generic.Position.pos_nodeid;
54166 +
54167 +    /* Initialise the dma backoff */
54168 +    txdRail->Backoff.type = EP_BACKOFF_FREE;
54169 +
54170 +    /* Initialise the per-rail events */
54171 +    switch (phase)
54172 +    {
54173 +    case EP_TXD_PHASE_ACTIVE:
54174 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 1);
54175 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 
54176 +                           (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + (EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0));
54177 +       
54178 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_ACTIVE;
54179 +       txdRail->TxdMain->DataEvent = EP3_EVENT_ACTIVE;
54180 +       break;
54181 +       
54182 +    case EP_TXD_PHASE_PASSIVE:
54183 +       ASSERT (EP_IS_RPC(txd->Envelope.Attr));
54184 +
54185 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);
54186 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);
54187 +
54188 +       txdRail->TxdMain->EnveEvent = txdRail->EnveCookie.Cookie;
54189 +       txdRail->TxdMain->DataEvent = txdRail->DataCookie.Cookie;
54190 +       break;
54191 +    }
54192 +
54193 +    if (! EP_IS_RPC(txd->Envelope.Attr))
54194 +       txdRail->TxdMain->DoneEvent = txdRail->DoneCookie.Cookie;
54195 +    else
54196 +    {
54197 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 1);
54198 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_ACTIVE;
54199 +    }
54200 +
54201 +    if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
54202 +       DisableTransmitCallback (txd, txdRail);
54203 +    else
54204 +       EnableTransmitCallback (txd, txdRail);
54205 +
54206 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
54207 +    if ( epdebug_check_sum ) 
54208 +       txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags);
54209 +    else
54210 +#endif
54211 +       txd->Envelope.CheckSum = 0;  
54212 +
54213 +    /* copy the envelope and payload if present down to sdram */
54214 +    elan3_sdram_copyl_to_sdram (rail->Device, &txd->Envelope, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Envelope), EP_ENVELOPE_SIZE);
54215 +    
54216 +    if (EP_HAS_PAYLOAD(txd->Envelope.Attr))
54217 +       elan3_sdram_copyl_to_sdram (rail->Device, &txd->Payload, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Payload), EP_PAYLOAD_SIZE);
54218 +}
54219 +
54220 +void
54221 +ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
54222 +{
54223 +    EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail);
54224 +    struct list_head *el;
54225 +    unsigned long flags;
54226 +
54227 +    switch (rail->Generic.CallbackStep)
54228 +    {
54229 +    case EP_CB_FLUSH_FILTERING:
54230 +       /* only need to acquire/release the Lock to ensure that
54231 +        * the node state transition has been noticed. */
54232 +       spin_lock_irqsave (&xmtr->Lock, flags);
54233 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
54234 +       break;
54235 +
54236 +    case EP_CB_FLUSH_FLUSHING:
54237 +       spin_lock_irqsave (&xmtr->Lock, flags);
54238 +       
54239 +       list_for_each (el, &xmtr->ActiveDescList) {
54240 +           EP_TXD       *txd      = list_entry (el, EP_TXD, Link);
54241 +           EP3_TXD_RAIL *txdRail  = (EP3_TXD_RAIL *) txd->TxdRail;
54242 +           EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
54243 +           
54244 +           if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
54245 +               continue;
54246 +           
54247 +           if (EP_IS_RPC(txd->Envelope.Attr))
54248 +           {
54249 +               if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
54250 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
54251 +               else if (! EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
54252 +                   nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
54253 +           }
54254 +           else
54255 +           {
54256 +               if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
54257 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
54258 +           }
54259 +       }
54260 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
54261 +       break;
54262 +
54263 +    default:
54264 +       panic ("ep3xmtr_flush_callback: invalid callback step\n");
54265 +       break;
54266 +    }
54267 +}
54268 +
54269 +void
54270 +ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
54271 +{
54272 +    EP3_RAIL         *rail   = XMTR_TO_RAIL (xmtrRail);
54273 +    struct list_head  txdList;
54274 +    struct list_head *el, *nel;
54275 +    unsigned long flags;
54276 +#ifdef SUPPORT_RAIL_FAILOVER
54277 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
54278 +#endif
54279 +
54280 +    INIT_LIST_HEAD (&txdList);
54281 +
54282 +    spin_lock_irqsave (&xmtr->Lock, flags);
54283 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
54284 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
54285 +       EP3_TXD_RAIL *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
54286 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
54287 +           
54288 +       /* Only progress relocation of txd's bound to this rail */
54289 +       if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED)
54290 +           continue;
54291 +       
54292 +#ifdef SUPPORT_RAIL_FAILOVER
54293 +       /* Transmit data has not been sent, so just restart on a different rail */
54294 +       if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
54295 +       {
54296 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d unbind an retry\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
54297 +           
54298 +           UnbindTxdFromRail (txd, txdRail);
54299 +           
54300 +           /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54301 +           txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54302 +           txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54303 +           txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54304 +
54305 +           /* reset all events, since none of them could have been set */
54306 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                          /* PCI write */
54307 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                          /* PCI write */
54308 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                          /* PCI write */
54309 +           
54310 +           FreeTxdRail (xmtrRail, txdRail);
54311 +           
54312 +           /* epcomms thread will restart on different rail */
54313 +           ep_kthread_schedule (&subsys->Thread, lbolt);
54314 +           continue;
54315 +       }
54316 +
54317 +       if (EP_IS_RPC(txd->Envelope.Attr) && !EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
54318 +       {
54319 +           if (EP_IS_NO_FAILOVER(txd->Envelope.Attr))
54320 +           {
54321 +               EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d - not able to failover\n",
54322 +                         rail->Generic.Name, xmtr, txd, txd->NodeId);
54323 +
54324 +               list_del (&txd->Link);
54325 +               UnbindTxdFromRail (txd, txdRail);
54326 +               
54327 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54328 +               txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54329 +               txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54330 +               txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54331 +               
54332 +               /* envelope and data events must have been set, so only clear the done event */
54333 +               EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
54334 +               EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
54335 +
54336 +               elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                              /* PCI write */
54337 +               
54338 +               FreeTxdRail (xmtrRail, txdRail);
54339 +           
54340 +               list_add_tail (&txd->Link, &txdList);
54341 +               continue;
54342 +           }
54343 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d passive\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
54344 +           
54345 +           nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
54346 +           continue;
54347 +       }
54348 +
54349 +       EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
54350 +#endif
54351 +
54352 +    }
54353 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54354 +
54355 +    while (! list_empty (&txdList)) 
54356 +    {
54357 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
54358 +
54359 +       list_del (&txd->Link);
54360 +
54361 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
54362 +       
54363 +       FreeTxd (xmtr, txd);
54364 +    }
54365 +}
54366 +
54367 +
54368 +void
54369 +ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
54370 +{
54371 +    EP3_RAIL         *rail = XMTR_TO_RAIL (xmtrRail);
54372 +    struct list_head *el, *nel;
54373 +    struct list_head  txdList;
54374 +    unsigned long flags;
54375 +    
54376 +    INIT_LIST_HEAD (&txdList);
54377 +
54378 +    spin_lock_irqsave (&xmtr->Lock, flags);
54379 +
54380 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
54381 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
54382 +       EP3_TXD_RAIL *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
54383 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
54384 +           
54385 +       if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
54386 +           continue;
54387 +       
54388 +       if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) &&
54389 +           EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
54390 +           EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
54391 +       {
54392 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p completed to node %d\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
54393 +           continue;
54394 +       }
54395 +
54396 +       /* Remove from active list */
54397 +       list_del (&txd->Link);
54398 +       
54399 +       UnbindTxdFromRail (txd, txdRail);
54400 +       
54401 +       /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54402 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54403 +       txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54404 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54405 +       
54406 +       /* reset the envelope and data events, since only they could have been set */
54407 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                              /* PCI write */
54408 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                              /* PCI write */
54409 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                              /* PCI write */
54410 +       
54411 +       FreeTxdRail (xmtrRail, txdRail);
54412 +           
54413 +       EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p node %d not conected\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
54414 +
54415 +       /* add to the list of txd's which are to be completed */
54416 +       list_add_tail (&txd->Link, &txdList);
54417 +    }
54418 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54419 +    
54420 +    while (! list_empty (&txdList)) 
54421 +    {
54422 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
54423 +
54424 +       list_del (&txd->Link);
54425 +
54426 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
54427 +       
54428 +       FreeTxd (xmtr, txd);
54429 +    }
54430 +}
54431 +
54432 +int
54433 +ep3xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how)
54434 +{
54435 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
54436 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) t;
54437 +    EP_TXD        *txd      = txdRail->Generic.Txd;
54438 +
54439 +    switch (how)
54440 +    {
54441 +    case ENABLE_TX_CALLBACK:
54442 +       if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
54443 +           EnableTransmitCallback (txd, txdRail);
54444 +       break;
54445 +
54446 +    case DISABLE_TX_CALLBACK:
54447 +       if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
54448 +           DisableTransmitCallback (txd, txdRail);
54449 +       break;
54450 +    }
54451 +
54452 +    if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) &&
54453 +       EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
54454 +       EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
54455 +    {
54456 +       EPRINTF3 (DBG_XMTR, "%s: ep3xmtr_poll_txd: txd=%p XID=%llx completed\n", 
54457 +                 XMTR_TO_RAIL (xmtrRail)->Generic.Name, txd, (long long) txd->Envelope.Xid.Unique);
54458 +
54459 +       ep_xmtr_txd_stat(xmtrRail->Generic.Xmtr,txd);
54460 +
54461 +       UnbindTxdFromRail (txd, txdRail);
54462 +       
54463 +       /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54464 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54465 +       txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54466 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54467 +       
54468 +       FreeTxdRail (xmtrRail, txdRail);
54469 +
54470 +       return 1;
54471 +    }
54472 +
54473 +    return 0;
54474 +}
54475 +
54476 +int
54477 +ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase)
54478 +{
54479 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
54480 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
54481 +    EP3_TXD_RAIL  *txdRail;
54482 +    E3_DMA_BE      dmabe;
54483 +
54484 +    if ((txdRail = GetTxdRail (xmtrRail)) == NULL)
54485 +       return 0;
54486 +
54487 +    switch (phase)
54488 +    {
54489 +    case EP_TXD_PHASE_ACTIVE:
54490 +       if (rail->Generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED)
54491 +       {
54492 +           EPRINTF2 (DBG_XMTR, "%s: TransmitTxdOnRail: node %u not connected on this rail\n", rail->Generic.Name, txd->NodeId);
54493 +
54494 +           /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54495 +           txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54496 +           txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54497 +           txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54498 +
54499 +           /* reset all events, since none of them could have been set */
54500 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                          /* PCI write */
54501 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                          /* PCI write */
54502 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                          /* PCI write */
54503 +
54504 +           FreeTxdRail (xmtrRail, txdRail);
54505 +           return 0;
54506 +       }
54507 +
54508 +       InitialiseTxdRail (txd, txdRail, phase);
54509 +
54510 +       /* Initialise the dma descriptor */
54511 +       dmabe.s.dma_type            = E3_DMA_TYPE (DMA_BYTE, DMA_WRITE, DMA_QUEUED, EP3_DMAFAILCOUNT);
54512 +       dmabe.s.dma_size            = (EP_HAS_PAYLOAD(txd->Envelope.Attr) ? EP_INPUTQ_SIZE : EP_ENVELOPE_SIZE);
54513 +       dmabe.s.dma_source          = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, Envelope);
54514 +       dmabe.s.dma_dest            = (E3_Addr) 0;
54515 +       dmabe.s.dma_destEvent       = EP_MSGQ_ADDR(txd->Service);
54516 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (txd->NodeId);
54517 +       dmabe.s.dma_srcEvent        = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent);
54518 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, txd->NodeId);
54519 +
54520 +       EPRINTF8 (DBG_XMTR, "%s: TransmitTxdOnRail: txd=%p txdRail=%p @ %x XID=%llx dest=%u srcEvent=%x srcCookie=%x\n", rail->Generic.Name, 
54521 +                 txd, txdRail, txdRail->TxdElanAddr, (long long) txd->Envelope.Xid.Unique, txd->NodeId, dmabe.s.dma_srcEvent, dmabe.s.dma_srcCookieVProc);
54522 +       
54523 +       BindTxdToRail (txd, txdRail);
54524 +       
54525 +       if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
54526 +           QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
54527 +       break;
54528 +
54529 +    case EP_TXD_PHASE_PASSIVE:
54530 +       InitialiseTxdRail (txd, txdRail, EP_TXD_PHASE_PASSIVE);                         /* initialise as passive (updated envelope) */
54531 +       
54532 +       EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE);     /* unbind from existing rail */
54533 +
54534 +       BindTxdToRail (txd, txdRail);                                                   /* and bind it to our new rail */
54535 +       break;
54536 +    }
54537 +
54538 +    return 1;
54539 +}
54540 +
54541 +void
54542 +ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int phase)
54543 +{
54544 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) txd->TxdRail;
54545 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
54546 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
54547 +
54548 +    /* XXXX - TBD assertions on phase */
54549 +
54550 +    UnbindTxdFromRail (txd, txdRail);
54551 +    
54552 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54553 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54554 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54555 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54556 +    
54557 +    /* reset the envelope and data events, since only they could have been set */
54558 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                         /* PCI write */
54559 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                         /* PCI write */
54560 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                         /* PCI write */         
54561 +    
54562 +    FreeTxdRail (xmtrRail, txdRail);
54563 +}
54564 +
54565 +long
54566 +ep3xmtr_check (EP_XMTR_RAIL *x, long nextRunTime)
54567 +{
54568 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
54569 +
54570 +    if (xmtrRail->FreeDescCount < ep_txd_lowat && !AllocateTxdRailBlock(xmtrRail))
54571 +    {
54572 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->Generic.Name);
54573 +               
54574 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
54575 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
54576 +    }
54577 +    
54578 +    return nextRunTime;
54579 +}
54580 +
54581 +void
54582 +ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
54583 +{
54584 +    EP3_XMTR_RAIL *xmtrRail;
54585 +    unsigned long  flags;
54586 +
54587 +    KMEM_ZALLOC (xmtrRail, EP3_XMTR_RAIL *, sizeof (EP3_XMTR_RAIL), 1);
54588 +
54589 +    spin_lock_init (&xmtrRail->FreeDescLock);
54590 +    kcondvar_init  (&xmtrRail->FreeDescSleep);
54591 +    INIT_LIST_HEAD (&xmtrRail->FreeDescList);
54592 +    INIT_LIST_HEAD (&xmtrRail->DescBlockList);
54593 +
54594 +    xmtrRail->Generic.CommsRail = commsRail;
54595 +    xmtrRail->Generic.Xmtr      = xmtr;
54596 +
54597 +    spin_lock_irqsave (&xmtr->Lock, flags);
54598 +
54599 +    xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->Generic;
54600 +    xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
54601 +
54602 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54603 +}
54604 +
54605 +void
54606 +ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
54607 +{
54608 +    EP3_RAIL         *rail     = (EP3_RAIL *) commsRail->Rail;
54609 +    EP3_XMTR_RAIL    *xmtrRail = (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number];
54610 +    unsigned long     flags;
54611 +
54612 +    /* rail mask set as not usable */
54613 +    spin_lock_irqsave (&xmtr->Lock, flags);
54614 +    xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number);
54615 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54616 +
54617 +    /* wait for all txd's for this rail to become free */
54618 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
54619 +    while (xmtrRail->FreeDescCount != xmtrRail->TotalDescCount)
54620 +    {
54621 +       xmtrRail->FreeDescWaiting++;
54622 +       kcondvar_wait (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock, &flags);
54623 +    }
54624 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
54625 +
54626 +    spin_lock_irqsave (&xmtr->Lock, flags);
54627 +    xmtr->Rails[commsRail->Rail->Number] = NULL;
54628 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54629 +
54630 +    /* need to free up the txd's and blocks */
54631 +    /* all the txd's associated with DescBlocks must be in the FreeDescList */
54632 +    ASSERT (xmtrRail->TotalDescCount == xmtrRail->FreeDescCount);
54633 +
54634 +    /* run through the DescBlockList deleting them */
54635 +    while (!list_empty (&xmtrRail->DescBlockList))
54636 +       FreeTxdRailBlock (xmtrRail, list_entry(xmtrRail->DescBlockList.next, EP3_TXD_RAIL_BLOCK , Link));
54637 +    
54638 +    /* it had better be empty after that */
54639 +    ASSERT ((xmtrRail->FreeDescCount == 0) && (xmtrRail->TotalDescCount == 0));
54640 +
54641 +    spin_lock_destroy (&xmtrRail->FreeDescLock);
54642 +    kcondvar_destroy (&xmtrRail->FreeDescSleep);
54643 +
54644 +    KMEM_FREE (xmtrRail, sizeof (EP3_XMTR_RAIL));
54645 +}
54646 +
54647 +void
54648 +ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x)
54649 +{
54650 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
54651 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
54652 +    struct list_head *el;
54653 +    unsigned long flags;
54654 +    int freeCount = 0;
54655 +
54656 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
54657 +    list_for_each (el, &xmtrRail->FreeDescList)
54658 +       freeCount++;
54659 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
54660 +
54661 +    (di->func)(di->arg, "                 Rail=%d Free=%d Total=%d (%d)\n",
54662 +               rail->Generic.Number, xmtrRail->FreeDescCount, xmtrRail->TotalDescCount, freeCount);
54663 +}
54664 +
54665 +void
54666 +ep3xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t)
54667 +{
54668 +    EP3_TXD_RAIL      *txdRail   = (EP3_TXD_RAIL *) t;
54669 +    EP3_XMTR_RAIL     *xmtrRail  = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
54670 +    EP3_TXD_RAIL_MAIN *txdMain   = txdRail->TxdMain;
54671 +    sdramaddr_t        txdElan   = txdRail->TxdElan;
54672 +    EP3_RAIL          *rail      = (EP3_RAIL *) xmtrRail->Generic.CommsRail->Rail;
54673 +    ELAN3_DEV         *dev       = rail->Device;
54674 +    
54675 +    (di->func)(di->arg, "     EnveEvent=%x DataEvent=%x DoneEvent=%x Rail=%s\n", 
54676 +              txdMain->EnveEvent, txdMain->DataEvent, txdMain->DoneEvent, rail->Generic.Name);
54677 +    (di->func)(di->arg, "     EnveEvent=%x.%x DataEvent=%x.%x DoneEvent=%x.%x\n",
54678 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)),
54679 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type)),
54680 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)),
54681 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type)),
54682 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)),
54683 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type)));
54684 +}
54685 +
54686 +int
54687 +ep3xmtr_check_txd_state (EP_TXD *txd)
54688 +{
54689 +    EP3_TXD_RAIL  *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
54690 +    EP3_XMTR_RAIL *xmtrRail  = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
54691 +    EP3_RAIL      *rail      = XMTR_TO_RAIL (xmtrRail);
54692 +    E3_Addr        enveEvent = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent);
54693 +    EP3_RETRY_DMA *retry = NULL;
54694 +
54695 +    struct list_head *el;
54696 +    struct list_head *nel;
54697 +    unsigned long     flags;
54698 +
54699 +    /* check that the envelope event really has not been set */
54700 +    if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent )) 
54701 +       return (0);
54702 +    
54703 +    /* remove matching dma from stalled list */            
54704 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
54705 +    
54706 +    list_for_each_safe(el, nel,  &rail->DmaRetries[EP_RETRY_STABALISING]) {
54707 +       retry = list_entry (el, EP3_RETRY_DMA, Link);
54708 +       
54709 +       if ( retry->Dma.s.dma_srcEvent == enveEvent ) {
54710 +           /* remove from retry list */
54711 +           list_del (&retry->Link);
54712 +           break; /* there can only be one */
54713 +       } 
54714 +    }
54715 +    ASSERT ( retry != NULL); /* must find one in list */
54716 +    ASSERT ( retry->Dma.s.dma_srcEvent == enveEvent ); /* better still be the right type then */    
54717 +
54718 +    /* add to free list */
54719 +    list_add (&retry->Link, &rail->DmaRetryFreeList);
54720 +
54721 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);    
54722 +                       
54723 +    UnbindTxdFromRail (txd, txdRail);
54724 +       
54725 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
54726 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
54727 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
54728 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
54729 +    
54730 +    /* reset the envelope and data events, since only they could have been set */
54731 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                         /* PCI write */
54732 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                         /* PCI write */
54733 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                         /* PCI write */         
54734 +    
54735 +    FreeTxdRail (xmtrRail, txdRail);
54736 +
54737 +    return (1);
54738 +}
54739 +
54740 +void
54741 +ep3xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) {
54742 +    /* no stats here yet */
54743 +    /* EP3_XMTR_RAIL * ep3xmtr_rail = (EP3_XMTR_RAIL *) xmtr_rail; */
54744 +}
54745 +
54746 +/*
54747 + * Local variables:
54748 + * c-file-style: "stroustrup"
54749 + * End:
54750 + */
54751 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsTx_elan4.c
54752 ===================================================================
54753 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/epcommsTx_elan4.c       2004-02-23 16:02:56.000000000 -0500
54754 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/epcommsTx_elan4.c    2005-07-28 14:52:52.881673720 -0400
54755 @@ -0,0 +1,1389 @@
54756 +/*
54757 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
54758 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
54759 + *
54760 + *    For licensing information please see the supplied COPYING file
54761 + *
54762 + */
54763 +
54764 +#ident "@(#)$Id: epcommsTx_elan4.c,v 1.26.2.4 2004/11/12 10:54:51 mike Exp $"
54765 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx_elan4.c,v $ */
54766 +
54767 +#include <qsnet/kernel.h>
54768 +
54769 +#include <elan/kcomm.h>
54770 +#include <elan/epsvc.h>
54771 +#include <elan/epcomms.h>
54772 +
54773 +#include "debug.h"
54774 +#include "kcomm_vp.h"
54775 +#include "kcomm_elan4.h"
54776 +#include "epcomms_elan4.h"
54777 +
54778 +#include <elan4/trtype.h>
54779 +
54780 +#define XMTR_TO_COMMS(xmtrRail)                ((EP4_COMMS_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail)
54781 +#define XMTR_TO_RAIL(xmtrRail)         ((EP4_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail)
54782 +#define XMTR_TO_DEV(xmtrRail)          (XMTR_TO_RAIL(xmtrRail)->r_ctxt.ctxt_dev)
54783 +#define XMTR_TO_SUBSYS(xmtrRail)       (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys)
54784 +
54785 +#define TXD_TO_XMTR(txdRail)           ((EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail)
54786 +#define TXD_TO_RAIL(txdRail)           XMTR_TO_RAIL(TXD_TO_XMTR(txdRail))
54787 +
54788 +static void txd_interrupt (EP4_RAIL *rail, void *arg);
54789 +static void poll_interrupt (EP4_RAIL *rail, void *arg);
54790 +
54791 +static __inline__ int
54792 +on_list (struct list_head *ent, struct list_head *list)
54793 +{
54794 +    struct list_head *el;
54795 +    unsigned int count = 0;
54796 +    list_for_each (el, list) {
54797 +       if (el == ent)
54798 +           count++;
54799 +    }
54800 +    return count;
54801 +}
54802 +
54803 +static __inline__ void
54804 +__ep4_txd_assert_free (EP4_TXD_RAIL *txdRail, const char *file, const int line)
54805 +{
54806 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
54807 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
54808 +    register int   failed   = 0;
54809 +    
54810 +    if ((txdRail)->txd_retry_time     != 0)              failed |= (1 << 0);
54811 +    if ((txdRail)->txd_main->txd_env  != EP4_STATE_FREE) failed |= (1 << 1);
54812 +    if ((txdRail)->txd_main->txd_data != EP4_STATE_FREE) failed |= (1 << 2);
54813 +    if ((txdRail)->txd_main->txd_done != EP4_STATE_FREE) failed |= (1 << 3);
54814 +
54815 +    if (sdram_assert)
54816 +    {
54817 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32)  != -32) failed |= (1 << 4);
54818 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 5);
54819 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 6);
54820 +    }
54821 +
54822 +    if (failed)
54823 +    {
54824 +       printk ("__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54825 +
54826 +       ep_debugf (DBG_DEBUG, "__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54827 +       ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
54828 +
54829 +       (txdRail)->txd_retry_time     = 0;
54830 +       (txdRail)->txd_main->txd_env  = EP4_STATE_FREE;
54831 +       (txdRail)->txd_main->txd_data = EP4_STATE_FREE;
54832 +       (txdRail)->txd_main->txd_done = EP4_STATE_FREE;
54833 +
54834 +       if (sdram_assert)
54835 +       {
54836 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)  + 4, -32);
54837 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0);
54838 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0);
54839 +       }
54840 +       EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_free");
54841 +    }
54842 +}
54843 +
54844 +static __inline__ void
54845 +__ep4_txd_assert_finished (EP4_TXD_RAIL *txdRail, const char *file, const int line)
54846 +{
54847 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
54848 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
54849 +    register int   failed   = 0;
54850 +    
54851 +    if ((txdRail)->txd_retry_time     != 0)                  failed |= (1 << 0);
54852 +    if ((txdRail)->txd_main->txd_env  != EP4_STATE_FINISHED) failed |= (1 << 1);
54853 +    if ((txdRail)->txd_main->txd_data != EP4_STATE_FINISHED) failed |= (1 << 2);
54854 +    if ((txdRail)->txd_main->txd_done != EP4_STATE_FINISHED) failed |= (1 << 3);
54855 +    
54856 +    if (sdram_assert)
54857 +    {
54858 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32)  != -32) failed |= (1 << 4);
54859 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 5);
54860 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 6);
54861 +    }
54862 +
54863 +    if (failed)
54864 +    {
54865 +       printk ("__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54866 +
54867 +       ep_debugf (DBG_DEBUG, "__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
54868 +       ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
54869 +
54870 +       (txdRail)->txd_retry_time     = 0;
54871 +       (txdRail)->txd_main->txd_env  = EP4_STATE_FINISHED;
54872 +       (txdRail)->txd_main->txd_data = EP4_STATE_FINISHED;
54873 +       (txdRail)->txd_main->txd_done = EP4_STATE_FINISHED;
54874 +
54875 +       if (sdram_assert)
54876 +       {
54877 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)  + 4, -32);
54878 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0);
54879 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0);
54880 +       }
54881 +       EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_finished");
54882 +    }
54883 +}
54884 +
54885 +static __inline__ int
54886 +__ep4_txd_assfail (EP4_TXD_RAIL *txdRail, const char *expr, const char *file, const int line)
54887 +{
54888 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
54889 +
54890 +    printk ("__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr);
54891 +
54892 +    ep_debugf (DBG_DEBUG, "__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr);
54893 +    ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
54894 +
54895 +    EP_ASSFAIL (XMTR_TO_RAIL (xmtrRail), "__ep4_txd_assfail");
54896 +
54897 +    return 0;
54898 +}
54899 +
54900 +#define EP4_TXD_ASSERT(txdRail, EX)            ((void) ((EX) || (__ep4_txd_assfail(txdRail, #EX, __FILE__, __LINE__))))
54901 +#define EP4_TXD_ASSERT_FREE(txdRail)           __ep4_txd_assert_free(txdRail, __FILE__, __LINE__)
54902 +#define EP4_TXD_ASSERT_FINISHED(txdRail)       __ep4_txd_assert_finished(txdRail, __FILE__, __LINE__)
54903 +
54904 +static int
54905 +alloc_txd_block (EP4_XMTR_RAIL *xmtrRail)
54906 +{
54907 +    EP4_RAIL           *rail = XMTR_TO_RAIL(xmtrRail);
54908 +    ELAN4_DEV          *dev  = XMTR_TO_DEV(xmtrRail);
54909 +    EP4_TXD_RAIL_BLOCK *blk;
54910 +    EP4_TXD_RAIL_MAIN  *txdMain;
54911 +    EP_ADDR            txdMainAddr;
54912 +    sdramaddr_t                txdElan;
54913 +    EP_ADDR            txdElanAddr;
54914 +    EP4_TXD_RAIL       *txdRail;
54915 +    unsigned long       flags;
54916 +    int                 i;
54917 +
54918 +    KMEM_ZALLOC (blk, EP4_TXD_RAIL_BLOCK *, sizeof (EP4_TXD_RAIL_BLOCK), 1);
54919 +
54920 +    if (blk == NULL)
54921 +       return 0;
54922 +
54923 +    if ((txdElan = ep_alloc_elan (&rail->r_generic, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdElanAddr)) == (sdramaddr_t) 0)
54924 +    {
54925 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54926 +       return 0;
54927 +    }
54928 +
54929 +    if ((txdMain = ep_alloc_main (&rail->r_generic, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdMainAddr)) == (EP4_TXD_RAIL_MAIN *) NULL)
54930 +    {
54931 +       ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54932 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54933 +       return 0;
54934 +    }
54935 +
54936 +    if (ep4_reserve_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK, 0) != 0)
54937 +    {
54938 +       ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54939 +       ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
54940 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
54941 +       return 0;
54942 +    }
54943 +
54944 +    for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++)
54945 +    {
54946 +       txdRail->txd_generic.XmtrRail = &xmtrRail->xmtr_generic;
54947 +       txdRail->txd_elan             = txdElan;
54948 +       txdRail->txd_elan_addr        = txdElanAddr;
54949 +       txdRail->txd_main             = txdMain;
54950 +       txdRail->txd_main_addr        = txdMainAddr;
54951 +
54952 +       /* We only need to reserve space for one command stream, since the sten packet
54953 +        * can only be retrying *before* the dma source event is set.
54954 +        * reserve bytes of "event" cq space for the completion write + interrupt */
54955 +       if ((txdRail->txd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_INTR_CMD_NDWORDS)) == NULL)
54956 +           goto failed;
54957 +
54958 +       /* register the main interrupt cookies */
54959 +       ep4_register_intcookie (rail, &txdRail->txd_intcookie, txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done), txd_interrupt, txdRail);
54960 +
54961 +       /* initialise the events */
54962 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
54963 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54964 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopySource),
54965 +                           txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd));
54966 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopyDest),
54967 +                           txdRail->txd_ecq->ecq_addr);
54968 +
54969 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
54970 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
54971 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WritePtr),
54972 +                           txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_data));
54973 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WriteValue),
54974 +                           EP4_STATE_FINISHED);
54975 +
54976 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
54977 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54978 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopySource),
54979 +                           txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd));
54980 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopyDest),
54981 +                           txdRail->txd_ecq->ecq_addr);
54982 +
54983 +       /* Initialise the command streams */
54984 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_cmd),
54985 +                           WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_env)));
54986 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_value),
54987 +                           EP4_STATE_FAILED);
54988 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_intr_cmd),
54989 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
54990 +
54991 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_cmd),
54992 +                           WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_done)));
54993 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_value),
54994 +                           EP4_STATE_FINISHED);
54995 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
54996 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
54997 +
54998 +       txdMain->txd_env  = EP4_STATE_FREE;
54999 +       txdMain->txd_data = EP4_STATE_FREE;
55000 +       txdMain->txd_done = EP4_STATE_FREE;
55001 +
55002 +       /* move onto next descriptor */
55003 +       txdElan     += EP4_TXD_RAIL_ELAN_SIZE;
55004 +       txdElanAddr += EP4_TXD_RAIL_ELAN_SIZE;
55005 +       txdMain      = (EP4_TXD_RAIL_MAIN *) ((unsigned long) txdMain + EP4_TXD_RAIL_MAIN_SIZE);
55006 +       txdMainAddr += EP4_TXD_RAIL_MAIN_SIZE;
55007 +    }
55008 +
55009 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
55010 +
55011 +    list_add  (&blk->blk_link, &xmtrRail->xmtr_blocklist);
55012 +
55013 +    xmtrRail->xmtr_totalcount += EP4_NUM_TXD_PER_BLOCK;
55014 +    xmtrRail->xmtr_freecount  += EP4_NUM_TXD_PER_BLOCK;
55015 +
55016 +    for (i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++)
55017 +       list_add (&blk->blk_txds[i].txd_generic.Link, &xmtrRail->xmtr_freelist);
55018 +
55019 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
55020 +
55021 +    return 1;
55022 +
55023 + failed:
55024 +    while (--i >= 0)
55025 +    {
55026 +       ep4_put_ecq (rail, (--txdRail)->txd_ecq, EP4_INTR_CMD_NDWORDS);
55027 +       ep4_deregister_intcookie (rail, &txdRail->txd_intcookie);
55028 +    }
55029 +    ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK);
55030 +
55031 +    ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
55032 +    ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
55033 +
55034 +    KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
55035 +
55036 +    return 0;
55037 +}
55038 +
55039 +static void
55040 +free_txd_block (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL_BLOCK *blk)
55041 +{
55042 +    EP4_RAIL     *rail = XMTR_TO_RAIL (xmtrRail);
55043 +    EP4_TXD_RAIL *txdRail;
55044 +    unsigned long flags;
55045 +    int           i;
55046 +
55047 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
55048 +
55049 +    list_del (&blk->blk_link);
55050 +
55051 +    xmtrRail->xmtr_totalcount -= EP4_NUM_TXD_PER_BLOCK;
55052 +
55053 +    for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++)
55054 +    {
55055 +       xmtrRail->xmtr_freecount--;
55056 +
55057 +       ep4_put_ecq (rail, txdRail->txd_ecq, EP4_INTR_CMD_NDWORDS);
55058 +
55059 +       ep4_deregister_intcookie (rail, &txdRail->txd_intcookie);
55060 +
55061 +       list_del (&txdRail->txd_generic.Link);
55062 +    }
55063 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
55064 +
55065 +    ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK);
55066 +
55067 +    ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
55068 +    ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
55069 +
55070 +    KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
55071 +}
55072 +
55073 +static EP4_TXD_RAIL *
55074 +get_txd_rail (EP4_XMTR_RAIL *xmtrRail)
55075 +{
55076 +    EP_COMMS_SUBSYS  *subsys = XMTR_TO_SUBSYS(xmtrRail);
55077 +    EP4_TXD_RAIL     *txdRail;
55078 +    unsigned long flags;
55079 +    int low_on_txds;
55080 +
55081 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
55082 +
55083 +    if (list_empty (&xmtrRail->xmtr_freelist))
55084 +       txdRail = NULL;
55085 +    else
55086 +    {
55087 +       txdRail = list_entry (xmtrRail->xmtr_freelist.next, EP4_TXD_RAIL, txd_generic.Link);
55088 +
55089 +       EP4_TXD_ASSERT_FREE(txdRail);
55090 +
55091 +       list_del (&txdRail->txd_generic.Link);
55092 +
55093 +       xmtrRail->xmtr_freecount--;
55094 +    }
55095 +    /* Wakeup the descriptor primer thread if there's not many left */
55096 +    low_on_txds = (xmtrRail->xmtr_freecount < ep_txd_lowat);
55097 +
55098 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
55099 +
55100 +    if (low_on_txds)
55101 +       ep_kthread_schedule (&subsys->Thread, lbolt);
55102 +
55103 +
55104 +    return (txdRail);
55105 +}
55106 +
55107 +static void
55108 +free_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
55109 +{
55110 +    unsigned long flags;
55111 +
55112 +    EP4_TXD_ASSERT_FREE(txdRail);
55113 +
55114 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
55115 +    
55116 +    list_add (&txdRail->txd_generic.Link, &xmtrRail->xmtr_freelist);
55117 +
55118 +    xmtrRail->xmtr_freecount++;
55119 +
55120 +    if (xmtrRail->xmtr_freewaiting)
55121 +    {
55122 +       xmtrRail->xmtr_freewaiting--;
55123 +       kcondvar_wakeupall (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock);
55124 +    }
55125 +
55126 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
55127 +}
55128 +
55129 +static void
55130 +bind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
55131 +{
55132 +    EPRINTF6 (DBG_XMTR, "%s: bind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
55133 +             XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, 
55134 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, txd->Envelope.Xid.Unique);
55135 +
55136 +    txd->TxdRail = &txdRail->txd_generic;
55137 +    txdRail->txd_generic.Txd = txd;
55138 +}
55139 +
55140 +static void
55141 +unbind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
55142 +{
55143 +    EP4_TXD_ASSERT (txdRail, txd->TxdRail == &txdRail->txd_generic && txdRail->txd_generic.Txd == txd);
55144 +
55145 +    EPRINTF6 (DBG_XMTR, "%s: unbind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
55146 +             XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, 
55147 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, txd->Envelope.Xid.Unique);
55148 +
55149 +
55150 +    txdRail->txd_generic.Txd = NULL; 
55151 +    txd->TxdRail = NULL;
55152 +}
55153 +
55154 +static void
55155 +initialise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail, unsigned int phase)
55156 +{
55157 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail;
55158 +    EP4_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
55159 +    ELAN4_DEV     *dev      = rail->r_ctxt.ctxt_dev;
55160 +    
55161 +    /* Flush the Elan TLB if mappings have changed */
55162 +    ep_perrail_dvma_sync (&rail->r_generic);
55163 +    
55164 +    /* Initialise the per-rail fields in the envelope */
55165 +    txd->Envelope.TxdRail = txdRail->txd_elan_addr;
55166 +    txd->Envelope.NodeId  = rail->r_generic.Position.pos_nodeid;
55167 +
55168 +    /* Allocate a network error fixup cookie */
55169 +    txdRail->txd_cookie = ep4_neterr_cookie (rail, txd->NodeId) | EP4_COOKIE_STEN;
55170 +
55171 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
55172 +    if ( epdebug_check_sum ) 
55173 +       txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags);
55174 +    else
55175 +#endif
55176 +       txd->Envelope.CheckSum = 0;  
55177 +
55178 +    /* Initialise the per-rail events */
55179 +    switch (phase)
55180 +    {
55181 +    case EP_TXD_PHASE_ACTIVE:
55182 +    {
55183 +       unsigned int nsets = (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + ( EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0);
55184 +
55185 +       if (! EP_IS_RPC(txd->Envelope.Attr))
55186 +       {
55187 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
55188 +                               E4_EVENT_INIT_VALUE (-32 * nsets, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55189 +
55190 +           txdRail->txd_main->txd_data = EP4_STATE_FINISHED;
55191 +       }
55192 +       else
55193 +       {
55194 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
55195 +                               E4_EVENT_INIT_VALUE(-32 * nsets , E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
55196 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
55197 +                               E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55198 +
55199 +           txdRail->txd_main->txd_data = EP4_STATE_ACTIVE;
55200 +       }
55201 +                  
55202 +       txdRail->txd_main->txd_env  = EP4_STATE_ACTIVE;
55203 +       txdRail->txd_main->txd_done = EP4_STATE_ACTIVE;
55204 +       break;
55205 +    }
55206 +
55207 +    case EP_TXD_PHASE_PASSIVE:
55208 +       EP4_TXD_ASSERT (txdRail, EP_IS_RPC(txd->Envelope.Attr));
55209 +       
55210 +       txdRail->txd_main->txd_env  = EP4_STATE_FINISHED;
55211 +       txdRail->txd_main->txd_data = EP4_STATE_FINISHED;
55212 +       txdRail->txd_main->txd_done = EP4_STATE_ACTIVE;
55213 +
55214 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
55215 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55216 +       break;
55217 +    }
55218 +
55219 +   if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
55220 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD);
55221 +}
55222 +
55223 +static void
55224 +terminate_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
55225 +{
55226 +    EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\
55227 +                     (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\
55228 +                     E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\
55229 +
55230 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
55231 +    txdRail->txd_main->txd_env  = EP4_STATE_FREE;
55232 +    txdRail->txd_main->txd_data = EP4_STATE_FREE;
55233 +    txdRail->txd_main->txd_done = EP4_STATE_FREE;
55234 +
55235 +#if defined(DEBUG_ASSERT)
55236 +    if (sdram_assert)
55237 +    {
55238 +       ELAN4_DEV *dev = XMTR_TO_RAIL (xmtrRail)->r_ctxt.ctxt_dev;
55239 +
55240 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
55241 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
55242 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
55243 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55244 +    }
55245 +#endif 
55246 +}
55247 +
55248 +static void
55249 +defer_txd_rail (EP4_TXD_RAIL *txdRail)
55250 +{
55251 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
55252 +    EP4_RAIL        *rail     = XMTR_TO_RAIL(xmtrRail);
55253 +    ELAN4_DEV       *dev      = rail->r_ctxt.ctxt_dev;
55254 +    EP_COMMS_SUBSYS *subsys   = XMTR_TO_SUBSYS(xmtrRail);
55255 +
55256 +    EPRINTF5 (DBG_XMTR, "%s: defer_txd_rail: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n",
55257 +             rail->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
55258 +                   
55259 +    /* transmit has completed, but the data dma has not completed
55260 +     * (because of network error fixup), we queue the txdRail onto a list
55261 +     * to be polled for completion later.
55262 +     */
55263 +    if (txdRail->txd_retry_time)
55264 +    {
55265 +       EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 ||
55266 +                                 on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1));
55267 +
55268 +       list_del (&txdRail->txd_retry_link);
55269 +
55270 +       txdRail->txd_main->txd_env = EP4_STATE_FINISHED;
55271 +
55272 +       /* re-initialise the envelope event */
55273 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
55274 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55275 +    }
55276 +    
55277 +    txdRail->txd_retry_time = lbolt;
55278 +       
55279 +    list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]);
55280 +       
55281 +    ep_kthread_schedule (&subsys->Thread, lbolt);
55282 +}
55283 +
55284 +static void
55285 +finalise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
55286 +{
55287 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
55288 +
55289 +    EP4_TXD_ASSERT_FINISHED (txdRail);
55290 +
55291 +    unbind_txd_rail (txd, txdRail);
55292 +    
55293 +    terminate_txd_rail (xmtrRail, txdRail);
55294 +    free_txd_rail (xmtrRail, txdRail);
55295 +}
55296 +
55297 +static void
55298 +txd_interrupt (EP4_RAIL *rail, void *arg)
55299 +{
55300 +    EP4_TXD_RAIL    *txdRail  = (EP4_TXD_RAIL *) arg;
55301 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
55302 +    EP_XMTR         *xmtr     = xmtrRail->xmtr_generic.Xmtr;
55303 +    int              delay    = 1;
55304 +    EP_TXD          *txd;
55305 +    unsigned long    flags;
55306 +
55307 +    spin_lock_irqsave (&xmtr->Lock, flags);
55308 +    for (;;)
55309 +    {
55310 +       if (txdRail->txd_main->txd_done == EP4_STATE_FINISHED || txdRail->txd_main->txd_env == EP4_STATE_FAILED)
55311 +           break;
55312 +       
55313 +       /* The write to txd_done could be held up in the PCI bridge even though
55314 +        * we've seen the interrupt cookie.  Unlike elan3, there is no possibility
55315 +        * of spurious interrupts since we flush the command queues on node 
55316 +        * disconnection and the txcallback mechanism */
55317 +       mb();
55318 +
55319 +       if (delay > EP4_EVENT_FIRING_TLIMIT)
55320 +       {
55321 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
55322 +
55323 +           EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "txd_interrupt - not finished\n");
55324 +           return;
55325 +       }
55326 +       DELAY (delay);
55327 +       delay <<= 1;
55328 +    }
55329 +
55330 +    txd = txdRail->txd_generic.Txd;
55331 +
55332 +    if (txdRail->txd_main->txd_env == EP4_STATE_FAILED)
55333 +    {
55334 +       spin_lock (&xmtrRail->xmtr_retrylock);
55335 +
55336 +       EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time == 0);                         /* cannot be on retry/poll list */
55337 +       EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_done != EP4_STATE_FINISHED);    /* data xfer cannot have finished */
55338 +
55339 +       if (TxdShouldStabalise (&txdRail->txd_generic, &rail->r_generic))
55340 +       {
55341 +           EPRINTF6 (DBG_STABILISE, "%s: txd_interrupt: stablise xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
55342 +                     xmtrRail, txdRail, txd, txd->Envelope.Xid.Unique, txd->NodeId);
55343 +
55344 +           txdRail->txd_retry_time = lbolt;                    /* indicate on retry list */
55345 +           
55346 +           list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]);
55347 +       }
55348 +       else
55349 +       {
55350 +           EPRINTF6 (DBG_RETRY, "%s: txd_interrupt: retry xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
55351 +                     xmtrRail, txdRail, txd, txd->Envelope.Xid.Unique, txd->NodeId);
55352 +
55353 +           txdRail->txd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;            /* XXXX: backoff ? */
55354 +           
55355 +           list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]);
55356 +           
55357 +           ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time);
55358 +       }
55359 +       spin_unlock (&xmtrRail->xmtr_retrylock);
55360 +
55361 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
55362 +       return;
55363 +    }
55364 +
55365 +    EP4_TXD_ASSERT (txdRail, txd != NULL && !(EP_IS_NO_INTERRUPT(txd->Envelope.Attr)));
55366 +
55367 +    EPRINTF6 (DBG_XMTR, "%s: txd_interrupt: xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
55368 +             xmtrRail, txdRail, txd, txd->Envelope.Xid.Unique, txd->NodeId);
55369 +            
55370 +    if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED)
55371 +    {
55372 +       defer_txd_rail (txdRail);
55373 +
55374 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
55375 +    }
55376 +    else
55377 +    {
55378 +       /* remove from active transmit list */
55379 +       list_del (&txd->Link);
55380 +
55381 +       ep_xmtr_txd_stat(xmtr,txd);
55382 +
55383 +       finalise_txd (txd, txdRail);
55384 +       
55385 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
55386 +       
55387 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
55388 +       
55389 +       FreeTxd (xmtr, txd);
55390 +    }
55391 +}
55392 +
55393 +static void
55394 +poll_interrupt (EP4_RAIL *rail, void *arg)
55395 +{
55396 +    EP4_XMTR_RAIL   *xmtrRail = (EP4_XMTR_RAIL *) arg;
55397 +
55398 +    ep_poll_transmits (xmtrRail->xmtr_generic.Xmtr);
55399 +}
55400 +
55401 +void
55402 +issue_envelope_packet (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
55403 +{
55404 +    EP_TXD    *txd    = txdRail->txd_generic.Txd;
55405 +    ELAN4_CQ  *cq     = xmtrRail->xmtr_cq;
55406 +    E4_uint64 *blk0   = (E4_uint64 *) &txd->Envelope;
55407 +    E4_uint64 *blk1   = EP_HAS_PAYLOAD(txd->Envelope.Attr) ? (E4_uint64 *) &txd->Payload : NULL;
55408 +    E4_Addr    qaddr  = EP_MSGQ_ADDR(txd->Service);
55409 +
55410 +    EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\
55411 +                     (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\
55412 +                     E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\
55413 +
55414 +    elan4_open_packet (cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(txd->NodeId)));
55415 +    elan4_sendtrans0 (cq, TR_INPUT_Q_GETINDEX, EP_MSGQ_ADDR(txd->Service));
55416 +           
55417 +    /* send the payload if present */
55418 +    if (blk0) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 0,   blk0);
55419 +    if (blk1) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 128, blk1);
55420 +
55421 +    elan4_sendtrans1 (cq, TR_INPUT_Q_COMMIT, qaddr, txdRail->txd_cookie);
55422 +
55423 +    elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
55424 +    elan4_write_dword_cmd (cq, txdRail->txd_main_addr + offsetof (EP4_TXD_RAIL_MAIN, txd_env), EP4_STATE_FINISHED);
55425 +           
55426 +    elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_STEN_RETRYCOUNT));
55427 +    elan4_set_event_cmd (cq, txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_env));
55428 +    
55429 +    elan4_write_dword_cmd (cq, xmtrRail->xmtr_main_addr + offsetof (EP4_XMTR_RAIL_MAIN, xmtr_flowcnt), ++xmtrRail->xmtr_flowcnt);
55430 +}
55431 +
55432 +void
55433 +ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
55434 +{
55435 +    EP4_RAIL       *rail      = XMTR_TO_RAIL (xmtrRail);
55436 +    EP4_COMMS_RAIL *commsRail = XMTR_TO_COMMS (xmtrRail);
55437 +    struct list_head *el, *nel;
55438 +    unsigned long flags;
55439 +
55440 +    switch (rail->r_generic.CallbackStep)
55441 +    {
55442 +    case EP_CB_FLUSH_FILTERING:
55443 +       /* need to acquire/release the Lock to ensure that the node state
55444 +        * transition has been noticed and no new envelopes are queued to 
55445 +        * nodes which are passivating. */
55446 +       spin_lock_irqsave (&xmtr->Lock, flags);
55447 +
55448 +       /* Then we insert a "setevent" into the command queue to flush
55449 +        * through the envelopes which have already been submitted */
55450 +       ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq);
55451 +
55452 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
55453 +
55454 +       break;
55455 +
55456 +    case EP_CB_FLUSH_FLUSHING:
55457 +       /* remove any envelopes which are retrying to nodes which are going down */
55458 +       spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55459 +       list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) {
55460 +           EP4_TXD_RAIL *txdRail  = list_entry (el, EP4_TXD_RAIL, txd_retry_link);
55461 +           EP_TXD       *txd      = txdRail->txd_generic.Txd;
55462 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
55463 +           
55464 +           EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_env == EP4_STATE_FAILED);
55465 +           
55466 +           if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
55467 +           {
55468 +               EPRINTF2 (DBG_XMTR, "%s; ep4xmtr_flush_callback: removing txdRail %p from retry list\n", rail->r_generic.Name, txdRail);
55469 +               
55470 +               EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
55471 +
55472 +               list_del (&txdRail->txd_retry_link);
55473 +               list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]);
55474 +           }
55475 +       }
55476 +       spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55477 +
55478 +       /* Determine whether we have active or passive messages to 
55479 +        * any node which is passivating */
55480 +       spin_lock_irqsave (&xmtr->Lock, flags);
55481 +       list_for_each (el, &xmtr->ActiveDescList) {
55482 +           EP_TXD       *txd      = list_entry (el, EP_TXD, Link);
55483 +           EP4_TXD_RAIL *txdRail  = (EP4_TXD_RAIL *) txd->TxdRail;
55484 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
55485 +           
55486 +           if (txdRail == NULL || txdRail->txd_generic.XmtrRail != &xmtrRail->xmtr_generic || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
55487 +               continue;
55488 +           
55489 +           EPRINTF5 (DBG_XMTR, "%s: flush txd=%p txdRail=%p data=%llx done=%llx\n", rail->r_generic.Name,
55490 +                     txd, txdRail, txdRail->txd_main->txd_data, txdRail->txd_main->txd_done);
55491 +
55492 +           if (EP_IS_RPC(txd->Envelope.Attr))
55493 +           {
55494 +               if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE)
55495 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
55496 +               else if (txdRail->txd_main->txd_done == EP4_STATE_ACTIVE)
55497 +                   nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
55498 +           }
55499 +           else
55500 +           {
55501 +               if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE)
55502 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
55503 +           }
55504 +       }
55505 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
55506 +       break;
55507 +
55508 +    default:
55509 +       panic ("ep4xmtr_flush_callback: invalid callback step\n");
55510 +       break;
55511 +    }
55512 +}
55513 +
55514 +void
55515 +ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
55516 +{
55517 +    EP4_RAIL         *rail   = XMTR_TO_RAIL (xmtrRail);
55518 +    struct list_head  txdList;
55519 +    struct list_head *el, *nel;
55520 +    unsigned long flags;
55521 +
55522 +    INIT_LIST_HEAD (&txdList);
55523 +
55524 +    spin_lock_irqsave (&xmtr->Lock, flags);
55525 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
55526 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
55527 +       EP4_TXD_RAIL *txdRail   = (EP4_TXD_RAIL *) txd->TxdRail;
55528 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
55529 +           
55530 +       /* Only progress relocation of txd's bound to this rail */
55531 +       if (! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED)
55532 +           continue;
55533 +       
55534 +       /* XXXX - no rail failover for now ....*/
55535 +
55536 +       EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->r_generic.Name, xmtr, txd, txd->NodeId);
55537 +    }
55538 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55539 +
55540 +    while (! list_empty (&txdList)) 
55541 +    {
55542 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
55543 +
55544 +       list_del (&txd->Link);
55545 +
55546 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
55547 +       
55548 +       FreeTxd (xmtr, txd);
55549 +    }
55550 +}
55551 +
55552 +
55553 +void
55554 +ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
55555 +{
55556 +    EP4_RAIL         *rail = XMTR_TO_RAIL (xmtrRail);
55557 +    ELAN4_DEV        *dev  = rail->r_ctxt.ctxt_dev;
55558 +    struct list_head *el, *nel;
55559 +    struct list_head  txdList;
55560 +    unsigned long flags;
55561 +    
55562 +    INIT_LIST_HEAD (&txdList);
55563 +
55564 +    spin_lock_irqsave (&xmtr->Lock, flags);
55565 +
55566 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
55567 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
55568 +       EP4_TXD_RAIL *txdRail   = (EP4_TXD_RAIL *) txd->TxdRail;
55569 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
55570 +           
55571 +       if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
55572 +           continue;
55573 +       
55574 +       if (txdRail->txd_main->txd_done == EP4_STATE_ACTIVE)
55575 +       {
55576 +
55577 +           EPRINTF8 (DBG_DISCON, "ep4xmtr_disconnect_callback: txdRail=%p : events %llx,%llx,%llx done %llx,%llx,%llx retry %lx\n",txdRail,
55578 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)),
55579 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)),
55580 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)),
55581 +                     txdRail->txd_main->txd_env, txdRail->txd_main->txd_data, txdRail->txd_main->txd_done,
55582 +                     txdRail->txd_retry_time);
55583 +                      
55584 +           if (txdRail->txd_retry_time)
55585 +           {
55586 +               /* re-initialise the envelope event */
55587 +               elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
55588 +                                   E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55589 +               
55590 +               EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1);
55591 +
55592 +               txdRail->txd_retry_time  = 0;
55593 +
55594 +               list_del (&txdRail->txd_retry_link);
55595 +           }
55596 +
55597 +           /* Remove from active list */
55598 +           list_del (&txd->Link);
55599 +       
55600 +           unbind_txd_rail (txd, txdRail);
55601 +
55602 +           terminate_txd_rail (xmtrRail, txdRail);
55603 +           free_txd_rail (xmtrRail, txdRail);
55604 +           
55605 +           EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_disconnect_callback - xmtr %p txd %p node %d not conected\n", rail->r_generic.Name, xmtr, txd, txd->NodeId);
55606 +
55607 +           /* add to the list of txd's which are to be completed */
55608 +           list_add_tail (&txd->Link, &txdList);
55609 +       }
55610 +    }
55611 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55612 +    
55613 +    while (! list_empty (&txdList)) 
55614 +    {
55615 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
55616 +
55617 +       list_del (&txd->Link);
55618 +
55619 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
55620 +       
55621 +       FreeTxd (xmtr, txd);
55622 +    }
55623 +}
55624 +
55625 +void
55626 +ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
55627 +{
55628 +    EP4_COMMS_RAIL   *commsRail = XMTR_TO_COMMS (xmtrRail);
55629 +    unsigned long flags;
55630 +    
55631 +    spin_lock_irqsave (&xmtr->Lock, flags);
55632 +
55633 +    /* insert a "setevent" into the command queue to flush
55634 +     * through the envelopes which have already been submitted */
55635 +    ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq);
55636 +
55637 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55638 +}
55639 +
55640 +void
55641 +ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
55642 +{
55643 +    EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail);
55644 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
55645 +    struct list_head *el;
55646 +    unsigned long flags;
55647 +    
55648 +    spin_lock_irqsave (&xmtr->Lock, flags);
55649 +    list_for_each (el, &xmtr->ActiveDescList) {
55650 +       EP_TXD       *txd     = list_entry (el, EP_TXD, Link);
55651 +       EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) txd->TxdRail;
55652 +           
55653 +       if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || txd->NodeId != nodeId)
55654 +           continue;
55655 +       
+       /* The only non-dma associated with a txd is the initial sten packet, if it has been nacked 
+        * and the neterr cookie matches, then change it to look like it's been acked since the
+        * INPUT_Q_COMMIT transaction has already been executed */
55659 +       if (txdRail->txd_main->txd_env == EP4_STATE_FAILED && (txdRail->txd_cookie == cookies[0] || txdRail->txd_cookie == cookies[1]))
55660 +       {
55661 +           EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4xmtr_neterr_callback: cookie <%lld%s%s%s%s> matches txd %p txdRail %p\n", 
55662 +                    rail->r_generic.Name, EP4_COOKIE_STRING(txdRail->txd_cookie), txd, txdRail);
55663 +
55664 +           EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
55665 +
55666 +           txdRail->txd_main->txd_env = EP4_STATE_FINISHED;
55667 +
55668 +           /* re-initialise the envelope event */
55669 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
55670 +                               E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55671 +           
55672 +           spin_lock (&xmtrRail->xmtr_retrylock);
55673 +
55674 +           EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 ||
55675 +                                     on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1));
55676 +
55677 +           txdRail->txd_retry_time = 0;
55678 +
55679 +           list_del (&txdRail->txd_retry_link);
55680 +
55681 +           spin_unlock (&xmtrRail->xmtr_retrylock);
55682 +       }
55683 +    }
55684 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55685 +}
55686 +
55687 +int
55688 +ep4xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how)
55689 +{
55690 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
55691 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
55692 +    EP4_TXD_RAIL  *txdRail  = (EP4_TXD_RAIL *) t;
55693 +    EP_TXD        *txd      = txdRail->txd_generic.Txd;
55694 +
55695 +    if (! EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
55696 +       return 0;
55697 +
55698 +    switch (how)
55699 +    {
55700 +    case ENABLE_TX_CALLBACK:
55701 +       if (!EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr))
55702 +       {
55703 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
55704 +                               INTERRUPT_CMD | (xmtrRail->xmtr_intcookie.int_val << E4_MAIN_INT_SHIFT));
55705 +
55706 +           txd->Envelope.Attr |= EP_INTERRUPT_ENABLED;
55707 +       }
55708 +       break;
55709 +
55710 +    case DISABLE_TX_CALLBACK:
55711 +       if (EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr & EP_INTERRUPT_ENABLED))
55712 +       {
55713 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD);
55714 +
55715 +           txd->Envelope.Attr &= ~EP_INTERRUPT_ENABLED;
55716 +       }
55717 +    }
55718 +    
55719 +    if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED && txdRail->txd_main->txd_data == EP4_STATE_FINISHED && txdRail->txd_main->txd_done == EP4_STATE_FINISHED)
55720 +    {
55721 +       EPRINTF3 (DBG_XMTR, "%s: ep4xmtr_poll_txd: txd=%p XID=%llx completed\n",
55722 +                 XMTR_TO_RAIL (xmtrRail)->r_generic.Name, txd, txd->Envelope.Xid.Unique);
55723 +       
55724 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
55725 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
55726 +
55727 +
55728 +       ep_xmtr_txd_stat(xmtrRail->xmtr_generic.Xmtr,txd);
55729 +
55730 +       finalise_txd (txd, txdRail);
55731 +
55732 +       return 1;
55733 +    }
55734 +
55735 +    return 0;
55736 +}
55737 +
55738 +int
55739 +ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase)
55740 +{
55741 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
55742 +    EP4_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
55743 +    EP4_TXD_RAIL  *txdRail;
55744 +    unsigned long  flags;
55745 +
55746 +    if ((txdRail = get_txd_rail (xmtrRail)) == NULL)
55747 +       return 0;
55748 +    
55749 +    switch (phase)
55750 +    {
55751 +    case EP_TXD_PHASE_ACTIVE:
55752 +       if (rail->r_generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED)
55753 +       {
55754 +           EPRINTF2 (DBG_XMTR, "%s: ep4xmtr_bind_txd: node %u not connected on this rail\n", rail->r_generic.Name, txd->NodeId);
55755 +
55756 +           free_txd_rail (xmtrRail, txdRail);
55757 +           return 0;
55758 +       }
55759 +
55760 +       initialise_txd (txd, txdRail, EP_TXD_PHASE_ACTIVE);
55761 +
55762 +       bind_txd_rail (txd, txdRail);
55763 +       
55764 +       /* generate the STEN packet to transfer the envelope */
55765 +       spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55766 +       if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT)
55767 +           issue_envelope_packet (xmtrRail, txdRail);
55768 +       else
55769 +       {
55770 +           txdRail->txd_retry_time = lbolt;
55771 +
55772 +           list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]);
55773 +
55774 +           ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time);
55775 +       }
55776 +       spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55777 +       break;
55778 +
55779 +    case EP_TXD_PHASE_PASSIVE:
55780 +       initialise_txd (txd, txdRail, EP_TXD_PHASE_PASSIVE);
55781 +       
55782 +       EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE);     /* unbind from existing rail */
55783 +
55784 +       bind_txd_rail (txd, txdRail);                                                   /* and bind it to our new rail */
55785 +       break;
55786 +    }
55787 +
55788 +    return 1;
55789 +}
55790 +
55791 +void
55792 +ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase)
55793 +{
55794 +    /* XXXX - TBD */
55795 +}
55796 +
55797 +long
55798 +ep4xmtr_check (EP_XMTR_RAIL *x, long nextRunTime)
55799 +{
55800 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
55801 +    EP_XMTR       *xmtr     = xmtrRail->xmtr_generic.Xmtr;
55802 +    struct list_head  txdList;
55803 +    struct list_head *el, *nel;
55804 +    unsigned long flags;
55805 +
55806 +    INIT_LIST_HEAD (&txdList);
55807 +
55808 +    if (xmtrRail->xmtr_freecount < ep_txd_lowat && !alloc_txd_block (xmtrRail))
55809 +    {
55810 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name);
55811 +               
55812 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
55813 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
55814 +    }
55815 +
55816 +    spin_lock_irqsave (&xmtr->Lock, flags);
55817 +    list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]) {
55818 +       EP4_TXD_RAIL *txdRail = list_entry (el, EP4_TXD_RAIL, txd_retry_link);
55819 +
55820 +       if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED)
55821 +       {
55822 +           ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n",
55823 +                      XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
55824 +                   
55825 +           nextRunTime = lbolt + HZ;
55826 +       }
55827 +       else
55828 +       {
55829 +           EP_TXD *txd = txdRail->txd_generic.Txd;
55830 +
55831 +           ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n",
55832 +                      XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
55833 +
55834 +           EPRINTF5 (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n",
55835 +                     XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
55836 +           EPRINTF3  (DBG_XMTR, "%s:    done %x data %x\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name,
55837 +                      txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_done),
55838 +                      txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_data));
55839 +
55840 +           EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
55841 +
55842 +           /* remove txd from active list and add to list to call handlers */
55843 +           list_del (&txd->Link);
55844 +           list_add_tail (&txd->Link, &txdList);
55845 +
55846 +           /* remove and free of txdRail */
55847 +           txdRail->txd_retry_time = 0;
55848 +           list_del (&txdRail->txd_retry_link);
55849 +
55850 +           finalise_txd (txd, txdRail);
55851 +
55852 +       }
55853 +    }
55854 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55855 +
55856 +    while (! list_empty (&txdList))
55857 +    {
55858 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
55859 +
55860 +       list_del (&txd->Link);
55861 +
55862 +       ep_xmtr_txd_stat (xmtr,txd);
55863 +
55864 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
55865 +
55866 +       FreeTxd (xmtr, txd);
55867 +    }
55868 +
55869 +    return nextRunTime;
55870 +}
55871 +
55872 +unsigned long
55873 +ep4xmtr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
55874 +{
55875 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) arg;
55876 +    ELAN4_DEV     *dev      = XMTR_TO_DEV(xmtrRail);
55877 +    unsigned long  flags;
55878 +
55879 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
55880 +    while (! list_empty (&xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]))
55881 +    {
55882 +       EP4_TXD_RAIL *txdRail = list_entry (xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY].next, EP4_TXD_RAIL, txd_retry_link);
55883 +
55884 +       if (BEFORE (lbolt, txdRail->txd_retry_time))
55885 +       {
55886 +           if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time))
55887 +               nextRunTime = txdRail->txd_retry_time;
55888 +
55889 +           break;
55890 +       }
55891 +
55892 +       if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT)
55893 +       {
55894 +           txdRail->txd_retry_time = 0;
55895 +
55896 +           list_del (&txdRail->txd_retry_link);
55897 +           
55898 +           /* re-initialise the envelope event */
55899 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
55900 +                               E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
55901 +           
55902 +           EPRINTF3 (DBG_RETRY, "%s: ep4xmtr_retry: re-issue envelope packet to %d for txdRail=%p\n", 
55903 +                     rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId, txdRail);
55904 +           
55905 +           txdRail->txd_main->txd_env = EP4_STATE_ACTIVE;
55906 +           
55907 +           issue_envelope_packet (xmtrRail, txdRail);
55908 +       }
55909 +       else
55910 +       {
55911 +           EPRINTF2 (DBG_RETRY, "%s: ep4xmtr_retry: cannot re-issue envelope packet to %d\n", rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId);
55912 +
55913 +           if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time))
55914 +               nextRunTime = txdRail->txd_retry_time;
55915 +
55916 +           break;
55917 +       }
55918 +    }
55919 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
55920 +    
55921 +    return nextRunTime;
55922 +}
55923 +
55924 +void
55925 +ep4xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
55926 +{
55927 +    EP4_RAIL         *rail   = (EP4_RAIL *) commsRail->Rail;
55928 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
55929 +    EP4_XMTR_RAIL    *xmtrRail;
55930 +    unsigned long     flags;
55931 +    int                      i;
55932 +
55933 +    KMEM_ZALLOC (xmtrRail, EP4_XMTR_RAIL *, sizeof (EP4_XMTR_RAIL), 1);
55934 +
55935 +    spin_lock_init (&xmtrRail->xmtr_freelock);
55936 +    kcondvar_init  (&xmtrRail->xmtr_freesleep);
55937 +    INIT_LIST_HEAD (&xmtrRail->xmtr_freelist);
55938 +    INIT_LIST_HEAD (&xmtrRail->xmtr_blocklist);
55939 +
55940 +    for (i = 0; i < EP4_TXD_NUM_LISTS; i++)
55941 +       INIT_LIST_HEAD (&xmtrRail->xmtr_retrylist[i]);
55942 +    spin_lock_init (&xmtrRail->xmtr_retrylock);
55943 +
55944 +    xmtrRail->xmtr_generic.CommsRail = commsRail;
55945 +    xmtrRail->xmtr_generic.Xmtr      = xmtr;
55946 +
55947 +    xmtrRail->xmtr_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_XMTR_RAIL_MAIN), 0, &xmtrRail->xmtr_main_addr);
55948 +    xmtrRail->xmtr_cq   = elan4_alloccq (&rail->r_ctxt, EP4_XMTR_CQSIZE, CQ_EnableAllBits, CQ_Priority);
55949 +
55950 +    xmtrRail->xmtr_retryops.op_func = ep4xmtr_retry;
55951 +    xmtrRail->xmtr_retryops.op_arg  = xmtrRail;
55952 +
55953 +    ep4_add_retry_ops (rail, &xmtrRail->xmtr_retryops);
55954 +
55955 +    ep4_register_intcookie (rail, &xmtrRail->xmtr_intcookie, xmtrRail->xmtr_main_addr,
55956 +                           poll_interrupt, xmtrRail);
55957 +
55958 +    spin_lock_irqsave (&xmtr->Lock, flags);
55959 +
55960 +    xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->xmtr_generic;
55961 +    xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
55962 +
55963 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55964 +
55965 +    ep_kthread_schedule (&subsys->Thread, lbolt);
55966 +
55967 +    ep_procfs_xmtr_add_rail(&(xmtrRail->xmtr_generic));
55968 +}
55969 +
55970 +void
55971 +ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
55972 +{
55973 +    EP4_RAIL         *rail     = (EP4_RAIL *) commsRail->Rail;
55974 +    EP4_XMTR_RAIL    *xmtrRail = (EP4_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number];
55975 +    unsigned long     flags;
55976 +
55977 +    /* rail mask set as not usable */
55978 +    spin_lock_irqsave (&xmtr->Lock, flags);
55979 +    xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number);
55980 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55981 +
55982 +    ep_procfs_xmtr_del_rail(&(xmtrRail->xmtr_generic));
55983 +
55984 +    /* wait for all txd's for this rail to become free */
55985 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
55986 +    while (xmtrRail->xmtr_freecount != xmtrRail->xmtr_totalcount)
55987 +    {
55988 +       xmtrRail->xmtr_freewaiting++;
55989 +       kcondvar_wait (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock, &flags);
55990 +    }
55991 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
55992 +
55993 +    spin_lock_irqsave (&xmtr->Lock, flags);
55994 +    xmtr->Rails[commsRail->Rail->Number] = NULL;
55995 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55996 +
+    /* all the txd's associated with DescBlocks must be in the freelist */
55998 +    ASSERT (xmtrRail->xmtr_totalcount == xmtrRail->xmtr_freecount);
55999 +
56000 +    /* run through the DescBlockList deleting them */
56001 +    while (!list_empty (&xmtrRail->xmtr_blocklist))
56002 +       free_txd_block (xmtrRail, list_entry(xmtrRail->xmtr_blocklist.next, EP4_TXD_RAIL_BLOCK , blk_link));
56003 +    
56004 +    /* it had better be empty after that */
56005 +    ASSERT ((xmtrRail->xmtr_freecount == 0) && (xmtrRail->xmtr_totalcount == 0));
56006 +
56007 +    ep4_deregister_intcookie (rail, &xmtrRail->xmtr_intcookie);
56008 +
56009 +    ep4_remove_retry_ops (rail, &xmtrRail->xmtr_retryops);
56010 +
56011 +    elan4_freecq (&rail->r_ctxt, xmtrRail->xmtr_cq);
56012 +    ep_free_main (&rail->r_generic, xmtrRail->xmtr_main_addr, sizeof (EP4_XMTR_RAIL_MAIN));
56013 +
56014 +    spin_lock_destroy (&xmtrRail->xmtr_retrylock);
56015 +
56016 +    spin_lock_destroy (&xmtrRail->xmtr_freelock);
56017 +    kcondvar_destroy (&xmtrRail->xmtr_freesleep);
56018 +
56019 +    KMEM_FREE (xmtrRail, sizeof (EP4_XMTR_RAIL));
56020 +}
56021 +
56022 +void
56023 +ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x)
56024 +{
56025 +    EP4_XMTR_RAIL    *xmtrRail     = (EP4_XMTR_RAIL *) x;
56026 +    EP4_RAIL         *rail         = XMTR_TO_RAIL (xmtrRail);
56027 +    unsigned int      freeCount    = 0;
56028 +    unsigned int      pollCount    = 0;
56029 +    unsigned int      stalledCount = 0;
56030 +    unsigned int      retryCount   = 0;
56031 +    struct list_head *el;
56032 +    unsigned long     flags;
56033 +
56034 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
56035 +    list_for_each (el, &xmtrRail->xmtr_freelist)
56036 +       freeCount++;
56037 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
56038 +
56039 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
56040 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL])
56041 +       pollCount++;
56042 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED])
56043 +       stalledCount++;
56044 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY])
56045 +       retryCount++;
56046 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
56047 +
56048 +    (di->func)(di->arg, "        rail=%d free=%d total=%d (%d) (retry %d,%d,%d)\n",
56049 +              rail->r_generic.Number, xmtrRail->xmtr_freecount, xmtrRail->xmtr_totalcount, 
56050 +              freeCount, pollCount, stalledCount, retryCount);
56051 +    (di->func)(di->arg, "        cq %d flowcnt %lld,%lld\n", elan4_cq2num (xmtrRail->xmtr_cq), xmtrRail->xmtr_flowcnt, xmtrRail->xmtr_main->xmtr_flowcnt);
56052 +}
56053 +
56054 +void
56055 +ep4xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t)
56056 +{
56057 +    EP4_TXD_RAIL      *txdRail  = (EP4_TXD_RAIL *) t;
56058 +    EP4_XMTR_RAIL     *xmtrRail = TXD_TO_XMTR(txdRail);
56059 +    EP4_TXD_RAIL_MAIN *txdMain  = txdRail->txd_main;
56060 +    sdramaddr_t        txdElan  = txdRail->txd_elan;
56061 +    EP4_RAIL          *rail     = XMTR_TO_RAIL (xmtrRail);
56062 +    ELAN4_DEV         *dev      = XMTR_TO_DEV (xmtrRail);
56063 +    char             *list     = "";
56064 +    unsigned long      flags;
56065 +
56066 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
56067 +    if (txdRail->txd_retry_time)
56068 +    {
56069 +       if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]))
56070 +           list = " poll";
56071 +       else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]))
56072 +           list = " stalled";
56073 +       else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]))
56074 +           list = " retry";
56075 +       else
56076 +           list = " ERROR";
56077 +    }
56078 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
56079 +
56080 +    (di->func)(di->arg, "      Rail %d txd %p elan %lx (%x) main %p (%x) cookie <%lld%s%s%s%s> ecq %d %s\n", rail->r_generic.Number,
56081 +              txdRail, txdRail->txd_elan, txdRail->txd_elan_addr, txdRail->txd_main, txdRail->txd_main_addr, 
56082 +              EP4_COOKIE_STRING(txdRail->txd_cookie), elan4_cq2num (txdRail->txd_ecq->ecq_cq), list);
56083 +    
56084 +    (di->func)(di->arg, "        env  %016llx %016llx %016llx -> %016llx\n",
56085 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)),
56086 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[0])),
56087 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[1])),
56088 +              txdMain->txd_env);
56089 +    (di->func)(di->arg, "        data %016llx %016llx %016llx -> %016llx\n",
56090 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)),
56091 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[0])),
56092 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[1])),
56093 +              txdMain->txd_data);
56094 +    (di->func)(di->arg, "        done %016llx %016llx %016llx -> %016llx\n",
56095 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)),
56096 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[0])),
56097 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[1])),
56098 +              txdMain->txd_done);
56099 +}
56100 +
56101 +int
56102 +ep4xmtr_check_txd_state (EP_TXD *txd) 
56103 +{
56104 +    EP4_TXD_RAIL  *txdRail  = (EP4_TXD_RAIL *) txd->TxdRail;
56105 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail;
56106 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
56107 +    unsigned long  flags;
56108 +
56109 +    if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED)
56110 +       return 0;
56111 +
56112 +    EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
56113 +
56114 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
56115 +    EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1);
56116 +
56117 +    list_del (&txdRail->txd_retry_link);
56118 +    txdRail->txd_retry_time  = 0;
56119 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
56120 +    
56121 +    /* re-initialise the envelope event */
56122 +    elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
56123 +                       E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
56124 +           
56125 +    unbind_txd_rail (txd, txdRail);
56126 +
56127 +    terminate_txd_rail (xmtrRail, txdRail);
56128 +    free_txd_rail (xmtrRail, txdRail);
56129 +
56130 +    return 1;
56131 +}
56132 +
56133 +void
56134 +ep4xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) {
56135 +    /* no stats here yet */
56136 +    /* EP4_XMTR_RAIL * ep4xmtr_rail = (EP4_XMTR_RAIL *) xmtr_rail; */
56137 +}
56138 +
56139 +
56140 +/*
56141 + * Local variables:
56142 + * c-file-style: "stroustrup"
56143 + * End:
56144 + */
56145 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/ep_procfs.c
56146 ===================================================================
56147 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/ep_procfs.c     2004-02-23 16:02:56.000000000 -0500
56148 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/ep_procfs.c  2005-07-28 14:52:52.881673720 -0400
56149 @@ -0,0 +1,331 @@
56150 +
56151 +/*
56152 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
56153 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
56154 + *
56155 + *    For licensing information please see the supplied COPYING file
56156 + *
56157 + */
56158 +
56159 +#ident "@(#)$Id: ep_procfs.c,v 1.5.6.4 2005/03/20 11:23:33 david Exp $"
56160 +/*      $Source: /cvs/master/quadrics/epmod/ep_procfs.c,v $*/
56161 +
56162 +#include <qsnet/kernel.h>
56163 +
56164 +#include <elan/kcomm.h>
56165 +#include <elan/epsvc.h>
56166 +#include <elan/epcomms.h>
56167 +
56168 +#include "cm.h"
56169 +#include "debug.h"
56170 +#include "conf_linux.h"
56171 +
56172 +#include "kcomm_vp.h"
56173 +#include "kcomm_elan4.h"
56174 +#include "epcomms_elan4.h"
56175 +
56176 +#include <qsnet/procfs_linux.h>
56177 +
56178 +struct proc_dir_entry *ep_procfs_xmtr_root;
56179 +struct proc_dir_entry *ep_procfs_rcvr_root;
56180 +
56181 +static int
56182 +ep_proc_open (struct inode *inode, struct file *file)
56183 +{
56184 +    PROC_PRIVATE *pr;
56185 +    int           pages = 4;
56186 +
56187 +    if ((pr = kmalloc (sizeof (PROC_PRIVATE), GFP_KERNEL)) == NULL)
56188 +       return (-ENOMEM);
56189 +    
56190 +    do {       
56191 +       pr->pr_data_len = PAGESIZE * pages;
56192 +
56193 +       KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1);
56194 +       if (pr->pr_data == NULL) 
56195 +       { 
56196 +           pr->pr_len  = sprintf (pr->pr_data, "Out of Memory\n");
56197 +           break;
56198 +       } 
56199 +       
56200 +       pr->pr_off     = 0;
56201 +       pr->pr_len     = 0;
56202 +       pr->pr_data[0] = 0;
56203 +       
56204 +       pr->pr_di.func  = proc_character_fill;
56205 +       pr->pr_di.arg   = (long)pr;
56206 +       
56207 +       if (!strcmp("debug_xmtr", file->f_dentry->d_iname)) 
56208 +       {   
56209 +           EP_XMTR *xmtr = (EP_XMTR *)(PDE(inode)->data);
56210 +           ep_display_xmtr (&pr->pr_di, xmtr);
56211 +       }
56212 +       
56213 +       if (!strcmp("debug_rcvr", file->f_dentry->d_iname)) 
56214 +       {
56215 +           EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
56216 +           ep_display_rcvr (&pr->pr_di, rcvr, 0);
56217 +       }
56218 +       
56219 +       if (!strcmp("debug_full", file->f_dentry->d_iname)) 
56220 +       {
56221 +           EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
56222 +           ep_display_rcvr (&pr->pr_di, rcvr, 1);
56223 +       }
56224 +
56225 +       if ( pr->pr_len < pr->pr_data_len) 
56226 +           break; /* we managed to get all the output into the buffer */
56227 +
56228 +       pages++;
56229 +       KMEM_FREE ( pr->pr_data,  pr->pr_data_len);
56230 +    } while (1);
56231 +       
56232 +
56233 +    file->private_data = (void *) pr;
56234 +
56235 +    MOD_INC_USE_COUNT;
56236 +    return (0);
56237 +}
56238 +
56239 +struct file_operations ep_proc_operations = 
56240 +{
56241 +    read:      proc_read,
56242 +    open:      ep_proc_open,
56243 +    release:   proc_release,
56244 +};
56245 +
56246 +static int
56247 +proc_read_rcvr_stats(char *page, char **start, off_t off,
56248 +                    int count, int *eof, void *data)
56249 +{
56250 +    EP_RCVR *rcvr = (EP_RCVR *)data;
56251 +    
56252 +    if (rcvr == NULL) 
56253 +       sprintf(page,"proc_read_rcvr_stats rcvr=NULL\n");
56254 +    else {
56255 +       page[0] = 0;
56256 +       ep_rcvr_fillout_stats(rcvr,page);
56257 +    }
56258 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
56259 +}
56260 +
56261 +static int
56262 +proc_read_rcvr_rail_stats(char *page, char **start, off_t off,
56263 +                    int count, int *eof, void *data)
56264 +{
56265 +    EP_RCVR_RAIL *rcvr_rail = (EP_RCVR_RAIL *)data;
56266 +
56267 +    if (rcvr_rail == NULL) {
56268 +       strcpy(page,"proc_read_rcvr_rail_stats rcvr_rail=NULL");
56269 +    } else {
56270 +       page[0] = 0;
56271 +       ep_rcvr_rail_fillout_stats(rcvr_rail, page);
56272 +       EP_RCVR_OP(rcvr_rail,FillOutRailStats)(rcvr_rail,page);
56273 +    }
56274 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
56275 +}
56276 +
56277 +void
56278 +ep_procfs_rcvr_add(EP_RCVR *rcvr)
56279 +{ 
56280 +    /* ep/rcvr/service_number/stats       */
56281 +    /* ep/rcvr/service_number/debug_rcvr  */
56282 +    /* ep/rcvr/service_number/debug_full  */
56283 +    struct proc_dir_entry *p;
56284 +    char str[32];
56285 +
56286 +    sprintf(str,"%d", rcvr->Service);
56287 +
56288 +    rcvr->procfs_root = proc_mkdir (str, ep_procfs_rcvr_root);
56289 +
56290 +    if ((p = create_proc_entry ("stats", 0,  rcvr->procfs_root)) != NULL)
56291 +    {
56292 +       p->write_proc = NULL;
56293 +       p->read_proc  = proc_read_rcvr_stats;
56294 +       p->data       = rcvr;
56295 +       p->owner      = THIS_MODULE;
56296 +    }
56297 +
56298 +    if ((p = create_proc_entry ("debug_rcvr", 0, rcvr->procfs_root)) != NULL)
56299 +    {
56300 +       p->proc_fops = &ep_proc_operations;
56301 +       p->owner     = THIS_MODULE;
56302 +       p->data      = rcvr;
56303 +    }
56304 +
56305 +    if ((p = create_proc_entry ("debug_full", 0, rcvr->procfs_root)) != NULL)
56306 +    {
56307 +       p->proc_fops = &ep_proc_operations;
56308 +       p->owner     = THIS_MODULE;
56309 +       p->data      = rcvr;
56310 +    }
56311 +}
56312 +
56313 +void
56314 +ep_procfs_rcvr_del(EP_RCVR *rcvr)
56315 +{  
56316 +    char str[32];
56317 +    sprintf(str,"%d", rcvr->Service);
56318 +
56319 +    remove_proc_entry ("debug_full", rcvr->procfs_root);
56320 +    remove_proc_entry ("debug_rcvr", rcvr->procfs_root);
56321 +    remove_proc_entry ("stats",      rcvr->procfs_root);
56322 +
56323 +    remove_proc_entry (str, ep_procfs_rcvr_root);
56324 +}
56325 +
56326 +void 
56327 +ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail)
56328 +{
56329 +    /* ep/rcvr/service_number/railN/stats */
56330 +
56331 +    struct proc_dir_entry *p;
56332 +    char str[32];
56333 +    sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number);
56334 +
56335 +    rcvrRail->procfs_root = proc_mkdir (str, rcvrRail->Rcvr->procfs_root);
56336 +    
56337 +    if ((p = create_proc_entry ("stats", 0,  rcvrRail->procfs_root)) != NULL)
56338 +    {
56339 +       p->write_proc = NULL;
56340 +       p->read_proc  = proc_read_rcvr_rail_stats;
56341 +       p->data       = rcvrRail;
56342 +       p->owner      = THIS_MODULE;
56343 +    } 
56344 +}
56345 +
56346 +void 
56347 +ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail)
56348 +{
56349 +    char str[32];
56350 +    sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number);
56351 +
56352 +    remove_proc_entry ("stats", rcvrRail->procfs_root);
56353 +
56354 +    remove_proc_entry (str, rcvrRail->Rcvr->procfs_root);
56355 +}
56356 +
56357 +
56358 +
56359 +
56360 +static int
56361 +proc_read_xmtr_stats(char *page, char **start, off_t off,
56362 +                    int count, int *eof, void *data)
56363 +{
56364 +    EP_XMTR *xmtr = (EP_XMTR *)data;
56365 +
56366 +    if (xmtr == NULL) 
56367 +       strcpy(page,"proc_read_xmtr_stats xmtr=NULL\n");
56368 +    else {
56369 +       page[0] = 0;
56370 +       ep_xmtr_fillout_stats(xmtr, page);
56371 +    }
56372 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
56373 +}
56374 +
56375 +static int
56376 +proc_read_xmtr_rail_stats(char *page, char **start, off_t off,
56377 +                    int count, int *eof, void *data)
56378 +{
56379 +    EP_XMTR_RAIL *xmtr_rail = (EP_XMTR_RAIL *)data;
56380 +
56381 +    if (xmtr_rail == NULL) 
56382 +       strcpy(page,"proc_read_xmtr_rail_stats xmtr_rail=NULL\n");
56383 +    else {
56384 +       page[0] = 0;
56385 +       ep_xmtr_rail_fillout_stats(xmtr_rail, page);
56386 +       EP_XMTR_OP(xmtr_rail,FillOutRailStats)(xmtr_rail,page);
56387 +    }
56388 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
56389 +}
56390 +
56391 +void
56392 +ep_procfs_xmtr_add(EP_XMTR *xmtr)
56393 +{ 
56394 +    /* ep/xmtr/service_number/stats       */
56395 +    /* ep/xmtr/service_number/debug_xmtr  */
56396 +    struct proc_dir_entry *p;
56397 +    char str[32];
56398 +
56399 +    sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr);
56400 +
56401 +    xmtr->procfs_root = proc_mkdir (str, ep_procfs_xmtr_root);
56402 +
56403 +    if ((p = create_proc_entry ("stats", 0,  xmtr->procfs_root)) != NULL)
56404 +    {
56405 +       p->write_proc = NULL;
56406 +       p->read_proc  = proc_read_xmtr_stats;
56407 +       p->data       = xmtr;
56408 +       p->owner      = THIS_MODULE;
56409 +    } 
56410 +
56411 +    if ((p = create_proc_entry ("debug_xmtr", 0, xmtr->procfs_root)) != NULL)
56412 +    {
56413 +       p->proc_fops = &ep_proc_operations;
56414 +       p->owner     = THIS_MODULE;
56415 +       p->data      = xmtr;
56416 +    }
56417 +}
56418 +
56419 +void
56420 +ep_procfs_xmtr_del(EP_XMTR *xmtr)
56421 +{  
56422 +    char str[32];
56423 +    sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr);
56424 +
56425 +    remove_proc_entry ("stats",      xmtr->procfs_root);
56426 +    remove_proc_entry ("debug_xmtr", xmtr->procfs_root);
56427 +
56428 +    remove_proc_entry (str, ep_procfs_xmtr_root);
56429 +}
56430 +
56431 +void 
56432 +ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail)
56433 +{
56434 +    /* ep/xmtr/service_number/railN/stats */
56435 +    
56436 +    struct proc_dir_entry *p;
56437 +    char str[32];
56438 +    sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number);
56439 +
56440 +    xmtrRail->procfs_root = proc_mkdir (str, xmtrRail->Xmtr->procfs_root);
56441 +
56442 +    if ((p = create_proc_entry ("stats", 0,  xmtrRail->procfs_root)) != NULL)
56443 +    {
56444 +       p->write_proc = NULL;
56445 +       p->read_proc  = proc_read_xmtr_rail_stats;
56446 +       p->data       = xmtrRail;
56447 +       p->owner      = THIS_MODULE;
56448 +    } 
56449 +}
56450 +
56451 +void 
56452 +ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail)
56453 +{
56454 +    char str[32];
56455 +    sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number);
56456 +
56457 +    remove_proc_entry ("stats", xmtrRail->procfs_root);
56458 +
56459 +    remove_proc_entry (str, xmtrRail->Xmtr->procfs_root);
56460 +}
56461 +
56462 +void
56463 +ep_procfs_rcvr_xmtr_init(void)
56464 +{
56465 +    ep_procfs_rcvr_root = proc_mkdir ("rcvr", ep_procfs_root);
56466 +    ep_procfs_xmtr_root = proc_mkdir ("xmtr", ep_procfs_root); 
56467 +}
56468 +
56469 +void
56470 +ep_procfs_rcvr_xmtr_fini(void)
56471 +{
56472 +    remove_proc_entry ("rcvr", ep_procfs_root);
56473 +    remove_proc_entry ("xmtr", ep_procfs_root);
56474 +}
56475 +
56476 +/*
56477 + * Local variables:
56478 + * c-file-style: "stroustrup"
56479 + * End:
56480 + */
56481 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kalloc.c
56482 ===================================================================
56483 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kalloc.c        2004-02-23 16:02:56.000000000 -0500
56484 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kalloc.c     2005-07-28 14:52:52.882673568 -0400
56485 @@ -0,0 +1,677 @@
56486 +/*
56487 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
56488 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
56489 + *
56490 + *    For licensing information please see the supplied COPYING file
56491 + *
56492 + */
56493 +
56494 +#ident "@(#)$Id: kalloc.c,v 1.17.8.2 2004/12/14 10:19:14 mike Exp $"
56495 +/*      $Source: /cvs/master/quadrics/epmod/kalloc.c,v $ */
56496 +
56497 +#include <qsnet/kernel.h>
56498 +
56499 +#include <elan/kcomm.h>
56500 +
56501 +#include "debug.h"
56502 +
56503 +static void
56504 +HashInPool (EP_ALLOC *alloc, EP_POOL *pool)
56505 +{
56506 +    int idx0 = HASH (pool->Handle.nmh_nmd.nmd_addr);
56507 +    int idx1 = HASH (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len);
56508 +
56509 +    list_add (&pool->HashBase, &alloc->HashBase[idx0]);
56510 +    list_add (&pool->HashTop, &alloc->HashTop[idx1]);
56511 +}
56512 +
56513 +static void
56514 +HashOutPool (EP_ALLOC *alloc, EP_POOL *pool)
56515 +{
56516 +    list_del (&pool->HashBase);
56517 +    list_del (&pool->HashTop);
56518 +}
56519 +
56520 +static EP_POOL *
56521 +LookupPool (EP_ALLOC *alloc, EP_ADDR addr)
56522 +{
56523 +    struct list_head *el;
56524 +    
56525 +    list_for_each (el, &alloc->HashBase[HASH(addr)]) {
56526 +       EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56527 +       
56528 +       if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len))
56529 +           return (pool);
56530 +    }
56531 +    
56532 +    list_for_each (el, &alloc->HashTop[HASH(addr)]) {
56533 +       EP_POOL *pool = list_entry (el, EP_POOL, HashTop);
56534 +       
56535 +       if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len))
56536 +           return (pool);
56537 +    }
56538 +    
56539 +    return (NULL);
56540 +}
56541 +
56542 +static EP_POOL *
56543 +AllocatePool (EP_ALLOC *alloc, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr)
56544 +{
56545 +    EP_ADDR base = 0;
56546 +    EP_POOL *pool;
56547 +    EP_RAIL *rail;
56548 +    int i, railmask = 0;
56549 +    struct list_head *el;
56550 +
56551 +    KMEM_ZALLOC (pool, EP_POOL *, sizeof (EP_POOL), !(attr & EP_NO_SLEEP));
56552 +    
56553 +    if (pool == NULL)
56554 +       return (NULL);
56555 +    
56556 +    if (addr != 0)
56557 +       base = addr;
56558 +    else
56559 +    {
56560 +       for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
56561 +       {
56562 +           KMEM_ZALLOC (pool->Bitmaps[i - LN2_MIN_SIZE], bitmap_t *, BT_BITOUL(1 << (LN2_MAX_SIZE-i)) * sizeof (bitmap_t), !(attr & EP_NO_SLEEP));
56563 +           if (pool->Bitmaps[i - LN2_MIN_SIZE] == NULL)
56564 +               goto failed;
56565 +       }
56566 +    
56567 +       if ((base = ep_rmalloc (alloc->ResourceMap, size, !(attr & EP_NO_SLEEP))) == 0)
56568 +           goto failed;
56569 +    }
56570 +
56571 +    switch (alloc->Type)
56572 +    {
56573 +    case EP_ALLOC_TYPE_PRIVATE_SDRAM:
56574 +       rail = alloc->Data.Private.Rail;
56575 +
56576 +       if ((pool->Buffer.Sdram = rail->Operations.SdramAlloc (rail, base, size)) == 0)
56577 +           goto failed;
56578 +
56579 +       ep_perrail_sdram_map (rail, base, pool->Buffer.Sdram, size, perm, attr);
56580 +
56581 +       pool->Handle.nmh_nmd.nmd_addr = base;
56582 +       pool->Handle.nmh_nmd.nmd_len  = size;
56583 +       break;
56584 +       
56585 +    case EP_ALLOC_TYPE_PRIVATE_MAIN:
56586 +        KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP));
56587 +       if (pool->Buffer.Ptr == 0)
56588 +           goto failed;
56589 +
56590 +       ep_perrail_kaddr_map (alloc->Data.Private.Rail, base, pool->Buffer.Ptr, size, perm, attr);
56591 +
56592 +       pool->Handle.nmh_nmd.nmd_addr = base;
56593 +       pool->Handle.nmh_nmd.nmd_len  = size;
56594 +       break;
56595 +
56596 +    case EP_ALLOC_TYPE_SHARED_MAIN:
56597 +        KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP));
56598 +       if (pool->Buffer.Ptr == 0)
56599 +           goto failed;
56600 +
56601 +       list_for_each (el, &alloc->Data.Shared.Rails) {
56602 +           EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
56603 +
56604 +           ep_perrail_kaddr_map (rail, base, pool->Buffer.Ptr, size, perm, attr);
56605 +
56606 +           railmask |= (1 << rail->Number);
56607 +       }
56608 +       pool->Handle.nmh_nmd.nmd_addr = base;
56609 +       pool->Handle.nmh_nmd.nmd_len  = size;
56610 +       pool->Handle.nmh_nmd.nmd_attr = EP_NMD_ATTR (alloc->Data.Shared.System->Position.pos_nodeid, railmask);
56611 +
56612 +       ep_nmh_insert (&alloc->Data.Shared.System->MappingTable, &pool->Handle);
56613 +       break;
56614 +
56615 +    default:
56616 +       goto failed;
56617 +    }
56618 +
56619 +    return (pool);
56620 +    
56621 + failed:
56622 +    if (addr == 0 && base)
56623 +       ep_rmfree (alloc->ResourceMap, size, base);
56624 +
56625 +    for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
56626 +       if (pool->Bitmaps[i - LN2_MIN_SIZE] != NULL)
56627 +           KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t));
56628 +    
56629 +    KMEM_FREE (pool, sizeof (EP_POOL));
56630 +    return (NULL);
56631 +}
56632 +
56633 +static void
56634 +FreePool (EP_ALLOC *alloc, EP_POOL *pool)
56635 +{
56636 +    struct list_head *el;
56637 +    int i;
56638 +
56639 +    switch (alloc->Type)
56640 +    {
56641 +    case EP_ALLOC_TYPE_PRIVATE_SDRAM:
56642 +       ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
56643 +
56644 +       alloc->Data.Private.Rail->Operations.SdramFree (alloc->Data.Private.Rail, pool->Buffer.Sdram, pool->Handle.nmh_nmd.nmd_len);
56645 +       break;
56646 +       
56647 +    case EP_ALLOC_TYPE_PRIVATE_MAIN:
56648 +       ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
56649 +
56650 +       KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len));
56651 +       break;
56652 +
56653 +    case EP_ALLOC_TYPE_SHARED_MAIN:
56654 +       ep_nmh_remove (&alloc->Data.Shared.System->MappingTable, &pool->Handle);
56655 +
56656 +       list_for_each (el, &alloc->Data.Shared.Rails) {
56657 +           EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
56658 +
56659 +           ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
56660 +       }
56661 +
56662 +       KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len));
56663 +       break;
56664 +    }
56665 +    
56666 +    if (pool->Bitmaps[0])
56667 +    {
56668 +       ep_rmfree (alloc->ResourceMap, pool->Handle.nmh_nmd.nmd_len, pool->Handle.nmh_nmd.nmd_addr);
56669 +    
56670 +       for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
56671 +           KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t));
56672 +    }
56673 +    
56674 +    KMEM_FREE (pool, sizeof (EP_POOL));
56675 +}
56676 +
56677 +static int
56678 +AddRail (EP_ALLOC *alloc, EP_RAIL *rail)
56679 +{
56680 +    struct list_head *el;
56681 +    EP_RAIL_ENTRY *l;
56682 +    unsigned long flags;
56683 +    int i;
56684 +
56685 +    ASSERT (alloc->Type == EP_ALLOC_TYPE_SHARED_MAIN);
56686 +
56687 +    KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1);
56688 +
56689 +    if (l == NULL)
56690 +       return (ENOMEM);
56691 +
56692 +    l->Rail = rail;
56693 +
56694 +    spin_lock_irqsave (&alloc->Lock, flags);
56695 +    for (i = 0; i < NHASH; i++)
56696 +    {
56697 +       list_for_each (el, &alloc->HashBase[i]) {
56698 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56699 +
56700 +           ep_perrail_kaddr_map (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Buffer.Ptr, 
56701 +                                 pool->Handle.nmh_nmd.nmd_len, EP_PERM_WRITE, EP_NO_SLEEP);
56702 +
56703 +           pool->Handle.nmh_nmd.nmd_attr |= EP_NMD_ATTR (0, 1 << rail->Number);
56704 +       }
56705 +    }
56706 +
56707 +    list_add (&l->Link, &alloc->Data.Shared.Rails);
56708 +
56709 +    spin_unlock_irqrestore (&alloc->Lock, flags); 
56710 +    return (0);
56711 +}
56712 +
56713 +static void
56714 +RemoveRail (EP_ALLOC *alloc, EP_RAIL *rail)
56715 +{
56716 +    struct list_head *el;
56717 +    unsigned long flags;
56718 +    int i;
56719 +
56720 +    spin_lock_irqsave (&alloc->Lock, flags);
56721 +    for (i = 0; i < NHASH; i++)
56722 +    {
56723 +       list_for_each (el, &alloc->HashBase[i]) {
56724 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56725 +
56726 +           ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
56727 +
56728 +           pool->Handle.nmh_nmd.nmd_attr &= ~EP_NMD_ATTR (0, 1 << rail->Number);
56729 +       }
56730 +    }
56731 +
56732 +    list_for_each (el, &alloc->Data.Shared.Rails) {
56733 +       EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link);
56734 +       if (tmp->Rail == rail)
56735 +       {
56736 +           list_del (el);
56737 +           KMEM_FREE(tmp, sizeof (EP_RAIL_ENTRY));
56738 +           break;
56739 +       }
56740 +    }
56741 +
56742 +    spin_unlock_irqrestore (&alloc->Lock, flags);
56743 +}
56744 +
56745 +static EP_POOL *
56746 +AllocateBlock (EP_ALLOC *alloc, unsigned size, EP_ATTRIBUTE attr, int *offset)
56747 +{
56748 +    int block, j, k;
56749 +    unsigned long flags;
56750 +    EP_POOL *pool;
56751 +
56752 +
56753 +    if (size > MAX_SIZE)
56754 +    {
56755 +       if ((attr & EP_NO_ALLOC) || (pool  = AllocatePool (alloc, 0, size, alloc->Perm, attr)) == NULL)
56756 +           return (NULL);
56757 +
56758 +       spin_lock_irqsave (&alloc->Lock, flags);
56759 +       HashInPool (alloc, pool);
56760 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56761 +
56762 +       *offset = 0;
56763 +
56764 +       return pool;
56765 +    }
56766 +
56767 +    spin_lock_irqsave (&alloc->Lock, flags);
56768 +
56769 +    /* Round up size to next power of 2 */
56770 +    for (k = LN2_MIN_SIZE; (1 << k) < size; k++)
56771 +       ;
56772 +    
56773 +    /* k now has ln2 of the size to allocate. */
56774 +    /* find the free list with the smallest block we can use*/
56775 +    for (j = k; j <= LN2_MAX_SIZE && list_empty (&alloc->Freelists[j - LN2_MIN_SIZE]); j++)
56776 +       ;
56777 +    
56778 +    /* j has ln2 of the smallest size block we can use */
56779 +    if (j < LN2_MAX_SIZE)
56780 +    {
56781 +       int nbits = 1 << (LN2_MAX_SIZE-j);
56782 +       
56783 +       pool  = list_entry (alloc->Freelists[j - LN2_MIN_SIZE].next, EP_POOL, Link[j - LN2_MIN_SIZE]);
56784 +       block = (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) << j);
56785 +       
56786 +       BT_CLEAR (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j);
56787 +       
56788 +       if (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) == -1)
56789 +           list_del (&pool->Link[j - LN2_MIN_SIZE]);
56790 +    }
56791 +    else
56792 +    {
56793 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56794 +       
56795 +       if ((attr & EP_NO_ALLOC) || (pool  = AllocatePool (alloc, 0, MAX_SIZE, alloc->Perm, attr)) == NULL)
56796 +           return (NULL);
56797 +
56798 +       block = 0;
56799 +       j = LN2_MAX_SIZE;
56800 +       
56801 +       spin_lock_irqsave (&alloc->Lock, flags);
56802 +       
56803 +       HashInPool (alloc, pool);
56804 +    }
56805 +    
56806 +    /* Split it until the buddies are the correct size, putting one
56807 +     * buddy back on the free list and continuing to split the other */
56808 +    while (--j >= k)
56809 +    {
56810 +       list_add (&pool->Link[j - LN2_MIN_SIZE], &alloc->Freelists[j - LN2_MIN_SIZE]);
56811 +       
56812 +       BT_SET (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j);
56813 +       
56814 +       block += (1 << j);
56815 +    }
56816 +    spin_unlock_irqrestore (&alloc->Lock, flags);
56817 +
56818 +    *offset = block;
56819 +
56820 +    return (pool);
56821 +}
56822 +
56823 +static void
56824 +FreeBlock (EP_ALLOC *alloc, EP_ADDR addr, unsigned size)
56825 +{
56826 +    EP_POOL *pool;
56827 +    int  k, block = 0;
56828 +    unsigned long flags;
56829 +    
56830 +    spin_lock_irqsave (&alloc->Lock, flags);
56831 +    /* Round up size to next power of 2 */
56832 +    for (k = LN2_MIN_SIZE; (1 << k) < size; k++)
56833 +       ;
56834 +
56835 +    /* Find the pool containing this block */
56836 +    pool = LookupPool (alloc, addr);
56837 +
56838 +    /* It must exist */
56839 +    ASSERT (pool != NULL);
56840 +
56841 +    /* If we're freeing a subset of it, then update the bitmaps */
56842 +    if (size <= MAX_SIZE)
56843 +    {
56844 +       ASSERT (BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (addr - pool->Handle.nmh_nmd.nmd_addr) >> k) == 0);
56845 +       
56846 +       block = addr - pool->Handle.nmh_nmd.nmd_addr;
56847 +       
56848 +       while (k < LN2_MAX_SIZE && BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1))
56849 +       {
56850 +           BT_CLEAR (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1);
56851 +           
56852 +           if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1)
56853 +               list_del (&pool->Link[k - LN2_MIN_SIZE]);
56854 +           
56855 +           k++;
56856 +       }
56857 +    }
56858 +
56859 +    if (k >= LN2_MAX_SIZE)
56860 +    {
56861 +       HashOutPool (alloc, pool);
56862 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56863 +
56864 +       FreePool (alloc, pool);
56865 +    }
56866 +    else
56867 +    {
56868 +       if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1)
56869 +           list_add (&pool->Link[k - LN2_MIN_SIZE], &alloc->Freelists[k - LN2_MIN_SIZE]);
56870 +
56871 +       BT_SET (pool->Bitmaps[k - LN2_MIN_SIZE], block >> k);
56872 +
56873 +       spin_unlock_irqrestore (&alloc->Lock, flags);
56874 +    }
56875 +}
56876 +
56877 +static void
56878 +InitialiseAllocator (EP_ALLOC *alloc, EP_ALLOC_TYPE type, unsigned int perm, EP_RMAP *rmap)
56879 +{
56880 +    int i;
56881 +
56882 +    spin_lock_init (&alloc->Lock);
56883 +
56884 +    alloc->Type        = type;
56885 +    alloc->ResourceMap = rmap;
56886 +    alloc->Perm        = perm;
56887 +
56888 +    for (i = 0; i < NHASH; i++)
56889 +    {
56890 +       (&alloc->HashBase[i])->next = &alloc->HashBase[i];
56891 +
56892 +       INIT_LIST_HEAD (&alloc->HashBase[i]);
56893 +       INIT_LIST_HEAD (&alloc->HashTop[i]);
56894 +    }
56895 +    
56896 +    for (i = 0; i < NUM_FREELISTS; i++)
56897 +       INIT_LIST_HEAD (&alloc->Freelists[i]);
56898 +}
56899 +
56900 +static void
56901 +DestroyAllocator (EP_ALLOC *alloc)
56902 +{
56903 +    struct list_head *el, *next;
56904 +    int i;
56905 +
56906 +    for (i = 0; i < NHASH; i++)
56907 +    {
56908 +       list_for_each_safe (el, next, &alloc->HashBase[i]) { 
56909 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56910 +
56911 +           printk ("!!DestroyAllocator: pool=%p type=%d addr=%x len=%x\n", pool, alloc->Type,
56912 +                   pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
56913 +
56914 +           list_del (&pool->HashBase);
56915 +           list_del (&pool->HashTop);
56916 +
56917 +           // XXXX: FreePool (alloc, pool);
56918 +       }
56919 +    }
56920 +
56921 +    spin_lock_destroy (&alloc->Lock);
56922 +}
56923 +
56924 +void
56925 +ep_display_alloc (EP_ALLOC *alloc)
56926 +{
56927 +    struct list_head *el;
56928 +    int i;
56929 +    int npools = 0;
56930 +    int nbytes = 0;
56931 +    int nfree = 0;
56932 +    unsigned long flags;
56933 +
56934 +    spin_lock_irqsave (&alloc->Lock, flags);
56935 +
56936 +    ep_debugf (DBG_DEBUG, "Kernel comms memory allocator %p type %d\n", alloc, alloc->Type);
56937 +    for (i = 0; i < NHASH; i++)
56938 +    {
56939 +       list_for_each (el, &alloc->HashBase[i]) {
56940 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
56941 +
56942 +           ep_debugf (DBG_DEBUG, "  POOL %4x: %p -> %x.%x\n", i, pool, pool->Handle.nmh_nmd.nmd_addr,
56943 +                      pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len);
56944 +
56945 +           npools++;
56946 +           nbytes += pool->Handle.nmh_nmd.nmd_len;
56947 +       }
56948 +    }
56949 +    
56950 +    for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i++)
56951 +    {
56952 +       int n = 0;
56953 +
56954 +       list_for_each (el, &alloc->Freelists[i - LN2_MIN_SIZE]) {
56955 +           EP_POOL *pool  = list_entry (el, EP_POOL, Link[i - LN2_MIN_SIZE]);
56956 +           int      nbits = bt_nbits (pool->Bitmaps[i - LN2_MIN_SIZE], 1 << (LN2_MAX_SIZE - i));
56957 +
56958 +           n += nbits;
56959 +           nfree += (nbits << i);
56960 +       }
56961 +       
56962 +       if (n != 0)
56963 +           ep_debugf (DBG_DEBUG, "  SIZE %5d : num %d\n", (1 << i), n);
56964 +    }
56965 +    ep_debugf (DBG_DEBUG, "%d pools with %d bytes and %d bytes free\n", npools, nbytes, nfree);
56966 +
56967 +    spin_unlock_irqrestore (&alloc->Lock, flags);
56968 +}
56969 +
56970 +/* per-rail allocators */
56971 +void
56972 +ep_alloc_init (EP_RAIL *rail)
56973 +{
56974 +    EP_RMAP *rmap = ep_rmallocmap (EP_PRIVATE_RMAP_SIZE, "PrivateMap", 1);
56975 +
56976 +    ep_rmfree (rmap, EP_PRIVATE_TOP-EP_PRIVATE_BASE, EP_PRIVATE_BASE);
56977 +
56978 +    InitialiseAllocator (&rail->ElanAllocator, EP_ALLOC_TYPE_PRIVATE_SDRAM, EP_PERM_ALL, rmap);
56979 +    InitialiseAllocator (&rail->MainAllocator, EP_ALLOC_TYPE_PRIVATE_MAIN, EP_PERM_WRITE, rmap);
56980 +
56981 +    rail->ElanAllocator.Data.Private.Rail = rail;
56982 +    rail->MainAllocator.Data.Private.Rail = rail;
56983 +}
56984 +
56985 +void
56986 +ep_alloc_fini (EP_RAIL *rail)
56987 +{
56988 +    EP_RMAP *rmap = rail->ElanAllocator.ResourceMap;
56989 +
56990 +    DestroyAllocator (&rail->ElanAllocator);
56991 +    DestroyAllocator (&rail->MainAllocator);
56992 +    
56993 +    ep_rmfreemap (rmap);
56994 +}
56995 +
56996 +sdramaddr_t
56997 +ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr)
56998 +{
56999 +    EP_POOL *pool = AllocatePool (&rail->ElanAllocator, addr, size, perm, attr);
57000 +    unsigned long flags;
57001 +
57002 +    if (pool == NULL)
57003 +       return (0);
57004 +
57005 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
57006 +    HashInPool (&rail->ElanAllocator, pool);
57007 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
57008 +
57009 +    return (pool->Buffer.Sdram);
57010 +}
57011 +
57012 +void
57013 +ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr)
57014 +{
57015 +    EP_POOL *pool;
57016 +    unsigned long flags;
57017 +
57018 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
57019 +    pool = LookupPool (&rail->ElanAllocator, addr);
57020 +    
57021 +    HashOutPool (&rail->ElanAllocator, pool);
57022 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
57023 +    
57024 +    FreePool (&rail->ElanAllocator, pool);
57025 +}
57026 +
57027 +sdramaddr_t
57028 +ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp)
57029 +{
57030 +    int             offset;
57031 +    EP_POOL *pool;
57032 +
57033 +    if ((pool = AllocateBlock (&rail->ElanAllocator, size, attr, &offset)) == NULL)
57034 +       return (0);
57035 +    
57036 +    *addrp  = pool->Handle.nmh_nmd.nmd_addr + offset;
57037 +
57038 +    return (pool->Buffer.Sdram + offset);
57039 +}
57040 +
57041 +void
57042 +ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size)
57043 +{
57044 +    FreeBlock (&rail->ElanAllocator, addr, size);
57045 +}
57046 +
57047 +void *
57048 +ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp)
57049 +{
57050 +    int             offset;
57051 +    EP_POOL *pool;
57052 +
57053 +    if ((pool = AllocateBlock (&rail->MainAllocator, size, attr, &offset)) == NULL)
57054 +       return (NULL);
57055 +    
57056 +    *addrp  = pool->Handle.nmh_nmd.nmd_addr + offset;
57057 +
57058 +    return ((void *) ((unsigned long) pool->Buffer.Ptr + offset));
57059 +}
57060 +
57061 +void
57062 +ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size)
57063 +{
57064 +    FreeBlock (&rail->MainAllocator, addr, size);
57065 +}
57066 +
57067 +sdramaddr_t
57068 +ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr)
57069 +{
57070 +    EP_POOL    *pool;
57071 +    sdramaddr_t res;
57072 +    unsigned long flags;
57073 +
57074 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
57075 +    if ((pool = LookupPool (&rail->ElanAllocator, addr)) == NULL)
57076 +       res = 0;
57077 +    else
57078 +       res = pool->Buffer.Sdram + (addr - pool->Handle.nmh_nmd.nmd_addr);
57079 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
57080 +
57081 +    return (res);
57082 +}
57083 +
57084 +void *
57085 +ep_elan2main (EP_RAIL *rail, EP_ADDR addr)
57086 +{
57087 +    EP_POOL *pool;
57088 +    void *res;
57089 +    unsigned long flags;
57090 +
57091 +    spin_lock_irqsave (&rail->MainAllocator.Lock, flags);
57092 +    if ((pool = LookupPool (&rail->MainAllocator, addr)) == NULL)
57093 +       res = NULL;
57094 +    else
57095 +       res = (void *) ((unsigned long) pool->Buffer.Ptr + (addr - pool->Handle.nmh_nmd.nmd_addr));
57096 +    spin_unlock_irqrestore (&rail->MainAllocator.Lock, flags);
57097 +
57098 +    return (res);
57099 +}
57100 +
57101 +/* shared allocators */
57102 +int
57103 +ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail)
57104 +{
57105 +    return (AddRail (&sys->Allocator, rail));
57106 +}
57107 +
57108 +void
57109 +ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail)
57110 +{
57111 +    RemoveRail (&sys->Allocator, rail);
57112 +}
57113 +
57114 +void
57115 +ep_shared_alloc_init (EP_SYS *sys)
57116 +{
57117 +    EP_RMAP *rmap = ep_rmallocmap (EP_SHARED_RMAP_SIZE, "shared_alloc_map", 1);
57118 +
57119 +    ep_rmfree (rmap, EP_SHARED_TOP - EP_SHARED_BASE, EP_SHARED_BASE);
57120 +
57121 +    InitialiseAllocator (&sys->Allocator, EP_ALLOC_TYPE_SHARED_MAIN, EP_PERM_WRITE, rmap);
57122 +
57123 +    INIT_LIST_HEAD (&sys->Allocator.Data.Shared.Rails);
57124 +
57125 +    sys->Allocator.Data.Shared.System = sys;
57126 +}
57127 +
57128 +void
57129 +ep_shared_alloc_fini (EP_SYS *sys)
57130 +{
57131 +    EP_RMAP *rmap = sys->Allocator.ResourceMap;
57132 +
57133 +    DestroyAllocator (&sys->Allocator);
57134 +
57135 +    ep_rmfreemap (rmap);
57136 +}
57137 +
57138 +void *
57139 +ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd)
57140 +{
57141 +    int offset;
57142 +    EP_POOL *pool;
57143 +
57144 +    if ((pool = AllocateBlock (&sys->Allocator, size, attr, &offset)) == NULL)
57145 +       return (NULL);
57146 +
57147 +    ep_nmd_subset (nmd, &pool->Handle.nmh_nmd, offset, size);
57148 +
57149 +    return ((void *) ((unsigned long) pool->Buffer.Ptr + offset));
57150 +}
57151 +
57152 +void
57153 +ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd)
57154 +{
57155 +    FreeBlock (&sys->Allocator, nmd->nmd_addr, nmd->nmd_len);
57156 +}
57157 +
57158 +/*
57159 + * Local variables:
57160 + * c-file-style: "stroustrup"
57161 + * End:
57162 + */
57163 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm.c
57164 ===================================================================
57165 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kcomm.c 2004-02-23 16:02:56.000000000 -0500
57166 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm.c      2005-07-28 14:52:52.885673112 -0400
57167 @@ -0,0 +1,1448 @@
57168 +/*
57169 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
57170 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
57171 + *
57172 + *    For licensing information please see the supplied COPYING file
57173 + *
57174 + */
57175 +
57176 +#ident "@(#)$Id: kcomm.c,v 1.50.2.9 2004/12/09 10:02:42 david Exp $"
57177 +/*      $Source: /cvs/master/quadrics/epmod/kcomm.c,v $ */
57178 +
57179 +#include <qsnet/kernel.h>
57180 +#include <qsnet/kthread.h>
57181 +
57182 +#include <elan/kcomm.h>
57183 +#include <elan/epsvc.h>
57184 +#include <elan/epcomms.h>
57185 +
57186 +#include "cm.h"
57187 +#include "debug.h"
57188 +
57189 +int MaxSwitchLevels = 5;                               /* Max 1024 sized machine */
57190 +
57191 +static char *NodeStateNames[EP_NODE_NUM_STATES] = 
57192 +{
57193 +    "Disconnected",
57194 +    "Connecting",
57195 +    "Connnected",
57196 +    "LeavingConnected",
57197 +    "LocalPassivate",
57198 +    "RemotePassivate",
57199 +    "Passivated",
57200 +    "Disconnecting",
57201 +};
57202 +
57203 +static void
57204 +ep_xid_cache_fill (EP_SYS *sys, EP_XID_CACHE *cache)
57205 +{
57206 +    unsigned long flags;
57207 +
57208 +    spin_lock_irqsave (&sys->XidLock, flags);
57209 +
57210 +    cache->Current = sys->XidNext;
57211 +    cache->Last    = cache->Current + EP_XID_CACHE_CHUNKS-1;
57212 +
57213 +    sys->XidNext += EP_XID_CACHE_CHUNKS;
57214 +
57215 +    spin_unlock_irqrestore (&sys->XidLock, flags);
57216 +}
57217 +
57218 +EP_XID
57219 +ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache)
57220 +{
57221 +    EP_XID xid;
57222 +    
57223 +    if (cache->Current == cache->Last)
57224 +       ep_xid_cache_fill (sys, cache);
57225 +
57226 +    xid.Generation = sys->XidGeneration;
57227 +    xid.Handle     = cache->Handle;
57228 +    xid.Unique     = cache->Current++;
57229 +
57230 +    return (xid);
57231 +}
57232 +
57233 +void
57234 +ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache)
57235 +{
57236 +    /* Stall manager thread - it doesn't lock the XidCacheList */
57237 +    ep_kthread_stall (&sys->ManagerThread);
57238 +
57239 +    cache->Handle = ++sys->XidHandle;
57240 +
57241 +    list_add_tail (&cache->Link, &sys->XidCacheList);
57242 +
57243 +    ep_kthread_resume (&sys->ManagerThread);
57244 +}
57245 +
57246 +void
57247 +ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache)
57248 +{
57249 +    /* Stall manager thread - it doesn't lock the XidCacheList */
57250 +    ep_kthread_stall (&sys->ManagerThread);
57251 +
57252 +    list_del (&cache->Link);
57253 +
57254 +    ep_kthread_resume (&sys->ManagerThread);
57255 +}
57256 +
57257 +EP_XID_CACHE *
57258 +ep_xid_cache_find (EP_SYS *sys, EP_XID xid)
57259 +{
57260 +    struct list_head *el;
57261 +
57262 +    list_for_each (el, &sys->XidCacheList) {
57263 +       EP_XID_CACHE *cache = list_entry (el, EP_XID_CACHE, Link);
57264 +
57265 +       if (sys->XidGeneration == xid.Generation && cache->Handle == xid.Handle)
57266 +           return (cache);
57267 +    }
57268 +
57269 +    return (NULL);
57270 +}
57271 +
57272 +static int
57273 +MsgBusy (EP_RAIL *rail, EP_OUTPUTQ *outputq, int slotNum)
57274 +{
57275 +    switch (rail->Operations.OutputQState (rail, outputq, slotNum))
57276 +    {
57277 +    case EP_OUTPUTQ_BUSY:                      /* still busy */
57278 +       return 1;
57279 +       
57280 +    case EP_OUTPUTQ_FAILED:                    /* NACKed */
57281 +    {
57282 +#if defined(DEBUG_PRINTF)
57283 +       EP_MANAGER_MSG *msg = rail->Operations.OutputQMsg (rail, outputq, slotNum);
57284 +
57285 +       EPRINTF4 (DBG_MANAGER, "%s: kcomm msg %d type %d to %d failed\n", rail->Name, slotNum, msg->Hdr.Type, msg->Hdr.DestId);
57286 +#endif
57287 +       break;
57288 +    }
57289 +    
57290 +    case EP_OUTPUTQ_FINISHED:                  /* anything else is finished */
57291 +       break;
57292 +    }
57293 +
57294 +    return 0;
57295 +}
57296 +
57297 +int
57298 +ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body)
57299 +{
57300 +    EP_SYS         *sys  = rail->System;
57301 +    EP_NODE        *node = &sys->Nodes[nodeId];
57302 +    int             n    = EP_MANAGER_OUTPUTQ_SLOTS;
57303 +    int             slotNum;
57304 +    int             rnum;
57305 +    EP_RAIL        *msgRail;
57306 +    EP_MANAGER_MSG *msg;
57307 +    unsigned long   flags;
57308 +
57309 +    ASSERT (! EP_XID_INVALID (xid));
57310 +
57311 +    if ((rnum = ep_pickRail (node->ConnectedRails)) >= 0)
57312 +       msgRail = sys->Rails[rnum];
57313 +    else
57314 +    {
57315 +       if (EP_MANAGER_MSG_TYPE_CONNECTED(type))
57316 +       {
57317 +           ep_debugf (DBG_MANAGER, "%s: no rails available, trying to send type %d to %d\n", rail->Name, type, nodeId);
57318 +           return -EHOSTDOWN;
57319 +       }
57320 +
57321 +       ep_debugf (DBG_MANAGER, "%s: no rails connected to %d - using receiving rail\n", rail->Name, nodeId);
57322 +
57323 +       msgRail = rail;
57324 +    }
57325 +    
57326 +
57327 +    spin_lock_irqsave (&msgRail->ManagerOutputQLock, flags);
57328 +
57329 +    slotNum = msgRail->ManagerOutputQNextSlot;
57330 +
57331 +    while (n-- > 0 && MsgBusy (msgRail, msgRail->ManagerOutputQ, slotNum))             /* search for idle message buffer */
57332 +    {
57333 +       if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS)
57334 +           msgRail->ManagerOutputQNextSlot = 0;
57335 +      
57336 +       slotNum = msgRail->ManagerOutputQNextSlot;
57337 +    }
57338 +
57339 +    if (n == 0)                                                        /* all message buffers busy */
57340 +    {
57341 +       spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
57342 +
57343 +       ep_debugf (DBG_MANAGER, "%s: all message buffers busy: trying to send type %d to %d\n", msgRail->Name, type, nodeId);
57344 +       return -EBUSY;
57345 +    }
57346 +
57347 +    msg = msgRail->Operations.OutputQMsg (msgRail, msgRail->ManagerOutputQ, slotNum);
57348 +    
57349 +    EPRINTF7 (DBG_MANAGER, "%s: ep_send_message: type=%d nodeId=%d rail=%d xid=%08x.%08x.%016llx\n", 
57350 +             msgRail->Name, type, nodeId, rail->Number, xid.Generation, xid.Handle, (long long) xid.Unique);
57351 +
57352 +    msg->Hdr.Version    = EP_MANAGER_MSG_VERSION;
57353 +    msg->Hdr.Type       = type;
57354 +    msg->Hdr.Rail       = rail->Number;
57355 +    msg->Hdr.NodeId     = msgRail->Position.pos_nodeid;
57356 +    msg->Hdr.DestId     = nodeId;
57357 +    msg->Hdr.Xid        = xid;
57358 +    msg->Hdr.Checksum   = 0;
57359 +
57360 +    if (body) bcopy (body, &msg->Body, sizeof (EP_MANAGER_MSG_BODY));
57361 +
57362 +    msg->Hdr.Checksum = CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE);
57363 +
57364 +    if (rail->Operations.OutputQSend (msgRail, msgRail->ManagerOutputQ, slotNum, EP_MANAGER_MSG_SIZE,
57365 +                                     nodeId, EP_SYSTEMQ_MANAGER, EP_MANAGER_OUTPUTQ_RETRIES) < 0)
57366 +       IncrStat (msgRail, SendMessageFailed);
57367 +    
57368 +    if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS) /* check this one last next time */
57369 +       msgRail->ManagerOutputQNextSlot = 0;
57370 +
57371 +    spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
57372 +
57373 +    return 0;
57374 +}
57375 +
57376 +void
57377 +ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason)
57378 +{
57379 +    EP_NODE            *node = &sys->Nodes[nodeId];
57380 +    EP_MANAGER_MSG_BODY body;
57381 +    EP_XID              xid;
57382 +    kcondvar_t          sleep;
57383 +    int                 rnum;
57384 +    unsigned long       flags;
57385 +
57386 +    if (nodeId > sys->Position.pos_nodes)
57387 +       return;
57388 +
57389 +    strncpy (body.PanicReason, reason, sizeof (body.PanicReason));
57390 +
57391 +    kcondvar_init (&sleep);
57392 +    spin_lock_irqsave (&sys->NodeLock, flags);
57393 +    for (;;)
57394 +    {
57395 +       if (node->ConnectedRails == 0)
57396 +           break;
57397 +
57398 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
57399 +           if (node->ConnectedRails & (1 << rnum))
57400 +               break;
57401 +
57402 +       xid = ep_xid_cache_alloc(sys, &sys->Rails[rnum]->XidCache);
57403 +       
57404 +       if (ep_send_message (sys->Rails[rnum], nodeId, EP_MANAGER_MSG_TYPE_REMOTE_PANIC, xid, &body) == 0)
57405 +           break;
57406 +
57407 +       if (kcondvar_timedwaitsig (&sleep, &sys->NodeLock, &flags, lbolt + hz) == CV_RET_SIGPENDING)
57408 +           break;
57409 +    }
57410 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57411 +    kcondvar_destroy (&sleep);
57412 +}
57413 +
57414 +static void
57415 +ProcessNeterrRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
57416 +{
57417 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr request - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, msg->Body.Cookies[0], msg->Body.Cookies[1]);
57418 +
57419 +    rail->Operations.NeterrFixup (rail, msg->Hdr.NodeId, msg->Body.Cookies);
57420 +    
57421 +    ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_NETERR_RESPONSE, msg->Hdr.Xid, &msg->Body);
57422 +}
57423 +
57424 +
57425 +static void
57426 +ProcessNeterrResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
57427 +{
57428 +    EP_SYS       *sys      = rail->System;
57429 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
57430 +    unsigned long flags;
57431 +
57432 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr response - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, msg->Body.Cookies[0], msg->Body.Cookies[1]);
57433 +
57434 +    spin_lock_irqsave (&sys->NodeLock, flags);
57435 +    if (EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid))
57436 +    {
57437 +       EP_INVALIDATE_XID (nodeRail->MsgXid);
57438 +
57439 +       if (nodeRail->NetworkErrorCookies[0] != 0 && nodeRail->NetworkErrorCookies[0] == msg->Body.Cookies[0])
57440 +           nodeRail->NetworkErrorCookies[0] = 0;
57441 +
57442 +       if (nodeRail->NetworkErrorCookies[1] != 0 && nodeRail->NetworkErrorCookies[1] == msg->Body.Cookies[1])
57443 +           nodeRail->NetworkErrorCookies[1] = 0;
57444 +       
57445 +       if (nodeRail->NetworkErrorCookies[0] == 0 && nodeRail->NetworkErrorCookies[1] == 0)
57446 +           nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_ATOMIC_PACKET;
57447 +    }
57448 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57449 +}
57450 +
57451 +
57452 +static void
57453 +ProcessGetNodeState (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
57454 +{
57455 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
57456 +    unsigned int service = msg->Body.Service;
57457 +
57458 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessGetNodeState: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
57459 +             NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? " (NetworkError)" : "");
57460 +
57461 +    msg->Body.NodeState.State             = nodeRail->State;
57462 +    msg->Body.NodeState.NetworkErrorState = nodeRail->NetworkErrorState;
57463 +    msg->Body.NodeState.Railmask          = ep_rcvr_railmask (rail->System, service);
57464 +
57465 +    if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0)
57466 +       printk ("%s: get node state for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
57467 +}
57468 +
57469 +static void
57470 +ProcessFlushRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
57471 +{
57472 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
57473 +
57474 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushRequest: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
57475 +             NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? " (NetworkError)" : "");
57476 +
57477 +    switch (nodeRail->State)
57478 +    {
57479 +    case EP_NODE_REMOTE_PASSIVATE:
57480 +       nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;     /* retransmit our flush request quickly */
57481 +       EPRINTF3 (DBG_MANAGER, "%s: ProcessFlushRequest: NextRunTime -> %lx (%lx)\n", rail->Name, nodeRail->NextRunTime, lbolt);
57482 +       /* DROPTHROUGH */
57483 +
57484 +    case EP_NODE_PASSIVATED:
57485 +    case EP_NODE_DISCONNECTED:
57486 +       if (nodeRail->NetworkErrorState != 0)
57487 +           break;
57488 +
57489 +       if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE, msg->Hdr.Xid, NULL) < 0)
57490 +           printk ("%s: flush request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
57491 +       break;
57492 +       
57493 +    default:
57494 +       EPRINTF4 (DBG_MANAGER, "%s: flush request for %s[%d] - node not in approriate state - %s\n", msgRail->Name, rail->Name, msg->Hdr.NodeId, NodeStateNames[nodeRail->State]);
57495 +       break;
57496 +    }
57497 +}
57498 +
57499 +static void
57500 +ProcessFlushResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
57501 +{
57502 +    EP_NODE_RAIL *nodeRail= &rail->Nodes[msg->Hdr.NodeId];
57503 +
57504 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushResponse: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
57505 +             NodeStateNames[nodeRail->State], EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid) ? " (XIDS match)" : "");
57506 +
57507 +    if (nodeRail->State == EP_NODE_REMOTE_PASSIVATE && EP_XIDS_MATCH(nodeRail->MsgXid, msg->Hdr.Xid))
57508 +    {
57509 +       EP_INVALIDATE_XID (nodeRail->MsgXid);
57510 +
57511 +       printk ("%s: flush response from %d - move to passivated list\n", rail->Name, msg->Hdr.NodeId);
57512 +       list_del (&nodeRail->Link);
57513 +
57514 +       /* Node is now passivated - attempt to failover messages */
57515 +       list_add_tail (&nodeRail->Link, &rail->PassivatedList);
57516 +       nodeRail->State = EP_NODE_PASSIVATED;
57517 +    }
57518 +    else
57519 +    {
57520 +       printk ("%s: flush response from %d - not passivating (%s) or XIDs mismatch (%llx %llx)\n", rail->Name, 
57521 +               msg->Hdr.NodeId, NodeStateNames[nodeRail->State], (long long) nodeRail->MsgXid.Unique, (long long) msg->Hdr.Xid.Unique);
57522 +    }
57523 +}
57524 +
57525 +static void
57526 +ProcessMapNmdRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
57527 +{
57528 +    EP_SYS          *sys     = rail->System;
57529 +    EP_MAP_NMD_BODY *msgBody = &msg->Body.MapNmd;
57530 +    int              i;
57531 +
57532 +    EPRINTF4 (DBG_MANAGER, "%s: Map NMD request from %d for %d NMDs to railmask %x\n", rail->Name, msg->Hdr.NodeId, msgBody->nFrags, msgBody->Railmask);
57533 +    
57534 +    for (i = 0; i < msgBody->nFrags; i++)
57535 +       ep_nmd_map_rails (sys, &msgBody->Nmd[i], msgBody->Railmask);
57536 +    
57537 +    /* Must flush TLBs before responding */
57538 +    for (i = 0; i < EP_MAX_RAILS; i++)
57539 +       if (sys->Rails[i] && sys->Rails[i]->TlbFlushRequired)
57540 +           ep_perrail_dvma_sync (sys->Rails[i]);
57541 +
57542 +    if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0)
57543 +       printk ("%s: map nmd request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
57544 +}
57545 +
57546 +static void
57547 +ProcessXidMessage (EP_RAIL *msgRail, EP_MANAGER_MSG *msg, EP_XID xid)
57548 +{
57549 +    EP_XID_CACHE *xidCache = ep_xid_cache_find (msgRail->System, xid);
57550 +
57551 +    EPRINTF6 (DBG_MANAGER, "%s: ProcessXidMessage: XID=%08x.%0x8.%016llx -> %p(%p)\n",
57552 +             msgRail->Name, xid.Generation, xid.Handle, (long long) xid.Unique,
57553 +             xidCache  ? xidCache->MessageHandler : 0, xidCache  ? xidCache->Arg : 0);
57554 +    
57555 +    if (xidCache != NULL)
57556 +       xidCache->MessageHandler (xidCache->Arg, msg);
57557 +}
57558 +
57559 +static void
57560 +ProcessMessage (EP_RAIL *msgRail, void *arg, void *msgbuf)
57561 +{
57562 +    EP_SYS         *sys  = msgRail->System;
57563 +    EP_MANAGER_MSG *msg  = (EP_MANAGER_MSG *) msgbuf;
57564 +    uint16_t        csum = msg->Hdr.Checksum;
57565 +    EP_RAIL        *rail;
57566 +
57567 +    if (msg->Hdr.Version != EP_MANAGER_MSG_VERSION)
57568 +       return;
57569 +
57570 +    msg->Hdr.Checksum= 0;
57571 +    if (CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE) != csum)
57572 +    {
57573 +       printk ("%s: checksum failed on msg from %d (%d) (%x != %x) ?\n", msgRail->Name, msg->Hdr.NodeId, msg->Hdr.Type, csum, CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE));
57574 +       return;
57575 +    }
57576 +
57577 +    if ((rail = sys->Rails[msg->Hdr.Rail]) == NULL)
57578 +    {
57579 +       printk ("%s: rail no longer exists for msg from %d?\n", msgRail->Name, msg->Hdr.NodeId);
57580 +       return;
57581 +    }
57582 +
57583 +    EPRINTF7 (DBG_MANAGER, "%s: ProcessMessage (%s) type=%d node=%d XID=%08x.%0x8.%016llx\n", 
57584 +             msgRail->Name, rail->Name, msg->Hdr.Type, msg->Hdr.NodeId,
57585 +             msg->Hdr.Xid.Generation, msg->Hdr.Xid.Handle, msg->Hdr.Xid.Unique);
57586 +
57587 +    switch (msg->Hdr.Type)
57588 +    {
57589 +    case EP_MANAGER_MSG_TYPE_REMOTE_PANIC:
57590 +       msg->Body.PanicReason[EP_PANIC_STRLEN] = '\0';          /* ensure string terminated */
57591 +
57592 +       printk ("%s: remote panic call from elan node %d - %s\n", msgRail->Name, msg->Hdr.NodeId, msg->Body.PanicReason);
57593 +       panic ("ep: remote panic request\n");
57594 +       break;
57595 +
57596 +    case EP_MANAGER_MSG_TYPE_NETERR_REQUEST:
57597 +       ProcessNeterrRequest (msgRail, rail, msg);
57598 +       break;
57599 +
57600 +    case EP_MANAGER_MSG_TYPE_NETERR_RESPONSE:
57601 +       ProcessNeterrResponse (msgRail, rail, msg);
57602 +       break;
57603 +
57604 +    case EP_MANAGER_MSG_TYPE_FLUSH_REQUEST:
57605 +       ProcessFlushRequest (msgRail, rail, msg);
57606 +       break;
57607 +
57608 +    case EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE:
57609 +       ProcessFlushResponse (msgRail, rail, msg);
57610 +       break;
57611 +
57612 +    case EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST:
57613 +       ProcessMapNmdRequest (msgRail, rail, msg);
57614 +       break;
57615 +
57616 +    case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE:
57617 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
57618 +       break;
57619 +
57620 +    case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST:
57621 +       ProcessXidMessage (msgRail, msg, msg->Body.Failover.Xid);
57622 +       break;
57623 +
57624 +    case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE:
57625 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
57626 +       break;
57627 +       
57628 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE:
57629 +       ProcessGetNodeState (msgRail, rail, msg);
57630 +       break;
57631 +
57632 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: 
57633 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
57634 +       break;
57635 +
57636 +    default:
57637 +       printk ("%s: Unknown message type %d from %d\n", msgRail->Name, msg->Hdr.Type, msg->Hdr.NodeId);
57638 +       break;
57639 +    }
57640 +}
57641 +
57642 +
57643 +static void
57644 +ManagerQueueEvent (EP_RAIL *rail, void *arg)
57645 +{
57646 +    ep_kthread_schedule ((EP_KTHREAD *) arg, lbolt);
57647 +}
57648 +
57649 +void
57650 +UpdateConnectionState (EP_RAIL *rail, statemap_t *map)
57651 +{
57652 +    EP_SYS *sys = rail->System;
57653 +    bitmap_t seg;
57654 +    int offset, nodeId;
57655 +    unsigned long flags;
57656 +    
57657 +    while ((offset = statemap_findchange (map, &seg, 1)) >= 0)
57658 +    {
57659 +       for (nodeId = offset; nodeId < (offset + BT_NBIPUL) && nodeId < rail->Position.pos_nodes; nodeId++)
57660 +       {
57661 +           EP_NODE      *node     = &sys->Nodes[nodeId];
57662 +           EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId];
57663 +
57664 +           if (statemap_getbits (map, nodeId, 1))
57665 +           {
57666 +               spin_lock_irqsave (&sys->NodeLock, flags);
57667 +
57668 +               switch (nodeRail->State)
57669 +               {
57670 +               case EP_NODE_DISCONNECTED:
57671 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnected \n", rail->Name, nodeId);
57672 +                   break;
57673 +
57674 +               case EP_NODE_CONNECTING:
57675 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Connect\n", rail->Name, nodeId);
57676 +                   
57677 +                   /* load the route table entry *before*  setting the state
57678 +                    * to connected, since DMA's can be initiated as soon as
57679 +                    * the node is marked as connected */
57680 +                   rail->Operations.LoadNodeRoute (rail, nodeId);
57681 +                   
57682 +                   nodeRail->State = EP_NODE_CONNECTED;
57683 +                   
57684 +                   statemap_setbits (rail->NodeSet, nodeId, 1, 1);
57685 +                   if (statemap_getbits (sys->NodeSet, nodeId, 1) == 0)
57686 +                       statemap_setbits (sys->NodeSet, nodeId, 1, 1);
57687 +
57688 +                   /* Add to rails connected to this node */
57689 +                   node->ConnectedRails |= (1 << rail->Number);
57690 +
57691 +                   /* Finally lower the per-node context filter */
57692 +                   rail->Operations.LowerFilter (rail, nodeId);
57693 +                   break;
57694 +                   
57695 +               case EP_NODE_LEAVING_CONNECTED:
57696 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Local Passivate\n", rail->Name, nodeId);
57697 +                   
57698 +                   /* Raise the per-node context filter */
57699 +                   rail->Operations.RaiseFilter (rail, nodeId);
57700 +
57701 +                   /* If it's resolving network errors it will be on the NodeNeterrList,
57702 +                    * remove if from this list before placing it on the LocalPassivateList
57703 +                    * as we'll resolve the network error later in RemotePassivate */
57704 +                   if (nodeRail->NetworkErrorState)
57705 +                       list_del (&nodeRail->Link);
57706 +
57707 +                   list_add_tail (&nodeRail->Link, &rail->LocalPassivateList);
57708 +                   nodeRail->State = EP_NODE_LOCAL_PASSIVATE;
57709 +
57710 +                   /* Remove from rails connected to this node */
57711 +                   node->ConnectedRails &= ~(1 << rail->Number);
57712 +                   break;
57713 +
57714 +               default:
57715 +                   printk ("%s: Node %d - in NodeChangeMap with state %d\n", rail->Name, nodeId, nodeRail->State);
57716 +                   panic ("Node in NodeChangeMap with invalid state\n");
57717 +                   break;
57718 +               }
57719 +               spin_unlock_irqrestore (&sys->NodeLock, flags);
57720 +           }
57721 +       }
57722 +    }
57723 +}
57724 +
57725 +void
57726 +ProgressNetworkError (EP_RAIL *rail, EP_NODE_RAIL *nodeRail)
57727 +{
57728 +    EP_SYS             *sys    = rail->System;
57729 +    int                 nodeId = nodeRail - rail->Nodes;
57730 +    EP_MANAGER_MSG_BODY msg;
57731 +
57732 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_REMOTE_PASSIVATE);
57733 +
57734 +    if (BEFORE (lbolt, nodeRail->NextRunTime))
57735 +       return;
57736 +
57737 +    if (nodeRail->NetworkErrorState & EP_NODE_NETERR_DMA_PACKET)
57738 +       nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_DMA_PACKET;
57739 +    
57740 +    if (nodeRail->NetworkErrorState & EP_NODE_NETERR_ATOMIC_PACKET)
57741 +    {
57742 +       if (EP_XID_INVALID (nodeRail->MsgXid))
57743 +           nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache);
57744 +
57745 +       msg.Cookies[0] = nodeRail->NetworkErrorCookies[0];
57746 +       msg.Cookies[1] = nodeRail->NetworkErrorCookies[1];
57747 +       
57748 +       EPRINTF4 (DBG_NETWORK_ERROR, "%s: progress neterr - node %d cookies %llx %llx\n", rail->Name, nodeId, msg.Cookies[0], msg.Cookies[1]);
57749 +
57750 +       if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_NETERR_REQUEST, nodeRail->MsgXid, &msg) == 0)
57751 +           nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
57752 +       else
57753 +           nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
57754 +    }
57755 +}
57756 +
57757 +long
57758 +ProgressNodeLists (EP_RAIL *rail, long nextRunTime)
57759 +{
57760 +    EP_SYS           *sys = rail->System;
57761 +    struct list_head *el, *nel;
57762 +    unsigned long flags;
57763 +
57764 +    spin_lock_irqsave (&sys->NodeLock, flags);
57765 +    list_for_each_safe (el, nel, &rail->NetworkErrorList) {
57766 +       EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link);
57767 +       int           nodeId   = nodeRail - rail->Nodes;
57768 +
57769 +       ProgressNetworkError (rail, nodeRail);
57770 +
57771 +       if (nodeRail->NetworkErrorState == 0)
57772 +       {
57773 +           EPRINTF2 (DBG_NETWORK_ERROR, "%s: lower context filter for node %d due to network error\n", rail->Name, nodeId);
57774 +           printk ("%s: lower context filter for node %d due to network error\n", rail->Name, nodeId);
57775 +
57776 +           rail->Operations.LowerFilter (rail, nodeId);
57777 +
57778 +           list_del (&nodeRail->Link);
57779 +           continue;
57780 +       }
57781 +       
57782 +       if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
57783 +           nextRunTime = nodeRail->NextRunTime;
57784 +    }
57785 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57786 +
57787 +    if (! list_empty (&rail->LocalPassivateList))
57788 +    {
57789 +       EPRINTF1 (DBG_MANAGER, "%s: Locally Passivating Nodes\n", rail->Name);
57790 +       
57791 +       /* We have disconnected from some nodes or have left ourselves;
57792 +        * flush through all communications and determine whether we
57793 +        * need to perform rail failover */
57794 +       rail->Operations.FlushFilters (rail);
57795 +       
57796 +       ep_call_callbacks (rail, EP_CB_FLUSH_FILTERING, rail->NodeSet);
57797 +
57798 +       rail->Operations.FlushQueues (rail);
57799 +
57800 +       ep_call_callbacks (rail, EP_CB_FLUSH_FLUSHING, rail->NodeSet);
57801 +
57802 +       while (! list_empty (&rail->LocalPassivateList))
57803 +       {
57804 +           EP_NODE_RAIL *nodeRail = list_entry (rail->LocalPassivateList.next, EP_NODE_RAIL, Link);
57805 +           int           nodeId   = nodeRail - rail->Nodes;
57806 +
57807 +           list_del (&nodeRail->Link);
57808 +
57809 +           rail->Operations.UnloadNodeRoute (rail, nodeId);
57810 +           
57811 +           if (nodeRail->NetworkErrorState == 0 && nodeRail->MessageState == 0)
57812 +           {
57813 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnecting\n", rail->Name, nodeId);
57814 +
57815 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57816 +               nodeRail->State = EP_NODE_DISCONNECTING;
57817 +           }
57818 +           else
57819 +           {
57820 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Remote Passivate\n", rail->Name, nodeId);
57821 +
57822 +               list_add_tail (&nodeRail->Link, &rail->RemotePassivateList);
57823 +               nodeRail->State = EP_NODE_REMOTE_PASSIVATE;
57824 +
57825 +               if (nodeRail->NetworkErrorState == 0)
57826 +                   nodeRail->NextRunTime = lbolt;
57827 +           }
57828 +       }
57829 +
57830 +       ep_call_callbacks (rail, EP_CB_PASSIVATED, rail->NodeSet);
57831 +    }
57832 +
57833 +    list_for_each_safe (el, nel, &rail->RemotePassivateList) {
57834 +       EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link);
57835 +       int           nodeId   = nodeRail - rail->Nodes;
57836 +       EP_NODE      *node     = &sys->Nodes[nodeId];
57837 +
57838 +       if (node->ConnectedRails == 0)                          /* no rails connected to this node (anymore) */
57839 +       {
57840 +           /*  Remove from this list */
57841 +           list_del (&nodeRail->Link);
57842 +
57843 +           EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Remote Passivate -> Disconnecting\n", rail->Name, nodeId);
57844 +
57845 +           /* transition towards disconnected */
57846 +           list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57847 +           nodeRail->State = EP_NODE_DISCONNECTING;
57848 +           continue;
57849 +       }
57850 +
57851 +       EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n",
57852 +                 rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState,
57853 +                 nodeRail->NextRunTime, nextRunTime);
57854 +
57855 +       if (nodeRail->NetworkErrorState)
57856 +       {
57857 +           ProgressNetworkError (rail, nodeRail);
57858 +       }
57859 +       else if (! BEFORE (lbolt, nodeRail->NextRunTime))
57860 +       {
57861 +           if (EP_XID_INVALID (nodeRail->MsgXid))
57862 +               nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache);
57863 +
57864 +           if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_FLUSH_REQUEST, nodeRail->MsgXid, NULL) == 0)
57865 +               nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
57866 +           else
57867 +               nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
57868 +       }
57869 +
57870 +       if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
57871 +           nextRunTime = nodeRail->NextRunTime;
57872 +    }
57873 +    
57874 +    if (! list_empty (&rail->PassivatedList)) 
57875 +    {
57876 +       ep_call_callbacks (rail, EP_CB_FAILOVER, rail->NodeSet);
57877 +
57878 +       list_for_each_safe (el, nel, &rail->PassivatedList) {
57879 +           EP_NODE_RAIL *nodeRail = list_entry (rail->PassivatedList.next, EP_NODE_RAIL, Link);
57880 +           int           nodeId   = nodeRail - rail->Nodes;
57881 +           EP_NODE      *node     = &sys->Nodes[nodeId];
57882 +
57883 +           ASSERT (nodeRail->NetworkErrorState == 0);
57884 +
57885 +           if (node->ConnectedRails == 0)
57886 +           {
57887 +               /*  Remove from this list */
57888 +               list_del (&nodeRail->Link);
57889 +
57890 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Passivated -> Disconnecting\n", rail->Name, nodeId);
57891 +
57892 +               /* transition towards disconnected */
57893 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57894 +               nodeRail->State = EP_NODE_DISCONNECTING;
57895 +               continue;
57896 +           }
57897 +           
57898 +           EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n",
57899 +                     rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState,
57900 +                     nodeRail->NextRunTime, nextRunTime);
57901 +
57902 +           if (nodeRail->MessageState == 0)
57903 +           {
57904 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d, no messages, Passivated -> Disconnecting\n", rail->Name,nodeId);
57905 +
57906 +               list_del (&nodeRail->Link);
57907 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
57908 +               nodeRail->State = EP_NODE_DISCONNECTING;
57909 +               continue;
57910 +           }
57911 +
57912 +           nodeRail->MessageState = 0;
57913 +           nodeRail->NextRunTime  = lbolt + FAILOVER_RETRY_TIME;
57914 +
57915 +           if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
57916 +               nextRunTime = nodeRail->NextRunTime;
57917 +       }
57918 +    }
57919 +
57920 +    if (! list_empty (&rail->DisconnectingList))
57921 +    {
57922 +       ep_call_callbacks (rail, EP_CB_DISCONNECTING, rail->NodeSet);
57923 +
57924 +       while (! list_empty (&rail->DisconnectingList))
57925 +       {
57926 +           EP_NODE_RAIL *nodeRail = list_entry (rail->DisconnectingList.next, EP_NODE_RAIL, Link);
57927 +           int           nodeId   = nodeRail - rail->Nodes;
57928 +           EP_NODE      *node     = &sys->Nodes[nodeId];
57929 +
57930 +           EPRINTF2 (DBG_MANAGER, "%s: Node %d, Disconnecting -> Disconnected\n", rail->Name, nodeId);
57931 +
57932 +           list_del (&nodeRail->Link);
57933 +
57934 +           rail->Operations.NodeDisconnected (rail, nodeId);
57935 +
57936 +           /* Clear the network error state */
57937 +           nodeRail->NextRunTime            = 0;
57938 +           nodeRail->NetworkErrorState      = 0;
57939 +           nodeRail->NetworkErrorCookies[0] = 0;
57940 +           nodeRail->NetworkErrorCookies[1] = 0;
57941 +
57942 +           /* Clear the message state */
57943 +           nodeRail->MessageState = 0;
57944 +
57945 +           cm_node_disconnected (rail, nodeId);
57946 +
57947 +           nodeRail->State = EP_NODE_DISCONNECTED;
57948 +           
57949 +           statemap_setbits (rail->NodeSet, nodeId, 0, 1);
57950 +
57951 +           if (node->ConnectedRails == 0)
57952 +               statemap_setbits (sys->NodeSet, nodeId, 0, 1);
57953 +       }
57954 +
57955 +       ep_call_callbacks (rail, EP_CB_DISCONNECTED, rail->NodeSet);
57956 +    }
57957 +
57958 +    return (nextRunTime);
57959 +}
57960 +
57961 +void
57962 +DisplayNodes (EP_RAIL *rail)
57963 +{
57964 +    EP_SYS *sys = rail->System;
57965 +    int i, state, count;
57966 +    unsigned long flags;
57967 +
57968 +    spin_lock_irqsave (&sys->NodeLock, flags);
57969 +
57970 +    for (state = 0; state < EP_NODE_NUM_STATES; state++)
57971 +    {
57972 +       for (count = i = 0; i < rail->Position.pos_nodes; i++)
57973 +       {
57974 +           ASSERT (rail->Nodes[i].State < EP_NODE_NUM_STATES);
57975 +
57976 +           if (rail->Nodes[i].State == state)
57977 +               if (state != EP_NODE_DISCONNECTED)
57978 +                   printk ("%s %d", !count++ ? NodeStateNames[state] : "", i);
57979 +       }
57980 +       if (count)
57981 +           printk ("%s (%d total)\n", state == EP_NODE_DISCONNECTED ? NodeStateNames[state] : "", count);
57982 +    }
57983 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
57984 +}
57985 +
57986 +static void
57987 +PositionFound (EP_RAIL *rail, ELAN_POSITION *pos)
57988 +{
57989 +    EP_SYS           *sys = rail->System;
57990 +    struct list_head *el;
57991 +    int i;
57992 +
57993 +    /* only called from the ep_manager thread whilst rail->State == EP_RAIL_STATE_STARTED */
57994 +    ASSERT ( rail->State == EP_RAIL_STATE_STARTED );
57995 +
57996 +#if defined(PER_CPU_TIMEOUT)
57997 +    /*
57998 +     * On Tru64 - if we're running in a "funnelled" thread, then we will be 
57999 +     * unable to start the per-cpu timeouts, so if we return then eventually
58000 +     * the ep_manager() thread will find the network position and we're
58001 +     * in control of our own destiny.
58002 +     */
58003 +    if (THREAD_IS_FUNNELED(current_thread()))
58004 +    {
58005 +       ep_kthread_schedule (&sys->ManagerThread, lbolt);
58006 +       return;
58007 +    }
58008 +#endif
58009 +
58010 +    sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid);
58011 +
58012 +    if (pos->pos_levels > MaxSwitchLevels)
58013 +    {
58014 +       for (i = 0; i < (pos->pos_levels - MaxSwitchLevels); i++)
58015 +           pos->pos_nodes /= pos->pos_arity[i];
58016 +
58017 +       for (i = 0; i < MaxSwitchLevels; i++)
58018 +           pos->pos_arity[i] = pos->pos_arity[i + (pos->pos_levels - MaxSwitchLevels)];
58019 +
58020 +       pos->pos_levels = MaxSwitchLevels;
58021 +       pos->pos_nodeid = pos->pos_nodeid % pos->pos_nodes;
58022 +                               
58023 +       printk ("%s: limiting switch levels to %d\n", rail->Name, MaxSwitchLevels);
58024 +       printk ("%s: nodeid=%d level=%d numnodes=%d\n", rail->Name, pos->pos_nodeid, pos->pos_levels, pos->pos_nodes);
58025 +
58026 +       sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid);
58027 +    }
58028 +
58029 +    if (rail->Position.pos_mode != ELAN_POS_UNKNOWN && rail->Position.pos_nodeid != pos->pos_nodeid)
58030 +    {
58031 +       printk ("%s: NodeId has changed from %d to %d\n", rail->Name, rail->Position.pos_nodeid, pos->pos_nodeid);
58032 +       panic ("ep: PositionFound: NodeId has changed\n");
58033 +    }
58034 +
58035 +    if (sys->Position.pos_mode != ELAN_POS_UNKNOWN && (sys->Position.pos_nodeid != pos->pos_nodeid || sys->Position.pos_nodes != pos->pos_nodes))
58036 +    {
58037 +       printk ("%s: position incompatible - disabling rail\n", rail->Name);
58038 +       rail->State = EP_RAIL_STATE_INCOMPATIBLE;
58039 +       return;
58040 +    }
58041 +
58042 +    if (sys->Position.pos_mode == ELAN_POS_UNKNOWN)
58043 +    {
58044 +       sys->Position = *pos;
58045 +       sys->NodeSet  = statemap_create (pos->pos_nodes);
58046 +       KMEM_ZALLOC (sys->Nodes, EP_NODE *, pos->pos_nodes * sizeof (EP_NODE), 1);
58047 +    }
58048 +
58049 +    rail->Position             = *pos;
58050 +    rail->SwitchBroadcastLevel = pos->pos_levels - 1;
58051 +    rail->State                = EP_RAIL_STATE_RUNNING;
58052 +
58053 +    for (i = 0; i < pos->pos_levels; i++)
58054 +    {
58055 +       rail->SwitchProbeTick[i]   = lbolt;
58056 +       rail->SwitchLast[i].uplink = 4;
58057 +    }
58058 +
58059 +    rail->Operations.PositionFound (rail, pos);
58060 +
58061 +    INIT_LIST_HEAD (&rail->NetworkErrorList);
58062 +    INIT_LIST_HEAD (&rail->LocalPassivateList);
58063 +    INIT_LIST_HEAD (&rail->RemotePassivateList);
58064 +    INIT_LIST_HEAD (&rail->PassivatedList);
58065 +    INIT_LIST_HEAD (&rail->DisconnectingList);
58066 +
58067 +    rail->NodeSet       = statemap_create (rail->Position.pos_nodes);
58068 +    rail->NodeChangeMap = statemap_create (rail->Position.pos_nodes);
58069 +    rail->NodeChangeTmp = statemap_create (rail->Position.pos_nodes);
58070 +
58071 +    KMEM_ZALLOC (rail->Nodes, EP_NODE_RAIL *, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL), 1);
58072 +
58073 +    for (i = 0; i < rail->Position.pos_nodes; i++)
58074 +    {
58075 +       spin_lock_init (&rail->Nodes[i].CookieLock);
58076 +
58077 +       INIT_LIST_HEAD (&rail->Nodes[i].StalledDmas);
58078 +
58079 +       rail->Nodes[i].State = EP_NODE_DISCONNECTED;
58080 +    }
58081 +
58082 +    /* Notify all subsystems that a new rail has been enabled */
58083 +    kmutex_lock (&sys->SubsysLock);
58084 +    list_for_each (el, &sys->Subsystems) { 
58085 +       EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
58086 +
58087 +       if (subsys->AddRail)
58088 +           subsys->AddRail (subsys, sys, rail);
58089 +
58090 +       /* XXXX: what to do if the subsystem refused to add the rail ? */
58091 +    }
58092 +    kmutex_unlock (&sys->SubsysLock);
58093 +
58094 +    /* Now enable the manager input queue */
58095 +    ep_enable_inputq (rail, rail->ManagerInputQ);
58096 +}
58097 +
58098 +static void
58099 +ep_manager (void *arg)
58100 +{
58101 +    EP_SYS            *sys = (EP_SYS *) arg;
58102 +    struct list_head *el;
58103 +    ELAN_POSITION     pos;
58104 +    unsigned long     flags;
58105 +
58106 +    kernel_thread_init ("ep_manager");
58107 +    kernel_thread_become_highpri();
58108 +
58109 +    for (;;)
58110 +    {
58111 +       long nextRunTime = lbolt + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT);
58112 +
58113 +       list_for_each (el, &sys->ManagedRails) {
58114 +           EP_RAIL *rail = list_entry (el, EP_RAIL, ManagerLink);
58115 +
58116 +           switch (rail->State)
58117 +           {
58118 +           case EP_RAIL_STATE_STARTED:
58119 +               if (ProbeNetwork (rail, &pos) == 0)
58120 +               {
58121 +                   PositionFound (rail, &pos);
58122 +                   break;
58123 +               }
58124 +
58125 +               if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + HZ))
58126 +                   nextRunTime = lbolt + HZ;
58127 +               break;
58128 +
58129 +           case EP_RAIL_STATE_RUNNING:
58130 +               if (ep_poll_inputq (rail, rail->ManagerInputQ, 100, ProcessMessage, rail) >= 100)
58131 +                   nextRunTime = lbolt;
58132 +               
58133 +               /* Handle any nodes which the cluster membership subsystem
58134 +                * has indicated are to begin connecting or disconnecting */
58135 +               spin_lock_irqsave (&sys->NodeLock, flags);
58136 +               if (! statemap_changed (rail->NodeChangeMap))
58137 +                   spin_unlock_irqrestore (&sys->NodeLock, flags);
58138 +               else
58139 +               {
58140 +                   /*
58141 +                    * Take a copy of the statemap, and zero all entries so
58142 +                    * we only see new requests next time
58143 +                    */
58144 +                   statemap_copy (rail->NodeChangeTmp, rail->NodeChangeMap);
58145 +                   statemap_zero (rail->NodeChangeMap);
58146 +                   spin_unlock_irqrestore (&sys->NodeLock, flags);
58147 +                   
58148 +                   UpdateConnectionState (rail, rail->NodeChangeTmp);
58149 +               }
58150 +
58151 +               nextRunTime = ProgressNodeLists (rail, nextRunTime);
58152 +
58153 +               if (statemap_changed (rail->NodeSet))
58154 +               {
58155 +                   ep_call_callbacks (rail, EP_CB_NODESET, rail->NodeSet);
58156 +
58157 +                   statemap_clearchanges (rail->NodeSet);
58158 +               }
58159 +               break;
58160 +
58161 +           case EP_RAIL_STATE_INCOMPATIBLE:
58162 +               break;
58163 +           }
58164 +       }
58165 +
58166 +
58167 +       EPRINTF5 (DBG_MANAGER, "ep_manager: sleep now=%lx nextRunTime=%lx (%ld) [%lx (%ld)]\n",
58168 +                 lbolt, nextRunTime, nextRunTime ? nextRunTime - lbolt : 0, sys->ManagerThread.next_run,
58169 +                 sys->ManagerThread.next_run ? sys->ManagerThread.next_run - lbolt : 0);
58170 +
58171 +       if (ep_kthread_sleep (&sys->ManagerThread, nextRunTime) < 0)
58172 +           break;
58173 +    }
58174 +
58175 +    ep_kthread_stopped (&sys->ManagerThread);
58176 +    kernel_thread_exit();
58177 +}
58178 +
58179 +void
58180 +ep_connect_node (EP_RAIL *rail, int nodeId)
58181 +{
58182 +    EP_SYS       *sys  = rail->System;
58183 +    EP_NODE_RAIL *node = &rail->Nodes[nodeId];
58184 +    unsigned long flags;
58185 +  
58186 +    spin_lock_irqsave (&sys->NodeLock, flags);
58187 +
58188 +    EPRINTF2 (DBG_MANAGER, "%s: ep_connect_node: nodeId %d\n", rail->Name, nodeId);
58189 +
58190 +    ASSERT (node->State == EP_NODE_DISCONNECTED && statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0);
58191 +    
58192 +    node->State = EP_NODE_CONNECTING;
58193 +
58194 +    statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1);
58195 +
58196 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
58197 +
58198 +    ep_kthread_schedule (&sys->ManagerThread, lbolt);
58199 +}
58200 +
58201 +int
58202 +ep_disconnect_node (EP_RAIL *rail, int nodeId)
58203 +{
58204 +    EP_SYS       *sys  = rail->System;
58205 +    EP_NODE_RAIL *node = &rail->Nodes[nodeId];
58206 +    int                  state;
58207 +    unsigned long flags;
58208 +  
58209 +    spin_lock_irqsave (&sys->NodeLock, flags);
58210 +    
58211 +    EPRINTF3 (DBG_MANAGER, "%s: ep_disconnect_node: nodeId %d - %s\n", rail->Name, nodeId, NodeStateNames[node->State]);
58212 +
58213 +    switch (state = node->State)
58214 +    {
58215 +    case EP_NODE_CONNECTING:
58216 +       statemap_setbits (rail->NodeChangeMap, nodeId, 0, 1);
58217 +
58218 +       node->State = EP_NODE_DISCONNECTED;
58219 +       break;
58220 +       
58221 +    case EP_NODE_CONNECTED:
58222 +       statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1);
58223 +
58224 +       node->State = EP_NODE_LEAVING_CONNECTED;
58225 +       break;
58226 +
58227 +    case EP_NODE_LEAVING_CONNECTED:
58228 +       /* no assert on NodeChangeMap as the map could have been taken but not acted on */
58229 +       break;
58230 +       
58231 +    default:
58232 +       ASSERT (statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0);
58233 +       break;
58234 +    }
58235 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
58236 +
58237 +    if (state == EP_NODE_CONNECTED)
58238 +       ep_kthread_schedule (&sys->ManagerThread, lbolt);
58239 +
58240 +    return state;
58241 +}
58242 +
58243 +int
58244 +ep_manager_add_rail (EP_SYS *sys, EP_RAIL *rail)
58245 +{
58246 +    if ((rail->ManagerOutputQ = ep_alloc_outputq (rail, EP_MANAGER_MSG_SIZE, EP_MANAGER_OUTPUTQ_SLOTS)) == NULL)
58247 +       return -ENOMEM;
58248 +
58249 +    if ((rail->ManagerInputQ = ep_alloc_inputq (rail, EP_SYSTEMQ_MANAGER, EP_MANAGER_MSG_SIZE, EP_MANAGER_INPUTQ_SLOTS,
58250 +                                                ManagerQueueEvent, &sys->ManagerThread)) == NULL)
58251 +    {
58252 +       ep_free_outputq (rail, rail->ManagerOutputQ);
58253 +       return -ENOMEM;
58254 +    }
58255 +
58256 +    spin_lock_init (&rail->ManagerOutputQLock);
58257 +
58258 +    ep_xid_cache_init (sys, &rail->XidCache);
58259 +
58260 +    ep_kthread_stall (&sys->ManagerThread);
58261 +    list_add_tail (&rail->ManagerLink, &sys->ManagedRails);
58262 +    ep_kthread_resume (&sys->ManagerThread);
58263 +
58264 +    return (0);
58265 +}
58266 +
58267 +void
58268 +ep_manager_remove_rail (EP_SYS *sys, EP_RAIL *rail)
58269 +{
58270 +    if (rail->ManagerInputQ != NULL)
58271 +    {
58272 +       ep_kthread_stall (&sys->ManagerThread);
58273 +       list_del (&rail->ManagerLink);
58274 +       ep_kthread_resume (&sys->ManagerThread);
58275 +
58276 +       ep_xid_cache_destroy (sys, &rail->XidCache);
58277 +
58278 +       spin_lock_destroy (&rail->ManagerOutputQLock);
58279 +
58280 +       ep_disable_inputq (rail, rail->ManagerInputQ);
58281 +       ep_free_inputq (rail, rail->ManagerInputQ);
58282 +       ep_free_outputq (rail, rail->ManagerOutputQ);
58283 +    }
58284 +}
58285 +
58286 +int
58287 +ep_manager_init (EP_SYS *sys)
58288 +{
58289 +    INIT_LIST_HEAD (&sys->ManagedRails);
58290 +
58291 +    ep_kthread_init (&sys->ManagerThread);
58292 +
58293 +    if (kernel_thread_create (ep_manager, (void *) sys) == 0)
58294 +       return (ENOMEM);
58295 +    
58296 +    ep_kthread_started (&sys->ManagerThread);
58297 +    
58298 +    return (0);
58299 +}
58300 +
58301 +void
58302 +ep_manager_fini (EP_SYS *sys)
58303 +{
58304 +    ep_kthread_stop (&sys->ManagerThread);
58305 +    ep_kthread_destroy (&sys->ManagerThread);
58306 +}
58307 +
58308 +int
58309 +ep_sys_init (EP_SYS *sys)
58310 +{
58311 +    kmutex_init (&sys->SubsysLock);   
58312 +    kmutex_init (&sys->StartStopLock);
58313 +    spin_lock_init (&sys->NodeLock);
58314 +
58315 +    INIT_LIST_HEAD (&sys->Subsystems);
58316 +
58317 +    /* initialise the xid allocators */
58318 +    spin_lock_init (&sys->XidLock);
58319 +    INIT_LIST_HEAD (&sys->XidCacheList);
58320 +
58321 +    /* initially don't know where we are in the network */
58322 +    sys->Position.pos_mode = ELAN_POS_UNKNOWN;
58323 +
58324 +    /* initialise the network mapping descriptor hash tables */
58325 +    ep_nmh_init (&sys->MappingTable);
58326 +
58327 +    /* initialise the shared allocators */
58328 +    ep_shared_alloc_init (sys);
58329 +
58330 +    /* initialise the dvma space */
58331 +    ep_dvma_init (sys);
58332 +
58333 +    /* initialise the rail manager */
58334 +    ep_manager_init (sys);
58335 +
58336 +    /* initialise all subsystems */
58337 +    cm_init (sys);
58338 +    ep_comms_init (sys);
58339 +    //ep_msgsys_init (sys);
58340 +
58341 +    return (0);
58342 +}
58343 +
58344 +void
58345 +ep_sys_fini (EP_SYS *sys)
58346 +{
58347 +    /* Destroy the subsystems in the reverse order to their creation */
58348 +    while (! list_empty (&sys->Subsystems))
58349 +    {
58350 +       EP_SUBSYS *subsys = list_entry (sys->Subsystems.prev, EP_SUBSYS, Link);
58351 +
58352 +       list_del (&subsys->Link);
58353 +       
58354 +       subsys->Destroy (subsys, sys);
58355 +    }
58356 +
58357 +    ep_manager_fini(sys);
58358 +    ep_dvma_fini (sys);
58359 +    ep_shared_alloc_fini (sys);
58360 +
58361 +    ep_nmh_fini (&sys->MappingTable);
58362 +
58363 +    if (sys->Position.pos_mode != ELAN_POS_UNKNOWN) {
58364 +       statemap_destroy (sys->NodeSet);
58365 +       KMEM_FREE(sys->Nodes, sys->Position.pos_nodes * sizeof (EP_NODE));
58366 +    }
58367 +
58368 +    spin_lock_destroy (&sys->XidLock);
58369 +
58370 +    spin_lock_destroy (&sys->NodeLock);
58371 +    kmutex_destroy (&sys->SubsysLock);
58372 +    kmutex_destroy (&sys->StartStopLock);
58373 +}
58374 +
58375 +void
58376 +ep_shutdown (EP_SYS *sys)
58377 +{
58378 +    sys->Shutdown = 1;
58379 +}
58380 +
58381 +int
58382 +ep_init_rail (EP_SYS *sys, EP_RAIL *rail)
58383 +{
58384 +    static int rnum;
58385 +
58386 +    rail->System              = sys;
58387 +    rail->State               = EP_RAIL_STATE_UNINITIALISED;
58388 +    rail->Number              = rnum++;
58389 +    rail->Position.pos_mode   = ELAN_POS_UNKNOWN;
58390 +    rail->Position.pos_nodeid = ELAN_INVALID_NODE;
58391 +
58392 +    rail->CallbackRegistered  = 0;
58393 +
58394 +    sprintf (rail->Name, "ep%d", rail->Number);
58395 +
58396 +    /* Initialise externally visible locks */
58397 +    kmutex_init (&rail->CallbackLock);
58398 +
58399 +    ep_alloc_init (rail);
58400 +
58401 +    sys->Rails[rail->Number] = rail;
58402 +
58403 +    return 0;
58404 +}
58405 +
58406 +void
58407 +ep_destroy_rail (EP_RAIL *rail)
58408 +{
58409 +    ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED);
58410 +
58411 +    ep_alloc_fini (rail);
58412 +
58413 +    kmutex_destroy (&rail->CallbackLock);
58414 +
58415 +    rail->System->Rails[rail->Number] = NULL;
58416 +
58417 +    rail->Operations.DestroyRail (rail);
58418 +}
58419 +
58420 +/* We need to traverse the Subsystems lists backwards
58421 + * but it's not defined in <linux/list.h> */
58422 +#define list_for_each_backwards(pos,list) \
58423 +       for (pos = (list)->prev; pos != (list); \
58424 +            pos = (pos)->prev)
58425 +
58426 +void
58427 +__ep_stop_rail (EP_RAIL *rail)
58428 +{
58429 +    /* called holding the sys->Lock */
58430 +    EP_SYS           *sys = rail->System;
58431 +    struct list_head *el;
58432 +
58433 +    rail->Operations.StallRail (rail);
58434 +
58435 +    /* Notify all subsystems that this rail is being stopped */
58436 +    if (rail->State == EP_RAIL_STATE_RUNNING)
58437 +    {
58438 +       kmutex_lock (&sys->SubsysLock);
58439 +       list_for_each_backwards (el, &sys->Subsystems) { 
58440 +           EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
58441 +           
58442 +           if (subsys->RemoveRail)
58443 +               subsys->RemoveRail (subsys, sys, rail);
58444 +       }
58445 +       kmutex_unlock (&sys->SubsysLock);
58446 +
58447 +       ep_manager_remove_rail (sys, rail);
58448 +
58449 +       KMEM_FREE (rail->Nodes, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL));
58450 +
58451 +       statemap_destroy (rail->NodeChangeTmp);
58452 +       statemap_destroy (rail->NodeChangeMap);
58453 +       statemap_destroy (rail->NodeSet);
58454 +    }
58455 +
58456 +    ep_dvma_remove_rail (sys, rail);
58457 +    ep_shared_alloc_remove_rail (sys, rail);
58458 +
58459 +    rail->Operations.StopRail (rail);
58460 +
58461 +    rail->State = EP_RAIL_STATE_UNINITIALISED;
58462 +}
58463 +
58464 +void
58465 +ep_stop_rail (EP_RAIL *rail)
58466 +{
58467 +    EP_SYS *sys = rail->System;
58468 +
58469 +    /* stall ep_manager                      */
58470 +    /* and remove the rail from the manager  */
58471 +
58472 +    ep_kthread_stall (&sys->ManagerThread);
58473 +    if ( rail->State == EP_RAIL_STATE_STARTED ) 
58474 +        ep_manager_remove_rail (sys, rail);
58475 +    ep_kthread_resume (&sys->ManagerThread);
58476 +
58477 +    __ep_stop_rail (rail);
58478 +}
58479 +
58480 +int
58481 +ep_start_rail (EP_RAIL *rail)
58482 +{
58483 +    EP_SYS *sys = rail->System;
58484 +
58485 +    ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED);
58486 +
58487 +    if (rail->Operations.StartRail (rail) < 0)
58488 +       return -ENXIO;
58489 +    
58490 +    kmutex_lock (&sys->StartStopLock);
58491 +    /* Add this rail to the shared allocator */
58492 +    if (ep_shared_alloc_add_rail (rail->System, rail))
58493 +       goto failed;
58494 +
58495 +    /* Add this rail to dvma kmap */
58496 +    if (ep_dvma_add_rail (rail->System, rail))
58497 +       goto failed;
58498 +
58499 +    /* rail is now started */
58500 +    rail->State = EP_RAIL_STATE_STARTED;
58501 +
58502 +    /* notify the rail manager of the new rail */
58503 +    if (ep_manager_add_rail (rail->System, rail))
58504 +       goto failed;
58505 +
58506 +    kmutex_unlock (&sys->StartStopLock);
58507 +    return (ESUCCESS);
58508 +
58509 + failed:
58510 +    printk ("%s: start failed\n", rail->Name);
58511 +    kmutex_unlock (&sys->StartStopLock);
58512 +    __ep_stop_rail (rail);
58513 +
58514 +    return (ENOMEM);   
58515 +}
58516 +
58517 +void
58518 +ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys)
58519 +{
58520 +    kmutex_lock (&sys->SubsysLock);
58521 +    list_add_tail (&subsys->Link, &sys->Subsystems);
58522 +    kmutex_unlock (&sys->SubsysLock);
58523 +}
58524 +
58525 +void
58526 +ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys)
58527 +{
58528 +    kmutex_lock (&sys->SubsysLock);
58529 +    list_del (&subsys->Link);
58530 +    kmutex_unlock (&sys->SubsysLock);
58531 +}
58532 +
58533 +EP_SUBSYS *
58534 +ep_subsys_find (EP_SYS *sys, char *name)
58535 +{
58536 +    struct list_head *el;
58537 +
58538 +    ASSERT ( !in_interrupt());
58539 +
58540 +    kmutex_lock (&sys->SubsysLock); 
58541 +    list_for_each (el, &sys->Subsystems) {
58542 +       EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
58543 +
58544 +       if (! strcmp (subsys->Name, name))
58545 +       {
58546 +           kmutex_unlock (&sys->SubsysLock);
58547 +           return (subsys);
58548 +       }
58549 +    }
58550 +
58551 +    kmutex_unlock (&sys->SubsysLock);
58552 +    return (NULL);
58553 +}
58554 +
58555 +int
58556 +ep_waitfor_nodeid (EP_SYS *sys)
58557 +{
58558 +    int i, printed = 0;
58559 +    kcondvar_t Sleep;
58560 +    spinlock_t Lock;
58561 +
58562 +    kcondvar_init (&Sleep);
58563 +    spin_lock_init (&Lock);
58564 +
58565 +#define TICKS_TO_WAIT  (10*hz)
58566 +#define TICKS_PER_LOOP (hz/10)
58567 +    for (i = 0; sys->Position.pos_mode == ELAN_POS_UNKNOWN && i < TICKS_TO_WAIT; i += TICKS_PER_LOOP)
58568 +    {
58569 +       if (! printed++)
58570 +           printk ("ep: waiting for network position to be found\n");
58571 +
58572 +       spin_lock (&Lock);
58573 +       kcondvar_timedwait (&Sleep, &Lock, NULL, lbolt + TICKS_PER_LOOP);
58574 +       spin_unlock (&Lock);
58575 +    }
58576 +
58577 +    if (sys->Position.pos_mode == ELAN_POS_UNKNOWN)
58578 +       printk ("ep: network position not found after waiting\n");
58579 +    else if (printed)
58580 +       printk ("ep: network position found at nodeid %d\n", sys->Position.pos_nodeid);
58581 +
58582 +    spin_lock_destroy (&Lock);
58583 +    kcondvar_destroy (&Sleep);
58584 +
58585 +    return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? ELAN_INVALID_NODE : sys->Position.pos_nodeid);
58586 +}
58587 +
58588 +int
58589 +ep_nodeid (EP_SYS *sys)
58590 +{
58591 +    return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? ELAN_INVALID_NODE : sys->Position.pos_nodeid);
58592 +}
58593 +
58594 +int
58595 +ep_numnodes (EP_SYS *sys)
58596 +{
58597 +    return (sys->Position.pos_nodes);
58598 +}
58599 +
58600 +void
58601 +ep_fillout_stats(EP_RAIL *r, char *str) 
58602 +{
58603 +    sprintf(str+strlen(str),"SendMessageFailed %lu NeterrAtomicPacket %lu NeterrDmaPacket %lu \n", r->Stats.SendMessageFailed, r->Stats.NeterrAtomicPacket, r->Stats.NeterrDmaPacket);
58604 +    sprintf(str+strlen(str),"Rx %lu  %lu /sec\n",   GET_STAT_TOTAL(r->Stats,rx), GET_STAT_PER_SEC(r->Stats,rx) ); 
58605 +    sprintf(str+strlen(str),"MBytes %lu  %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,rx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,rx_len) / (1024*1024)); 
58606 +    sprintf(str+strlen(str),"Tx %lu  %lu /sec\n",   GET_STAT_TOTAL(r->Stats,tx), GET_STAT_PER_SEC(r->Stats,tx) ); 
58607 +    sprintf(str+strlen(str),"MBytes %lu  %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,tx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,tx_len) / (1024*1024)); 
58608 +}
58609 +
58610 +
58611 +/*
58612 + * Local variables:
58613 + * c-file-style: "stroustrup"
58614 + * End:
58615 + */
58616 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan3.c
58617 ===================================================================
58618 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kcomm_elan3.c   2004-02-23 16:02:56.000000000 -0500
58619 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan3.c        2005-07-28 14:52:52.886672960 -0400
58620 @@ -0,0 +1,504 @@
58621 +
58622 +/*
58623 + *    Copyright (c) 2003 by Quadrics Ltd.
58624 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
58625 + *
58626 + *    For licensing information please see the supplied COPYING file
58627 + *
58628 + */
58629 +
58630 +#ident "@(#)$Id: kcomm_elan3.c,v 1.31.8.3 2004/11/30 12:02:17 mike Exp $"
58631 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan3.c,v $ */
58632 +
58633 +#include <qsnet/kernel.h>
58634 +
58635 +#include <elan/kcomm.h>
58636 +
58637 +#include "kcomm_vp.h"
58638 +#include "kcomm_elan3.h"
58639 +#include "conf_linux.h"
58640 +
58641 +extern EP_CODE threadcode_elan3;
58642 +
58643 +unsigned int
58644 +ep3_create_rails (EP_SYS *sys, unsigned int disabled)
58645 +{
58646 +    unsigned int rmask = 0;
58647 +    ELAN3_DEV   *dev;
58648 +    EP_RAIL     *rail;
58649 +    int          i;
58650 +
58651 +    for (i = 0; i < EP_MAX_RAILS; i++)
58652 +    {
58653 +       if ((dev = elan3_device (i)) != NULL)
58654 +       {
58655 +           if ((rail = ep3_create_rail (sys, dev)) != NULL)
58656 +           {
58657 +               if (disabled & (1 << rail->Number))
58658 +                   printk ("%s: auto-start of device disabled by configuration\n", rail->Name);
58659 +               else
58660 +                   ep_start_rail (rail);
58661 +               
58662 +               ep_procfs_rail_init(rail);
58663 +
58664 +               rmask |= (1 << rail->Number);
58665 +           }
58666 +       }
58667 +    }
58668 +
58669 +    return rmask;
58670 +}
58671 +
58672 +EP_RAIL *
58673 +ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev)
58674 +{
58675 +    EP3_RAIL *rail;
58676 +    int       res;
58677 +
58678 +    KMEM_ZALLOC (rail, EP3_RAIL *, sizeof (EP3_RAIL), TRUE);
58679 +
58680 +    if (rail == NULL)
58681 +       return (EP_RAIL *) NULL;
58682 +
58683 +    if ((res = ep_init_rail (sys, &rail->Generic)) != 0)
58684 +    {
58685 +       KMEM_FREE (rail, sizeof (EP3_RAIL));
58686 +       return (EP_RAIL *) NULL;
58687 +    }
58688 +
58689 +    rail->Device = dev;
58690 +
58691 +    /* Install our rail operations */
58692 +    rail->Generic.Operations.DestroyRail      = ep3_destroy_rail;
58693 +    rail->Generic.Operations.StartRail        = ep3_start_rail;
58694 +    rail->Generic.Operations.StallRail        = ep3_stall_rail;
58695 +    rail->Generic.Operations.StopRail         = ep3_stop_rail;
58696 +
58697 +    rail->Generic.Operations.SdramAlloc       = ep3_sdram_alloc;
58698 +    rail->Generic.Operations.SdramFree        = ep3_sdram_free;
58699 +    rail->Generic.Operations.SdramWriteb      = ep3_sdram_writeb;
58700 +
58701 +    rail->Generic.Operations.KaddrMap         = ep3_kaddr_map;
58702 +    rail->Generic.Operations.SdramMap         = ep3_sdram_map;
58703 +    rail->Generic.Operations.Unmap            = ep3_unmap;
58704 +
58705 +    rail->Generic.Operations.DvmaReserve      = ep3_dvma_reserve;
58706 +    rail->Generic.Operations.DvmaRelease      = ep3_dvma_release;
58707 +    rail->Generic.Operations.DvmaSetPte       = ep3_dvma_set_pte;
58708 +    rail->Generic.Operations.DvmaReadPte      = ep3_dvma_read_pte;
58709 +    rail->Generic.Operations.DvmaUnload       = ep3_dvma_unload;
58710 +    rail->Generic.Operations.FlushTlb         = ep3_flush_tlb;
58711 +
58712 +    rail->Generic.Operations.ProbeRoute       = ep3_probe_route;
58713 +    rail->Generic.Operations.PositionFound    = ep3_position_found;
58714 +    rail->Generic.Operations.CheckPosition    = ep3_check_position;
58715 +    rail->Generic.Operations.NeterrFixup      = ep3_neterr_fixup;
58716 +
58717 +    rail->Generic.Operations.LoadSystemRoute  = ep3_load_system_route;
58718 +
58719 +    rail->Generic.Operations.LoadNodeRoute    = ep3_load_node_route;
58720 +    rail->Generic.Operations.UnloadNodeRoute  = ep3_unload_node_route;
58721 +    rail->Generic.Operations.LowerFilter      = ep3_lower_filter;
58722 +    rail->Generic.Operations.RaiseFilter      = ep3_raise_filter;
58723 +    rail->Generic.Operations.NodeDisconnected = ep3_node_disconnected;
58724 +
58725 +    rail->Generic.Operations.FlushFilters     = ep3_flush_filters;
58726 +    rail->Generic.Operations.FlushQueues      = ep3_flush_queues;
58727 +
58728 +    rail->Generic.Operations.AllocInputQ      = ep3_alloc_inputq;
58729 +    rail->Generic.Operations.FreeInputQ       = ep3_free_inputq;
58730 +    rail->Generic.Operations.EnableInputQ     = ep3_enable_inputq;
58731 +    rail->Generic.Operations.DisableInputQ    = ep3_disable_inputq;
58732 +    rail->Generic.Operations.PollInputQ       = ep3_poll_inputq;
58733 +
58734 +    rail->Generic.Operations.AllocOutputQ     = ep3_alloc_outputq;
58735 +    rail->Generic.Operations.FreeOutputQ      = ep3_free_outputq;
58736 +    rail->Generic.Operations.OutputQMsg       = ep3_outputq_msg;
58737 +    rail->Generic.Operations.OutputQState     = ep3_outputq_state;
58738 +    rail->Generic.Operations.OutputQSend      = ep3_outputq_send;
58739 +
58740 +    rail->Generic.Operations.FillOutStats     = ep3_fillout_stats;
58741 +
58742 +    rail->Generic.Devinfo = dev->Devinfo;
58743 +
58744 +    printk ("%s: connected via elan3 rev%c device %d\n", rail->Generic.Name,
58745 +           'a' + dev->Devinfo.dev_revision_id, dev->Instance);
58746 +
58747 +    return (EP_RAIL *) rail;
58748 +}
58749 +
58750 +void
58751 +ep3_destroy_rail (EP_RAIL *r)
58752 +{
58753 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58754 +    
58755 +    KMEM_FREE (rail, sizeof (EP3_RAIL));
58756 +}
58757 +
58758 +static int
58759 +ep3_attach_rail (EP3_RAIL *rail)
58760 +{
58761 +    ELAN3_DEV        *dev = rail->Device;
58762 +    ELAN3_CTXT       *ctxt;
58763 +    ELAN_CAPABILITY  *cap;
58764 +    int               ctx;
58765 +    unsigned long     flags;
58766 +
58767 +    if ((ctxt = elan3_alloc (dev, TRUE)) == (ELAN3_CTXT *) NULL)
58768 +    {
58769 +       printk ("%s: cannot allocate elan context\n", rail->Generic.Name);
58770 +       return -ENXIO;
58771 +    }
58772 +    
58773 +    ctxt->Operations = &ep3_elan3_ops;
58774 +    ctxt->Private    = (void *) rail;
58775 +    
58776 +    /* Initialise a capability and attach to the elan */
58777 +    KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE);
58778 +    
58779 +    elan_nullcap (cap);
58780 +    
58781 +    cap->cap_type        = ELAN_CAP_TYPE_KERNEL;
58782 +    cap->cap_version     = ELAN_CAP_VERSION_NUMBER;
58783 +    cap->cap_mycontext   = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
58784 +    cap->cap_lowcontext  = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
58785 +    cap->cap_highcontext = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
58786 +    cap->cap_railmask    = 1 << dev->Devinfo.dev_rail;
58787 +    
58788 +    /* Ensure the context filter is raised while we initialise */
58789 +    elan3_block_inputter (ctxt, TRUE);
58790 +
58791 +    if (elan3_doattach (ctxt, cap) != 0)
58792 +    {
58793 +       printk ("%s: cannot attach to kernel context\n", rail->Generic.Name);
58794 +
58795 +       KMEM_FREE (cap, sizeof (ELAN_CAPABILITY));
58796 +       elan3_free (ctxt);
58797 +       return -ENXIO;
58798 +    }
58799 +    KMEM_FREE (cap, sizeof (ELAN_CAPABILITY));
58800 +
58801 +    /* now attach to all the kernel comms input/dmaring/data contexts */
58802 +    spin_lock_irqsave (&dev->IntrLock, flags);
58803 +
58804 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
58805 +    {
58806 +       /* place it in the info table.  NOTE: don't call elan3mmu_set_info, as this */
58807 +       /* will queue the info again on the device's info list */
58808 +       dev->CtxtTable[ctx] = ctxt;
58809 +       
58810 +       elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58811 +       elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1);
58812 +    }
58813 +
58814 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
58815 +    {
58816 +       /* place it in the info table.  NOTE: don't call elan3mmu_set_info, as this */
58817 +       /* will queue the info again on the device's info list */
58818 +       dev->CtxtTable[ctx] = ctxt;
58819 +       
58820 +       elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58821 +       elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1);
58822 +    }
58823 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
58824 +
58825 +    /* Stash the ctxt,commandport, mmu and route table */
58826 +    rail->Ctxt        = ctxt;
58827 +    rail->CommandPort = ctxt->CommandPort;
58828 +    rail->Elan3mmu    = ctxt->Elan3mmu;
58829 +    rail->RouteTable  = ctxt->RouteTable;
58830 +
58831 +    return 0;
58832 +}
58833 +
58834 +static void
58835 +ep3_detach_rail (EP3_RAIL *rail)
58836 +{
58837 +    ELAN3_DEV *dev = rail->Device;
58838 +    unsigned long flags;
58839 +    int ctx;
58840 +
58841 +    /* detach from the elan */
58842 +    spin_lock_irqsave (&dev->IntrLock, flags);
58843 +
58844 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
58845 +    {
58846 +       dev->CtxtTable[ctx] = NULL;
58847 +       elan3mmu_detach (dev, ctx);
58848 +    }
58849 +
58850 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
58851 +    {
58852 +       dev->CtxtTable[ctx] = NULL;
58853 +       elan3mmu_detach (dev, ctx);
58854 +    }
58855 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
58856 +
58857 +    elan3_dodetach(rail->Ctxt);
58858 +    elan3_free (rail->Ctxt);
58859 +
58860 +    rail->Ctxt        = NULL;
58861 +    rail->CommandPort = 0;
58862 +    rail->Elan3mmu    = NULL;
58863 +    rail->RouteTable  = NULL;
58864 +}
58865 +
58866 +int
58867 +ep3_start_rail (EP_RAIL *r)
58868 +{
58869 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
58870 +    int           i, res;
58871 +    unsigned long flags;
58872 +
58873 +    if ((res = ep3_attach_rail (rail)) != 0)
58874 +       return res;
58875 +
58876 +    spin_lock_init (&rail->CookieLock);
58877 +    kmutex_init (&rail->HaltOpMutex);
58878 +    kcondvar_init (&rail->HaltOpSleep);
58879 +
58880 +    /* Initialise event interrupt cookie table */
58881 +    InitialiseCookieTable (&rail->CookieTable);
58882 +
58883 +    /* Load and map the thread code */
58884 +    rail->ThreadCode = threadcode_elan3;
58885 +    if (ep_loadcode (&rail->Generic, &rail->ThreadCode) != ESUCCESS)
58886 +       goto failed;
58887 +
58888 +    /* Map the command port to be visible to the Elan */
58889 +    ep3_ioaddr_map (&rail->Generic, EP3_COMMANDPORT_ADDR, rail->Ctxt->CommandPage, PAGESIZE, EP_PERM_WRITE);
58890 +    rail->CommandPortAddr = EP3_COMMANDPORT_ADDR + (rail->Ctxt->CommandPort - rail->Ctxt->CommandPage);
58891 +
58892 +    /* Allocate the elan visible sdram/main memory */
58893 +    if ((rail->RailElan = ep_alloc_elan (&rail->Generic, sizeof (EP3_RAIL_ELAN), 0, &rail->RailElanAddr)) == 0 ||
58894 +       (rail->RailMain = ep_alloc_main (&rail->Generic, sizeof (EP3_RAIL_MAIN), 0, &rail->RailMainAddr)) == 0)
58895 +    {
58896 +       goto failed;
58897 +    }
58898 +
58899 +    /* Allocate the system input queues at their fixed elan address */
58900 +    if (! (rail->QueueDescs = ep_alloc_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE, PAGESIZE, EP_PERM_ALL, 0)))
58901 +       goto failed;
58902 +
58903 +    /* Initialise all queue entries to be full */
58904 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
58905 +       elan3_sdram_writel (rail->Device, EP_SYSTEMQ_DESC(rail->QueueDescs, i) + offsetof (EP3_InputQueue, q_state), E3_QUEUE_FULL);
58906 +
58907 +    /* initialise the dma rings */
58908 +    if (DmaRingsCreate (rail))
58909 +       goto failed;
58910 +    
58911 +    if (InitialiseDmaRetries (rail))
58912 +       goto failed;
58913 +
58914 +    if (ep3_init_probenetwork (rail))
58915 +       goto failed;
58916 +
58917 +    /* can now drop the context filter for the system context */
58918 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
58919 +    elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, FALSE, 0, NULL);
58920 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
58921 +
58922 +    return 0;
58923 +
58924 + failed:
58925 +    printk ("ep3_start_rail: failed for rail %d\n", rail->Generic.Number);
58926 +    ep3_stop_rail (&rail->Generic);
58927 +
58928 +    return -ENOMEM;
58929 +}
58930 +
58931 +void
58932 +ep3_stall_rail (EP_RAIL *r)
58933 +{
58934 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
58935 +    int           ctx;
58936 +    unsigned long flags;
58937 +
58938 +    /* raise all the context filters */
58939 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
58940 +
58941 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
58942 +       elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58943 +
58944 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
58945 +       elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58946 +
58947 +    elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, TRUE, 0, NULL);
58948 +
58949 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
58950 +}
58951 +
58952 +void
58953 +ep3_stop_rail (EP_RAIL *r)
58954 +{
58955 +    EP3_RAIL *rail = (EP3_RAIL *) r;
58956 +
58957 +    ep3_destroy_probenetwork (rail);
58958 +
58959 +    if (rail->DmaRetryInitialised)
58960 +       DestroyDmaRetries (rail);
58961 +
58962 +    DmaRingsRelease(rail);
58963 +
58964 +    if (rail->Generic.State == EP_RAIL_STATE_RUNNING)
58965 +    {
58966 +       KMEM_FREE (rail->MainCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32));
58967 +
58968 +       ep_free_elan (&rail->Generic, rail->ElanCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32));
58969 +    }
58970 +
58971 +    if (rail->QueueDescs)
58972 +       ep_free_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE);
58973 +    rail->QueueDescs = 0;
58974 +
58975 +    if (rail->RailMain)
58976 +       ep_free_main (&rail->Generic, rail->RailMainAddr, sizeof (EP3_RAIL_MAIN));
58977 +    rail->RailMain = 0;
58978 +
58979 +    if (rail->RailElan)
58980 +       ep_free_elan (&rail->Generic, rail->RailElanAddr, sizeof (EP3_RAIL_ELAN));
58981 +    rail->RailElan = 0;
58982 +
58983 +    ep_unloadcode (&rail->Generic, &rail->ThreadCode);
58984 +
58985 +    DestroyCookieTable (&rail->CookieTable);
58986 +
58987 +    ep_perrail_unmap (&rail->Generic, rail->Ctxt->CommandPage, PAGESIZE);
58988 +
58989 +    kcondvar_destroy (&rail->HaltOpSleep);
58990 +    kmutex_destroy (&rail->HaltOpMutex);
58991 +    spin_lock_destroy (&rail->CookieLock);
58992 +
58993 +    ep3_detach_rail (rail);
58994 +}
58995 +
58996 +void
58997 +ep3_position_found (EP_RAIL *r, ELAN_POSITION *pos)
58998 +{
58999 +    EP3_RAIL   *rail = (EP3_RAIL *) r;
59000 +    sdramaddr_t addr;
59001 +
59002 +    rail->SwitchBroadcastLevelTick = lbolt;
59003 +
59004 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, NodeId), pos->pos_nodeid);
59005 +
59006 +    /* Allocate Network Identify cookie state */
59007 +    KMEM_ZALLOC (rail->MainCookies, E3_uint32 *, pos->pos_nodes * sizeof (E3_uint32), 1);
59008 +    
59009 +    if (! (addr = ep_alloc_elan (&rail->Generic, pos->pos_nodes * sizeof (E3_uint32), 0, &rail->ElanCookies)))
59010 +       panic ("ep: PositionFound: cannot allocate elan cookies array\n");
59011 +
59012 +    elan3_sdram_zeroq_sdram (rail->Device, addr, pos->pos_nodes * sizeof (E3_uint32));
59013 +
59014 +    ep3_probe_position_found (rail, pos);
59015 +}
59016 +
59017 +sdramaddr_t
59018 +ep3_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size)
59019 +{
59020 +    return elan3_sdram_alloc (((EP3_RAIL *) r)->Device, size);
59021 +}
59022 +
59023 +void
59024 +ep3_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size)
59025 +{
59026 +    elan3_sdram_free (((EP3_RAIL *) r)->Device, addr, size);
59027 +}
59028 +
59029 +void
59030 +ep3_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val)
59031 +{
59032 +    elan3_sdram_writeb (((EP3_RAIL *) r)->Device, addr, val);
59033 +}
59034 +
59035 +void
59036 +ep3_flush_tlb (EP_RAIL *r)
59037 +{
59038 +    EP3_RAIL *rail = (EP3_RAIL *) r;
59039 +    ELAN3_DEV *dev = rail->Device;
59040 +    unsigned long flags;
59041 +
59042 +    spin_lock_irqsave (&dev->TlbLock, flags);
59043 +    
59044 +    IncrStat (dev, TlbFlushes);
59045 +    
59046 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH);
59047 +    mmiob ();
59048 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
59049 +
59050 +    while (! (read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED))
59051 +       mb();
59052 +}
59053 +
59054 +void
59055 +ep3_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode)
59056 +{
59057 +    EP3_RAIL  *rail = (EP3_RAIL *) r;
59058 +    E3_uint16  flits[MAX_FLITS];
59059 +    int        nflits;
59060 +    
59061 +    nflits = GenerateRoute (&rail->Generic.Position, flits, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY);
59062 +       
59063 +    if (LoadRoute (rail->Device, rail->RouteTable, vp, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
59064 +    {
59065 +       /* XXXX: whilst LoadRoute() can fail - it is not likely. */
59066 +       panic ("ep3_load_system_route: cannot load p2p route entry\n");
59067 +    }
59068 +}
59069 +
59070 +void
59071 +ep3_load_node_route (EP_RAIL *r, unsigned nodeId)
59072 +{
59073 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
59074 +    E3_uint16     flits[MAX_FLITS];
59075 +    int           nflits;
59076 +
59077 +    nflits = GenerateRoute (&rail->Generic.Position, flits, nodeId, nodeId, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
59078 +
59079 +    if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId), EP3_CONTEXT_NUM(rail->Generic.Position.pos_nodeid), nflits, flits) != 0)
59080 +       panic ("ep3_load_node_route: cannot load p2p data route entry\n");
59081 +}
59082 +
59083 +void
59084 +ep3_unload_node_route (EP_RAIL *r, unsigned nodeId)
59085 +{
59086 +    EP3_RAIL *rail = (EP3_RAIL *) r;
59087 +
59088 +    ClearRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId));
59089 +}
59090 +
59091 +void
59092 +ep3_lower_filter (EP_RAIL *r, unsigned nodeId)
59093 +{
59094 +    EP3_RAIL *rail = (EP3_RAIL *) r;
59095 +    unsigned long flags;
59096 +
59097 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
59098 +    elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 0, 0, NULL);
59099 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
59100 +}
59101 +
59102 +void
59103 +ep3_raise_filter (EP_RAIL *r, unsigned nodeId)
59104 +{
59105 +    EP3_RAIL *rail = (EP3_RAIL *) r;
59106 +    unsigned long flags;
59107 +
59108 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
59109 +    elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 1, 0, NULL);
59110 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
59111 +}
59112 +
59113 +void
59114 +ep3_node_disconnected (EP_RAIL *r, unsigned nodeId)
59115 +{
59116 +    FreeStalledDmas ((EP3_RAIL *) r, nodeId);
59117 +}
59118 +
59119 +void
59120 +ep3_fillout_stats(EP_RAIL *r, char *str) 
59121 +{
59122 +    /* no stats here yet */
59123 +    /* EP3_RAIL *ep3rail = (EP3_RAIL *)r; */
59124 +}
59125 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan3.h
59126 ===================================================================
59127 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kcomm_elan3.h   2004-02-23 16:02:56.000000000 -0500
59128 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan3.h        2005-07-28 14:52:52.887672808 -0400
59129 @@ -0,0 +1,431 @@
59130 +/*
59131 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
59132 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
59133 + *
59134 + *    For licensing information please see the supplied COPYING file
59135 + *
59136 + */
59137 +
59138 +#ifndef __EP_KCOMM_ELAN3_H
59139 +#define __EP_KCOMM_ELAN3_H
59140 +
59141 +#ident "@(#)$Id: kcomm_elan3.h,v 1.50.8.3 2004/12/14 10:19:14 mike Exp $ $Name: QSNETMODULES-4-31_20050321 $"
59142 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan3.h,v $*/
59143 +
59144 +#if !defined(__ELAN3__)
59145 +#include <elan3/elanregs.h>
59146 +#include <elan3/elandev.h>
59147 +#include <elan3/elanvp.h>
59148 +#include <elan3/elan3mmu.h>
59149 +#include <elan3/elanctxt.h>
59150 +#include <elan3/elandebug.h>
59151 +#endif /* !defined(__ELAN3__) */
59152 +
59153 +#include <elan3/trtype.h>
59154 +
59155 +/* private address allocation */
59156 +#define EP3_TEXT_BASE                  0xFF000000              /* base address for thread code (defined in makerules.elan3) */
59157 +#define EP3_COMMANDPORT_ADDR           0xFFF00000              /* mapping address for elan command port */
59158 +
59159 +#define EP3_STACK_SIZE                 1024                    /* default thread code stack size */
59160 +
59161 +#define EP3_PACEMAKER_EVENTADDR                0xfeedbeef              /* mis-aligned address used by heartbeat pacemaker */
59162 +
59163 +/* context number allocation */
59164 +#define EP3_CONTEXT_NUM(nodeId)                ((ELAN3_KCOMM_BASE_CONTEXT_NUM + (nodeId)) | SYS_CONTEXT_BIT)
59165 +#define EP3_CONTEXT_ISDATA(ctx)                (((ctx) & MAX_ROOT_CONTEXT_MASK) >= ELAN3_KCOMM_BASE_CONTEXT_NUM && \
59166 +                                        ((ctx) & MAX_ROOT_CONTEXT_MASK) <= ELAN3_KCOMM_TOP_CONTEXT_NUM)
59167 +#define EP3_CONTEXT_TO_NODE(ctx)       (((ctx) & MAX_ROOT_CONTEXT_MASK) - ELAN3_KCOMM_BASE_CONTEXT_NUM)
59168 +
59169 +/* DMA issuing rings */
59170 +#define EP3_RING_CRITICAL              0
59171 +#define EP3_RING_CRITICAL_LEN          128
59172 +#define EP3_RING_HIGH_PRI              1
59173 +#define EP3_RING_HIGH_PRI_LEN          64
59174 +#define EP3_RING_LOW_PRI               2
59175 +#define EP3_RING_LOW_PRI_LEN           32
59176 +#define EP3_NUM_RINGS                  3
59177 +
59178 +/* Value to "return" from c_close() when envelope handled  by the trap handler */
59179 +#define EP3_PAckStolen                 4
59180 +
59181 +/* unimplemented instruction trap types for thread code */
59182 +#define EP3_UNIMP_TRAP_NO_DESCS                0
59183 +#define EP3_UNIMP_TRAP_PACKET_NACKED   1
59184 +#define EP3_UNIMP_THREAD_HALTED                2
59185 +#define EP3_NUM_UNIMP_TRAPS            3
59186 +
59187 +/* forward declarations */
59188 +typedef struct ep3_rail        EP3_RAIL;
59189 +
59190 +/* block copy elan3 inputter queue - with waitvent0 */
59191 +typedef struct ep3_inputqueue
59192 +{
59193 +    volatile E3_uint32 q_state;        /* queue is full=bit0, queue is locked=bit8 */
59194 +    volatile E3_Addr   q_bptr;         /* block aligned ptr to current back item */
59195 +    E3_uint32          q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
59196 +    E3_Addr            q_top;          /* block aligned ptr to last queue item */
59197 +    E3_Addr            q_base;         /* block aligned ptr to first queue item */
59198 +    volatile E3_Addr   q_fptr;         /* block aligned ptr to current front item */
59199 +    E3_BlockCopyEvent  q_event;        /* queue block copy event */
59200 +    E3_uint32          q_pad[4];       /* pad to 64 bytes */
59201 +    E3_Addr            q_wevent;       /* WaitEvent0 struct */
59202 +    E3_int32           q_wcount;
59203 +} EP3_InputQueue;
59204 +
59205 +
59206 +#if !defined(__ELAN3__)
59207 +
59208 +/* dma retries types and retry times */
59209 +typedef struct ep3_retry_dma
59210 +{
59211 +    struct list_head    Link;                                  /* chained on free/retry list */
59212 +    long               RetryTime;                              /* "lbolt" to retry at */
59213 +    E3_DMA_BE          Dma;                                    /* DMA (in main memory) */
59214 +} EP3_RETRY_DMA;
59215 +
59216 +typedef struct ep3_dma_ring
59217 +{
59218 +    sdramaddr_t                pEvent;  
59219 +    E3_Addr            epEvent;
59220 +    
59221 +    sdramaddr_t                pDma;     
59222 +    E3_Addr            epDma; 
59223 +    
59224 +    E3_uint32         *pDoneBlk; 
59225 +    E3_Addr            epDoneBlk; 
59226 +    
59227 +    int                        Entries;                                /* number of slots in array  */
59228 +    int                        Position;                               /* current position in array */
59229 +
59230 +    ioaddr_t            CommandPort;
59231 +    ioaddr_t           CommandPage;
59232 +    DeviceMappingHandle CommandPageHandle;
59233 +} EP3_DMA_RING;
59234 +
59235 +#define DMA_RING_EVENT(ring,n)         ((ring)->pEvent + (n)*sizeof (E3_BlockCopyEvent))
59236 +#define DMA_RING_EVENT_ELAN(ring,n)    ((ring)->epEvent + (n)*sizeof (E3_BlockCopyEvent))
59237 +
59238 +#define DMA_RING_DMA(ring,n)           ((ring)->pDma   + (n)*sizeof (E3_DMA))
59239 +#define DMA_RING_DMA_ELAN(ring,n)      ((ring)->epDma   + (n)*sizeof (E3_DMA))
59240 +
59241 +#define DMA_RING_DONE_ELAN(ring,n)     ((ring)->epDoneBlk + (n)*sizeof (E3_uint32))
59242 +
59243 +/* Event interrupt cookie operations and lookup table */
59244 +typedef struct ep3_cookie_ops
59245 +{
59246 +    void       (*Event)       (EP3_RAIL *rail, void *arg);                             /* called from the interrupt handler when an event is "set" */
59247 +    void       (*DmaRetry)    (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error);  /* called from the interrupt handler when a DMA is "nacked" */
59248 +    void       (*DmaCancelled)(EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);             /* called from the interrupt handler/flush disconnecting when cancelled. */
59249 +    void       (*DmaVerify)   (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);             /* called from multiple places, to check dma is consistent with state. */
59250 +} EP3_COOKIE_OPS;
59251 +
59252 +typedef struct ep3_cookie
59253 +{
59254 +    struct ep3_cookie  *Next;                                  /* Cookies are chained in hash table. */
59255 +    E3_uint32          Cookie;                                 /* Cookie stored in ev_Type */
59256 +    EP3_COOKIE_OPS     *Operations;                            /* Cookie operations */
59257 +    void              *Arg;                                    /* User's argument. */
59258 +} EP3_COOKIE;
59259 +
59260 +#define EP3_COOKIE_HASH_SIZE           (256)
59261 +#define EP3_HASH_COOKIE(a)             ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP3_COOKIE_HASH_SIZE-1))
59262 +
59263 +typedef struct ep3_cookie_table
59264 +{
59265 +    spinlock_t         Lock;
59266 +    EP3_COOKIE        *Entries[EP3_COOKIE_HASH_SIZE];
59267 +} EP3_COOKIE_TABLE;
59268 +
59269 +#endif /* !defined(__ELAN3__) */
59270 +
59271 +#define EP3_EVENT_FREE                 ((1 << 4) | EV_WCOPY)
59272 +#define EP3_EVENT_ACTIVE               ((2 << 4) | EV_WCOPY)
59273 +/* DONE == Cookie */
59274 +#define EP3_EVENT_FAILED               ((3 << 4) | EV_WCOPY)
59275 +#define EP3_EVENT_PRIVATE              ((4 << 4) | EV_WCOPY)
59276 +
59277 +/* The event cookie can get posted (and seen) before the write has */
59278 +/* hit main memory - in this case the event count is <= 0 and the block */
59279 +/* will be marked as ACTIVE - but could transition to DONE at any time */
59280 +/* Also for a word copy event, the value written into the "done" word */
59281 +/* can be the event interrupt cookie rather than the "source" value */
59282 +/* this happens since the uCode does not wait for the write to have */
59283 +/* occurred before overwriting TMP_0 with the cookie */
59284 +#define EP3_EVENT_FIRING(edev, event, cookie, done) \
59285 +       (((((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie) || (done) == EP3_EVENT_ACTIVE) && \
59286 +        (int) elan3_sdram_readl (edev, (event) + offsetof (E3_BlockCopyEvent, ev_Count)) <= 0)
59287 +#define EP3_EVENT_FIRED(cookie, done) \
59288 +       (((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie)
59289 +
59290 +
59291 +/* Time limit to wait while event is firing and block write has not occurred */
59292 +#define EP3_EVENT_FIRING_TLIMIT                16384                   /* 1023 uS */
59293 +
59294 +#define EP3_INIT_COPY_EVENT(event, cookie, dest, intr)                                                 \
59295 +{                                                                                                      \
59296 +       (event).ev_Count  = 0;                                                                          \
59297 +       (event).ev_Type   = (intr) ? EV_TYPE_BCOPY | EV_TYPE_EVIRQ | (cookie).Cookie : EV_TYPE_BCOPY;   \
59298 +       (event).ev_Source = (cookie).Cookie | EV_WCOPY;                                                 \
59299 +       (event).ev_Dest   = (dest) | EV_TYPE_BCOPY_WORD;                                                \
59300 +}
59301 +
59302 +#if !defined(__ELAN3__)
59303 +
59304 +/* Generic input queues which can be polled */
59305 +typedef struct ep3_inputq
59306 +{
59307 +    EP3_COOKIE         q_cookie;
59308 +    unsigned int       q_slotSize;
59309 +    unsigned int       q_slotCount;
59310 +
59311 +    void              *q_slots;
59312 +    EP_ADDR            q_slotsAddr;
59313 +
59314 +    EP_INPUTQ_CALLBACK *q_callback;
59315 +    void              *q_arg;
59316 +
59317 +    sdramaddr_t                q_desc;
59318 +    E3_Addr            q_descAddr;
59319 +
59320 +    E3_Addr            q_base;
59321 +    E3_Addr            q_top;
59322 +    E3_Addr            q_fptr;
59323 +
59324 +    E3_uint32          q_waitCount;
59325 +} EP3_INPUTQ;
59326 +
59327 +typedef struct ep3_outputq
59328 +{
59329 +    EP3_COOKIE         q_cookie;
59330 +
59331 +    unsigned int       q_slotCount;                            /* # slots allocated */
59332 +    unsigned int       q_slotSize;                             /* size of each slot (rounded up) */
59333 +
59334 +    sdramaddr_t                q_elan;
59335 +    E3_Addr            q_elanAddr;
59336 +
59337 +    void              *q_main;
59338 +    E3_Addr            q_mainAddr;
59339 +} EP3_OUTPUTQ;
59340 +
59341 +#endif /* !defined(__ELAN3__) */
59342 +
59343 +/* per-rail elan memory portion of device */
59344 +typedef struct ep3_rail_elan
59345 +{
59346 +    E3_uint16           ProbeSource0[TR_TRACEROUTE_ENTRIES];   /* 32 byte aligned */
59347 +    E3_uint16           ProbeSource1[TR_TRACEROUTE_ENTRIES];
59348 +
59349 +    E3_BlockCopyEvent   ProbeDone;                             /* 16 byte aligned */
59350 +    E3_Event            ProbeStart;                            /* 8 byte aligned */
59351 +    
59352 +    E3_uint32           ProbeType;                             /* 4 byte aligned */
59353 +    E3_uint32           ProbeLevel;
59354 +
59355 +    E3_uint32           NodeId;
59356 +} EP3_RAIL_ELAN;
59357 +
59358 +/* values for ProbeType */
59359 +#define PROBE_SINGLE                   0
59360 +#define PROBE_MULTIPLE                 1
59361 +/* number of attempts for each type */
59362 +#define PROBE_SINGLE_ATTEMPTS          10
59363 +#define PROBE_SINGLE_TIMEOUTS          5
59364 +#define PROBE_MULTIPLE_ATTEMPTS                20
59365 +#define PROBE_MULTIPLE_TIMEOUTS                10
59366 +
59367 +/* per-rail main memory portion of device */
59368 +typedef struct ep3_rail_main
59369 +{
59370 +    E3_uint16          ProbeDest0[TR_TRACEROUTE_ENTRIES];      /* 32 byte aligned */
59371 +    E3_uint16          ProbeDest1[TR_TRACEROUTE_ENTRIES];
59372 +    
59373 +    E3_uint32          ProbeDone;                              /* 4 byte aligned */
59374 +    E3_uint32          ProbeResult;
59375 +    E3_uint32          ProbeLevel;
59376 +} EP3_RAIL_MAIN;
59377 +
59378 +#if !defined(__ELAN3__)
59379 +
59380 +struct ep3_rail
59381 +{
59382 +    EP_RAIL            Generic;                                /* Generic rail */
59383 +
59384 +    ELAN3_DEV          *Device;                                        /* Elan device we're using */
59385 +    ELAN3_CTXT        *Ctxt;                                   /* Elan context struct */
59386 +    ioaddr_t            CommandPort;                           /* commandport from context */
59387 +    E3_Addr            CommandPortAddr;                        /*  and address mapped into elan */
59388 +
59389 +    ELAN3_ROUTE_TABLE  *RouteTable;                            /* routetable from context */
59390 +    ELAN3MMU          *Elan3mmu;                               /* elanmmu from context */
59391 +
59392 +    EP3_COOKIE_TABLE    CookieTable;                           /* Event cookie table */
59393 +
59394 +    EP_CODE            ThreadCode;                             /* copy of thread code */
59395 +    unsigned int        CommandPortEventTrap;                  /* flag to indicate command port eventint queue overflow trap */
59396 +
59397 +    sdramaddr_t         RailElan;                              /* Elan visible main/sdram portions of */
59398 +    E3_Addr             RailElanAddr;                          /* device structure */
59399 +    EP3_RAIL_MAIN      *RailMain;
59400 +    E3_Addr            RailMainAddr;
59401 +
59402 +    /* small system message queues */
59403 +    sdramaddr_t                QueueDescs;                             /* Input Queue descriptors */
59404 +
59405 +    /* Network position prober */
59406 +    E3_Addr            ProbeStack;                             /* Network position thread command structure */
59407 +    EP3_COOKIE         ProbeCookie;                            /*   event cookie for Done event */
59408 +    kcondvar_t         ProbeWait;                              /*   place to wait on probe thread */
59409 +    spinlock_t         ProbeLock;                              /*     and lock */
59410 +    volatile int        ProbeDone;                             /*     and flag to indicate it's done */
59411 +
59412 +    E3_uint16          ProbeDest0[TR_TRACEROUTE_ENTRIES];      /* last result of CheckNetworkPosition */
59413 +    E3_uint16          ProbeDest1[TR_TRACEROUTE_ENTRIES];
59414 +    E3_uint32          ProbeResult;
59415 +
59416 +    long               ProbeLevelTick[ELAN_MAX_LEVELS];
59417 +    long               SwitchBroadcastLevelTick;
59418 +
59419 +    /* rings for issuing dmas */
59420 +    EP3_DMA_RING        DmaRings[EP3_NUM_RINGS];
59421 +
59422 +    /* retry lists for dmas */
59423 +    struct list_head    DmaRetries[EP_NUM_RETRIES];            /* Dma retry lists */
59424 +    struct list_head    DmaRetryFreeList;                      /*   and free list */
59425 +    u_int              DmaRetryCount;                          /*   and total retry count */
59426 +    u_int              DmaRetryReserved;                       /*   and number reserved */
59427 +    u_int              DmaRetryThreadShouldStall;              /*   count of reasons to stall retries */
59428 +    u_int              DmaRetryThreadStarted:1;                /*   dma retry thread running */
59429 +    u_int              DmaRetryThreadShouldStop:1;             /*     but should stop */
59430 +    u_int              DmaRetryThreadStopped:1;                /*     and now it's stopped */
59431 +    u_int              DmaRetryInitialised:1;                  /* have initialised dma retries */
59432 +
59433 +    spinlock_t         DmaRetryLock;                           /*   spinlock protecting lists */
59434 +    kcondvar_t         DmaRetryWait;                           /*   place retry thread sleeps */
59435 +    long               DmaRetryTime;                           /*   and when it will next wakeup */
59436 +    unsigned int        DmaRetrySleeping;                      /*   and it's sleeping there */
59437 +
59438 +    /* Network Identify Cookies */
59439 +    E3_uint32         *MainCookies;                            /* One cookie allocator per-node for main*/
59440 +    E3_Addr            ElanCookies;                            /*   and one for elan */
59441 +    spinlock_t         CookieLock;                             /* spinlock to protect main cookies */
59442 +
59443 +    /* Halt operation flags for flushing. */
59444 +    kmutex_t            HaltOpMutex;                           /* serialize access to halt operations */
59445 +    unsigned int       HaltOpCompleted;                        /* flag to indicate halt operation completed */
59446 +    kcondvar_t         HaltOpSleep;                            /*   place to wait for it to complete */
59447 +
59448 +    /* Network error state */
59449 +    kcondvar_t         NetworkErrorSleep;                      /* place to sleep for network error halt operation */
59450 +    u_int              NetworkErrorFlushed;                    /*   and flag to indicate flushed */
59451 +
59452 +
59453 +    EP3_RAIL_STATS     Stats;                                  /* statistics */
59454 +};
59455 +
59456 +/* support.c */
59457 +
59458 +extern ELAN3_OPS  ep3_elan3_ops;
59459 +
59460 +extern E3_uint32    LocalCookie (EP3_RAIL *rail, unsigned int remoteNode);
59461 +extern E3_uint32    RemoteCookie (EP3_RAIL *rail, unsigned int remoteNode);
59462 +
59463 +extern void         InitialiseCookieTable (EP3_COOKIE_TABLE *table);
59464 +extern void         DestroyCookieTable (EP3_COOKIE_TABLE *table);
59465 +extern void         RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie, 
59466 +                                   E3_Addr event, EP3_COOKIE_OPS *ops, void *arg);
59467 +extern void         DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie);
59468 +extern EP3_COOKIE   *LookupCookie (EP3_COOKIE_TABLE *table, uint32_t cookie);
59469 +extern EP3_COOKIE   *LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr);
59470 +
59471 +extern int          DmaRingsCreate (EP3_RAIL *rail);
59472 +extern void         DmaRingsRelease (EP3_RAIL *rail);
59473 +extern int          IssueDma (EP3_RAIL *rail, E3_DMA_BE *dma, int type, int retryThread);
59474 +
59475 +extern int          IssueWaitevent (EP3_RAIL *rail, E3_Addr value);
59476 +extern void         IssueSetevent (EP3_RAIL *rail, E3_Addr value);
59477 +extern void         IssueRunThread (EP3_RAIL *rail, E3_Addr value);
59478 +extern long         DmaRetryTime (int type);
59479 +extern int          InitialiseDmaRetries (EP3_RAIL *rail);
59480 +extern void         DestroyDmaRetries (EP3_RAIL *rail);
59481 +extern int          ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr);
59482 +extern void         ReleaseDmaRetries (EP3_RAIL *rail, int count);
59483 +extern void         StallDmaRetryThread (EP3_RAIL *rail);
59484 +extern void         ResumeDmaRetryThread (EP3_RAIL *rail);
59485 +extern void         QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval);
59486 +extern void         QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma);
59487 +extern void         FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId);
59488 +
59489 +extern void         SetQueueLocked(EP3_RAIL *rail, sdramaddr_t qaddr);
59490 +
59491 +/* threadcode_elan3.c */
59492 +extern E3_Addr    ep3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack,
59493 +                                  int stackSize, int nargs, ...);
59494 +
59495 +/* probenetwork.c */
59496 +extern int        ep3_init_probenetwork (EP3_RAIL *rail);
59497 +extern void       ep3_destroy_probenetwork (EP3_RAIL *rail);
59498 +extern void       ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos);
59499 +extern int        ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw);
59500 +extern int        ep3_check_position (EP_RAIL *rail);
59501 +
59502 +/* neterr_elan3.c */
59503 +extern void       ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
59504 +
59505 +/* kcomm_elan3.c */
59506 +extern EP_RAIL    *ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev);
59507 +extern void        ep3_destroy_rail (EP_RAIL *rail);
59508 +
59509 +extern int         ep3_start_rail (EP_RAIL *rail);
59510 +extern void        ep3_stall_rail (EP_RAIL *rail);
59511 +extern void        ep3_stop_rail (EP_RAIL *rail);
59512 +
59513 +extern void       ep3_position_found (EP_RAIL *rail, ELAN_POSITION *pos);
59514 +
59515 +extern sdramaddr_t ep3_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size);
59516 +extern void        ep3_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size);
59517 +extern void        ep3_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
59518 +
59519 +extern void        ep3_flush_tlb (EP_RAIL *r);
59520 +extern void        ep3_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode);
59521 +extern void        ep3_load_node_route (EP_RAIL *r, unsigned int nodeId);
59522 +extern void        ep3_unload_node_route (EP_RAIL *r, unsigned int nodeId);
59523 +extern void        ep3_lower_filter (EP_RAIL *r, unsigned int nodeId);
59524 +extern void        ep3_raise_filter (EP_RAIL *rail, unsigned int nodeId);
59525 +extern void        ep3_node_disconnected (EP_RAIL *r, unsigned int nodeId);
59526 +
59527 +extern void        ep3_fillout_stats(EP_RAIL *rail, char *str);
59528 +
59529 +/* kmap_elan3.c */
59530 +extern void        ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr);
59531 +extern void        ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr);
59532 +extern void        ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm);
59533 +extern void        ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len);
59534 +extern void       *ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages);
59535 +extern void        ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private);
59536 +extern void        ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm);
59537 +extern physaddr_t  ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index);
59538 +extern void        ep3_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages);
59539 +
59540 +/* kmsg_elan3.c */
59541 +extern EP_INPUTQ  *ep3_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount,
59542 +                                    EP_INPUTQ_CALLBACK *callback, void *arg);
59543 +extern void        ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q);
59544 +extern void        ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q);
59545 +extern void        ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q);
59546 +extern int         ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
59547 +extern EP_OUTPUTQ *ep3_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount);
59548 +extern void        ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q);
59549 +extern void       *ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
59550 +extern int         ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
59551 +extern int         ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size,
59552 +                                    unsigned int nodeId, unsigned int qnum, unsigned int retries);
59553 +
59554 +/* support_elan3.c */
59555 +extern void        ep3_flush_filters (EP_RAIL *r);
59556 +extern void        ep3_flush_queues (EP_RAIL *r);
59557 +
59558 +#endif /* !defined(__ELAN3__) */
59559 +
59560 +#endif /* __EP_KCOMM_ELAN3_H */
59561 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan4.c
59562 ===================================================================
59563 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kcomm_elan4.c   2004-02-23 16:02:56.000000000 -0500
59564 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan4.c        2005-07-28 14:52:52.888672656 -0400
59565 @@ -0,0 +1,526 @@
59566 +/*
59567 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
59568 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
59569 + *
59570 + *    For licensing information please see the supplied COPYING file
59571 + *
59572 + */
59573 +
59574 +#ident "@(#)$Id: kcomm_elan4.c,v 1.16.2.3 2004/11/30 12:02:17 mike Exp $ $Name: QSNETMODULES-4-31_20050321 $"
59575 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan4.c,v $*/
59576 +
59577 +#include <qsnet/kernel.h>
59578 +#include <qsnet/kthread.h>
59579 +
59580 +#include <elan/kcomm.h>
59581 +
59582 +#include "kcomm_vp.h"
59583 +#include "kcomm_elan4.h"
59584 +#include "conf_linux.h"
59585 +
59586 +extern EP_CODE threadcode_elan4;
59587 +
59588 +unsigned int
59589 +ep4_create_rails (EP_SYS *sys, unsigned int disabled)
59590 +{
59591 +    unsigned int rmask = 0;
59592 +    ELAN4_DEV   *dev;
59593 +    EP_RAIL     *rail;
59594 +    int          i;
59595 +
59596 +    for (i = 0; i < EP_MAX_RAILS; i++)
59597 +    {
59598 +       if ((dev = elan4_reference_device (i, ELAN4_STATE_STARTED)) != NULL)
59599 +       {
59600 +           if ((rail = ep4_create_rail (sys, dev)) == NULL)
59601 +               elan4_dereference_device (dev);
59602 +           else
59603 +           {
59604 +               if (disabled & (1 << rail->Number))
59605 +                   printk ("%s: auto-start of device disabled by configuration\n", rail->Name);
59606 +               else
59607 +                   ep_start_rail (rail);
59608 +               
59609 +               ep_procfs_rail_init(rail);
59610 +
59611 +               rmask |= (1 << rail->Number);
59612 +           }
59613 +       }
59614 +    }
59615 +
59616 +    if (rmask)
59617 +       qsnet_debug_alloc();
59618 +
59619 +    return rmask;
59620 +}
59621 +
59622 +EP_RAIL *
59623 +ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev)
59624 +{
59625 +    EP4_RAIL *rail;
59626 +    int res;
59627 +
59628 +    KMEM_ZALLOC (rail, EP4_RAIL *, sizeof (EP4_RAIL), 1);
59629 +
59630 +    if (rail == NULL)
59631 +       return (EP_RAIL *) NULL;
59632 +    
59633 +    if ((res = ep_init_rail (sys, &rail->r_generic)) != 0)
59634 +    {
59635 +       KMEM_FREE (rail, sizeof (EP4_RAIL));
59636 +       return (EP_RAIL *) NULL;
59637 +    }
59638 +
59639 +    rail->r_ctxt.ctxt_dev = dev;
59640 +
59641 +    /* install our rail operations */
59642 +    rail->r_generic.Operations.DestroyRail      = ep4_destroy_rail;
59643 +    rail->r_generic.Operations.StartRail        = ep4_start_rail;
59644 +    rail->r_generic.Operations.StallRail        = ep4_stall_rail;
59645 +    rail->r_generic.Operations.StopRail         = ep4_stop_rail;    
59646 +
59647 +    rail->r_generic.Operations.SdramAlloc       = ep4_sdram_alloc;
59648 +    rail->r_generic.Operations.SdramFree        = ep4_sdram_free;
59649 +    rail->r_generic.Operations.SdramWriteb      = ep4_sdram_writeb;
59650 +
59651 +    rail->r_generic.Operations.KaddrMap         = ep4_kaddr_map;
59652 +    rail->r_generic.Operations.SdramMap         = ep4_sdram_map;
59653 +    rail->r_generic.Operations.Unmap            = ep4_unmap;
59654 +
59655 +    rail->r_generic.Operations.DvmaReserve     = ep4_dvma_reserve;
59656 +    rail->r_generic.Operations.DvmaRelease     = ep4_dvma_release;
59657 +    rail->r_generic.Operations.DvmaSetPte      = ep4_dvma_set_pte;
59658 +    rail->r_generic.Operations.DvmaReadPte     = ep4_dvma_read_pte;
59659 +    rail->r_generic.Operations.DvmaUnload      = ep4_dvma_unload;
59660 +    rail->r_generic.Operations.FlushTlb                = ep4_flush_tlb;
59661 +
59662 +    rail->r_generic.Operations.ProbeRoute       = ep4_probe_route;
59663 +
59664 +    rail->r_generic.Operations.PositionFound    = ep4_position_found;
59665 +    rail->r_generic.Operations.CheckPosition    = ep4_check_position;
59666 +    rail->r_generic.Operations.NeterrFixup      = ep4_neterr_fixup;
59667 +
59668 +    rail->r_generic.Operations.LoadSystemRoute  = ep4_load_system_route;
59669 +
59670 +    rail->r_generic.Operations.LoadNodeRoute    = ep4_load_node_route;
59671 +    rail->r_generic.Operations.UnloadNodeRoute  = ep4_unload_node_route;
59672 +    rail->r_generic.Operations.LowerFilter     = ep4_lower_filter;
59673 +    rail->r_generic.Operations.RaiseFilter     = ep4_raise_filter;
59674 +    rail->r_generic.Operations.NodeDisconnected = ep4_node_disconnected;
59675 +
59676 +    rail->r_generic.Operations.FlushFilters     = ep4_flush_filters;
59677 +    rail->r_generic.Operations.FlushQueues     = ep4_flush_queues;
59678 +
59679 +    rail->r_generic.Operations.AllocInputQ     = ep4_alloc_inputq;
59680 +    rail->r_generic.Operations.FreeInputQ      = ep4_free_inputq;
59681 +    rail->r_generic.Operations.EnableInputQ     = ep4_enable_inputq;
59682 +    rail->r_generic.Operations.DisableInputQ    = ep4_disable_inputq;
59683 +    rail->r_generic.Operations.PollInputQ      = ep4_poll_inputq;
59684 +
59685 +    rail->r_generic.Operations.AllocOutputQ     = ep4_alloc_outputq;
59686 +    rail->r_generic.Operations.FreeOutputQ     = ep4_free_outputq;
59687 +    rail->r_generic.Operations.OutputQMsg      = ep4_outputq_msg;
59688 +    rail->r_generic.Operations.OutputQState     = ep4_outputq_state;
59689 +    rail->r_generic.Operations.OutputQSend     = ep4_outputq_send;
59690 +
59691 +    rail->r_generic.Operations.FillOutStats     = ep4_fillout_stats;
59692 +    rail->r_generic.Operations.Debug           = ep4_debug_rail;
59693 +
59694 +    rail->r_generic.Devinfo = dev->dev_devinfo;
59695 +
59696 +    printk ("%s: connected via elan4 rev%c device %d\n", rail->r_generic.Name,
59697 +           'a' + dev->dev_devinfo.dev_revision_id, dev->dev_instance);
59698 +
59699 +    return (EP_RAIL *) rail;
59700 +}
59701 +
59702 +void
59703 +ep4_destroy_rail (EP_RAIL *r)
59704 +{
59705 +    EP4_RAIL *rail = (EP4_RAIL *) r;
59706 +
59707 +    elan4_dereference_device (rail->r_ctxt.ctxt_dev);
59708 +
59709 +    KMEM_FREE (rail, sizeof (EP4_RAIL));
59710 +}
59711 +
59712 +static int
59713 +ep4_attach_rail (EP4_RAIL *r)
59714 +{
59715 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
59716 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
59717 +    unsigned   ctx;
59718 +
59719 +    if (elan4_insertctxt (dev, &rail->r_ctxt, &ep4_trap_ops) != 0)
59720 +       return -ENOMEM;
59721 +    
59722 +    if ((rail->r_routetable = elan4_alloc_routetable (dev, 4)) == NULL)        /* 512 << 4 == 8192 entries */
59723 +    {
59724 +       elan4_removectxt (dev, &rail->r_ctxt);
59725 +       return -ENOMEM;
59726 +    }
59727 +    elan4_set_routetable (&rail->r_ctxt, rail->r_routetable);
59728 +
59729 +    /* Attach to the kernel comms network context */
59730 +    if (elan4_attach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM) < 0)
59731 +    {
59732 +       elan4_free_routetable (dev, rail->r_routetable);
59733 +       elan4_removectxt (dev, &rail->r_ctxt);
59734 +
59735 +       return -EBUSY;
59736 +    }
59737 +
59738 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
59739 +       elan4_attach_filter (&rail->r_ctxt, ctx);
59740 +
59741 +    return 0;
59742 +}
59743 +
59744 +static void
59745 +ep4_detach_rail (EP4_RAIL *rail)
59746 +{
59747 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
59748 +    unsigned   ctx;
59749 +
59750 +    elan4_detach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM);
59751 +
59752 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
59753 +       elan4_detach_filter (&rail->r_ctxt, ctx);
59754 +
59755 +    if (rail->r_routetable)
59756 +    {
59757 +       elan4_set_routetable (&rail->r_ctxt, NULL);
59758 +       elan4_free_routetable (dev, rail->r_routetable);
59759 +    }
59760 +
59761 +    elan4_removectxt (dev, &rail->r_ctxt);
59762 +}
59763 +
59764 +int
59765 +ep4_start_rail (EP_RAIL *r)
59766 +{
59767 +    EP4_RAIL     *rail = (EP4_RAIL *) r;
59768 +    ELAN4_DEV    *dev  = rail->r_ctxt.ctxt_dev;
59769 +    E4_InputQueue qdesc;
59770 +    int           i, res;
59771 +
59772 +    if ((res = ep4_attach_rail (rail)) < 0)
59773 +       return res;
59774 +
59775 +    /* Initialise main interrupt cookie table */
59776 +    spin_lock_init (&rail->r_intcookie_lock);
59777 +    for (i = 0; i < EP4_INTCOOKIE_HASH_SIZE; i++)
59778 +       INIT_LIST_HEAD (&rail->r_intcookie_hash[i]);
59779 +
59780 +    kmutex_init (&rail->r_haltop_mutex);
59781 +    kcondvar_init (&rail->r_haltop_sleep);
59782 +    spin_lock_init (&rail->r_haltop_lock);
59783 +
59784 +    spin_lock_init (&rail->r_cookie_lock);
59785 +
59786 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_EVENT]);
59787 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_ATOMIC]);
59788 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_SINGLE]);
59789 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_MAIN]);
59790 +    spin_lock_init (&rail->r_ecq_lock);
59791 +
59792 +    ep_kthread_init (&rail->r_retry_thread);
59793 +    INIT_LIST_HEAD (&rail->r_retry_ops);
59794 +
59795 +    INIT_LIST_HEAD (&rail->r_neterr_ops);
59796 +
59797 +    kmutex_init (&rail->r_flush_mutex);
59798 +    kcondvar_init (&rail->r_flush_sleep);
59799 +
59800 +    /* Allocate the elan visible sdram/main memory */
59801 +    if ((rail->r_elan = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RAIL_ELAN), 0, &rail->r_elan_addr)) == 0 ||
59802 +       (rail->r_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_RAIL_MAIN), 0, &rail->r_main_addr)) == 0)
59803 +    {
59804 +       goto failed;
59805 +    }
59806 +
59807 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
59808 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[i].ev_CountAndType), 0);
59809 +
59810 +    elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
59811 +
59812 +    /* Allocate the system input queues at their fixed elan address */
59813 +    /*   avoid sdram address aliasing by allocating the min sdram pagesize */
59814 +    if (! (rail->r_queuedescs= ep_alloc_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE, SDRAM_PAGE_SIZE, EP_PERM_ALL, 0)))
59815 +       goto failed;
59816 +
59817 +    /* Initialise the input queue descriptor as "full" with no event */
59818 +    qdesc.q_bptr    = 0;
59819 +    qdesc.q_fptr    = 8;
59820 +    qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8);
59821 +    qdesc.q_event   = 0;
59822 +
59823 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
59824 +       elan4_sdram_copyq_to_sdram (dev, &qdesc, EP_SYSTEMQ_DESC (rail->r_queuedescs, i), sizeof (E4_InputQueue));
59825 +
59826 +    /* Allocate the resource map for command queue mappings */
59827 +    if ((rail->r_ecq_rmap = ep_rmallocmap (EP4_ECQ_RMAPSIZE, "r_ecq_rmap", 1)) == NULL)
59828 +       goto failed;
59829 +    
59830 +    ep_rmfree (rail->r_ecq_rmap, EP4_ECQ_TOP - EP4_ECQ_BASE, EP4_ECQ_BASE);
59831 +
59832 +    /* register an interrupt cookie & allocate command queues for command queue flushing */
59833 +    rail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4);
59834 +    rail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1);
59835 +
59836 +    if (rail->r_flush_mcq == NULL || rail->r_flush_ecq == NULL)
59837 +       goto failed;
59838 +
59839 +    ep4_register_intcookie (rail, &rail->r_flush_intcookie, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event), ep4_flush_interrupt, rail);
59840 +
59841 +    /* startup the retry thread */
59842 +    if (kernel_thread_create (ep4_retry_thread, (void *) rail) == 0)
59843 +       goto failed;
59844 +    ep_kthread_started (&rail->r_retry_thread);
59845 +    
59846 +    ep4_initialise_dma_retries (rail);
59847 +
59848 +    if ((rail->r_event_ecq = ep4_alloc_ecq (rail, CQ_Size1K)) == NULL)
59849 +       goto failed;
59850 +    
59851 +    rail->r_threadcode = threadcode_elan4;
59852 +    if (ep_loadcode (&rail->r_generic, &rail->r_threadcode))
59853 +       goto failed;
59854 +
59855 +    elan4_flush_icache (&rail->r_ctxt);
59856 +
59857 +    if (ep4_probe_init (rail))
59858 +       goto failed;
59859 +
59860 +    /* can now drop the context filter for the system context */
59861 +    elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_HIGH_PRI);
59862 +
59863 +    return 0;
59864 +
59865 + failed:
59866 +    printk ("ep4_start_rail: failed for rail '%s'\n", rail->r_generic.Name);
59867 +    ep4_stop_rail (&rail->r_generic);
59868 +
59869 +    return -ENOMEM;
59870 +}
59871 +
59872 +void
59873 +ep4_stall_rail (EP_RAIL *r)
59874 +{
59875 +    EP4_RAIL *rail = (EP4_RAIL *) r;
59876 +    unsigned  ctx;
59877 +
59878 +    /* Raise all the context filters */
59879 +    elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_DISCARD_ALL);
59880 +
59881 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
59882 +       elan4_set_filter (&rail->r_ctxt, ctx, E4_FILTER_DISCARD_ALL);
59883 +}
59884 +
59885 +void
59886 +ep4_stop_rail (EP_RAIL *r)
59887 +{
59888 +    EP4_RAIL *rail = (EP4_RAIL *) r;
59889 +
59890 +    if (rail->r_generic.State == EP_RAIL_STATE_RUNNING) /* undo ep4_position_found() */
59891 +    {
59892 +       ELAN_POSITION *pos  = &rail->r_generic.Position;
59893 +       EP_ADDR        addr = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies));
59894 +
59895 +       ep_free_elan (&rail->r_generic, addr, pos->pos_nodes * sizeof (E4_uint64));
59896 +
59897 +       KMEM_FREE (rail->r_cookies, pos->pos_nodes * sizeof (E4_uint64));
59898 +    }
59899 +
59900 +    ep4_probe_destroy (rail);
59901 +
59902 +    ep_unloadcode (&rail->r_generic, &rail->r_threadcode);
59903 +
59904 +    if (rail->r_event_ecq)
59905 +       ep4_free_ecq (rail, rail->r_event_ecq);
59906 +    rail->r_event_ecq = NULL;
59907 +
59908 +    ep4_finalise_dma_retries (rail);
59909 +
59910 +    ep_kthread_stop (&rail->r_retry_thread);
59911 +    ep_kthread_destroy (&rail->r_retry_thread);
59912 +
59913 +    if (rail->r_flush_intcookie.int_arg)
59914 +       ep4_deregister_intcookie (rail, &rail->r_flush_intcookie);
59915 +    rail->r_flush_intcookie.int_arg = NULL;
59916 +
59917 +    if (rail->r_flush_mcq)
59918 +       ep4_put_ecq (rail, rail->r_flush_mcq, 4);
59919 +    rail->r_flush_mcq = NULL;
59920 +
59921 +    if (rail->r_flush_ecq)
59922 +       ep4_put_ecq (rail, rail->r_flush_ecq, 1);
59923 +    rail->r_flush_ecq = NULL;
59924 +
59925 +    if (rail->r_ecq_rmap)
59926 +       ep_rmfreemap (rail->r_ecq_rmap);
59927 +    
59928 +    if (rail->r_queuedescs)
59929 +       ep_free_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE);
59930 +    rail->r_queuedescs = 0;
59931 +
59932 +    if (rail->r_elan)
59933 +       ep_free_elan (&rail->r_generic, rail->r_elan_addr, sizeof (EP4_RAIL_ELAN));
59934 +    rail->r_elan = 0;
59935 +
59936 +    if (rail->r_main)
59937 +       ep_free_main (&rail->r_generic, rail->r_main_addr, sizeof (EP4_RAIL_MAIN));
59938 +    rail->r_main = NULL;
59939 +
59940 +    kcondvar_destroy (&rail->r_flush_sleep);
59941 +    kmutex_destroy (&rail->r_flush_mutex);
59942 +
59943 +    spin_lock_destroy (&rail->r_ecq_lock);
59944 +    spin_lock_destroy (&rail->r_cookie_lock);
59945 +
59946 +    spin_lock_destroy (&rail->r_haltop_lock);
59947 +    kcondvar_destroy(&rail->r_haltop_sleep);
59948 +    kmutex_destroy (&rail->r_haltop_mutex);
59949 +    spin_lock_destroy (&rail->r_intcookie_lock);
59950 +
59951 +    ep4_detach_rail (rail);
59952 +}
59953 +
59954 +void
59955 +ep4_position_found (EP_RAIL *r, ELAN_POSITION *pos)
59956 +{
59957 +    EP4_RAIL   *rail = (EP4_RAIL *) r;
59958 +    sdramaddr_t cookies;
59959 +    EP_ADDR     addr;
59960 +    int         i;
59961 +
59962 +    KMEM_ZALLOC (rail->r_cookies, E4_uint64 *, pos->pos_nodes * sizeof (E4_uint64), 1);
59963 +
59964 +    if (! (cookies = ep_alloc_elan (&rail->r_generic, pos->pos_nodes * sizeof (E4_uint64), 0, &addr)))
59965 +       panic ("ep4_position_found: cannot allocate elan cookies array\n");
59966 +
59967 +    for (i = 0; i < pos->pos_nodes; i++)
59968 +       elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, cookies + (i * sizeof (E4_uint64)), 0);
59969 +    
59970 +    for (i = 0; i < pos->pos_nodes; i++)
59971 +       rail->r_cookies[i] = 0;
59972 +
59973 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_nodeid), pos->pos_nodeid);
59974 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies), addr);
59975 +
59976 +    ep4_probe_position_found (rail, pos);
59977 +}
59978 +
59979 +sdramaddr_t
59980 +ep4_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size)
59981 +{
59982 +    ELAN4_DEV *dev = ((EP4_RAIL *) r)->r_ctxt.ctxt_dev;
59983 +
59984 +    if (size >= SDRAM_PAGE_SIZE)
59985 +       return elan4_sdram_alloc (dev, size);
59986 +    else
59987 +    {
59988 +       sdramaddr_t block = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
59989 +       sdramaddr_t sdram = block + (addr & (SDRAM_PAGE_SIZE-1));
59990 +
59991 +       /* free the portion before sdram */
59992 +       if (sdram > block)
59993 +           elan4_sdram_free (dev, block, sdram - block);
59994 +
59995 +       /* free the portion after sdram */
59996 +       if ((block + SDRAM_PAGE_SIZE) > (sdram + size))
59997 +           elan4_sdram_free (dev, sdram + size, block + SDRAM_PAGE_SIZE - (sdram + size));
59998 +
59999 +       return sdram;
60000 +    }
60001 +}
60002 +
60003 +void
60004 +ep4_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size)
60005 +{
60006 +    elan4_sdram_free (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, size);
60007 +}
60008 +
60009 +void
60010 +ep4_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val)
60011 +{
60012 +    elan4_sdram_writeb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, val);
60013 +}
60014 +
60015 +void
60016 +ep4_flush_tlb (EP_RAIL *r)
60017 +{
60018 +    elan4mmu_flush_tlb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev);
60019 +}
60020 +
60021 +void
60022 +ep4_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode)
60023 +{
60024 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
60025 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
60026 +    E4_VirtualProcessEntry route;
60027 +
60028 +    if (elan4_generate_route (&rail->r_generic.Position, &route, ELAN4_KCOMM_CONTEXT_NUM, 
60029 +                             lowNode, highNode, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI | FIRST_TIMEOUT(3)) < 0)
60030 +    {
60031 +       panic ("ep4_load_system_route: generate route failed\n");
60032 +       /* NOTREACHED */
60033 +    }
60034 +
60035 +    elan4_write_route (dev, rail->r_routetable, vp, &route);
60036 +}
60037 +
60038 +void
60039 +ep4_load_node_route (EP_RAIL *r, unsigned nodeId)
60040 +{
60041 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
60042 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
60043 +    E4_VirtualProcessEntry route;
60044 +
60045 +    if (elan4_generate_route (&rail->r_generic.Position, &route, EP4_CONTEXT_NUM(rail->r_generic.Position.pos_nodeid),
60046 +                             nodeId, nodeId, FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3)) < 0)
60047 +    {
60048 +       panic ("ep4_load_node_route: generate route failed\n");
60049 +       /* NOTREACHED */
60050 +    }
60051 +
60052 +    elan4_write_route (dev, rail->r_routetable, EP_VP_DATA(nodeId), &route);
60053 +}
60054 +
60055 +void
60056 +ep4_unload_node_route (EP_RAIL *r, unsigned nodeId)
60057 +{
60058 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
60059 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
60060 +    
60061 +    elan4_invalidate_route (dev, rail->r_routetable, EP_VP_DATA(nodeId));
60062 +}
60063 +
60064 +void
60065 +ep4_lower_filter (EP_RAIL *r, unsigned nodeId)
60066 +{
60067 +    EP4_RAIL *rail = (EP4_RAIL *) r;
60068 +
60069 +    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_HIGH_PRI);
60070 +}
60071 +
60072 +void
60073 +ep4_raise_filter (EP_RAIL *r, unsigned nodeId)
60074 +{
60075 +    EP4_RAIL *rail = (EP4_RAIL *) r;
60076 +
60077 +    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_DISCARD_ALL);
60078 +}
60079 +
60080 +void
60081 +ep4_node_disconnected (EP_RAIL *r, unsigned nodeId)
60082 +{
60083 +    ep4_free_stalled_dmas ((EP4_RAIL *) r, nodeId);
60084 +}
60085 +
60086 +void
60087 +ep4_fillout_stats(EP_RAIL *r, char *str) 
60088 +{
60089 +    /* no stats here yet */
60090 +    /* EP4_RAIL *ep4rail = (EP4_RAIL *)r; */
60091 +}
60092 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan4.h
60093 ===================================================================
60094 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kcomm_elan4.h   2004-02-23 16:02:56.000000000 -0500
60095 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_elan4.h        2005-07-28 14:52:52.889672504 -0400
60096 @@ -0,0 +1,443 @@
60097 +/*
60098 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
60099 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
60100 + *
60101 + *    For licensing information please see the supplied COPYING file
60102 + *
60103 + */
60104 +
60105 +#ifndef __EP_KCOMM_ELAN4_H
60106 +#define __EP_KCOMM_ELAN4_H
60107 +
60108 +#ident "@(#)$Id: kcomm_elan4.h,v 1.16.2.2 2004/12/14 10:19:14 mike Exp $ $Name: QSNETMODULES-4-31_20050321 $"
60109 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan4.h,v $*/
60110 +
60111 +#include <elan4/types.h>
60112 +
60113 +#include <elan4/dma.h>
60114 +#include <elan4/events.h>
60115 +#include <elan4/commands.h>
60116 +
60117 +#if !defined(__elan4__)
60118 +#include <elan4/device.h>
60119 +#endif /* ! defined(__elan4__) */
60120 +
60121 +/* private address allocation */
60122 +#define EP4_TEXT_BASE                  0xF8000000              /* base address for thread code (defined in makerules.elan4) */
60123 +#define EP4_ECQ_BASE                   0xFF000000              /* address space for mapping command queues */
60124 +#define EP4_ECQ_TOP                    0xFF800000
60125 +
60126 +#define EP4_ECQ_RMAPSIZE               128
60127 +#define EP4_STACK_SIZE                 1024                    /* default thread code stack size */
60128 +#define EP4_MAX_LEVELS                 8                       /* same as ELAN_MAX_LEVELS */
60129 +
60130 +/* context number allocation */
60131 +#define EP4_CONTEXT_NUM(nodeId)                (ELAN4_KCOMM_BASE_CONTEXT_NUM + (nodeId))
60132 +#define EP4_CONTEXT_ISDATA(ctx)                ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM && \
60133 +                                        (ctx) <= ELAN4_KCOMM_TOP_CONTEXT_NUM)
60134 +#define EP4_CONTEXT_TO_NODE(ctx)       ((ctx) - ELAN4_KCOMM_BASE_CONTEXT_NUM)
60135 +
60136 +/*
60137 + * network error cookie format:
60138 + *  -------------------------------------------------
60139 + *  | unique cookie value | Remote | DMA | Location |
60140 + *  -------------------------------------------------
60141 + * [63:4] Cookie   - unique cookie number
60142 + * [3]    Thread   - cookie generated by thread code
60143 + * [2]   Remote   - cookie generated by remote end
60144 + * [1]    STEN     - cookie is for a STEN packet
60145 + * [0]    DMA      - cookie is for a DMA
60146 + */
60147 +#define EP4_COOKIE_DMA         (1    << 0)
60148 +#define EP4_COOKIE_STEN                (1    << 1)
60149 +#define EP4_COOKIE_REMOTE      (1    << 2)
60150 +#define EP4_COOKIE_THREAD      (1    << 3)
60151 +#define EP4_COOKIE_INC         (1ull << 4)
60152 +
60153 +#define EP4_COOKIE_STRING(val) ((val) & ~(EP4_COOKIE_INC-1)) >> 4, \
60154 +                               ((val) & EP4_COOKIE_DMA)    ? ",dma" : "", \
60155 +                               ((val) & EP4_COOKIE_REMOTE) ? ",remote" : "", \
60156 +                               ((val) & EP4_COOKIE_THREAD) ? ",thread" : "", \
60157 +                               ((val) & EP4_COOKIE_STEN)   ? ",sten" : ""
60158 +/*
60159 + * Done "word" values 
60160 + */
60161 +#define EP4_STATE_FREE         0
60162 +#define EP4_STATE_ACTIVE       1
60163 +#define EP4_STATE_FINISHED     2
60164 +#define EP4_STATE_FAILED       3
60165 +#define EP4_STATE_PRIVATE      4
60166 +
60167 +#define EP4_EVENT_FIRING_TLIMIT        16384                   /* 1023 uS */
60168 +
60169 +/* forward declarations */
60170 +typedef struct ep4_rail        EP4_RAIL;
60171 +
60172 +#if !defined(__elan4__)
60173 +
60174 +typedef struct ep4_intcookie
60175 +{
60176 +    struct list_head            int_link;
60177 +    E4_uint64                   int_val;
60178 +    void                      (*int_callback)(EP4_RAIL *rail, void *arg);
60179 +    void                       *int_arg;
60180 +} EP4_INTCOOKIE;
60181 +
60182 +#define EP4_INTCOOKIE_HASH_SIZE        256
60183 +#define EP4_INTCOOKIE_HASH(a)          ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP4_INTCOOKIE_HASH_SIZE-1))
60184 +
60185 +typedef struct ep4_ecq
60186 +{
60187 +    struct list_head   ecq_link;                               /* linked on r_ecq_list */
60188 +    ELAN4_INTOP                ecq_intop;                              /* main interrupt op space */
60189 +    ELAN4_CQ          *ecq_cq;                                 /* command queue */
60190 +    E4_Addr            ecq_addr;                               /* address mapped into elan */
60191 +    unsigned int       ecq_avail;                              /* # dwords still available */
60192 +
60193 +    spinlock_t         ecq_lock;                               /* spinlock for main accesses */
60194 +    sdramaddr_t                ecq_event;                              /* event for flushing "event" queues */
60195 +    EP_ADDR            ecq_event_addr;
60196 +    struct ep4_ecq     *ecq_flushcq;                           /*  and command port to issue setevent to */
60197 +} EP4_ECQ;
60198 +
60199 +#define EP4_ECQ_EVENT          0                               /* command queues targeted by multi-block events */
60200 +#define EP4_ECQ_ATOMIC         1                               /* command queues targeted by atomic store operations */
60201 +#define EP4_ECQ_SINGLE         2                               /* command queues targeted by single word commands from main */
60202 +#define EP4_ECQ_MAIN           3                               /* command queues targeted by multi word commands from main */
60203 +#define EP4_NUM_ECQ            4
60204 +
60205 +#define EP4_ECQ_Size(which)            ((which) == EP4_ECQ_EVENT  ? CQ_Size64K : \
60206 +                                        (which) == EP4_ECQ_ATOMIC ? CQ_Size8K  : \
60207 +                                        (which) == EP4_ECQ_SINGLE ? CQ_Size1K  : \
60208 +                                        (which) == EP4_ECQ_MAIN   ? CQ_Size8K  : \
60209 +                                        CQ_Size1K)
60210 +
60211 +typedef struct ep4_dma_retry
60212 +{
60213 +    struct list_head    retry_link;                            /* chained on free/retry list */
60214 +    unsigned long      retry_time;                             /* "lbolt" to retry at */
60215 +    E4_DMA             retry_dma;                              /* DMA (in main memory) */
60216 +} EP4_DMA_RETRY;
60217 +
60218 +#define EP4_DMA_RETRY_CQSIZE           CQ_Size8K                               /* size of command queue for dma retry */
60219 +#define EP4_DMA_RETRY_FLOWCNT          (CQ_Size(EP4_DMA_RETRY_CQSIZE)/72)      /* # of reissued DMA's which can fit in */
60220 +
60221 +typedef struct ep4_inputq
60222 +{
60223 +    EP4_INTCOOKIE      q_intcookie;
60224 +    unsigned int       q_slotSize;
60225 +    unsigned int       q_slotCount;
60226 +
60227 +    void              *q_slots;
60228 +    EP_ADDR            q_slotsAddr;
60229 +    
60230 +    EP_INPUTQ_CALLBACK *q_callback;
60231 +    void              *q_arg;
60232 +
60233 +    sdramaddr_t                q_desc;
60234 +    EP_ADDR            q_descAddr;
60235 +    EP_ADDR            q_eventAddr;
60236 +    EP4_ECQ           *q_wcq;                                  /* command queue to issue waitevent to */
60237 +    EP4_ECQ           *q_ecq;                                  /* command queue targeted by event to generate interrupt */
60238 +
60239 +    EP_ADDR            q_fptr;                                 /* cached current front pointer */
60240 +    EP_ADDR            q_last;                                 /* elan addr for last queue slot  */
60241 +
60242 +    atomic_t           q_fired;                                /* atomic flag that interrupt received */
60243 +    unsigned int       q_count;                                /* count of slots consumed */
60244 +} EP4_INPUTQ;
60245 +
60246 +typedef struct ep4_outputq
60247 +{
60248 +    spinlock_t         q_lock;
60249 +    unsigned int       q_slotCount;
60250 +    unsigned int       q_slotSize;
60251 +    unsigned int        q_dwords;
60252 +    ELAN4_CQ          *q_cq;
60253 +    void              *q_main;
60254 +    EP_ADDR            q_mainAddr;
60255 +    unsigned int       q_retries;
60256 +} EP4_OUTPUTQ;
60257 +
60258 +#endif /* ! defined(__elan4__) */
60259 +
60260 +typedef struct ep4_check_sten
60261 +{
60262 +    E4_uint64          c_reset_event_cmd;                      /* WRITEDWORD to reset start event */
60263 +    E4_uint64          c_reset_event_value;
60264 +
60265 +    E4_uint64          c_open;                                 /* OPEN VP_PROBE(lvl) */
60266 +    E4_uint64          c_trans_traceroute0;                    /* SENDTRANS TR_TRACEROUTE 0s */
60267 +    E4_uint64          c_addr_traceroute0;
60268 +    E4_uint64          c_data_traceroute0[8];
60269 +    E4_uint64          c_trans_traceroute1;                    /* SENDTRANS TR_TRACEROUTE 1s */
60270 +    E4_uint64          c_addr_traceroute1;
60271 +    E4_uint64          c_data_traceroute1[8];
60272 +    E4_uint64          c_trans_sendack;                        /* SENDTRANS SENDACK */
60273 +    E4_uint64          c_addr_sendack;
60274 +    
60275 +    E4_uint64          c_guard_ok;                             /* GUARD OK - write level */
60276 +    E4_uint64          c_writedword_ok;
60277 +    E4_uint64          c_value_ok;
60278 +    
60279 +    E4_uint64          c_guard_fail;                           /* GUARD FAIL - chain setevent/write fail */
60280 +    E4_uint64          c_setevent_fail;
60281 +    E4_uint64          c_setevent_nop;
60282 +    E4_uint64          c_nop_pad;
60283 +} EP4_CHECK_STEN;
60284 +
60285 +#define EP4_CHECK_STEN_NDWORDS (sizeof (EP4_CHECK_STEN) >> 3)
60286 +
60287 +typedef struct ep4_rail_elan
60288 +{
60289 +    EP4_CHECK_STEN     r_check_sten[EP4_MAX_LEVELS];
60290 +    E4_Event32         r_check_fail;                                   /* Check failed (== r_check_start[-1]) */
60291 +    E4_Event32          r_check_start[EP4_MAX_LEVELS];
60292 +
60293 +    E4_Event32         r_qevents[EP_NUM_SYSTEMQ];
60294 +    E4_Event32         r_flush_event;
60295 +
60296 +    E4_uint64          r_nodeid;
60297 +#ifdef __elan4__
60298 +    E4_uint64         *r_cookies;
60299 +#else
60300 +    E4_Addr            r_cookies;
60301 +#endif
60302 +} EP4_RAIL_ELAN;
60303 +
60304 +#define TRACEROUTE_ENTRIES     16                      /* 2 * ELAN_MAX_LEVELS */
60305 +#define TRACEROUTE_NDWORDS     (TRACEROUTE_ENTRIES/2)
60306 +
60307 +typedef struct ep4_rail_main
60308 +{
60309 +    E4_uint32          r_probe_dest0[TRACEROUTE_ENTRIES];
60310 +    E4_uint32          r_probe_dest1[TRACEROUTE_ENTRIES];
60311 +    E4_uint64          r_probe_result;
60312 +    E4_uint64          r_probe_level;
60313 +
60314 +    E4_uint64           r_dma_flowcnt;                         /*  count of dma's queued */
60315 +} EP4_RAIL_MAIN;
60316 +
60317 +#define EP4_PROBE_ACTIVE       (0xffff)
60318 +#define EP4_PROBE_FAILED       (0xfffe)
60319 +
60320 +#if !defined(__elan4__)
60321 +
60322 +typedef struct ep4_retry_ops
60323 +{
60324 +    struct list_head   op_link;
60325 +    unsigned long     (*op_func)(EP4_RAIL *rail, void *arg, unsigned long nextRunTime);
60326 +    void              *op_arg;
60327 +} EP4_RETRY_OPS;
60328 +
60329 +typedef struct ep4_neterr_ops
60330 +{
60331 +    struct list_head   op_link;
60332 +    void             (*op_func) (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
60333 +    void              *op_arg;
60334 +} EP4_NETERR_OPS;
60335 +
60336 +struct ep4_rail
60337 +{
60338 +    EP_RAIL            r_generic;
60339 +    ELAN4_CTXT         r_ctxt;
60340 +    ELAN4_ROUTE_TABLE  *r_routetable;
60341 +    
60342 +    spinlock_t         r_intcookie_lock;
60343 +    struct list_head    r_intcookie_hash[EP4_INTCOOKIE_HASH_SIZE];
60344 +
60345 +    sdramaddr_t                r_elan;
60346 +    EP_ADDR            r_elan_addr;
60347 +    EP4_RAIL_MAIN      *r_main;
60348 +    EP_ADDR            r_main_addr;
60349 +    
60350 +    EP_CODE            r_threadcode;                           /* copy of thread code */
60351 +
60352 +    sdramaddr_t                r_queuedescs;                           /* systemq queue descriptors */
60353 +
60354 +    E4_uint64         *r_cookies;                              /* network error cookies */
60355 +    spinlock_t          r_cookie_lock;                         /*    and spin lock */
60356 +
60357 +    kcondvar_t         r_probe_wait;                           /* network position probing */
60358 +    spinlock_t         r_probe_lock;
60359 +    volatile int       r_probe_done;
60360 +    EP4_INTCOOKIE      r_probe_intcookie;
60361 +    EP4_ECQ           *r_probe_cq;
60362 +    E4_uint32          r_probe_source0[TRACEROUTE_ENTRIES];
60363 +    E4_uint32          r_probe_source1[TRACEROUTE_ENTRIES];
60364 +
60365 +    kmutex_t           r_haltop_mutex;                         /* halt/flush operations */
60366 +    ELAN4_HALTOP       r_haltop;
60367 +    ELAN4_DMA_FLUSHOP   r_flushop;
60368 +    kcondvar_t         r_haltop_sleep;
60369 +    spinlock_t         r_haltop_lock;
60370 +
60371 +    struct list_head    r_ecq_list[EP4_NUM_ECQ];               /* list of statically allocated command queues */
60372 +    EP_RMAP           *r_ecq_rmap;                             /* resource map for command queue mappings */
60373 +    spinlock_t          r_ecq_lock;                            /* spinlock for list/space management */
60374 +
60375 +    kmutex_t           r_flush_mutex;                          /* serialize command queue flushing */
60376 +    unsigned long      r_flush_count;                          /* # setevents issued for flushing */
60377 +    EP4_ECQ           *r_flush_mcq;                            /*   and command queue for waitevent */
60378 +    EP4_ECQ            *r_flush_ecq;                           /*   and command queue for interrupt */
60379 +    EP4_INTCOOKIE       r_flush_intcookie;                     /*   and interrupt cookie */
60380 +    kcondvar_t          r_flush_sleep;                         /*   and place to sleep ... */
60381 +
60382 +    EP_KTHREAD         r_retry_thread;                         /* retry thread */
60383 +    struct list_head    r_retry_ops;                           /*  list of retry operations */
60384 +
60385 +    EP4_RETRY_OPS       r_dma_ops;                             /* dma retry operations */
60386 +    EP4_ECQ           *r_dma_ecq;                              /*   command queue to reissue DMAs */
60387 +    E4_uint64           r_dma_flowcnt;                         /*   count of dma's reissued */
60388 +    struct list_head    r_dma_retrylist[EP_NUM_RETRIES];       /*   retry lists  */
60389 +    struct list_head    r_dma_freelist;                                /*   and free list */
60390 +    spinlock_t          r_dma_lock;                            /*   and spinlock to protect lists */
60391 +    unsigned long       r_dma_allocated;                       /*   # retries allocated */
60392 +    unsigned long       r_dma_reserved;                                /*   # retries reserved */
60393 +
60394 +    EP4_ECQ           *r_event_ecq;                            /* command queue for occasional setevents */
60395 +
60396 +    struct list_head    r_neterr_ops;                          /* list of neterr fixup operations */
60397 +
60398 +    ELAN4_IPROC_TRAP    r_iproc_trap;
60399 +    ELAN4_TPROC_TRAP    r_tproc_trap;
60400 +} ;
60401 +
60402 +#define EP4_CTXT_TO_RAIL(ctxt) ((EP4_RAIL *) (((unsigned long) (ctxt)) - offsetof (EP4_RAIL, r_ctxt)))
60403 +
60404 +#if defined(DEBUG_ASSERT)
60405 +#define EP4_ASSERT(rail,EXPR)                  EP_ASSERT(&((rail)->r_generic), EXPR)
60406 +#define EP4_SDRAM_ASSERT(rail,off,value)       EP4_ASSERT(rail, (sdram_assert ? elan4_sdram_readq ((rail)->r_ctxt.ctxt_dev, (off)) == (value) : 1))
60407 +#else
60408 +#define EP4_ASSERT(rail,EXPR)
60409 +#define EP4_SDRAM_ASSERT(rail,off,value)
60410 +#endif
60411 +
60412 +/* kcomm_elan4.c */
60413 +extern EP_RAIL    *ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev);
60414 +extern void        ep4_destroy_rail (EP_RAIL *rail);
60415 +
60416 +extern int         ep4_start_rail (EP_RAIL *rail);
60417 +extern void        ep4_stall_rail (EP_RAIL *rail);
60418 +extern void        ep4_stop_rail (EP_RAIL *rail);
60419 +
60420 +extern void       ep4_debug_rail (EP_RAIL *rail);
60421 +
60422 +extern void        ep4_position_found (EP_RAIL *rail, ELAN_POSITION *pos);
60423 +
60424 +extern sdramaddr_t ep4_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size);
60425 +extern void        ep4_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size);
60426 +extern void        ep4_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
60427 +
60428 +extern void        ep4_flush_tlb (EP_RAIL *r);
60429 +extern void        ep4_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode);
60430 +extern void        ep4_load_node_route (EP_RAIL *r, unsigned int nodeId);
60431 +extern void        ep4_unload_node_route (EP_RAIL *r, unsigned int nodeId);
60432 +extern void        ep4_lower_filter (EP_RAIL *r, unsigned int nodeId);
60433 +extern void        ep4_raise_filter (EP_RAIL *rail, unsigned int nodeId);
60434 +extern void        ep4_node_disconnected (EP_RAIL *r, unsigned int nodeId);
60435 +
60436 +/* kmap_elan4.c */
60437 +extern void        ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr);
60438 +extern void        ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr);
60439 +extern void        ep4_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm);
60440 +extern void        ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len);
60441 +extern void       *ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages);
60442 +extern void        ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private);
60443 +extern void        ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm);
60444 +extern physaddr_t  ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index);
60445 +extern void        ep4_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages);
60446 +
60447 +/* kmsg_elan4.c */
60448 +extern EP_INPUTQ  *ep4_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount,
60449 +                                    EP_INPUTQ_CALLBACK *callback, void *arg);
60450 +extern void        ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q);
60451 +extern void        ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q);
60452 +extern void        ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q);
60453 +extern int         ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
60454 +extern EP_OUTPUTQ *ep4_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount);
60455 +extern void        ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q);
60456 +extern void       *ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
60457 +extern int         ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
60458 +extern int         ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size,
60459 +                                    unsigned int nodeId, unsigned int qnum, unsigned int retries);
60460 +
60461 +/* probenetwork_elan4.c */
60462 +extern int         ep4_probe_init (EP4_RAIL *r);
60463 +extern void        ep4_probe_destroy (EP4_RAIL *r);
60464 +extern void        ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos);
60465 +extern int         ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw);
60466 +extern int         ep4_check_position (EP_RAIL *rail);
60467 +
60468 +/* support_elan4.c */
60469 +extern ELAN4_TRAP_OPS ep4_trap_ops;
60470 +extern void           ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg);
60471 +extern void           ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp);
60472 +extern EP4_INTCOOKIE *ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie);
60473 +extern E4_uint64      ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node);
60474 +
60475 +extern void           ep4_flush_filters (EP_RAIL *r);
60476 +extern void           ep4_flush_queues (EP_RAIL *r);
60477 +extern void          ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc);
60478 +
60479 +extern EP4_ECQ       *ep4_alloc_ecq (EP4_RAIL *rail, unsigned int cqsize);
60480 +extern void           ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq);
60481 +extern EP4_ECQ      *ep4_get_ecq (EP4_RAIL *rail, unsigned int which, unsigned int ndwords);
60482 +extern void           ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned int ndwords);
60483 +
60484 +extern void           ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag);
60485 +extern void           ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event);
60486 +extern void           ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1);
60487 +
60488 +extern void           ep4_flush_interrupt (EP4_RAIL *rail, void *arg);
60489 +extern void           ep4_flush_ecqs (EP4_RAIL *rail);
60490 +
60491 +extern void           ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop, 
60492 +                                      EP_ADDR stackAddr, E4_Addr startpc, int nargs,...);
60493 +
60494 +extern void           ep4_initialise_dma_retries (EP4_RAIL *rail);
60495 +extern void           ep4_finalise_dma_retries (EP4_RAIL *rail);
60496 +extern int            ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, unsigned int attr);
60497 +extern void          ep4_release_dma_retries(EP4_RAIL *rail, unsigned int count);
60498 +extern void           ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval);
60499 +extern void           ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma);
60500 +extern void           ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId);
60501 +extern void           ep4_display_rail (EP4_RAIL *rail);
60502 +
60503 +extern void           ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
60504 +extern void           ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
60505 +extern void           ep4_retry_thread (EP4_RAIL *rail);
60506 +
60507 +/* neterr_elan4.c */
60508 +extern void           ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
60509 +extern void           ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
60510 +extern void           ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
60511 +
60512 +/* commands_elan4.c */
60513 +extern void           elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag);
60514 +extern void           elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
60515 +extern void           elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
60516 +extern void           elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype);
60517 +extern void           elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie);
60518 +extern void           elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs);
60519 +extern void           elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma);
60520 +extern void           elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event);
60521 +extern void           elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count);
60522 +extern void           elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1);
60523 +extern void           elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command);
60524 +extern void           elan4_guard (ELAN4_CQ *cq, E4_uint64 command);
60525 +extern void           elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr);
60526 +extern void           elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0);
60527 +extern void           elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1);
60528 +extern void           elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...);
60529 +extern void           elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr);
60530 +
60531 +extern void           ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
60532 +extern void          ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
60533 +extern void           ep4_retry_thread (EP4_RAIL *rail);
60534 +
60535 +extern void           ep4_fillout_stats(EP_RAIL *rail, char *str);
60536 +
60537 +#endif /* ! defined(__elan4__) */
60538 +
60539 +#endif /* __EP_KCOMM_ELAN4_H */
60540 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_vp.h
60541 ===================================================================
60542 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kcomm_vp.h      2004-02-23 16:02:56.000000000 -0500
60543 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kcomm_vp.h   2005-07-28 14:52:52.889672504 -0400
60544 @@ -0,0 +1,36 @@
60545 +/*
60546 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
60547 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
60548 + *
60549 + *    For licensing information please see the supplied COPYING file
60550 + *
60551 + */
60552 +
60553 +#ifndef __EP_KCOMM_VP_H
60554 +#define __EP_KCOMM_VP_H
60555 +
60556 +#ident "@(#)$Id: kcomm_vp.h,v 1.2 2004/03/24 11:32:56 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
60557 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_vp.h,v $*/
60558 +
60559 +#define EP_MAX_NODES                   2048                    /* Max nodes we support */
60560 +
60561 +/* virtual process allocation */
60562 +#define EP_VP_NODE_BASE                        (0)
60563 +#define EP_VP_DATA_BASE                        (EP_VP_NODE_BASE + EP_MAX_NODES)
60564 +#define EP_VP_PROBE_BASE               (EP_VP_DATA_BASE + EP_MAX_NODES)
60565 +#define EP_VP_PROBE_COUNT              ELAN_MAX_LEVELS
60566 +
60567 +#define EP_VP_BCAST_BASE               (EP_VP_PROBE_BASE + EP_VP_PROBE_COUNT)
60568 +#define EP_VP_BCAST_COUNT              (CM_SGMTS_PER_LEVEL * (CM_MAX_LEVELS - 1) + 1)
60569 +
60570 +#define EP_VP_NODE(nodeId)             (EP_VP_NODE_BASE + (nodeId))
60571 +#define EP_VP_DATA(nodeId)             (EP_VP_DATA_BASE + (nodeId))
60572 +#define EP_VP_PROBE(lvl)               (EP_VP_PROBE_BASE + (lvl))
60573 +#define EP_VP_BCAST(lvl,sgmt)          (EP_VP_BCAST_BASE + ((lvl) - 1)*CM_SGMTS_PER_LEVEL + (sgmt))
60574 +
60575 +#define EP_VP_TO_NODE(vp)              ((vp) & (EP_MAX_NODES-1))
60576 +#define EP_VP_ISDATA(vp)               ((vp) >= EP_VP_DATA_BASE && (vp) < (EP_VP_DATA_BASE + EP_MAX_NODES))
60577 +
60578 +#endif /* __EP_KCOMM_VP_H */
60579 +
60580 +
60581 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kmap.c
60582 ===================================================================
60583 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kmap.c  2004-02-23 16:02:56.000000000 -0500
60584 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kmap.c       2005-07-28 14:52:52.890672352 -0400
60585 @@ -0,0 +1,561 @@
60586 +/*
60587 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
60588 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
60589 + *
60590 + *    For licensing information please see the supplied COPYING file
60591 + *
60592 + */
60593 +
60594 +#ident "@(#)$Id: kmap.c,v 1.10.6.2 2004/12/14 10:19:14 mike Exp $"
60595 +/*      $Source: /cvs/master/quadrics/epmod/kmap.c,v $*/
60596 +
60597 +#include <qsnet/kernel.h>
60598 +#include <qsnet/kpte.h>
60599 +
60600 +#include <elan/kcomm.h>
60601 +
60602 +#include "debug.h"
60603 +
60604 +#if defined(DIGITAL_UNIX)
60605 +#  define kernel_map                   (first_task->map)
60606 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
60607 +#elif defined(LINUX)
60608 +#  define kernel_map                   get_kern_mm()
60609 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
60610 +#elif defined(SOLARIS)
60611 +#  define kernel_map                   &kas
60612 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
60613 +#endif
60614 +
60615 +void
60616 +ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned long len,  unsigned int perm, int ep_attr)
60617 +{
60618 +    rail->Operations.KaddrMap (rail, eaddr, kaddr, len, perm, ep_attr);
60619 +}
60620 +
60621 +void
60622 +ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr)
60623 +{
60624 +    rail->Operations.SdramMap (rail, eaddr, saddr, len, perm, ep_attr);
60625 +}
60626 +
60627 +void
60628 +ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len)
60629 +{
60630 +    rail->Operations.Unmap (rail, eaddr, len);
60631 +}
60632 +
60633 +void
60634 +ep_perrail_dvma_sync (EP_RAIL *rail)
60635 +{
60636 +    if (rail->TlbFlushRequired)
60637 +    {
60638 +       rail->TlbFlushRequired = 0;
60639 +
60640 +       rail->Operations.FlushTlb (rail);
60641 +    }
60642 +}
60643 +
60644 +
60645 +static int ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask);
60646 +
60647 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
60648 +static uint16_t ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum);
60649 +#endif
60650 +
60651 +EP_NMH_OPS ep_dvma_nmh_ops = 
60652 +{
60653 +    ep_dvma_map_rails,
60654 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
60655 +    ep_dvma_calc_check_sum
60656 +#endif
60657 +};
60658 +
60659 +extern void
60660 +ep_dvma_init (EP_SYS *sys)
60661 +{
60662 +    EP_DVMA_STATE *d = &sys->DvmaState;
60663 +
60664 +    kmutex_init (&d->dvma_lock);
60665 +    
60666 +    INIT_LIST_HEAD (&d->dvma_handles);
60667 +    INIT_LIST_HEAD (&d->dvma_rails);
60668 +
60669 +    d->dvma_rmap = ep_rmallocmap (EP_DVMA_RMAP_SIZE, "dvma_rmap", 1);
60670 +
60671 +    ep_rmfree (d->dvma_rmap, EP_DVMA_TOP - EP_DVMA_BASE, EP_DVMA_BASE);
60672 +}
60673 +
60674 +extern void
60675 +ep_dvma_fini (EP_SYS *sys)
60676 +{
60677 +    EP_DVMA_STATE *d = &sys->DvmaState;
60678 +
60679 +    ep_rmfreemap (d->dvma_rmap);
60680 +
60681 +    kmutex_destroy (&d->dvma_lock);
60682 +}
60683 +    
60684 +extern int
60685 +ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail)
60686 +{
60687 +    EP_DVMA_STATE    *d = &sys->DvmaState;
60688 +    EP_RAIL_ENTRY    *l;
60689 +    struct list_head *el;
60690 +
60691 +    KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1);
60692 +
60693 +    if (l == NULL)
60694 +       return (ENOMEM);
60695 +
60696 +    kmutex_lock (&d->dvma_lock);
60697 +
60698 +    l->Rail = rail;
60699 +
60700 +    list_add_tail (&l->Link, &d->dvma_rails);
60701 +
60702 +    list_for_each (el, &d->dvma_handles) {
60703 +       EP_DVMA_NMH *desc   = list_entry (el, EP_DVMA_NMH, dvma_link);
60704 +       int          npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT;
60705 +
60706 +       desc->dvma_rails[rail->Number] = rail;
60707 +       desc->dvma_railmask |= ( 1 << rail->Number);
60708 +
60709 +       desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages);
60710 +    }
60711 +
60712 +    kmutex_unlock (&d->dvma_lock);
60713 +    return (0);
60714 +}
60715 +
60716 +extern void
60717 +ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail)
60718 +{
60719 +    EP_DVMA_STATE    *d = &sys->DvmaState;
60720 +    struct list_head *el;
60721 +
60722 +    kmutex_lock (&d->dvma_lock);
60723 +    
60724 +    list_for_each (el, &d->dvma_handles) {
60725 +       EP_DVMA_NMH *desc   = list_entry (el, EP_DVMA_NMH, dvma_link);
60726 +       int          npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT;
60727 +
60728 +       desc->dvma_rails[rail->Number] = NULL;
60729 +       desc->dvma_railmask &= ~(1 << rail->Number);
60730 +
60731 +       rail->Operations.DvmaRelease (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages, desc->dvma_private[rail->Number]);
60732 +    }
60733 +
60734 +    list_for_each (el, &d->dvma_rails) {
60735 +       EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link);
60736 +
60737 +       if (tmp->Rail == rail)
60738 +       {
60739 +           list_del (el);
60740 +
60741 +           KMEM_FREE (tmp, sizeof (EP_RAIL_ENTRY));
60742 +           break;
60743 +       }
60744 +    }
60745 +    kmutex_unlock (&d->dvma_lock);
60746 +}
60747 +
60748 +EP_NMH *
60749 +ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm)
60750 +{
60751 +    EP_DVMA_STATE    *d = &sys->DvmaState;
60752 +    EP_DVMA_NMH      *desc;
60753 +    EP_ADDR          addr;
60754 +    struct list_head *el;
60755 +    int               i;
60756 +
60757 +    KMEM_ZALLOC (desc, EP_DVMA_NMH *, offsetof (EP_DVMA_NMH, dvma_attrs[npages]), 1);
60758 +    
60759 +    if (desc == NULL)
60760 +       return (NULL);
60761 +
60762 +    if ((addr = ep_rmalloc (d->dvma_rmap, npages << PAGESHIFT, 0)) == 0)
60763 +    {
60764 +
60765 +       KMEM_FREE (desc, sizeof (EP_DVMA_NMH));
60766 +       return (NULL);
60767 +    }
60768 +
60769 +    spin_lock_init (&desc->dvma_lock);
60770 +
60771 +    desc->dvma_perm = perm;
60772 +
60773 +    kmutex_lock (&d->dvma_lock);
60774 +    /* reserve the mapping resource */
60775 +    list_for_each (el, &d->dvma_rails) {
60776 +       EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
60777 +
60778 +       EPRINTF4 (DBG_KMAP, "%s: ep_dvma_reserve desc=%p npages=%d rail=%p\n", rail->Name, desc, npages, rail);
60779 +
60780 +       if ((desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, addr, npages)) == NULL)
60781 +       {
60782 +           printk ("%s: !!ep_dvma_reserve - rail->DvmaReserve failed\n", rail->Name);
60783 +           goto failed;
60784 +       }
60785 +
60786 +       desc->dvma_rails[rail->Number] = rail;
60787 +       desc->dvma_railmask |= (1 << rail->Number);
60788 +    }
60789 +
60790 +    /* insert into the network mapping handle table */
60791 +    desc->dvma_nmh.nmh_nmd.nmd_addr = addr;
60792 +    desc->dvma_nmh.nmh_nmd.nmd_len  = npages << PAGESHIFT;
60793 +    desc->dvma_nmh.nmh_nmd.nmd_attr = EP_NMD_ATTR (sys->Position.pos_nodeid, 0);
60794 +    desc->dvma_nmh.nmh_ops          = &ep_dvma_nmh_ops;
60795 +
60796 +    ep_nmh_insert (&sys->MappingTable, &desc->dvma_nmh);
60797 +
60798 +    list_add (&desc->dvma_link, &d->dvma_handles);
60799 +
60800 +    kmutex_unlock (&d->dvma_lock);
60801 +
60802 +    return (&desc->dvma_nmh);
60803 +
60804 + failed:
60805 +
60806 +    kmutex_unlock (&d->dvma_lock);
60807 +
60808 +    for (i = 0; i < EP_MAX_RAILS; i++)
60809 +       if (desc->dvma_rails[i] != NULL)
60810 +           desc->dvma_rails[i]->Operations.DvmaRelease (desc->dvma_rails[i], addr, npages, desc->dvma_private[i]);
60811 +
60812 +    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
60813 +
60814 +    KMEM_FREE (desc, sizeof (EP_DVMA_NMH));
60815 +    return (NULL);
60816 +}
60817 +
60818 +void
60819 +ep_dvma_release (EP_SYS *sys, EP_NMH *nmh)
60820 +{
60821 +    EP_DVMA_STATE *d      = &sys->DvmaState;
60822 +    EP_DVMA_NMH   *desc   = (EP_DVMA_NMH *) nmh;
60823 +    EP_ADDR        addr   = nmh->nmh_nmd.nmd_addr;
60824 +    int            npages = nmh->nmh_nmd.nmd_len >> PAGESHIFT;
60825 +    EP_RAIL       *rail;
60826 +    int            i;
60827 +
60828 +    kmutex_lock (&d->dvma_lock);
60829 +
60830 +    list_del (&desc->dvma_link);
60831 +    
60832 +    ep_nmh_remove (&sys->MappingTable, nmh);
60833 +
60834 +    for (i = 0; i < EP_MAX_RAILS; i++)
60835 +       if ((rail = desc->dvma_rails[i]) != NULL)
60836 +           rail->Operations.DvmaRelease (rail, addr, npages, desc->dvma_private[i]);
60837 +
60838 +    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
60839 +
60840 +    KMEM_FREE (desc, offsetof (EP_DVMA_NMH, dvma_attrs[npages]));
60841 +
60842 +    kmutex_unlock (&d->dvma_lock);
60843 +}
60844 +
60845 +void
60846 +ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len, EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset)
60847 +{
60848 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
60849 +    unsigned     offset = (unsigned long) vaddr & PAGEOFFSET;
60850 +    unsigned     npages = btopr (len + offset);
60851 +    EP_ADDR      addr   = nmh->nmh_nmd.nmd_addr + (index << PAGESHIFT);
60852 +    int                 rmask  = *hints;
60853 +    EP_RAIL     *rail;
60854 +    register int i, rnum;
60855 +    unsigned long flags;
60856 +
60857 +    EPRINTF7 (DBG_KMAP, "ep_dvma_load: map=%p vaddr=%p len=%x nmh=%p(%x,%x) index=%d\n",
60858 +             map, vaddr, len, nmh, nmh->nmh_nmd.nmd_addr, nmh->nmh_nmd.nmd_len, index);
60859 +
60860 +    /* If no rail specified, then map into all rails */
60861 +    if (rmask == 0)
60862 +       rmask = desc->dvma_railmask;
60863 +
60864 +    ASSERT ((index + npages) <= (nmh->nmh_nmd.nmd_len >> PAGESHIFT));
60865 +
60866 +    /* If no map is specified then use the kernel map */
60867 +    if (map == NULL)
60868 +       map = kernel_map;
60869 +
60870 +    spin_lock_irqsave (&desc->dvma_lock, flags);
60871 +    /* Now map each of the specified pages (backwards) */
60872 +
60873 +    vaddr = (vaddr - offset) + (npages-1)*PAGESIZE;
60874 +    for (i = npages-1; i >= 0; i--, vaddr -= PAGESIZE)
60875 +    {
60876 +       physaddr_t paddr = vaddr_to_phys (map, vaddr);
60877 +       
60878 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60879 +       {
60880 +           if (! (rmask & (1 << rnum)) || (rail = desc->dvma_rails[rnum]) == NULL)
60881 +               rmask &= ~(1 << rnum);
60882 +           else
60883 +           {
60884 +               rail->Operations.DvmaSetPte (rail, desc->dvma_private[rnum], index + i, paddr, desc->dvma_perm);
60885 +
60886 +               desc->dvma_attrs[index + i] |= (1 << rnum);
60887 +           }
60888 +       }
60889 +    }
60890 +
60891 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60892 +       if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
60893 +           rail->TlbFlushRequired = 1;
60894 +
60895 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
60896 +
60897 +    /* Construct the network mapping handle to be returned. */
60898 +    subset->nmd_addr = addr + offset;
60899 +    subset->nmd_len  = len;
60900 +    subset->nmd_attr = EP_NMD_ATTR(sys->Position.pos_nodeid, rmask);
60901 +}
60902 +
60903 +void
60904 +ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd)
60905 +{
60906 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
60907 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
60908 +    unsigned     npages = btopr (nmd->nmd_len + offset);
60909 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
60910 +    EP_RAIL     *rail;
60911 +    int          rnum;
60912 +    int          rmask;
60913 +    register int i;
60914 +    unsigned long flags;
60915 +    
60916 +    spin_lock_irqsave (&desc->dvma_lock, flags);
60917 +
60918 +    /* compute which rails we need to unload on */
60919 +    for (rmask = 0, i = 0; i < npages; i++)
60920 +    {
60921 +       rmask |= desc->dvma_attrs[index + i];
60922 +       
60923 +       desc->dvma_attrs[index + i] = 0;
60924 +    }
60925 +    
60926 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60927 +       if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
60928 +           rail->Operations.DvmaUnload (rail, desc->dvma_private[rnum], index, npages);
60929 +
60930 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
60931 +}
60932 +
60933 +int
60934 +ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask)
60935 +{
60936 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
60937 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
60938 +    unsigned     npages = btopr (nmd->nmd_len + offset);
60939 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
60940 +    int          r, rnum;
60941 +    register int i;
60942 +    unsigned long flags;
60943 +
60944 +    spin_lock_irqsave (&desc->dvma_lock, flags);
60945 +
60946 +    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x mask=%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
60947 +
60948 +    if ((mask &= desc->dvma_railmask) == 0)
60949 +    {
60950 +       printk ("ep_dvma_map_rails: no intersecting rails %04x.%04x\n", mask, desc->dvma_railmask);
60951 +       spin_unlock_irqrestore (&desc->dvma_lock, flags);
60952 +       return (-1);
60953 +    }
60954 +
60955 +    for (i = npages-1; i >= 0; i--)
60956 +    {
60957 +       int pgidx = (index + i);
60958 +
60959 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60960 +           if (desc->dvma_attrs[pgidx] & (1 << rnum))
60961 +               break;
60962 +       
60963 +       if (rnum == EP_MAX_RAILS)
60964 +       {
60965 +           EPRINTF3 (DBG_KMAP, "ep_dvma_map_rails: nmh=%p idx=%x [%08x] not ptes valid\n", nmh, pgidx, 
60966 +                     nmh->nmh_nmd.nmd_addr + ((pgidx) << PAGESHIFT));
60967 +           mask = 0;
60968 +       }
60969 +       else
60970 +       {
60971 +           EP_RAIL   *rail  = desc->dvma_rails[rnum];
60972 +           physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
60973 +           
60974 +           EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx,
60975 +                     nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr);
60976 +           
60977 +           for (r = 0; r < EP_MAX_RAILS; r++)
60978 +           {
60979 +               if ((mask & (1 << r)) == 0)
60980 +                   continue;
60981 +               
60982 +               if ((desc->dvma_attrs[pgidx] & (1 << r)) == 0)
60983 +               {
60984 +                   EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr=%llx\n",
60985 +                             desc->dvma_rails[rnum]->Name, nmh, pgidx, nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), 
60986 +                             (long long) paddr);
60987 +                   
60988 +                   rail->Operations.DvmaSetPte (rail, desc->dvma_private[rnum], pgidx, paddr, desc->dvma_perm);
60989 +                   
60990 +                   desc->dvma_attrs[pgidx] |= (1 << r);
60991 +               }
60992 +           }
60993 +       }
60994 +    }
60995 +
60996 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
60997 +       if ((mask & (1 << rnum)) != 0)
60998 +           desc->dvma_rails[rnum]->TlbFlushRequired = 1;
60999 +
61000 +    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x|%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
61001 +
61002 +    /* Finally update the network memory descriptor */
61003 +    nmd->nmd_attr |= mask;
61004 +
61005 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
61006 +
61007 +    return (0);
61008 +}
61009 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
61010 +#include <linux/highmem.h>
61011 +
61012 +/* Generic rolling checksum algorithm */
61013 +uint16_t
61014 +rolling_check_sum (char *msg, int nob, uint16_t sum)
61015 +{
61016 +    while (nob-- > 0)
61017 +       sum = sum * 13 + *msg++;
61018 +
61019 +    return (sum);
61020 +}
61021 +
61022 +#if ! defined(NO_RMAP)
61023 +void  
61024 +unmap_phys_address(unsigned long phys_addr)
61025 +{
61026 +    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
61027 +    
61028 +    if (pfn_valid(pfn)) 
61029 +       kunmap(pfn_to_page(pfn));
61030 +}
61031 +
61032 +void * 
61033 +map_phys_address(unsigned long phys_addr)
61034 +{
61035 +    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
61036 +    
61037 +    if (pfn_valid(pfn)) 
61038 +       return  kmap(pfn_to_page(pfn));
61039 +    
61040 +    return NULL;
61041 +}
61042 +#else
61043 +void  
61044 +unmap_phys_address(unsigned long phys_addr)
61045 +{
61046 +    struct page *p = virt_to_page(__va(phys_addr));
61047 +    
61048 +    if (VALID_PAGE(p)) 
61049 +       kunmap(p);
61050 +}
61051 +
61052 +void * 
61053 +map_phys_address(unsigned long phys_addr)
61054 +{
61055 +    struct page *p = virt_to_page(__va(phys_addr));
61056 +                               
61057 +    if (VALID_PAGE(p)) 
61058 +       return  kmap(p);
61059 +    
61060 +    return NULL;
61061 +}
61062 +#endif
61063 +
61064 +uint16_t
61065 +ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum)
61066 +{
61067 +    /* cant be called from an interupt */
61068 +
61069 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
61070 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
61071 +    unsigned     npages = btopr (nmd->nmd_len + offset);
61072 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
61073 +    unsigned     start, len;
61074 +    int          rnum;
61075 +    register int i;
61076 +    unsigned long flags;
61077 +    EP_RAIL      *rail;
61078 +
61079 +
61080 +    spin_lock_irqsave (&desc->dvma_lock, flags);
61081 +
61082 +    EPRINTF3 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x \n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
61083
61084 +    /* find a rail */
61085 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
61086 +       if (desc->dvma_attrs[index] & (1 << rnum))
61087 +           break;
61088 +       
61089 +    ASSERT (rnum != EP_MAX_RAILS);
61090
61091 +    rail = desc->dvma_rails[rnum];
61092 +
61093 +    for (i = 0; i <= (npages-1); i++)
61094 +    {
61095 +       int        pgidx = (index + i);
61096 +       physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
61097 +       void *     virt;
61098 +
61099 +       spin_unlock_irqrestore (&desc->dvma_lock, flags); /* unlock for check sum calc */
61100 +
61101 +       virt = map_phys_address(paddr);
61102 +
61103 +       if (!virt)
61104 +           printk("ep_dvma_calc_check_sum: virt = NULL ! \n");
61105 +       else {
61106 +           if ( i == 0 ) {
61107 +               /* last bit of the first page */
61108 +               start =  (nmd->nmd_addr & (PAGESIZE - 1)) ;
61109 +               len   =  PAGESIZE - start;
61110 +               if ( len > nmd->nmd_len) /* less than the remaining page */ 
61111 +                   len =  nmd->nmd_len;
61112 +           } else {
61113 +               if ( i != (npages-1)) {
61114 +                   /* all of the middle pages    */
61115 +                   start = 0;
61116 +                   len   = PAGESIZE;
61117 +               } else {
61118 +                   /* first bit of the last page */
61119 +                   start = 0;
61120 +                   len   = ((nmd->nmd_addr + nmd->nmd_len -1) & (PAGESIZE -1)) +1;
61121 +               }
61122 +           }
61123 +
61124 +           check_sum = rolling_check_sum (((char *)virt)+start, len, check_sum);
61125 +           unmap_phys_address(paddr);
61126 +   
61127 +           /* re aquire the lock */
61128 +           spin_lock_irqsave (&desc->dvma_lock, flags);
61129 +       }
61130 +       
61131 +       EPRINTF5 (DBG_KMAP, "%s: ep_dvma_calc_check_sum: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx,
61132 +                 nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr);     
61133 +    }
61134 +
61135 +    EPRINTF4 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x = %d\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, check_sum);
61136 +
61137 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
61138 +
61139 +    return (check_sum);
61140 +}
61141 +#endif
61142 +/*
61143 + * Local variables:
61144 + * c-file-style: "stroustrup"
61145 + * End:
61146 + */
61147 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kmap_elan3.c
61148 ===================================================================
61149 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kmap_elan3.c    2004-02-23 16:02:56.000000000 -0500
61150 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kmap_elan3.c 2005-07-28 14:52:52.891672200 -0400
61151 @@ -0,0 +1,209 @@
61152 +/*
61153 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61154 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61155 + *
61156 + *    For licensing information please see the supplied COPYING file
61157 + *
61158 + */
61159 +
61160 +#ident "@(#)$Id: kmap_elan3.c,v 1.3.8.1 2004/12/14 10:19:14 mike Exp $"
61161 +/*      $Source: /cvs/master/quadrics/epmod/kmap_elan3.c,v $ */
61162 +
61163 +#include <qsnet/kernel.h>
61164 +
61165 +#include <elan3/elanregs.h>
61166 +#include <elan3/elandev.h>
61167 +#include <elan3/elanvp.h>
61168 +#include <elan3/elan3mmu.h>
61169 +#include <elan3/elanctxt.h>
61170 +#include <elan3/elandebug.h>
61171 +
61172 +#include <elan/kcomm.h>
61173 +
61174 +#include "kcomm_elan3.h"
61175 +
61176 +#if defined(DIGITAL_UNIX)
61177 +#  define kernel_map                   (first_task->map)
61178 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
61179 +#elif defined(LINUX)
61180 +#  define kernel_map                   get_kern_mm()
61181 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
61182 +#elif defined(SOLARIS)
61183 +#  define kernel_map                   &kas
61184 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
61185 +#endif
61186 +
61187 +#define ELAN3_PTES_PER_PAGE            (PAGESIZE/ELAN3_PAGE_SIZE)
61188 +
61189 +#if defined(__LITTLE_ENDIAN__)
61190 +#define PERM_ENDIAN    0
61191 +#else
61192 +#define PERM_ENDIAN    ELAN3_PTE_BIG_ENDIAN
61193 +#endif
61194 +
61195 +static unsigned int main_permtable[] = 
61196 +{
61197 +    ELAN3_PERM_REMOTEALL,              /* EP_PERM_EXECUTE */
61198 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
61199 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
61200 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_ALL */
61201 +};
61202 +
61203 +static unsigned int sdram_permtable[] = 
61204 +{
61205 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_EXECUTE */
61206 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
61207 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
61208 +    ELAN3_PERM_REMOTEALL,              /* EP_PERM_ALL */
61209 +};
61210 +
61211 +static unsigned int io_permtable[] = 
61212 +{
61213 +    ELAN3_PERM_LOCAL_READ,             /* EP_PERM_EXECUTE */
61214 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
61215 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
61216 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_ALL */
61217 +};
61218 +
61219 +void
61220 +ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr)
61221 +{
61222 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
61223 +    unsigned     npages = len >> PAGESHIFT;
61224 +    int          i;
61225 +    unsigned int off;
61226 +
61227 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61228 +
61229 +    for (i = 0; i < npages; i++)
61230 +    {
61231 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr);
61232 +
61233 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
61234 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, 
61235 +                             main_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0));
61236 +
61237 +       eaddr += PAGESIZE;
61238 +       kaddr += PAGESIZE;
61239 +    }
61240 +}
61241 +
61242 +void
61243 +ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr)
61244 +{
61245 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
61246 +    unsigned     npages = len >> PAGESHIFT;
61247 +    int          i;
61248 +    unsigned int off;
61249 +
61250 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61251 +
61252 +    for (i = 0; i < npages; i++)
61253 +    {
61254 +       physaddr_t paddr = elan3_sdram_to_phys (rail->Device, saddr);
61255 +
61256 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
61257 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr+off, paddr+off, 
61258 +                             sdram_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0) );
61259 +
61260 +       eaddr += PAGESIZE;
61261 +       saddr += PAGESIZE;
61262 +    }
61263 +}
61264 +
61265 +void
61266 +ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned len, unsigned int perm)
61267 +{
61268 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
61269 +    unsigned     npages = len >> PAGESHIFT;
61270 +    int          i;
61271 +    unsigned int off;
61272 +
61273 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (ioaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61274 +
61275 +    for (i = 0; i < npages; i++)
61276 +    {
61277 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) ioaddr);
61278 +
61279 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
61280 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, 
61281 +                             io_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC);
61282 +
61283 +       eaddr += PAGESIZE;
61284 +       ioaddr += PAGESIZE;
61285 +    }
61286 +}
61287 +void
61288 +ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned len)
61289 +{
61290 +    EP3_RAIL *rail = (EP3_RAIL *) r;
61291 +
61292 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61293 +
61294 +    elan3mmu_unload (rail->Elan3mmu, eaddr, len, PTE_UNLOAD_UNLOCK | PTE_UNLOAD_NOSYNC);
61295 +}
61296 +
61297 +void *
61298 +ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned npages)
61299 +{
61300 +    EP3_RAIL *rail = (EP3_RAIL *) r;
61301 +    void     *private;
61302 +
61303 +    KMEM_ALLOC (private, void *, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t), 1);
61304 +    
61305 +    if (private == NULL)
61306 +       return NULL;
61307 +    
61308 +    elan3mmu_reserve (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private);
61309 +
61310 +    return private;
61311 +}
61312 +
61313 +void
61314 +ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned npages, void *private)
61315 +{
61316 +    EP3_RAIL *rail = (EP3_RAIL *) r;
61317 +
61318 +    elan3mmu_release (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private);
61319 +
61320 +    KMEM_FREE (private, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t));
61321 +}
61322 +
61323 +void
61324 +ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned index, physaddr_t paddr, unsigned int perm)
61325 +{
61326 +    ELAN3_DEV    *dev  = ((EP3_RAIL *) r)->Device;
61327 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
61328 +    int          off;
61329 +
61330 +    for (off =0 ; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
61331 +    {  
61332 +       ELAN3_PTE newpte = elan3mmu_phys_to_pte (dev, paddr + off, main_permtable[perm]) | ELAN3_PTE_REF | ELAN3_PTE_MOD;
61333 +
61334 +       elan3_writepte (dev, *ptep, newpte);
61335 +
61336 +       ptep++;
61337 +    }
61338 +}
61339 +
61340 +physaddr_t
61341 +ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned index)
61342 +{
61343 +    EP3_RAIL    *rail = (EP3_RAIL *) r;
61344 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
61345 +    ELAN3_PTE     pte  = elan3_readpte (rail->Device, *ptep);
61346 +
61347 +    return pte & ELAN3_PTE_PFN_MASK;
61348 +}
61349 +
61350 +void
61351 +ep3_dvma_unload (EP_RAIL *r, void *private, unsigned index, unsigned npages)
61352 +{
61353 +    EP3_RAIL    *rail = (EP3_RAIL *) r;
61354 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
61355 +    ELAN3_PTE     tpte = elan3mmu_kernel_invalid_pte (rail->Elan3mmu);
61356 +    int i;
61357 +
61358 +    for (i = (npages * ELAN3_PTES_PER_PAGE) - 1; i >= 0; i--)
61359 +       elan3_writepte (rail->Device, ptep[i], tpte);
61360 +}
61361 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kmap_elan4.c
61362 ===================================================================
61363 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kmap_elan4.c    2004-02-23 16:02:56.000000000 -0500
61364 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kmap_elan4.c 2005-07-28 14:52:52.892672048 -0400
61365 @@ -0,0 +1,226 @@
61366 +/*
61367 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61368 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61369 + *
61370 + *    For licensing information please see the supplied COPYING file
61371 + *
61372 + */
61373 +
61374 +#ident "@(#)$Id: kmap_elan4.c,v 1.7.8.3 2005/03/18 13:54:01 mike Exp $"
61375 +/*      $Source: /cvs/master/quadrics/epmod/kmap_elan4.c,v $ */
61376 +
61377 +#include <qsnet/kernel.h>
61378 +
61379 +#include <elan/kcomm.h>
61380 +
61381 +#include "debug.h"
61382 +#include "kcomm_elan4.h"
61383 +
61384 +#if defined(DIGITAL_UNIX)
61385 +#  define kernel_map                   (first_task->map)
61386 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
61387 +#elif defined(LINUX)
61388 +#  define kernel_map                   get_kern_mm()
61389 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
61390 +#elif defined(SOLARIS)
61391 +#  define kernel_map                   &kas
61392 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
61393 +#endif
61394 +
61395 +static unsigned int main_permtable[] = 
61396 +{
61397 +    PERM_Unused,                       /* EP_PERM_EXECUTE */
61398 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
61399 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
61400 +    PERM_DataReadWrite,                        /* EP_PERM_ALL */
61401 +};
61402 +
61403 +static unsigned int sdram_permtable[] = 
61404 +{
61405 +    PERM_LocExecute,                   /* EP_PERM_EXECUTE */
61406 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
61407 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
61408 +    PERM_RemoteAll,                    /* EP_PERM_ALL */
61409 +};
61410 +
61411 +static unsigned int io_permtable[] = 
61412 +{
61413 +    PERM_Unused,                       /* EP_PERM_EXECUTE */
61414 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
61415 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
61416 +    PERM_Unused,                       /* EP_PERM_ALL */
61417 +};
61418 +
61419 +void
61420 +ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr)
61421 +{
61422 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
61423 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
61424 +    unsigned int npages = len >> PAGESHIFT;
61425 +    int          i;
61426 +    unsigned int off;
61427 +
61428 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61429 +
61430 +    for (i = 0; i < npages; i++)
61431 +    {
61432 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr);
61433 +
61434 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
61435 +       {
61436 +           E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]);
61437 +
61438 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, newpte);
61439 +       }
61440 +
61441 +       eaddr += PAGESIZE;
61442 +       kaddr += PAGESIZE;
61443 +    }
61444 +}
61445 +
61446 +void
61447 +ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr)
61448 +{
61449 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
61450 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
61451 +    unsigned int npages = len >> PAGESHIFT;
61452 +    int          i;
61453 +    unsigned int off;
61454 +
61455 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61456 +
61457 +    if ((eaddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (saddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)))
61458 +       printk ("ep4_sdram_map: eaddr=%x saddr=%lx - incorrectly alised\n", eaddr, saddr);
61459 +
61460 +    for (i = 0; i < npages; i++)
61461 +    {
61462 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
61463 +       {
61464 +           E4_uint64 newpte = ((saddr + off) >> PTE_PADDR_SHIFT) | PTE_SetPerm (sdram_permtable[perm]);
61465 +
61466 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, newpte);
61467 +       }
61468 +
61469 +       eaddr += PAGESIZE;
61470 +       saddr += PAGESIZE;
61471 +    }
61472 +}
61473 +
61474 +void
61475 +ep4_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm)
61476 +{
61477 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
61478 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
61479 +    unsigned int npages = len >> PAGESHIFT;
61480 +    int          i;
61481 +    unsigned int off;
61482 +
61483 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (ioaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61484 +
61485 +    for (i = 0; i < npages; i++)
61486 +    {
61487 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) ioaddr);
61488 +
61489 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
61490 +       {
61491 +           E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, io_permtable[perm]);
61492 +
61493 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, newpte);
61494 +       }
61495 +
61496 +       eaddr += PAGESIZE;
61497 +       ioaddr += PAGESIZE;
61498 +    }
61499 +}
61500 +void
61501 +ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len)
61502 +{
61503 +    EP4_RAIL *rail = (EP4_RAIL *) r;
61504 +
61505 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
61506 +
61507 +    elan4mmu_unload_range (&rail->r_ctxt, 0, eaddr, len);
61508 +}
61509 +
61510 +void *
61511 +ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages)
61512 +{
61513 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
61514 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
61515 +
61516 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_reserve: eaddr=%x npages=%d (=> %d)\n", eaddr, npages, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])));
61517 +
61518 +    return elan4mmu_reserve (&rail->r_ctxt, 0, (E4_Addr) eaddr, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])), 1);
61519 +}
61520 +
61521 +void
61522 +ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private)
61523 +{
61524 +    EP4_RAIL *rail = (EP4_RAIL *) r;
61525 +
61526 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_release: eaddr=%x npages=%d private=%p\n", eaddr, npages, private);
61527 +
61528 +    elan4mmu_release (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private);
61529 +}
61530 +
61531 +void
61532 +ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm)
61533 +{
61534 +    EP4_RAIL     *rail  = (EP4_RAIL *) r;
61535 +    ELAN4_DEV    *dev   = rail->r_ctxt.ctxt_dev;
61536 +    unsigned int  off;
61537 +    unsigned long flags;
61538 +
61539 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_set_pte: index %x -> eaddr %llx paddr %llx\n", 
61540 +             index, ((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE), (long long) paddr);
61541 +
61542 +    local_irq_save (flags);
61543 +    for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
61544 +    {
61545 +       E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]);
61546 +
61547 +       elan4mmu_set_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, (index << (PAGE_SHIFT - dev->dev_pageshift[0])) +
61548 +                         (off >> dev->dev_pageshift[0]), newpte);
61549 +    }
61550 +    local_irq_restore (flags);
61551 +}
61552 +
61553 +physaddr_t
61554 +ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index)
61555 +{
61556 +    EP4_RAIL     *rail  = (EP4_RAIL *) r;
61557 +    ELAN4_DEV    *dev   = rail->r_ctxt.ctxt_dev;
61558 +    E4_uint64     pte;
61559 +    unsigned long flags;
61560 +
61561 +    local_irq_save (flags);
61562 +    pte = elan4mmu_get_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, index << (PAGE_SHIFT - dev->dev_pageshift[0]));
61563 +    local_irq_restore (flags);
61564 +
61565 +    return elan4mmu_pte2phys (dev, pte);
61566 +}
61567 +
61568 +void
61569 +ep4_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages)
61570 +{
61571 +    EP4_RAIL  *rail  = (EP4_RAIL *) r;
61572 +    ELAN4_DEV *dev   = rail->r_ctxt.ctxt_dev;
61573 +    EP_ADDR    eaddr = ((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE);
61574 +    unsigned long idx = (index << (PAGE_SHIFT - dev->dev_pageshift[0]));
61575 +    unsigned long lim = idx + (npages << (PAGE_SHIFT - dev->dev_pageshift[0]));
61576 +    unsigned long flags;
61577 +
61578 +    EPRINTF5 (DBG_KMAP, "ep4_dvma_unload: eaddr %x -> %lx : index=%d idx=%ld lim=%ld\n", 
61579 +             eaddr, (unsigned long)(eaddr + (npages * PAGE_SIZE)), index, idx, lim);
61580 +
61581 +    local_irq_save (flags);
61582 +    for (; idx < lim; idx++)
61583 +       elan4mmu_clear_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, idx);
61584 +    local_irq_restore (flags);
61585 +}
61586 +
61587 +/*
61588 + * Local variables:
61589 + * c-file-style: "stroustrup"
61590 + * End:
61591 + */
61592 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kmsg_elan3.c
61593 ===================================================================
61594 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kmsg_elan3.c    2004-02-23 16:02:56.000000000 -0500
61595 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kmsg_elan3.c 2005-07-28 14:52:52.892672048 -0400
61596 @@ -0,0 +1,345 @@
61597 +/*
61598 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61599 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61600 + *
61601 + *    For licensing information please see the supplied COPYING file
61602 + *
61603 + */
61604 +
61605 +#ident "@(#)$Id: kmsg_elan3.c,v 1.3.8.1 2004/09/30 09:52:37 david Exp $"
61606 +/*      $Source: /cvs/master/quadrics/epmod/kmsg_elan3.c,v $ */
61607 +
61608 +#include <qsnet/kernel.h>
61609 +
61610 +#include <elan/kcomm.h>
61611 +
61612 +#include "kcomm_vp.h"
61613 +#include "kcomm_elan3.h"
61614 +#include "debug.h"
61615 +
61616 +static void
61617 +ep3_inputq_event (EP3_RAIL *rail, void *arg)
61618 +{
61619 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) arg;
61620 +    
61621 +    (*inputq->q_callback)((EP_RAIL *)rail, inputq->q_arg);
61622 +}
61623 +
61624 +static EP3_COOKIE_OPS ep3_inputq_cookie_ops = 
61625 +{
61626 +    ep3_inputq_event,
61627 +};
61628 +
61629 +EP_INPUTQ *
61630 +ep3_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount,
61631 +                 EP_INPUTQ_CALLBACK *callback, void *arg)
61632 +{
61633 +    EP3_RAIL      *rail = (EP3_RAIL *) r;
61634 +    EP3_INPUTQ    *inputq;
61635 +    EP3_InputQueue qdesc;
61636 +    void          *slots;
61637 +    int            i;
61638 +
61639 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
61640 +
61641 +    KMEM_ALLOC (inputq, EP3_INPUTQ *, sizeof (EP3_INPUTQ), TRUE);
61642 +
61643 +    if (inputq == NULL)
61644 +       return (EP_INPUTQ *) NULL;
61645 +    
61646 +    if ((slots = ep_alloc_main (&rail->Generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL)
61647 +    {
61648 +       KMEM_FREE (inputq, sizeof (EP3_INPUTQ));
61649 +       return (EP_INPUTQ *) NULL;
61650 +    }
61651 +
61652 +    inputq->q_slotSize  = slotSize;
61653 +    inputq->q_slotCount = slotCount;
61654 +    inputq->q_callback  = callback;
61655 +    inputq->q_arg       = arg;
61656 +    inputq->q_slots     = slots;
61657 +
61658 +    /* Initialise all the slots to be "unreceived" */
61659 +    for (i = 0; i < slotCount; i++)
61660 +       ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
61661 +    
61662 +    inputq->q_base     = inputq->q_slotsAddr;
61663 +    inputq->q_top      = inputq->q_base + (slotCount-1) * slotSize;
61664 +    inputq->q_fptr     = inputq->q_base;
61665 +    inputq->q_desc     = EP_SYSTEMQ_DESC(rail->QueueDescs, qnum);
61666 +    inputq->q_descAddr = EP_SYSTEMQ_ADDR (qnum);
61667 +
61668 +    if (callback)
61669 +       RegisterCookie (&rail->CookieTable, &inputq->q_cookie, inputq->q_descAddr, &ep3_inputq_cookie_ops, inputq);
61670 +
61671 +    /* Initialise the input queue descriptor */
61672 +    qdesc.q_state          = E3_QUEUE_FULL;
61673 +    qdesc.q_bptr           = inputq->q_base + slotSize;
61674 +    qdesc.q_fptr           = inputq->q_fptr;
61675 +    qdesc.q_base           = inputq->q_base;
61676 +    qdesc.q_top            = inputq->q_top;
61677 +    qdesc.q_size           = slotSize;
61678 +    qdesc.q_event.ev_Count = 1;
61679 +    qdesc.q_event.ev_Type  = callback ? EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0;
61680 +    qdesc.q_wevent         = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event);
61681 +    qdesc.q_wcount         = 0;
61682 +
61683 +    /* copy the queue descriptor down to sdram */
61684 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue));
61685 +
61686 +    return (EP_INPUTQ *) inputq;
61687 +}
61688 +
61689 +void
61690 +ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q)
61691 +{
61692 +    EP3_RAIL   *rail   = (EP3_RAIL *) r;
61693 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) q;
61694 +
61695 +    ep_free_main (&rail->Generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
61696 +
61697 +    if (inputq->q_callback)
61698 +       DeregisterCookie (&rail->CookieTable, &inputq->q_cookie);
61699 +
61700 +    KMEM_FREE (inputq, sizeof (EP3_INPUTQ));
61701 +}
61702 +
61703 +void
61704 +ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q)
61705 +{
61706 +    EP3_RAIL   *rail   = (EP3_RAIL *) r;
61707 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) q;
61708 +
61709 +    elan3_sdram_writel (rail->Device, inputq->q_desc + offsetof (EP3_InputQueue, q_state), 0);
61710 +}
61711 +
61712 +void
61713 +ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q)
61714 +{
61715 +    EP3_RAIL      *rail   = (EP3_RAIL *) r;
61716 +    EP3_INPUTQ    *inputq = (EP3_INPUTQ *) q;
61717 +    EP3_InputQueue qdesc;
61718 +
61719 +    /* mark the queue as locked */
61720 +    SetQueueLocked (rail, inputq->q_desc);
61721 +
61722 +    /* re-initialise the queue as empty */
61723 +    qdesc.q_state          = E3_QUEUE_FULL;
61724 +    qdesc.q_bptr           = (E3_Addr) inputq->q_base + inputq->q_slotSize;
61725 +    qdesc.q_fptr           = inputq->q_fptr;
61726 +    qdesc.q_base           = inputq->q_base;
61727 +    qdesc.q_top            = inputq->q_top;
61728 +    qdesc.q_size           = inputq->q_slotSize;
61729 +    qdesc.q_event.ev_Count = 1;
61730 +    qdesc.q_event.ev_Type  = inputq->q_callback ? EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0;
61731 +    qdesc.q_wevent         = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event);
61732 +    qdesc.q_wcount         = 0;
61733 +
61734 +    /* copy the queue descriptor down to sdram */
61735 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue));
61736 +}
61737 +
61738 +int
61739 +ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg)
61740 +{
61741 +    EP3_RAIL          *rail   = (EP3_RAIL *) r;
61742 +    EP3_INPUTQ        *inputq = (EP3_INPUTQ *) q;
61743 +    sdramaddr_t        qdesc  = inputq->q_desc;
61744 +    E3_Addr            nfptr;
61745 +    int                count = 0;
61746 +    E3_uint32          state;
61747 +    int                       delay;
61748 +
61749 + run_again_because_of_eventqueue_overflow:
61750 +    nfptr = inputq->q_fptr + inputq->q_slotSize;
61751 +    if (nfptr > inputq->q_top)                                 
61752 +       nfptr = inputq->q_base;
61753 +
61754 +    while (nfptr != elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_bptr)))                       /* PCI read */
61755 +    {
61756 +       unsigned long slot = (unsigned long) inputq->q_slots + (nfptr - inputq->q_base);
61757 +
61758 +       /* Poll the final word of the message until the message has completely
61759 +        * arrived in main memory. */
61760 +       for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
61761 +           DELAY (delay);
61762 +
61763 +       /* Call the message handler */
61764 +       (*handler) (r, arg, (void *) slot);
61765 +       
61766 +       state = elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state));                           /* PCI read */
61767 +       if ((state & E3_QUEUE_FULL) == 0)
61768 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr);                        /* PCI write */
61769 +       else
61770 +       {
61771 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr);                        /* PCI write */
61772 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state), (state & ~E3_QUEUE_FULL));    /* PCI write */
61773 +       }
61774 +       inputq->q_fptr = nfptr;
61775 +       
61776 +       nfptr += roundup (inputq->q_slotSize, E3_BLK_ALIGN);
61777 +       if (nfptr > inputq->q_top)
61778 +           nfptr = inputq->q_base;
61779 +
61780 +       if (++count >= maxCount && maxCount)
61781 +           break;
61782 +    }
61783 +    
61784 +    if (inputq->q_callback && count != 0)
61785 +    {
61786 +       if (count != inputq->q_waitCount)
61787 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_wcount), inputq->q_waitCount = count);
61788 +
61789 +       if (IssueWaitevent (rail, inputq->q_descAddr + offsetof (EP3_InputQueue, q_wevent)) == ISSUE_COMMAND_TRAPPED)
61790 +           goto run_again_because_of_eventqueue_overflow;
61791 +    }
61792 +
61793 +    return count;
61794 +}
61795 +
61796 +#define Q_EVENT(q,slotNum)             ((q)->q_elan      + (slotNum) * sizeof (E3_BlockCopyEvent))
61797 +#define Q_EVENT_ADDR(q,slotNum)                ((q)->q_elanAddr  + (slotNum) * sizeof (E3_BlockCopyEvent))
61798 +#define Q_MSG(q,slotNum)       (void *)((q)->q_main      + (slotNum) * (q)->q_slotSize)
61799 +#define Q_MSG_ADDR(q,slotNum)          ((q)->q_mainAddr  + (slotNum) * (q)->q_slotSize)
61800 +#define Q_DONE(q,slotNum)     (*((int *)((q)->q_main      + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32))))
61801 +#define Q_DONE_ADDR(q,slotNum)         ((q)->q_mainAddr  + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32))
61802 +
61803 +#define Q_ELAN_SIZE(q)                 ((q)->q_slotCount * sizeof (E3_BlockCopyEvent))
61804 +#define Q_MAIN_SIZE(q)                 ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E3_uint32)))
61805 +
61806 +static void
61807 +ep3_outputq_retry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error)
61808 +{
61809 +    E3_DMA_BE    *dmabe = (E3_DMA_BE *) dma;
61810 +    sdramaddr_t   event = ep_elan2sdram (&rail->Generic, dmabe->s.dma_srcEvent);
61811 +    E3_Addr       done  = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Dest));
61812 +    E3_uint32    *donep = ep_elan2main (&rail->Generic, done & ~EV_BCOPY_DTYPE_MASK);
61813 +
61814 +    EPRINTF1 (DBG_KMSG, "ep3_ouputq_retry: donep at %p -> FAILED\n", donep);
61815 +    
61816 +    *donep = EP3_EVENT_FAILED;
61817 +}
61818 +
61819 +static EP3_COOKIE_OPS ep3_outputq_cookie_ops =
61820 +{
61821 +    NULL, /* Event */
61822 +    ep3_outputq_retry,
61823 +    NULL, /* DmaCancelled */
61824 +    NULL, /* DmaVerify */
61825 +};
61826 +
61827 +EP_OUTPUTQ *
61828 +ep3_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount)
61829 +{
61830 +    EP3_RAIL         *rail = (EP3_RAIL *) r;
61831 +    EP3_OUTPUTQ      *outputq;
61832 +    int               i;
61833 +    E3_BlockCopyEvent event;
61834 +
61835 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
61836 +
61837 +    KMEM_ALLOC (outputq, EP3_OUTPUTQ *, sizeof (EP3_OUTPUTQ), 1);
61838 +
61839 +    if (outputq == NULL)
61840 +       return NULL;
61841 +
61842 +    outputq->q_slotCount = slotCount;
61843 +    outputq->q_slotSize  = slotSize;
61844 +
61845 +    outputq->q_elan = ep_alloc_elan (r, Q_ELAN_SIZE(outputq), 0, &outputq->q_elanAddr);
61846 +
61847 +    if (outputq->q_elan == (sdramaddr_t) 0)
61848 +    {
61849 +       KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
61850 +       return NULL;
61851 +    }
61852 +
61853 +    outputq->q_main = ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr);
61854 +
61855 +    if (outputq->q_main == (void *) NULL)
61856 +    {
61857 +       ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq));
61858 +       KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
61859 +       return NULL;
61860 +    }
61861 +
61862 +    RegisterCookie (&rail->CookieTable, &outputq->q_cookie, outputq->q_elanAddr, &ep3_outputq_cookie_ops, outputq);
61863 +
61864 +    for (i = 0; i < slotCount; i++)
61865 +    {
61866 +       EP3_INIT_COPY_EVENT (event, outputq->q_cookie, Q_DONE_ADDR(outputq, i), 0);
61867 +
61868 +       Q_DONE(outputq, i) = outputq->q_cookie.Cookie;
61869 +       
61870 +       elan3_sdram_copyl_to_sdram (rail->Device, &event, Q_EVENT(outputq, i), sizeof (E3_BlockCopyEvent));
61871 +    }
61872 +
61873 +    return (EP_OUTPUTQ *) outputq;
61874 +}
61875 +
61876 +void
61877 +ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q)
61878 +{
61879 +    EP3_RAIL    *rail    = (EP3_RAIL *) r;
61880 +    EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q;
61881 +
61882 +    DeregisterCookie (&rail->CookieTable, &outputq->q_cookie);
61883 +    
61884 +    ep_free_main (r, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
61885 +    ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq));
61886
61887 +    KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
61888 +}
61889 +
61890 +void *
61891 +ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
61892 +{
61893 +    return Q_MSG ((EP3_OUTPUTQ *) q, slotNum);
61894 +}
61895 +
61896 +int
61897 +ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
61898 +{
61899 +    switch (Q_DONE((EP3_OUTPUTQ *) q, slotNum))
61900 +    {
61901 +    case EP3_EVENT_ACTIVE:
61902 +       return EP_OUTPUTQ_BUSY;
61903 +       
61904 +    case EP3_EVENT_FAILED:
61905 +       return EP_OUTPUTQ_FAILED;
61906 +       
61907 +    default:
61908 +       return EP_OUTPUTQ_FINISHED;
61909 +    }
61910 +}
61911 +
61912 +int
61913 +ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, 
61914 +                 unsigned vp, unsigned qnum, unsigned retries)
61915 +{
61916 +    EP3_RAIL    *rail    = (EP3_RAIL *) r;
61917 +    EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q;
61918 +    unsigned     base    = outputq->q_slotSize - roundup (size, E3_BLK_ALIGN);
61919 +    E3_DMA_BE    dmabe;
61920 +
61921 +    dmabe.s.dma_type           = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_QUEUED, retries);
61922 +    dmabe.s.dma_size            = roundup (size, E3_BLK_ALIGN);
61923 +    dmabe.s.dma_source          = Q_MSG_ADDR(outputq, slotNum) + base;
61924 +    dmabe.s.dma_dest            = base;
61925 +    dmabe.s.dma_destEvent       = EP_SYSTEMQ_ADDR(qnum);
61926 +    dmabe.s.dma_destCookieVProc = vp;
61927 +    dmabe.s.dma_srcEvent        = Q_EVENT_ADDR(outputq, slotNum);
61928 +    dmabe.s.dma_srcCookieVProc  = 0;
61929 +
61930 +    Q_DONE(outputq, slotNum) = EP3_EVENT_ACTIVE;
61931 +    
61932 +    elan3_sdram_writel (rail->Device, Q_EVENT(outputq, slotNum), 1);
61933 +
61934 +    if (IssueDma (rail, &dmabe, EP_RETRY_CRITICAL, FALSE) != ISSUE_COMMAND_OK)
61935 +    {
61936 +       Q_DONE(outputq, slotNum) = EP3_EVENT_FAILED;
61937 +       return FALSE;
61938 +    }
61939 +
61940 +    return TRUE;
61941 +}
61942 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kmsg_elan4.c
61943 ===================================================================
61944 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kmsg_elan4.c    2004-02-23 16:02:56.000000000 -0500
61945 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kmsg_elan4.c 2005-07-28 14:52:52.893671896 -0400
61946 @@ -0,0 +1,418 @@
61947 +/*
61948 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61949 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61950 + *
61951 + *    For licensing information please see the supplied COPYING file
61952 + *
61953 + */
61954 +
61955 +#ident "@(#)$Id: kmsg_elan4.c,v 1.8.6.2 2005/02/28 14:06:56 david Exp $"
61956 +/*      $Source: /cvs/master/quadrics/epmod/kmsg_elan4.c,v $ */
61957 +
61958 +#include <qsnet/kernel.h>
61959 +
61960 +#include <elan/kcomm.h>
61961 +
61962 +#include "debug.h"
61963 +#include "kcomm_vp.h"
61964 +#include "kcomm_elan4.h"
61965 +
61966 +#include <elan4/trtype.h>
61967 +
61968 +static void
61969 +ep4_inputq_interrupt (EP4_RAIL *rail, void *arg)
61970 +{
61971 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) arg;
61972 +
61973 +    /* mark the queue as "fired" to cause a single waitevent
61974 +     * to be issued next time the queue is polled */
61975 +    atomic_inc (&inputq->q_fired);
61976 +    
61977 +    (*inputq->q_callback)(&rail->r_generic, inputq->q_arg);
61978 +}
61979 +
61980 +EP_INPUTQ *
61981 +ep4_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount,
61982 +                 EP_INPUTQ_CALLBACK *callback, void *arg)
61983 +{
61984 +    EP4_RAIL     *rail = (EP4_RAIL *) r;
61985 +    EP4_INPUTQ   *inputq;
61986 +    E4_Event32    qevent;
61987 +    void         *slots;
61988 +    int           i;
61989 +
61990 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
61991 +
61992 +    KMEM_ALLOC (inputq, EP4_INPUTQ *, sizeof (EP4_INPUTQ), 1);
61993 +
61994 +    if (inputq == NULL)
61995 +       return (EP_INPUTQ *) NULL;
61996 +    
61997 +    if ((slots = ep_alloc_main (&rail->r_generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL)
61998 +    {
61999 +       KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
62000 +       return (EP_INPUTQ *) NULL;
62001 +    }
62002 +
62003 +    inputq->q_slotSize  = slotSize;
62004 +    inputq->q_slotCount = slotCount;
62005 +    inputq->q_callback  = callback;
62006 +    inputq->q_arg       = arg;
62007 +    inputq->q_slots     = slots;
62008 +
62009 +    /* Initialise all the slots to be "unreceived" */
62010 +    for (i = 0; i < slotCount; i++)
62011 +       ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
62012 +    
62013 +    inputq->q_last      = inputq->q_slotsAddr + (slotCount-1) * slotSize;
62014 +    inputq->q_fptr      = inputq->q_slotsAddr;
62015 +    inputq->q_desc      = EP_SYSTEMQ_DESC (rail->r_queuedescs, qnum);
62016 +    inputq->q_descAddr  = EP_SYSTEMQ_ADDR (qnum);
62017 +    inputq->q_eventAddr = rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]);
62018 +
62019 +    if (callback)
62020 +    {
62021 +       if ((inputq->q_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == 0)
62022 +       {
62023 +           ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
62024 +
62025 +           KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
62026 +           return (EP_INPUTQ *) NULL;
62027 +       }
62028 +
62029 +       if ((inputq->q_wcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == 0)
62030 +       {
62031 +           ep4_put_ecq (rail, inputq->q_ecq, 1);
62032 +           ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
62033 +
62034 +           KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
62035 +           return (EP_INPUTQ *) NULL;
62036 +       }
62037 +
62038 +       ep4_register_intcookie (rail, &inputq->q_intcookie, inputq->q_descAddr, ep4_inputq_interrupt, inputq);
62039 +
62040 +       inputq->q_count = 0;
62041 +
62042 +       atomic_set (&inputq->q_fired, 0);
62043 +
62044 +       /* Initialise the queue event */
62045 +       qevent.ev_CountAndType = E4_EVENT_INIT_VALUE (callback ? -32 : 0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0);
62046 +       qevent.ev_WritePtr     = inputq->q_ecq->ecq_addr;
62047 +       qevent.ev_WriteValue   = (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD;
62048 +    }
62049 +
62050 +    /* copy the event down to sdram */
62051 +    elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qevent, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]), sizeof (E4_Event32));
62052 +
62053 +    return (EP_INPUTQ *) inputq;
62054 +}
62055 +
62056 +void
62057 +ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q)
62058 +{
62059 +    EP4_RAIL   *rail   = (EP4_RAIL *) r;
62060 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) q;
62061 +
62062 +    ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
62063 +
62064 +    if (inputq->q_callback)
62065 +    {
62066 +       ep4_deregister_intcookie (rail, &inputq->q_intcookie);
62067 +       ep4_put_ecq (rail, inputq->q_ecq, 1);
62068 +       ep4_put_ecq (rail, inputq->q_wcq, 4);
62069 +    }
62070 +
62071 +    KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
62072 +}
62073 +
62074 +void
62075 +ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q)
62076 +{
62077 +    EP4_RAIL     *rail     = (EP4_RAIL *) r;
62078 +    EP4_INPUTQ   *inputq   = (EP4_INPUTQ *) q;
62079 +    EP_ADDR       lastSlot = inputq->q_slotsAddr + (inputq->q_slotCount-1) * inputq->q_slotSize;
62080 +    E4_InputQueue qdesc;
62081 +
62082 +    qdesc.q_bptr    = inputq->q_slotsAddr;
62083 +    qdesc.q_fptr    = inputq->q_slotsAddr;
62084 +    qdesc.q_control = E4_InputQueueControl (inputq->q_slotsAddr, lastSlot, inputq->q_slotSize);
62085 +    qdesc.q_event   = inputq->q_callback ? inputq->q_eventAddr : 0;
62086 +
62087 +    /* copy the queue descriptor down to sdram */
62088 +    ep4_write_qdesc (rail, inputq->q_desc, &qdesc);
62089 +
62090 +    EPRINTF5 (DBG_KMSG,  "ep_enable_inputq: %x - %016llx %016llx %016llx %016llx\n", (int) inputq->q_descAddr,
62091 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 0),
62092 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 8),
62093 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 16),
62094 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 24));
62095 +}
62096 +
62097 +void
62098 +ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q)
62099 +{
62100 +    EP4_RAIL     *rail   = (EP4_RAIL *) r;
62101 +    EP4_INPUTQ   *inputq = (EP4_INPUTQ *) q;
62102 +    E4_InputQueue qdesc;
62103 +
62104 +    /* Initialise the input queue descriptor as "full" with no event */
62105 +    qdesc.q_bptr    = 0;
62106 +    qdesc.q_fptr    = 8;
62107 +    qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8);
62108 +    qdesc.q_event   = 0;
62109 +
62110 +    /* copy the queue descriptor down to sdram */
62111 +    ep4_write_qdesc (rail, inputq->q_desc, &qdesc);
62112 +}
62113 +
62114 +int
62115 +ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg)
62116 +{
62117 +    EP4_RAIL   *rail   = (EP4_RAIL *) r;
62118 +    ELAN4_DEV  *dev    = rail->r_ctxt.ctxt_dev; 
62119 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) q;
62120 +    sdramaddr_t qdesc = inputq->q_desc;
62121 +    E4_Addr     fptr  = inputq->q_fptr;
62122 +    E4_Addr     bptr  = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
62123 +    int                count = 0;
62124 +    int         delay;
62125 +
62126 +    while (bptr != 0 && fptr != bptr)
62127 +    {
62128 +       while (fptr != bptr)
62129 +       {
62130 +           unsigned long slot = (unsigned long) inputq->q_slots + (fptr - inputq->q_slotsAddr);
62131 +           
62132 +           /* Poll the final word of the message until the message has completely
62133 +            * arrived in main memory. */
62134 +           for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
62135 +               DELAY (delay);
62136 +           
62137 +           EPRINTF4(DBG_KMSG, "ep4_poll_inputq: %x slot %d of %d [%08x]\n", (int)inputq->q_descAddr,
62138 +                    ((int)(fptr - inputq->q_slotsAddr))/inputq->q_slotSize, 
62139 +                    inputq->q_slotCount, ((uint32_t *) (slot + inputq->q_slotSize))[-1]);
62140 +           
62141 +           /* Call the message handler */
62142 +           (*handler) (r, arg, (void *) slot);
62143 +           
62144 +           /* reset the last word of the slot to "unreceived" */
62145 +           ((uint32_t *) (slot + inputq->q_slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
62146 +           
62147 +           /* move on the front pointer */
62148 +           fptr = (fptr == inputq->q_last) ? inputq->q_slotsAddr : fptr + inputq->q_slotSize;
62149 +           
62150 +           elan4_sdram_writel (dev, qdesc + offsetof (E4_InputQueue, q_fptr), fptr);
62151 +           
62152 +           inputq->q_count++;
62153 +           
62154 +           if (++count >= maxCount && maxCount)
62155 +           {
62156 +               inputq->q_fptr = fptr;
62157 +
62158 +               return count;
62159 +           }
62160 +       }
62161 +
62162 +       bptr = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
62163 +    }
62164 +
62165 +    inputq->q_fptr = fptr;
62166 +
62167 +    /* Only insert a single wait event command if the callback has
62168 +     * occurred, otherwise just accrue the count as we've just periodically
62169 +     * polled it.
62170 +     */
62171 +    if (inputq->q_callback && atomic_read (&inputq->q_fired))
62172 +    {
62173 +       atomic_dec (&inputq->q_fired);
62174 +
62175 +       ep4_wait_event_cmd (inputq->q_wcq, inputq->q_eventAddr,
62176 +                           E4_EVENT_INIT_VALUE (-inputq->q_count << 5, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),
62177 +                           inputq->q_ecq->ecq_addr,
62178 +                           (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD);
62179 +
62180 +       inputq->q_count = 0;
62181 +    }
62182 +
62183 +    return count;
62184 +}
62185 +
62186 +#define Q_MSG(q,slotNum)         (unsigned long)((q)->q_main      + (slotNum) * (q)->q_slotSize)
62187 +#define Q_MSG_ADDR(q,slotNum)                  ((q)->q_mainAddr  + (slotNum) * (q)->q_slotSize)
62188 +#define Q_DONE(q,slotNum)        *((E4_uint64 *)((q)->q_main      + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64)))
62189 +#define Q_DONE_ADDR(q,slotNum)                 ((q)->q_mainAddr  + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64))
62190 +
62191 +#define Q_MAIN_SIZE(q)                 ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E4_uint64)))
62192 +
62193 +#define Q_DONE_VAL(val,cnt)            ((cnt) << 16 | (val))
62194 +#define Q_DONE_RET(done)               ((int) ((done) & 0xffff))
62195 +#define Q_DONE_CNT(done)               ((int) ((done) >> 16))
62196 +
62197 +EP_OUTPUTQ *
62198 +ep4_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount)
62199 +{
62200 +    EP4_RAIL    *rail = (EP4_RAIL *) r;
62201 +    EP4_OUTPUTQ *outputq;
62202 +    int          i;
62203 +
62204 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
62205 +
62206 +    KMEM_ALLOC (outputq, EP4_OUTPUTQ *, sizeof (EP4_OUTPUTQ), 1);
62207 +
62208 +    if (outputq == NULL)
62209 +       return NULL;
62210 +
62211 +    spin_lock_init (&outputq->q_lock);
62212 +
62213 +    outputq->q_slotCount = slotCount;
62214 +    outputq->q_slotSize  = slotSize;
62215 +    outputq->q_main      = ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr);
62216 +
62217 +    if (outputq->q_main == (E4_uint64 *) NULL)
62218 +    {
62219 +       KMEM_FREE (outputq, sizeof (EP_OUTPUTQ));
62220 +       return NULL;
62221 +    }
62222 +
62223 +    outputq->q_cq = elan4_alloccq (&rail->r_ctxt, CQ_Size64K, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority);
62224 +
62225 +    if (outputq->q_cq == (ELAN4_CQ *) NULL)
62226 +    {
62227 +       ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
62228 +
62229 +       KMEM_FREE (outputq, sizeof (EP_OUTPUTQ));
62230 +    }
62231 +
62232 +    outputq->q_dwords = CQ_Size (outputq->q_cq->cq_size) >> 3;
62233 +
62234 +    /* mark all the queue slots as finished */
62235 +    for (i = 0; i < slotCount; i++)
62236 +       Q_DONE(outputq, i) = Q_DONE_VAL (EP_OUTPUTQ_FINISHED, 0);
62237 +
62238 +    return (EP_OUTPUTQ *) outputq;
62239 +}
62240 +
62241 +void
62242 +ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q)
62243 +{
62244 +    EP4_RAIL    *rail    = (EP4_RAIL *) r;
62245 +    EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q;
62246 +
62247 +    elan4_freecq (&rail->r_ctxt, outputq->q_cq);
62248 +
62249 +    ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
62250 +
62251 +    spin_lock_destroy (&outputq->q_lock);
62252 +
62253 +    KMEM_FREE (outputq, sizeof (EP4_OUTPUTQ));
62254 +}
62255 +
62256 +void *
62257 +ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
62258 +{
62259 +    return (void *) Q_MSG ((EP4_OUTPUTQ *) q, slotNum);
62260 +}
62261 +
62262 +int
62263 +ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
62264 +{
62265 +    EPRINTF2 (DBG_KMSG, "ep4_outputq_state: slotNum %d state %x\n", slotNum, (int)Q_DONE((EP4_OUTPUTQ *) q, slotNum));
62266 +
62267 +    return Q_DONE_RET(Q_DONE((EP4_OUTPUTQ *)q, slotNum));
62268 +}
62269 +
62270 +int
62271 +ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, 
62272 +                 unsigned vp, unsigned qnum, unsigned retries)
62273 +{
62274 +    EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q;
62275 +    unsigned int nbytes  = roundup (size, 32);
62276 +    unsigned int base    = outputq->q_slotSize - nbytes;
62277 +    unsigned int i, dwords;
62278 +    unsigned long flags;
62279 +    E4_uint64 val;
62280 +
62281 +    spin_lock_irqsave (&outputq->q_lock, flags);
62282 +
62283 +    EPRINTF4 (DBG_KMSG, "ep4_outputq_send: slotNum=%d size=%d vp=%d qnum=%d\n", slotNum, size, vp, qnum);
62284 +
62285 +    /* compute command queue size as follows - each slot uses
62286 +     *     overhead:   14 dwords +
62287 +     *    data > 128 ? 36 dwords
62288 +     *     data > 64  ? 18 dwords
62289 +     *     data > 32  ? 10 dwords
62290 +     *     else         6  dwords
62291 +     */
62292 +    dwords = 14 + (size > 128 ? 36 :
62293 +                  size > 64  ? 18 :
62294 +                  size  ? 10 : 6);
62295 +
62296 +    outputq->q_dwords += Q_DONE_CNT (Q_DONE(outputq, slotNum));
62297 +
62298 +    if (dwords > outputq->q_dwords)
62299 +    {
62300 +       /* attempt to reclaim command queue space from other slots */
62301 +       i = slotNum;
62302 +       do {
62303 +           if (++i == outputq->q_slotCount)
62304 +               i = 0;
62305 +           
62306 +           val = Q_DONE(outputq, i);
62307 +
62308 +           if ((Q_DONE_RET (val) == EP_OUTPUTQ_FINISHED || Q_DONE_RET (val) == EP_OUTPUTQ_FAILED) && Q_DONE_CNT(val) > 0)
62309 +           {
62310 +               outputq->q_dwords += Q_DONE_CNT (val);
62311 +
62312 +               Q_DONE(outputq, i) = Q_DONE_VAL(Q_DONE_RET(val), 0);
62313 +           }
62314 +       } while (i != slotNum && dwords > outputq->q_dwords);
62315 +    }
62316 +
62317 +    if (dwords > outputq->q_dwords)
62318 +    {
62319 +       spin_unlock_irqrestore (&outputq->q_lock, flags);
62320 +       
62321 +       EPRINTF0 (DBG_KMSG, "ep4_outputq_state: no command queue space\n");
62322 +       return 0;
62323 +    }
62324 +
62325 +    outputq->q_dwords -= dwords;
62326 +
62327 +    Q_DONE(outputq, slotNum) = Q_DONE_VAL (EP_OUTPUTQ_BUSY, dwords);
62328 +
62329 +    if (outputq->q_retries != retries)
62330 +    {
62331 +       outputq->q_retries = retries;
62332 +
62333 +       elan4_guard (outputq->q_cq, GUARD_CHANNEL(1) | GUARD_RESET(retries));
62334 +       elan4_nop_cmd (outputq->q_cq, 0);
62335 +    }
62336 +
62337 +    /* transfer the top "size" bytes from message buffer to top of input queue */
62338 +    elan4_open_packet (outputq->q_cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp));
62339 +    elan4_sendtrans0 (outputq->q_cq, TR_INPUT_Q_GETINDEX, EP_SYSTEMQ_ADDR(qnum));
62340 +
62341 +    /* send up to EP_SYSTEMQ_MSG_MAX (256) bytes of message to the top of the slot */
62342 +    if (size > 128)
62343 +    {
62344 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 0,   (void *) (Q_MSG(outputq, slotNum) + base + 0));
62345 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 128, (void *) (Q_MSG(outputq, slotNum) + base + 128));
62346 +    }
62347 +    else if (size > 64)
62348 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base, (void *) (Q_MSG(outputq, slotNum) + base));
62349 +    else if (size > 32)
62350 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD),  base, (void *) (Q_MSG(outputq, slotNum) + base));
62351 +    else
62352 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (32 >> 3, 0, TR_DATATYPE_DWORD),  base, (void *) (Q_MSG(outputq, slotNum) + base));
62353 +    elan4_sendtrans1 (outputq->q_cq, TR_INPUT_Q_COMMIT, EP_SYSTEMQ_ADDR(qnum), 0 /* no cookie */);
62354 +
62355 +    elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (outputq->q_retries));
62356 +    elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FINISHED, dwords));
62357 +
62358 +    elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (outputq->q_retries));
62359 +    elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FAILED, dwords));
62360 +
62361 +    spin_unlock_irqrestore (&outputq->q_lock, flags);
62362 +
62363 +    return 1;
62364 +}
62365 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kthread.c
62366 ===================================================================
62367 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kthread.c       2004-02-23 16:02:56.000000000 -0500
62368 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kthread.c    2005-07-28 14:52:52.894671744 -0400
62369 @@ -0,0 +1,186 @@
62370 +/*
62371 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62372 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
62373 + *
62374 + *    For licensing information please see the supplied COPYING file
62375 + *
62376 + */
62377 +
62378 +#ident "@(#)$Id: kthread.c,v 1.5 2004/05/19 08:54:57 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
62379 +/*      $Source: /cvs/master/quadrics/epmod/kthread.c,v $*/
62380 +
62381 +#include <qsnet/kernel.h>
62382 +
62383 +#include <elan/kthread.h>
62384 +
62385 +void
62386 +ep_kthread_init (EP_KTHREAD *kt)
62387 +{
62388 +       spin_lock_init (&kt->lock);
62389 +       kcondvar_init (&kt->wait);
62390 +       
62391 +       kt->next_run     = 0;
62392 +       kt->should_stall = 0;
62393 +       kt->started      = 0;
62394 +       kt->should_stop  = 0;
62395 +       kt->stopped      = 0;
62396 +       kt->state        = KT_STATE_RUNNING;
62397 +}
62398 +
62399 +void
62400 +ep_kthread_destroy (EP_KTHREAD *kt)
62401 +{
62402 +       spin_lock_destroy (&kt->lock);
62403 +       kcondvar_destroy (&kt->wait);
62404 +}
62405 +
62406 +void
62407 +ep_kthread_started (EP_KTHREAD *kt)
62408 +{
62409 +       unsigned long flags;
62410 +       
62411 +       spin_lock_irqsave (&kt->lock, flags);
62412 +       kt->started = 1;
62413 +       spin_unlock_irqrestore(&kt->lock, flags);
62414 +}
62415 +
62416 +void
62417 +ep_kthread_stopped (EP_KTHREAD *kt)
62418 +{
62419 +       unsigned long flags;
62420 +       
62421 +       spin_lock_irqsave (&kt->lock, flags);
62422 +       kt->stopped = 1;
62423 +       kcondvar_wakeupall (&kt->wait, &kt->lock);
62424 +       spin_unlock_irqrestore(&kt->lock, flags);
62425 +}
62426 +
62427 +int
62428 +ep_kthread_should_stall (EP_KTHREAD *kth)
62429 +{
62430 +       return (kth->should_stall);
62431 +}
62432 +
62433 +int
62434 +ep_kthread_sleep (EP_KTHREAD *kt, long next_run)
62435 +{
62436 +       unsigned long flags;
62437 +
62438 +       spin_lock_irqsave (&kt->lock, flags);
62439 +       if (next_run && (kt->next_run == 0 || BEFORE (next_run, kt->next_run)))
62440 +               kt->next_run = next_run;
62441 +
62442 +       if (kt->should_stop)
62443 +       {
62444 +               spin_unlock_irqrestore (&kt->lock, flags);
62445 +               return (-1);
62446 +       }
62447 +       
62448 +       do {
62449 +               if (kt->should_stall)
62450 +                       kcondvar_wakeupall (&kt->wait, &kt->lock);
62451 +
62452 +               kt->state = KT_STATE_SLEEPING;
62453 +               kt->running = 0;
62454 +               if (kt->should_stall || kt->next_run == 0)
62455 +                       kcondvar_wait (&kt->wait, &kt->lock, &flags);
62456 +               else
62457 +                       kcondvar_timedwait (&kt->wait,&kt->lock, &flags, kt->next_run);
62458 +               kt->state = KT_STATE_RUNNING;
62459 +               kt->running = lbolt;
62460 +       } while (kt->should_stall);
62461 +       kt->next_run = 0;
62462 +       spin_unlock_irqrestore (&kt->lock, flags);
62463 +       
62464 +       return (0);
62465 +}
62466 +
62467 +void
62468 +ep_kthread_schedule (EP_KTHREAD *kt, long tick)
62469 +{
62470 +       unsigned long flags;
62471 +       
62472 +       spin_lock_irqsave (&kt->lock, flags);
62473 +       if (kt->next_run == 0 || BEFORE (tick, kt->next_run))
62474 +       {
62475 +               kt->next_run = tick;
62476 +               if (!kt->should_stall && kt->state == KT_STATE_SLEEPING)
62477 +               {
62478 +                       kt->state = KT_STATE_SCHEDULED;
62479 +                       kcondvar_wakeupone (&kt->wait, &kt->lock);
62480 +               }
62481 +       }
62482 +       spin_unlock_irqrestore (&kt->lock, flags);
62483 +}
62484 +
62485 +void
62486 +ep_kthread_stall (EP_KTHREAD *kt)
62487 +{
62488 +       unsigned long flags;
62489 +       
62490 +       spin_lock_irqsave (&kt->lock, flags);
62491 +       if (kt->should_stall++ == 0)
62492 +               kcondvar_wakeupall (&kt->wait, &kt->lock);
62493 +
62494 +       while (kt->state != KT_STATE_SLEEPING)
62495 +               kcondvar_wait (&kt->wait, &kt->lock, &flags);
62496 +       spin_unlock_irqrestore (&kt->lock, flags);
62497 +}
62498 +
62499 +void
62500 +ep_kthread_resume (EP_KTHREAD *kt)
62501 +{
62502 +       unsigned long flags;
62503 +
62504 +       spin_lock_irqsave (&kt->lock, flags);
62505 +       if (--kt->should_stall == 0)
62506 +       {
62507 +               kt->state = KT_STATE_SCHEDULED;
62508 +               kcondvar_wakeupone (&kt->wait, &kt->lock);
62509 +       }
62510 +       spin_unlock_irqrestore (&kt->lock, flags);
62511 +}
62512 +
62513 +void
62514 +ep_kthread_stop (EP_KTHREAD *kt)
62515 +{
62516 +       unsigned long flags;
62517 +       
62518 +       spin_lock_irqsave (&kt->lock, flags);
62519 +       kt->should_stop = 1;
62520 +       while (kt->started && !kt->stopped)
62521 +       {
62522 +               kcondvar_wakeupall (&kt->wait, &kt->lock);
62523 +               kcondvar_wait (&kt->wait, &kt->lock, &flags);
62524 +       }
62525 +       spin_unlock_irqrestore (&kt->lock, flags);
62526 +}
62527 +
62528 +int
62529 +ep_kthread_state (EP_KTHREAD *kt, long *time)
62530 +{
62531 +       unsigned long flags;
62532 +       int res = KT_STATE_SLEEPING;
62533 +
62534 +       spin_lock_irqsave (&kt->lock, flags);
62535 +
62536 +       if (kt->next_run) {
62537 +               *time = kt->next_run;
62538 +               res   = kt->should_stall ? KT_STATE_STALLED : KT_STATE_SCHEDULED;
62539 +       }
62540 +
62541 +       if (kt->running) {
62542 +               *time = kt->running;
62543 +               res   = KT_STATE_RUNNING;
62544 +       }
62545 +
62546 +       spin_unlock_irqrestore (&kt->lock, flags);
62547 +       
62548 +       return res;
62549 +}
62550 +
62551 +/*
62552 + * Local variables:
62553 + * c-file-style: "linux"
62554 + * End:
62555 + */
62556 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/kthread.h
62557 ===================================================================
62558 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/kthread.h       2004-02-23 16:02:56.000000000 -0500
62559 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/kthread.h    2005-07-28 14:52:52.894671744 -0400
62560 @@ -0,0 +1,53 @@
62561 +/*
62562 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62563 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
62564 + *
62565 + *    For licensing information please see the supplied COPYING file
62566 + *
62567 + */
62568 +
62569 +#ifndef __ELAN3_KTHREAD_H
62570 +#define __ELAN3_KTHREAD_H
62571 +
62572 +#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
62573 +/*      $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/
62574 +
62575 +typedef struct ep_kthread
62576 +{
62577 +       kcondvar_t      wait;                                   /* place to sleep */
62578 +       spinlock_t      lock;                                   /* and lock */
62579 +       long            next_run;                               /* tick when thread should next run */
62580 +       long            running;                                /* tick when thread started to run */
62581 +       unsigned short  should_stall;
62582 +       unsigned char   state;
62583 +       unsigned int    started:1;
62584 +       unsigned int    should_stop:1;
62585 +       unsigned int    stopped:1;
62586 +} EP_KTHREAD;
62587 +
62588 +#define KT_STATE_SLEEPING              0
62589 +#define KT_STATE_SCHEDULED             1
62590 +#define KT_STATE_RUNNING               2
62591 +#define KT_STATE_STALLED               3
62592 +
62593 +#define AFTER(a, b)                    ((((long)(a)) - ((long)(b))) > 0)
62594 +#define BEFORE(a,b)                    ((((long)(a)) - ((long)(b))) < 0)
62595 +
62596 +extern void ep_kthread_init (EP_KTHREAD *kt);
62597 +extern void ep_kthread_destroy (EP_KTHREAD *kt);
62598 +extern void ep_kthread_started (EP_KTHREAD *kt);
62599 +extern void ep_kthread_stopped (EP_KTHREAD *kt);
62600 +extern int  ep_kthread_should_stall (EP_KTHREAD *kth);
62601 +extern int  ep_kthread_sleep (EP_KTHREAD *kth, long next_run);
62602 +extern void ep_kthread_schedule (EP_KTHREAD *kt, long when);
62603 +extern void ep_kthread_stall (EP_KTHREAD *kth);
62604 +extern void ep_kthread_resume (EP_KTHREAD *kt);
62605 +extern void ep_kthread_stop (EP_KTHREAD *kt);
62606 +extern int  ep_kthread_state (EP_KTHREAD *kt, long *time);
62607 +#endif /* __ELAN3_KTHREAD_H */
62608 +
62609 +/*
62610 + * Local variables:
62611 + * c-file-style: "linux"
62612 + * End:
62613 + */
62614 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/Makefile
62615 ===================================================================
62616 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/Makefile        2004-02-23 16:02:56.000000000 -0500
62617 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/Makefile     2005-07-28 14:52:52.894671744 -0400
62618 @@ -0,0 +1,17 @@
62619 +#
62620 +# Makefile for Quadrics QsNet
62621 +#
62622 +# Copyright (c) 2002-2004 Quadrics Ltd
62623 +#
62624 +# File: drivers/net/qsnet/ep/Makefile
62625 +#
62626 +
62627 +
62628 +ep3-$(CONFIG_ELAN3)    := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o
62629 +ep4-$(CONFIG_ELAN4)    := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o
62630 +#
62631 +
62632 +obj-$(CONFIG_EP)       += ep.o
62633 +ep-objs        := cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o $(ep3-$(CONFIG_EP)) $(ep4-$(CONFIG_EP))
62634 +
62635 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
62636 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/Makefile.conf
62637 ===================================================================
62638 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/Makefile.conf   2004-02-23 16:02:56.000000000 -0500
62639 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/Makefile.conf        2005-07-28 14:52:52.895671592 -0400
62640 @@ -0,0 +1,12 @@
62641 +# Flags for generating QsNet Linux Kernel Makefiles
62642 +MODNAME                =       ep.o
62643 +MODULENAME     =       ep
62644 +KOBJFILES      =       cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o \$\(ep3-\$\(CONFIG_EP\)\) \$\(ep4-\$\(CONFIG_EP\)\)
62645 +EXPORT_KOBJS   =       conf_linux.o
62646 +CONFIG_NAME    =       CONFIG_EP
62647 +SGALFC         =       
62648 +# EXTRALINES START
62649 +
62650 +ep3-$(CONFIG_ELAN3)    := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o
62651 +ep4-$(CONFIG_ELAN4)    := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o
62652 +# EXTRALINES END
62653 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/neterr.c
62654 ===================================================================
62655 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/neterr.c        2004-02-23 16:02:56.000000000 -0500
62656 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/neterr.c     2005-07-28 14:52:52.895671592 -0400
62657 @@ -0,0 +1,82 @@
62658 +/*
62659 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62660 + *
62661 + *    For licensing information please see the supplied COPYING file
62662 + *
62663 + */
62664 +
62665 +#ident "@(#)$Id: neterr.c,v 1.25.8.1 2004/11/12 10:54:51 mike Exp $"
62666 +/*      $Source: /cvs/master/quadrics/epmod/neterr.c,v $ */
62667 +
62668 +#include <qsnet/kernel.h>
62669 +#include <elan/kcomm.h>
62670 +
62671 +#include "debug.h"
62672 +
62673 +void
62674 +ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie)
62675 +{
62676 +    EP_SYS       *sys      = rail->System;
62677 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId];
62678 +    unsigned long flags;
62679 +
62680 +    spin_lock_irqsave (&sys->NodeLock, flags);
62681 +
62682 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
62683 +    
62684 +    if (nodeRail->NetworkErrorState == 0)
62685 +    {
62686 +       EPRINTF2 (DBG_NETWORK_ERROR, "%s: raise context filter for node %d due to network error\n", rail->Name, nodeId);
62687 +       printk ("%s: raise context filter for node %d due to network error\n", rail->Name, nodeId);
62688 +       
62689 +       rail->Operations.RaiseFilter (rail, nodeId);
62690 +       
62691 +       if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
62692 +           printk ("%s: node %d is flushing - deferring network error fixup\n", rail->Name, nodeId);
62693 +       else
62694 +           list_add_tail (&nodeRail->Link, &rail->NetworkErrorList);
62695 +    }
62696 +    
62697 +    switch (what)
62698 +    {
62699 +    case EP_NODE_NETERR_ATOMIC_PACKET:
62700 +       ASSERT (nodeRail->NetworkErrorCookies[channel] == 0);
62701 +       
62702 +       /* Need to raise the appropriate context filter for this node,
62703 +        * and periodically send a neterr fixup message to it until 
62704 +        * we receive an ack from it
62705 +        */
62706 +       IncrStat (rail, NeterrAtomicPacket);
62707 +       
62708 +       nodeRail->NetworkErrorCookies[channel] = cookie;
62709 +       
62710 +       nodeRail->NetworkErrorState |= EP_NODE_NETERR_ATOMIC_PACKET;
62711 +       nodeRail->MsgXid             = ep_xid_cache_alloc (sys, &rail->XidCache);
62712 +       
62713 +       EPRINTF3 (DBG_NETWORK_ERROR, "%s: atomic packet destroyed - node %d cookie %llx\n", rail->Name, nodeId, cookie);
62714 +
62715 +       printk ("%s: atomic packet destroyed - node %d cookie %llx\n", rail->Name, nodeId, cookie);
62716 +       break;
62717 +
62718 +    case EP_NODE_NETERR_DMA_PACKET:
62719 +       /* Must be an overlapped dma packet, raise the context filter,
62720 +        * and hold it up for a NETWORK_ERROR_TIMEOUT */
62721 +       IncrStat (rail, NeterrDmaPacket);
62722 +       
62723 +       nodeRail->NetworkErrorState |= EP_NODE_NETERR_DMA_PACKET;
62724 +       break;
62725 +    }
62726 +
62727 +    nodeRail->NextRunTime = lbolt + NETWORK_ERROR_TIMEOUT;
62728 +    
62729 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
62730 +
62731 +    ep_kthread_schedule (&sys->ManagerThread, nodeRail->NextRunTime);
62732 +}
62733 +
62734 +/*
62735 + * Local variables:
62736 + * c-file-style: "stroustrup"
62737 + * End:
62738 + */
62739 +
62740 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/neterr_elan3.c
62741 ===================================================================
62742 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/neterr_elan3.c  2004-02-23 16:02:56.000000000 -0500
62743 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/neterr_elan3.c       2005-07-28 14:52:52.896671440 -0400
62744 @@ -0,0 +1,326 @@
62745 +/*
62746 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62747 + *
62748 + *    For licensing information please see the supplied COPYING file
62749 + *
62750 + */
62751 +
62752 +#ident "@(#)$Id: neterr_elan3.c,v 1.24 2003/11/17 13:26:45 david Exp $"
62753 +/*      $Source: /cvs/master/quadrics/epmod/neterr_elan3.c,v $ */
62754 +
62755 +#include <qsnet/kernel.h>
62756 +
62757 +#include <elan/kcomm.h>
62758 +
62759 +#include "kcomm_vp.h"
62760 +#include "kcomm_elan3.h"
62761 +#include "debug.h"
62762 +
62763 +typedef struct neterr_halt_args
62764 +{
62765 +    EP3_RAIL        *Rail;
62766 +    unsigned int      NodeId;
62767 +    EP_NETERR_COOKIE *Cookies;
62768 +} NETERR_HALT_ARGS;
62769 +
62770 +static int
62771 +DmaMatchesCookie (EP3_RAIL *rail, E3_DMA_BE *dma, int nodeId, EP_NETERR_COOKIE *cookies, char *where)
62772 +{
62773 +    E3_uint32     cvproc;
62774 +    E3_uint32     cookie;
62775 +    
62776 +    if (dma->s.dma_direction == DMA_WRITE)
62777 +    {
62778 +       cvproc = dma->s.dma_destCookieVProc;
62779 +       cookie = dma->s.dma_srcCookieVProc;
62780 +    }
62781 +    else
62782 +    {
62783 +       cvproc = dma->s.dma_srcCookieVProc;
62784 +       cookie = dma->s.dma_destCookieVProc;
62785 +    }
62786 +    
62787 +    EPRINTF6 (DBG_NETWORK_ERROR, "%s: Neterr - %s: DMA %08x %08x %08x %08x\n", rail->Generic.Name, where,
62788 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
62789 +    EPRINTF5 (DBG_NETWORK_ERROR, "%s:                     %08x %08x %08x %08x\n", rail->Generic.Name,
62790 +             dma->s.dma_destEvent, dma->s.dma_destCookieVProc, dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
62791 +
62792 +    if (EP_VP_ISDATA((cvproc & DMA_PROCESS_MASK)) && EP_VP_TO_NODE(cvproc & DMA_PROCESS_MASK) == nodeId)
62793 +    {
62794 +       /*
62795 +        * This is a DMA going to the node which has a network fixup
62796 +        * request pending, so check if the cookie matches.
62797 +        */
62798 +       if ((cookie == cookies[0] || cookie == cookies[1]) /* && !WaitForEop */)
62799 +       {
62800 +           EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %08x on %s\n", rail->Generic.Name, cookie, where);
62801 +           
62802 +           return (TRUE);
62803 +       }
62804 +    }
62805 +
62806 +    return (FALSE);
62807 +}
62808 +
62809 +
62810 +static void
62811 +NetworkErrorHaltOperation (ELAN3_DEV *dev, void *arg)
62812 +{
62813 +    NETERR_HALT_ARGS *args = (NETERR_HALT_ARGS *) arg;
62814 +    EP3_RAIL         *rail = args->Rail;
62815 +    EP_SYS           *sys  = rail->Generic.System;
62816 +    sdramaddr_t       FPtr, BPtr;
62817 +    sdramaddr_t       Base, Top;
62818 +    E3_DMA_BE         dma;
62819 +    unsigned long     flags;
62820 +
62821 +    spin_lock_irqsave (&sys->NodeLock, flags);
62822 +
62823 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
62824 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
62825 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
62826 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
62827 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
62828 +    
62829 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
62830 +    BPtr =  read_reg32 (dev, DProc_SysCntx_BPtr);
62831 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
62832 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
62833 +    
62834 +    while (FPtr != BPtr)
62835 +    {
62836 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
62837 +       
62838 +       if (DmaMatchesCookie (rail, &dma, args->NodeId, args->Cookies, "runq "))
62839 +       {
62840 +           /*
62841 +            * Transfer the DMA to the node, its source event will 
62842 +            * get executed later.
62843 +            */
62844 +           QueueDmaOnStalledList (rail, &dma);
62845 +           
62846 +           /*
62847 +            * Remove the DMA from the queue by replacing it with one with
62848 +            * zero size and no events.
62849 +            *
62850 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
62851 +            * to mark the appropriate run queue as empty.
62852 +            */
62853 +           dma.s.dma_type            = (SYS_CONTEXT_BIT << 16);
62854 +           dma.s.dma_size            = 0;
62855 +           dma.s.dma_source          = (E3_Addr) 0;
62856 +           dma.s.dma_dest            = (E3_Addr) 0;
62857 +           dma.s.dma_destEvent       = (E3_Addr) 0;
62858 +           dma.s.dma_destCookieVProc = 0;
62859 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
62860 +           dma.s.dma_srcCookieVProc  = 0;
62861 +           
62862 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
62863 +       }
62864 +
62865 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
62866 +    }
62867 +
62868 +    rail->NetworkErrorFlushed = TRUE;
62869 +    kcondvar_wakeupall (&rail->NetworkErrorSleep, &sys->NodeLock);
62870 +
62871 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
62872 +}
62873 +
62874 +void
62875 +ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
62876 +{
62877 +    EP3_RAIL        *rail        = (EP3_RAIL *) r;
62878 +    EP_SYS          *sys         = rail->Generic.System;
62879 +    ELAN3_DEV       *dev         = rail->Device;
62880 +    EP_NODE_RAIL    *nodeRail    = &rail->Generic.Nodes[nodeId];
62881 +    E3_DMA_BE        dmabe;
62882 +    EP3_COOKIE      *cp;
62883 +    E3_uint32        vp;
62884 +    NETERR_HALT_ARGS args;
62885 +    struct list_head *el, *nel, matchedList;
62886 +    int              i;
62887 +    unsigned long    flags;
62888 +
62889 +    INIT_LIST_HEAD (&matchedList);
62890 +
62891 +    StallDmaRetryThread (rail);
62892 +
62893 +    args.Rail       = rail;
62894 +    args.NodeId     = nodeId;
62895 +    args.Cookies    = cookies;
62896 +
62897 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
62898 +    QueueHaltOperation (rail->Device, 0, NULL, INT_TProcHalted | INT_DProcHalted, NetworkErrorHaltOperation, &args);
62899 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
62900 +    
62901 +    spin_lock_irqsave (&sys->NodeLock, flags);
62902 +    while (! rail->NetworkErrorFlushed)
62903 +       kcondvar_wait (&rail->NetworkErrorSleep, &sys->NodeLock, &flags);
62904 +    rail->NetworkErrorFlushed = FALSE;
62905 +    
62906 +    spin_lock (&rail->DmaRetryLock);
62907 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
62908 +    {
62909 +       list_for_each_safe (el, nel, &rail->DmaRetries[i]) {
62910 +           EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
62911 +
62912 +           if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "retry"))
62913 +           {
62914 +               /* remove from retry list */
62915 +               list_del (&retry->Link);
62916 +
62917 +               /* add to list of dmas which matched */
62918 +               list_add_tail (&retry->Link, &matchedList);
62919 +           }
62920 +       }
62921 +    }
62922 +    
62923 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
62924 +       EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
62925 +       
62926 +       if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "stalled"))
62927 +       {
62928 +           /* remove from retry list */
62929 +           list_del (&retry->Link);
62930 +           
62931 +           /* add to list of dmas which matched */
62932 +           list_add_tail (&retry->Link, &matchedList);
62933 +       }
62934 +    }
62935 +    
62936 +    spin_unlock (&rail->DmaRetryLock);
62937 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
62938 +    
62939 +    ResumeDmaRetryThread (rail);
62940 +
62941 +    /* Now "set" the source event of any write DMA's */
62942 +    while (! list_empty (&matchedList))
62943 +    {
62944 +       EP3_RETRY_DMA *retry = list_entry (matchedList.next, EP3_RETRY_DMA, Link);
62945 +       
62946 +       list_del (&retry->Link);
62947 +
62948 +       if (retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_srcEvent)
62949 +       {
62950 +           sdramaddr_t event = ep_elan2sdram (&rail->Generic, retry->Dma.s.dma_srcEvent);
62951 +
62952 +           /* Block local interrupts, since we need to atomically
62953 +            * decrement the event count and perform the word write
62954 +            */
62955 +           local_irq_save (flags);
62956 +           {
62957 +               E3_uint32 type  = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type));
62958 +               E3_uint32 count = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Count));
62959 +
62960 +               elan3_sdram_writel (dev, event + offsetof (E3_Event, ev_Count), count - 1);
62961 +
62962 +               if (count == 1)
62963 +               {
62964 +                   if (type & EV_TYPE_MASK_BCOPY)
62965 +                   {
62966 +                       E3_Addr srcVal  = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Source));
62967 +                       E3_Addr dstAddr = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Dest)) & ~EV_BCOPY_DTYPE_MASK;
62968 +
62969 +                       ASSERT ((srcVal & EV_WCOPY) != 0);
62970 +                       
62971 +                       EPRINTF3 (DBG_NETWORK_ERROR, "%s: neterr perform event word write at %08x with %08x\n", rail->Generic.Name, dstAddr, srcVal);
62972 +
62973 +                       ELAN3_OP_STORE32 (rail->Ctxt, dstAddr, srcVal);
62974 +                   }
62975 +
62976 +                   if ((type & ~EV_TYPE_MASK_BCOPY) != 0)
62977 +                   {
62978 +                       if ((type & EV_TYPE_MASK_CHAIN) == EV_TYPE_CHAIN)
62979 +                       {
62980 +                           printk ("%s: event at %08x - chained event %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type);
62981 +                           panic ("ep: neterr invalid event type\n");
62982 +                       }
62983 +                       else if ((type & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
62984 +                       {
62985 +                           EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr event interrupt - cookie %08x\n", rail->Generic.Name, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)));
62986 +                           
62987 +                           cp = LookupCookie (&rail->CookieTable, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)));
62988 +                           
62989 +                           if (cp->Operations->Event)
62990 +                               cp->Operations->Event(rail, cp->Arg);
62991 +                       }
62992 +                       else if ((type & EV_TYPE_MASK_DMA) == EV_TYPE_DMA)
62993 +                       {
62994 +                           sdramaddr_t dma = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2));
62995 +                           
62996 +                           EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr chained dma - %08x\n", rail->Generic.Name, (type & ~EV_TYPE_MASK2));
62997 +                           
62998 +                           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
62999 +                           
63000 +                           if (dmabe.s.dma_direction == DMA_WRITE)
63001 +                           {
63002 +                               vp = dmabe.s.dma_destVProc;
63003 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
63004 +                           }
63005 +                           else
63006 +                           {
63007 +                               vp = dmabe.s.dma_srcVProc;
63008 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
63009 +                               
63010 +                               /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the 
63011 +                                * DMA descriptor will be read from the EP_RETRY_DMA rather than the 
63012 +                                * original DMA - this can then get reused and an incorrect DMA 
63013 +                                * descriptor sent 
63014 +                                * eventp->ev_Type contains the dma address with type in the lower bits 
63015 +                                */ 
63016 +                           
63017 +                               dmabe.s.dma_source    = (type & ~EV_TYPE_MASK2);
63018 +                               dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
63019 +                           }
63020 +                       
63021 +                           ASSERT (EP_VP_ISDATA(vp));
63022 +                       
63023 +                           nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
63024 +
63025 +                           switch (nodeRail->State)
63026 +                           {
63027 +                           case EP_NODE_CONNECTED:
63028 +                           case EP_NODE_LEAVING_CONNECTED:
63029 +                               if (cp != NULL)
63030 +                                   cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
63031 +                               else
63032 +                               {
63033 +                                   ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
63034 +                               
63035 +                                   QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
63036 +                               }
63037 +                               break;
63038 +
63039 +                           case EP_NODE_LOCAL_PASSIVATE:
63040 +                               QueueDmaOnStalledList (rail, &dmabe);
63041 +                               break;
63042 +
63043 +                           default:
63044 +                               panic ("ep: neterr incorrect state for node\n");
63045 +                           }
63046 +                       }
63047 +                       else if ((type & EV_TYPE_MASK_THREAD) == EV_TYPE_THREAD)
63048 +                       {
63049 +                           printk ("%s: event at %08x - thread waiting %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type);
63050 +                           panic ("ep: neterr invalid event type\n");
63051 +                       }
63052 +                   }
63053 +               }
63054 +           }
63055 +           local_irq_restore(flags);
63056 +       }
63057 +       
63058 +       /* add to free list */
63059 +       spin_lock_irqsave (&rail->DmaRetryLock, flags);
63060 +       list_add (&retry->Link, &rail->DmaRetryFreeList);
63061 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
63062 +    }
63063 +}
63064 +
63065 +/*
63066 + * Local variables:
63067 + * c-file-style: "stroustrup"
63068 + * End:
63069 + */
63070 +
63071 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/neterr_elan4.c
63072 ===================================================================
63073 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/neterr_elan4.c  2004-02-23 16:02:56.000000000 -0500
63074 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/neterr_elan4.c       2005-07-28 14:52:52.896671440 -0400
63075 @@ -0,0 +1,251 @@
63076 +/*
63077 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63078 + *
63079 + *    For licensing information please see the supplied COPYING file
63080 + *
63081 + */
63082 +
63083 +#ident "@(#)$Id: neterr_elan4.c,v 1.2 2003/11/24 17:57:24 david Exp $"
63084 +/*      $Source: /cvs/master/quadrics/epmod/neterr_elan4.c,v $ */
63085 +
63086 +#include <qsnet/kernel.h>
63087 +
63088 +#include <elan/kcomm.h>
63089 +
63090 +#include "kcomm_vp.h"
63091 +#include "kcomm_elan4.h"
63092 +#include "debug.h"
63093 +
63094 +struct neterr_desc
63095 +{
63096 +    EP4_RAIL         *rail;
63097 +    unsigned int      nodeid;
63098 +    EP_NETERR_COOKIE *cookies;
63099 +    int                      done;
63100 +} ;
63101 +
63102 +static int
63103 +dma_matches_cookie (EP4_RAIL *rail, E4_uint64 vproc, E4_uint64 cookie, unsigned int nodeId, EP_NETERR_COOKIE *cookies, const char *where)
63104 +{
63105 +    if ((EP_VP_ISDATA (vproc) && EP_VP_TO_NODE (vproc) == nodeId) && (cookie == cookies[0] || cookie == cookies[1]))
63106 +    {
63107 +       EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %016llx on %s\n", rail->r_generic.Name, cookie, where);
63108 +
63109 +       return 1;
63110 +    }
63111 +    return 0;
63112 +}
63113 +
63114 +static void
63115 +ep4_neterr_dma_flushop (ELAN4_DEV *dev, void *arg, int qfull)
63116 +{
63117 +    struct neterr_desc *desc  = (struct neterr_desc *) arg;
63118 +    EP4_RAIL           *rail  = desc->rail;
63119 +    E4_uint64           qptrs = read_reg64 (dev, DProcHighPriPtrs);
63120 +    E4_uint32           qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
63121 +    E4_uint32           qfptr = E4_QueueFrontPointer (qptrs);
63122 +    E4_uint32           qbptr = E4_QueueBackPointer (qptrs);
63123 +    E4_DProcQueueEntry  qentry;
63124 +    unsigned long       flags;
63125 +
63126 +    while ((qfptr != qbptr) || qfull)
63127 +    {
63128 +       E4_uint64 cookie = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie));
63129 +       E4_uint64 vproc  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc));
63130 +
63131 +       if (dma_matches_cookie (rail, vproc, cookie, desc->nodeid, desc->cookies, "runq "))
63132 +       {
63133 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry));
63134 +
63135 +           ep4_queue_dma_stalled (rail, &qentry.Desc);
63136 +
63137 +           /* Replace the dma with one which will "disappear" */
63138 +           qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
63139 +           qentry.Desc.dma_cookie   = 0;
63140 +           qentry.Desc.dma_vproc    = 0;
63141 +           qentry.Desc.dma_srcAddr  = 0;
63142 +           qentry.Desc.dma_dstAddr  = 0;
63143 +           qentry.Desc.dma_srcEvent = 0;
63144 +           qentry.Desc.dma_dstEvent = 0;
63145 +
63146 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
63147 +       }
63148 +       
63149 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
63150 +       qfull = 0;
63151 +    }
63152 +
63153 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
63154 +    desc->done = 1;
63155 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
63156 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
63157 +}
63158 +
63159 +static void
63160 +ep4_neterr_dma_haltop (ELAN4_DEV *dev, void *arg)
63161 +{
63162 +    struct neterr_desc *desc = (struct neterr_desc *) arg;
63163 +
63164 +    elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1);
63165 +}
63166 +
63167 +void
63168 +ep4_neterr_fixup_dmas (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
63169 +{
63170 +    EP_NODE_RAIL      *nodeRail = &rail->r_generic.Nodes[nodeId];
63171 +    struct neterr_desc desc;
63172 +    struct list_head   matchedList;
63173 +    struct list_head  *el, *nel;
63174 +    unsigned long      flags;
63175 +    register int       i;
63176 +
63177 +    desc.rail    = rail;
63178 +    desc.nodeid  = nodeId;
63179 +    desc.cookies = cookies;
63180 +    desc.done    = 0;
63181 +
63182 +    INIT_LIST_HEAD (&matchedList);
63183 +
63184 +    /* First -  stall the retry thread, so that it will no longer restart
63185 +     *          any dma's from the retry list */
63186 +    ep_kthread_stall (&rail->r_retry_thread);
63187 +    
63188 +    /* Second - flush through all command queues targeted by events, thread etc */
63189 +    ep4_flush_ecqs (rail);
63190 +    
63191 +    /* Third - queue a halt operation to flush through all DMA's which are executing
63192 +     *         or on the run queues */
63193 +    kmutex_lock (&rail->r_haltop_mutex);
63194 +    
63195 +    rail->r_haltop.op_mask      = INT_DProcHalted;
63196 +    rail->r_haltop.op_function  = ep4_neterr_dma_haltop;
63197 +    rail->r_haltop.op_arg       = &desc;
63198 +
63199 +    rail->r_flushop.op_function = ep4_neterr_dma_flushop;
63200 +    rail->r_flushop.op_arg      = &desc;
63201 +    
63202 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
63203 +
63204 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
63205 +    while (! desc.done)
63206 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
63207 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
63208 +    kmutex_unlock (&rail->r_haltop_mutex);
63209 +
63210 +    /* Fourth - run down the dma retry lists and move all entries to the cancelled
63211 +     *          list.  Any dma's which were on the run queues have already been
63212 +     *          moved there */
63213 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
63214 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
63215 +    {
63216 +       list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) {
63217 +           EP4_DMA_RETRY *retry    = list_entry (el, EP4_DMA_RETRY, retry_link);
63218 +           
63219 +           if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "retry"))
63220 +           {
63221 +               /* remove from retry list */
63222 +               list_del (&retry->retry_link);
63223 +               
63224 +               /* add to list of dmas which matched */
63225 +               list_add_tail (&retry->retry_link, &matchedList);
63226 +           }
63227 +       }
63228 +    }
63229 +    
63230 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
63231 +       EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link);
63232 +       
63233 +       if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "stalled"))
63234 +       {
63235 +           /* remove from retry list */
63236 +           list_del (&retry->retry_link);
63237 +           
63238 +           /* add to list of dmas which matched */
63239 +           list_add_tail (&retry->retry_link, &matchedList);
63240 +       }
63241 +    }
63242 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
63243 +    
63244 +    /* Now "set" the source event of any put DMA's - we can use the dma 
63245 +     * retry command queue as the retry thread is stalled */
63246 +    while (! list_empty (&matchedList))
63247 +    {
63248 +       EP4_DMA_RETRY *retry = list_entry (matchedList.next, EP4_DMA_RETRY, retry_link);
63249 +       
63250 +       list_del (&retry->retry_link);
63251 +
63252 +       elan4_set_event_cmd (rail->r_dma_ecq->ecq_cq, retry->retry_dma.dma_srcEvent);
63253 +
63254 +       spin_lock_irqsave (&rail->r_dma_lock, flags);
63255 +       list_add (&retry->retry_link, &rail->r_dma_freelist);
63256 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
63257 +    }
63258 +
63259 +    /* Flush through the command queues to ensure that all the setevents have executed */
63260 +    ep4_flush_ecqs (rail);
63261 +
63262 +    /* Finally - allow the retry thread to run again */
63263 +    ep_kthread_resume (&rail->r_retry_thread);
63264 +}
63265 +
63266 +void
63267 +ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops)
63268 +{
63269 +    /* we're called from the ManagerThread, so no need to stall it */
63270 +    list_add_tail (&ops->op_link, &rail->r_neterr_ops);
63271 +}
63272 +void
63273 +ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops)
63274 +{
63275 +    EP_SYS *sys = rail->r_generic.System;
63276 +
63277 +    ep_kthread_stall (&sys->ManagerThread);
63278 +    list_del (&ops->op_link);
63279 +    ep_kthread_resume (&sys->ManagerThread);
63280 +}
63281 +
63282 +void
63283 +ep4_neterr_fixup_sten (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
63284 +{
63285 +    struct list_head *el;
63286 +
63287 +    list_for_each (el, &rail->r_neterr_ops) {
63288 +       EP4_NETERR_OPS *op = list_entry (el, EP4_NETERR_OPS, op_link);
63289 +
63290 +       (op->op_func) (rail, op->op_arg, nodeId, cookies);
63291 +    }
63292 +}
63293 +
63294 +void
63295 +ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
63296 +{
63297 +    EP4_RAIL *rail = (EP4_RAIL *) r;
63298 +
63299 +    /* network error cookies can come from the following :
63300 +     *
63301 +     *   DMA  engine
63302 +     *     if a DMA matches a network error cookie, then we just need to 
63303 +     *     execute the local setevent *before* returning.
63304 +     *
63305 +     *   STEN packet
63306 +     *     if the STEN packet was generated as a WAIT_FOR_EOP
63307 +     *     and it's not present on the retry lists, then re-create
63308 +     *     it.
63309 +     *
63310 +     */
63311 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4_neterr_fixup: node %d cookies <%lld%s%s%s%s> <%lld%s%s%s%s>\n",
63312 +             rail->r_generic.Name, nodeId, EP4_COOKIE_STRING(cookies[0]), EP4_COOKIE_STRING(cookies[1]));
63313 +
63314 +    if ((cookies[0] & EP4_COOKIE_DMA) || (cookies[1] & EP4_COOKIE_DMA))
63315 +       ep4_neterr_fixup_dmas (rail, nodeId, cookies);
63316 +
63317 +    if ((cookies[0] & EP4_COOKIE_STEN) || (cookies[1] & EP4_COOKIE_STEN))
63318 +       ep4_neterr_fixup_sten (rail, nodeId, cookies);
63319 +}
63320 +
63321 +/*
63322 + * Local variables:
63323 + * c-file-style: "stroustrup"
63324 + * End:
63325 + */
63326 +
63327 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/nmh.c
63328 ===================================================================
63329 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/nmh.c   2004-02-23 16:02:56.000000000 -0500
63330 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/nmh.c        2005-07-28 14:52:52.897671288 -0400
63331 @@ -0,0 +1,181 @@
63332 +/*
63333 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63334 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63335 + *
63336 + *    For licensing information please see the supplied COPYING file
63337 + *
63338 + */
63339 +#ident "@(#)$Id: nmh.c,v 1.6 2004/01/05 13:48:08 david Exp $"
63340 +/*      $Source: /cvs/master/quadrics/epmod/nmh.c,v $*/
63341 +
63342 +#include <qsnet/kernel.h>
63343 +
63344 +#include <elan/kcomm.h>
63345 +
63346 +#define EP_NMD_SPANS(nmd, base, top)   ((nmd)->nmd_addr <= (base) &&  \
63347 +                                        ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (top))
63348 +
63349 +#define EP_NMD_OVERLAPS(nmd, addr, len)        ((nmd)->nmd_addr <= ((addr) + (len)) && \
63350 +                                        ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (addr))
63351 +
63352 +#define EP_NMH_HASH(tbl,idx,addr)      ((addr) % (tbl)->tbl_size[idx])
63353 +
63354 +int
63355 +ep_nmh_init (EP_NMH_TABLE *tbl)
63356 +{
63357 +    int i, idx, hsize = 1;
63358 +
63359 +    for (idx = EP_NMH_NUMHASH-1; idx >= 0; idx--, hsize <<= 1)
63360 +    {
63361 +       tbl->tbl_size[idx] = (hsize < EP_NMH_HASHSIZE) ? hsize : EP_NMH_HASHSIZE;
63362 +
63363 +       KMEM_ZALLOC (tbl->tbl_hash[idx], struct list_head *, sizeof (struct list_head) * tbl->tbl_size[idx], 1);
63364 +       
63365 +       if (tbl->tbl_hash == NULL)
63366 +       {
63367 +           while (++idx < EP_NMH_NUMHASH)
63368 +               KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
63369 +           return (ENOMEM);
63370 +       }
63371 +
63372 +       for (i = 0; i < tbl->tbl_size[idx]; i++)
63373 +           INIT_LIST_HEAD (&tbl->tbl_hash[idx][i]);
63374 +    }
63375 +
63376 +    return (0);
63377 +}
63378 +
63379 +void
63380 +ep_nmh_fini (EP_NMH_TABLE *tbl)
63381 +{
63382 +    int idx;
63383 +
63384 +    for (idx = 0; idx < EP_NMH_NUMHASH; idx++)
63385 +       if (tbl->tbl_hash[idx])
63386 +           KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
63387 +    
63388 +    bzero (tbl, sizeof (EP_NMH_TABLE));
63389 +}
63390 +
63391 +void
63392 +ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmh)
63393 +{
63394 +    EP_ADDR base = nmh->nmh_nmd.nmd_addr;
63395 +    EP_ADDR top  = base + nmh->nmh_nmd.nmd_len - 1;
63396 +    int     idx;
63397 +
63398 +    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
63399 +       ;
63400 +
63401 +    list_add_tail (&nmh->nmh_link, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]);
63402 +}
63403 +
63404 +void
63405 +ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmh)
63406 +{
63407 +    list_del (&nmh->nmh_link);
63408 +}
63409 +
63410 +EP_NMH *
63411 +ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmd)
63412 +{
63413 +    EP_ADDR           base = nmd->nmd_addr;
63414 +    EP_ADDR           top  = base + nmd->nmd_len - 1;
63415 +    int               idx;
63416 +    struct list_head *le;
63417 +    
63418 +    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
63419 +       ;
63420 +    
63421 +    for (; idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1) {
63422 +
63423 +       list_for_each (le, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]) {
63424 +           EP_NMH *nmh = list_entry (le, EP_NMH, nmh_link);
63425 +
63426 +           if (EP_NMD_SPANS (&nmh->nmh_nmd, nmd->nmd_addr, nmd->nmd_addr + nmd->nmd_len - 1))
63427 +               return (nmh);
63428 +       }
63429 +    }
63430 +
63431 +    return (0);
63432 +}
63433 +
63434 +void
63435 +ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len)
63436 +{
63437 +    ASSERT ((off + len - 1) <= nmd->nmd_len);
63438 +
63439 +    subset->nmd_addr = nmd->nmd_addr + off;
63440 +    subset->nmd_len  = len;
63441 +    subset->nmd_attr = nmd->nmd_attr;
63442 +}
63443 +
63444 +int
63445 +ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b)
63446 +{
63447 +    if (EP_NMD_NODEID (a) != EP_NMD_NODEID (b))                        /* not generated on the same node */
63448 +       return 0;
63449 +    
63450 +    if ((EP_NMD_RAILMASK (a) & EP_NMD_RAILMASK (b)) == 0)      /* no common rails */
63451 +       return 0;
63452 +    
63453 +    if (b->nmd_addr == (a->nmd_addr + a->nmd_len))
63454 +    {
63455 +       if (merged != NULL)
63456 +       {
63457 +           merged->nmd_addr = a->nmd_addr;
63458 +           merged->nmd_len  = a->nmd_len + b->nmd_len;
63459 +           merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(a), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
63460 +       }
63461 +       return 1;
63462 +    }
63463 +    
63464 +    if (a->nmd_addr == (b->nmd_addr + b->nmd_len))
63465 +    {
63466 +       if (merged != NULL)
63467 +       {
63468 +           merged->nmd_addr = b->nmd_addr;
63469 +           merged->nmd_len  = b->nmd_len + a->nmd_len;
63470 +           merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(b), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
63471 +       }
63472 +       
63473 +       return 1;
63474 +    }
63475 +
63476 +    return 0;
63477 +}
63478 +
63479 +int
63480 +ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask)
63481 +{
63482 +    EP_NMH *nmh = ep_nmh_find (&sys->MappingTable, nmd);
63483 +
63484 +    if (nmh == NULL)
63485 +    {
63486 +       printk ("ep_nmd_map_rails: nmd=%08x.%08x.%08x cannot be found\n",
63487 +               nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
63488 +       return (-1);
63489 +    }
63490 +
63491 +    return (nmh->nmh_ops->op_map_rails (sys, nmh, nmd, railmask));
63492 +}
63493 +
63494 +EP_RAILMASK
63495 +ep_nmd2railmask (EP_NMD *frags, int nFrags)
63496 +{
63497 +    EP_RAILMASK mask;
63498 +
63499 +    if (nFrags == 0)
63500 +       return ((EP_RAILMASK)-1);
63501 +    
63502 +    for (mask = EP_NMD_RAILMASK(frags); --nFrags; )
63503 +       mask &= EP_NMD_RAILMASK(++frags);
63504 +
63505 +    return (mask);
63506 +}
63507 +
63508 +/*
63509 + * Local variables:
63510 + * c-file-style: "stroustrup"
63511 + * End:
63512 + */
63513 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork.c
63514 ===================================================================
63515 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/probenetwork.c  2004-02-23 16:02:56.000000000 -0500
63516 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork.c       2005-07-28 14:52:52.898671136 -0400
63517 @@ -0,0 +1,446 @@
63518 +/*
63519 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63520 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63521 + *
63522 + *    For licensing information please see the supplied COPYING file
63523 + *
63524 + */
63525 +
63526 +#ident "@(#)$Id: probenetwork.c,v 1.43 2004/04/19 15:43:15 david Exp $"
63527 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork.c,v $ */
63528 +
63529 +#include <qsnet/kernel.h>
63530 +
63531 +#include <elan/kcomm.h>
63532 +#include "debug.h"
63533 +
63534 +int PositionCheck = 1;
63535 +
63536 +#define NUM_DOWN_FROM_VAL(NumDownLinksVal, level)      (((NumDownLinksVal) >> ((level) << 2)) & 0xF)
63537 +
63538 +int
63539 +ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos)
63540 +{
63541 +    int               lvl, i;
63542 +    int               level;
63543 +    int               nodeid;
63544 +    int               numnodes;
63545 +    int                      randomRoutingDisabled;
63546 +    int               sw;
63547 +    int               nacks;
63548 +    int               nowayup;
63549 +    int                      nalias;
63550 +    int                      upmask;
63551 +    int                      partial;
63552 +    int                      link;
63553 +    int                      invalid;
63554 +    int                      linkdown[ELAN_MAX_LEVELS];
63555 +    int                      linkup[ELAN_MAX_LEVELS];
63556 +    EP_SWITCH        *switches[ELAN_MAX_LEVELS];
63557 +    int               switchCount[ELAN_MAX_LEVELS+1];
63558 +    int               lowestBcast;
63559 +    int               numUpLinks[ELAN_MAX_LEVELS];
63560 +    int               routedown [ELAN_MAX_LEVELS];
63561 +
63562 +    EPRINTF1 (DBG_PROBE, "%s: ProbeNetwork started\n", rail->Name);
63563 +
63564 +    switchCount[0] = 1;
63565 +    numUpLinks [0] = 4;
63566 +
63567 +    for (level = 0; level < ELAN_MAX_LEVELS; level++)
63568 +    {
63569 +       int ndown  = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, level);
63570 +
63571 +       KMEM_ZALLOC (switches[level], EP_SWITCH *, sizeof (EP_SWITCH) * switchCount[level], 1);
63572 +
63573 +       for (sw = 0, nacks = 0, nowayup = 0, lowestBcast=7; sw < switchCount[level]; sw++)
63574 +       {
63575 +           EP_SWITCH *lsw  = &switches[level][sw];
63576 +           int        good = 1;
63577 +           int        tsw;
63578 +
63579 +           for (nodeid = 0,tsw = sw, lvl = level-1 ; lvl >= 0 ; lvl--)
63580 +           {
63581 +               EP_SWITCH *lsw;
63582 +               int        link = (8-numUpLinks[lvl]) + (tsw % numUpLinks[lvl]);
63583 +
63584 +               tsw  = tsw / numUpLinks[lvl];
63585 +               lsw  = &switches[lvl][tsw];
63586 +
63587 +               if (lsw->present == 0 || (lsw->lnr & (1 << link)))
63588 +               {
63589 +                   EPRINTF4 (DBG_PROBE, "lvl %d sw %d present=%d lnr=%x\n", lvl, sw, lsw->present, lsw->lnr);
63590 +                   good = 0;
63591 +               }
63592 +               
63593 +               linkup[lvl]   = link;
63594 +               linkdown[lvl] = lsw->link;
63595 +
63596 +               if ( lvl ) nodeid = ((nodeid + linkdown[lvl]) * (8-numUpLinks[lvl-1]));
63597 +               else       nodeid += linkdown[0];
63598 +
63599 +           }
63600 +           
63601 +           /* 
63602 +            * don't bother probing routes which we've already seen are unreachable 
63603 +            * because a link upwards was in reset or the switch previously nacked us.
63604 +            */
63605 +           if (! good)
63606 +           {
63607 +               lsw->present = 0;
63608 +
63609 +               nacks++;
63610 +               nowayup++;
63611 +
63612 +               continue;
63613 +           }
63614 +
63615 +           lsw->present = rail->Operations.ProbeRoute (rail, level, sw, nodeid, linkup, linkdown, 5, lsw);
63616 +
63617 +           if (! lsw->present)
63618 +           {
63619 +               EPRINTF3 (DBG_PROBE, "%s: level %d switch %d - unexpected nack\n", rail->Name, level, sw);
63620 +
63621 +               nacks++;
63622 +               nowayup++;
63623 +           }
63624 +           else
63625 +           {
63626 +               EPRINTF5 (DBG_PROBE, "%s: level %d switch %d - link %d bcast %d\n", rail->Name, level, sw, lsw->link, lsw->bcast);
63627 +
63628 +               if (level == 2 && rail->Devinfo.dev_device_id == PCI_DEVICE_ID_ELAN3)
63629 +               {
63630 +                   /* If we see broadcast top as 7, and we came in on a low link, then we can't
63631 +                    * determine whether we're in a 128 way or a un-configured 64u64d switch, so
63632 +                    * we treat it as a 64u64d and detect the 128 way case by "going over the top" 
63633 +                    * below. Unless we've been told what it really is by NumDownLinksVal.
63634 +                    */
63635 +                   if (lsw->bcast == 7 && lsw->link < 4)
63636 +                       lsw->bcast = ndown ? (ndown - 1) : 3;
63637 +               }
63638 +
63639 +               if ( lowestBcast > lsw->bcast ) 
63640 +                   lowestBcast = lsw->bcast;
63641 +
63642 +               if (lsw->link > (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)))
63643 +               {
63644 +                   /* We've arrived on a "up-link" - this could be either
63645 +                    * we're in the top half of a x8 top-switch - or we're
63646 +                    * in the bottom half and have gone "over the top". We
63647 +                    * differentiate these cases since the switches below
63648 +                    * a x8 top-switch will have broadcast top set to 3, 
63649 +                    * and the x8 topswitch have broadcast top set to 7.
63650 +                    */
63651 +                   if (lsw->bcast == 7)
63652 +                       nowayup++;
63653 +                   else
63654 +                   {
63655 +                       EPRINTF2 (DBG_PROBE, "%s: level %d - gone over the top\n",
63656 +                                 rail->Name, level);
63657 +
63658 +                       if (level > 0)
63659 +                       {
63660 +                           KMEM_FREE (switches[level], sizeof (EP_SWITCH) * switchCount[level] );
63661 +                           level--;
63662 +                       }
63663 +                       
63664 +                       numUpLinks[level] = 0;
63665 +                       goto finished;
63666 +                   }
63667 +               }
63668 +
63669 +           }
63670 +       }
63671 +
63672 +       numUpLinks[level]    = ndown ? (8 - ndown) : (7 - lowestBcast);
63673 +       switchCount[level+1] = switchCount[level] *  numUpLinks[level];
63674 +       
63675 +       /* Now we know which links are uplinks, we can see whether there is
63676 +        * any possible ways up */
63677 +       upmask = (ndown ? (0xFF << ndown) & 0xFF : (0xFF << (8 - numUpLinks[level])) & 0xFF);
63678 +
63679 +       for (sw = 0; sw < switchCount[level]; sw++)
63680 +       {
63681 +           EP_SWITCH *lsw  = &switches[level][sw];
63682 +
63683 +           if (lsw->present && lsw->link <= (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)) && (switches[level][sw].lnr & upmask) == upmask)
63684 +               nowayup++;
63685 +       }
63686 +
63687 +       EPRINTF7 (DBG_PROBE, "%s: level %d - sw=%d nacks=%d nowayup=%d bcast=%d numup=%d\n", 
63688 +                 rail->Name, level, sw, nacks, nowayup, lowestBcast, numUpLinks[level]);
63689 +
63690 +       if (nacks == sw)
63691 +       {
63692 +           static bitmap_t printed[BT_BITOUL(EP_MAX_RAILS)];
63693 +
63694 +           if (! BT_TEST (printed, rail->Number))
63695 +               printk ("%s: cannot determine network position\n", rail->Name);
63696 +           BT_SET (printed, rail->Number);
63697 +           goto failed;
63698 +       }
63699 +
63700 +       if (nowayup == sw)
63701 +           goto finished;
63702 +    }
63703 +    
63704 +    printk ("%s: exceeded number of levels\n", rail->Name);
63705 +    level = ELAN_MAX_LEVELS - 1;
63706 +
63707 + failed:
63708 +    
63709 +    for (lvl = 0; lvl <= level; lvl++)
63710 +       KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] );
63711 +
63712 +    return -EAGAIN;
63713 +
63714 + finished:
63715 +    /* we've successfully probed the network - now calculate our node 
63716 +     * position and what level of random routing is possible */
63717 +    nalias = 1;
63718 +    for (lvl = 0, invalid = 0, partial = 0, randomRoutingDisabled = 0; lvl <= level; lvl++)
63719 +    {
63720 +       int ndown  = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, lvl);
63721 +       int upmask = ndown ? (0xFF << ndown) & 0xFF : 0xF0;
63722 +
63723 +       for (sw = 0, nalias = 0; sw < switchCount[lvl]; sw++)
63724 +       {
63725 +           EP_SWITCH *lsw = &switches[lvl][sw];
63726 +           
63727 +           /* You can only use adaptive routing if links 4-7 are uplinks, and at least one of them is
63728 +            * not in reset.   Otherwise you can randomly select an "uplink" if all the uplinks are not
63729 +            * in reset. */
63730 +           if (lsw->present && ((upmask == 0xF0) ? (lsw->lnr & upmask) == upmask : (lsw->lnr & upmask) != 0))
63731 +               randomRoutingDisabled |= (1 << lvl);
63732 +           
63733 +           if (!lsw->present)
63734 +               partial++;
63735 +           else
63736 +           {
63737 +               if (lsw->invalid)
63738 +               {
63739 +                   printk ("%s: invalid switch detected (level %d switch %d)\n", rail->Name, lvl, sw);
63740 +                   invalid++;
63741 +               }
63742 +               
63743 +               for (i = 0; i < nalias; i++)
63744 +                   if (linkdown[i] == lsw->link)
63745 +                       break;
63746 +               if (i == nalias)
63747 +                   linkdown[nalias++] = lsw->link;
63748 +           }
63749 +       }
63750 +       
63751 +       link = linkdown[0];
63752 +       for (i = 1; i < nalias; i++)
63753 +           if (linkdown[i] < link)
63754 +               link = linkdown[i];
63755 +
63756 +       if (nalias > 1 && lvl != level)
63757 +       {
63758 +           printk ("%s: switch aliased below top level (level %d)\n", rail->Name, lvl);
63759 +           invalid++;
63760 +       }
63761 +       
63762 +       routedown[lvl] = link;
63763 +   }
63764 +
63765 +    for (lvl = 0; lvl <= level; lvl++) 
63766 +       KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] );
63767 +
63768 +    if (invalid)
63769 +    {
63770 +       printk ("%s: invalid switch configuration\n", rail->Name);
63771 +       return (EINVAL);
63772 +    }
63773 +
63774 +    /* Handle the aliasing case where a 16 way is used as multiple smaller switches */
63775 +    if (nalias == 1)
63776 +       level++;
63777 +    else if (nalias == 2)                                      /* a 16 way as 2x8 ways */
63778 +       numUpLinks[level++] = 6;                                /*   only 2 down links */
63779 +    else if (nalias > 4)                                       /* a 16 way as 8x2 ways */
63780 +       numUpLinks[level-1] = 6;
63781 +    
63782 +    /* 
63783 +     * Compute my nodeid and number of nodes in the machine
63784 +     * from the routedown and the number of downlinks at each level.
63785 +     */
63786 +    for(nodeid=0, lvl = level - 1; lvl >= 0; lvl--)
63787 +    {
63788 +       if (lvl) nodeid = ((nodeid + routedown[lvl]) * (8-numUpLinks[lvl-1]));  
63789 +       else     nodeid += routedown[0];
63790 +    }
63791 +
63792 +    for (numnodes = 1, lvl = 0; lvl < level; lvl++)
63793 +       numnodes *= (8 - numUpLinks[lvl]);
63794 +
63795 +    sprintf (rail->Name, "ep%d[%d]", rail->Number, nodeid);
63796 +
63797 +    if (randomRoutingDisabled & ((1 << (level-1))-1))
63798 +       printk ("%s: nodeid=%d level=%d numnodes=%d (random routing disabled 0x%x)\n", 
63799 +               rail->Name, nodeid, level, numnodes, randomRoutingDisabled);
63800 +    else if (partial)
63801 +       printk ("%s: nodeid=%d level=%d numnodes=%d (random routing ok)\n",
63802 +               rail->Name, nodeid, level, numnodes);
63803 +    else
63804 +       printk ("%s: nodeid=%d level=%d numnodes=%d\n",
63805 +               rail->Name, nodeid, level, numnodes);
63806 +
63807 +    pos->pos_mode               = ELAN_POS_MODE_SWITCHED;
63808 +    pos->pos_nodeid              = nodeid;
63809 +    pos->pos_levels              = level;
63810 +    pos->pos_nodes               = numnodes;
63811 +    pos->pos_random_disabled     = randomRoutingDisabled;
63812 +
63813 +    for(lvl = 0; lvl < level; lvl++)
63814 +       pos->pos_arity[level -lvl - 1] = (8-numUpLinks[lvl]);
63815 +    pos->pos_arity[level] = 1;                         /* XXXX why does this need to be 1 ? */
63816 +    
63817 +    return 0;
63818 +}
63819 +
63820 +/*
63821 + * broadcast top is invalid if it is not set to the number of downlinks-1,
63822 + * or at the topmost level it is less than ndown-1.
63823 + */
63824 +#define BCAST_TOP_INVALID(lvl, bcast, ndown)   ((lvl) == 0 ? (bcast) < ((ndown)-1) : (bcast) != ((ndown) - 1))
63825 +
63826 +void
63827 +CheckPosition (EP_RAIL *rail)
63828 +{
63829 +    ELAN_POSITION *pos     = &rail->Position;
63830 +    unsigned int   nodeid  = pos->pos_nodeid;
63831 +    unsigned int   invalid = 0;
63832 +    unsigned int   changed = 0;
63833 +    int lvl, slvl;
63834 +
63835 +    if (! PositionCheck)
63836 +       return;
63837 +
63838 +    if (rail->Operations.CheckPosition(rail))          /* is update ready for this rail */
63839 +    {
63840 +       EPRINTF2 (DBG_ROUTETABLE, "%s: check position: SwitchProbeLevel=%d\n", rail->Name, rail->SwitchProbeLevel);
63841 +
63842 +       for (lvl = 0, slvl = pos->pos_levels-1; lvl <= rail->SwitchProbeLevel; lvl++, slvl--)
63843 +       {
63844 +           EP_SWITCHSTATE *state  = &rail->SwitchState[lvl];
63845 +           EP_SWITCHSTATE *lstate = &rail->SwitchLast[lvl];
63846 +           unsigned int    ndown  = pos->pos_arity[slvl];
63847 +           unsigned int    upmask = (0xFF << ndown) & 0xFF;
63848 +           unsigned int    mylink = nodeid % ndown;
63849 +           unsigned int    error  = 0;
63850 +           unsigned int    binval = 0;
63851 +
63852 +           nodeid /= ndown;
63853 +
63854 +           /*
63855 +            * broadcast top is invalid if it is not set to the number of downlinks-1,
63856 +            * or at the topmost level it is less than ndown-1.
63857 +            */
63858 +           if (BCAST_TOP_INVALID(lvl, state->bcast, ndown) || (state->LNR & upmask) == upmask)
63859 +           {
63860 +               /* no way up from here - we'd better be at the top */
63861 +               if (lvl != (pos->pos_levels-1))
63862 +               {
63863 +                   if (state->bcast != (ndown-1))
63864 +                       printk ("%s: invalid broadcast top %d at level %d\n", rail->Name, state->bcast, lvl);
63865 +                   else if ((state->LNR & upmask) == upmask && (lstate->LNR & upmask) == upmask)
63866 +                       printk ("%s: no way up to switch at level %d (turned off ?)\n", rail->Name, lvl+1);
63867 +               }
63868 +               else
63869 +               {
63870 +                   if (state->linkid != mylink)
63871 +                       printk ("%s: moved at top level was connected to link %d now connected to %d\n", rail->Name, mylink, state->linkid);
63872 +               }
63873 +
63874 +               if (state->linkid != mylink)
63875 +                   error++;
63876 +               
63877 +               if (BCAST_TOP_INVALID (lvl, state->bcast, ndown))
63878 +                   binval++;
63879 +           }
63880 +           else
63881 +           {
63882 +               if (state->linkid != mylink)
63883 +               {
63884 +                   if (state->linkid != rail->SwitchLast[lvl].linkid)
63885 +                       printk ("%s: moved at lvl %d was connected to link %d now connected to %d\n", rail->Name, lvl, mylink, state->linkid);
63886 +                       
63887 +                   error++;
63888 +               }
63889 +           }
63890 +
63891 +           if (error == 0 && invalid == 0)
63892 +               rail->SwitchProbeTick[lvl] = lbolt;
63893 +           
63894 +           EPRINTF10 (DBG_ROUTETABLE, "%s:   lvl=%d (slvl=%d) linkid=%d bcast=%d lnr=%02x uplink=%d : error=%d binval=%d invalid=%d\n", 
63895 +                      rail->Name, lvl, slvl, state->linkid, state->bcast, state->LNR, state->uplink, error, binval, invalid);
63896 +
63897 +           invalid |= (error | binval);
63898 +       }
63899 +       
63900 +       for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
63901 +           if (rail->SwitchState[lvl].uplink != rail->SwitchLast[lvl].uplink)
63902 +               changed++;
63903 +
63904 +       if (changed)
63905 +       {
63906 +           printk ("%s: broadcast tree has changed from", rail->Name);
63907 +           for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
63908 +               printk ("%c%d", lvl == 0 ? ' ' : ',', rail->SwitchLast[lvl].uplink);
63909 +
63910 +           for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
63911 +               printk ("%s%d", lvl == 0 ? " to " : ",", rail->SwitchState[lvl].uplink);
63912 +           printk ("\n");
63913 +       }
63914 +
63915 +       if (rail->SwitchProbeLevel > 0)
63916 +           bcopy (rail->SwitchState, rail->SwitchLast, rail->SwitchProbeLevel * sizeof (EP_SWITCHSTATE));
63917 +    }
63918 +
63919 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
63920 +    {
63921 +       EPRINTF4 (DBG_ROUTETABLE, "%s: level %d lbolt=%lx ProbeLevelTick=%lx\n",
63922 +                 rail->Name, lvl, lbolt, rail->SwitchProbeTick[lvl]);
63923 +       
63924 +       if (AFTER (lbolt, rail->SwitchProbeTick[lvl] + EP_POSITION_TIMEOUT))
63925 +       {
63926 +           if (lvl < rail->SwitchBroadcastLevel+1)
63927 +           {
63928 +               if (lvl == 0)
63929 +                   printk ("%s: cable disconnected\n", rail->Name);
63930 +               else
63931 +                   printk ("%s: broadcast level has dropped to %d (should be %d)\n",
63932 +                           rail->Name, lvl, rail->Position.pos_levels);
63933 +           }
63934 +           break;
63935 +       }
63936 +    }
63937 +    
63938 +    if (lvl > rail->SwitchBroadcastLevel+1)
63939 +    {
63940 +       if (rail->SwitchBroadcastLevel < 0)
63941 +           printk ("%s: cable reconnected\n", rail->Name);
63942 +       if (lvl == rail->Position.pos_levels)
63943 +           printk ("%s: broadcast level has recovered\n", rail->Name);
63944 +       else
63945 +           printk ("%s: broadcast level has recovered to %d (should be %d)\n", 
63946 +                   rail->Name, lvl, rail->Position.pos_levels);
63947 +    }
63948 +    
63949 +    if (rail->SwitchBroadcastLevel != (lvl - 1))
63950 +    {
63951 +       EPRINTF2 (DBG_ROUTETABLE, "%s: setting SwitchBroadcastLevel to %d\n", rail->Name, lvl-1);
63952 +       
63953 +       rail->SwitchBroadcastLevel     = lvl - 1;
63954 +       rail->SwitchBroadcastLevelTick = lbolt;
63955 +    }
63956 +}
63957 +
63958 +
63959 +/*
63960 + * Local variables:
63961 + * c-file-style: "stroustrup"
63962 + * End:
63963 + */
63964 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork_elan3.c
63965 ===================================================================
63966 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/probenetwork_elan3.c    2004-02-23 16:02:56.000000000 -0500
63967 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork_elan3.c 2005-07-28 14:52:52.898671136 -0400
63968 @@ -0,0 +1,298 @@
63969 +/*
63970 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63971 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63972 + *
63973 + *    For licensing information please see the supplied COPYING file
63974 + *
63975 + */
63976 +
63977 +#ident "@(#)$Id: probenetwork_elan3.c,v 1.40 2004/04/15 12:30:08 david Exp $"
63978 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan3.c,v $ */
63979 +
63980 +#include <qsnet/kernel.h>
63981 +
63982 +#include <elan/kcomm.h>
63983 +
63984 +#include "kcomm_vp.h"
63985 +#include "kcomm_elan3.h"
63986 +#include "debug.h"
63987 +
63988 +#include <elan3/intrinsics.h>
63989 +
63990 +static void ep3_probe_event (EP3_RAIL *rail, void *arg);
63991 +static EP3_COOKIE_OPS ep3_probe_ops = 
63992 +{
63993 +    ep3_probe_event
63994 +} ;
63995 +
63996 +int
63997 +ep3_init_probenetwork (EP3_RAIL *rail)
63998 +{
63999 +    sdramaddr_t              stack;
64000 +    E3_Addr           sp;
64001 +    E3_BlockCopyEvent event;
64002 +    int               i;
64003 +
64004 +    if (! (stack = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rail->ProbeStack)))
64005 +       return -ENOMEM;
64006 +
64007 +    spin_lock_init (&rail->ProbeLock);
64008 +    kcondvar_init (&rail->ProbeWait);
64009 +
64010 +    /* Initialise the probe command structure */
64011 +    for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++)
64012 +       elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[i]), 0);
64013 +    for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++)
64014 +       elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[i]), 1);
64015 +    
64016 +    RegisterCookie (&rail->CookieTable, &rail->ProbeCookie, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeDone), &ep3_probe_ops, rail);
64017 +    
64018 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Type), 0);
64019 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Count), 0);
64020 +
64021 +    EP3_INIT_COPY_EVENT (event, rail->ProbeCookie, rail->RailMainAddr + offsetof (EP3_RAIL_MAIN, ProbeDone), 1);
64022 +    elan3_sdram_copyl_to_sdram (rail->Device, &event, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeDone), sizeof (E3_BlockCopyEvent));
64023 +
64024 +    rail->RailMain->ProbeDone = EP3_EVENT_FREE;
64025 +
64026 +    sp = ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "kcomm_probe"),
64027 +                         rail->ProbeStack, stack, EP3_STACK_SIZE,
64028 +                         3, rail->CommandPortAddr, rail->RailElanAddr, rail->RailMainAddr);
64029 +    
64030 +    IssueRunThread (rail, sp);
64031 +
64032 +    return 0;
64033 +}
64034 +
64035 +void
64036 +ep3_destroy_probenetwork (EP3_RAIL *rail)
64037 +{
64038 +    if (rail->ProbeStack == (sdramaddr_t) 0)
64039 +       return;
64040 +
64041 +    /* XXXX: ensure that the network probe thread is stopped */
64042 +
64043 +    DeregisterCookie (&rail->CookieTable, &rail->ProbeCookie);
64044 +
64045 +    kcondvar_destroy (&rail->ProbeWait);
64046 +    spin_lock_destroy (&rail->ProbeLock);
64047 +    
64048 +    ep_free_elan (&rail->Generic, rail->ProbeStack, EP3_STACK_SIZE);
64049 +}
64050 +
64051 +static void
64052 +ep3_probe_event (EP3_RAIL *rail, void *arg)
64053 +{
64054 +    unsigned long flags;
64055 +
64056 +    spin_lock_irqsave (&rail->ProbeLock, flags);
64057 +    rail->ProbeDone = 1;
64058 +    kcondvar_wakeupone (&rail->ProbeWait, &rail->ProbeLock);
64059 +    spin_unlock_irqrestore (&rail->ProbeLock, flags);
64060 +}
64061 +
64062 +int
64063 +ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw)
64064 +{
64065 +    EP3_RAIL      *rail     = (EP3_RAIL *) r;
64066 +    EP3_RAIL_MAIN *railMain = rail->RailMain;
64067 +    sdramaddr_t    railElan = rail->RailElan;
64068 +    E3_uint16      flits[MAX_FLITS];
64069 +    E3_uint32      result;
64070 +    int                   nflits;
64071 +    unsigned long  flags;
64072 +
64073 +    spin_lock_irqsave (&rail->ProbeLock, flags);
64074 +
64075 +    nflits = GenerateProbeRoute ( flits, nodeid, level, linkup, linkdown, 0);
64076 +           
64077 +    if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_PROBE(level), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
64078 +    {
64079 +       EPRINTF0 (DBG_ROUTETABLE, "ProbeRoute: cannot load route entry\n");
64080 +       spin_unlock_irqrestore (&rail->ProbeLock, flags);
64081 +       return (EINVAL);
64082 +    }
64083 +
64084 +    do {
64085 +       /* Initialise the probe source to include our partially computed nodeid */
64086 +       elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), nodeid);
64087 +       elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), nodeid);
64088 +
64089 +       /* Initialise the count result etc */
64090 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_SINGLE);
64091 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level);
64092 +
64093 +       railMain->ProbeResult  = -1;
64094 +           
64095 +       /* Clear the receive area */
64096 +       bzero (railMain->ProbeDest0, sizeof (railMain->ProbeDest0));
64097 +       bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1));
64098 +    
64099 +       /* Re-arm the completion event */
64100 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1);
64101 +       railMain->ProbeDone = EP3_EVENT_ACTIVE;
64102 +       rail->ProbeDone = 0;
64103 +
64104 +       /* And wakeup the thread to do the probe */
64105 +       IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart));
64106 +
64107 +       /* Now wait for it to complete */
64108 +       while (! rail->ProbeDone)
64109 +           kcondvar_wait (&rail->ProbeWait, &rail->ProbeLock, &flags);
64110 +
64111 +       /* wait for block copy event to flush write buffers */
64112 +       while (! EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone))
64113 +           if (! EP3_EVENT_FIRING(rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone), rail->ProbeCookie, railMain->ProbeDone))
64114 +               panic ("ProbeRoute: network probe event failure\n");
64115 +
64116 +       result = railMain->ProbeResult;
64117 +
64118 +       if (result == C_ACK_ERROR)
64119 +           kcondvar_timedwait (&rail->ProbeWait, &rail->ProbeLock, &flags, lbolt + (hz/8));
64120 +       
64121 +       railMain->ProbeDone = EP3_EVENT_FREE;
64122 +
64123 +    } while (result != C_ACK_OK && --attempts);
64124 +
64125 +    if (result == C_ACK_OK)
64126 +    {
64127 +       if (railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid ||
64128 +           railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid)
64129 +       {
64130 +           printk ("%s: lost nodeid at level %d switch %d - %d != %d\n", rail->Generic.Name, level, sw,
64131 +                   railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1], nodeid);
64132 +
64133 +           result = C_ACK_ERROR;
64134 +       }
64135 +       else
64136 +       {
64137 +           E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - level - 1];
64138 +           E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - level - 1];
64139 +               
64140 +           EPRINTF7 (DBG_PROBE, "%s: level %d switch %d - linkid=%d bcast=%d LNR=%02x%s\n", 
64141 +                     rail->Generic.Name, level, sw, TR_TRACEROUTE0_LINKID(val0),
64142 +                     TR_TRACEROUTE1_BCAST_TOP(val1), TR_TRACEROUTE0_LNR(val0),
64143 +                     TR_TRACEROUTE0_REVID(val0) ? "" : " RevA Part");
64144 +           
64145 +           lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
64146 +           lsw->link    = TR_TRACEROUTE0_LINKID(val0);
64147 +           lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
64148 +           lsw->invalid = (TR_TRACEROUTE0_REVID(val0) == 0);
64149 +       }
64150 +    }
64151 +    spin_unlock_irqrestore (&rail->ProbeLock, flags);
64152 +    
64153 +    return (result == C_ACK_OK);
64154 +}
64155 +
64156 +void
64157 +ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos)
64158 +{
64159 +    E3_uint16  flits[MAX_FLITS];
64160 +    int        lvl, nflits;
64161 +    
64162 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
64163 +    {
64164 +       nflits = GenerateCheckRoute (pos, flits, pos->pos_levels - lvl - 1, 0);
64165 +
64166 +       if (LoadRoute (rail->Device, rail->Ctxt->RouteTable, EP_VP_PROBE(lvl), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
64167 +           panic ("ep3_probe_position_found: cannot load probe route entry\n");
64168 +    }
64169 +    
64170 +    /* Initialise the traceroute source data with our nodeid */
64171 +    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
64172 +    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
64173 +}
64174 +
64175 +int
64176 +ep3_check_position (EP_RAIL *r)
64177 +{
64178 +    EP3_RAIL      *rail     = (EP3_RAIL *) r;
64179 +    EP3_RAIL_MAIN *railMain = rail->RailMain;
64180 +    sdramaddr_t    railElan = rail->RailElan;
64181 +    ELAN_POSITION *pos      = &rail->Generic.Position;
64182 +    unsigned int   level    = rail->RailMain->ProbeLevel;
64183 +    unsigned int   updated  = EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone);
64184 +    unsigned int   lvl;
64185 +
64186 +    if (updated)
64187 +    {
64188 +       if (railMain->ProbeResult != C_ACK_OK)
64189 +       {
64190 +           EPRINTF2 (DBG_PROBE, "%s: CheckNetworkPosition: packet nacked result=%d\n", rail->Generic.Name, railMain->ProbeResult); 
64191 +           
64192 +           rail->Generic.SwitchProbeLevel = -1;
64193 +       }
64194 +       else
64195 +       {
64196 +           E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
64197 +           E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
64198 +
64199 +           if (val0 != pos->pos_nodeid || val1 != pos->pos_nodeid)
64200 +           {
64201 +               static unsigned long printed = 0;
64202 +
64203 +               /* We've received a packet from another node - this probably means
64204 +                * that we've moved */
64205 +               if ((lbolt - printed) > (HZ*10))
64206 +               {
64207 +                   printk ("%s: ep3_check_position - level %d lost nodeid\n", rail->Generic.Name, level);
64208 +                   printed = lbolt;
64209 +               }
64210 +
64211 +               rail->Generic.SwitchProbeLevel = -1;
64212 +           }
64213 +           else
64214 +           {
64215 +               for (lvl = 0; lvl <= level; lvl++)
64216 +               {
64217 +                   E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
64218 +                   E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
64219 +
64220 +                   rail->Generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID(val0);
64221 +                   rail->Generic.SwitchState[lvl].LNR    = TR_TRACEROUTE0_LNR(val0);
64222 +                   rail->Generic.SwitchState[lvl].bcast  = TR_TRACEROUTE1_BCAST_TOP(val1);
64223 +                   rail->Generic.SwitchState[lvl].uplink = 4;
64224 +
64225 +                   EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->Generic.SwitchState[lvl].linkid,
64226 +                             rail->Generic.SwitchState[lvl].LNR, rail->Generic.SwitchState[lvl].bcast ,rail->Generic.SwitchState[lvl].uplink);
64227 +               }
64228 +               rail->Generic.SwitchProbeLevel = level;
64229 +           }
64230 +       }
64231 +
64232 +       railMain->ProbeDone = EP3_EVENT_FREE;
64233 +    }
64234 +
64235 +    if (railMain->ProbeDone == EP3_EVENT_FREE)
64236 +    {
64237 +       if (rail->Generic.SwitchBroadcastLevel == rail->Generic.Position.pos_levels-1)
64238 +           level = rail->Generic.Position.pos_levels - 1;
64239 +       else
64240 +           level = rail->Generic.SwitchBroadcastLevel + 1;
64241 +
64242 +       EPRINTF2 (DBG_PROBE, "%s: ep3_check_postiion: level %d\n", rail->Generic.Name, level);
64243 +
64244 +       /* Initialise the count result etc */
64245 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_MULTIPLE);
64246 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level);
64247 +
64248 +       railMain->ProbeResult = -1;
64249 +       railMain->ProbeLevel  = -1;
64250 +       
64251 +       /* Clear the receive area */
64252 +       bzero (railMain->ProbeDest0, sizeof (railMain->ProbeDest0));
64253 +       bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1));
64254 +       
64255 +       /* Re-arm the completion event */
64256 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Type), EV_TYPE_BCOPY);
64257 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1);
64258 +
64259 +       railMain->ProbeDone = EP3_EVENT_ACTIVE;
64260 +       
64261 +       IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart));
64262 +    }
64263 +
64264 +    return updated;
64265 +}
64266 +
64267 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork_elan3_thread.c
64268 ===================================================================
64269 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/probenetwork_elan3_thread.c     2004-02-23 16:02:56.000000000 -0500
64270 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork_elan3_thread.c  2005-07-28 14:52:52.899670984 -0400
64271 @@ -0,0 +1,98 @@
64272 +/*
64273 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64274 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64275 + *
64276 + *    For licensing information please see the supplied COPYING file
64277 + *
64278 + */
64279 +
64280 +#ident "@(#)$Id: probenetwork_elan3_thread.c,v 1.19 2004/03/24 11:32:56 david Exp $"
64281 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan3_thread.c,v $*/
64282 +
64283 +#include <elan3/e3types.h>
64284 +#include <elan3/events.h>
64285 +#include <elan3/elanregs.h>
64286 +#include <elan3/intrinsics.h>
64287 +
64288 +#include "kcomm_vp.h"
64289 +#include "kcomm_elan3.h"
64290 +
64291 +static int
64292 +kcomm_probe_vp (EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain, int vp, int attempts, int timeouts)
64293 +{
64294 +    int rc;
64295 +
64296 +    /* Since we use %g1 to hold the "rxd" so the trap handler can
64297 +     * complete the envelope processing - we pass zero to indicate we're
64298 +     * not a receiver thread */
64299 +    asm volatile ("mov %g0, %g1");
64300 +
64301 +    while (attempts && timeouts)
64302 +    {
64303 +       c_open (vp);
64304 +       c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest0, &railElan->ProbeSource0);
64305 +       c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest1, &railElan->ProbeSource1);
64306 +       c_sendtrans0 (TR_SENDACK | TR_SETEVENT, (E3_Addr) 0);
64307 +       
64308 +       switch (rc = c_close())
64309 +       {
64310 +       case C_ACK_OK:
64311 +           return (C_ACK_OK);
64312 +           
64313 +       case C_ACK_DISCARD:
64314 +           attempts--;
64315 +           break;
64316 +
64317 +       default:                                        /* output timeout */
64318 +           timeouts--;
64319 +       }
64320 +
64321 +       c_break_busywait();
64322 +    }
64323 +
64324 +    return (timeouts == 0 ? C_ACK_ERROR : C_ACK_DISCARD);
64325 +}
64326 +
64327 +void
64328 +kcomm_probe (E3_CommandPort *cport, EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain)
64329 +{
64330 +    int level;
64331 +
64332 +    for (;;)
64333 +    {
64334 +       c_waitevent (&railElan->ProbeStart, 1);
64335 +
64336 +       switch (railElan->ProbeType)
64337 +       {
64338 +       case PROBE_SINGLE:
64339 +           railMain->ProbeResult = kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(railElan->ProbeLevel),
64340 +                                                   PROBE_SINGLE_ATTEMPTS, PROBE_SINGLE_TIMEOUTS);
64341 +
64342 +           cport->SetEvent = (E3_Addr) &railElan->ProbeDone;
64343 +           break;
64344 +
64345 +       case PROBE_MULTIPLE:
64346 +           for (level = railElan->ProbeLevel; level >= 0; level--)
64347 +           {
64348 +               if (kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(level),
64349 +                                   PROBE_MULTIPLE_ATTEMPTS, PROBE_MULTIPLE_TIMEOUTS) == C_ACK_OK)
64350 +               {
64351 +                   railMain->ProbeLevel  = level;
64352 +                   railMain->ProbeResult = C_ACK_OK;
64353 +                   break;
64354 +               }
64355 +
64356 +               c_break_busywait();
64357 +           }
64358 +           cport->SetEvent = (E3_Addr) &railElan->ProbeDone;
64359 +           break;
64360 +       }
64361 +
64362 +    }
64363 +}
64364 +
64365 +/*
64366 + * Local variables:
64367 + * c-file-style: "stroustrup"
64368 + * End:
64369 + */
64370 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork_elan4.c
64371 ===================================================================
64372 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/probenetwork_elan4.c    2004-02-23 16:02:56.000000000 -0500
64373 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/probenetwork_elan4.c 2005-07-28 14:52:52.900670832 -0400
64374 @@ -0,0 +1,396 @@
64375 +/*
64376 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64377 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64378 + *
64379 + *    For licensing information please see the supplied COPYING file
64380 + *
64381 + */
64382 +
64383 +#ident "@(#)$Id: probenetwork_elan4.c,v 1.9 2004/08/19 11:05:03 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
64384 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan4.c,v $*/
64385 +
64386 +#include <qsnet/kernel.h>
64387 +
64388 +#include <elan/kcomm.h>
64389 +
64390 +#include "kcomm_vp.h"
64391 +#include "kcomm_elan4.h"
64392 +#include "debug.h"
64393 +
64394 +#include <elan4/trtype.h>
64395 +#include <elan4/commands.h>
64396 +
64397 +static void
64398 +probe_interrupt (EP4_RAIL *rail, void *arg)
64399 +{
64400 +    unsigned long flags;
64401 +
64402 +    spin_lock_irqsave (&rail->r_probe_lock, flags);
64403 +    rail->r_probe_done = 1;
64404 +    kcondvar_wakeupone (&rail->r_probe_wait, &rail->r_probe_lock);
64405 +    spin_unlock_irqrestore (&rail->r_probe_lock, flags);
64406 +}
64407 +
64408 +int
64409 +ep4_probe_init (EP4_RAIL *rail)
64410 +{
64411 +    spin_lock_init (&rail->r_probe_lock);
64412 +    kcondvar_init (&rail->r_probe_wait);
64413 +
64414 +    rail->r_probe_cq = ep4_alloc_ecq (rail, CQ_Size1K);
64415 +
64416 +    if (rail->r_probe_cq == NULL)
64417 +       return -ENOMEM;
64418 +
64419 +    ep4_register_intcookie (rail, &rail->r_probe_intcookie, rail->r_elan_addr, probe_interrupt, rail);
64420 +
64421 +    return 0;
64422 +}
64423 +
64424 +void
64425 +ep4_probe_destroy (EP4_RAIL *rail)
64426 +{
64427 +    if (rail->r_probe_cq)
64428 +       ep4_free_ecq (rail, rail->r_probe_cq);
64429 +
64430 +    if (rail->r_probe_intcookie.int_arg == NULL)
64431 +       return;
64432 +    ep4_deregister_intcookie (rail, &rail->r_probe_intcookie);
64433 +
64434 +    kcondvar_destroy (&rail->r_probe_wait);
64435 +    spin_lock_destroy (&rail->r_probe_lock);
64436 +}
64437 +
64438 +#define LINKDOWN(nodeid, level)        ((nodeid >> (level << 1)) & 3)
64439 +#define PROBE_PATTERN0(nodeid) (0xaddebabe ^ nodeid)
64440 +#define PROBE_PATTERN1(nodeid)  (0xfeedbeef ^ nodeid)
64441 +
64442 +#define EP4_PROBE_RETRIES      4
64443 +
64444 +int
64445 +ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw)
64446 +{
64447 +    EP4_RAIL      *rail  = (EP4_RAIL *) r;
64448 +    EP4_RAIL_MAIN *rmain = rail->r_main;
64449 +    E4_uint16      first = 0;
64450 +    int                   rb    = 0;
64451 +
64452 +    E4_uint8  packed[ROUTE_NUM_PACKED];
64453 +    E4_VirtualProcessEntry route;
64454 +    unsigned long flags;
64455 +    int i;
64456 +
64457 +    for (i = 0; i < ROUTE_NUM_PACKED; i++)
64458 +       packed[i] = 0;
64459 +
64460 +    /* Generate "up" routes */
64461 +    for (i = 0; i < level; i++)
64462 +       if (first == 0)
64463 +           first = linkup ? FIRST_ROUTE(linkup[i]) : FIRST_ADAPTIVE;
64464 +       else
64465 +           packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : PACKED_ADAPTIVE;
64466 +    
64467 +    /* Generate a "to-me" route down */
64468 +    if (first == 0)
64469 +       first = FIRST_MYLINK;
64470 +    else
64471 +       packed[rb++] = PACKED_MYLINK;
64472 +    
64473 +    /* Generate the "down" routes */
64474 +    for (i = level-1; i >= 0; i--)
64475 +       packed[rb++] = linkdown ? PACKED_ROUTE(linkdown[i]) : PACKED_ROUTE(LINKDOWN(nodeid, i));
64476 +    
64477 +    /* Pack up the routes into the virtual process entry */
64478 +    route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3);
64479 +    route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM);
64480 +
64481 +    for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
64482 +    {
64483 +       route.Values[0] |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
64484 +       route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
64485 +    }
64486 +
64487 +    elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(level), &route);
64488 +    
64489 +    while (attempts--)
64490 +    {
64491 +       rail->r_probe_done = 0;
64492 +
64493 +       /* generate the STEN packet - note we use a datatype of dword as we're copying to elan in dwords
64494 +        *   NB - no flow control is required, since the max packet size is less than the command queue
64495 +        *        size and it's dedicated for network probing.
64496 +        */
64497 +       
64498 +       elan4_guard   (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_RESET(EP4_PROBE_RETRIES));
64499 +       elan4_nop_cmd (rail->r_probe_cq->ecq_cq, 0);
64500 +       
64501 +       elan4_open_packet (rail->r_probe_cq->ecq_cq, OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(level)));
64502 +       elan4_sendtransn  (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS),
64503 +                          rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0),
64504 +                          0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 
64505 +                          0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull | ((E4_uint64)PROBE_PATTERN0(nodeid) << 32));
64506 +       elan4_sendtransn  (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS),
64507 +                          rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1),
64508 +                          0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 
64509 +                          0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000000000001ull | ((E4_uint64)PROBE_PATTERN1(nodeid) << 32));
64510 +       elan4_sendtrans0  (rail->r_probe_cq->ecq_cq, TR_NOP_TRANS | TR_LAST_AND_SEND_ACK, 0);
64511 +
64512 +       elan4_guard           (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES));
64513 +       elan4_write_dword_cmd (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FINISHED);
64514 +
64515 +       elan4_guard            (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES));
64516 +       elan4_write_dword_cmd  (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FAILED);
64517 +
64518 +       elan4_interrupt_cmd   (rail->r_probe_cq->ecq_cq,  rail->r_probe_intcookie.int_val);
64519 +
64520 +       spin_lock_irqsave (&rail->r_probe_lock, flags);
64521 +       while (! rail->r_probe_done)
64522 +           kcondvar_wait (&rail->r_probe_wait, &rail->r_probe_lock, &flags);
64523 +       spin_unlock_irqrestore (&rail->r_probe_lock, flags);
64524 +
64525 +       if (rmain->r_probe_result == EP4_STATE_FINISHED)
64526 +       {
64527 +           if (rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN0(nodeid) ||
64528 +               rmain->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN1(nodeid))
64529 +           {
64530 +               printk ("%s: lost nodeid at level %d switch %d - %d != %d\n", rail->r_generic.Name, level, sw,
64531 +                       rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1], PROBE_PATTERN0(nodeid));
64532 +           }
64533 +           else
64534 +           {
64535 +               E4_uint32 val0 = rmain->r_probe_dest0[TRACEROUTE_ENTRIES - level - 1];
64536 +               E4_uint32 val1 = rmain->r_probe_dest1[TRACEROUTE_ENTRIES - level - 1];
64537 +               
64538 +               lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
64539 +               lsw->link    = TR_TRACEROUTE0_LINKID(val0);
64540 +               lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
64541 +               lsw->invalid = 0;
64542 +
64543 +               return 1;
64544 +           }
64545 +       }
64546 +
64547 +       rmain->r_probe_result = EP4_STATE_FREE;
64548 +    }
64549 +
64550 +    return 0;
64551 +}
64552 +
64553 +
64554 +void
64555 +ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos)
64556 +{
64557 +    ELAN4_DEV  *dev  = rail->r_ctxt.ctxt_dev;
64558 +    int         lvl;
64559 +
64560 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
64561 +    {
64562 +       /* Initialise the "probe" route to use the broadcast tree */
64563 +       ELAN_POSITION *pos     = &rail->r_generic.Position;
64564 +       unsigned char *arityp  = &pos->pos_arity[pos->pos_levels - 1];
64565 +       unsigned int   spanned = *arityp;
64566 +       E4_uint16      first   = 0;
64567 +       int            rb      = 0;
64568 +       
64569 +       E4_uint8  packed[ROUTE_NUM_PACKED];
64570 +       E4_VirtualProcessEntry route;
64571 +       int i;
64572 +       
64573 +       for (i = 0; i < ROUTE_NUM_PACKED; i++)
64574 +           packed[i] = 0;
64575 +
64576 +       /* Generate "up" routes */
64577 +       for (i = 0; i < lvl; i++, spanned *= *(--arityp))
64578 +       {
64579 +           if (first == 0)
64580 +               first = FIRST_BCAST_TREE;
64581 +           else
64582 +               packed[rb++] = PACKED_BCAST_TREE;
64583 +       }
64584 +
64585 +       /* Generate a "to-me" route down */
64586 +       if (first == 0)
64587 +           first = FIRST_MYLINK;
64588 +       else
64589 +           packed[rb++] = PACKED_MYLINK;
64590 +
64591 +       spanned /= *arityp++;
64592 +
64593 +       /* Generate the "down" routes */
64594 +       for (i = lvl-1; i >= 0; i--)
64595 +       {
64596 +           spanned /= *arityp;
64597 +           packed[rb++] = PACKED_ROUTE((pos->pos_nodeid / spanned) % *arityp);
64598 +           arityp++;
64599 +       }
64600 +
64601 +    
64602 +       /* Pack up the routes into the virtual process entry */
64603 +       route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3);
64604 +       route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM);
64605 +       
64606 +       for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
64607 +       {
64608 +           route.Values[0] |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
64609 +           route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
64610 +       }
64611 +       
64612 +       elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(lvl), &route);
64613 +       
64614 +       /* Initialise "start" event for this level */
64615 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CountAndType),
64616 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
64617 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopySource),
64618 +                           rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl]));
64619 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopyDest),
64620 +                           rail->r_probe_cq->ecq_addr);
64621 +
64622 +       /* Initiailise command stream - reset the start event */
64623 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_cmd),
64624 +                           WRITE_DWORD_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl])));
64625 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_value),
64626 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
64627 +
64628 +       /* Initiailise command stream - sten traceroute packet */
64629 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_open),
64630 +                           OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(lvl)));
64631 +
64632 +       /* Initiailise command stream - traceroute 0 */
64633 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute0),
64634 +                           SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
64635 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute0),
64636 +                           rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0));
64637 +       for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
64638 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
64639 +                               0x0000000000000000ull);
64640 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
64641 +                           0x0000000000000000ull | ((E4_uint64) PROBE_PATTERN0(pos->pos_nodeid) << 32));
64642 +
64643 +       /* Initiailise command stream - traceroute 1 */
64644 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute1),
64645 +                           SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
64646 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute1),
64647 +                           rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1));
64648 +       for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
64649 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
64650 +                               0x0000000100000001ull);
64651 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
64652 +                           0x0000000000000001ull | ((E4_uint64) PROBE_PATTERN1(pos->pos_nodeid) << 32));
64653 +
64654 +       /* Initiailise command stream - null sendack */
64655 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_sendack),
64656 +                           SEND_TRANS_CMD | ((TR_NOP_TRANS | TR_LAST_AND_SEND_ACK) << 16));
64657 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_sendack),
64658 +                           0);
64659 +       
64660 +       /* Initiailise command stream - guard ok, write done */
64661 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_ok),
64662 +                           GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES));
64663 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_writedword_ok),
64664 +                           WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level)));
64665 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_value_ok),
64666 +                           lvl);
64667 +
64668 +       /* Initiailise command stream - guard fail, chain to next or write done */
64669 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_fail),
64670 +                           GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES));
64671 +
64672 +       if (lvl > 0)
64673 +       {
64674 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail),
64675 +                               SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl-1])));
64676 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop),
64677 +                               NOP_CMD);
64678 +       }
64679 +       else
64680 +       {
64681 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail),
64682 +                               WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level)));
64683 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop),
64684 +                               EP4_PROBE_FAILED);
64685 +       }
64686 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_nop_pad),
64687 +                           NOP_CMD);
64688 +    }
64689 +
64690 +    
64691 +    rail->r_main->r_probe_level = EP4_PROBE_ACTIVE;
64692 +
64693 +    mb();
64694 +    ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[pos->pos_levels-1]));
64695 +}
64696 +
64697 +int
64698 +ep4_check_position (EP_RAIL *r)
64699 +{
64700 +    EP4_RAIL      *rail = (EP4_RAIL *) r;
64701 +    ELAN_POSITION *pos  = &rail->r_generic.Position;
64702 +    unsigned int level  = rail->r_main->r_probe_level;
64703 +    unsigned int lvl;
64704 +
64705 +    EPRINTF2 (DBG_PROBE, "%s: ep4_check_position: level=%lld\n", rail->r_generic.Name, rail->r_main->r_probe_level);
64706 +
64707 +    if (rail->r_main->r_probe_level != EP4_PROBE_ACTIVE)
64708 +    {
64709 +       if (rail->r_main->r_probe_level == EP4_PROBE_FAILED)
64710 +       {
64711 +           EPRINTF1 (DBG_PROBE, "%s: ep4_check_position: packets all nacked\n", rail->r_generic.Name);
64712 +
64713 +           rail->r_generic.SwitchProbeLevel = -1;
64714 +       }
64715 +       else
64716 +       {
64717 +           E4_uint32 val0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - 2*(level+1)];
64718 +           E4_uint32 val1  = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - 2*(level+1)];
64719 +
64720 +           if (val0 != PROBE_PATTERN0 (pos->pos_nodeid) || val1 != PROBE_PATTERN1 (pos->pos_nodeid))
64721 +           {
64722 +               static unsigned long printed = 0;
64723 +
64724 +               /* We've received a packet from another node - this probably means
64725 +                * that we've moved */
64726 +               if ((lbolt - printed) > (HZ*10))
64727 +               {
64728 +                   printk ("%s: ep4_check_position - level %d lost nodeid\n", rail->r_generic.Name, level);
64729 +                   printed = lbolt;
64730 +               }
64731 +
64732 +               rail->r_generic.SwitchProbeLevel = -1;
64733 +           }
64734 +           else
64735 +           {
64736 +               for (lvl = 0 ; lvl <= level; lvl++)
64737 +               {
64738 +                   E4_uint32 uval0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - lvl - 1];
64739 +                   E4_uint32 dval0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
64740 +                   E4_uint32 dval1  = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
64741 +
64742 +                   rail->r_generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID (dval0);
64743 +                   rail->r_generic.SwitchState[lvl].LNR    = TR_TRACEROUTE0_LNR(dval0);
64744 +                   rail->r_generic.SwitchState[lvl].bcast  = TR_TRACEROUTE1_BCAST_TOP (dval1);
64745 +                   rail->r_generic.SwitchState[lvl].uplink = TR_TRACEROUTE0_LINKID (uval0);
64746 +
64747 +                   EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->r_generic.SwitchState[lvl].linkid,
64748 +                             rail->r_generic.SwitchState[lvl].LNR, rail->r_generic.SwitchState[lvl].bcast ,rail->r_generic.SwitchState[lvl].uplink);
64749 +
64750 +               }
64751 +
64752 +               rail->r_generic.SwitchProbeLevel = level;
64753 +           }
64754 +       }
64755 +
64756 +       rail->r_main->r_probe_level = EP4_PROBE_ACTIVE;
64757 +       mb();
64758 +
64759 +       if (rail->r_generic.SwitchBroadcastLevel == rail->r_generic.Position.pos_levels-1)
64760 +           level = rail->r_generic.Position.pos_levels - 1;
64761 +       else
64762 +           level = rail->r_generic.SwitchBroadcastLevel + 1;
64763 +
64764 +       ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[level]));
64765 +
64766 +       return 1;
64767 +    }
64768 +
64769 +    return 0;
64770 +}
64771 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/procfs_linux.c
64772 ===================================================================
64773 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/procfs_linux.c  2004-02-23 16:02:56.000000000 -0500
64774 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/procfs_linux.c       2005-07-28 14:52:52.901670680 -0400
64775 @@ -0,0 +1,693 @@
64776 +/*
64777 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64778 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64779 + *
64780 + *    For licensing information please see the supplied COPYING file
64781 + *
64782 + */
64783 +
64784 +#ident "@(#)$Id: procfs_linux.c,v 1.53.2.4 2005/01/18 14:18:42 david Exp $"
64785 +/*      $Source: /cvs/master/quadrics/epmod/procfs_linux.c,v $*/
64786 +
64787 +#include <qsnet/kernel.h>
64788 +
64789 +#include <elan/kcomm.h>
64790 +#include <elan/epsvc.h>
64791 +#include <elan/epcomms.h>
64792 +
64793 +#include "cm.h"
64794 +#include "debug.h"
64795 +#include "conf_linux.h"
64796 +#include <linux/module.h>
64797 +#include <linux/wait.h>
64798 +#include <linux/poll.h>
64799 +
64800 +#include <qsnet/procfs_linux.h>
64801 +
64802 +struct proc_dir_entry *ep_procfs_root;
64803 +struct proc_dir_entry *ep_config_root;
64804 +
64805 +/*
64806 + * We provide a slightly "special" interface for /proc/elan/device%d/nodeset,
64807 + * so that it can be included in a "poll" system call.  On each "read" on the
64808 + * file, we generate a new nodeset if a) the previous one has been completely
64809 + * read and b) if it has changed since it was generated.
64810 + *
64811 + * Unfortunately ... this doesn't allow "tail -f" to work, since this uses
64812 + * fstat() on the fd, as we only hold the last nodeset string, we could not
64813 + * handle the case where two processes were reading at different rates.
64814 + * We could maybe have implemented this as a "sliding window", so that we 
64815 + * add a new nodeset string, when it has changed and someone reads past 
64816 + * end of the last one.   Then if someone read from before our "window"
64817 + * we would produce "padding" data.  The problem with this, is that a 
64818 + * simple "cat" on /proc/elan/device%d/nodeset will read the whole "file"
64819 + * which will be mostly padding !
64820 + *
64821 + * Just to note that the purpose of this interface is:
64822 + *    1) to allow cat /proc/elan/device%d/nodeset to show the current
64823 + *       nodeset.
64824 + *    2) to allow rms (or similar) to poll() on the file, and when the
64825 + *       nodeset changes read a new one.
64826 + *
64827 + * so ... we don't bother solving the troublesome "tail -f" problem.
64828 + */
64829 +
64830 +typedef struct nodeset_private
64831 +{
64832 +    struct nodeset_private *pr_next;
64833 +    EP_RAIL                *pr_rail;
64834 +    unsigned               pr_changed;
64835 +    char                  *pr_page;
64836 +    unsigned               pr_off;
64837 +    unsigned               pr_len;
64838 +} NODESET_PRIVATE;
64839 +
64840 +NODESET_PRIVATE   *ep_nodeset_list;
64841 +wait_queue_head_t  ep_nodeset_wait;
64842 +spinlock_t         ep_nodeset_lock;
64843 +
64844 +static int
64845 +proc_write_state(struct file *file, const char *buffer,
64846 +                unsigned long count, void *data)
64847 +{
64848 +    EP_RAIL *rail = (EP_RAIL *) data;
64849 +    char    tmpbuf[128];
64850 +    int     res;
64851 +
64852 +    if (count > sizeof (tmpbuf)-1)
64853 +       return (-EINVAL);
64854 +    
64855 +    MOD_INC_USE_COUNT;
64856 +    
64857 +    if (copy_from_user (tmpbuf, buffer, count))
64858 +       res = -EFAULT;
64859 +    else 
64860 +    {
64861 +       tmpbuf[count] = '\0';   
64862 +
64863 +       if (tmpbuf[count-1] == '\n')
64864 +           tmpbuf[count-1] = '\0';
64865 +
64866 +       if (! strcmp (tmpbuf, "start") && rail->State == EP_RAIL_STATE_UNINITIALISED)
64867 +           ep_start_rail (rail);
64868 +       
64869 +       if (! strcmp (tmpbuf, "stop") && rail->State > EP_RAIL_STATE_UNINITIALISED)
64870 +           ep_stop_rail (rail);
64871 +       
64872 +       if (! strcmp (tmpbuf, "offline") && rail->State > EP_RAIL_STATE_UNINITIALISED)
64873 +           cm_force_offline (rail, 1, CM_OFFLINE_PROCFS);
64874 +
64875 +       if (! strcmp (tmpbuf, "online") && rail->State > EP_RAIL_STATE_UNINITIALISED)
64876 +           cm_force_offline (rail, 0, CM_OFFLINE_PROCFS);
64877 +
64878 +       if (! strncmp (tmpbuf, "restart=", 8) && rail->State == EP_RAIL_STATE_RUNNING)
64879 +           cm_restart_node (rail, simple_strtol (tmpbuf + 8, NULL, 0));
64880 +
64881 +       if (! strncmp (tmpbuf, "panic=", 6))
64882 +           ep_panic_node (rail->System, simple_strtol(tmpbuf + 6, NULL, 0),
64883 +                          strchr (tmpbuf, ',') ? strchr(tmpbuf, ',') + 1 : "remote panic request");
64884 +
64885 +       if (! strncmp (tmpbuf, "raise=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
64886 +           rail->Operations.RaiseFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
64887 +
64888 +       if (! strncmp (tmpbuf, "lower=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
64889 +           rail->Operations.LowerFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
64890 +       
64891 +       res = count;
64892 +    }
64893 +
64894 +    MOD_DEC_USE_COUNT;
64895 +
64896 +    return (res);
64897 +}
64898 +
64899 +static int
64900 +proc_read_state(char *page, char **start, off_t off,
64901 +               int count, int *eof, void *data)
64902 +{
64903 +    EP_RAIL *rail = (EP_RAIL *) data;
64904 +    int     len;
64905 +
64906 +    switch (rail->State)
64907 +    {
64908 +    case EP_RAIL_STATE_UNINITIALISED:
64909 +       len = sprintf (page, "uninitialised\n");
64910 +       break;
64911 +    case EP_RAIL_STATE_STARTED:
64912 +       len = sprintf (page, "started\n");
64913 +       break;
64914 +    case EP_RAIL_STATE_RUNNING:
64915 +       len = sprintf (page, "running NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
64916 +       break;
64917 +    case EP_RAIL_STATE_INCOMPATIBLE:
64918 +       len = sprintf (page, "incompatible NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
64919 +       break;
64920 +    default:
64921 +       len = sprintf (page, "<unknown>\n");
64922 +       break;
64923 +    }
64924 +
64925 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
64926 +}
64927 +
64928 +static int
64929 +proc_write_display(struct file *file, const char *buffer,
64930 +                  unsigned long count, void *data)
64931 +{
64932 +    EP_RAIL *rail = (EP_RAIL *) data;
64933 +    char    tmpbuf[128];
64934 +    int     res;
64935 +
64936 +    if (count > sizeof (tmpbuf)-1)
64937 +       return (-EINVAL);
64938 +    
64939 +    MOD_INC_USE_COUNT;
64940 +    
64941 +    if (copy_from_user (tmpbuf, buffer, count))
64942 +       res = -EFAULT;
64943 +    else 
64944 +    {
64945 +       tmpbuf[count] = '\0';   
64946 +
64947 +       if (tmpbuf[count-1] == '\n')
64948 +           tmpbuf[count-1] = '\0';
64949 +
64950 +       if (! strcmp (tmpbuf, "rail"))
64951 +           DisplayRail (rail);
64952 +       if (! strcmp (tmpbuf, "segs"))
64953 +           DisplaySegs (rail);
64954 +       if (! strcmp (tmpbuf, "nodes"))
64955 +           DisplayNodes (rail);
64956 +       if (! strcmp (tmpbuf, "status"))
64957 +           DisplayStatus (rail);
64958 +       if (! strcmp (tmpbuf, "debug") && rail->Operations.Debug)
64959 +           rail->Operations.Debug (rail);
64960 +       if (! strncmp (tmpbuf, "epcomms", 7))
64961 +           ep_comms_display (rail->System, tmpbuf[7] == '=' ? tmpbuf + 8 : NULL);
64962 +       res = count;
64963 +    }
64964 +
64965 +    MOD_DEC_USE_COUNT;
64966 +
64967 +    return (res);
64968 +}
64969 +
64970 +static int
64971 +proc_read_display(char *page, char **start, off_t off,
64972 +                 int count, int *eof, void *data)
64973 +{
64974 +    int len = sprintf (page, "<unreadable>\n");
64975 +    
64976 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
64977 +}
64978 +
64979 +
64980 +static int
64981 +proc_read_stats(char *page, char **start, off_t off,
64982 +               int count, int *eof, void *data)
64983 +{
64984 +    EP_RAIL *rail = (EP_RAIL *) data;
64985 +
64986 +    if ( rail == NULL ) {
64987 +       strcpy(page,"proc_read_stats rail=NULL\n");
64988 +    } else {
64989 +       page[0] = 0;
64990 +       ep_fillout_stats(rail, page);
64991 +       rail->Operations.FillOutStats (rail, page);
64992 +    }
64993 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
64994 +}
64995 +
64996 +static int
64997 +proc_read_devinfo(char *page, char **start, off_t off,
64998 +                 int count, int *eof, void *data)
64999 +{
65000 +    EP_RAIL       *rail    = (EP_RAIL *) data;
65001 +    ELAN_DEVINFO  *devinfo = &rail->Devinfo;
65002 +    ELAN_POSITION *pos     = &rail->Position;
65003 +    char          *p       = page;
65004 +    
65005 +    switch (devinfo->dev_device_id)
65006 +    {
65007 +    case PCI_DEVICE_ID_ELAN3:
65008 +       p += sprintf (p, "ep%d is elan3 %d rev %c\n", rail->Number, 
65009 +                     devinfo->dev_instance, 'a' + devinfo->dev_revision_id);
65010 +       break;
65011 +       
65012 +    case PCI_DEVICE_ID_ELAN4:
65013 +       p += sprintf (p, "ep%d is elan4 %d rev %c\n", rail->Number, 
65014 +                     devinfo->dev_instance, 'a' + devinfo->dev_revision_id);
65015 +       break;
65016 +    default:
65017 +       p += sprintf (p, "ep%d is unkown %x/%x\n", rail->Number, devinfo->dev_vendor_id, devinfo->dev_device_id);
65018 +       break;
65019 +    }
65020 +
65021 +    if (rail->State == EP_RAIL_STATE_RUNNING)
65022 +       p += sprintf (p, "ep%d nodeid %d numnodes %d\n", rail->Number, pos->pos_nodeid, pos->pos_nodes);
65023 +
65024 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, p - page));
65025 +}
65026 +
65027 +static struct rail_info
65028 +{
65029 +    char *name;
65030 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
65031 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
65032 +} rail_info[] = {
65033 +    {"state",   proc_read_state,   proc_write_state},
65034 +    {"display", proc_read_display, proc_write_display},
65035 +    {"stats",   proc_read_stats,   NULL},
65036 +    {"devinfo", proc_read_devinfo, NULL},
65037 +};
65038 +
65039 +static int
65040 +nodeset_open (struct inode *inode, struct file *file)
65041 +{
65042 +    NODESET_PRIVATE *pr;
65043 +
65044 +    if ((pr = kmalloc (sizeof (NODESET_PRIVATE), GFP_KERNEL)) == NULL)
65045 +       return (-ENOMEM);
65046 +    
65047 +    pr->pr_changed = 1;
65048 +    pr->pr_off     = 0;
65049 +    pr->pr_len     = 0;
65050 +    pr->pr_page    = NULL;
65051 +    pr->pr_rail    = (EP_RAIL *)( PDE(inode)->data );
65052 +
65053 +    spin_lock (&ep_nodeset_lock);
65054 +    pr->pr_next = ep_nodeset_list;
65055 +    ep_nodeset_list = pr;
65056 +    spin_unlock (&ep_nodeset_lock);
65057 +
65058 +    file->private_data = (void *) pr;
65059 +
65060 +    MOD_INC_USE_COUNT;
65061 +    return (0);
65062 +}
65063 +
65064 +static int
65065 +nodeset_release (struct inode *inode, struct file *file)
65066 +{
65067 +    NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data;
65068 +    NODESET_PRIVATE **ppr;
65069 +
65070 +    spin_lock (&ep_nodeset_lock);
65071 +    for (ppr = &ep_nodeset_list; (*ppr) != pr; ppr = &(*ppr)->pr_next)
65072 +       ;
65073 +    (*ppr) = pr->pr_next;
65074 +    spin_unlock (&ep_nodeset_lock);
65075 +
65076 +    if (pr->pr_page)
65077 +       free_page ((unsigned long) pr->pr_page);
65078 +    kfree (pr);
65079 +    
65080 +    MOD_DEC_USE_COUNT;
65081 +    return (0);
65082 +}
65083 +
65084 +static ssize_t
65085 +nodeset_read (struct file *file, char *buf, size_t count, loff_t *ppos)
65086 +{
65087 +    NODESET_PRIVATE *pr  = (NODESET_PRIVATE *) file->private_data;
65088 +    EP_RAIL          *rail = pr->pr_rail;
65089 +    int              error;
65090 +    unsigned long    flags;
65091 +
65092 +    if (!pr->pr_changed && pr->pr_off >= pr->pr_len)
65093 +       return (0);
65094 +
65095 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
65096 +       return (error);
65097 +
65098 +    if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
65099 +       return (-ENOMEM);
65100 +
65101 +    if (pr->pr_off >= pr->pr_len)
65102 +    {
65103 +       kmutex_lock (&rail->CallbackLock);
65104 +       if (rail->State == EP_RAIL_STATE_RUNNING)
65105 +       {
65106 +           spin_lock_irqsave (&rail->System->NodeLock, flags);
65107 +           ep_sprintf_bitmap (pr->pr_page, PAGESIZE, statemap_tobitmap(rail->NodeSet), 0, 0, rail->Position.pos_nodes);
65108 +           spin_unlock_irqrestore (&rail->System->NodeLock, flags);
65109 +
65110 +           if (rail->SwitchBroadcastLevel == -1)
65111 +               strcat (pr->pr_page, "<disconnected>");
65112 +           else if (rail->SwitchBroadcastLevel < (rail->Position.pos_levels-1))
65113 +               sprintf (pr->pr_page + strlen (pr->pr_page), "<%d>", rail->SwitchBroadcastLevel);
65114 +           strcat (pr->pr_page, "\n");
65115 +       }
65116 +       else
65117 +           strcpy (pr->pr_page, "<not running>\n");
65118 +       kmutex_unlock (&rail->CallbackLock);
65119 +
65120 +       pr->pr_len     = strlen (pr->pr_page);
65121 +       pr->pr_off     = 0;
65122 +       pr->pr_changed = 0;
65123 +    }
65124 +
65125 +    if (count >= (pr->pr_len - pr->pr_off))
65126 +       count = pr->pr_len - pr->pr_off;
65127 +
65128 +    copy_to_user (buf, pr->pr_page + pr->pr_off, count);
65129 +
65130 +    pr->pr_off += count;
65131 +    *ppos      += count;
65132 +
65133 +    if (pr->pr_off >= pr->pr_len)
65134 +    {
65135 +       free_page ((unsigned long) pr->pr_page);
65136 +       pr->pr_page = NULL;
65137 +    }
65138 +
65139 +    return (count);
65140 +}
65141 +
65142 +static unsigned int
65143 +nodeset_poll (struct file *file, poll_table *wait)
65144 +{
65145 +    NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data;
65146 +
65147 +    poll_wait (file, &ep_nodeset_wait, wait);
65148 +    if (pr->pr_changed || pr->pr_off < pr->pr_len)
65149 +       return (POLLIN | POLLRDNORM);
65150 +    return (0);
65151 +}
65152 +
65153 +static void 
65154 +nodeset_callback (void *arg, statemap_t *map)
65155 +{
65156 +    EP_RAIL         *rail = (EP_RAIL *) arg;
65157 +    NODESET_PRIVATE *pr;
65158 +
65159 +    ep_display_bitmap (rail->Name, "Nodeset", statemap_tobitmap(map), 0, ep_numnodes(rail->System));
65160 +
65161 +    spin_lock (&ep_nodeset_lock);
65162 +    for (pr = ep_nodeset_list; pr; pr = pr->pr_next)
65163 +       if (pr->pr_rail == rail)
65164 +           pr->pr_changed = 1;
65165 +    spin_unlock (&ep_nodeset_lock);
65166 +
65167 +    wake_up_interruptible (&ep_nodeset_wait);
65168 +}
65169 +
65170 +void
65171 +proc_character_fill (long mode, char *fmt, ...)
65172 +{
65173 +    int len;
65174 +    va_list ap;
65175 +    PROC_PRIVATE *private = (PROC_PRIVATE *)mode;
65176 +    
65177 +    /* is the buffer already full */
65178 +    if (private->pr_len >= private->pr_data_len) 
65179 +       return;
65180 +    
65181 +    /* attempt to fill up to the remaining space */
65182 +    va_start (ap, fmt);
65183 +    len = vsnprintf ( & private->pr_data[private->pr_len], (private->pr_data_len - private->pr_len), fmt, ap);
65184 +    va_end (ap);
65185 +    
65186 +    if (len < 0 ) 
65187 +    {
65188 +       /* we have reached the end of buffer and need to fail all future writes
65189 +        * the caller can check (pr_len >= pr_data_len) and recall with more space 
65190 +        */
65191 +       private->pr_len = private->pr_data_len;
65192 +       return;
65193 +    }
65194 +    
65195 +    /* move the length along */
65196 +    private->pr_len += len;   
65197 +}
65198 +
65199 +int
65200 +proc_release (struct inode *inode, struct file *file)
65201 +{
65202 +    PROC_PRIVATE *pr = (PROC_PRIVATE *) file->private_data;
65203 +    
65204 +    if (pr->pr_data)
65205 +       KMEM_FREE (pr->pr_data, pr->pr_data_len);
65206 +    kfree (pr);
65207 +    
65208 +    MOD_DEC_USE_COUNT;
65209 +    return (0);
65210 +}
65211 +
65212 +ssize_t
65213 +proc_read (struct file *file, char *buf, size_t count, loff_t *ppos)
65214 +{
65215 +    PROC_PRIVATE *pr  = (PROC_PRIVATE *) file->private_data;
65216 +    int           error;
65217 +
65218 +    if (pr->pr_off >= pr->pr_len)
65219 +       return (0);
65220 +
65221 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
65222 +       return (error);
65223 +
65224 +    if (count >= (pr->pr_len - pr->pr_off))
65225 +       count = pr->pr_len - pr->pr_off;
65226 +
65227 +    copy_to_user (buf, pr->pr_data + pr->pr_off, count);
65228 +
65229 +    pr->pr_off += count;
65230 +    *ppos      += count;
65231 +
65232 +    return (count);
65233 +}
65234 +
65235 +static int
65236 +proc_open (struct inode *inode, struct file *file)
65237 +{
65238 +    PROC_PRIVATE *pr;
65239 +    CM_RAIL      *cmRail;
65240 +    int           pages = 4;
65241 +    unsigned long flags;
65242 +
65243 +    if ((pr = kmalloc (sizeof (PROC_PRIVATE), GFP_KERNEL)) == NULL)
65244 +       return (-ENOMEM);
65245 +    
65246 +    pr->pr_rail = (EP_RAIL *)(PDE(inode)->data);
65247 +       
65248 +    do {       
65249 +       pr->pr_data_len = PAGESIZE * pages;
65250 +
65251 +       KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1);
65252 +       if (pr->pr_data == NULL) 
65253 +       { 
65254 +           pr->pr_len  = sprintf (pr->pr_data, "Out of Memory\n");
65255 +           break;
65256 +       } 
65257 +       
65258 +       pr->pr_off     = 0;
65259 +       pr->pr_len     = 0;
65260 +       pr->pr_data[0] = 0;
65261 +       
65262 +       if (pr->pr_rail->State != EP_RAIL_STATE_RUNNING) 
65263 +       { 
65264 +           pr->pr_len  = sprintf (pr->pr_data, "Rail not Running\n");
65265 +           break;
65266 +       } 
65267 +       else 
65268 +       {
65269 +           pr->pr_di.func  = proc_character_fill;
65270 +           pr->pr_di.arg   = (long)pr;
65271 +
65272 +           if (!strcmp("maps", file->f_dentry->d_iname)) 
65273 +           {
65274 +               cmRail = pr->pr_rail->ClusterRail;
65275 +
65276 +               spin_lock_irqsave (&cmRail->Lock, flags);
65277 +               DisplayNodeMaps (&pr->pr_di, cmRail);   
65278 +               spin_unlock_irqrestore (&cmRail->Lock, flags);  
65279 +           }
65280 +
65281 +           if (!strcmp("segs", file->f_dentry->d_iname)) 
65282 +           {
65283 +               cmRail = pr->pr_rail->ClusterRail;
65284 +               
65285 +               spin_lock_irqsave (&cmRail->Lock, flags);       
65286 +               DisplayNodeSgmts (&pr->pr_di, cmRail);
65287 +               spin_unlock_irqrestore (&cmRail->Lock, flags);
65288 +           }
65289 +
65290 +           if (!strcmp("tree", file->f_dentry->d_iname)) 
65291 +               DisplayRailDo (&pr->pr_di, pr->pr_rail);
65292 +       }
65293 +
65294 +       if ( pr->pr_len < pr->pr_data_len) 
65295 +           break; /* we managed to get all the output into the buffer */
65296 +
65297 +       pages++;
65298 +       KMEM_FREE ( pr->pr_data,  pr->pr_data_len);
65299 +    } while (1);
65300 +       
65301 +
65302 +    file->private_data = (void *) pr;
65303 +
65304 +    MOD_INC_USE_COUNT;
65305 +    return (0);
65306 +}
65307 +
65308 +struct file_operations proc_nodeset_operations = 
65309 +{
65310 +    read:      nodeset_read,
65311 +    poll:      nodeset_poll,
65312 +    open:      nodeset_open,
65313 +    release:   nodeset_release,
65314 +};
65315 +
65316 +struct file_operations proc_operations = 
65317 +{
65318 +    read:      proc_read,
65319 +    open:      proc_open,
65320 +    release:   proc_release,
65321 +};
65322 +
65323 +void
65324 +ep_procfs_rail_init (EP_RAIL *rail)
65325 +{
65326 +    struct proc_dir_entry *dir;
65327 +    struct proc_dir_entry *p;
65328 +    char                   name[10];
65329 +    int                    i;
65330 +
65331 +    sprintf (name, "rail%d", rail->Number);
65332 +
65333 +    if ((dir = rail->ProcDir = proc_mkdir (name, ep_procfs_root)) == NULL)
65334 +       return;
65335 +
65336 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
65337 +    {
65338 +       if ((p = create_proc_entry (rail_info[i].name, 0, dir)) != NULL)
65339 +       {
65340 +           p->read_proc  = rail_info[i].read_func;
65341 +           p->write_proc = rail_info[i].write_func;
65342 +           p->data       = rail;
65343 +           p->owner      = THIS_MODULE;
65344 +       }
65345 +    }
65346 +
65347 +    if ((p = create_proc_entry ("nodeset", 0, dir)) != NULL)
65348 +    {
65349 +       p->proc_fops = &proc_nodeset_operations;
65350 +       p->owner     = THIS_MODULE;
65351 +       p->data      = rail;
65352 +
65353 +       rail->CallbackRegistered = 1;
65354 +       ep_register_callback (rail, EP_CB_NODESET, nodeset_callback, rail);
65355 +    }
65356 +     
65357 +    if ((p = create_proc_entry ("maps", 0, dir)) != NULL)
65358 +    {
65359 +       p->proc_fops = &proc_operations;
65360 +       p->owner     = THIS_MODULE;
65361 +       p->data      = rail;    
65362 +    }
65363 +    
65364 +    if ((p = create_proc_entry ("segs", 0, dir)) != NULL)
65365 +    {
65366 +       p->proc_fops = &proc_operations;
65367 +       p->owner     = THIS_MODULE;
65368 +       p->data      = rail;
65369 +    }
65370 +    
65371 +    if ((p = create_proc_entry ("tree", 0, dir)) != NULL)
65372 +    {
65373 +       p->proc_fops = &proc_operations;
65374 +       p->owner     = THIS_MODULE;
65375 +       p->data      = rail;
65376 +    }
65377 +
65378 +}
65379 +
65380 +void
65381 +ep_procfs_rail_fini (EP_RAIL *rail)
65382 +{
65383 +    struct proc_dir_entry *dir = rail->ProcDir;
65384 +    char name[10];
65385 +    int  i;
65386 +
65387 +    if (dir == NULL)
65388 +       return;
65389 +
65390 +    if (rail->CallbackRegistered)
65391 +    {
65392 +       ep_remove_callback (rail, EP_CB_NODESET, nodeset_callback, rail);
65393 +
65394 +       remove_proc_entry ("nodeset", dir);
65395 +    }
65396 +
65397 +    remove_proc_entry ("maps",    dir);
65398 +    remove_proc_entry ("segs",    dir);
65399 +    remove_proc_entry ("tree",    dir);
65400 +
65401 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
65402 +       remove_proc_entry (rail_info[i].name, dir);
65403 +
65404 +    sprintf (name, "rail%d", rail->Number);
65405 +    remove_proc_entry (name, ep_procfs_root);
65406 +}
65407 +
65408 +#include "quadrics_version.h"
65409 +static char     quadrics_version[] = QUADRICS_VERSION;
65410 +
65411 +void
65412 +ep_procfs_init()
65413 +{
65414 +    extern int txd_stabilise;
65415 +    extern int MaxSwitchLevels;
65416 +
65417 +    spin_lock_init (&ep_nodeset_lock);
65418 +    init_waitqueue_head (&ep_nodeset_wait);
65419 +
65420 +    ep_procfs_root = proc_mkdir ("ep", qsnet_procfs_root);
65421 +    ep_config_root = proc_mkdir ("config", ep_procfs_root);
65422 +
65423 +    qsnet_proc_register_str (ep_procfs_root, "version", quadrics_version, 1);
65424 +
65425 +    qsnet_proc_register_hex (ep_config_root, "epdebug",               &epdebug,               0);
65426 +    qsnet_proc_register_hex (ep_config_root, "epdebug_console",       &epdebug_console,       0);
65427 +    qsnet_proc_register_hex (ep_config_root, "epdebug_cmlevel",       &epdebug_cmlevel,       0);
65428 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
65429 +    qsnet_proc_register_hex (ep_config_root, "epdebug_check_sum",     &epdebug_check_sum,     0);
65430 +#endif
65431 +    qsnet_proc_register_hex (ep_config_root, "epcomms_forward_limit", &epcomms_forward_limit, 0);
65432 +    qsnet_proc_register_int (ep_config_root, "txd_stabilise",         &txd_stabilise,         0);
65433 +    qsnet_proc_register_int (ep_config_root, "assfail_mode",          &assfail_mode,          0);
65434 +    qsnet_proc_register_int (ep_config_root, "max_switch_levels",     &MaxSwitchLevels,       1);
65435 +
65436 +    ep_procfs_rcvr_xmtr_init();
65437 +}
65438 +
65439 +void
65440 +ep_procfs_fini(void)
65441 +{
65442 +    ep_procfs_rcvr_xmtr_fini();
65443 +
65444 +    remove_proc_entry ("max_switch_levels",     ep_config_root);
65445 +    remove_proc_entry ("assfail_mode",          ep_config_root);
65446 +    remove_proc_entry ("txd_stabilise",         ep_config_root);
65447 +    remove_proc_entry ("epcomms_forward_limit", ep_config_root);
65448 +
65449 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
65450 +    remove_proc_entry ("epdebug_check_sum",     ep_config_root);
65451 +#endif
65452 +    remove_proc_entry ("epdebug_cmlevel",       ep_config_root);
65453 +    remove_proc_entry ("epdebug_console",       ep_config_root);
65454 +    remove_proc_entry ("epdebug",               ep_config_root);
65455 +
65456 +    remove_proc_entry ("version", ep_procfs_root);
65457 +    
65458 +    remove_proc_entry ("config", ep_procfs_root);
65459 +    remove_proc_entry ("ep", qsnet_procfs_root);
65460 +
65461 +    spin_lock_destroy (&ep_nodeset_lock);
65462 +}
65463 +
65464 +/*
65465 + * Local variables:
65466 + * c-file-style: "stroustrup"
65467 + * End:
65468 + */
65469 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/quadrics_version.h
65470 ===================================================================
65471 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/quadrics_version.h      2004-02-23 16:02:56.000000000 -0500
65472 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/quadrics_version.h   2005-07-28 14:52:52.901670680 -0400
65473 @@ -0,0 +1 @@
65474 +#define QUADRICS_VERSION "4.31qsnet"
65475 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/railhints.c
65476 ===================================================================
65477 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/railhints.c     2004-02-23 16:02:56.000000000 -0500
65478 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/railhints.c  2005-07-28 14:52:52.902670528 -0400
65479 @@ -0,0 +1,103 @@
65480 +/*
65481 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65482 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65483 + *
65484 + *    For licensing information please see the supplied COPYING file
65485 + *
65486 + */
65487 +
65488 +#ident "@(#)$Id: railhints.c,v 1.5 2004/02/06 22:37:06 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
65489 +/*      $Source: /cvs/master/quadrics/epmod/railhints.c,v $*/
65490 +
65491 +#include <qsnet/kernel.h>
65492 +
65493 +#include <elan/kcomm.h>
65494 +#include <elan/epsvc.h>
65495 +#include <elan/epcomms.h>
65496 +
65497 +#include "debug.h"
65498 +
65499 +int
65500 +ep_pickRail(EP_RAILMASK railmask)
65501 +{
65502 +    static volatile int lastGlobal;
65503 +    int i, rnum, last = lastGlobal;
65504 +
65505 +    /* Pick a single rail out of the railmask */
65506 +    for (i = 0; i < EP_MAX_RAILS; i++)
65507 +       if (railmask & (1 << ((last + i) % EP_MAX_RAILS)))
65508 +           break;
65509 +
65510 +    if (i == EP_MAX_RAILS)
65511 +       return (-1);
65512 +
65513 +    rnum = (last + i) % EP_MAX_RAILS;
65514 +
65515 +    lastGlobal = (rnum + 1) % EP_MAX_RAILS;
65516 +
65517 +    ASSERT (railmask & (1 << rnum));
65518 +
65519 +    return (rnum);
65520 +}
65521 +
65522 +int
65523 +ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails)
65524 +{
65525 +    /* Return a single rail out of allowed mask with the best connectivity for broadcast. */
65526 +    return (ep_pickRail (allowedRails & xmtr->RailMask));
65527 +}
65528 +
65529 +int
65530 +ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId)
65531 +{
65532 +    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId];
65533 +
65534 +    EPRINTF5 (DBG_XMTR, "ep_xmtr_prefrail: xmtr=%p allowedRails=%x nodeId=%d xmtr->RailMaks=%x Connected=%x\n", 
65535 +             xmtr, allowedRails, nodeId, xmtr->RailMask, node->ConnectedRails);
65536 +
65537 +    /* Return a single rail which is currently connected to nodeId (limited to rails
65538 +     * in allowedmask) - if more than one rail is possible, then round-robin between 
65539 +     * them */
65540 +    return (ep_pickRail (allowedRails & xmtr->RailMask & node->ConnectedRails));
65541 +}
65542 +
65543 +EP_RAILMASK
65544 +ep_xmtr_availrails (EP_XMTR *xmtr)
65545 +{
65546 +    /* Return which rails can be used to transmit on. */
65547 +
65548 +    return (xmtr->RailMask);
65549 +}
65550 +
65551 +EP_RAILMASK
65552 +ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId)
65553 +{
65554 +    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId];
65555 +
65556 +    /* Return which rails can be used to transmit to this node. */
65557 +
65558 +    return (xmtr->RailMask & node->ConnectedRails);
65559 +}
65560 +
65561 +int
65562 +ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails)
65563 +{
65564 +    /* Return the "best" rail for queueing a receive buffer out on - this will be a
65565 +     * rail with ThreadWaiting set or the rail with the least descriptors queued
65566 +     * on it. */
65567 +    
65568 +    return (ep_pickRail (allowedRails & rcvr->RailMask));
65569 +}
65570 +
65571 +EP_RAILMASK
65572 +ep_rcvr_availrails (EP_RCVR *rcvr)
65573 +{
65574 +    /* Return which rails can be used to queue receive buffers. */
65575 +    return (rcvr->RailMask);
65576 +}
65577 +
65578 +/*
65579 + * Local variables:
65580 + * c-file-style: "stroustrup"
65581 + * End:
65582 + */
65583 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/rmap.c
65584 ===================================================================
65585 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/rmap.c  2004-02-23 16:02:56.000000000 -0500
65586 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/rmap.c       2005-07-28 14:52:52.902670528 -0400
65587 @@ -0,0 +1,365 @@
65588 +/*
65589 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65590 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65591 + *
65592 + *    For licensing information please see the supplied COPYING file
65593 + *
65594 + */
65595 +
65596 +#ident "@(#)$Id: rmap.c,v 1.15 2004/05/19 10:24:38 david Exp $"
65597 +/*      $Source: /cvs/master/quadrics/epmod/rmap.c,v $ */
65598 +
65599 +#include <qsnet/kernel.h>
65600 +#include <elan/rmap.h>
65601 +
65602 +#include "debug.h"
65603 +
65604 +void
65605 +ep_display_rmap (EP_RMAP *mp)
65606 +{
65607 +    EP_RMAP_ENTRY *bp;
65608 +    unsigned long flags;
65609 +    
65610 +    spin_lock_irqsave (&mp->m_lock, flags);
65611 +    ep_debugf (DBG_DEBUG, "map: %s size %d free %d\n", mp->m_name, mp->m_size, mp->m_free);
65612 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
65613 +       ep_debugf (DBG_DEBUG, "   [%lx - %lx]\n", bp->m_addr, bp->m_addr+bp->m_size-1);
65614 +    spin_unlock_irqrestore (&mp->m_lock, flags);
65615 +}
65616 +
65617 +void
65618 +ep_mapinit (EP_RMAP *mp, char *name, u_int mapsize)
65619 +{
65620 +    spin_lock_init (&mp->m_lock);
65621 +    kcondvar_init (&mp->m_wait);
65622 +    
65623 +    /* The final segment in the array has size 0 and acts as a delimiter
65624 +     * we ensure that we never use segments past the end of the array by
65625 +     * maintaining a free segment count in m_free.  When excess segments
65626 +     * occur we discard some resources */
65627 +    
65628 +    mp->m_size = mapsize;
65629 +    mp->m_free = mapsize;
65630 +    mp->m_name = name;
65631 +    
65632 +    bzero (mp->m_map, sizeof (EP_RMAP_ENTRY) * (mapsize+1));
65633 +}
65634 +
65635 +EP_RMAP *
65636 +ep_rmallocmap (size_t mapsize, char *name, int cansleep)
65637 +{
65638 +    EP_RMAP *mp;
65639 +
65640 +    KMEM_ZALLOC (mp, EP_RMAP *, sizeof (EP_RMAP) + mapsize*sizeof (EP_RMAP_ENTRY), cansleep);
65641 +
65642 +    if (mp != NULL)
65643 +       ep_mapinit (mp, name, mapsize);
65644 +
65645 +    return (mp);
65646 +}
65647 +
65648 +void
65649 +ep_rmfreemap (EP_RMAP *mp)
65650 +{
65651 +    spin_lock_destroy (&mp->m_lock);
65652 +    kcondvar_destroy (&mp->m_wait);
65653 +    
65654 +    KMEM_FREE (mp, sizeof (EP_RMAP) + mp->m_size * sizeof (EP_RMAP_ENTRY));
65655 +}
65656 +
65657 +static u_long
65658 +ep_rmalloc_locked (EP_RMAP *mp, size_t size)
65659 +{
65660 +    EP_RMAP_ENTRY *bp;
65661 +    u_long            addr;
65662 +    
65663 +    ASSERT (size > 0);
65664 +    ASSERT (SPINLOCK_HELD (&mp->m_lock));
65665 +
65666 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
65667 +    {
65668 +       if (bp->m_size >= size)
65669 +       {
65670 +           addr = bp->m_addr;
65671 +           bp->m_addr += size;
65672 +           
65673 +           if ((bp->m_size -= size) == 0)
65674 +           {
65675 +               /* taken all of this slot - so shift the map down */
65676 +               do {
65677 +                   bp++;
65678 +                   (bp-1)->m_addr = bp->m_addr;
65679 +               } while (((bp-1)->m_size = bp->m_size) != 0);
65680 +
65681 +               mp->m_free++;
65682 +           }
65683 +           return (addr);
65684 +       }
65685 +    }
65686 +
65687 +    return (0);
65688 +}
65689 +
65690 +u_long
65691 +ep_rmalloc (EP_RMAP *mp, size_t size, int cansleep)
65692 +{
65693 +    unsigned long addr;
65694 +    unsigned long flags;
65695 +
65696 +    spin_lock_irqsave (&mp->m_lock, flags);
65697 +    while ((addr = ep_rmalloc_locked (mp, size)) == 0 && cansleep)
65698 +    {
65699 +       mp->m_want = 1;
65700 +       kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags);
65701 +    }
65702 +
65703 +    spin_unlock_irqrestore (&mp->m_lock, flags);
65704 +
65705 +    return (addr);
65706 +}
65707 +
65708 +
65709 +
65710 +u_long
65711 +ep_rmalloc_constrained (EP_RMAP *mp, size_t size, u_long alo, u_long ahi, u_long align, int cansleep)
65712 +{
65713 +    EP_RMAP_ENTRY *bp, *bp2, *lbp;
65714 +    unsigned long addr=0;
65715 +    size_t        delta;
65716 +    int           ok;
65717 +    unsigned long flags;
65718 +
65719 +    spin_lock_irqsave (&mp->m_lock, flags);
65720 + again:
65721 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
65722 +    {
65723 +       delta = 0;
65724 +       
65725 +       if (alo < bp->m_addr)
65726 +       {
65727 +           addr = bp->m_addr;
65728 +           
65729 +           if (addr & (align-1))
65730 +               addr = (addr + (align-1)) & ~(align-1);
65731 +           
65732 +           delta = addr - bp->m_addr;
65733 +           
65734 +           if (ahi >= bp->m_addr + bp->m_size)
65735 +               ok = (bp->m_size >= (size + delta));
65736 +           else
65737 +               ok = ((bp->m_addr + size + delta) <= ahi);
65738 +       }
65739 +       else
65740 +       {
65741 +           addr = alo;
65742 +           if (addr & (align-1))
65743 +               addr = (addr + (align-1)) & ~(align-1);
65744 +           delta = addr - bp->m_addr;
65745 +           
65746 +           if (ahi >= bp->m_addr + bp->m_size)
65747 +               ok = ((alo + size + delta) <= (bp->m_addr + bp->m_size));
65748 +           else
65749 +               ok = ((alo + size + delta) <= ahi);
65750 +       }
65751 +
65752 +       if (ok)
65753 +           break;
65754 +    }  
65755 +    
65756 +    if (bp->m_size == 0)
65757 +    {
65758 +       if (cansleep)
65759 +       {
65760 +           mp->m_want = 1;
65761 +           kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags);
65762 +           goto again;
65763 +       }
65764 +       spin_unlock_irqrestore (&mp->m_lock, flags);
65765 +       return (0);
65766 +    }
65767 +
65768 +    /* found an appropriate map entry - so take the bit out which we want */
65769 +    if (bp->m_addr == addr) 
65770 +    {
65771 +       if (bp->m_size == size) 
65772 +       {
65773 +           /* allocate entire segment and compress map */
65774 +           bp2 = bp;
65775 +           while (bp2->m_size) 
65776 +           {
65777 +               bp2++;
65778 +               (bp2-1)->m_addr = bp2->m_addr;
65779 +               (bp2-1)->m_size = bp2->m_size;
65780 +           }
65781 +           mp->m_free++;
65782 +       }
65783 +       else 
65784 +       {
65785 +           /* take from start of segment */
65786 +           bp->m_addr += size;
65787 +           bp->m_size -= size;
65788 +       }
65789 +    }
65790 +    else 
65791 +    {
65792 +       if (bp->m_addr + bp->m_size == addr + size) 
65793 +       {
65794 +           /* take from end of segment */
65795 +           bp->m_size -= size;
65796 +       }
65797 +       else 
65798 +       {
65799 +           /* split the segment loosing the last entry if there's no space */
65800 +           if (mp->m_free == 0) 
65801 +           {
65802 +               /* find last map entry */
65803 +               for (lbp = bp; lbp->m_size != 0; lbp++)
65804 +                   ;
65805 +               lbp--;
65806 +               
65807 +               if (lbp->m_size > (lbp-1)->m_size)
65808 +                   lbp--;
65809 +               
65810 +               printk ("%s: lost resource map entry [%lx, %lx]\n",
65811 +                       mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size);
65812 +               
65813 +               *lbp = *(lbp+1);
65814 +               (lbp+1)->m_size = 0;
65815 +               
65816 +               mp->m_free++;
65817 +           }
65818 +           
65819 +           for (bp2 = bp; bp2->m_size != 0; bp2++)
65820 +               continue;
65821 +           
65822 +           for (bp2--; bp2 > bp; bp2--)
65823 +           {
65824 +               (bp2+1)->m_addr = bp2->m_addr;
65825 +               (bp2+1)->m_size = bp2->m_size;
65826 +           }
65827 +
65828 +           mp->m_free--;
65829 +           
65830 +           (bp+1)->m_addr = addr + size;
65831 +           (bp+1)->m_size = bp->m_addr + bp->m_size - (addr + size);
65832 +           bp->m_size = addr - bp->m_addr;
65833 +       }
65834 +    }
65835 +
65836 +    spin_unlock_irqrestore (&mp->m_lock, flags);
65837 +    return (addr);
65838 +}
65839 +
65840 +void
65841 +ep_rmfree (EP_RMAP *mp, size_t size, u_long addr)
65842 +{
65843 +    EP_RMAP_ENTRY *bp;
65844 +    unsigned long t;
65845 +    unsigned long flags;
65846 +
65847 +    spin_lock_irqsave (&mp->m_lock, flags);
65848 +
65849 +    ASSERT (addr != 0 && size > 0);
65850 +       
65851 +again:
65852 +    /* find the piece of the map which starts after the returned space
65853 +     * or the end of the map */
65854 +    for (bp = &mp->m_map[0]; bp->m_addr <= addr && bp->m_size != 0; bp++)
65855 +       ;
65856 +
65857 +    /* bp points to the piece to the right of where we want to go */
65858 +    
65859 +    if (bp > &mp->m_map[0] && (bp-1)->m_addr + (bp-1)->m_size >= addr) 
65860 +    {
65861 +       /* merge with piece on the left */
65862 +       
65863 +       ASSERT ((bp-1)->m_addr + (bp-1)->m_size <= addr);
65864 +       
65865 +       (bp-1)->m_size += size;
65866 +       
65867 +       ASSERT (bp->m_size == 0 || addr+size <= bp->m_addr);
65868 +       
65869 +       if (bp->m_size && (addr + size) == bp->m_addr)
65870 +       {
65871 +           /* merge with the piece on the right by 
65872 +            * growing the piece on the left and shifting
65873 +            * the map down */
65874 +           
65875 +           ASSERT ((addr + size) <= bp->m_addr);
65876 +           
65877 +           (bp-1)->m_size += bp->m_size;
65878 +           while (bp->m_size) 
65879 +           {
65880 +               bp++;
65881 +               (bp-1)->m_addr = bp->m_addr;
65882 +               (bp-1)->m_size = bp->m_size;
65883 +           }
65884 +           
65885 +           mp->m_free++;
65886 +       }
65887 +    }
65888 +    else if (addr + size >= bp->m_addr && bp->m_size)
65889 +    {
65890 +       /* merge with piece to the right */
65891 +       
65892 +       ASSERT ((addr + size) <= bp->m_addr);
65893 +       
65894 +       bp->m_addr -= size;
65895 +       bp->m_size += size;
65896 +    }
65897 +    else
65898 +    {
65899 +       /* doesn't join with left or right - check for map
65900 +          overflow and discard the smallest of the last or
65901 +          next to last entries */
65902 +
65903 +       if (mp->m_free == 0)
65904 +       {
65905 +           EP_RMAP_ENTRY *lbp;
65906 +           
65907 +           /* find last map entry */
65908 +           for (lbp = bp; lbp->m_size != 0; lbp++)
65909 +               ;
65910 +           lbp--;
65911 +           
65912 +           if (lbp->m_size > (lbp-1)->m_size)
65913 +               lbp--;
65914 +           
65915 +           printk ("%s: lost resource map entry [%lx, %lx]\n", 
65916 +                   mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size);
65917 +           
65918 +           *lbp = *(lbp+1);
65919 +           (lbp+1)->m_size = 0;
65920 +
65921 +           mp->m_free++;
65922 +           goto again;
65923 +       }
65924 +
65925 +       /* make a new entry and push the remaining ones up */
65926 +       do {
65927 +           t = bp->m_addr;
65928 +           bp->m_addr = addr;
65929 +           addr = t;
65930 +           t = bp->m_size;
65931 +           bp->m_size = size;
65932 +           bp++;
65933 +       } while ((size = t) != 0);
65934 +
65935 +       mp->m_free--;
65936 +    }
65937 +    
65938 +    /* if anyone blocked on rmalloc failure, wake 'em up */
65939 +    if (mp->m_want)
65940 +    {
65941 +       mp->m_want = 0;
65942 +       kcondvar_wakeupall (&mp->m_wait, &mp->m_lock);
65943 +    }
65944 +
65945 +    spin_unlock_irqrestore (&mp->m_lock, flags);
65946 +}
65947 +
65948 +/*
65949 + * Local variables:
65950 + * c-file-style: "stroustrup"
65951 + * End:
65952 + */
65953 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/spinlock_elan3_thread.c
65954 ===================================================================
65955 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/spinlock_elan3_thread.c 2004-02-23 16:02:56.000000000 -0500
65956 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/spinlock_elan3_thread.c      2005-07-28 14:52:52.903670376 -0400
65957 @@ -0,0 +1,44 @@
65958 +/*
65959 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65960 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65961 + *
65962 + *    For licensing information please see the supplied COPYING file
65963 + *
65964 + */
65965 +
65966 +#ident "@(#)$Id: spinlock_elan3_thread.c,v 1.9 2003/10/07 13:22:38 david Exp $"
65967 +/*      $Source: /cvs/master/quadrics/epmod/spinlock_elan3_thread.c,v $ */
65968 +
65969 +#include <qsnet/types.h>
65970 +
65971 +#include <elan3/e3types.h>
65972 +#include <elan3/events.h>
65973 +#include <elan3/elanregs.h>
65974 +#include <elan3/intrinsics.h>
65975 +
65976 +#include <elan/nmh.h>
65977 +#include <elan/kcomm.h>
65978 +#include <elan/epcomms.h>
65979 +
65980 +#include "kcomm_elan3.h"
65981 +#include "epcomms_elan3.h"
65982 +
65983 +void
65984 +ep3_spinblock (EP3_SPINLOCK_ELAN *sle, EP3_SPINLOCK_MAIN *sl)
65985 +{
65986 +    do {
65987 +       sl->sl_seq = sle->sl_seq;                       /* Release my lock */
65988 +       
65989 +       while (sle->sl_lock)                            /* Wait until the main */
65990 +           c_break();                                  /* releases the lock */
65991 +       
65992 +       sle->sl_seq++;                                  /* and try and relock */
65993 +    } while (sle->sl_lock);
65994 +}
65995 +
65996 +
65997 +/*
65998 + * Local variables:
65999 + * c-file-style: "stroustrup"
66000 + * End:
66001 + */
66002 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/statemap.c
66003 ===================================================================
66004 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/statemap.c      2004-02-23 16:02:56.000000000 -0500
66005 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/statemap.c   2005-07-28 14:52:52.903670376 -0400
66006 @@ -0,0 +1,385 @@
66007 +/*
66008 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
66009 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
66010 + *
66011 + *    For licensing information please see the supplied COPYING file
66012 + *
66013 + */
66014 +
66015 +#ident "@(#)$Id: statemap.c,v 1.11.8.1 2004/11/18 12:05:00 david Exp $"
66016 +/*      $Source: /cvs/master/quadrics/epmod/statemap.c,v $ */
66017 +
66018 +#include <qsnet/kernel.h>
66019 +#include <elan/statemap.h>
66020 +
66021 +/******************************** global state bitmap stuff **********************************/
66022 +static int
66023 +statemap_setmapbit (bitmap_t *map, int offset, int bit)
66024 +{
66025 +   bitmap_t *e    = &map[offset >> BT_ULSHIFT];
66026 +   bitmap_t  mask = ((bitmap_t)1) << (offset & BT_ULMASK);
66027 +   int       rc = ((*e) & mask) != 0;
66028 +   
66029 +   if (bit)
66030 +   {
66031 +      *e |= mask;
66032 +      return (!rc);
66033 +   }
66034 +
66035 +   *e &= ~mask;
66036 +   return (rc);
66037 +}
66038 +
66039 +static int
66040 +statemap_firstsegbit (bitmap_t seg)
66041 +{
66042 +   int            bit = 0;
66043 +   
66044 +   if (seg == 0)
66045 +      return (-1);
66046 +
66047 +#if (BT_ULSHIFT == 6)
66048 +   if ((seg & 0xffffffffL) == 0)
66049 +   {
66050 +      seg >>= 32;
66051 +      bit += 32;
66052 +   }
66053 +#elif (BT_ULSHIFT != 5)
66054 +# error "Unexpected value of BT_ULSHIFT"
66055 +#endif
66056 +
66057 +   if ((seg & 0xffff) == 0)
66058 +   {
66059 +      seg >>= 16;
66060 +      bit += 16;
66061 +   }
66062 +      
66063 +   if ((seg & 0xff) == 0)
66064 +   {
66065 +      seg >>= 8;
66066 +      bit += 8;
66067 +   }
66068 +      
66069 +   if ((seg & 0xf) == 0)
66070 +   {
66071 +      seg >>= 4;
66072 +      bit += 4;
66073 +   }
66074 +      
66075 +   if ((seg & 0x3) == 0)
66076 +   {
66077 +      seg >>= 2;
66078 +      bit += 2;
66079 +   }
66080 +
66081 +   return (((seg & 0x1) == 0) ? bit + 1 : bit);
66082 +}
66083 +
66084 +bitmap_t
66085 +statemap_getseg (statemap_t *map, unsigned int offset)
66086 +{
66087 +   ASSERT (offset < map->size);
66088 +   ASSERT ((offset & BT_ULMASK) == 0);
66089 +
66090 +   return (map->bitmap[offset >> BT_ULSHIFT]);
66091 +}
66092 +
66093 +void
66094 +statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg)
66095 +{
66096 +   ASSERT (offset < map->size);
66097 +   ASSERT ((offset & BT_ULMASK) == 0);
66098 +
66099 +   offset >>= BT_ULSHIFT;
66100 +   if (map->bitmap[offset] == seg)
66101 +      return;
66102 +
66103 +   map->bitmap[offset] = seg;
66104 +
66105 +   if (statemap_setmapbit (map->changemap2, offset,       1) &&
66106 +       statemap_setmapbit (map->changemap1, offset >>= BT_ULSHIFT, 1))
66107 +      statemap_setmapbit (map->changemap0, offset >>= BT_ULSHIFT, 1);
66108 +}
66109 +
66110 +bitmap_t
66111 +statemap_getbits (statemap_t *map, unsigned int offset, int nbits)
66112 +{
66113 +   int      index = offset >> BT_ULSHIFT;
66114 +   bitmap_t mask  = (nbits == BT_NBIPUL) ? (bitmap_t) -1 : (((bitmap_t)1) << nbits) - 1;
66115 +   
66116 +   ASSERT (nbits <= BT_NBIPUL);
66117 +   ASSERT (offset + nbits <= map->size);
66118 +
66119 +   offset &= BT_ULMASK;
66120 +   if (offset + nbits <= BT_NBIPUL)
66121 +      return ((map->bitmap[index] >> offset) & mask);
66122 +   
66123 +   return (((map->bitmap[index] >> offset) |
66124 +           (map->bitmap[index + 1] << (BT_NBIPUL - offset))) & mask);
66125 +}
66126 +
66127 +void
66128 +statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits)
66129 +{
66130 +   int      index = offset >> BT_ULSHIFT;
66131 +   bitmap_t mask;
66132 +   bitmap_t seg;
66133 +   bitmap_t newseg;
66134 +
66135 +   ASSERT (nbits <= BT_NBIPUL);
66136 +   ASSERT (offset + nbits <= map->size);
66137 +
66138 +   offset &= BT_ULMASK;
66139 +   if (offset + nbits <= BT_NBIPUL)
66140 +   {
66141 +      mask = ((nbits == BT_NBIPUL) ? -1 : ((((bitmap_t)1) << nbits) - 1)) << offset;
66142 +      seg = map->bitmap[index];
66143 +      newseg = ((bits << offset) & mask) | (seg & ~mask);
66144 +      
66145 +      if (seg == newseg)
66146 +        return;
66147 +   
66148 +      map->bitmap[index] = newseg;
66149 +      
66150 +      if (statemap_setmapbit (map->changemap2, index,       1) &&
66151 +         statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1))
66152 +        statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1);
66153 +      return;
66154 +   }
66155 +   
66156 +   mask = ((bitmap_t)-1) << offset;
66157 +   seg = map->bitmap[index];
66158 +   newseg = ((bits << offset) & mask) | (seg & ~mask);
66159 +
66160 +   if (seg != newseg)
66161 +   {
66162 +      map->bitmap[index] = newseg;
66163 +      
66164 +      if (statemap_setmapbit (map->changemap2, index,       1) &&
66165 +         statemap_setmapbit (map->changemap1, index >> BT_ULSHIFT, 1))
66166 +        statemap_setmapbit (map->changemap0, index >> (2 * BT_ULSHIFT), 1);
66167 +   }
66168 +   
66169 +   index++;
66170 +   offset = BT_NBIPUL - offset;
66171 +   mask = (((bitmap_t)1) << (nbits - offset)) - 1;
66172 +   seg = map->bitmap[index];
66173 +   newseg = ((bits >> offset) & mask) | (seg & ~mask);
66174 +   
66175 +   if (seg == newseg)
66176 +      return;
66177 +   
66178 +   map->bitmap[index] = newseg;
66179 +   
66180 +   if (statemap_setmapbit (map->changemap2, index,       1) &&
66181 +       statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1))
66182 +      statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1);
66183 +}
66184 +
66185 +void
66186 +statemap_zero (statemap_t *dst)
66187 +{
66188 +   int       size       = dst->size;
66189 +   int       offset     = 0;
66190 +   bitmap_t *changemap0 = dst->changemap0;
66191 +   bitmap_t *changemap1 = dst->changemap1;
66192 +   bitmap_t *changemap2 = dst->changemap2;
66193 +   bitmap_t *dstmap     = dst->bitmap;
66194 +   bitmap_t  bit0;
66195 +   bitmap_t  bit1;
66196 +   bitmap_t  bit2;
66197 +
66198 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
66199 +   {
66200 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
66201 +      {
66202 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, offset += BT_NBIPUL)
66203 +        {
66204 +            *dstmap = 0;
66205 +            *changemap2 |= bit2;
66206 +        }
66207 +        *changemap1 |= bit1;
66208 +      }
66209 +      *changemap0 |= bit0;
66210 +   }
66211 +}
66212 +   
66213 +void
66214 +statemap_setmap (statemap_t *dst, statemap_t *src)
66215 +{
66216 +   int       size       = dst->size;
66217 +   int       offset     = 0;
66218 +   bitmap_t *changemap0 = dst->changemap0;
66219 +   bitmap_t *changemap1 = dst->changemap1;
66220 +   bitmap_t *changemap2 = dst->changemap2;
66221 +   bitmap_t *dstmap     = dst->bitmap;
66222 +   bitmap_t *srcmap     = src->bitmap;
66223 +   bitmap_t  bit0;
66224 +   bitmap_t  bit1;
66225 +   bitmap_t  bit2;
66226 +
66227 +   ASSERT (src->size == size);
66228 +   
66229 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
66230 +   {
66231 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
66232 +      {
66233 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL)
66234 +           if (*dstmap != *srcmap)
66235 +           {
66236 +              *dstmap = *srcmap;
66237 +              *changemap2 |= bit2;
66238 +           }
66239 +        if (*changemap2 != 0)
66240 +           *changemap1 |= bit1;
66241 +      }
66242 +      if (*changemap1 != 0)
66243 +        *changemap0 |= bit0;
66244 +   }
66245 +}
66246 +
66247 +void
66248 +statemap_ormap (statemap_t *dst, statemap_t *src)
66249 +{
66250 +   int       size       = dst->size;
66251 +   int       offset     = 0;
66252 +   bitmap_t *changemap0 = dst->changemap0;
66253 +   bitmap_t *changemap1 = dst->changemap1;
66254 +   bitmap_t *changemap2 = dst->changemap2;
66255 +   bitmap_t *dstmap     = dst->bitmap;
66256 +   bitmap_t *srcmap     = src->bitmap;
66257 +   bitmap_t  bit0;
66258 +   bitmap_t  bit1;
66259 +   bitmap_t  bit2;
66260 +   bitmap_t  seg;
66261 +
66262 +   ASSERT (src->size == size);
66263 +   
66264 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
66265 +   {
66266 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
66267 +      {
66268 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL)
66269 +        {
66270 +           seg = *dstmap | *srcmap;
66271 +           if (*dstmap != seg)
66272 +           {
66273 +              *dstmap = seg;
66274 +              *changemap2 |= bit2;
66275 +           }
66276 +        }
66277 +        if (*changemap2 != 0)
66278 +           *changemap1 |= bit1;
66279 +      }
66280 +      if (*changemap1 != 0)
66281 +        *changemap0 |= bit0;
66282 +   }
66283 +}
66284 +
66285 +int
66286 +statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange)
66287 +{
66288 +   int          bit0;
66289 +   bitmap_t    *cm1;
66290 +   int          bit1;
66291 +   bitmap_t    *cm2;
66292 +   int          bit2;
66293 +   unsigned int offset;
66294 +
66295 +   bit0 = statemap_firstsegbit (*(map->changemap0));
66296 +   if (bit0 < 0)
66297 +      return (-1);
66298 +
66299 +   offset = bit0;
66300 +   cm1 = map->changemap1 + offset;
66301 +   bit1 = statemap_firstsegbit (*cm1);
66302 +   ASSERT (bit1 >= 0);
66303 +
66304 +   offset = (offset << BT_ULSHIFT) + bit1;
66305 +   cm2 = map->changemap2 + offset;
66306 +   bit2 = statemap_firstsegbit (*cm2);
66307 +   ASSERT (bit2 >= 0);
66308 +   
66309 +   offset = (offset << BT_ULSHIFT) + bit2;
66310 +   *newseg = map->bitmap[offset];
66311 +
66312 +   if (clearchange &&
66313 +       (*cm2 &= ~(((bitmap_t)1) << bit2)) == 0 &&
66314 +       (*cm1 &= ~(((bitmap_t)1) << bit1)) == 0)
66315 +      map->changemap0[0] &= ~(((bitmap_t)1) << bit0);
66316 +
66317 +   return (offset << BT_ULSHIFT);
66318 +}
66319 +
66320 +int
66321 +statemap_changed (statemap_t *map)
66322 +{
66323 +   return ((*(map->changemap0) != 0));
66324 +}
66325 +
66326 +void
66327 +statemap_reset (statemap_t *map)
66328 +{
66329 +   bzero (map->changemap0, map->changemap_nob + map->bitmap_nob);
66330 +}
66331 +
66332 +void
66333 +statemap_copy (statemap_t *dst, statemap_t *src)
66334 +{
66335 +   ASSERT (dst->size == src->size);
66336 +   bcopy (src->changemap0, dst->changemap0, src->changemap_nob + src->bitmap_nob);
66337 +}
66338 +
66339 +void
66340 +statemap_clearchanges (statemap_t *map)
66341 +{
66342 +   if (statemap_changed (map))
66343 +      bzero (map->changemap0, map->changemap_nob);
66344 +}
66345 +
66346 +bitmap_t *
66347 +statemap_tobitmap (statemap_t *map)
66348 +{
66349 +    return (map->bitmap);
66350 +}
66351 +
66352 +statemap_t *
66353 +statemap_create (int size)
66354 +{
66355 +   int   struct_entries     = (sizeof (statemap_t) * 8 + (BT_NBIPUL-1)) >> BT_ULSHIFT;
66356 +   int   bitmap_entries     = (size + (BT_NBIPUL-1)) >> BT_ULSHIFT;
66357 +   int   changemap2_entries = (bitmap_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
66358 +   int   changemap1_entries = (changemap2_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
66359 +   int   changemap0_entries = (changemap1_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
66360 +   int   changemap_entries  = changemap0_entries + changemap1_entries + changemap2_entries;
66361 +   int   nob                = (struct_entries + bitmap_entries + changemap_entries) * sizeof (bitmap_t);
66362 +   statemap_t *map;
66363 +
66364 +   ASSERT ((1 << BT_ULSHIFT) == BT_NBIPUL);
66365 +   ASSERT (changemap0_entries == 1);
66366 +
66367 +   KMEM_ZALLOC (map, statemap_t *, nob, 1);
66368 +
66369 +   map->size = size;
66370 +   map->nob  = nob;
66371 +   map->changemap_nob = changemap_entries * sizeof (bitmap_t);
66372 +   map->bitmap_nob = bitmap_entries * sizeof (bitmap_t);
66373 +   map->changemap0 = ((bitmap_t *)map) + struct_entries;
66374 +   map->changemap1 = map->changemap0 + changemap0_entries;
66375 +   map->changemap2 = map->changemap1 + changemap1_entries;
66376 +   map->bitmap     = map->changemap2 + changemap2_entries;
66377 +
66378 +   return (map);
66379 +}
66380 +
66381 +void
66382 +statemap_destroy (statemap_t *map)
66383 +{
66384 +   KMEM_FREE (map, map->nob);
66385 +}
66386 +
66387 +/*
66388 + * Local variables:
66389 + * c-file-style: "stroustrup"
66390 + * End:
66391 + */
66392 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/statusmon.h
66393 ===================================================================
66394 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/statusmon.h     2004-02-23 16:02:56.000000000 -0500
66395 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/statusmon.h  2005-07-28 14:52:52.904670224 -0400
66396 @@ -0,0 +1,44 @@
66397 +/*
66398 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
66399 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
66400 + *
66401 + *    For licensing information please see the supplied COPYING file
66402 + *
66403 + */
66404 +
66405 +#ident "@(#)$Id: statusmon.h,v 1.6 2003/10/07 13:22:38 david Exp $"
66406 +/*      $Source: /cvs/master/quadrics/epmod/statusmon.h,v $*/
66407 +
66408 +#ifndef __ELAN3_STATUSMON_H
66409 +#define __ELAN3_STATUSMON_H
66410 +
66411 +typedef struct statusmon_node
66412 +{
66413 +    u_int      NodeId;
66414 +    u_int      State;
66415 +} STATUSMON_SGMT;
66416 +
66417 +typedef struct statusmon_level
66418 +{
66419 +    unsigned      Width;
66420 +    STATUSMON_SGMT Nodes[CM_SGMTS_PER_LEVEL];
66421 +} STATUSMON_LEVEL;
66422 +
66423 +typedef struct statusmon_msg
66424 +{
66425 +    unsigned       Type;
66426 +    unsigned       NodeId;
66427 +    unsigned       NumLevels;
66428 +    unsigned       TopLevel;
66429 +    unsigned        Role;
66430 +    STATUSMON_LEVEL Levels[CM_MAX_LEVELS];
66431 +} STATUSMON_MSG;
66432 +
66433 +
66434 +#endif /* __ELAN3_STATUSMON_H */
66435 +
66436 +/*
66437 + * Local variables:
66438 + * c-file-style: "stroustrup"
66439 + * End:
66440 + */
66441 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/support.c
66442 ===================================================================
66443 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/support.c       2004-02-23 16:02:56.000000000 -0500
66444 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/support.c    2005-07-28 14:52:52.904670224 -0400
66445 @@ -0,0 +1,109 @@
66446 +/*
66447 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
66448 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
66449 + *
66450 + *    For licensing information please see the supplied COPYING file
66451 + *
66452 + */
66453 +
66454 +#ident "@(#)$Id: support.c,v 1.37.8.1 2004/09/30 15:01:53 david Exp $"
66455 +/*      $Source: /cvs/master/quadrics/epmod/support.c,v $ */
66456 +
66457 +#include <qsnet/kernel.h>
66458 +#include <elan/kcomm.h>
66459 +
66460 +/****************************************************************************************/
66461 +/*
66462 + * Nodeset/flush callbacks.
66463 + */
66464 +int
66465 +ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg)
66466 +{
66467 +    EP_CALLBACK *cb;
66468 +    
66469 +    KMEM_ALLOC (cb, EP_CALLBACK *, sizeof (EP_CALLBACK), 1);
66470 +    
66471 +    cb->Routine = routine;
66472 +    cb->Arg     = arg;
66473 +
66474 +    kmutex_lock (&rail->CallbackLock);
66475 +    cb->Next = rail->CallbackList[idx];
66476 +    rail->CallbackList[idx] = cb;
66477 +    kmutex_unlock (&rail->CallbackLock);
66478 +    
66479 +    return (ESUCCESS);
66480 +}
66481 +
66482 +void
66483 +ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg)
66484 +{
66485 +    EP_CALLBACK  *cb;
66486 +    EP_CALLBACK **predp;
66487 +
66488 +    kmutex_lock (&rail->CallbackLock);
66489 +    for (predp = &rail->CallbackList[idx]; (cb = *predp); predp = &cb->Next)
66490 +       if (cb->Routine == routine && cb->Arg == arg)
66491 +           break;
66492 +
66493 +    if (cb == NULL)
66494 +       panic ("ep_remove_member_callback");
66495 +    
66496 +    *predp = cb->Next;
66497 +    kmutex_unlock (&rail->CallbackLock);
66498 +    
66499 +    KMEM_FREE (cb, sizeof (EP_CALLBACK));
66500 +}
66501 +
66502 +void
66503 +ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *map)
66504 +{
66505 +    EP_CALLBACK *cb;
66506 +
66507 +    kmutex_lock (&rail->CallbackLock);
66508 +
66509 +    rail->CallbackStep = idx;
66510 +
66511 +    for (cb = rail->CallbackList[idx]; cb; cb = cb->Next) {
66512 +       (cb->Routine) (cb->Arg, map);
66513 +    }
66514 +    kmutex_unlock (&rail->CallbackLock);
66515 +}
66516 +
66517 +unsigned int
66518 +ep_backoff (EP_BACKOFF *backoff, int type)
66519 +{
66520 +    static int bcount[EP_NUM_BACKOFF] = {1, 16, 32, 64, 128, 256, 512, 1024};
66521 +    
66522 +    if (backoff->type != type)
66523 +    {
66524 +       backoff->type  = type;
66525 +       backoff->indx  = 0;
66526 +       backoff->count = 0;
66527 +    }
66528 +
66529 +    if (++backoff->count > bcount[backoff->indx] && backoff->indx < (EP_NUM_BACKOFF-1))
66530 +    {
66531 +       backoff->indx++;
66532 +       backoff->count = 0;
66533 +    }
66534 +
66535 +    return (backoff->indx);
66536 +}
66537 +
66538 +/* Generic checksum algorithm */
66539 +uint16_t
66540 +CheckSum (char *msg, int nob)
66541 +{
66542 +    uint16_t sum = 0;
66543 +   
66544 +    while (nob-- > 0)
66545 +       sum = sum * 13 + *msg++;
66546 +
66547 +    return (sum);
66548 +}
66549 +
66550 +/*
66551 + * Local variables:
66552 + * c-file-style: "stroustrup"
66553 + * End:
66554 + */
66555 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/support_elan3.c
66556 ===================================================================
66557 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/support_elan3.c 2004-02-23 16:02:56.000000000 -0500
66558 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/support_elan3.c      2005-07-28 14:52:52.908669616 -0400
66559 @@ -0,0 +1,2111 @@
66560 +/*
66561 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
66562 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
66563 + *
66564 + *    For licensing information please see the supplied COPYING file
66565 + *
66566 + */
66567 +
66568 +#ident "@(#)$Id: support_elan3.c,v 1.42.8.3 2004/11/12 10:54:51 mike Exp $"
66569 +/*      $Source: /cvs/master/quadrics/epmod/support_elan3.c,v $ */
66570 +
66571 +#include <qsnet/kernel.h>
66572 +#include <qsnet/kthread.h>
66573 +
66574 +#include <elan/kcomm.h>
66575 +#include <elan/epsvc.h>
66576 +#include <elan/epcomms.h>
66577 +
66578 +#include "kcomm_vp.h"
66579 +#include "kcomm_elan3.h"
66580 +#include "epcomms_elan3.h"
66581 +#include "debug.h"
66582 +
66583 +#include <elan3/thread.h>
66584 +#include <elan3/urom_addrs.h>
66585 +
66586 +/****************************************************************************************/
66587 +#define DMA_RING_NEXT_POS(ring)      ((ring)->Position+1 == ring->Entries ? 0 : ((ring)->Position+1))
66588 +#define DMA_RING_PREV_POS(ring,pos)  ((pos) == 0 ? (ring)->Entries-1 : (pos) - 1)
66589 +
66590 +static int 
66591 +DmaRingCreate (EP3_RAIL *rail, EP3_DMA_RING *ring, int ctxnum, int entries)
66592 +{
66593 +    unsigned long pgnum = (ctxnum * sizeof (E3_CommandPort)) / PAGE_SIZE;
66594 +    unsigned long pgoff = (ctxnum * sizeof (E3_CommandPort)) & (PAGE_SIZE-1);
66595 +    int           s;    
66596 +        
66597 +    /* set up the initial position */
66598 +    ring->Entries  = entries;
66599 +    ring->Position = 0;
66600 +    
66601 +    if (! (ring->pEvent = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_BlockCopyEvent), 0, &ring->epEvent)))
66602 +    {
66603 +       ring->CommandPort = (ioaddr_t) NULL;
66604 +       return (ENOMEM);
66605 +    }
66606 +    
66607 +    if (! (ring->pDma = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_DMA), 0, &ring->epDma)))
66608 +    {
66609 +       ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent));
66610 +
66611 +       ring->CommandPort = (ioaddr_t) NULL;
66612 +       return (ENOMEM);
66613 +    }
66614 +    
66615 +    if (! (ring->pDoneBlk = ep_alloc_main (&rail->Generic, entries * sizeof (E3_uint32), 0, &ring->epDoneBlk)))
66616 +    {
66617 +       ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent));
66618 +       ep_free_elan (&rail->Generic, ring->epDma,   entries * sizeof (E3_DMA));
66619 +
66620 +       ring->CommandPort = (ioaddr_t) NULL;
66621 +       return (ENOMEM);
66622 +    }
66623 +    
66624 +    if (MapDeviceRegister (rail->Device, ELAN3_BAR_COMMAND_PORT, &ring->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ring->CommandPageHandle) != ESUCCESS)
66625 +    {
66626 +       ep_free_elan (&rail->Generic, ring->epEvent,   entries * sizeof (E3_BlockCopyEvent));
66627 +       ep_free_elan (&rail->Generic, ring->epDma,     entries * sizeof (E3_DMA));
66628 +       ep_free_main (&rail->Generic, ring->epDoneBlk, entries * sizeof (E3_uint32));
66629 +
66630 +       ring->CommandPort = (ioaddr_t) NULL;
66631 +       return (ENOMEM);
66632 +    }
66633 +    ring->CommandPort = ring->CommandPage + pgoff;
66634 +       
66635 +    for (s = 0; s < entries; s++)
66636 +    {
66637 +       /* setup the event */
66638 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Type),   
66639 +                          EV_TYPE_BCOPY | EV_TYPE_DMA | DMA_RING_DMA_ELAN(ring, s));
66640 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Source), DMA_RING_DMA_ELAN(ring,s)  | EV_WCOPY);
66641 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Dest),   DMA_RING_DONE_ELAN(ring,s) | EV_TYPE_BCOPY_WORD );         
66642 +
66643 +       /* need to set all the doneBlks to appear that they have completed */
66644 +       ring->pDoneBlk[s] = DMA_RING_DMA_ELAN(ring,s)  | EV_WCOPY;
66645 +    }
66646 +
66647 +    return 0; /* success */
66648 +}
66649 +
66650 +static void
66651 +DmaRingRelease(EP3_RAIL *rail, EP3_DMA_RING *ring)
66652 +{
66653 +    if (ring->CommandPage != (ioaddr_t) 0)
66654 +    {
66655 +       UnmapDeviceRegister(rail->Device, &ring->CommandPageHandle);
66656 +
66657 +       ep_free_elan (&rail->Generic, ring->epEvent,   ring->Entries * sizeof (E3_BlockCopyEvent));
66658 +       ep_free_elan (&rail->Generic, ring->epDma,     ring->Entries * sizeof (E3_DMA));
66659 +       ep_free_main (&rail->Generic, ring->epDoneBlk, ring->Entries * sizeof (E3_uint32));
66660 +    }
66661 +    ring->CommandPage = (ioaddr_t) 0;
66662 +}
66663 +
66664 +void 
66665 +DmaRingsRelease (EP3_RAIL *rail)
66666 +{
66667 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_CRITICAL]);
66668 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_HIGH_PRI]);
66669 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_LOW_PRI]);
66670 +}
66671 +
66672 +int 
66673 +DmaRingsCreate (EP3_RAIL *rail)
66674 +{
66675 +    if (DmaRingCreate (rail, &rail->DmaRings[EP3_RING_CRITICAL], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_CRITICAL, EP3_RING_CRITICAL_LEN) ||
66676 +       DmaRingCreate (rail, &rail->DmaRings[EP3_RING_HIGH_PRI], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_HIGH_PRI, EP3_RING_HIGH_PRI_LEN) ||
66677 +       DmaRingCreate (rail, &rail->DmaRings[EP3_RING_LOW_PRI],  ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_LOW_PRI,  EP3_RING_LOW_PRI_LEN))
66678 +    {
66679 +       DmaRingsRelease (rail);
66680 +       return (ENOMEM);
66681 +    }
66682 +  
66683 +    return 0;
66684 +}
66685 +
66686 +static int 
66687 +DmaRingNextSlot (EP3_DMA_RING *ring)
66688 +{
66689 +    int pos  = ring->Position;
66690 +    int npos = DMA_RING_NEXT_POS(ring);
66691 +
66692 +    if (ring->pDoneBlk[npos] == EP3_EVENT_ACTIVE)
66693 +       return (-1);
66694 +    
66695 +    ring->pDoneBlk[pos] = EP3_EVENT_ACTIVE;
66696 +
66697 +    ring->Position = npos; /* move on one */
66698 +
66699 +    return (pos);
66700 +}
66701 +
66702 +
66703 +/****************************************************************************************/
66704 +/*
66705 + * Dma/event command issuing - these handle cproc queue overflow traps.
66706 + */
66707 +static int
66708 +DmaRunQueueSizeCheck (EP3_RAIL *rail, E3_uint32 len)
66709 +{
66710 +    E3_uint64  FandBPtr = read_reg64 (rail->Device, DProc_SysCntx_FPtr);
66711 +    E3_uint32  FPtr, BPtr;
66712 +    E3_uint32  qlen;
66713 +
66714 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
66715 +    FPtr = (FandBPtr & 0xFFFFFFFFull);
66716 +    BPtr = (FandBPtr >> 32);
66717 +#else
66718 +    FPtr = (FandBPtr >> 32);
66719 +    BPtr = (FandBPtr & 0xFFFFFFFFull);
66720 +#endif
66721 +    
66722 +    qlen = (((BPtr - FPtr)/sizeof (E3_DMA)) & (E3_SysCntxQueueSize-1));
66723 +    
66724 +    if      (qlen < 4)   IncrStat (rail, DmaQueueLength[0]);
66725 +    else if (qlen < 8)   IncrStat (rail, DmaQueueLength[1]);
66726 +    else if (qlen < 16)  IncrStat (rail, DmaQueueLength[2]);
66727 +    else if (qlen < 32)  IncrStat (rail, DmaQueueLength[3]);
66728 +    else if (qlen < 64)  IncrStat (rail, DmaQueueLength[4]);
66729 +    else if (qlen < 128) IncrStat (rail, DmaQueueLength[5]);
66730 +    else if (qlen < 240) IncrStat (rail, DmaQueueLength[6]);
66731 +    else                 IncrStat (rail, DmaQueueLength[7]);
66732 +       
66733 +    return (qlen < len);
66734 +}
66735 +
66736 +int
66737 +IssueDma (EP3_RAIL *rail, E3_DMA_BE * dmabe, int type, int retryThread)
66738 +{
66739 +    ELAN3_DEV     *dev = rail->Device;
66740 +    EP3_RETRY_DMA *retry;
66741 +    EP3_DMA_RING  *ring;
66742 +    int           slot;
66743 +    int           i, res;
66744 +    unsigned long flags;
66745 +
66746 +    ASSERT (dmabe->s.dma_direction == DMA_WRITE || dmabe->s.dma_direction == DMA_READ_REQUEUE);
66747 +
66748 +    ASSERT (! EP_VP_ISDATA(dmabe->s.dma_destVProc) ||
66749 +           (dmabe->s.dma_direction == DMA_WRITE ? 
66750 +            EP_VP_TO_NODE(dmabe->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
66751 +            EP_VP_TO_NODE(dmabe->s.dma_destVProc) == rail->Generic.Position.pos_nodeid));
66752 +    
66753 +    /*
66754 +     * If we're not the retry thread - then don't issue this DMA
66755 +     * if there are any already queued on the retry lists with
66756 +     * higher or equal priority than this one that are ready to
66757 +     * retry.
66758 +     */
66759 +    if (! retryThread)
66760 +    {
66761 +       for (i = EP_RETRY_BASE; i < type; i++)
66762 +       {
66763 +           if (list_empty (&rail->DmaRetries[i]))
66764 +               continue;
66765 +
66766 +           retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
66767 +               
66768 +           if (AFTER (lbolt, retry->RetryTime))
66769 +           {
66770 +               IncrStat (rail, IssueDmaFail[type]);
66771 +               return (ISSUE_COMMAND_RETRY);
66772 +           }
66773 +       }
66774 +    }
66775 +
66776 +    /*
66777 +     * Depending on the type of DMA we're issuing - throttle back
66778 +     * issuing of it if the DMA run queue is too full.  This then
66779 +     * prioritises the "special" messages and completing data 
66780 +     * transfers which have matched a receive buffer.
66781 +     */
66782 +
66783 +    if (type >= EP_RETRY_LOW_PRI_RETRY)
66784 +    {
66785 +       if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 2))
66786 +       {
66787 +           IncrStat (rail, IssueDmaFail[type]);
66788 +           return (ISSUE_COMMAND_RETRY);
66789 +       }
66790 +       ring = &rail->DmaRings[EP3_RING_LOW_PRI];
66791 +    } 
66792 +    else if (type == EP_RETRY_LOW_PRI)
66793 +    {
66794 +       if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 3))
66795 +       {
66796 +           IncrStat (rail, IssueDmaFail[type]);
66797 +           return (ISSUE_COMMAND_RETRY);
66798 +       }
66799 +       ring = &rail->DmaRings[EP3_RING_LOW_PRI];
66800 +    }
66801 +    else if (type >= EP_RETRY_HIGH_PRI)
66802 +       ring = &rail->DmaRings[EP3_RING_HIGH_PRI];
66803 +    else
66804 +       ring = &rail->DmaRings[EP3_RING_CRITICAL];
66805 +
66806 +    local_irq_save (flags);
66807 +    if (! spin_trylock (&dev->CProcLock))
66808 +    {
66809 +       IncrStat (rail, IssueDmaFail[type]);
66810 +
66811 +       res = ISSUE_COMMAND_RETRY;
66812 +    }
66813 +    else
66814 +    {
66815 +       if ((slot = DmaRingNextSlot (ring)) == -1)
66816 +       {
66817 +           IncrStat (rail, IssueDmaFail[type]);
66818 +           
66819 +           res = ISSUE_COMMAND_RETRY;
66820 +       }
66821 +       else
66822 +       {
66823 +           EPRINTF4 (DBG_COMMAND, "IssueDma: type %08x size %08x Elan source %08x Elan dest %08x\n",
66824 +                     dmabe->s.dma_type, dmabe->s.dma_size, dmabe->s.dma_source, dmabe->s.dma_dest);
66825 +           EPRINTF2 (DBG_COMMAND, "          dst event %08x cookie/proc %08x\n",
66826 +                     dmabe->s.dma_destEvent, dmabe->s.dma_destCookieVProc);
66827 +           EPRINTF2 (DBG_COMMAND, "          src event %08x cookie/proc %08x\n",
66828 +                     dmabe->s.dma_srcEvent, dmabe->s.dma_srcCookieVProc);
66829 +
66830 +           elan3_sdram_copyq_to_sdram (dev,  dmabe,  DMA_RING_DMA(ring, slot), sizeof (E3_DMA));                       /* PCI write block */
66831 +           elan3_sdram_writel (dev, DMA_RING_EVENT(ring, slot) + offsetof (E3_BlockCopyEvent, ev_Count), 1);   /* PCI write */
66832 +           
66833 +           mb();                                                               /* ensure writes to main memory completed */
66834 +           writel (DMA_RING_EVENT_ELAN(ring,slot), ring->CommandPort + offsetof (E3_CommandPort, SetEvent));
66835 +           mmiob();                                                            /* and flush through IO writes */
66836 +           
66837 +           res = ISSUE_COMMAND_OK;
66838 +       }
66839 +       spin_unlock (&dev->CProcLock);
66840 +    }
66841 +    local_irq_restore (flags);
66842 +
66843 +    return (res);
66844 +}
66845 +
66846 +int
66847 +IssueWaitevent (EP3_RAIL *rail, E3_Addr value)
66848 +{
66849 +    ELAN3_DEV     *dev   = rail->Device;
66850 +    int           res;
66851 +    unsigned long flags;
66852 +    
66853 +    spin_lock_irqsave (&dev->IntrLock, flags);
66854 +
66855 +    ASSERT (rail->CommandPortEventTrap == FALSE);
66856 +
66857 +    /*
66858 +     * Disable the command processor interrupts, so that we don't see
66859 +     * spurious interrupts appearing.
66860 +     */
66861 +    DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
66862 +
66863 +    EPRINTF1 (DBG_COMMAND, "IssueWaitevent: %08x\n", value);
66864 +
66865 +    mb();                                                              /* ensure writes to main memory completed */
66866 +    writel (value, rail->CommandPort + offsetof (E3_CommandPort, WaitEvent0));
66867 +    mmiob();                                                           /* and flush through IO writes */
66868 +    
66869 +    do {
66870 +       res = CheckCommandQueueFlushed (rail->Ctxt, EventComQueueNotEmpty, ISSUE_COMMAND_CANT_WAIT, &flags);
66871 +
66872 +       EPRINTF1 (DBG_COMMAND, "IssueWaitevent: CheckCommandQueueFlushed -> %d\n", res);
66873 +
66874 +       if (res == ISSUE_COMMAND_WAIT)
66875 +           HandleCProcTrap (dev, 0, NULL);
66876 +    } while (res != ISSUE_COMMAND_OK);
66877 +
66878 +    if (! rail->CommandPortEventTrap)
66879 +       res = ISSUE_COMMAND_OK;
66880 +    else
66881 +    {
66882 +       rail->CommandPortEventTrap = FALSE;
66883 +       res = ISSUE_COMMAND_TRAPPED;
66884 +    }
66885 +
66886 +    EPRINTF1 (DBG_COMMAND, "IssueWaitevent: -> %d\n", res);
66887 +
66888 +    /*
66889 +     * Re-enable the command processor interrupt as we've finished 
66890 +     * polling it.
66891 +     */
66892 +    ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
66893 +
66894 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
66895 +
66896 +    return (res);
66897 +}
66898 +
66899 +void
66900 +IssueSetevent (EP3_RAIL *rail, E3_Addr value)
66901 +{
66902 +    EPRINTF1 (DBG_COMMAND, "IssueSetevent: %08x\n", value);
66903 +
66904 +    mb();                                                              /* ensure writes to main memory completed */
66905 +    writel (value, rail->CommandPort + offsetof (E3_CommandPort, SetEvent));
66906 +    mmiob();                                                           /* and flush through IO writes */
66907 +}
66908 +
66909 +void
66910 +IssueRunThread (EP3_RAIL *rail, E3_Addr value)
66911 +{
66912 +    EPRINTF1 (DBG_COMMAND, "IssueRunThread: %08x\n", value);
66913 +
66914 +    mb();                                                              /* ensure writes to main memory completed */
66915 +    writel (value, rail->CommandPort + offsetof (E3_CommandPort, RunThread));
66916 +    mmiob();                                                           /* and flush through IO writes */
66917 +}
66918 +
66919 +/****************************************************************************************/
66920 +/*
66921 + * DMA retry list management
66922 + */
66923 +static unsigned DmaRetryTimes[EP_NUM_RETRIES]; 
66924 +
66925 +static void
66926 +ep3_dma_retry (EP3_RAIL *rail)
66927 +{
66928 +    EP3_COOKIE    *cp;
66929 +    int            res;
66930 +    int                   vp;
66931 +    unsigned long  flags;
66932 +    int            i;
66933 +
66934 +    kernel_thread_init("ep3_dma_retry");
66935 +
66936 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
66937 +
66938 +    for (;;)
66939 +    {
66940 +       long yieldAt   = lbolt + (hz/10);
66941 +       long retryTime = 0;
66942 +
66943 +       if (rail->DmaRetryThreadShouldStop)
66944 +           break;
66945 +       
66946 +       for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
66947 +       {
66948 +           while (! list_empty (&rail->DmaRetries[i]))
66949 +           {
66950 +               EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
66951 +
66952 +               if (! AFTER (lbolt, retry->RetryTime))
66953 +                   break;
66954 +               
66955 +               if (rail->DmaRetryThreadShouldStall || AFTER (lbolt, yieldAt))
66956 +                   goto cant_do_more;
66957 +
66958 +               EPRINTF2 (DBG_RETRY, "%s: DmaRetryThread: retry %p\n", rail->Generic.Name, retry);
66959 +               EPRINTF5 (DBG_RETRY, "%s:                 %08x %08x %08x %08x\n",
66960 +                         rail->Generic.Name, retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest);
66961 +               EPRINTF5 (DBG_RETRY, "%s:                 %08x %08x %08x %08x\n",
66962 +                         rail->Generic.Name, retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc,
66963 +                         retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc);
66964 +#if defined(DEBUG)
66965 +               if (retry->Dma.s.dma_direction == DMA_WRITE)
66966 +                   cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_srcEvent);
66967 +               else
66968 +                   cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_destEvent);
66969 +
66970 +               ASSERT (cp != NULL || (retry->Dma.s.dma_srcEvent == 0 && retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_isRemote));
66971 +               
66972 +               if (cp && cp->Operations->DmaVerify)
66973 +                   cp->Operations->DmaVerify (rail, cp->Arg, &retry->Dma);
66974 +#endif
66975 +
66976 +#if defined(DEBUG_ASSERT)
66977 +               if (retry->Dma.s.dma_direction == DMA_WRITE)
66978 +                   vp = retry->Dma.s.dma_destVProc;
66979 +               else
66980 +                   vp = retry->Dma.s.dma_srcVProc;
66981 +
66982 +               ASSERT (!EP_VP_ISDATA(vp) || 
66983 +                       (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED &&
66984 +                        rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE));
66985 +#endif
66986 +               spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
66987 +               res = IssueDma (rail, &(retry->Dma), i, TRUE);
66988 +               spin_lock_irqsave (&rail->DmaRetryLock, flags);
66989 +               
66990 +               if (res != ISSUE_COMMAND_OK)
66991 +                   goto cant_do_more;
66992 +               
66993 +               /* Command issued, so remove from list, and add to free list */
66994 +               list_del (&retry->Link);
66995 +               list_add (&retry->Link, &rail->DmaRetryFreeList);
66996 +           }
66997 +       }
66998 +    cant_do_more:
66999 +       
67000 +       for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
67001 +       {
67002 +           if (!list_empty (&rail->DmaRetries[i]))
67003 +           {
67004 +               EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
67005 +
67006 +               retryTime = retryTime ? MIN(retryTime, retry->RetryTime) : retry->RetryTime;
67007 +           }
67008 +       }
67009 +
67010 +       if (retryTime && !AFTER (retryTime, lbolt))
67011 +           retryTime = lbolt + 1;
67012 +
67013 +       do {
67014 +           EPRINTF3 (DBG_RETRY, "%s: ep_cm_retry: %s %lx\n", rail->Generic.Name, rail->DmaRetryThreadShouldStall ? "stalled" : "sleeping", retryTime);
67015 +           
67016 +           if (rail->DmaRetryTime == 0 || (retryTime != 0 && retryTime < rail->DmaRetryTime))
67017 +               rail->DmaRetryTime = retryTime;
67018 +           
67019 +           rail->DmaRetrySleeping = TRUE;
67020 +           
67021 +           if (rail->DmaRetryThreadShouldStall)                                        /* wakeup threads waiting in StallDmaRetryThread */
67022 +               kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);  /* for us to really go to sleep for good. */
67023 +
67024 +           if (rail->DmaRetryTime == 0 || rail->DmaRetryThreadShouldStall)
67025 +               kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
67026 +           else
67027 +               kcondvar_timedwait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags, rail->DmaRetryTime);
67028 +
67029 +           rail->DmaRetrySleeping = FALSE;
67030 +
67031 +       } while (rail->DmaRetryThreadShouldStall);
67032 +
67033 +       rail->DmaRetryTime = 0;
67034 +    }
67035 +
67036 +    rail->DmaRetryThreadStopped = 1;
67037 +    kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);
67038 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67039 +
67040 +    kernel_thread_exit();
67041 +}
67042 +
67043 +void
67044 +StallDmaRetryThread (EP3_RAIL *rail)
67045 +{
67046 +    unsigned long flags;
67047 +
67048 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67049 +    rail->DmaRetryThreadShouldStall++;
67050 +
67051 +    while (! rail->DmaRetrySleeping)
67052 +       kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
67053 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67054 +}
67055 +
67056 +void 
67057 +ResumeDmaRetryThread (EP3_RAIL *rail)
67058 +{
67059 +    unsigned long flags;
67060 +
67061 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67062 +
67063 +    ASSERT (rail->DmaRetrySleeping);
67064 +
67065 +    if (--rail->DmaRetryThreadShouldStall == 0)
67066 +    {
67067 +       rail->DmaRetrySleeping = 0;
67068 +       kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock);
67069 +    }
67070 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67071 +}
67072 +
67073 +int
67074 +InitialiseDmaRetries (EP3_RAIL *rail)
67075 +{
67076 +    int i;
67077 +
67078 +    spin_lock_init (&rail->DmaRetryLock);
67079 +    kcondvar_init (&rail->DmaRetryWait);
67080 +
67081 +    for (i = 0; i < EP_NUM_RETRIES; i++)
67082 +       INIT_LIST_HEAD (&rail->DmaRetries[i]);
67083 +
67084 +    INIT_LIST_HEAD (&rail->DmaRetryFreeList);
67085 +
67086 +    DmaRetryTimes[EP_RETRY_HIGH_PRI]  = EP_RETRY_HIGH_PRI_TIME;
67087 +
67088 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
67089 +       DmaRetryTimes[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i;
67090 +    
67091 +    DmaRetryTimes[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME;
67092 +
67093 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
67094 +       DmaRetryTimes[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i;
67095 +    
67096 +    DmaRetryTimes[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME;
67097 +    DmaRetryTimes[EP_RETRY_NETERR]    = EP_RETRY_NETERR_TIME;
67098 +
67099 +    rail->DmaRetryInitialised = 1;
67100 +
67101 +    if (kernel_thread_create (ep3_dma_retry, (void *) rail) == 0)
67102 +    {
67103 +       spin_lock_destroy (&rail->DmaRetryLock);
67104 +       return (ENOMEM);
67105 +    }
67106 +
67107 +    rail->DmaRetryThreadStarted = 1;
67108 +
67109 +    return (ESUCCESS);
67110 +}
67111 +
67112 +void
67113 +DestroyDmaRetries (EP3_RAIL *rail)
67114 +{
67115 +    unsigned long flags;
67116 +
67117 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67118 +    rail->DmaRetryThreadShouldStop = 1;
67119 +    while (rail->DmaRetryThreadStarted && !rail->DmaRetryThreadStopped)
67120 +    {
67121 +       kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);
67122 +       kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
67123 +    }
67124 +    rail->DmaRetryThreadStarted = 0;
67125 +    rail->DmaRetryThreadStopped = 0;
67126 +    rail->DmaRetryThreadShouldStop = 0;
67127 +    rail->DmaRetryInitialised = 0;
67128 +
67129 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67130 +
67131 +    /* Everyone should have given back their retry dma's by now */
67132 +    ASSERT (rail->DmaRetryReserved == 0);
67133 +
67134 +    while (! list_empty (&rail->DmaRetryFreeList))
67135 +    {
67136 +       EP3_RETRY_DMA *retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
67137 +       
67138 +       list_del (&retry->Link);
67139 +
67140 +       KMEM_FREE (retry, sizeof (EP3_RETRY_DMA));
67141 +    }
67142 +
67143 +    kcondvar_destroy (&rail->DmaRetryWait);
67144 +    spin_lock_destroy (&rail->DmaRetryLock);
67145 +}
67146 +
67147 +int
67148 +ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr)
67149 +{
67150 +    EP3_RETRY_DMA *retry;
67151 +    int                  remaining = count;
67152 +    unsigned long flags;
67153 +
67154 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67155 +    
67156 +    if (remaining <= (rail->DmaRetryCount - rail->DmaRetryReserved))
67157 +    {
67158 +       rail->DmaRetryReserved += remaining;
67159 +
67160 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67161 +       return (ESUCCESS);
67162 +    }
67163 +
67164 +    remaining -= (rail->DmaRetryCount - rail->DmaRetryReserved);
67165 +
67166 +    rail->DmaRetryReserved = rail->DmaRetryCount;
67167 +
67168 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67169 +
67170 +    while (remaining)
67171 +    {
67172 +       KMEM_ALLOC (retry, EP3_RETRY_DMA *, sizeof (EP3_RETRY_DMA), !(attr & EP_NO_SLEEP));
67173 +       
67174 +       if (retry == NULL)
67175 +           goto failed;
67176 +
67177 +       /* clear E3_DMA */
67178 +       bzero((char *)(&(retry->Dma.s)), sizeof(E3_DMA));
67179 +
67180 +       remaining--; 
67181 +
67182 +       spin_lock_irqsave (&rail->DmaRetryLock, flags);
67183 +
67184 +       list_add (&retry->Link, &rail->DmaRetryFreeList);
67185 +
67186 +       rail->DmaRetryCount++;
67187 +       rail->DmaRetryReserved++;
67188 +
67189 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67190 +    }
67191 +    return (ESUCCESS);
67192 +
67193 + failed:
67194 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67195 +    rail->DmaRetryReserved -= (count - remaining);
67196 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67197 +    return (ENOMEM);
67198 +}
67199 +
67200 +void
67201 +ReleaseDmaRetries (EP3_RAIL *rail, int count)
67202 +{
67203 +    unsigned long flags;
67204 +
67205 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67206 +    rail->DmaRetryReserved -= count;
67207 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67208 +}
67209 +
67210 +void
67211 +QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval)
67212 +{
67213 +    EP3_RETRY_DMA *retry;
67214 +    unsigned long flags;
67215 +
67216 +    /*
67217 +     * When requeueing DMAs they must never be "READ" dma's since
67218 +     * these would fetch the DMA descriptor from the retry descriptor
67219 +     */
67220 +    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE);
67221 +    ASSERT (dma->s.dma_direction == DMA_WRITE ? 
67222 +           EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
67223 +           EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid);
67224 +
67225 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67226 +    
67227 +    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList));
67228 +
67229 +    /* take an item off the free list */
67230 +    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
67231 +
67232 +    list_del (&retry->Link);
67233 +    
67234 +    EPRINTF5 (DBG_RETRY, "%s: QueueDmaForRetry: %08x %08x %08x %08x\n", rail->Generic.Name,
67235 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
67236 +    EPRINTF5 (DBG_RETRY, "%s:                   %08x %08x %08x %08x\n",rail->Generic.Name,
67237 +            dma->s.dma_destEvent, dma->s.dma_destCookieVProc,
67238 +            dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
67239 +
67240 +    /* copy the DMA into the retry descriptor */
67241 +    retry->Dma.s.dma_type            = dma->s.dma_type;
67242 +    retry->Dma.s.dma_size            = dma->s.dma_size;
67243 +    retry->Dma.s.dma_source          = dma->s.dma_source;
67244 +    retry->Dma.s.dma_dest            = dma->s.dma_dest;
67245 +    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent;
67246 +    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc;
67247 +    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent;
67248 +    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc;
67249 +
67250 +    retry->RetryTime = lbolt + DmaRetryTimes[interval];
67251 +
67252 +    /* chain onto the end of the appropriate retry list */
67253 +    list_add_tail (&retry->Link, &rail->DmaRetries[interval]);
67254 +
67255 +    /* now wakeup the retry thread */
67256 +    if (rail->DmaRetryTime == 0 || retry->RetryTime < rail->DmaRetryTime)
67257 +       rail->DmaRetryTime = retry->RetryTime;
67258 +    
67259 +    if (rail->DmaRetrySleeping && !rail->DmaRetryThreadShouldStall)
67260 +    {
67261 +       rail->DmaRetrySleeping = 0;
67262 +       kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock);
67263 +    }
67264 +
67265 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67266 +}
67267 +
67268 +void
67269 +QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma)
67270 +{
67271 +    EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[dma->s.dma_direction == DMA_WRITE ? 
67272 +                                                 EP_VP_TO_NODE(dma->s.dma_srcVProc) :
67273 +                                                 EP_VP_TO_NODE(dma->s.dma_destVProc)];
67274 +    EP3_RETRY_DMA *retry;
67275 +    unsigned long flags;
67276 +
67277 +    /*
67278 +     * When requeueing DMAs they must never be "READ" dma's since
67279 +     * these would fetch the DMA descriptor from the retry descriptor
67280 +     */
67281 +    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE);
67282 +    ASSERT (dma->s.dma_direction == DMA_WRITE ? 
67283 +           EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
67284 +           EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid);
67285 +
67286 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67287 +    
67288 +    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList));
67289 +
67290 +    /* take an item of the free list */
67291 +    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
67292 +
67293 +    list_del (&retry->Link);
67294 +    
67295 +    EPRINTF5 (DBG_RETRY, "%s: QueueDmaOnStalledList: %08x %08x %08x %08x\n", rail->Generic.Name,
67296 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
67297 +    EPRINTF5 (DBG_RETRY, "%s:                        %08x %08x %08x %08x\n", rail->Generic.Name,
67298 +             dma->s.dma_destEvent, dma->s.dma_destCookieVProc,
67299 +             dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
67300 +
67301 +    /* copy the DMA into the retry descriptor */
67302 +    retry->Dma.s.dma_type            = dma->s.dma_type;
67303 +    retry->Dma.s.dma_size            = dma->s.dma_size;
67304 +    retry->Dma.s.dma_source          = dma->s.dma_source;
67305 +    retry->Dma.s.dma_dest            = dma->s.dma_dest;
67306 +    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent;
67307 +    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc;
67308 +    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent;
67309 +    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc;
67310 +
67311 +    /* chain onto the node cancelled dma list */
67312 +    list_add_tail (&retry->Link, &nodeRail->StalledDmas);
67313 +
67314 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67315 +}
67316 +
67317 +void
67318 +FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId)
67319 +{
67320 +    EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[nodeId];
67321 +    struct list_head *el, *nel;
67322 +    unsigned long flags;
67323 +
67324 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67325 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
67326 +       list_del (el);
67327 +       list_add (el, &rail->DmaRetryFreeList);
67328 +    }
67329 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67330 +}
67331 +
67332 +/****************************************************************************************/
67333 +/*
67334 + * Connection management.
67335 + */
67336 +static void
67337 +DiscardingHaltOperation (ELAN3_DEV *dev, void *arg)
67338 +{
67339 +    EP3_RAIL *rail = (EP3_RAIL *) arg;
67340 +    unsigned long flags;
67341 +
67342 +    spin_lock_irqsave (&dev->IntrLock, flags);
67343 +    rail->HaltOpCompleted = 1;
67344 +    kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock);
67345 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
67346 +}
67347
67348 +typedef struct {
67349 +     EP3_RAIL  *rail;
67350 +    sdramaddr_t qaddr;
67351 +} SetQueueFullData;
67352
67353 +static void
67354 +SetQueueLockedOperation (ELAN3_DEV *dev, void *arg)
67355 +{
67356 +    SetQueueFullData *data =  (SetQueueFullData *) arg;
67357 +    unsigned long     flags;     
67358 +
67359 +    spin_lock_irqsave (&dev->IntrLock, flags);
67360 +
67361 +    elan3_sdram_writel  (dev, data->qaddr, E3_QUEUE_LOCKED | elan3_sdram_readl(dev, data->qaddr));
67362 +   
67363 +    data->rail->HaltOpCompleted = 1;
67364 +    kcondvar_wakeupall (&data->rail->HaltOpSleep, &dev->IntrLock);
67365 +
67366 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
67367 +}
67368 +
67369 +static void
67370 +FlushDmaQueuesHaltOperation (ELAN3_DEV *dev, void *arg)
67371 +{
67372 +    EP3_RAIL      *rail    = (EP3_RAIL *) arg;
67373 +    sdramaddr_t    FPtr, BPtr;
67374 +    sdramaddr_t           Base, Top;
67375 +    E3_DMA_BE      dma;
67376 +    EP_NODE_RAIL  *node;
67377 +    int            vp;
67378 +    unsigned long  flags;
67379 +
67380 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
67381 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
67382 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
67383 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
67384 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
67385 +    
67386 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
67387 +    BPtr =  read_reg32 (dev, DProc_SysCntx_BPtr);
67388 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
67389 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
67390 +    
67391 +    while (FPtr != BPtr)
67392 +    {
67393 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
67394 +       
67395 +       EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: %08x %08x %08x %08x\n", rail->Generic.Name,
67396 +                 dma.s.dma_type, dma.s.dma_size, dma.s.dma_source, dma.s.dma_dest);
67397 +       EPRINTF5 (DBG_DISCON, "%s:                              %08x %08x %08x %08x\n", rail->Generic.Name,
67398 +                 dma.s.dma_destEvent, dma.s.dma_destCookieVProc,
67399 +                dma.s.dma_srcEvent, dma.s.dma_srcCookieVProc);
67400 +       
67401 +       ASSERT ((dma.s.dma_u.s.Context & SYS_CONTEXT_BIT) != 0);
67402 +
67403 +       if (dma.s.dma_direction == DMA_WRITE)
67404 +           vp = dma.s.dma_destVProc;
67405 +       else
67406 +           vp = dma.s.dma_srcVProc;
67407 +       
67408 +       node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67409 +
67410 +       ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE));
67411 +
67412 +       if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE)
67413 +       {
67414 +           /*
67415 +            * This is a DMA going to the node which is being removed, 
67416 +            * so move it onto the node dma list where it will get
67417 +            * handled later.
67418 +            */
67419 +           EPRINTF1 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: move dma to cancelled list\n", rail->Generic.Name);
67420 +          
67421 +           if (dma.s.dma_direction != DMA_WRITE)
67422 +           {
67423 +               /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been 
67424 +                * modified by the elan to point at the dma in the rxd where it was issued
67425 +                * from */
67426 +               dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
67427 +           }
67428 +           
67429 +           QueueDmaOnStalledList (rail, &dma);
67430 +           
67431 +           /*
67432 +            * Remove the DMA from the queue by replacing it with one with
67433 +            * zero size and no events.
67434 +            *
67435 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
67436 +            * to mark the approriate run queue as empty.
67437 +            */
67438 +           dma.s.dma_type            = (SYS_CONTEXT_BIT << 16);
67439 +           dma.s.dma_size            = 0;
67440 +           dma.s.dma_source          = (E3_Addr) 0;
67441 +           dma.s.dma_dest            = (E3_Addr) 0;
67442 +           dma.s.dma_destEvent       = (E3_Addr) 0;
67443 +           dma.s.dma_destCookieVProc = 0;
67444 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
67445 +           dma.s.dma_srcCookieVProc  = 0;
67446 +           
67447 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
67448 +       }
67449 +
67450 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
67451 +    }
67452 +
67453 +    spin_lock_irqsave (&dev->IntrLock, flags);
67454 +    rail->HaltOpCompleted = 1;
67455 +    kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock);
67456 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
67457 +}
67458 +
67459 +void
67460 +SetQueueLocked (EP3_RAIL *rail, sdramaddr_t qaddr)
67461 +{
67462 +    ELAN3_DEV        *dev = rail->Device;
67463 +    SetQueueFullData  data;
67464 +    unsigned long     flags;
67465 +    
67466 +    /* Ensure that the context filter changes have been seen by halting
67467 +     * then restarting the inputters - this also ensures that any setevent
67468 +     * commands used to issue dma's have completed and any trap has been
67469 +     * handled. */
67470 +    data.rail  = rail;
67471 +    data.qaddr = qaddr;
67472 +
67473 +    kmutex_lock (&rail->HaltOpMutex);
67474 +    spin_lock_irqsave (&dev->IntrLock, flags);
67475 +    QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx | INT_TProcHalted, SetQueueLockedOperation, &data);
67476 +
67477 +    while (! rail->HaltOpCompleted)
67478 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
67479 +    rail->HaltOpCompleted = 0;
67480 +
67481 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
67482 +    kmutex_unlock (&rail->HaltOpMutex);
67483 +}
67484 +
67485 +void
67486 +ep3_flush_filters (EP_RAIL *r)
67487 +{
67488 +    EP3_RAIL *rail = (EP3_RAIL *) r;
67489 +    ELAN3_DEV *dev  = rail->Device;
67490 +    unsigned long flags;
67491 +
67492 +    /* Ensure that the context filter changes have been seen by halting
67493 +     * then restarting the inputters - this also ensures that any setevent
67494 +     * commands used to issue dma's have completed and any trap has been
67495 +     * handled. */
67496 +    kmutex_lock (&rail->HaltOpMutex);
67497 +    spin_lock_irqsave (&dev->IntrLock, flags);
67498 +    QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx, DiscardingHaltOperation, rail);
67499 +    
67500 +    while (! rail->HaltOpCompleted)
67501 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
67502 +    rail->HaltOpCompleted = 0;
67503 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
67504 +    kmutex_unlock (&rail->HaltOpMutex);
67505 +}
67506 +
67507 +void
67508 +ep3_flush_queues (EP_RAIL *r)
67509 +{
67510 +    EP3_RAIL         *rail = (EP3_RAIL *) r;
67511 +    ELAN3_DEV         *dev  = rail->Device;
67512 +    struct list_head *el;
67513 +    struct list_head *nel;
67514 +    EP_NODE_RAIL     *node;
67515 +    unsigned long flags;
67516 +    int vp, i;
67517 +
67518 +    ASSERT (NO_LOCKS_HELD);
67519 +    
67520 +    /* First - stall the dma retry thread, so that it will no longer
67521 +     *         restart any dma's from the rety lists. */
67522 +    StallDmaRetryThread (rail);
67523 +
67524 +    /* Second - queue a halt operation to flush through all DMA's which are executing
67525 +     *          or on the run queue. */
67526 +    kmutex_lock (&rail->HaltOpMutex);
67527 +    spin_lock_irqsave (&dev->IntrLock, flags);
67528 +    QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, FlushDmaQueuesHaltOperation, rail);
67529 +    while (! rail->HaltOpCompleted)
67530 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
67531 +    rail->HaltOpCompleted = 0;
67532 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
67533 +    kmutex_unlock (&rail->HaltOpMutex);
67534 +
67535 +    /* Third - run down the dma retry lists and move all entries to the cancelled
67536 +     *         list.  Any dma's which were on the run queues have already been
67537 +     *         moved there */
67538 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
67539 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
67540 +    {
67541 +       list_for_each_safe (el, nel, &rail->DmaRetries[i]) {
67542 +           EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
67543 +
67544 +           if (retry->Dma.s.dma_direction == DMA_WRITE)
67545 +               vp = retry->Dma.s.dma_destVProc;
67546 +           else
67547 +               vp = retry->Dma.s.dma_srcVProc;
67548 +           
67549 +           node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67550 +           
67551 +           ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE));
67552 +
67553 +           if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE)
67554 +           {
67555 +               EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueues: %08x %08x %08x %08x\n",rail->Generic.Name,
67556 +                         retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest);
67557 +               EPRINTF5 (DBG_DISCON, "%s:                 %08x %08x %08x %08x\n", rail->Generic.Name,
67558 +                         retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc,
67559 +                         retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc);
67560 +
67561 +               list_del (&retry->Link);
67562 +
67563 +               list_add_tail (&retry->Link, &node->StalledDmas);
67564 +           }
67565 +       }
67566 +    }
67567 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
67568 +
67569 +    /* Finally - allow the dma retry thread to run again */
67570 +    ResumeDmaRetryThread (rail);
67571 +}
67572 +
67573 +/****************************************************************************************/
67574 +/* NOTE - we require that all cookies are non-zero, which is 
67575 + *        achieved because EP_VP_DATA() is non-zero for all
67576 + *        nodes */
67577 +E3_uint32
67578 +LocalCookie (EP3_RAIL *rail, unsigned remoteNode)
67579 +{
67580 +    E3_uint32     cookie;
67581 +    unsigned long flags;
67582 +
67583 +    spin_lock_irqsave (&rail->CookieLock, flags);
67584 +    cookie = DMA_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(rail->Generic.Position.pos_nodeid));
67585 +    spin_unlock_irqrestore (&rail->CookieLock, flags);
67586 +
67587 +    /* Main processor cookie for srcCookie - this is what is sent
67588 +     * to the remote node along with the setevent from the put
67589 +     * or the dma descriptor for a get */
67590 +    return (cookie);
67591 +}
67592 +
67593 +E3_uint32
67594 +RemoteCookie (EP3_RAIL *rail, u_int remoteNode)
67595 +{
67596 +    uint32_t      cookie;
67597 +    unsigned long flags;
67598 +
67599 +    spin_lock_irqsave (&rail->CookieLock, flags);
67600 +    cookie = DMA_REMOTE_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(remoteNode));
67601 +    spin_unlock_irqrestore (&rail->CookieLock, flags);
67602 +
67603 +    /* Main processor cookie for dstCookie - this is the cookie
67604 +     * that the "remote put" dma uses for it's setevent packets for
67605 +     * a get dma */
67606 +    
67607 +    return (cookie);
67608 +}
67609 +
67610 +/****************************************************************************************/
67611 +/*
67612 + * Event Cookie management.
67613 + *
67614 + *   We find the ep_cookie in one of two ways:
67615 + *     1) for block copy events
67616 + *          the cookie value is stored in the ev_Source - for EVIRQ events
67617 + *          it is also stored in the ev_Type
67618 + *     2) for normal events
67619 + *          we just use the event address.
67620 + */
67621 +void 
67622 +InitialiseCookieTable (EP3_COOKIE_TABLE *table)
67623 +{
67624 +    register int i;
67625 +    
67626 +    spin_lock_init (&table->Lock);
67627 +    
67628 +    for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++)
67629 +       table->Entries[i] = NULL;
67630 +}
67631 +
67632 +void
67633 +DestroyCookieTable (EP3_COOKIE_TABLE *table)
67634 +{
67635 +    register int i;
67636 +
67637 +    for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++)
67638 +       if (table->Entries[i])
67639 +           printk ("DestroyCookieTable: entry %d not empty\n", i);
67640 +
67641 +    spin_lock_destroy (&table->Lock);
67642 +}
67643 +
67644 +void
67645 +RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp, E3_uint32 cookie, EP3_COOKIE_OPS *ops, void *arg)
67646 +{
67647 +    EP3_COOKIE *tcp;
67648 +    int hashval = EP3_HASH_COOKIE(cookie);
67649 +    unsigned long flags;
67650 +
67651 +    spin_lock_irqsave (&table->Lock, flags);
67652 +    
67653 +    cp->Operations = ops;
67654 +    cp->Arg        = arg;
67655 +    cp->Cookie     = cookie;
67656 +    
67657 +#if defined(DEBUG)
67658 +    /* Check that the cookie is unique */
67659 +    for (tcp = table->Entries[hashval]; tcp; tcp = tcp->Next)
67660 +       if (tcp->Cookie == cookie)
67661 +           panic ("RegisterEventCookie: non unique cookie\n");
67662 +#endif
67663 +    cp->Next = table->Entries[hashval];
67664 +    
67665 +    table->Entries[hashval] = cp;
67666 +    
67667 +    spin_unlock_irqrestore (&table->Lock, flags);
67668 +}
67669 +
67670 +void
67671 +DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp)
67672 +{
67673 +    EP3_COOKIE **predCookiep;
67674 +    unsigned long flags;
67675 +
67676 +    spin_lock_irqsave (&table->Lock, flags);
67677 +    
67678 +    for (predCookiep = &table->Entries[EP3_HASH_COOKIE (cp->Cookie)]; *predCookiep; predCookiep = &(*predCookiep)->Next)
67679 +    {
67680 +       if (*predCookiep == cp)
67681 +       {
67682 +           *predCookiep = cp->Next;
67683 +           break;
67684 +       }
67685 +    }
67686 +
67687 +    spin_unlock_irqrestore (&table->Lock, flags);
67688 +
67689 +    cp->Operations = NULL;
67690 +    cp->Arg        = NULL;
67691 +    cp->Cookie     = 0;
67692 +    cp->Next       = NULL;
67693 +}
67694 +
67695 +EP3_COOKIE *
67696 +LookupCookie (EP3_COOKIE_TABLE *table, E3_Addr cookie)
67697 +{
67698 +    EP3_COOKIE *cp;
67699 +    unsigned long flags;
67700 +
67701 +    spin_lock_irqsave (&table->Lock, flags);
67702 +    
67703 +    for (cp = table->Entries[EP3_HASH_COOKIE(cookie)]; cp; cp = cp->Next)
67704 +       if (cp->Cookie == cookie)
67705 +           break;
67706 +    
67707 +    spin_unlock_irqrestore (&table->Lock, flags);
67708 +    return (cp);
67709 +}
67710 +
67711 +EP3_COOKIE *
67712 +LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr eaddr)
67713 +{
67714 +    sdramaddr_t event;
67715 +    E3_uint32 type;
67716 +
67717 +    if ((event = ep_elan2sdram (&rail->Generic, eaddr)) != (sdramaddr_t) 0)
67718 +    {
67719 +       type = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Type));
67720 +
67721 +       if (type & EV_TYPE_BCOPY)
67722 +           return (LookupCookie (table, elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Source)) & ~EV_WCOPY));
67723 +       else
67724 +           return (LookupCookie (table, eaddr));
67725 +    }
67726 +
67727 +    return (NULL);
67728 +}
67729 +
67730 +/****************************************************************************************/
67731 +/*
67732 + * Elan context operations - note only support interrupt ops.
67733 + */
67734 +static int        ep3_event     (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
67735 +static int        ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
67736 +static int        ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
67737 +static int        ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
67738 +static int        ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
67739 +static int        ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf);
67740 +
67741 +static E3_uint8   ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr);
67742 +static void       ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
67743 +static E3_uint16  ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr);
67744 +static void       ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
67745 +static E3_uint32  ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr);
67746 +static void       ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
67747 +static E3_uint64  ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr);
67748 +static void       ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
67749 +
67750 +ELAN3_OPS ep3_elan3_ops = 
67751 +{
67752 +    ELAN3_OPS_VERSION,         /* Version */
67753 +    
67754 +    NULL,                      /* Exception */
67755 +    NULL,                      /* GetWordItem */
67756 +    NULL,                      /* GetBlockItem */
67757 +    NULL,                      /* PutWordItem */
67758 +    NULL,                      /* PutBlockItem */
67759 +    NULL,                      /* PutbackItem */
67760 +    NULL,                      /* FreeWordItem */
67761 +    NULL,                      /* FreeBlockItem */
67762 +    NULL,                      /* CountItems */
67763 +    ep3_event,                 /* Event */
67764 +    NULL,                      /* SwapIn */
67765 +    NULL,                      /* SwapOut */
67766 +    NULL,                      /* FreePrivate */
67767 +    NULL,                      /* FixupNetworkError */
67768 +    ep3_dprocTrap,             /* DProcTrap */
67769 +    ep3_tprocTrap,             /* TProcTrap */
67770 +    ep3_iprocTrap,             /* IProcTrap */
67771 +    ep3_cprocTrap,             /* CProcTrap */
67772 +    ep3_cprocReissue,          /* CProcReissue */
67773 +    NULL,                      /* StartFaultCheck */
67774 +    NULL,                      /* EndFaulCheck */
67775 +    ep3_load8,                 /* Load8 */
67776 +    ep3_store8,                        /* Store8 */
67777 +    ep3_load16,                        /* Load16 */
67778 +    ep3_store16,               /* Store16 */
67779 +    ep3_load32,                        /* Load32 */
67780 +    ep3_store32,               /* Store32 */
67781 +    ep3_load64,                        /* Load64 */
67782 +    ep3_store64,               /* Store64 */
67783 +};
67784 +
67785 +static int
67786 +ep3_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)
67787 +{
67788 +    EP3_RAIL  *rail = (EP3_RAIL *) ctxt->Private;
67789 +    EP3_COOKIE *cp   = LookupCookie (&rail->CookieTable, cookie);
67790 +    
67791 +    if (cp == NULL)
67792 +    {
67793 +       printk ("ep3_event: cannot find event cookie for %x\n", cookie);
67794 +       return (OP_HANDLED);
67795 +    }
67796 +    
67797 +    if (cp->Operations->Event)
67798 +       cp->Operations->Event(rail, cp->Arg);
67799 +    
67800 +    return (OP_HANDLED);
67801 +}
67802 +
67803 +/* Trap interface */
67804 +int
67805 +ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
67806 +{
67807 +    EP3_RAIL        *rail = (EP3_RAIL *) ctxt->Private;
67808 +    ELAN3_DEV        *dev = rail->Device;
67809 +    EP3_COOKIE       *cp;
67810 +    E3_FaultSave_BE *FaultArea;
67811 +    E3_uint16        vp;
67812 +    int                     validTrap;
67813 +    int                     numFaults;
67814 +    int                     i;
67815 +    sdramaddr_t      event;
67816 +    E3_uint32        type;
67817 +    sdramaddr_t      dma;
67818 +    E3_DMA_BE        dmabe;
67819 +    int              status = EAGAIN;
67820 +
67821 +    EPRINTF4 (DBG_EPTRAP, "ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
67822 +             trap->Status.s.WakeupFunction, trap->Status.s.Context, 
67823 +             trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
67824 +    EPRINTF4 (DBG_EPTRAP, "              type %08x size %08x source %08x dest %08x\n",
67825 +             trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
67826 +    EPRINTF2 (DBG_EPTRAP, "              Dest event %08x cookie/proc %08x\n",
67827 +             trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
67828 +    EPRINTF2 (DBG_EPTRAP, "              Source event %08x cookie/proc %08x\n",
67829 +             trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
67830 +
67831 +    ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT);
67832 +
67833 +    switch (trap->Status.s.TrapType)
67834 +    {
67835 +    case MI_DmaPacketTimedOutOrPacketError:
67836 +       if (trap->Desc.s.dma_direction == DMA_WRITE)
67837 +           vp = trap->Desc.s.dma_destVProc;
67838 +       else
67839 +           vp = trap->Desc.s.dma_srcVProc;
67840 +       
67841 +       if (! trap->PacketInfo.s.PacketTimeout)
67842 +           status = ETIMEDOUT;
67843 +       else
67844 +       {
67845 +           status = EHOSTDOWN;
67846 +
67847 +           /* XXXX: dma timedout - might want to "restart" tree ? */
67848 +       }
67849 +       goto retry_dma;
67850 +
67851 +    case MI_DmaFailCountError:
67852 +       goto retry_dma;
67853 +
67854 +    case MI_TimesliceDmaQueueOverflow:
67855 +       IncrStat (rail, DprocDmaQueueOverflow);
67856 +
67857 +       goto retry_dma;
67858 +
67859 +    case MI_RemoteDmaCommand:
67860 +    case MI_RunDmaCommand:
67861 +    case MI_DequeueNonSysCntxDma:
67862 +    case MI_DequeueSysCntxDma:
67863 +       /*
67864 +        * The DMA processor has trapped due to outstanding prefetches from the previous 
67865 +        * dma.  The "current" dma has not been consumed, so we just ignore the trap
67866 +        */
67867 +       return (OP_HANDLED);
67868 +       
67869 +    case MI_EventQueueOverflow:
67870 +       IncrStat (rail, DprocEventQueueOverflow);
67871 +
67872 +       if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 &&
67873 +           ((type  = elan3_sdram_readl (dev, event + offsetof(E3_Event,ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
67874 +       {
67875 +           spin_unlock (&ctxt->Device->IntrLock);
67876 +           ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY)), OP_LWP);
67877 +           spin_lock (&ctxt->Device->IntrLock);
67878 +       }
67879 +       return (OP_HANDLED);
67880 +       
67881 +    case MI_DmaQueueOverflow:
67882 +       IncrStat (rail, DprocDmaQueueOverflow);
67883 +
67884 +       if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 &&
67885 +           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA &&
67886 +           (dma  = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0)
67887 +       {
67888 +           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
67889 +           
67890 +           /* We only chain together DMA's of the same direction, so since
67891 +            * we took a DmaQueueOverflow trap - this means that DMA which
67892 +            * trapped was a WRITE dma - hence the one we chain to must also
67893 +            * be a WRITE dma.
67894 +            */
67895 +           ASSERT (dmabe.s.dma_direction == DMA_WRITE);
67896 +           
67897 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
67898 +
67899 +#ifdef DEBUG_ASSERT
67900 +           {
67901 +               E3_uint16     vp       = dmabe.s.dma_destVProc;
67902 +               EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67903 +
67904 +               ASSERT (cp != NULL && (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE)));
67905 +           }
67906 +#endif
67907 +           cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
67908 +           
67909 +           return (OP_HANDLED);
67910 +       }
67911 +
67912 +       panic ("ep3_dprocTrap\n");
67913 +       return (OP_HANDLED);
67914 +
67915 +    default:
67916 +       break;
67917 +    }
67918 +
67919 +    /* If it's a dma which traps past the end of the source, then */
67920 +    /* just re-issue it */
67921 +    numFaults = validTrap = (trap->FaultSave.s.FSR.Status != 0);
67922 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
67923 +    {
67924 +       if (FaultArea->s.FSR.Status != 0)
67925 +       {
67926 +           numFaults++;
67927 +
67928 +           /* XXXX: Rev B Elans can prefetch data past the end of the dma descriptor */
67929 +           /*       if the fault relates to this, then just ignore it */
67930 +           if (FaultArea->s.FaultAddress >= (trap->Desc.s.dma_source+trap->Desc.s.dma_size))
67931 +           {
67932 +               static int i;
67933 +               if (i < 10 && i++ < 10)
67934 +                   printk ("ep3_dprocTrap: Rev B prefetch trap error %08x %08x\n",
67935 +                            FaultArea->s.FaultAddress, (trap->Desc.s.dma_source+trap->Desc.s.dma_size));
67936 +               continue;
67937 +           }
67938 +
67939 +           validTrap++;
67940 +       }
67941 +    }
67942 +
67943 +    /*
67944 +     * NOTE: for physical errors (uncorrectable ECC/PCI parity errors) the FSR will
67945 +     *       be zero - hence we will not see any faults - and none will be valid, 
67946 +     *       so only ignore a Rev B prefetch trap if we've seen some faults. Otherwise
67947 +     *       we can reissue a DMA which has already sent it's remote event !
67948 +     */
67949 +    if (numFaults != 0 && validTrap == 0)
67950 +    {
67951 +    retry_dma:
67952 +       if (trap->Desc.s.dma_direction == DMA_WRITE)
67953 +       {
67954 +           vp = trap->Desc.s.dma_destVProc;
67955 +           cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_srcEvent);
67956 +       }
67957 +       else
67958 +       {
67959 +           ASSERT (EP3_CONTEXT_ISDATA(trap->Desc.s.dma_queueContext) || trap->Desc.s.dma_direction == DMA_READ_REQUEUE);
67960 +
67961 +           vp = trap->Desc.s.dma_srcVProc;
67962 +           cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_destEvent);
67963 +
67964 +           /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been 
67965 +            * modified by the elan to point at the dma in the rxd where it was issued
67966 +            * from */
67967 +           trap->Desc.s.dma_direction = (trap->Desc.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
67968 +       }
67969 +
67970 +#ifdef DEBUG_ASSERT
67971 +       {
67972 +           EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
67973 +
67974 +           ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
67975 +       }
67976 +#endif
67977 +       
67978 +       if (cp != NULL)
67979 +           cp->Operations->DmaRetry (rail, cp->Arg, &trap->Desc, status);
67980 +       else
67981 +       {
67982 +           ASSERT (trap->Desc.s.dma_direction == DMA_WRITE && trap->Desc.s.dma_srcEvent == 0 && trap->Desc.s.dma_isRemote);
67983 +
67984 +           QueueDmaForRetry (rail, &trap->Desc, EP_RETRY_ANONYMOUS);
67985 +       }
67986 +
67987 +       return (OP_HANDLED);
67988 +    }
67989 +    
67990 +    printk ("ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
67991 +            trap->Status.s.WakeupFunction, trap->Status.s.Context, 
67992 +            trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
67993 +    printk ("                    FaultAddr=%x EventAddr=%x FSR=%x\n",
67994 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
67995 +            trap->FaultSave.s.FSR.Status);
67996 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
67997 +       printk ("                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
67998 +                FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
67999 +    
68000 +    printk ("                  type %08x size %08x source %08x dest %08x\n",
68001 +            trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
68002 +    printk ("                  Dest event %08x cookie/proc %08x\n",
68003 +            trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
68004 +    printk ("                  Source event %08x cookie/proc %08x\n",
68005 +            trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
68006 +
68007 +//    panic ("ep3_dprocTrap");
68008 +
68009 +    return (OP_HANDLED);
68010 +}
68011 +
68012 +int
68013 +ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
68014 +{
68015 +    EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private;
68016 +
68017 +    EPRINTF6 (DBG_EPTRAP, "ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n",
68018 +             trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits, trap->TrapBits.Bits, MiToName (trap->mi));
68019 +    EPRINTF4 (DBG_EPTRAP, "              g0=%08x g1=%08x g2=%08x g3=%08x\n", 
68020 +             trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
68021 +             trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
68022 +    EPRINTF4 (DBG_EPTRAP, "              g4=%08x g5=%08x g6=%08x g7=%08x\n", 
68023 +             trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
68024 +             trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
68025 +    EPRINTF4 (DBG_EPTRAP, "              o0=%08x o1=%08x o2=%08x o3=%08x\n", 
68026 +             trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
68027 +             trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
68028 +    EPRINTF4 (DBG_EPTRAP, "              o4=%08x o5=%08x o6=%08x o7=%08x\n", 
68029 +             trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
68030 +             trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
68031 +    EPRINTF4 (DBG_EPTRAP, "              l0=%08x l1=%08x l2=%08x l3=%08x\n", 
68032 +             trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
68033 +             trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
68034 +    EPRINTF4 (DBG_EPTRAP, "              l4=%08x l5=%08x l6=%08x l7=%08x\n", 
68035 +             trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
68036 +             trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
68037 +    EPRINTF4 (DBG_EPTRAP, "              i0=%08x i1=%08x i2=%08x i3=%08x\n", 
68038 +             trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
68039 +             trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
68040 +    EPRINTF4 (DBG_EPTRAP, "              i4=%08x i5=%08x i6=%08x i7=%08x\n", 
68041 +             trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
68042 +             trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
68043 +    
68044 +    ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT);
68045 +
68046 +    switch (trap->mi)
68047 +    {
68048 +    case MI_UnimplementedError:
68049 +       if (trap->TrapBits.s.ForcedTProcTrap)
68050 +       {
68051 +           ASSERT (trap->TrapBits.s.OutputWasOpen == 0);
68052 +           
68053 +           EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ForcedTProcTrap\n");
68054 +
68055 +           IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE));
68056 +           return (OP_HANDLED);
68057 +       }
68058 +
68059 +       if (trap->TrapBits.s.ThreadTimeout)
68060 +       {
68061 +           EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ThreadTimeout\n");
68062 +
68063 +           if (trap->Registers[REG_GLOBALS + (1^WordEndianFlip)] == 0)
68064 +               RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue);
68065 +           else
68066 +           {
68067 +               CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], trap->TrapBits.s.PacketAckValue);
68068 +
68069 +               RollThreadToClose (ctxt, trap, EP3_PAckStolen);
68070 +           }
68071 +               
68072 +           IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE));
68073 +           return (OP_HANDLED);
68074 +       }
68075 +
68076 +       if (trap->TrapBits.s.Unimplemented)
68077 +       {
68078 +           E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK);
68079 +
68080 +           PRINTF1 (ctxt, DBG_EPTRAP, "ep3_tprocTrap: unimplemented instruction %08x\n", instr);
68081 +
68082 +           if ((instr & OPCODE_MASK) == OPCODE_Ticc &&
68083 +               (instr & OPCODE_IMM)  == OPCODE_IMM &&
68084 +               (Ticc_COND(instr)     == Ticc_TA))
68085 +           {
68086 +               switch (INSTR_IMM(instr))
68087 +               {
68088 +               case EP3_UNIMP_TRAP_NO_DESCS:
68089 +                   StallThreadForNoDescs (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], 
68090 +                                          SaveThreadToStack (ctxt, trap, TRUE));
68091 +                   return (OP_HANDLED);
68092 +
68093 +               case EP3_UNIMP_TRAP_PACKET_NACKED:
68094 +                   CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], E3_PAckDiscard);
68095 +
68096 +                   IssueRunThread (rail, SaveThreadToStack (ctxt, trap, TRUE));
68097 +                   return (OP_HANDLED);
68098 +
68099 +               case EP3_UNIMP_THREAD_HALTED: 
68100 +                   StallThreadForHalted (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], 
68101 +                                         SaveThreadToStack (ctxt, trap, TRUE));
68102 +                   return (OP_HANDLED);
68103 +
68104 +               default:
68105 +                   break;
68106 +                   
68107 +               }
68108 +           }
68109 +       }
68110 +       break;
68111 +
68112 +    default:
68113 +       break;
68114 +    }
68115 +
68116 +    /* All other traps should not happen for kernel comms */
68117 +    printk ("ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n",
68118 +            trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits,
68119 +            trap->TrapBits.Bits, MiToName (trap->mi));
68120 +    printk ("              FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
68121 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
68122 +    printk ("              DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
68123 +            trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
68124 +    printk ("              InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
68125 +            trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
68126 +    printk ("              OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
68127 +            trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
68128 +
68129 +    if (trap->DirtyBits.s.GlobalsDirty)
68130 +    {
68131 +       printk ("              g0=%08x g1=%08x g2=%08x g3=%08x\n", 
68132 +                trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
68133 +                trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
68134 +       printk ("              g4=%08x g5=%08x g6=%08x g7=%08x\n", 
68135 +                trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
68136 +                trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
68137 +    }
68138 +    if (trap->DirtyBits.s.OutsDirty)
68139 +    {
68140 +       printk ("              o0=%08x o1=%08x o2=%08x o3=%08x\n", 
68141 +                trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
68142 +                trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
68143 +       printk ("              o4=%08x o5=%08x o6=%08x o7=%08x\n", 
68144 +                trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
68145 +                trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
68146 +    }
68147 +    if (trap->DirtyBits.s.LocalsDirty)
68148 +    {
68149 +       printk ("              l0=%08x l1=%08x l2=%08x l3=%08x\n", 
68150 +                trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
68151 +                trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
68152 +       printk ("              l4=%08x l5=%08x l6=%08x l7=%08x\n", 
68153 +                trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
68154 +                trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
68155 +    }
68156 +    if (trap->DirtyBits.s.InsDirty)
68157 +    {
68158 +       printk ("              i0=%08x i1=%08x i2=%08x i3=%08x\n", 
68159 +                trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
68160 +                trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
68161 +       printk ("              i4=%08x i5=%08x i6=%08x i7=%08x\n", 
68162 +                trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
68163 +                trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
68164 +    }
68165 +    
68166 +//    panic ("ep3_tprocTrap");
68167 +
68168 +    return (OP_HANDLED);
68169 +}
68170 +
68171 +int
68172 +ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int channel)
68173 +{
68174 +    EP3_RAIL      *rail = (EP3_RAIL *) ctxt->Private;
68175 +    ELAN3_DEV      *dev = ctxt->Device;
68176 +    EP3_COOKIE    *cp;
68177 +    sdramaddr_t    event;
68178 +    E3_uint32      type;
68179 +    sdramaddr_t    dma;
68180 +    E3_DMA_BE      dmabe;
68181 +
68182 +    ASSERT (trap->Transactions[0].s.TrTypeCntx.s.Context & SYS_CONTEXT_BIT);
68183 +
68184 +    /*
68185 +     * first process the trap to determine the cause
68186 +     */
68187 +    InspectIProcTrap (ctxt, trap);
68188 +
68189 +    if (! trap->AckSent && trap->LockQueuePointer)             /* Must be a network error in a queueing DMA */
68190 +    {                                                          /* packet - unlock the queue */
68191 +       IncrStat (rail, QueueingPacketTrap);
68192 +
68193 +       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE);
68194 +       return (OP_HANDLED);
68195 +    }
68196 +
68197 +    if (trap->AckSent && trap->BadTransaction)
68198 +    {
68199 +       spin_unlock (&dev->IntrLock);
68200 +
68201 +       /* NOTE - no network error fixup is necessary for system context
68202 +        *        messages since they are idempotent and are single packet 
68203 +        *        dmas
68204 +        */
68205 +       if (EP3_CONTEXT_ISDATA (trap->Transactions[0].s.TrTypeCntx.s.Context))
68206 +       {
68207 +           int nodeId = EP3_CONTEXT_TO_NODE(trap->Transactions[0].s.TrTypeCntx.s.Context);
68208 +           
68209 +           if (trap->DmaIdentifyTransaction)
68210 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->DmaIdentifyTransaction->s.TrAddr);
68211 +           else if (trap->ThreadIdentifyTransaction)
68212 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->ThreadIdentifyTransaction->s.TrAddr);
68213 +           else
68214 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_DMA_PACKET, channel, 0);
68215 +       }
68216 +
68217 +       spin_lock (&dev->IntrLock);
68218 +       return (OP_HANDLED);
68219 +    }
68220 +    
68221 +    if (trap->AckSent)
68222 +    {
68223 +       if (trap->TrappedTransaction == NULL)
68224 +           return (OP_HANDLED);
68225 +       
68226 +       while (! trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans)
68227 +       {
68228 +           E3_IprocTrapHeader_BE *hdrp  = trap->TrappedTransaction;
68229 +           E3_IprocTrapData_BE   *datap = trap->TrappedDataBuffer;
68230 +           
68231 +           ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0);
68232 +           
68233 +           if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0)
68234 +           {
68235 +               printk ("ep3_iprocTrap: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr);
68236 +//             panic ("ep3_iprocTrap\n");
68237 +           }
68238 +           else
68239 +           {
68240 +               switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
68241 +               {
68242 +               case TR_SETEVENT & TR_OPCODE_TYPE_MASK:
68243 +                   switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus))
68244 +                   {
68245 +                   case MI_DmaQueueOverflow:
68246 +                       IncrStat (rail, IprocDmaQueueOverflow);
68247 +
68248 +                       if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 &&
68249 +                           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA &&
68250 +                           (dma  = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0)
68251 +                       {
68252 +                           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
68253 +                           
68254 +                           if (dmabe.s.dma_direction == DMA_WRITE)
68255 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
68256 +                           else
68257 +                           {
68258 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
68259 +                               
68260 +                               /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the 
68261 +                                * DMA descriptor will be read from the EP3_RETRY_DMA rather than the 
68262 +                                * original DMA - this can then get reused and an incorrect DMA 
68263 +                                * descriptor sent 
68264 +                                * eventp->ev_Type contains the dma address with type in the lower bits 
68265 +                                */ 
68266 +                               
68267 +                               dmabe.s.dma_source    = (type & ~EV_TYPE_MASK2);
68268 +                               dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
68269 +                           }
68270 +
68271 +#ifdef DEBUG_ASSERT
68272 +                           {
68273 +                               E3_uint16     vp       = (dmabe.s.dma_direction == DMA_WRITE ? dmabe.s.dma_destVProc : dmabe.s.dma_srcVProc);
68274 +                               EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
68275 +
68276 +                               ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
68277 +                           }
68278 +#endif
68279 +
68280 +                           if (cp != NULL)
68281 +                               cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
68282 +                           else
68283 +                           {
68284 +                               ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
68285 +                               
68286 +                               QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
68287 +                           }
68288 +                           break;
68289 +                       }
68290 +
68291 +                       printk ("ep3_iprocTrap: SETEVENT : %x - cannot find dma to restart\n", hdrp->s.TrAddr);
68292 +//                     panic ("ep3_iprocTrap\n");
68293 +                       break;
68294 +
68295 +                   case MI_EventQueueOverflow:
68296 +                   {
68297 +                       sdramaddr_t event;
68298 +                       E3_uint32   type;
68299 +
68300 +                       IncrStat (rail, IprocEventQueueOverflow);
68301 +
68302 +                       if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 &&
68303 +                           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
68304 +                       {
68305 +                           spin_unlock (&dev->IntrLock);
68306 +                           ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)), OP_LWP);
68307 +                           spin_lock (&dev->IntrLock);
68308 +
68309 +                           break;
68310 +                       }
68311 +
68312 +                       printk ("ep3_iprocTrap: SETEVENT : %x - cannot find event\n", hdrp->s.TrAddr);
68313 +//                     panic ("ep3_iprocTrap\n");
68314 +                       break;
68315 +                   }
68316 +
68317 +                   default:
68318 +                       printk ("ep3_iprocTrap: SETEVENT : %x MI=%x\n", hdrp->s.TrAddr, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus));
68319 +//                     panic ("ep3_iprocTrap\n");
68320 +                       break;
68321 +                   }
68322 +                   break;
68323 +                   
68324 +               case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:
68325 +                   /* Just ignore send-discard transactions */
68326 +                   break;
68327 +                   
68328 +               case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:
68329 +               {
68330 +                   E3_DMA_BE *dmap = (E3_DMA_BE *) datap;
68331 +
68332 +                   if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_DmaQueueOverflow)
68333 +                   {
68334 +                       printk ("ep3_iprocTrap: MI=%x\n", GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus));
68335 +                       break;
68336 +                   }
68337 +
68338 +                   IncrStat (rail, IprocDmaQueueOverflow);
68339 +
68340 +                   cp = LookupEventCookie (rail, &rail->CookieTable, dmap->s.dma_srcEvent);
68341 +
68342 +                   /* modify the dma type since it will still be a "read" dma */
68343 +                   dmap->s.dma_type = (dmap->s.dma_type & ~DMA_TYPE_READ) | DMA_TYPE_ISREMOTE;
68344 +
68345 +#ifdef DEBUG_ASSERT
68346 +                   {
68347 +                       E3_uint16     vp       = dmap->s.dma_destVProc;
68348 +                       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
68349 +                       
68350 +                       ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
68351 +                   }
68352 +#endif
68353 +                   if (cp != NULL)
68354 +                       cp->Operations->DmaRetry (rail, cp->Arg, dmap, EAGAIN);
68355 +                   else
68356 +                   {
68357 +                       ASSERT (dmap->s.dma_direction == DMA_WRITE && dmap->s.dma_srcEvent == 0 && dmap->s.dma_isRemote);
68358 +                       
68359 +                       QueueDmaForRetry (rail, dmap, EP_RETRY_ANONYMOUS);
68360 +                   }
68361 +                   break;
68362 +               }   
68363 +               default:
68364 +                   printk ("ep3_iprocTrap: %s\n", IProcTrapString (hdrp, datap));
68365 +                   break;
68366 +               }
68367 +           }
68368 +           
68369 +           /*
68370 +            * We've successfully processed this transaction, so move onto the 
68371 +            * next one.
68372 +            */
68373 +           trap->TrappedTransaction++;
68374 +           trap->TrappedDataBuffer++;
68375 +       }
68376 +
68377 +       return (OP_HANDLED);
68378 +    }
68379 +    
68380 +    /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */
68381 +    if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) &&         /* a DMA packet */
68382 +       trap->LockQueuePointer == 0 && trap->UnlockQueuePointer &&              /* a queueing DMA */
68383 +       trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress)   /* and missed lockqueue */
68384 +    {
68385 +       printk ("ep3_iprocTrap: missed lockqueue transaction for queue %x\n", trap->UnlockQueuePointer);
68386 +       return (OP_HANDLED);
68387 +    }
68388 +
68389 +    if (trap->FaultSave.s.FaultContext != 0)
68390 +       printk ("ep3_iprocTrap: pagefault at %08x in context %x\n", 
68391 +               trap->FaultSave.s.FaultAddress, trap->FaultSave.s.FaultContext);
68392 +
68393 +//    panic ("ep3_iprocTrap: unexpected inputter trap\n");
68394 +    
68395 +    return (OP_HANDLED);
68396 +}
68397 +
68398 +/*
68399 + * Command processor trap
68400 + *   kernel comms should only be able to generate
68401 + *   queue overflow traps
68402 + */
68403 +int
68404 +ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap)
68405 +{
68406 +    EP3_RAIL     *rail   = (EP3_RAIL *) ctxt->Private;
68407 +    int           ctxnum = (trap->TrapBuf.r.Breg >> 16) & MAX_ROOT_CONTEXT_MASK;
68408 +    ELAN3_DEV     *dev    = rail->Device;
68409 +    EP3_DMA_RING  *ring;
68410 +    EP3_COOKIE   *cp;
68411 +    E3_DMA_BE     dmabe;
68412 +    int           vp, slot;
68413 +    unsigned long flags;
68414 +
68415 +    switch (trap->Status.s.TrapType)
68416 +    {
68417 +    case MI_DmaQueueOverflow:
68418 +       IncrStat (rail, CprocDmaQueueOverflow);
68419 +
68420 +       /* Use the context number that the setevent was issued in,
68421 +        * to find the appropriate dma ring, then since they are guaranteed
68422 +        * to be issued in order, we just search backwards till we find the
68423 +        * last one which has completed its word copy - this must be the
68424 +        * one which had caused the DmaQueueOverflow trap ! */
68425 +
68426 +       ASSERT (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS));
68427 +
68428 +       spin_lock_irqsave (&dev->CProcLock, flags);
68429 +
68430 +       ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM];
68431 +       slot = DMA_RING_PREV_POS(ring, ring->Position);
68432 +       
68433 +       while (ring->pDoneBlk[slot] == EP3_EVENT_ACTIVE)
68434 +           slot = DMA_RING_PREV_POS(ring, slot);
68435 +       
68436 +       elan3_sdram_copyq_from_sdram (rail->Device , DMA_RING_DMA(ring,slot), &dmabe, sizeof (E3_DMA));
68437 +
68438 +#if defined(DEBUG_ASSERT)
68439 +       while (slot != DMA_RING_PREV_POS(ring, ring->Position))
68440 +       {
68441 +           ASSERT (ring->pDoneBlk[slot] != EP3_EVENT_ACTIVE);
68442 +           
68443 +           slot = DMA_RING_PREV_POS(ring, slot);
68444 +       }
68445 +#endif
68446 +       spin_unlock_irqrestore (&dev->CProcLock, flags);
68447 +
68448 +       if (dmabe.s.dma_direction == DMA_WRITE)
68449 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
68450 +       else
68451 +       {
68452 +           ASSERT (dmabe.s.dma_direction == DMA_READ_REQUEUE);
68453 +
68454 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
68455 +       }
68456 +
68457 +#if defined(DEBUG_ASSERT)
68458 +       if (dmabe.s.dma_direction == DMA_WRITE)
68459 +           vp = dmabe.s.dma_destVProc;
68460 +       else
68461 +           vp = dmabe.s.dma_srcVProc;
68462 +       
68463 +       ASSERT (!EP_VP_ISDATA(vp) || (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED &&
68464 +                                     rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE));
68465 +#endif
68466 +
68467 +       if (cp != NULL)
68468 +           cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
68469 +       else
68470 +       {
68471 +           ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
68472 +           
68473 +           QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
68474 +       }
68475 +       
68476 +       return (OP_HANDLED);
68477 +
68478 +    case MI_EventQueueOverflow:
68479 +       ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM);
68480 +
68481 +       IncrStat (rail, CprocEventQueueOverflow);
68482 +       
68483 +       rail->CommandPortEventTrap = TRUE;
68484 +       return (OP_HANDLED);
68485 +       
68486 +#if defined(PER_CPU_TIMEOUT)
68487 +    case MI_SetEventReadWait:
68488 +       if (ctxnum == ELAN3_MRF_CONTEXT_NUM && trap->FaultSave.s.EventAddress == EP_PACEMAKER_EVENTADDR)
68489 +       {
68490 +           HeartbeatPacemaker (rail);
68491 +           return (OP_HANDLED);
68492 +       }
68493 +#endif
68494 +
68495 +    default:
68496 +       printk ("ep3_cprocTrap : Context=%x Status=%x TrapType=%x\n", ctxnum, trap->Status.Status, trap->Status.s.TrapType);
68497 +       printk ("               FaultAddr=%x EventAddr=%x FSR=%x\n",
68498 +                trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
68499 +                trap->FaultSave.s.FSR.Status);
68500 +       break;
68501 +    }
68502 +
68503 +//    panic ("ep3_cprocTrap");
68504 +
68505 +    return (OP_HANDLED);
68506 +}
68507 +
68508 +static int
68509 +ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf)
68510 +{
68511 +    EP3_RAIL   *rail    = (EP3_RAIL *) ctxt->Private;
68512 +    unsigned  cmdoff = (tbuf->s.ContextType >> 5) & 0xFF;
68513 +    int       ctxnum = (tbuf->s.ContextType >> 16) & MAX_ROOT_CONTEXT_MASK;
68514 +    
68515 +    if (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS))
68516 +    {
68517 +       EP3_DMA_RING *ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM];
68518 +
68519 +       ASSERT ((cmdoff << 2) == offsetof (E3_CommandPort, SetEvent)); /* can only be setevent commands! */
68520 +       ASSERT (tbuf->s.Addr >= DMA_RING_EVENT_ELAN(ring,0) && tbuf->s.Addr < DMA_RING_EVENT_ELAN(ring, ring->Entries));
68521 +       
68522 +       writel (tbuf->s.Addr, ring->CommandPort + (cmdoff << 2));
68523 +    }
68524 +    else
68525 +    {
68526 +       ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM);
68527 +
68528 +       writel (tbuf->s.Addr, ctxt->CommandPort + (cmdoff << 2));
68529 +    }
68530 +    
68531 +    return (OP_HANDLED);
68532 +}
68533 +
68534 +static E3_uint8
68535 +ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr)
68536 +{
68537 +    EP3_RAIL    *rail  = (EP3_RAIL *) ctxt->Private;
68538 +    ELAN3_DEV    *dev = ctxt->Device;
68539 +    sdramaddr_t offset;
68540 +    E3_uint8   *ptr;
68541 +
68542 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68543 +       return (elan3_sdram_readb (dev, offset));
68544 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != NULL)
68545 +       return (*ptr);
68546 +
68547 +    printk ("ep3_load8: %08x\n", addr);
68548 +    return (0);
68549 +}
68550 +
68551 +static void
68552 +ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val)
68553 +{
68554 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
68555 +    ELAN3_DEV   *dev = ctxt->Device;
68556 +    sdramaddr_t offset;
68557 +    E3_uint8   *ptr;
68558 +
68559 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68560 +       elan3_sdram_writeb (dev, offset, val);
68561 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
68562 +       *ptr = val;
68563 +    else
68564 +       printk ("ep3_store8 %08x\n", addr);
68565 +}
68566 +
68567 +static E3_uint16
68568 +ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr)
68569 +{
68570 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
68571 +    ELAN3_DEV   *dev = ctxt->Device;
68572 +    sdramaddr_t offset;
68573 +    E3_uint16  *ptr;
68574 +
68575 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68576 +       return (elan3_sdram_readw (dev, offset));
68577 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
68578 +       return (*ptr);
68579 +
68580 +    printk ("ep3_load16 %08x\n", addr);
68581 +    return (0);
68582 +}
68583 +
68584 +static void
68585 +ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val)
68586 +{
68587 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
68588 +    ELAN3_DEV   *dev = ctxt->Device;
68589 +    sdramaddr_t offset;
68590 +    E3_uint16  *ptr;
68591 +
68592 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68593 +       elan3_sdram_writew (dev, offset, val);
68594 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
68595 +       *ptr = val;
68596 +    else
68597 +       printk ("ep3_store16 %08x\n", addr);
68598 +}
68599 +
68600 +static E3_uint32
68601 +ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr)
68602 +{
68603 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
68604 +    ELAN3_DEV   *dev = ctxt->Device;
68605 +    sdramaddr_t offset;
68606 +    E3_uint32  *ptr;
68607 +
68608 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68609 +       return (elan3_sdram_readl(dev, offset));
68610 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
68611 +       return (*ptr);
68612 +    
68613 +    printk ("ep3_load32 %08x\n", addr);
68614 +    return (0);
68615 +}
68616 +
68617 +static void
68618 +ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val)
68619 +{
68620 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
68621 +    ELAN3_DEV   *dev = ctxt->Device;
68622 +    sdramaddr_t offset;
68623 +    E3_uint32  *ptr;
68624 +
68625 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68626 +       elan3_sdram_writel (dev, offset, val);
68627 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
68628 +       *ptr = val;
68629 +    else
68630 +       printk ("ep3_store32 %08x\n", addr);
68631 +}
68632 +
68633 +static E3_uint64
68634 +ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr)
68635 +{
68636 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
68637 +    ELAN3_DEV   *dev = ctxt->Device;
68638 +    sdramaddr_t offset;
68639 +    E3_uint64  *ptr;
68640 +
68641 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68642 +       return (elan3_sdram_readq (dev, offset));
68643 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
68644 +       return (*ptr);
68645 +
68646 +    printk ("ep3_load64 %08x\n", addr);
68647 +    return (0);
68648 +}
68649 +
68650 +static void
68651 +ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val)
68652 +{
68653 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
68654 +    ELAN3_DEV   *dev = ctxt->Device;
68655 +    sdramaddr_t offset;
68656 +    E3_uint64  *ptr;
68657 +
68658 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
68659 +       elan3_sdram_writeq (dev, offset, val);
68660 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
68661 +       *ptr = val;
68662 +    else
68663 +       printk ("ep3_store64 %08x\n", addr);
68664 +}
68665 +
68666 +/*
68667 + * Local variables:
68668 + * c-file-style: "stroustrup"
68669 + * End:
68670 + */
68671 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/support_elan4.c
68672 ===================================================================
68673 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/support_elan4.c 2004-02-23 16:02:56.000000000 -0500
68674 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/support_elan4.c      2005-07-28 14:52:52.910669312 -0400
68675 @@ -0,0 +1,1184 @@
68676 +/*
68677 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
68678 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
68679 + *
68680 + *    For licensing information please see the supplied COPYING file
68681 + *
68682 + */
68683 +
68684 +#ident "@(#)$Id: support_elan4.c,v 1.18.2.3 2004/11/18 12:05:00 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
68685 +/*      $Source: /cvs/master/quadrics/epmod/support_elan4.c,v $*/
68686 +
68687 +#include <qsnet/kernel.h>
68688 +#include <qsnet/kthread.h>
68689 +
68690 +#include <elan/kcomm.h>
68691 +
68692 +#include "kcomm_vp.h"
68693 +#include "kcomm_elan4.h"
68694 +#include "debug.h"
68695 +
68696 +#include <elan4/trtype.h>
68697 +#include <elan4/debug.h>
68698 +
68699 +void
68700 +ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg)
68701 +{
68702 +    unsigned long flags;
68703 +    
68704 +    cp->int_val      = cookie;
68705 +    cp->int_callback = callback;
68706 +    cp->int_arg      = arg;
68707 +       
68708 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
68709 +    list_add_tail (&cp->int_link, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]);
68710 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68711 +}
68712 +
68713 +void
68714 +ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp)
68715 +{
68716 +    unsigned long flags;
68717 +    
68718 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
68719 +    list_del (&cp->int_link);
68720 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68721 +}
68722 +
68723 +
68724 +EP4_INTCOOKIE *
68725 +ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie)
68726 +{
68727 +    struct list_head *el;
68728 +    unsigned long flags;
68729 +
68730 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
68731 +    list_for_each (el, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]) {
68732 +       EP4_INTCOOKIE *cp = list_entry (el, EP4_INTCOOKIE, int_link);
68733 +
68734 +       if (cp->int_val == cookie)
68735 +       {
68736 +           spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68737 +           return cp;
68738 +       }
68739 +    }
68740 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
68741 +    return NULL;
68742 +}
68743 +
68744 +E4_uint64
68745 +ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node)
68746 +{
68747 +    E4_uint64      cookie;
68748 +    unsigned long  flags;
68749 +
68750 +    spin_lock_irqsave (&rail->r_cookie_lock, flags);
68751 +    cookie = rail->r_cookies[node];
68752 +
68753 +    rail->r_cookies[node] += EP4_COOKIE_INC;
68754 +    
68755 +    spin_unlock_irqrestore (&rail->r_cookie_lock, flags);
68756 +
68757 +    return cookie;
68758 +}
68759 +
68760 +void
68761 +ep4_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
68762 +{
68763 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
68764 +    ELAN4_EPROC_TRAP trap;
68765 +
68766 +    elan4_extract_eproc_trap (ctxt->ctxt_dev, status, &trap, 0);
68767 +
68768 +    if (epdebug & DBG_EPTRAP)
68769 +       elan4_display_eproc_trap (DBG_BUFFER, 0, "ep4_eproc_trap", &trap);
68770 +
68771 +    switch (EPROC_TrapType (status))
68772 +    {
68773 +    case EventProcNoFault:
68774 +       EPRINTF1 (DBG_EPTRAP, "%s: EventProcNoFault\n", rail->r_generic.Name);
68775 +       return;
68776 +
68777 +    default:
68778 +       printk ("%s: unhandled eproc trap %d\n", rail->r_generic.Name, EPROC_TrapType (status));
68779 +       elan4_display_eproc_trap (DBG_CONSOLE, 0, "ep4_eproc_trap", &trap);
68780 +    }
68781 +}
68782 +
68783 +void
68784 +ep4_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
68785 +{
68786 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
68787 +    ELAN4_CPROC_TRAP trap;
68788 +    struct list_head *el;
68789 +    register int      i;
68790 +
68791 +    elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &trap, cqnum);
68792 +
68793 +    if (epdebug & DBG_EPTRAP)
68794 +       elan4_display_cproc_trap (DBG_BUFFER, 0, "ep4_cproc_trap", &trap);
68795 +       
68796 +    switch (CPROC_TrapType (status))
68797 +    {
68798 +    case CommandProcInterruptQueueOverflow:
68799 +       /*
68800 +        * Try and handle a bunch of elan main interrupts
68801 +        */
68802 +       for (i = 0; i <EP4_NUM_ECQ; i++) {
68803 +           list_for_each (el, &rail->r_ecq_list[i]) {
68804 +               EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
68805 +           
68806 +               if (elan4_cq2num (ecq->ecq_cq) == cqnum)
68807 +               {
68808 +                   printk ("%s: defer command queue %d after trap %x\n",
68809 +                           rail->r_generic.Name, cqnum, CPROC_TrapType (status));
68810 +       
68811 +                   elan4_queue_mainintop (ctxt->ctxt_dev, &ecq->ecq_intop);
68812 +                   return;
68813 +               }
68814 +           }
68815 +       }
68816 +       break;
68817 +
68818 +    case CommandProcDmaQueueOverflow:
68819 +    case CommandProcThreadQueueOverflow:
68820 +       for (i = 0; i <EP4_NUM_ECQ; i++) {
68821 +           list_for_each (el, &rail->r_ecq_list[i]) {
68822 +               EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
68823 +           
68824 +               if (elan4_cq2num (ecq->ecq_cq) == cqnum)
68825 +               {
68826 +                   printk ("%s: restart command queue %d after trap %x\n",
68827 +                           rail->r_generic.Name, cqnum, CPROC_TrapType (status));
68828 +
68829 +                   elan4_restartcq (ctxt->ctxt_dev, ecq->ecq_cq);
68830 +                   return;
68831 +               }
68832 +           }
68833 +       }
68834 +       break;
68835 +    }
68836 +
68837 +    printk ("%s: unhandled cproc trap %d for cqnum %d\n", rail->r_generic.Name, CPROC_TrapType (status), cqnum);
68838 +    elan4_display_cproc_trap (DBG_CONSOLE, 0, "ep4_cproc_trap", &trap);
68839 +}
68840 +
68841 +void
68842 +ep4_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
68843 +{
68844 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
68845 +    ELAN4_DPROC_TRAP trap;
68846 +
68847 +    elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit);
68848 +
68849 +    if (epdebug & DBG_EPTRAP)
68850 +       elan4_display_dproc_trap (DBG_BUFFER, 0, "ep4_dproc_trap", &trap);
68851 +
68852 +    if (! DPROC_PrefetcherFault (trap.tr_status))
68853 +    {
68854 +       switch (DPROC_TrapType (trap.tr_status))
68855 +       {
68856 +       case DmaProcFailCountError:
68857 +           goto retry_this_dma;
68858 +
68859 +       case DmaProcPacketAckError:
68860 +           goto retry_this_dma;
68861 +
68862 +       case DmaProcQueueOverflow:
68863 +           goto retry_this_dma;
68864 +       }
68865 +    }
68866 +
68867 +    printk ("%s: unhandled dproc trap\n", rail->r_generic.Name);
68868 +    elan4_display_dproc_trap (DBG_CONSOLE, 0, "ep4_dproc_trap", &trap);
68869 +    return;
68870 +
68871 + retry_this_dma:
68872 +    /*XXXX implement backoff .... */
68873 +
68874 +    ep4_queue_dma_retry (rail, &trap.tr_desc, EP_RETRY_LOW_PRI);
68875 +}
68876 +
68877 +void
68878 +ep4_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
68879 +{
68880 +    EP4_RAIL         *rail = EP4_CTXT_TO_RAIL (ctxt);
68881 +    ELAN4_TPROC_TRAP *trap = &rail->r_tproc_trap;
68882 +
68883 +    elan4_extract_tproc_trap (ctxt->ctxt_dev, status, trap);
68884 +
68885 +    if (epdebug & DBG_EPTRAP)
68886 +       elan4_display_tproc_trap (DBG_BUFFER, 0, "ep4_tproc_trap", trap);
68887 +       
68888 +    printk ("%s: unhandled tproc trap\n", rail->r_generic.Name);
68889 +    elan4_display_tproc_trap (DBG_CONSOLE, 0, "ep4_tproc_trap", trap);
68890 +}
68891 +
68892 +void
68893 +ep4_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
68894 +{
68895 +    EP4_RAIL         *rail = EP4_CTXT_TO_RAIL (ctxt);
68896 +    ELAN4_IPROC_TRAP *trap = &rail->r_iproc_trap;
68897 +
68898 +    elan4_extract_iproc_trap (ctxt->ctxt_dev, status, trap, unit);
68899 +
68900 +    if (epdebug & DBG_EPTRAP)
68901 +       elan4_display_iproc_trap (DBG_BUFFER, 0, "ep4_iproc_trap", trap);
68902 +       
68903 +    elan4_inspect_iproc_trap (trap);
68904 +
68905 +    switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType))
68906 +    {
68907 +    case InputDmaQueueOverflow:
68908 +       ep4_queue_dma_retry (rail, (E4_DMA *) &trap->tr_dataBuffers[trap->tr_trappedTrans], EP_RETRY_LOW_PRI);
68909 +       return;
68910 +
68911 +    case InputEventEngineTrapped:
68912 +    {
68913 +       E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans];
68914 +       sdramaddr_t         inputq;
68915 +       E4_Addr             event;
68916 +
68917 +       /* XXXX: flow control on the command queue which we issue to is 
68918 +        * rather difficult, we don't want to have space for an event 
68919 +        * for each possible context, nor the mechanism to hold the 
68920 +        * context filter up until the event has been executed.  Given
68921 +        * that the event engine will be restarted by this same interrupt
68922 +        * and we're using high priority command queues, then we just use
68923 +        * a single small command queue for this.
68924 +        */
68925 +       switch (IPROC_TransactionType(hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK)
68926 +       {
68927 +       case TR_SETEVENT & TR_OPCODE_MASK:
68928 +           if (hdrp->TrAddr != 0)
68929 +               ep4_set_event_cmd (rail->r_event_ecq, hdrp->TrAddr);
68930 +           return;
68931 +
68932 +       case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:
68933 +           if ((inputq = ep_elan2sdram (&rail->r_generic, hdrp->TrAddr)) == 0)
68934 +               printk ("%s: TR_INPUT_Q_COMMIT at %llx is not sdram\n", rail->r_generic.Name, hdrp->TrAddr);
68935 +           else
68936 +           {
68937 +               if ((event = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq + offsetof (E4_InputQueue, q_event))) != 0)
68938 +                   ep4_set_event_cmd (rail->r_event_ecq, event);
68939 +               return;
68940 +           }
68941 +       }
68942 +       break;
68943 +    }
68944 +
68945 +    case InputEopErrorOnWaitForEop:
68946 +    case InputEopErrorTrap:
68947 +    case InputCrcErrorAfterPAckOk:
68948 +       if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD))
68949 +           return;
68950 +       
68951 +       if (EP4_CONTEXT_ISDATA (IPROC_NetworkContext (status)))
68952 +       {
68953 +           unsigned int nodeId = EP4_CONTEXT_TO_NODE (IPROC_NetworkContext (status));
68954 +
68955 +           if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || 
68956 +               ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID)))
68957 +           {
68958 +               printk ("%s: network error on dma packet from node %d\n", rail->r_generic.Name, nodeId);
68959 +
68960 +               ep_queue_network_error (&rail->r_generic, EP4_CONTEXT_TO_NODE(IPROC_NetworkContext (status)), EP_NODE_NETERR_DMA_PACKET, unit & 1, 0);
68961 +               return;
68962 +           }
68963 +           
68964 +           if (trap->tr_flags & TR_FLAG_EOP_ERROR)
68965 +           {
68966 +               E4_uint64        status = trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType;
68967 +               EP_NETERR_COOKIE cookie = 0;
68968 +
68969 +               switch (IPROC_TransactionType (status) & TR_OPCODE_MASK)
68970 +               {
68971 +               case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK:
68972 +                   if (IPROC_TrapValue(status) == InputNoFault)
68973 +                       cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68974 +                   else
68975 +                       cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0];
68976 +                   printk ("%s: network error on setevent <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68977 +                   break;
68978 +
68979 +               case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:
68980 +                   if (IPROC_TrapValue(status) == InputNoFault)
68981 +                       cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68982 +                   else
68983 +                       cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0];
68984 +                   printk ("%s: network error on queue commit <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68985 +                   break;
68986 +                   
68987 +               case TR_REMOTEDMA & TR_OPCODE_MASK:
68988 +                   cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68989 +                   printk ("%s: network error on remote dma <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68990 +                   break;
68991 +
68992 +               case TR_IDENTIFY & TR_OPCODE_MASK:
68993 +                   cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
68994 +                   printk ("%s: network error on identify <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
68995 +                   break;
68996 +
68997 +               default:
68998 +                   panic ("%s: unknown identify transaction type %x for eop error from node %d\n", rail->r_generic.Name,
68999 +                           IPROC_TransactionType (trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType), nodeId);
69000 +                   break;
69001 +               }
69002 +
69003 +               ep_queue_network_error (&rail->r_generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, unit & 1, cookie);
69004 +           }
69005 +       }
69006 +       return;
69007 +    }
69008 +
69009 +    printk ("%s: unhandled iproc trap\n", rail->r_generic.Name);
69010 +    elan4_display_iproc_trap (DBG_CONSOLE, 0, "ep4_iproc_trap", trap);
69011 +}
69012 +
69013 +void
69014 +ep4_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
69015 +{
69016 +    EP4_RAIL      *rail = EP4_CTXT_TO_RAIL (ctxt);
69017 +    EP4_INTCOOKIE *cp  = ep4_lookup_intcookie (rail, cookie);
69018 +
69019 +    if (cp == NULL)
69020 +    {
69021 +       printk ("ep4_interrupt: cannot find event cookie for %016llx\n", (long long) cookie);
69022 +       return;
69023 +    }
69024 +
69025 +    cp->int_callback (rail, cp->int_arg);
69026 +}
69027 +
69028 +ELAN4_TRAP_OPS ep4_trap_ops = 
69029 +{
69030 +    ep4_eproc_trap,
69031 +    ep4_cproc_trap,
69032 +    ep4_dproc_trap,
69033 +    ep4_tproc_trap,
69034 +    ep4_iproc_trap,
69035 +    ep4_interrupt,
69036 +};
69037 +
69038 +void
69039 +ep4_flush_filters (EP_RAIL *r)
69040 +{
69041 +    /* nothing to do here as elan4_set_filter() flushes the context filter */
69042 +}
69043 +
69044 +struct flush_queues_desc
69045 +{
69046 +    EP4_RAIL      *rail;
69047 +    volatile int   done;
69048 +} ;
69049 +
69050 +static void
69051 +ep4_flush_queues_flushop (ELAN4_DEV *dev, void *arg, int qfull)
69052 +{
69053 +    struct flush_queues_desc *desc  = (struct flush_queues_desc *) arg;
69054 +    EP4_RAIL                 *rail  = desc->rail;
69055 +    E4_uint64                qptrs = read_reg64 (dev, DProcHighPriPtrs);
69056 +    E4_uint32                 qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
69057 +    E4_uint32                 qfptr = E4_QueueFrontPointer (qptrs);
69058 +    E4_uint32                 qbptr = E4_QueueBackPointer (qptrs);
69059 +    E4_DProcQueueEntry        qentry;
69060 +    unsigned long             flags;
69061 +
69062 +    while ((qfptr != qbptr) || qfull)
69063 +    {
69064 +       E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize));
69065 +       
69066 +	if (DMA_Context (typeSize) == rail->r_ctxt.ctxt_num)
69067 +       {
69068 +           E4_uint64     vp       = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc));
69069 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(vp)];
69070 +           
69071 +           EP4_ASSERT (rail, !EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
69072 +           
69073 +           if (EP_VP_ISDATA(vp) && nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
69074 +           {
69075 +               /*
69076 +                * This is a DMA going to the node which is being removed, 
69077 +                * so move it onto the node dma list where it will get
69078 +                * handled later.
69079 +                */
69080 +               qentry.Desc.dma_typeSize = typeSize;
69081 +               qentry.Desc.dma_cookie   = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie));
69082 +               qentry.Desc.dma_vproc    = vp;
69083 +               qentry.Desc.dma_srcAddr  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcAddr));
69084 +               qentry.Desc.dma_dstAddr  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstAddr));
69085 +               qentry.Desc.dma_srcEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcEvent));
69086 +               qentry.Desc.dma_dstEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstEvent));
69087 +               
69088 +               EPRINTF4 (DBG_RETRY, "ep4_flush_dmas: %016llx %016llx %016llx %016llx\n", qentry.Desc.dma_typeSize, 
69089 +                         qentry.Desc.dma_cookie, qentry.Desc.dma_vproc, qentry.Desc.dma_srcAddr);
69090 +               EPRINTF3 (DBG_RETRY, "                %016llx %016llx %016llx\n", qentry.Desc.dma_dstAddr, 
69091 +                         qentry.Desc.dma_srcEvent, qentry.Desc.dma_dstEvent);
69092 +               
69093 +               ep4_queue_dma_stalled (rail, &qentry.Desc);
69094 +               
69095 +               qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
69096 +               qentry.Desc.dma_cookie   = 0;
69097 +               qentry.Desc.dma_vproc    = 0;
69098 +               qentry.Desc.dma_srcAddr  = 0;
69099 +               qentry.Desc.dma_dstAddr  = 0;
69100 +               qentry.Desc.dma_srcEvent = 0;
69101 +               qentry.Desc.dma_dstEvent = 0;
69102 +               
69103 +               elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
69104 +           }
69105 +       }
69106 +
69107 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
69108 +       qfull = 0;
69109 +    }
69110 +
69111 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
69112 +    desc->done = 1;
69113 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
69114 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
69115 +}
69116 +
69117 +static void
69118 +ep4_flush_queues_haltop (ELAN4_DEV *dev, void *arg)
69119 +{
69120 +    struct flush_queues_desc *desc = (struct flush_queues_desc *) arg;
69121 +
69122 +    elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1);
69123 +}
69124 +
69125 +void
69126 +ep4_flush_queues (EP_RAIL *r)
69127 +{
69128 +    EP4_RAIL *rail = (EP4_RAIL *) r;
69129 +    struct flush_queues_desc desc;
69130 +    struct list_head *el, *nel;
69131 +    unsigned long flags;
69132 +    int i;
69133 +
69134 +    /* initialise descriptor */
69135 +    desc.rail  = rail;
69136 +    desc.done  = 0;
69137 +
69138 +    /* First -  stall the dma retry thread, so that it will no longer restart
69139 +     *          any dma's from the retry list */
69140 +    ep_kthread_stall (&rail->r_retry_thread);
69141 +
69142 +    /* Second - flush through all command queues targetted by events, thread etc */
69143 +    ep4_flush_ecqs (rail);
69144 +
69145 +    /* Third - queue a halt operation to flush through all DMA's which are executing
69146 +     *         or on the run queues */
69147 +    kmutex_lock (&rail->r_haltop_mutex);
69148 +
69149 +    rail->r_haltop.op_mask      = INT_DProcHalted;
69150 +    rail->r_haltop.op_function  = ep4_flush_queues_haltop;
69151 +    rail->r_haltop.op_arg       = &desc;
69152 +
69153 +    rail->r_flushop.op_function = ep4_flush_queues_flushop;
69154 +    rail->r_flushop.op_arg      = &desc;
69155 +    
69156 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
69157 +
69158 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
69159 +    while (! desc.done)
69160 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
69161 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
69162 +    kmutex_unlock (&rail->r_haltop_mutex);
69163 +
69164 +    /* Fourth - run down the dma retry lists and move all entries to the cancelled
69165 +     *          list.  Any dma's which were on the run queues have already been
69166 +     *          moved there */
69167 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69168 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
69169 +    {
69170 +       list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) {
69171 +           EP4_DMA_RETRY *retry    = list_entry (el, EP4_DMA_RETRY, retry_link);
69172 +           EP_NODE_RAIL  *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(retry->retry_dma.dma_vproc)];
69173 +
69174 +           EP4_ASSERT (rail, nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
69175 +
69176 +           if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
69177 +           {
69178 +               list_del (&retry->retry_link);
69179 +               list_add_tail (&retry->retry_link, &nodeRail->StalledDmas);
69180 +           }
69181 +       }
69182 +    }
69183 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69184 +    
69185 +    /* Finally - allow the retry thread to run again */
69186 +    ep_kthread_resume (&rail->r_retry_thread);
69187 +}
69188 +
69189 +struct write_qdesc_desc
69190 +{
69191 +    EP4_RAIL      *rail;
69192 +    sdramaddr_t    qaddr;
69193 +    E4_InputQueue *qdesc;
69194 +    volatile int   done;
69195 +} ;
69196 +
69197 +static void
69198 +ep4_write_qdesc_haltop (ELAN4_DEV *dev, void *arg)
69199 +{
69200 +    struct write_qdesc_desc *desc = (struct write_qdesc_desc *) arg;
69201 +    EP4_RAIL                *rail = desc->rail;
69202 +    unsigned long            flags;
69203 +
69204 +    elan4_sdram_copyq_to_sdram (dev, desc->qdesc, desc->qaddr, sizeof (E4_InputQueue));
69205 +
69206 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
69207 +    desc->done = 1;
69208 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
69209 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
69210 +}
69211 +
69212 +void
69213 +ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc)
69214 +{
69215 +    struct write_qdesc_desc desc;
69216 +    unsigned long flags;
69217 +
69218 +    /* initialise descriptor */
69219 +    desc.rail  = rail;
69220 +    desc.qaddr = qaddr;
69221 +    desc.qdesc = qdesc;
69222 +    desc.done  = 0;
69223 +
69224 +    kmutex_lock (&rail->r_haltop_mutex);
69225 +
69226 +    rail->r_haltop.op_mask     = INT_DiscardingHighPri;
69227 +    rail->r_haltop.op_function = ep4_write_qdesc_haltop;
69228 +    rail->r_haltop.op_arg      = &desc;
69229 +    
69230 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
69231 +
69232 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
69233 +    while (! desc.done)
69234 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
69235 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
69236 +    
69237 +    kmutex_unlock (&rail->r_haltop_mutex);
69238 +}
69239 +#define CQ_SIZE_NWORDS ((CQ_Size (ecq->ecq_cq->cq_size) >> 3) - 8)     /* available number of dwords (less enough to flush) */
69240 +EP4_ECQ *
69241 +ep4_alloc_ecq (EP4_RAIL *rail, unsigned cqsize)
69242 +{
69243 +    EP4_ECQ *ecq;
69244 +    unsigned long pgoff;
69245 +
69246 +    /* no space available, so allocate a new entry */
69247 +    KMEM_ZALLOC (ecq, EP4_ECQ *, sizeof (EP4_ECQ), 1);
69248 +
69249 +    if (ecq == NULL)
69250 +       return 0;
69251 +
69252 +    if ((ecq->ecq_cq = elan4_alloccq (&rail->r_ctxt, cqsize, CQ_EnableAllBits, CQ_Priority)) == NULL)
69253 +    {
69254 +       KMEM_FREE (ecq, sizeof (EP4_ECQ));
69255 +       return 0;
69256 +    }
69257 +
69258 +    pgoff = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
69259 +
69260 +    ecq->ecq_addr  = ep_rmalloc (rail->r_ecq_rmap, PAGESIZE, 0) + pgoff;
69261 +    ecq->ecq_avail = CQ_SIZE_NWORDS;                   /* available number of dwords (less enough to flush) */
69262 +
69263 +    ecq->ecq_intop.op_function = (ELAN4_HALTFN *) elan4_restartcq;
69264 +    ecq->ecq_intop.op_arg      = ecq->ecq_cq;
69265 +
69266 +    ep4_ioaddr_map (&rail->r_generic, ecq->ecq_addr - pgoff, ecq->ecq_cq->cq_mapping - pgoff, PAGESIZE, EP_PERM_WRITE);
69267 +
69268 +    spin_lock_init (&ecq->ecq_lock);
69269 +
69270 +    return ecq;
69271 +}
69272 +
69273 +void
69274 +ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq)
69275 +{
69276 +    unsigned long pgoff = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
69277 +
69278 +    spin_lock_destroy (&ecq->ecq_lock);
69279 +
69280 +    ep4_unmap (&rail->r_generic, ecq->ecq_addr - pgoff, PAGESIZE);
69281 +    ep_rmfree (rail->r_ecq_rmap, PAGESIZE, ecq->ecq_addr - pgoff);
69282 +
69283 +    elan4_freecq (&rail->r_ctxt, ecq->ecq_cq);
69284 +    
69285 +    KMEM_FREE (ecq, sizeof (EP4_ECQ));
69286 +}
69287 +
69288 +EP4_ECQ *
69289 +ep4_get_ecq (EP4_RAIL *rail, unsigned which, unsigned ndwords)
69290 +{
69291 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
69292 +    struct list_head *el;
69293 +    unsigned long flags;
69294 +    EP4_ECQ *ecq;
69295 +    
69296 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
69297 +    list_for_each (el, &rail->r_ecq_list[which]) {
69298 +       EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
69299 +
69300 +       if (ecq->ecq_avail >= ndwords)
69301 +       {
69302 +           ecq->ecq_avail -= ndwords;
69303 +
69304 +           spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69305 +
69306 +           return ecq;
69307 +       }
69308 +    }
69309 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69310 +
69311 +    if ((ecq = ep4_alloc_ecq (rail, EP4_ECQ_Size (which))) == NULL)
69312 +       return NULL;
69313 +
69314 +    if (which == EP4_ECQ_EVENT)
69315 +    {
69316 +       if ((ecq->ecq_event = ep_alloc_elan (&rail->r_generic, sizeof (E4_Event32), 0, &ecq->ecq_event_addr)) == 0)
69317 +       {
69318 +           ep4_free_ecq (rail, ecq);
69319 +           return NULL;
69320 +       }
69321 +       
69322 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
69323 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
69324 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr),
69325 +                           ecq->ecq_addr);
69326 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue),
69327 +                           SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event)));
69328 +       
69329 +       if ((ecq->ecq_flushcq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
69330 +       {
69331 +           ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
69332 +           ep4_free_ecq (rail, ecq);
69333 +           return NULL;
69334 +       }
69335 +    }
69336 +
69337 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
69338 +    list_add (&ecq->ecq_link, &rail->r_ecq_list[which]);
69339 +
69340 +    ecq->ecq_avail -= ndwords;
69341 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69342 +
69343 +    return ecq;
69344 +}
69345 +
69346 +void
69347 +ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned ndwords)
69348 +{
69349 +    unsigned long flags;
69350 +
69351 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
69352 +
69353 +    ecq->ecq_avail += ndwords;
69354 +    
69355 +    if (ecq->ecq_avail !=  CQ_SIZE_NWORDS) 
69356 +       spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69357 +    else
69358 +    {
69359 +       list_del (&ecq->ecq_link);
69360 +       spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69361 +       
69362 +       if (ecq->ecq_flushcq)
69363 +           ep4_put_ecq (rail, ecq->ecq_flushcq, 1);
69364 +       if (ecq->ecq_event_addr)
69365 +           ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
69366 +
69367 +       ep4_free_ecq (rail, ecq);
69368 +    }
69369 +}
69370 +
69371 +void
69372 +ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag)
69373 +{
69374 +    unsigned long flags;
69375 +
69376 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
69377 +    elan4_nop_cmd (ecq->ecq_cq, tag);
69378 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
69379 +    
69380 +}
69381 +
69382 +void
69383 +ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event)
69384 +{
69385 +    unsigned long flags;
69386 +
69387 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
69388 +    elan4_set_event_cmd (ecq->ecq_cq, event);
69389 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
69390 +}
69391 +
69392 +void
69393 +ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1)
69394 +{
69395 +    unsigned long flags;
69396 +
69397 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
69398 +    elan4_wait_event_cmd (ecq->ecq_cq, event, candt, param0, param1);
69399 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
69400 +}
69401 +
69402 +void
69403 +ep4_flush_interrupt (EP4_RAIL *rail, void *arg)
69404 +{
69405 +    unsigned long flags;
69406 +
69407 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
69408 +    rail->r_flush_count = 0;
69409 +    kcondvar_wakeupone (&rail->r_flush_sleep, &rail->r_ecq_lock);
69410 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69411 +}
69412 +
69413 +void
69414 +ep4_flush_ecqs (EP4_RAIL *rail)
69415 +{
69416 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
69417 +    struct list_head *el;
69418 +    unsigned long flags;
69419 +    int i;
69420 +
69421 +    kmutex_lock (&rail->r_flush_mutex);
69422 +
69423 +    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
69424 +
69425 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
69426 +    /* first flush all the "event" queues */
69427 +    list_for_each (el, &rail->r_ecq_list[EP4_ECQ_EVENT]) {
69428 +       EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
69429 +
69430 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
69431 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
69432 +
69433 +       ep4_set_event_cmd (ecq->ecq_flushcq, ecq->ecq_event_addr);
69434 +
69435 +       rail->r_flush_count++;
69436 +    }
69437 +
69438 +    /* next issue the setevents to all the other queues */
69439 +    for (i = EP4_ECQ_ATOMIC; i <EP4_NUM_ECQ; i++)
69440 +    {
69441 +       list_for_each (el,&rail->r_ecq_list[i]) {
69442 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
69443 +
69444 +           ep4_set_event_cmd (ecq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event));
69445 +
69446 +           rail->r_flush_count++;
69447 +       }
69448 +    }
69449 +
69450 +    /* issue the waitevent command */
69451 +    ep4_wait_event_cmd (rail->r_flush_mcq,  rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event),
69452 +                       E4_EVENT_INIT_VALUE (-32 * rail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0),
69453 +                       rail->r_flush_ecq->ecq_addr,
69454 +                       INTERRUPT_CMD | (rail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT));
69455 +    
69456 +    while (rail->r_flush_count)
69457 +       kcondvar_wait (&rail->r_flush_sleep, &rail->r_ecq_lock, &flags);
69458 +    
69459 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69460 +
69461 +    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
69462 +
69463 +    kmutex_unlock (&rail->r_flush_mutex);
69464 +}
69465 +
69466 +void
69467 +ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop, 
69468 +                EP_ADDR stackAddr, E4_Addr startpc, int nargs,...)
69469 +{
69470 +    sdramaddr_t   sp = stackTop - roundup (nargs * sizeof (E4_uint64), E4_STACK_ALIGN);
69471 +    int           i;
69472 +    va_list       ap;
69473 +    
69474 +    /*
69475 +     * the thread start code expects the following :
69476 +     *   %r1 = stack pointer
69477 +     *   %r6 = frame pointer
69478 +     *   %r2 = function to call
69479 +     *
69480 +     *   function args are store on stack above %sp
69481 +     */
69482 +
69483 +    va_start(ap, nargs);
69484 +    for (i = 0; i < nargs; i++)
69485 +       elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, sp + (i * sizeof (E4_uint64)), va_arg (ap, E4_uint64));
69486 +    va_end (ap);
69487 +    
69488 +    regs->Registers[0] = ep_symbol (&rail->r_threadcode, ".thread_start");             /* %r0 - PC */
69489 +    regs->Registers[1] = stackAddr - (stackTop - sp);                                  /* %r1 - stack pointer */
69490 +    regs->Registers[2] = startpc;                                                      /* %r2 - start pc */
69491 +    regs->Registers[3] = 0;
69492 +    regs->Registers[4] = 0;
69493 +    regs->Registers[5] = 0;
69494 +    regs->Registers[6] = stackTop;                                                     /* %r6 - frame pointer */ 
69495 +}
69496 +
69497 +/* retransmission thread */
69498 +
69499 +void
69500 +ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
69501 +{
69502 +    ep_kthread_stall (&rail->r_retry_thread);
69503 +    list_add_tail (&ops->op_link, &rail->r_retry_ops);
69504 +    ep_kthread_resume (&rail->r_retry_thread);
69505 +}
69506 +
69507 +void
69508 +ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
69509 +{
69510 +    ep_kthread_stall (&rail->r_retry_thread);
69511 +    list_del (&ops->op_link);
69512 +    ep_kthread_resume (&rail->r_retry_thread);
69513 +}
69514 +
69515 +void
69516 +ep4_retry_thread (EP4_RAIL *rail)
69517 +{
69518 +    struct list_head *el;
69519 +
69520 +    kernel_thread_init ("ep4_retry");
69521 +    
69522 +    for (;;)
69523 +    {
69524 +       long nextRunTime = 0;
69525 +
69526 +       list_for_each (el, &rail->r_retry_ops) {
69527 +           EP4_RETRY_OPS *ops = list_entry (el, EP4_RETRY_OPS, op_link);
69528 +
69529 +           nextRunTime = ops->op_func (rail, ops->op_arg, nextRunTime);
69530 +       }
69531 +
69532 +       if (ep_kthread_sleep (&rail->r_retry_thread, nextRunTime) < 0)
69533 +           break;
69534 +    }
69535 +
69536 +    ep_kthread_stopped (&rail->r_retry_thread);
69537 +
69538 +    kernel_thread_exit();
69539 +}
69540 +
69541 +/* DMA retransmission */
69542 +static unsigned ep4_dma_retry_times[EP_NUM_RETRIES];
69543 +
69544 +static unsigned long
69545 +ep4_retry_dmas (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
69546 +{
69547 +    unsigned long yieldAt = lbolt + (hz/10);
69548 +    unsigned long flags;
69549 +    int           i;
69550 +
69551 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
69552 +    {
69553 +       while (! list_empty (&rail->r_dma_retrylist[i]))
69554 +       {
69555 +           EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
69556 +           
69557 +           if (! AFTER(lbolt, retry->retry_time))
69558 +               break;
69559 +
69560 +           if (ep_kthread_should_stall (&rail->r_retry_thread) || AFTER (lbolt, yieldAt))
69561 +               goto cant_do_more;
69562 +           
69563 +           EPRINTF3 (DBG_RETRY, "%s: ep4_retry_dmas: flowcnt %llx %llx\n", rail->r_generic.Name, rail->r_dma_flowcnt, rail->r_main->r_dma_flowcnt);
69564 +
69565 +           if ((rail->r_dma_flowcnt - rail->r_main->r_dma_flowcnt) > EP4_DMA_RETRY_FLOWCNT)
69566 +           {
69567 +               printk ("ep4_retry_dmas: flowcnt %llx %llx\n", rail->r_dma_flowcnt, rail->r_main->r_dma_flowcnt);
69568 +
69569 +               goto cant_do_more;
69570 +           }
69571 +
69572 +           EPRINTF4 (DBG_RETRY, "%s: ep4_retry_dmas: %016llx %016llx %016llx\n", rail->r_generic.Name,
69573 +                     retry->retry_dma.dma_typeSize, retry->retry_dma.dma_cookie, retry->retry_dma.dma_vproc);
69574 +           EPRINTF5 (DBG_RETRY, "%s:                  %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
69575 +                     retry->retry_dma.dma_srcAddr, retry->retry_dma.dma_dstAddr, retry->retry_dma.dma_srcEvent, 
69576 +                     retry->retry_dma.dma_dstEvent);
69577 +
69578 +           elan4_run_dma_cmd (rail->r_dma_ecq->ecq_cq, &retry->retry_dma);
69579 +           elan4_write_dword_cmd (rail->r_dma_ecq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_dma_flowcnt), ++rail->r_dma_flowcnt);
69580 +
69581 +           spin_lock_irqsave (&rail->r_dma_lock, flags);
69582 +           list_del (&retry->retry_link);
69583 +           list_add (&retry->retry_link, &rail->r_dma_freelist);
69584 +           spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69585 +       }
69586 +    }
69587 + cant_do_more:
69588 +
69589 +    /* re-compute the next retry time */
69590 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
69591 +    {
69592 +       if (! list_empty (&rail->r_dma_retrylist[i]))
69593 +       {
69594 +           EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
69595 +
69596 +           SET_NEXT_RUN_TIME (nextRunTime, retry->retry_time);
69597 +       }
69598 +    }
69599 +
69600 +    return nextRunTime;
69601 +}
69602 +
69603 +void
69604 +ep4_initialise_dma_retries (EP4_RAIL *rail)
69605 +{
69606 +    int i;
69607 +
69608 +    spin_lock_init (&rail->r_dma_lock);
69609 +    
69610 +    for (i = 0; i < EP_NUM_RETRIES; i++)
69611 +       INIT_LIST_HEAD (&rail->r_dma_retrylist[i]);
69612 +    
69613 +    INIT_LIST_HEAD (&rail->r_dma_freelist);
69614 +    
69615 +    rail->r_dma_ecq = ep4_alloc_ecq (rail, EP4_DMA_RETRY_CQSIZE);
69616 +    
69617 +    rail->r_dma_allocated = 0;
69618 +    rail->r_dma_reserved  = 0;
69619 +
69620 +    ep4_dma_retry_times[EP_RETRY_HIGH_PRI] = EP_RETRY_HIGH_PRI_TIME;
69621 +
69622 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
69623 +       ep4_dma_retry_times[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i;
69624 +    
69625 +    ep4_dma_retry_times[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME;
69626 +
69627 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
69628 +       ep4_dma_retry_times[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i;
69629 +    
69630 +    ep4_dma_retry_times[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME;
69631 +    ep4_dma_retry_times[EP_RETRY_NETERR]    = EP_RETRY_NETERR_TIME;
69632 +
69633 +    rail->r_dma_ops.op_func = ep4_retry_dmas;
69634 +    rail->r_dma_ops.op_arg  = NULL;
69635 +
69636 +    ep4_add_retry_ops (rail, &rail->r_dma_ops);
69637 +}
69638 +
69639 +void
69640 +ep4_finalise_dma_retries (EP4_RAIL *rail)
69641 +{
69642 +    ep4_remove_retry_ops (rail, &rail->r_dma_ops);
69643 +
69644 +    /* Everyone should have given back their retry dma's by now */
69645 +    EP4_ASSERT (rail, rail->r_dma_reserved == 0);
69646 +
69647 +    while (! list_empty (&rail->r_dma_freelist))
69648 +    {
69649 +       EP4_DMA_RETRY *retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
69650 +
69651 +       list_del (&retry->retry_link);
69652 +
69653 +       KMEM_FREE (retry, sizeof (EP4_DMA_RETRY));
69654 +    }
69655 +
69656 +    ep4_free_ecq (rail, rail->r_dma_ecq);
69657 +
69658 +    spin_lock_destroy (&rail->r_dma_lock);
69659 +}
69660 +
69661 +int
69662 +ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, EP_ATTRIBUTE attr)
69663 +{
69664 +    EP4_DMA_RETRY *retry;
69665 +    unsigned int   remaining = count;
69666 +    unsigned long  flags;
69667 +
69668 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69669 +
69670 +    if (remaining <= (rail->r_dma_allocated - rail->r_dma_reserved))
69671 +    {
69672 +       rail->r_dma_reserved += remaining;
69673 +
69674 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69675 +
69676 +       return 0;
69677 +    }
69678 +    
69679 +    remaining -= (rail->r_dma_allocated - rail->r_dma_reserved);
69680 +
69681 +    rail->r_dma_reserved = rail->r_dma_allocated;
69682 +
69683 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69684 +
69685 +    while (remaining > 0)
69686 +    {
69687 +       KMEM_ALLOC (retry, EP4_DMA_RETRY *, sizeof (EP4_DMA_RETRY), !(attr & EP_NO_SLEEP));
69688 +
69689 +       if (retry == NULL)
69690 +           goto failed;
69691 +       
69692 +       remaining--;
69693 +
69694 +       spin_lock_irqsave (&rail->r_dma_lock, flags);
69695 +       list_add (&retry->retry_link, &rail->r_dma_freelist);
69696 +
69697 +       rail->r_dma_allocated++;
69698 +       rail->r_dma_reserved++;
69699 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69700 +    }
69701 +
69702 +    return 0;
69703 +
69704 + failed:
69705 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69706 +    rail->r_dma_reserved -= (count - remaining);
69707 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69708 +
69709 +    return 1;
69710 +}
69711 +
69712 +void
69713 +ep4_release_dma_retries (EP4_RAIL *rail, unsigned int count)
69714 +{
69715 +    unsigned long flags;
69716 +
69717 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69718 +    rail->r_dma_reserved -= count;
69719 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69720 +}
69721 +
69722 +void
69723 +ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval)
69724 +{
69725 +    EP4_DMA_RETRY *retry;
69726 +    unsigned long  flags;
69727 +    
69728 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69729 +
69730 +    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
69731 +    
69732 +    /* take an item of the free list */
69733 +    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
69734 +
69735 +    list_del (&retry->retry_link);
69736 +    
69737 +    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_retry: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
69738 +             dma->dma_typeSize, dma->dma_cookie, dma->dma_vproc, dma->dma_srcAddr);
69739 +    EPRINTF5 (DBG_RETRY, "%s:                      %016llx %016llx %016llx (%d)\n", rail->r_generic.Name,
69740 +             dma->dma_dstAddr, dma->dma_srcEvent, dma->dma_dstEvent, interval);
69741 +
69742 +    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
69743 +    retry->retry_dma.dma_cookie   = dma->dma_cookie;
69744 +    retry->retry_dma.dma_vproc    = dma->dma_vproc;
69745 +    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
69746 +    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
69747 +    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
69748 +    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
69749 +
69750 +    retry->retry_time             = lbolt + ep4_dma_retry_times[interval];
69751 +
69752 +    /* chain onto the end of the approriate retry list */
69753 +    list_add_tail (&retry->retry_link, &rail->r_dma_retrylist[interval]);
69754 +
69755 +    ep_kthread_schedule (&rail->r_retry_thread, retry->retry_time);
69756 +
69757 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69758 +}
69759 +
69760 +void
69761 +ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma)
69762 +{
69763 +    EP_NODE_RAIL  *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(dma->dma_vproc)];
69764 +    EP4_DMA_RETRY *retry;
69765 +    unsigned long  flags;
69766 +    
69767 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69768 +
69769 +    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
69770 +    
69771 +    /* take an item of the free list */
69772 +    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
69773 +
69774 +    list_del (&retry->retry_link);
69775 +    
69776 +    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_stalled: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
69777 +             dma->dma_typeSize, dma->dma_cookie, dma->dma_vproc, dma->dma_srcAddr);
69778 +    EPRINTF4 (DBG_RETRY, "%s:                        %016llx %016llx %016llx\n", rail->r_generic.Name,
69779 +             dma->dma_dstAddr, dma->dma_srcEvent, dma->dma_dstEvent);
69780 +
69781 +    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
69782 +    retry->retry_dma.dma_cookie   = dma->dma_cookie;
69783 +    retry->retry_dma.dma_vproc    = dma->dma_vproc;
69784 +    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
69785 +    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
69786 +    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
69787 +    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
69788 +
69789 +    /* chain onto the node cancelled dma list */
69790 +    list_add_tail (&retry->retry_link, &nodeRail->StalledDmas);
69791 +
69792 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69793 +}
69794 +
69795 +void
69796 +ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId)
69797 +{
69798 +    EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[nodeId];
69799 +    struct list_head *el, *nel;
69800 +    unsigned long flags;
69801 +
69802 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69803 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
69804 +       list_del (el);
69805 +       list_add (el, &rail->r_dma_freelist);
69806 +    }
69807 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69808 +}
69809 +
69810 +void
69811 +ep4_display_rail (EP4_RAIL *rail)
69812 +{
69813 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
69814 +    struct list_head *el;
69815 +    register int      i;
69816 +    unsigned long     flags;
69817 +
69818 +    ep_debugf (DBG_DEBUG, "%s: vendorid=%x deviceid=%x\n", rail->r_generic.Name, 
69819 +              rail->r_generic.Devinfo.dev_vendor_id, rail->r_generic.Devinfo.dev_device_id);
69820 +
69821 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
69822 +    for (i = 0; i < EP4_NUM_ECQ; i++)
69823 +    {
69824 +       list_for_each (el, &rail->r_ecq_list[i]) {
69825 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
69826 +           
69827 +           if (i == EP4_ECQ_EVENT)
69828 +               ep_debugf (DBG_DEBUG, "   ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d event=%llx,%llx,%llx\n",
69829 +                          i, ecq, elan4_cq2num (ecq->ecq_cq), ecq->ecq_addr, ecq->ecq_avail,
69830 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType)),
69831 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue)),
69832 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr)));
69833 +
69834 +           else
69835 +               ep_debugf (DBG_DEBUG, "   ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d\n",
69836 +                          i, ecq, elan4_cq2num (ecq->ecq_cq), ecq->ecq_addr, ecq->ecq_avail);
69837 +       }
69838 +    }
69839 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
69840 +
69841 +    ep_debugf (DBG_DEBUG, "   flush count=%ld mcq=%p ecq=%p event %llx.%llx.%llx\n", 
69842 +              rail->r_flush_count, rail->r_flush_mcq, rail->r_flush_ecq,
69843 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType)),
69844 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WritePtr)),
69845 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WriteValue)));
69846 +    
69847 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
69848 +    for (i = 0; i < EP_NUM_RETRIES; i++)
69849 +    {
69850 +       list_for_each (el, &rail->r_dma_retrylist[i]) {
69851 +           EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link);
69852 +           
69853 +           ep_debugf (DBG_DEBUG, "    RETRY[%d] typeSize %llx cookie %llx vproc %llx events %llx %llx\n",
69854 +                      i, retry->retry_dma.dma_typeSize, retry->retry_dma.dma_cookie,
69855 +                      retry->retry_dma.dma_vproc, retry->retry_dma.dma_srcEvent, retry->retry_dma.dma_dstEvent);
69856 +       }
69857 +    }
69858 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
69859 +}
69860 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode.c
69861 ===================================================================
69862 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/threadcode.c    2004-02-23 16:02:56.000000000 -0500
69863 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode.c 2005-07-28 14:52:52.911669160 -0400
69864 @@ -0,0 +1,146 @@
69865 +/*
69866 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
69867 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
69868 + *
69869 + *    For licensing information please see the supplied COPYING file
69870 + *
69871 + */
69872 +
69873 +#ident "@(#)$Id: threadcode.c,v 1.11 2003/10/07 13:22:38 david Exp $"
69874 +/*      $Source: /cvs/master/quadrics/epmod/threadcode.c,v $ */
69875 +
69876 +#include <qsnet/kernel.h>
69877 +
69878 +#include <elan/kcomm.h>
69879 +
69880 +EP_ADDR
69881 +ep_symbol (EP_CODE *code, char *name)
69882 +{
69883 +    EP_SYMBOL *s = code->symbols;
69884 +    
69885 +    while (s->name && strcmp (s->name, name))
69886 +       s++;
69887 +    
69888 +    return (s->name ? s->value : (EP_ADDR) 0);
69889 +}
69890 +
69891 +int
69892 +ep_loadcode (EP_RAIL *rail, EP_CODE *code)
69893 +{
69894 +    register int i;
69895 +
69896 +    EP_ADDR  _stext  = ep_symbol (code, "_stext");
69897 +    EP_ADDR  _etext  = ep_symbol (code, "_etext");
69898 +    EP_ADDR  _sdata  = ep_symbol (code, "_sdata");
69899 +    EP_ADDR  _edata  = ep_symbol (code, "_edata");
69900 +    EP_ADDR  _end    = ep_symbol (code, "_end");
69901 +    EP_ADDR  _rodata = roundup (_etext, sizeof (uint64_t));
69902 +
69903 +    if (_stext == (EP_ADDR) 0 || _etext == (EP_ADDR) 0 ||
69904 +       _sdata == (EP_ADDR) 0 || _edata == (EP_ADDR) 0 ||
69905 +       _end == (EP_ADDR) 0)
69906 +    {
69907 +       printk ("ep_loadcode: symbols not defined correctly for code at %p\n", code);
69908 +       return (EINVAL);
69909 +    }
69910 +
69911 +    /*
69912 +     * Include the rodata in the text segment
69913 +     */
69914 +    _etext = _rodata + code->rodata_size;
69915 +
69916 +    /*
69917 +     * If _etext is in the same page as _sdata,  then allocate a contiguous
69918 +     * chunk of memory and map it as read/write. otherwise allocate two chunks
69919 +     * and map the code in as read-only.
69920 +     */
69921 +    if ((_etext & PAGEMASK) == (_sdata & PAGEMASK))
69922 +    {
69923 +       code->ntext  = btopr (_end - (_stext & PAGEMASK));
69924 +       code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0);
69925 +
69926 +       if (code->pptext == (sdramaddr_t) 0)
69927 +           return (ENOMEM);
69928 +       
69929 +       code->_stext  = code->pptext + (_stext & PAGEOFFSET);
69930 +       code->_rodata = code->_stext + (_rodata - _stext);
69931 +       code->_sdata  = code->_stext + (_sdata - _stext);
69932 +    }
69933 +    else
69934 +    {
69935 +       code->ntext  = btopr (_etext - (_stext & PAGEMASK));
69936 +       code->ndata  = btopr (_end - (_sdata & PAGEMASK));
69937 +
69938 +       if (code->ntext)
69939 +       {
69940 +           code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0);
69941 +
69942 +           if (code->pptext == (sdramaddr_t) 0)
69943 +               return (ENOMEM);
69944 +
69945 +           code->_stext  = code->pptext + (_stext & PAGEOFFSET);
69946 +           code->_rodata = code->_stext + (_rodata - _stext);
69947 +       }
69948 +       
69949 +       if (code->ndata)
69950 +       {
69951 +           code->ppdata = ep_alloc_memory_elan (rail, _sdata & PAGEMASK, ptob (code->ndata), EP_PERM_WRITE, 0);
69952 +
69953 +           if (code->ppdata == (sdramaddr_t) 0)
69954 +           {
69955 +               if (code->ntext) ep_free_memory_elan (rail, _sdata & PAGEMASK);
69956 +               code->ntext = 0;
69957 +
69958 +               return (ENOMEM);
69959 +           }
69960 +           
69961 +           code->_sdata = code->ppdata + (_sdata & PAGEOFFSET);
69962 +       }
69963 +    }
69964 +    
69965 +#ifdef __LITTLE_ENDIAN__
69966 +#  define Flip 3
69967 +#else
69968 +#  define Flip  0
69969 +#endif
69970 +
69971 +    /*
69972 +     * Now copy the text and rodata into the SDRAM
69973 +     * this is linked into the module to be byte 
69974 +     * copied to the SDRAM, since we want to copy
69975 +     * with word accesses we have to do the byte
69976 +     * assembly correctly.
69977 +     */
69978 +    for (i = 0; i < code->text_size; i++)
69979 +       rail->Operations.SdramWriteb (rail, code->_stext + i, code->text[i^Flip]);
69980 +
69981 +    for (i = 0; i < code->rodata_size; i++)
69982 +       rail->Operations.SdramWriteb (rail, code->_rodata + i, code->rodata[i^Flip]);
69983 +    
69984 +    /*
69985 +     * And the initialised data segment.
69986 +     */
69987 +    for (i = 0; i < code->data_size; i++)
69988 +       rail->Operations.SdramWriteb (rail, code->_sdata + i, code->data[i^Flip]);
69989 +
69990 +    return (ESUCCESS);
69991 +}
69992 +
69993 +void
69994 +ep_unloadcode (EP_RAIL *rail, EP_CODE *code)
69995 +{
69996 +    EP_ADDR  _stext = ep_symbol (code, "_stext");
69997 +    EP_ADDR  _sdata = ep_symbol (code, "_sdata");
69998 +
69999 +    if (code->pptext)
70000 +       ep_free_memory_elan (rail, _stext & PAGEMASK);
70001 +    if (code->ppdata)
70002 +       ep_free_memory_elan (rail, _sdata & PAGEMASK);
70003 +    code->pptext = code->ppdata = 0;
70004 +}
70005 +
70006 +/*
70007 + * Local variables:
70008 + * c-file-style: "stroustrup"
70009 + * End:
70010 + */
70011 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode_elan3.c
70012 ===================================================================
70013 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/threadcode_elan3.c      2004-02-23 16:02:56.000000000 -0500
70014 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode_elan3.c   2005-07-28 14:52:52.911669160 -0400
70015 @@ -0,0 +1,85 @@
70016 +/*
70017 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70018 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
70019 + *
70020 + *    For licensing information please see the supplied COPYING file
70021 + *
70022 + */
70023 +
70024 +#ident "@(#)$Id: threadcode_elan3.c,v 1.11 2003/10/07 13:22:38 david Exp $"
70025 +/*      $Source: /cvs/master/quadrics/epmod/threadcode_elan3.c,v $ */
70026 +
70027 +#include <qsnet/kernel.h>
70028 +
70029 +#include <elan/kcomm.h>
70030 +
70031 +#include "kcomm_elan3.h"
70032 +#include "debug.h"
70033 +
70034 +#include <elan3/thread.h>
70035 +
70036 +E3_Addr
70037 +ep3_init_thread (ELAN3_DEV  *dev,
70038 +                E3_Addr     fn,                                /* Elan address of function */
70039 +                E3_Addr     addr,                              /* Elan address of stack */
70040 +                sdramaddr_t stack,                             /* sdram address of stack */
70041 +                int           stackSize,                       /* stack size (in bytes) */
70042 +                int           nargs,
70043 +                ...)
70044 +{
70045 +    sdramaddr_t  frame;
70046 +    sdramaddr_t  regs;
70047 +    sdramaddr_t  argsp;
70048 +    int                 i;
70049 +    va_list      ap;
70050 +
70051 +    /*
70052 +     * Align the stack pointer at the top of the stack and leave space for a stack frame
70053 +     */
70054 +    stack = ((stack + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame);
70055 +    addr  = ((addr  + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame);
70056 +
70057 +    va_start (ap, nargs);
70058 +
70059 +    if (nargs > 6)
70060 +    {
70061 +       stack -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1));
70062 +       addr  -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1));
70063 +    }
70064 +    
70065 +    frame  = stack;
70066 +    regs   = stack - sizeof (E3_OutsRegs);
70067 +
70068 +    /*
70069 +     * Initialise the registers, and stack frame.
70070 +     */
70071 +    elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[6]), fn);
70072 +    elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[7]), 0);
70073 +    
70074 +    if (nargs <= 6)
70075 +    {
70076 +       for (i = 0; i < nargs; i++)
70077 +           elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32));
70078 +    }
70079 +    else
70080 +    {
70081 +       for (i = 0; i < 6; i++)
70082 +           elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32));
70083 +       
70084 +       for (argsp = frame + offsetof (E3_Frame, fr_argx[0]); i < nargs; i++, argsp += sizeof (E3_uint32))
70085 +           elan3_sdram_writel (dev, argsp, va_arg (ap, int));
70086 +    }
70087 +
70088 +    elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savefp), 0);
70089 +    elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savepc), 0);
70090 +
70091 +    va_end (ap);
70092 +
70093 +    return (addr);
70094 +}
70095 +
70096 +/*
70097 + * Local variables:
70098 + * c-file-style: "stroustrup"
70099 + * End:
70100 + */
70101 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode_elan3_Linux.c
70102 ===================================================================
70103 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/threadcode_elan3_Linux.c        2004-02-23 16:02:56.000000000 -0500
70104 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode_elan3_Linux.c     2005-07-28 14:52:52.912669008 -0400
70105 @@ -0,0 +1,112 @@
70106 +/* --------------------------------------------------------*/
70107 +/* MACHINE GENERATED ELAN CODE                             */
70108 +#include <qsnet/kernel.h>
70109 +#include <elan/kcomm.h>
70110 +#include "kcomm_elan3.h"
70111 +static uint32_t threadcode_elan3_text[] = {
70112 +0x80a0239c, 0x00001082, 0x00e0a280, 0x47008002, 0x0020a380, 0x20600288, 0x20200286, 0x43008002, 
70113 +0x00000001, 0x0a006081, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
70114 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
70115 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
70116 +0x00000001, 0x00000001, 0xa800c613, 0xa300c609, 0x0020108a, 0x0080900b, 0x00006885, 0x0580a080, 
70117 +0x06008002, 0x02a0a080, 0x06008022, 0xffff0296, 0x04008010, 0xff3f0398, 0x1f008010, 0x00201090, 
70118 +0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x00c03f3f, 
70119 +0xf8e017be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, 
70120 +0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x00e0a280, 
70121 +0xbfffbf12, 0x0020a380, 0x03008012, 0x02201090, 0x03201090, 0x08e0c381, 0x80a0039c, 0xe0a0239c, 
70122 +0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x080010b8, 0x090010b0, 0x0a0010b2, 0x04000037, 0x402006b4, 
70123 +0x50200690, 0x01201092, 0x20a0239c, 0x00a0a3f0, 0x00c03f3f, 0x8ce117be, 0x04e08f80, 0x06008012, 
70124 +0x00000001, 0x00c01ff8, 0x0000b081, 0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, 
70125 +0x00a083f0, 0x20a0039c, 0x582006d0, 0x0020a280, 0x05008002, 0x0900a280, 0x10008002, 0x50200690, 
70126 +0xeaffbf30, 0x5c2006d4, 0x18001090, 0x19001092, 0x1b800294, 0x0a201096, 0x8affff7f, 0x05201098, 
70127 +0x446026d0, 0x302027f4, 0xdfffbf10, 0x50200690, 0xfdffbf10, 0x446026c0, 0x5c2006e0, 0x0020a480, 
70128 +0xf9ffbf06, 0x18001090, 0x19001092, 0x1b000494, 0x14201096, 0x7bffff7f, 0x0a201098, 0x0020a280, 
70129 +0xf4ffbf22, 0x486026e0, 0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 
70130 +0x40a0a3e0, 0x00c03f3f, 0x60e217be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, 
70131 +0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, 
70132 +0x60a0039c, 0xff3f84a0, 0xe0ffbf1c, 0x18001090, 0xd5ffbf30, 0x60a003de, 0x80a083e0, 0xa0a083f0, 
70133 +0x08e0c381, 0xe0a0039c, 0x00a1239c, 0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x44a123d0, 0x090010b0, 
70134 +0x0a0010b6, 0x0b0010b8, 0x0c0010b4, 0x012010ba, 0xdca023fa, 0x142007d2, 0x082007d0, 0x084002b2, 
70135 +0x000027c0, 0xf42006d0, 0x0020a280, 0x15008032, 0xf42006d0, 0x18200790, 0xdca003d2, 0x20a0239c, 
70136 +0x00a0a3f0, 0x00c03f3f, 0x20e317be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ff8, 0x0000b081, 
70137 +0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, 0x00a083f0, 0x20a0039c, 0xf42006d0, 
70138 +0x0020a280, 0x0a008022, 0xdca023c0, 0x042007d0, 0x0840a680, 0x06008032, 0xdca023c0, 0x18001082, 
70139 +0x0220d091, 0xe1ffbf10, 0xf42006d0, 0x06008010, 0x190010a2, 0x042006d0, 0x00c026d0, 0x18001082, 
70140 +0x0020d091, 0x042006d0, 0x01200290, 0x042026d0, 0x000006d0, 0x0020a280, 0x04008002, 0x18001090, 
70141 +0x4f010040, 0x1b001092, 0xf02006e0, 0x0020a480, 0xf1ffbf02, 0x40b03611, 0x004004d2, 0x01201290, 
70142 +0x0840a280, 0x0e018012, 0x10001096, 0x046004d0, 0x01208a80, 0x33008002, 0xa0200484, 0x0c2610ba, 
70143 +0x000024fa, 0x00211090, 0x042024d0, 0x246004d0, 0x80200290, 0x082024d0, 0xec2004d0, 0x00210290, 
70144 +0x0c2024d0, 0x102024c4, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, 
70145 +0xc0ff3f3b, 0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, 
70146 +0x142024d0, 0x206004d0, 0x10210290, 0x182024d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, 
70147 +0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, 
70148 +0x08401292, 0x4000003b, 0x1d401292, 0x1c2024d2, 0x01201090, 0xa02024d0, 0x20200496, 0xa8200484, 
70149 +0x306004d0, 0x0020a280, 0x2b008012, 0x00201098, 0x0c2610ba, 0x00c022fa, 0x04e022c0, 0xc0200490, 
70150 +0x10e022d0, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, 0xc0ff3f3b, 
70151 +0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, 0x14e022d0, 
70152 +0x206004d0, 0x10210290, 0x18e022d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, 0x1d400292, 
70153 +0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, 0x08401292, 
70154 +0x4000003b, 0x1d401292, 0x1ce022d2, 0x4f008010, 0x0020109a, 0x0c00109a, 0x306004d0, 0x0840a380, 
70155 +0x3b00801a, 0xe02004c6, 0x0c2610ba, 0x00c022fa, 0x01202b91, 0x0c000290, 0x02202a91, 0x08400490, 
70156 +0x382002d2, 0x04e022d2, 0x342002d0, 0x08e022d0, 0x0ce022c6, 0x10e022c4, 0x186004d0, 0x02202a91, 
70157 +0x088006d2, 0x0001003b, 0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x44a103fa, 0x606007d0, 
70158 +0x00280290, 0x08401292, 0x4000003b, 0x1d401292, 0x14e022d2, 0x206004d0, 0x10210290, 0x18e022d0, 
70159 +0x186004d0, 0x02202a91, 0x088006d4, 0x0001003b, 0x1d800294, 0x088026d4, 0xc0ff3f3b, 0x1d800a94, 
70160 +0x186004d0, 0x00280290, 0x80000013, 0x09001290, 0x08801294, 0x4000003b, 0x1d801294, 0x1ce022d4, 
70161 +0x01201090, 0x008020d0, 0x04e002d0, 0x08c00086, 0x0840039a, 0x01200398, 0x20e00296, 0x306004d0, 
70162 +0x0800a380, 0xc9ffbf0a, 0x08a00084, 0xc0200490, 0xf0ff22d0, 0xe42004d0, 0x0d00a280, 0x0b00801a, 
70163 +0x00201098, 0x04008010, 0x10001096, 0x01200398, 0x20e00296, 0x306004d0, 0x0800a380, 0xfcffbf2a, 
70164 +0x04e022c0, 0xfc3f109a, 0xe42024da, 0x10001082, 0x186004d0, 0x00280290, 0x08006081, 0x00000001, 
70165 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
70166 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
70167 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00201098, 
70168 +0x0c00109a, 0x142004fa, 0xec00823b, 0x3080d61b, 0x00006891, 0x0420a280, 0x3b008002, 0x0c00a280, 
70169 +0x04008002, 0x00000001, 0x0120d091, 0x36008030, 0x7c2006d0, 0x01200290, 0x7c2026d0, 0x782006d0, 
70170 +0x0020a280, 0x04008002, 0x78200690, 0x64000040, 0x40e00692, 0xf02004d0, 0x0020a280, 0x03008012, 
70171 +0xf02026d0, 0x80e026c0, 0x7c2006d0, 0x40e026d0, 0x046004d0, 0x04208a80, 0x13008002, 0x1100108a, 
70172 +0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, 0x406099e0, 
70173 +0x40a0b9e0, 0x806099e0, 0x80a0b9e0, 0xc06099e0, 0xc0a0b9e0, 0x00809be0, 0x0780039c, 0x0e008010, 
70174 +0xec2004d2, 0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, 
70175 +0x406099e0, 0x40a0b9e0, 0x00809be0, 0x0780039c, 0xec2004d2, 0xe42004d0, 0x886222d0, 0x042006d0, 
70176 +0x00c026d0, 0x000007d0, 0x01208a80, 0x05008012, 0x00000001, 0x142027f2, 0x06008010, 0xdca003fa, 
70177 +0x142027f2, 0xfe3f0a90, 0x000027d0, 0xdca003fa, 0x016007ba, 0xdca023fa, 0x0c2007d0, 0x0840a680, 
70178 +0x04008032, 0x082007d0, 0x03008010, 0x102007f2, 0x084006b2, 0x00007081, 0x1600801c, 0x00000001, 
70179 +0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x8ce017be, 0x04e08f80, 0x06008012, 
70180 +0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 
70181 +0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x042007d0, 0x0840a680, 0xb3febf12, 0x190010a2, 
70182 +0x8afebf10, 0xf42006d0, 0x60a003de, 0x80a083e0, 0xa0a083f0, 0x08e0c381, 0x00a1039c, 0x80a0239c, 
70183 +0x042002c4, 0x004022c4, 0x18008030, 0x00007081, 0x16008012, 0x00000001, 0x60a0239c, 0x00a0a3c0, 
70184 +0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x24e117be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 
70185 +0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 
70186 +0x00a083c0, 0x60a0039c, 0x000002c4, 0x00a0a080, 0xe7ffbf12, 0x00000001, 0x042002c4, 0x01a00084, 
70187 +0x042022c4, 0x000002c4, 0x00a0a080, 0xddffbf12, 0x00000001, 0x08e0c381, 0x80a0039c, };
70188 +#define threadcode_elan3_text_size 0x97c
70189 +static uint32_t threadcode_elan3_data[] = {
70190 +0};
70191 +#define threadcode_elan3_data_size 0x0
70192 +static uint32_t threadcode_elan3_rodata[] = {
70193 +0};
70194 +#define threadcode_elan3_rodata_size 0x0
70195 +static EP_SYMBOL threadcode_elan3_symbols[] = {
70196 +    {"__bss_start", 0xff00297c},
70197 +    {"_edata", 0xff00297c},
70198 +    {"_end", 0xff002988},
70199 +    {"_etext", 0xff00097c},
70200 +    {"_sdata", 0xff00297c},
70201 +    {"_stext", 0xff000000},
70202 +    {"ep3_spinblock", 0xff0008dc},
70203 +    {"ep3comms_rcvr", 0xff0002a8},
70204 +    {"kcomm_probe", 0xff00013c},
70205 +    {"r", 0xff00297c},
70206 +    {"rail", 0xff002984},
70207 +    {"rm", 0xff002980},
70208 +    {0, 0}};
70209 +EP_CODE threadcode_elan3 = {
70210 +   (unsigned char *) threadcode_elan3_text,
70211 +   threadcode_elan3_text_size,
70212 +   (unsigned char *) threadcode_elan3_data,
70213 +   threadcode_elan3_data_size,
70214 +   (unsigned char *) threadcode_elan3_rodata,
70215 +   threadcode_elan3_rodata_size,
70216 +   threadcode_elan3_symbols,
70217 +};
70218 Index: linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode_elan4_Linux.c
70219 ===================================================================
70220 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/ep/threadcode_elan4_Linux.c        2004-02-23 16:02:56.000000000 -0500
70221 +++ linux-2.6.5-7.191/drivers/net/qsnet/ep/threadcode_elan4_Linux.c     2005-07-28 14:52:52.912669008 -0400
70222 @@ -0,0 +1,112 @@
70223 +/* --------------------------------------------------------*/
70224 +/* MACHINE GENERATED ELAN CODE                             */
70225 +#include <qsnet/kernel.h>
70226 +#include <elan/kcomm.h>
70227 +#include "kcomm_elan4.h"
70228 +static uint32_t threadcode_elan4_text[] = {
70229 +0x00a00087, 0xc04060cb, 0x00003080, 0x80001080, 0x02606180, 0x02004032, 0x807f60cb, 0x04606180, 
70230 +0x02004032, 0x407f60d3, 0x08606180, 0x02004032, 0x007f60db, 0x10606180, 0x02004032, 0xc07e60e3, 
70231 +0x20606180, 0x02004032, 0x807e60eb, 0x40606180, 0x02004032, 0x407e60f3, 0x80606180, 0x02004032, 
70232 +0x007e60fb, 0x40001180, 0xc3801080, 0xc07f60c3, 0x20002000, 0x20002000, 0x20002000, 0x20002000, 
70233 +0x407f8001, 0x4060c0c7, 0x4860c0d0, 0x5060c0d1, 0x5860c0d2, 0x6060c0d3, 0x6860c0d4, 0x00208292, 
70234 +0x00608291, 0x00a08294, 0xff3f8088, 0x1c381293, 0xc04044c8, 0x13004290, 0xc000c5d0, 0x08004030, 
70235 +0x00001088, 0x04204288, 0x0020b200, 0x04004003, 0x00208080, 0x9c010040, 0x00a08488, 0xc04044c8, 
70236 +0x20381288, 0x0020b200, 0xf6ff7f13, 0x01208408, 0x11161282, 0x804094c2, 0xc04044c8, 0x20381288, 
70237 +0x0020b200, 0xebff7f13, 0x00208080, 0x406040c7, 0x486040d0, 0x506040d1, 0x586040d2, 0x606040d3, 
70238 +0x686040d4, 0x08e00180, 0xc0608001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
70239 +0x807e8001, 0x4060c0c7, 0x4860c0d0, 0x5060c0d1, 0x5860c0d2, 0x6060c0d3, 0x6860c0d4, 0x7060c0d5, 
70240 +0x7860c0d6, 0x8060c0d7, 0x8860c0d8, 0x9060c0d9, 0x9860c0da, 0xa060c0db, 0xa860c0dc, 0xb060c0dd, 
70241 +0xb860c0de, 0xc060c0df, 0x8061c0c8, 0x00608296, 0x00a0829a, 0x9861c0cb, 0xa061c0cc, 0xa861c0cd, 
70242 +0x01208088, 0x3861c0c8, 0x08e042d2, 0x386140c9, 0x0900900a, 0xa06140c8, 0x986140cb, 0x18e042c9, 
70243 +0x72010040, 0x05b4128a, 0x0020808c, 0x3861c0cc, 0x986140c9, 0xc04042c8, 0x0880b400, 0x39014003, 
70244 +0xffff3f08, 0x90a0851c, 0xe023829f, 0x20f4179f, 0x10e3879f, 0xffff3f08, 0xe023829e, 0x20b4179e, 
70245 +0x03a3879e, 0xffff3f08, 0xe023829d, 0x2074179d, 0x0363879d, 0x00a08495, 0x18a08408, 0x800012c2, 
70246 +0x089a109b, 0x20f4169b, 0x20f8169b, 0x00e88609, 0x20741289, 0x01120008, 0x0a381288, 0x08408297, 
70247 +0x45208088, 0x06341288, 0x806140ca, 0xc88042c8, 0x00288218, 0x04a08408, 0x800012c2, 0x089a1088, 
70248 +0x20341288, 0x20381288, 0x00281299, 0x20a08408, 0x800012c2, 0x089a108a, 0x20b4128a, 0x20b8128a, 
70249 +0x30a08408, 0x800012c2, 0x089a1093, 0x20f41493, 0x20f81493, 0x03f41689, 0x806140cb, 0x2922808c, 
70250 +0x0334138c, 0xccc042c8, 0xc90042d1, 0x02604688, 0x0020b200, 0x03004002, 0x60a08214, 0x80a08214, 
70251 +0x90a08509, 0x804012c8, 0x01208208, 0x804092c8, 0x046012c8, 0x043a1288, 0x0020b200, 0x04004003, 
70252 +0xa86140c8, 0x67ffff7f, 0x00a0868a, 0x88a045d0, 0x0020b400, 0x12004013, 0x00208080, 0x800017c8, 
70253 +0x808096c8, 0x72010040, 0x00a08588, 0x00208290, 0x90a08509, 0x804012c8, 0x01208208, 0x804092c8, 
70254 +0x046012c8, 0x043a1288, 0x0020b200, 0x04004003, 0xa86140c8, 0x53ffff7f, 0x00a0868a, 0x804015c2, 
70255 +0x159a1089, 0x20741289, 0x20781289, 0x40b03608, 0x01208288, 0x0840b200, 0x06004023, 0xa02344c4, 
70256 +0x800017c8, 0x808096c8, 0xbb004010, 0xa8a045c8, 0x01604688, 0x00281288, 0x08009008, 0x00e0b400, 
70257 +0x05004003, 0x3f381289, 0x13408209, 0x03004010, 0x05208088, 0x04208088, 0x09009220, 0x07341889, 
70258 +0x0900840b, 0x05341888, 0x0023820a, 0x01604688, 0x0020b200, 0x1d004002, 0x0a00840c, 0xc900c4d7, 
70259 +0x40c40f08, 0x09208288, 0x08e0c2c8, 0x0a608488, 0x10e0c2c8, 0x81001008, 0x0a341288, 0x18e0c2c8, 
70260 +0x1d608488, 0x20e0c2c8, 0x28e0c2d8, 0x24608508, 0x800012c2, 0x089a1088, 0x20341288, 0x20381288, 
70261 +0x80208208, 0x30e0c2c8, 0x00218108, 0x38e0c2c8, 0x40e0c2d4, 0x48e0c2cc, 0xca00c4df, 0x20608411, 
70262 +0x80e0820b, 0x2020830c, 0x00e0b400, 0x13004013, 0x0020808e, 0xc0c0c2d7, 0x40c40f09, 0x09608289, 
70263 +0x08e0c2c9, 0x0a608488, 0x10e0c2c8, 0x00040008, 0x18e0c2c8, 0x1d608488, 0x20e0c2c8, 0x28e0c2d8, 
70264 +0x40e0c2d4, 0x48e0c2cc, 0xc000c3de, 0x00208083, 0x4c004010, 0x20608411, 0xb8238408, 0x800012c2, 
70265 +0x089a108f, 0x20f4138f, 0x20f8138f, 0x00208083, 0x13c0b000, 0x2e00401b, 0x40c40f08, 0x092082a2, 
70266 +0x00040021, 0xffff3f08, 0xe023828d, 0x2074138d, 0x1063838d, 0x0e808309, 0x0e408209, 0x02741289, 
70267 +0x1540820a, 0x38a0820a, 0x808012c2, 0x0a9a108a, 0x20b4128a, 0x20b8128a, 0xc0c0c2d7, 0x08e0c2e2, 
70268 +0x0a608488, 0x10e0c2c8, 0x20b41288, 0x21008288, 0x18e0c2c8, 0x1d608488, 0x20e0c2c8, 0x28e0c2d8, 
70269 +0x15408209, 0x34608209, 0x804012c2, 0x099a1089, 0x20741289, 0x20781289, 0x30e0c2c9, 0x38e0c2cf, 
70270 +0x40e0c2d4, 0x48e0c2cc, 0xc000c3cd, 0x0ac0830f, 0x0ac08003, 0x20608411, 0x80e0820b, 0x01a0830e, 
70271 +0x1380b300, 0xdcff7f0b, 0x2020830c, 0xe03f830c, 0xc000c3dd, 0xbc238408, 0x800012c2, 0x089a1088, 
70272 +0x20341288, 0x20381288, 0x0300b200, 0x0d00401b, 0x07341888, 0x0020888e, 0x0420b800, 0x08004019, 
70273 +0x0800840b, 0x00040008, 0x18e0c2c8, 0x01a0830e, 0x04a0b300, 0xfdff7f09, 0x80e0820b, 0xfc3f8083, 
70274 +0x07341888, 0x08008408, 0xa06140ca, 0xc00062e3, 0x402062f3, 0xc080e2e3, 0xc080e2f3, 0x982244c8, 
70275 +0x88a0c5c8, 0x88a045c8, 0x0020b200, 0x05004013, 0x04604688, 0x88a08508, 0x80a0c5c8, 0x04604688, 
70276 +0x0020b200, 0x0c004002, 0xd822c4c0, 0xc04065e3, 0x406065f3, 0xc000e1e3, 0x806065e3, 0x4020e1f3, 
70277 +0xc06065f3, 0x8020e1e3, 0xc020e1f3, 0x07004010, 0x88228108, 0xc04065e3, 0x406065f3, 0xc000e1e3, 
70278 +0x4020e1f3, 0x88228108, 0x08d61082, 0x800092c2, 0x03f41689, 0x806140cb, 0x2922808c, 0x0334138c, 
70279 +0xccc042c8, 0xc900c2d1, 0x800017c8, 0x808096c8, 0xa8a045c8, 0x0880b400, 0x03004013, 0x00a18412, 
70280 +0xa0a045d2, 0x98a045c8, 0x0020b200, 0x05004013, 0x386140c9, 0x986140c8, 0x0820c2d2, 0x386140c9, 
70281 +0x01608209, 0xfe61b200, 0x0e004015, 0x3861c0c9, 0x00001088, 0x02204288, 0x0020b200, 0x05004003, 
70282 +0x986140ca, 0x28000040, 0xa06140c8, 0x986140ca, 0xc08042c8, 0x0880b400, 0xd8fe7f13, 0x00a08495, 
70283 +0x98a045cb, 0x00e0b200, 0xbafe7f03, 0x386140c9, 0xa06140c8, 0x60a08509, 0x48000040, 0xe03f808a, 
70284 +0x986140cb, 0x08e0c2d2, 0x386140cc, 0x0120830c, 0xaffe7f10, 0x3861c0cc, 0x406040c7, 0x486040d0, 
70285 +0x506040d1, 0x586040d2, 0x606040d3, 0x686040d4, 0x706040d5, 0x786040d6, 0x806040d7, 0x886040d8, 
70286 +0x906040d9, 0x986040da, 0xa06040db, 0xa86040dc, 0xb06040dd, 0xb86040de, 0xc06040df, 0x08e00180, 
70287 +0x80618001, 0x807f8001, 0xc040e0d3, 0x4060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 
70288 +0x00e08192, 0x02000040, 0x00608091, 0x14e08110, 0x17208097, 0xc000f2d3, 0xc04060d3, 0x406060db, 
70289 +0x08a00080, 0x80608001, 0x407f8001, 0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 
70290 +0x00208080, 0x00e08192, 0x02000040, 0x00608091, 0x40e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, 
70291 +0x00208490, 0x00e08597, 0x00208080, 0x00208080, 0x1f608290, 0x20b41291, 0x08638491, 0x00608092, 
70292 +0x00208293, 0xc000f2d1, 0x406060d3, 0x806060db, 0x08a00080, 0xc0608001, 0x407f8001, 0x4060e0d3, 
70293 +0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x00e08192, 0x02000040, 0x00608091, 
70294 +0x54e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, 0x00208490, 0x00e08597, 0x00208080, 0x00208080, 
70295 +0x1f608290, 0x20b41291, 0x08638491, 0x00608092, 0x00208293, 0x0ef41294, 0x0d208594, 0x17208095, 
70296 +0x17208096, 0x17208097, 0xc000f2d3, 0x406060d3, 0x806060db, 0x08a00080, 0xc0608001, 0x01208097, 
70297 +0xb0e3c0d7, 0x80a060d2, 0x98e28004, 0x98e2c0c0, 0x80a0c0c4, 0xc080c4c3, 0x01e0b400, 0x06004002, 
70298 +0x00a08490, 0x00e08097, 0x02208097, 0xb0e3c0d7, 0xd8e2d0d0, 0xd8e2c0d0, 0x03208097, 0xb0e3c0d7, 
70299 +0x00e08088, 0x0e004010, 0x00a060c3, 0x407f8001, 0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 
70300 +0x00208080, 0x00208080, 0x01208089, 0x8820c2c9, 0x00608091, 0x00e08197, 0x0020f2d3, 0x406060d3, 
70301 +0x806060db, 0x08e00180, 0xc0608001, };
70302 +#define threadcode_elan4_text_size 0x90c
70303 +static uint32_t threadcode_elan4_data[] = {
70304 +0};
70305 +#define threadcode_elan4_data_size 0x0
70306 +static uint32_t threadcode_elan4_rodata[] = {
70307 +0};
70308 +#define threadcode_elan4_rodata_size 0x0
70309 +static EP_SYMBOL threadcode_elan4_symbols[] = {
70310 +    {".thread_restart", 0x00000000f800000c},
70311 +    {".thread_start", 0x00000000f8000000},
70312 +    {"__bss_start", 0x00000000f810090c},
70313 +    {"_edata", 0x00000000f810090c},
70314 +    {"_end", 0x00000000f8100910},
70315 +    {"_etext", 0x00000000f800090c},
70316 +    {"_sdata", 0x00000000f810090c},
70317 +    {"_stext", 0x00000000f8000000},
70318 +    {"c_queue_rxd", 0x00000000f800087c},
70319 +    {"c_reschedule", 0x00000000f8000744},
70320 +    {"c_stall_thread", 0x00000000f80008cc},
70321 +    {"c_waitevent", 0x00000000f8000788},
70322 +    {"c_waitevent_interrupt", 0x00000000f80007f8},
70323 +    {"ep4_spinblock", 0x00000000f8000080},
70324 +    {"ep4comms_rcvr", 0x00000000f8000140},
70325 +    {0, 0}};
70326 +EP_CODE threadcode_elan4 = {
70327 +   (unsigned char *) threadcode_elan4_text,
70328 +   threadcode_elan4_text_size,
70329 +   (unsigned char *) threadcode_elan4_data,
70330 +   threadcode_elan4_data_size,
70331 +   (unsigned char *) threadcode_elan4_rodata,
70332 +   threadcode_elan4_rodata_size,
70333 +   threadcode_elan4_symbols,
70334 +};
70335 Index: linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv.c
70336 ===================================================================
70337 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/jtag/jtagdrv.c     2004-02-23 16:02:56.000000000 -0500
70338 +++ linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv.c  2005-07-28 14:52:52.913668856 -0400
70339 @@ -0,0 +1,451 @@
70340 +/*
70341 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70342 + *
70343 + *    For licensing information please see the supplied COPYING file
70344 + *
70345 + */
70346 +
70347 +#ident "@(#)$Id: jtagdrv.c,v 1.12 2003/06/07 16:02:35 david Exp $"
70348 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv.c,v $*/
70349 +
70350 +#include <qsnet/types.h>
70351 +
70352 +#include "jtagdrv.h"
70353 +#include <jtag/jtagio.h>
70354 +
70355 +int
70356 +jtagdrv_strobe_data (JTAG_DEV *dev, u_char data)
70357 +{
70358 +    u_char dsr;
70359 +
70360 +    PRINTF (DBG_ECPP, ("jtagdrv_strobe_data: %s %s %s -> ", (data & LPT_DATA_TRST) ? "TRST" : "trst", 
70361 +                      (data & LPT_DATA_TDI) ? "TDI" : "tdi", (data & LPT_DATA_TMS) ? "TMS" : "tms"));
70362 +
70363 +
70364 +    LPT_WRITE_DATA (dev, data); DELAY(5);                      /* Drive NEW values on data wires */
70365 +    LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(5);             /* Drive strobe low */
70366 +    LPT_READ_STAT  (dev, dsr); DELAY(5);                       /* Sample TDI from ring */
70367 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe high */
70368 +
70369 +    PRINTF (DBG_ECPP, ("%s\n", (dsr & LPT_STAT_PE) ? "TDO" : "tdo"));
70370 +
70371 +    return ((dsr & LPT_STAT_PE) ? 1 : 0);
70372 +}
70373 +
70374 +void
70375 +jtagdrv_select_ring (JTAG_DEV *dev, u_int ring)
70376 +{
70377 +    PRINTF (DBG_ECPP, ("jtagdrv_select_ring: ring=0x%x\n", ring));
70378 +
70379 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe and TCLK high */
70380 +    LPT_WRITE_DATA (dev, ring);        DELAY(5);                       /* Drive ring address */
70381 +    LPT_WRITE_CTRL (dev, LPT_CTRL_RCLK); DELAY(5);             /* Drive strobe low */
70382 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe high */
70383 +}
70384 +
70385 +void
70386 +jtagdrv_reset (JTAG_DEV *dev)
70387 +{
70388 +    register int i;
70389 +
70390 +    for (i = 0; i < 5; i++)
70391 +       jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                /* 5 clocks to Reset from any state */
70392 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
70393 +}
70394 +
70395 +void
70396 +jtagdrv_shift_ir (JTAG_DEV *dev, u_char *value, int nbits)
70397 +{
70398 +    register int i;
70399 +    register int bit;
70400 +
70401 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select DR-Scan */
70402 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select IR-Scan */
70403 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Capture-IR */
70404 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Shift-IR */
70405 +    
70406 +    for (i = 0; i < nbits; i++)
70407 +    {
70408 +       /* strobe through the instruction bits,  asserting TMS on the last bit */
70409 +
70410 +       if (i == (nbits-1))
70411 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
70412 +       else
70413 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
70414 +       
70415 +       if (bit)
70416 +           JTAG_SET_BIT(value, i);
70417 +       else
70418 +           JTAG_CLR_BIT(value, i);
70419 +    }
70420 +    
70421 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Update-IR */
70422 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
70423 +}
70424 +
70425 +
70426 +void
70427 +jtagdrv_shift_dr (JTAG_DEV *dev, u_char *value, int nbits)
70428 +{
70429 +    register int i;
70430 +    register int bit;
70431 +
70432 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select DR-Scan */
70433 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Capture-DR */
70434 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Shift-DR */
70435 +    
70436 +    for (i = 0; i < nbits; i++)
70437 +    {
70438 +       /* strobe through the data bits,  asserting TMS on the last bit */
70439 +
70440 +       if (i == (nbits-1))
70441 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
70442 +       else
70443 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
70444 +       
70445 +       if (bit)
70446 +           JTAG_SET_BIT(value, i);
70447 +       else
70448 +           JTAG_CLR_BIT(value, i);
70449 +    }
70450 +    
70451 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Update-DR */
70452 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
70453 +}
70454 +
70455 +static int
70456 +jtagdrv_i2c_start (JTAG_DEV *dev)
70457 +{
70458 +    u_char dsr;
70459 +    int i;
70460 +
70461 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_start\n"));
70462 +    
70463 +    /* Issue a stop sequence */
70464 +    LPT_WRITE_CTRL (dev,  LPT_CTRL_SCLK); DELAY(1);            /* SCLK low */
70465 +    LPT_WRITE_DATA (dev, 0); DELAY(5);                         /* SDA low */
70466 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* SCLK high */
70467 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);              /* SDA high */
70468 +    
70469 +    /* sample the line to see if we're idle */
70470 +    LPT_READ_STAT (dev, dsr);                                  /* sample SDA */
70471 +    if ((dsr & LPT_STAT_SDA) == 0)                             /* Cannot start if SDA already driven */
70472 +    {
70473 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_start: cannot start - sda driven low\n"));
70474 +
70475 +       for (i = 0; i < 16 ; i++)
70476 +       {
70477 +           LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5);      /* SCLK low */
70478 +           LPT_WRITE_CTRL (dev, 0); DELAY(5);                  /* SCLK high */
70479 +           LPT_READ_STAT  (dev, dsr);
70480 +           
70481 +           if (dsr & LPT_STAT_SDA)
70482 +           {
70483 +               PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - stopped after %d clocks\n", i));
70484 +               break;
70485 +           }
70486 +       }
70487 +
70488 +       if ((dsr & LPT_STAT_SDA) == 0)
70489 +       {
70490 +           PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - cannot start - not idle\n"));
70491 +           return (0);
70492 +       }
70493 +
70494 +       /* seen SDA float high, so issue a stop sequence */
70495 +       LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);          /* SCLK low */
70496 +       LPT_WRITE_DATA (dev, 0); DELAY(5);                      /* SDA low */
70497 +       LPT_WRITE_CTRL (dev, 0); DELAY(5);                      /* SCLK high */
70498 +       LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);           /* SDA high */
70499 +    }
70500 +
70501 +    LPT_WRITE_DATA (dev, 0); DELAY(4);                         /* drive SDA low */
70502 +    return (1);
70503 +}
70504 +
70505 +static void
70506 +jtagdrv_i2c_stop (JTAG_DEV *dev)
70507 +{
70508 +    u_char dsr;
70509 +    int    i;
70510 +
70511 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop\n"));
70512 +
70513 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
70514 +    LPT_WRITE_DATA (dev, 0); DELAY(5);                         /* SDA low */
70515 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* SCLK high */
70516 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);              /* SDA high */
70517 +
70518 +    /* 
70519 +     * bug fix for temperature sensor chip
70520 +     * if it's still driving SDA, then clock
70521 +     * it until it stops driving it 
70522 +     */
70523 +    LPT_READ_STAT (dev, dsr);
70524 +    if ((dsr & LPT_STAT_SDA) == 0)
70525 +    {
70526 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - slave not stodeved\n"));
70527 +       for (i = 0; i < 16 ; i++)
70528 +       {
70529 +           LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5);      /* SCLK low */
70530 +           LPT_WRITE_CTRL (dev, 0); DELAY(5);                  /* SCLK high */
70531 +           LPT_READ_STAT  (dev, dsr);
70532 +           
70533 +           if (dsr & LPT_STAT_SDA)
70534 +               break;
70535 +       }
70536 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - stodeved after %d clocks\n", i));
70537 +    }
70538 +}
70539 +
70540 +static int
70541 +jtagdrv_i2c_strobe (JTAG_DEV *dev, u_char data)
70542 +{
70543 +    u_char dsr;
70544 +    
70545 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_strobe : %s", (data & LPT_DATA_SDA) ? "SDA" : "sda"));
70546 +
70547 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
70548 +    LPT_WRITE_DATA (dev, data);        DELAY(5);                       /* write data */
70549 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
70550 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA */
70551 +
70552 +    PRINTF (DBG_ECPP, (" -> %s\n", (dsr & LPT_STAT_SDA) ? "SDA" : "sda"));
70553 +
70554 +    return ((dsr & LPT_STAT_SDA) ? 1 : 0);
70555 +}
70556 +
70557 +static int
70558 +jtagdrv_i2c_get_ack (JTAG_DEV *dev)
70559 +{
70560 +    u_char dsr;
70561 +
70562 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
70563 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA);        DELAY(5);               /* SDA high */
70564 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
70565 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA */
70566 +
70567 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_get_ack -> %s\n", (dsr & LPT_STAT_SDA) ? "no ack" : "ack"));
70568 +    
70569 +    return ((dsr & LPT_STAT_SDA) ? 0 : 1);
70570 +}
70571 +
70572 +static int
70573 +jtagdrv_i2c_drive_ack (JTAG_DEV *dev, int nack)
70574 +{
70575 +    u_char dsr;
70576 +
70577 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
70578 +    LPT_WRITE_DATA (dev, nack ? LPT_DATA_SDA : 0); DELAY(5);   /* SDA low for ack, high for nack */
70579 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
70580 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA for ack */
70581 +
70582 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_drive_ack %d -> %s\n", nack, (dsr & LPT_STAT_SDA) ? "done" : "more"));
70583 +    
70584 +    return ((dsr & LPT_STAT_SDA) ? 1 : 0);
70585 +}
70586 +
70587 +static void
70588 +jtagdrv_i2c_shift_addr (JTAG_DEV *dev, u_int address, int readNotWrite)
70589 +{
70590 +    register int i;
70591 +
70592 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_addr: %x\n", address));
70593 +
70594 +    for (i = I2C_ADDR_LEN-1; i >= 0; i--)
70595 +       jtagdrv_i2c_strobe (dev, (address & (1 << i)) ? LPT_DATA_SDA : 0);
70596 +    
70597 +    jtagdrv_i2c_strobe (dev, readNotWrite ? LPT_DATA_SDA : 0);
70598 +}
70599 +
70600 +static u_char
70601 +jtagdrv_i2c_shift_data (JTAG_DEV *dev, u_char data)
70602 +{
70603 +    register int i;
70604 +    u_char val = 0;
70605 +    
70606 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : %02x\n", data));
70607 +
70608 +    for (i = I2C_DATA_LEN-1; i >= 0; i--)
70609 +       if (jtagdrv_i2c_strobe (dev, data & (1 << i) ? LPT_DATA_SDA : 0))
70610 +           val |= (1 << i);
70611 +
70612 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : -> %02x\n", val));
70613 +
70614 +    return (val);
70615 +}
70616 +
70617 +int
70618 +jtagdrv_i2c_write (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
70619 +{
70620 +    register int i;
70621 +
70622 +    PRINTF (DBG_FN, ("jtagdrv_i2c_write: address=%x count=%d data=%02x\n", address, count, data[0]));
70623 +
70624 +    if (! jtagdrv_i2c_start (dev))
70625 +       return (I2C_OP_NOT_IDLE);
70626 +
70627 +    jtagdrv_i2c_shift_addr (dev, address, 0);
70628 +    
70629 +    if (! jtagdrv_i2c_get_ack (dev))
70630 +    {
70631 +       PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on address phase\n"));
70632 +
70633 +       jtagdrv_i2c_stop (dev);
70634 +       return (I2C_OP_NO_DEVICE);
70635 +    }
70636 +    
70637 +    for (i = 0; i < count; i++)
70638 +    {
70639 +       jtagdrv_i2c_shift_data (dev, data[i]);
70640 +       
70641 +       if (! jtagdrv_i2c_get_ack (dev))
70642 +       {
70643 +           PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on data phase %d\n", i));
70644 +
70645 +           jtagdrv_i2c_stop (dev);
70646 +           return (I2C_OP_WRITE_TO_BIG);
70647 +       }
70648 +    }
70649 +
70650 +    jtagdrv_i2c_stop (dev);
70651 +    return (I2C_OP_SUCCESS);
70652 +}
70653 +
70654 +int
70655 +jtagdrv_i2c_read (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
70656 +{
70657 +    register int i;
70658 +
70659 +    PRINTF (DBG_FN, ("jtagdrv_i2c_read: address=%x count=%d\n", address, count));
70660 +
70661 +    if (! jtagdrv_i2c_start (dev))
70662 +       return (I2C_OP_NOT_IDLE);
70663 +
70664 +    jtagdrv_i2c_shift_addr (dev, address, 1);
70665 +    
70666 +    if (! jtagdrv_i2c_get_ack (dev))
70667 +    {
70668 +       PRINTF (DBG_FN, ("jtagdrv_i2c_read: no ack on address phase\n"));
70669 +
70670 +       jtagdrv_i2c_stop (dev);
70671 +       return (I2C_OP_NO_DEVICE);
70672 +    }
70673 +    
70674 +    for (i = 0; i < count; i++)
70675 +    {
70676 +       data[i] = jtagdrv_i2c_shift_data (dev, 0xff);
70677 +
70678 +       jtagdrv_i2c_drive_ack (dev, (i == (count-1) ? 1 : 0));
70679 +    }
70680 +
70681 +    jtagdrv_i2c_stop (dev);
70682 +    
70683 +    return (I2C_OP_SUCCESS);
70684 +}
70685 +
70686 +int
70687 +jtagdrv_i2c_writereg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
70688 +{
70689 +    register int i;
70690 +
70691 +    PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: address=%x count=%d\n", address, count));
70692 +
70693 +    if (! jtagdrv_i2c_start (dev))
70694 +       return (I2C_OP_NOT_IDLE);
70695 +
70696 +    jtagdrv_i2c_shift_addr (dev, address, 0);
70697 +    
70698 +    if (! jtagdrv_i2c_get_ack (dev))
70699 +    {
70700 +       PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on address phase\n"));
70701 +
70702 +       jtagdrv_i2c_stop (dev);
70703 +       return (I2C_OP_NO_DEVICE);
70704 +    }
70705 +    
70706 +    jtagdrv_i2c_shift_data (dev, intaddress);
70707 +    
70708 +    if (! jtagdrv_i2c_get_ack (dev))
70709 +    {
70710 +       PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on intaddress phase\n"));
70711 +       jtagdrv_i2c_stop (dev);
70712 +       return (I2C_OP_NO_DEVICE);
70713 +    }
70714 +    
70715 +    for (i = 0; i < count; i++)
70716 +    {
70717 +       jtagdrv_i2c_shift_data (dev, data[i]);
70718 +       if (! jtagdrv_i2c_get_ack (dev))
70719 +       {
70720 +           PRINTF (DBG_FN, ("jtagdrv_i2c_writedate: no ack on byte %d\n", i));
70721 +           jtagdrv_i2c_stop (dev);
70722 +           return (I2C_OP_WRITE_TO_BIG);
70723 +       }
70724 +    }
70725 +    
70726 +    jtagdrv_i2c_stop (dev);
70727 +    return (I2C_OP_SUCCESS);
70728 +}
70729 +
70730 +int
70731 +jtagdrv_i2c_readreg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
70732 +{
70733 +    PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: address=%x count=%d\n", address, count));
70734 +
70735 +    if (! jtagdrv_i2c_start (dev))
70736 +       return (I2C_OP_NOT_IDLE);
70737 +
70738 +    jtagdrv_i2c_shift_addr (dev, address, 0);
70739 +    
70740 +    if (! jtagdrv_i2c_get_ack (dev))
70741 +    {
70742 +       PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on address phase\n"));
70743 +
70744 +       jtagdrv_i2c_stop (dev);
70745 +       return (I2C_OP_NO_DEVICE);
70746 +    }
70747 +    
70748 +    jtagdrv_i2c_shift_data (dev, intaddress);
70749 +    
70750 +    if (! jtagdrv_i2c_get_ack (dev))
70751 +    {
70752 +       PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on intaddress phase\n"));
70753 +       jtagdrv_i2c_stop (dev);
70754 +       return (I2C_OP_NO_DEVICE);
70755 +    }
70756 +
70757 +    jtagdrv_i2c_stop (dev);
70758 +
70759 +    return (jtagdrv_i2c_read (dev, address, count, data));
70760 +}
70761 +
70762 +void
70763 +jtagdrv_i2c_clock_shift (JTAG_DEV *dev, u_int t, u_int n, u_int m)
70764 +{
70765 +    int i;
70766 +
70767 +    for (i = 2; i >= 0; i--)
70768 +    {
70769 +       LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
70770 +       LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1);     /* clock high | data */
70771 +    }
70772 +
70773 +    for (i = 1; i >= 0; i--)
70774 +    {
70775 +       LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
70776 +       LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)| LPT_DATA_TMS); DELAY(1);      /* clock high | data */
70777 +    }    
70778 +
70779 +    for (i = 6; i >= 0; i--)
70780 +    {
70781 +       LPT_WRITE_DATA (dev, ((m & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
70782 +       LPT_WRITE_DATA (dev, ((m & (1 << i)) ? LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1);     /* clock high | data */
70783 +    }    
70784 +
70785 +    LPT_WRITE_DATA (dev, 0); DELAY(1);                                                         /* clock low  | 0 */
70786 +
70787 +    LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(1);                                             /* strobe low */
70788 +    LPT_WRITE_CTRL (dev, 0); DELAY(1);                                                         /* strobe low */
70789 +}
70790 +
70791 Index: linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv.h
70792 ===================================================================
70793 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/jtag/jtagdrv.h     2004-02-23 16:02:56.000000000 -0500
70794 +++ linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv.h  2005-07-28 14:52:52.914668704 -0400
70795 @@ -0,0 +1,57 @@
70796 +/*
70797 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70798 + *
70799 + *    For licensing information please see the supplied COPYING file
70800 + *
70801 + */
70802 +
70803 +#ifndef __JTAGDRV_COMMON_H
70804 +#define __JTAGDRV_COMMON_H
70805 +
70806 +#ident "@(#)$Id: jtagdrv.h,v 1.5 2002/08/09 11:18:37 addy Exp $"
70807 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv.h,v $*/
70808 +
70809 +#include <qsnet/config.h>
70810 +
70811 +/* include OS specific header file */
70812 +#if defined(LINUX)
70813 +#  include "jtagdrv_Linux.h"
70814 +#elif defined(DIGITAL_UNIX)
70815 +#  include "jtagdrv_OSF1.h"
70816 +#elif defined(QNX)
70817 +#  include "jtagdrv_QNX.h"
70818 +#else
70819 +#  error cannot determine os type
70820 +#endif
70821 +
70822 +extern int jtagdebug;
70823 +
70824 +#define DBG_CFG                (1 << 0)
70825 +#define DBG_OPEN       (1 << 1)
70826 +#define DBG_IOCTL      (1 << 2)
70827 +#define DBG_ECPP       (1 << 3)
70828 +#define DBG_FN         (1 << 4)
70829 +
70830 +#define DRIVER_NAME    "jtag"
70831 +
70832 +#if defined(LINUX)
70833 +#define PRINTF(n,X)    ((n) & jtagdebug ? (void) printk X : (void) 0)
70834 +#define PRINTMSG(fmt, arg...) printk(KERN_INFO DRIVER_NAME ": " fmt, ##arg)
70835 +#else
70836 +#define PRINTF(n,X)    ((n) & jtagdebug ? (void) printf X : (void) 0)
70837 +#define PRINTMSG(M, A) printf ("jtag: " M, A)
70838 +#endif
70839 +
70840 +extern void jtagdrv_select_ring (JTAG_DEV *pp, u_int ring);
70841 +extern void jtagdrv_reset (JTAG_DEV *pp);
70842 +extern void jtagdrv_shift_ir (JTAG_DEV *pp, u_char *value, int nbits);
70843 +extern void jtagdrv_shift_dr (JTAG_DEV *pp, u_char *value, int nbits);
70844 +
70845 +extern int  jtagdrv_i2c_write (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
70846 +extern int  jtagdrv_i2c_read (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
70847 +extern int  jtagdrv_i2c_writereg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
70848 +extern int  jtagdrv_i2c_readreg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
70849 +extern void jtagdrv_i2c_clock_shift (JTAG_DEV *pp, u_int t, u_int n, u_int m);
70850 +
70851 +
70852 +#endif /* __JTAGDRV_COMMON_H */
70853 Index: linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv_Linux.c
70854 ===================================================================
70855 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/jtag/jtagdrv_Linux.c       2004-02-23 16:02:56.000000000 -0500
70856 +++ linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv_Linux.c    2005-07-28 14:52:52.914668704 -0400
70857 @@ -0,0 +1,325 @@
70858 +/*
70859 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70860 + *
70861 + *    For licensing information please see the supplied COPYING file
70862 + *
70863 + */
70864 +
70865 +/*
70866 + * $Id: jtagdrv_Linux.c,v 1.18.2.1 2005/02/01 10:12:01 lee Exp $
70867 + * $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.c,v $
70868 + */
70869 +
70870 +#include "jtagdrv.h"
70871 +#include <jtag/jtagio.h>
70872 +
70873 +#include <linux/module.h>
70874 +#include <linux/ioport.h>
70875 +
70876 +MODULE_AUTHOR("Quadrics Ltd.");
70877 +MODULE_DESCRIPTION("JTAG Parallel port QsNet switch interface");
70878 +
70879 +MODULE_LICENSE("GPL");
70880 +
70881 +#define MAJOR_INSTANCE 0       /* 0 is dynamic assign of device major  */ 
70882 +#define MAX_JTAG_DEV   4
70883 +
70884 +int jtag_major = MAJOR_INSTANCE;
70885 +int jtagdebug  = 0;
70886 +MODULE_PARM(jtag_major, "i");
70887 +MODULE_PARM(jtagdebug, "i");
70888 +
70889 +JTAG_DEV       jtag_devs[MAX_JTAG_DEV];
70890 +
70891 +int io[MAX_JTAG_DEV]= { 0, };
70892 +MODULE_PARM(io, "1-4i");
70893 +
70894 +
70895 +/* The fops functions */
70896 +int jtag_open(struct inode *, struct file *);
70897 +int jtag_close(struct inode *, struct file *);
70898 +int jtag_ioctl(struct inode *, struct file *, unsigned int, unsigned long );
70899 +
70900 +struct file_operations jtag_fops = {
70901 +    ioctl:   jtag_ioctl,
70902 +    open:    jtag_open,
70903 +    release: jtag_close,
70904 +};
70905 +
70906 +int
70907 +jtag_probe(void)
70908 +{
70909 +       int i=0;        
70910 +       int default_io = 1;
70911 +       JTAG_DEV *dev;
70912 +       unsigned char value=0xff;
70913 +
70914 +
70915 +       /* see if there are any user supplied io addr */
70916 +       for ( i = 0; i < MAX_JTAG_DEV; i++) {
70917 +               if ( io[i] != 0x00)
70918 +                       default_io = 0;
70919 +               jtag_devs[i].base = io[i];
70920 +       }
70921 +       
70922 +       if ( default_io ) {
70923 +               jtag_devs[0].base = 0x3bc;
70924 +               jtag_devs[1].base = 0x378;
70925 +               jtag_devs[2].base = 0x278;
70926 +               jtag_devs[3].base = 0x268;
70927 +       }
70928 +
70929 +       for ( i = 0 ; i < MAX_JTAG_DEV; i++) {
70930 +               if ( jtag_devs[i].base == 0x3bc ) 
70931 +                       jtag_devs[i].region = 3;
70932 +               else
70933 +                       jtag_devs[i].region = 8;
70934 +               jtag_devs[i].present = 0;
70935 +       }       
70936 +
70937 +
70938 +       if( default_io )
70939 +       {
70940 +               for( i = 0 ; i < MAX_JTAG_DEV; i++) {
70941 +                       dev=&(jtag_devs[i]);
70942 +                       if(dev->base && request_region(dev->base, dev->region, "jtag")) {
70943 +                               LPT_WRITE(dev, 0,0);
70944 +                               LPT_READ(dev, 0,value);
70945 +                               if ( value != 0xff) {
70946 +                                       PRINTMSG("(%d , %d) present, io=0x%04lx\n",jtag_major,i,dev->base);
70947 +                       
70948 +                                       dev->present=1; 
70949 +                               }
70950 +                               else
70951 +                                   release_region(dev->base, dev->region);
70952 +                       }
70953 +                       else
70954 +                       {
70955 +                           PRINTMSG("failed to request_region (%d , %d), io=0x%04lx\n",jtag_major,i,dev->base);
70956 +                           return -1;
70957 +                       }
70958 +               }
70959 +               return 0;
70960 +       }     
70961 +       else /* Force the region to be present, this makes the PCI parallel cards work */
70962 +       {
70963 +               for( i = 0 ; i < MAX_JTAG_DEV; i++) 
70964 +               {
70965 +                        dev=&(jtag_devs[i]);
70966 +                        if(dev->base && request_region(dev->base, dev->region, "jtag") && (dev->base != 0)) 
70967 +                       {
70968 +                                PRINTMSG("(%d , %d) forced by user, io=0x%04lx\n",jtag_major,i,dev->base);
70969 +                                        dev->present=1;
70970 +                       }       
70971 +                        else   
70972 +                       {
70973 +                                if( dev->base != 0)
70974 +                                       release_region(dev->base, dev->region);
70975 +                       }
70976 +               }
70977 +                return 0;
70978 +       }
70979 +}
70980 +
70981 +int init_module(void)
70982 +{
70983 +       int result,i;
70984 +       result = register_chrdev(jtag_major, DRIVER_NAME, &jtag_fops);
70985 +       if (result < 0) {
70986 +               PRINTMSG("Couldn't register char device err == %d\n",result); /* report the errno, not the (still unset) major */
70987 +               return -1;
70988 +       }
70989 +
70990 +       if ( jtag_major == 0 ) 
70991 +               jtag_major = result;
70992 +
70993 +       for ( i = 0; i < MAX_JTAG_DEV; i++) {
70994 +               jtag_devs[i].base=io[i];        
70995 +       }
70996 +
70997 +       jtag_probe();
70998 +
70999 +       PRINTMSG("Registered character device, major == %d\n",jtag_major);      
71000 +       return 0;
71001 +}      
71002 +
71003 +void cleanup_module(void)
71004 +{
71005 +       int i=0;
71006 +
71007 +       for( i = 0; i < MAX_JTAG_DEV; i++) {
71008 +               if( jtag_devs[i].present)       
71009 +                       release_region(jtag_devs[i].base, jtag_devs[i].region);
71010 +       }
71011 +                       
71012 +       unregister_chrdev(jtag_major, DRIVER_NAME);
71013 +       PRINTMSG("Unloaded char device\n");
71014 +}
71015 +
71016 +
71017 +int
71018 +jtag_open (struct inode *inode, struct file *filp)
71019 +{
71020 +    int unit = MINOR(inode->i_rdev);
71021 +    JTAG_DEV *dev = &jtag_devs[unit];
71022 +    
71023 +    if (unit < 0 || unit >= MAX_JTAG_DEV || !dev->present) /* >=: valid units are 0..MAX_JTAG_DEV-1 */
71024 +       return (-ENXIO);
71025 +
71026 +    /*
71027 +     * Only allow a single open at a time 
71028 +     */
71029 +    if (dev->open)
71030 +       return (-EBUSY);
71031 +    dev->open = 1;
71032 +    
71033 +    /*
71034 +     * Initialise the hardware registers
71035 +     */
71036 +   
71037 +    LPT_WRITE (dev, LPT_CTRL, 0);
71038 +    DELAY(50);
71039 +    LPT_WRITE (dev, LPT_CTRL, LPT_CTRL_INIT);
71040 +
71041 +    MOD_INC_USE_COUNT;
71042 +
71043 +    return (0);
71044 +}
71045 +
71046 +int
71047 +jtag_close(struct inode *inode, struct file *filp)
71048 +{
71049 +  
71050 +    int unit = MINOR(inode->i_rdev);
71051 +    JTAG_DEV *dev = &jtag_devs[unit];
71052 +    
71053 +    if (unit < 0 || unit >= MAX_JTAG_DEV || !dev->present) /* >=: unit == MAX_JTAG_DEV would read past jtag_devs[] */
71054 +       return (-ENXIO);
71055 +    
71056 +    dev->open = 0;
71057 +
71058 +    MOD_DEC_USE_COUNT;
71059 +
71060 +    return (0);
71061 +}
71062 +
71063 +int
71064 +jtag_ioctl (struct inode *inode, struct file *filp, unsigned int io_cmd, unsigned long io_data)
71065 +{
71066 +    int                  unit = MINOR(inode->i_rdev);
71067 +    JTAG_DEV             *dev = &jtag_devs[unit];
71068 +    JTAG_RESET_ARGS      *resetargs;
71069 +    JTAG_SHIFT_ARGS      *shiftargs;
71070 +    I2C_ARGS            *i2cargs;
71071 +    I2C_CLOCK_SHIFT_ARGS *clockargs;
71072 +    u_char              *buf;
71073 +    int                          freq;
71074 +
71075 +    if (unit < 0 || unit >= MAX_JTAG_DEV || !dev->present) /* >=: valid units are 0..MAX_JTAG_DEV-1 */
71076 +       return (-ENXIO);
71077 +    
71078 +    PRINTF (DBG_IOCTL, ("jtag_ioctl: device %d cmd=%x\n", unit, io_cmd));
71079 +
71080 +    switch (io_cmd)
71081 +    {
71082 +    case JTAG_RESET:
71083 +       resetargs = (JTAG_RESET_ARGS *) io_data; /* NOTE(review): arg structs are dereferenced directly from the user pointer (x86-only); should use copy_from_user -- confirm */
71084 +
71085 +       if (! VALID_JTAG_RING (resetargs->ring))
71086 +           return (-EINVAL);
71087 +       
71088 +       jtagdrv_select_ring (dev, resetargs->ring);
71089 +       jtagdrv_reset (dev);
71090 +       return (0);
71091 +       
71092 +    case JTAG_SHIFT_IR:
71093 +    case JTAG_SHIFT_DR:
71094 +       shiftargs = (JTAG_SHIFT_ARGS *) io_data;
71095 +       
71096 +       if (! VALID_JTAG_RING (shiftargs->ring) || shiftargs->nbits > (JTAG_MAX_DATA_LEN*JTAG_MAX_CHIPS)) {
71097 +           return (-EFAULT);
71098 +               }
71099 +
71100 +       buf = (u_char *) kmalloc (JTAG_NBYTES(shiftargs->nbits), GFP_KERNEL);
71101 +
71102 +       if (buf == (u_char *) NULL)
71103 +           return (-ENOMEM);
71104 +       
71105 +       if (copy_from_user (buf, shiftargs->value, JTAG_NBYTES(shiftargs->nbits)))
71106 +       {
71107 +           kfree(buf);
71108 +           return (-EFAULT);
71109 +       }
71110 +
71111 +
71112 +       jtagdrv_select_ring (dev, shiftargs->ring);
71113 +
71114 +       if (io_cmd == JTAG_SHIFT_IR)
71115 +           jtagdrv_shift_ir (dev, buf, shiftargs->nbits);
71116 +       else
71117 +           jtagdrv_shift_dr (dev, buf, shiftargs->nbits);
71118 +       
71119 +       if (copy_to_user (shiftargs->value, buf, JTAG_NBYTES (shiftargs->nbits)))
71120 +       {
71121 +           kfree (buf);
71122 +           return (-EFAULT);
71123 +       }
71124 +
71125 +       kfree (buf);
71126 +       return (0);
71127 +
71128 +    case I2C_WRITE:
71129 +    case I2C_READ:
71130 +    case I2C_WRITEREG:
71131 +    case I2C_READREG:
71132 +       i2cargs = (I2C_ARGS *) io_data;
71133 +
71134 +       if (! VALID_I2C_RING(i2cargs->ring) || i2cargs->count > I2C_MAX_DATA_LEN)
71135 +           return (-EFAULT);
71136 +
71137 +       jtagdrv_select_ring (dev, RING_I2C_BIT | i2cargs->ring);
71138 +       switch (io_cmd)
71139 +       {
71140 +       case I2C_WRITE:
71141 +           i2cargs->ok = jtagdrv_i2c_write (dev, i2cargs->device, i2cargs->count, i2cargs->data);
71142 +           break;
71143 +
71144 +       case I2C_READ:
71145 +           i2cargs->ok = jtagdrv_i2c_read (dev, i2cargs->device, i2cargs->count, i2cargs->data);
71146 +           break;
71147 +
71148 +       case I2C_WRITEREG:
71149 +           i2cargs->ok = jtagdrv_i2c_writereg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data);
71150 +           break;
71151 +
71152 +       case I2C_READREG:
71153 +           i2cargs->ok = jtagdrv_i2c_readreg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data);
71154 +           break;
71155 +       }
71156 +       return (0);
71157 +
71158 +    case I2C_CLOCK_SHIFT:
71159 +       clockargs = (I2C_CLOCK_SHIFT_ARGS *) io_data;
71160 +
71161 +       freq = (10 * clockargs->m / (1 << (((clockargs->n + 1) & 3))));
71162 +       
71163 +       /* validate the value, and initialise the ring */
71164 +       if (clockargs->t != 0 || clockargs->n > 3 || clockargs->m > 127)
71165 +           return (-EINVAL);
71166 +       
71167 +       jtagdrv_select_ring (dev, RING_I2C_BIT | RING_CLOCK_SHIFT);
71168 +       jtagdrv_i2c_clock_shift (dev, clockargs->t, clockargs->n, clockargs->m);
71169 +       jtagdrv_select_ring (dev, 0);
71170 +       return (0);
71171 +
71172 +    default:
71173 +       return (-EINVAL);
71174 +    }
71175 +    return (-EINVAL);
71176 +}
71177 +
71178 +/*
71179 + * Local variables:
71180 + * c-file-style: "stroustrup"
71181 + * End:
71182 + */
71183 Index: linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv_Linux.h
71184 ===================================================================
71185 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/jtag/jtagdrv_Linux.h       2004-02-23 16:02:56.000000000 -0500
71186 +++ linux-2.6.5-7.191/drivers/net/qsnet/jtag/jtagdrv_Linux.h    2005-07-28 14:52:52.915668552 -0400
71187 @@ -0,0 +1,174 @@
71188 +/*
71189 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
71190 + *
71191 + *    For licensing information please see the supplied COPYING file
71192 + *
71193 + */
71194 +
71195 +#ident "@(#)$Id: jtagdrv_Linux.h,v 1.3 2002/08/09 11:18:37 addy Exp $"
71196 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.h,v $*/
71197 +
71198 +#ifndef __JTAGDRV_LINUX_H
71199 +#define __JTAGDRV_LINUX_H
71200 +
71201 +#include <qsnet/kernel.h>
71202 +#include <asm/io.h>
71203 +
71204 +typedef struct jtag_dev
71205 +{
71206 +    unsigned long      base;
71207 +    int                 region;
71208 +
71209 +    u_int              present:1;
71210 +    u_int              open:1;
71211 +} JTAG_DEV;
71212 +
71213 +/*
71214 +**
71215 +**                     Hardware Defines
71216 +**
71217 +*/
71218 +
71219 +/*
71220 + * Assume that bit 4 of the Control Register is set to 1 (by default) 
71221 + * to enable the printer port (CS3).
71222 + *
71223 + * The default base address is 3BC-3BF. 
71224 + */
71225 +
71226 +#define LPT0   0x3BC                   /* CSR Base Address - note this can
71227 +                                        * change depending on the setting
71228 +                                        * in the Control Register 0.
71229 +                                        *
71230 +                                        * LPT1 0x378
71231 +                                        * LPT2 0x278
71232 +                                        * LPT3 0x268
71233 +                                       */
71234 +
71235 +/*
71236 + *     Register offsets from the port base address
71237 + */
71238 +
71239 +#define LPT_REGISTER_0 0
71240 +#define LPT_REGISTER_1 1
71241 +#define LPT_REGISTER_2 2
71242 +#define LPT_REGISTER_3 0x400
71243 +#define LPT_REGISTER_4 0x401
71244 +#define LPT_REGISTER_5 0x402
71245 +
71246 +/*
71247 + *     Chip control registers
71248 + */
71249 +                                       /* Base address for Super I/O National*/
71250 +
71251 +#define SIO_BASE_ADDR  0x26e           /* Semiconductor PC87332VLJ combo-chip*/
71252 +#define CR4_REG                0x04            /* index 4, printer control reg 4 */
71253 +
71254 +#define LPT_EPP                0x01            /* Enable bit for epp */
71255 +#define LPT_ECP                0x04            /* Enable bit for ecp */
71256 +
71257 +/*
71258 + *     Registers for use with centronics, nibble and byte modes.
71259 + */
71260 +
71261 +#define LPT_DATA       LPT_REGISTER_0          /* line printer port data */
71262 +#define LPT_STAT       LPT_REGISTER_1          /* LPT port status        */
71263 +#define LPT_CTRL       LPT_REGISTER_2          /* LPT port control       */
71264 +
71265 +/*
71266 + *     Registers for use with ECP mode.
71267 + */ 
71268 +
71269 +#define LPT_DFIFO      LPT_REGISTER_3          /* r/w fifo register    */
71270 +#define LPT_CFGB       LPT_REGISTER_4          /* Configuration B      */
71271 +#define LPT_ECR                LPT_REGISTER_5          /* Extended control     */
71272 +
71273 +/*
71274 + * Bit assignments for ECR register.
71275 + */
71276 +
71277 +       /* Bits 0-4 */
71278 +
71279 +#define LPT_ECR_EMPTY  0x01            /* FIFO is empty */
71280 +#define LPT_ECR_FULL   0x02            /* FIFO is full */
71281 +#define LPT_ECR_SERV   0x04            /* Service bit */
71282 +#define LPT_ECR_DMA    0x08            /* DMA enable */
71283 +#define LPT_ECR_nINTR  0x10            /* Interrupt disable */
71284 +
71285 +       /*
71286 +        * Bits 5-7 are ECR modes.
71287 +        */
71288 +
71289 +#define LPT_ECR_PAR    0x20            /* Parallel port FIFO mode */
71290 +#define LPT_ECR_ECP    0x60            /* ECP mode */
71291 +#define LPT_ECR_CFG    0xE0            /* Configuration mode */
71292 +#define LPT_ECR_CLEAR  ~0xE0           /* Clear mode bits */
71293 +
71294 +/*
71295 + * Bit assignments for the parallel port STATUS register:
71296 + */
71297 +
71298 +#define LPT_STAT_BIT0  0X1     /* Reserved. Bit always set.            */
71299 +#define LPT_STAT_BIT1  0X2     /* Reserved. Bit always set.            */
71300 +#define LPT_STAT_IRQ   0x4     /* interrupt status bit                 */
71301 +#define LPT_STAT_ERROR 0x8     /* set to 0 to indicate error           */
71302 +#define LPT_STAT_SLCT  0x10    /* status of SLCT lead from printer     */
71303 +#define LPT_STAT_PE    0x20    /* set to 1 when out of paper           */
71304 +#define LPT_STAT_ACK   0x40    /* acknowledge - set to 0 when ready    */
71305 +#define LPT_STAT_nBUSY 0x80    /* busy status bit, 0=busy, 1=ready     */
71306 +
71307 +/*
71308 + * Bit assignments for the parallel port CONTROL register:
71309 + */
71310 +
71311 +#define LPT_CTRL_nSTROBE       0x1     /* Printer Strobe Control       */
71312 +#define LPT_CTRL_nAUTOFD       0x2     /* Auto Feed Control            */
71313 +#define LPT_CTRL_INIT          0x4     /* Initialize Printer Control   */
71314 +#define LPT_CTRL_nSLCTIN       0x8     /* 0=select printer, 1=not selected */
71315 +#define LPT_CTRL_IRQ           0x10    /* Interrupt Request Enable Control */
71316 +#define LPT_CTRL_DIR           0x20    /* Direction control            */
71317 +#define LPT_CTRL_BIT6          0X40    /* Reserved. Bit always set.    */
71318 +#define LPT_CTRL_BIT7          0X80    /* Reserved. Bit always set.    */
71319 +
71320 +
71321 +#define LPT_WRITE(dev, regname, value) do { outb(value, (dev)->base + regname); } while (0)
71322 +#define LPT_READ(dev, regname,value)   do { value = inb((dev)->base + regname); } while (0)
71323 +
71324 +
71325 +
71326 +/* Standard register access macros */
71327 +#define LPT_WRITE_CTRL(dev, value)     LPT_WRITE(dev, LPT_CTRL, LPT_CTRL_INIT | value)
71328 +#define LPT_WRITE_DATA(dev, value)     LPT_WRITE(dev, LPT_DATA, value)
71329 +#define LPT_READ_STAT(dev, value)      LPT_READ(dev, LPT_STAT, value)
71330 +
71331 +/*
71332 + * The jtag signals are connected to the parallel port as follows :
71333 + *
71334 + *  TRST       bit 0
71335 + *  TDI                bit 1
71336 + *  TMS                bit 2
71337 + *  TCLK       AFX
71338 + *  TDO                PE
71339 + */
71340 +#define LPT_DATA_TRST  1
71341 +#define LPT_DATA_TDI   2
71342 +#define LPT_DATA_TMS   4
71343 +#define LPT_CTRL_TCLK  LPT_CTRL_nAUTOFD
71344 +#define LPT_STAT_TDO   LPT_STAT_PE
71345 +
71346 +/*
71347 + * The I2C signals are connected as follows :
71348 + */
71349 +#define LPT_DATA_SDA   2
71350 +#define LPT_CTRL_SCLK  LPT_CTRL_nAUTOFD
71351 +#define LPT_STAT_SDA   LPT_STAT_PE
71352 +
71353 +/*
71354 + * The ring selection signals are as follows :
71355 + *  addr       bit 0-7
71356 + *  clock      nSLCTIN
71357 + */
71358 +#define LPT_CTRL_RCLK  LPT_CTRL_nSLCTIN
71359 +
71360 +
71361 +#endif /* __JTAGDRV_LINUX_H */
71362 Index: linux-2.6.5-7.191/drivers/net/qsnet/jtag/Makefile
71363 ===================================================================
71364 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/jtag/Makefile      2004-02-23 16:02:56.000000000 -0500
71365 +++ linux-2.6.5-7.191/drivers/net/qsnet/jtag/Makefile   2005-07-28 14:52:52.915668552 -0400
71366 @@ -0,0 +1,15 @@
71367 +#
71368 +# Makefile for Quadrics QsNet
71369 +#
71370 +# Copyright (c) 2002-2004 Quadrics Ltd
71371 +#
71372 +# File: drivers/net/qsnet/jtag/Makefile
71373 +#
71374 +
71375 +
71376 +#
71377 +
71378 +obj-$(CONFIG_JTAG)     += jtag.o
71379 +jtag-objs      := jtagdrv_Linux.o jtagdrv.o
71380 +
71381 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
71382 Index: linux-2.6.5-7.191/drivers/net/qsnet/jtag/Makefile.conf
71383 ===================================================================
71384 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/jtag/Makefile.conf 2004-02-23 16:02:56.000000000 -0500
71385 +++ linux-2.6.5-7.191/drivers/net/qsnet/jtag/Makefile.conf      2005-07-28 14:52:52.915668552 -0400
71386 @@ -0,0 +1,10 @@
71387 +# Flags for generating QsNet Linux Kernel Makefiles
71388 +MODNAME                =       jtag.o
71389 +MODULENAME     =       jtag
71390 +KOBJFILES      =       jtagdrv_Linux.o jtagdrv.o
71391 +EXPORT_KOBJS   =       
71392 +CONFIG_NAME    =       CONFIG_JTAG
71393 +SGALFC         =       
71394 +# EXTRALINES START
71395 +
71396 +# EXTRALINES END
71397 Index: linux-2.6.5-7.191/drivers/net/qsnet/jtag/quadrics_version.h
71398 ===================================================================
71399 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/jtag/quadrics_version.h    2004-02-23 16:02:56.000000000 -0500
71400 +++ linux-2.6.5-7.191/drivers/net/qsnet/jtag/quadrics_version.h 2005-07-28 14:52:52.915668552 -0400
71401 @@ -0,0 +1 @@
71402 +#define QUADRICS_VERSION "4.31qsnet"
71403 Index: linux-2.6.5-7.191/drivers/net/qsnet/Kconfig
71404 ===================================================================
71405 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/Kconfig    2004-02-23 16:02:56.000000000 -0500
71406 +++ linux-2.6.5-7.191/drivers/net/qsnet/Kconfig 2005-07-28 14:52:52.916668400 -0400
71407 @@ -0,0 +1,79 @@
71408 +#
71409 +# Kconfig for Quadrics QsNet
71410 +#
71411 +# Copyright (c) 2004 Quadrics Ltd
71412 +#
71413 +# File: driver/net/qsnet/Kconfig
71414 +#
71415 +
71416 +menu "Quadrics QsNet"
71417 +        depends on NETDEVICES
71418 +
71419 +config QSNET
71420 +        tristate "Quadrics QsNet support"
71421 +       default m
71422 +        depends on PCI
71423 +        ---help---
71424 +          Quadrics QsNet is a high bandwidth, ultra low latency cluster interconnect
71425 +          which provides both user and kernel programmers with secure, direct access
71426 +          to the Quadrics network.
71427 +
71428 +config ELAN3
71429 +        tristate "Elan 3 device driver"
71430 +       default m
71431 +        depends on QSNET
71432 +        ---help---
71433 +          This is the main device driver for the Quadrics QsNet (Elan3) PCI device.
71434 +          This is a high bandwidth, ultra low latency interconnect which provides
71435 +          both user and kernel programmers with secure, direct access to the
71436 +          Quadrics network.
71437 +
71438 +config ELAN4
71439 +        tristate "Elan 4 device driver"
71440 +       default m
71441 +        depends on QSNET
71442 +        ---help---
71443 +          This is the main device driver for the Quadrics QsNetII (Elan4) PCI-X device.
71444 +          This is a high bandwidth, ultra low latency interconnect which provides
71445 +          both user and kernel programmers with secure, direct access to the
71446 +          Quadrics network.
71447 +
71448 +config EP
71449 +        tristate "Elan Kernel Comms"
71450 +       default m
71451 +        depends on QSNET && (ELAN4 || ELAN3)
71452 +        ---help---
71453 +          This module implements the QsNet kernel communications layer. This
71454 +          is used to layer kernel level facilities on top of the basic Elan
71455 +          device drivers. These can be used to implement subsystems such as
71456 +          TCP/IP and remote filing systems over the QsNet interconnect.
71457 +
71458 +config EIP
71459 +        tristate "Elan IP device driver"
71460 +       default m
71461 +        depends on QSNET && EP && NET
71462 +        ---help---
71463 +        This is a network IP device driver for the Quadrics QsNet device.
71464 +        It allows the TCP/IP protocol to be run over the Quadrics interconnect.
71465 +
71466 +config RMS
71467 +        tristate "Resource Management System support"
71468 +       default m
71469 +        depends on QSNET
71470 +        ---help---
71471 +        This is a support module for the Quadrics RMS resource manager. It provides kernel
71472 +        services for monitoring and controlling user job execution, termination and cleanup.
71473 +
71474 +config JTAG
71475 +        tristate "Switch monitoring"
71476 +       default m
71477 +        depends on QSNET
71478 +        ---help---
71479 +          The jtag interface is used to allow processes to send and retrieve jtag
71480 +          information to a Quadrics QsNet Elite switch via the parallel port.
71481 +          The module requires a /dev/jtag[0-3] entry (usually there is only a /dev/jtag0)
71482 +          device and a particular device only allows one process at a time to access this
71483 +          resource.
71484 +          For more information about JTag interface, please refer to the IEEE document on
71485 +          http://www.ieee.org/
71486 +endmenu
71487 Index: linux-2.6.5-7.191/drivers/net/qsnet/Makefile
71488 ===================================================================
71489 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/Makefile   2004-02-23 16:02:56.000000000 -0500
71490 +++ linux-2.6.5-7.191/drivers/net/qsnet/Makefile        2005-07-28 14:52:52.916668400 -0400
71491 @@ -0,0 +1,15 @@
71492 +#
71493 +# Makefile for Quadrics QsNet
71494 +#
71495 +# Copyright (c) 2002-2004 Quadrics Ltd.
71496 +#
71497 +# File: driver/net/qsnet/Makefile
71498 +#
71499 +
71500 +obj-$(CONFIG_QSNET)     += qsnet/ elan/
71501 +obj-$(CONFIG_ELAN3)     += elan3/
71502 +obj-$(CONFIG_ELAN4)     += elan4/
71503 +obj-$(CONFIG_EP)       += ep/ 
71504 +obj-$(CONFIG_EIP)       += eip/ 
71505 +obj-$(CONFIG_RMS)       += rms/ 
71506 +obj-$(CONFIG_JTAG)      += jtag/
71507 Index: linux-2.6.5-7.191/drivers/net/qsnet/qsnet/debug.c
71508 ===================================================================
71509 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/qsnet/debug.c      2004-02-23 16:02:56.000000000 -0500
71510 +++ linux-2.6.5-7.191/drivers/net/qsnet/qsnet/debug.c   2005-07-28 14:52:52.917668248 -0400
71511 @@ -0,0 +1,583 @@
71512 +/*
71513 + *    Copyright (c) 2003 by Quadrics Ltd.
71514 + * 
71515 + *    For licensing information please see the supplied COPYING file
71516 + *
71517 + */
71518 +
71519 +#ident "@(#)$Id: debug.c,v 1.21 2004/08/19 08:09:57 david Exp $"
71520 +/*      $Source: /cvs/master/quadrics/qsnet/debug.c,v $ */
71521 +
71522 +#include <qsnet/kernel.h>
71523 +#include <qsnet/debug.h>
71524 +#include <qsnet/procfs_linux.h>
71525 +
71526 +caddr_t        qsnet_debug_buffer_ptr = NULL;
71527 +int           qsnet_debug_front      = 0;
71528 +int           qsnet_debug_back       = 0;
71529 +int            qsnet_debug_lost_lines = 0;
71530 +int           qsnet_debug_disabled   = 0;
71531 +
71532 +int           qsnet_debug_line_size  = 256;
71533 +int           qsnet_debug_num_lines  = 8192;
71534 +
71535 +int           qsnet_assfail_mode     = 1;                      /* default to BUG() */
71536 +
71537 +int            qsnet_debug_running    = 0;
71538 +int            kqsnet_debug_running   = 0;
71539 +
71540 +static spinlock_t qsnet_debug_lock;
71541 +static kcondvar_t qsnet_debug_wait;
71542 +static char       qsnet_debug_buffer_space[8192];
71543 +
71544 +#define QSNET_DEBUG_PREFIX_MAX_SIZE    32
71545 +#define QSNET_DEBUG_MAX_WORDWRAP       15
71546 +
71547 +/* must be larger than  QSNET_DEBUG_PREFIX_MAX_SIZE +  QSNET_DEBUG_MAX_WORDWRAP + 2 */
71548 +#if defined(DIGITAL_UNIX) 
71549 +#define QSNET_DEBUG_CONSOLE_WIDTH 80
71550 +#elif defined(LINUX)
71551 +#define QSNET_DEBUG_CONSOLE_WIDTH 128
71552 +#endif
71553 +
71554 +#define isspace(CH)    ((CH==' ') | (CH=='\t') | (CH=='\n'))
71555 +
71556 +#ifdef LINUX
71557 +#define ALLOC_DEBUG_BUFFER(ptr)                do { (ptr) = (void *)__get_free_pages (GFP_KERNEL, get_order (qsnet_debug_num_lines * qsnet_debug_line_size)); } while (0)
71558 +#define FREE_DEBUG_BUFFER(ptr)         free_pages ((unsigned long) ptr, get_order (qsnet_debug_num_lines * qsnet_debug_line_size))
71559 +#else
71560 +#define ALLOC_DEBUG_BUFFER(ptr)                KMEM_ALLOC (ptr, caddr_t, qsnet_debug_num_lines * qsnet_debug_line_size, 1)
71561 +#define FREE_DEBUG_BUFFER(ptr)         KMEM_FREE (ptr, qsnet_debug_num_lines * qsnet_debug_line_size)
71562 +#endif
71563 +
71564 +void
71565 +qsnet_debug_init ()
71566 +{
71567 +       spin_lock_init (&qsnet_debug_lock);
71568 +       kcondvar_init  (&qsnet_debug_wait);
71569 +
71570 +       qsnet_debug_front      = 0;
71571 +       qsnet_debug_back       = 0;
71572 +       qsnet_debug_lost_lines = 0;
71573 +
71574 +       if (qsnet_debug_line_size < (QSNET_DEBUG_PREFIX_MAX_SIZE + QSNET_DEBUG_MAX_WORDWRAP + 2))
71575 +               qsnet_debug_line_size = 256;
71576 +
71577 +       qsnet_debug_running    = 1;
71578 +
71579 +       qsnet_proc_register_int (qsnet_procfs_config, "assfail_mode", &qsnet_assfail_mode, 0);
71580 +}
71581 +
71582 +void
71583 +qsnet_debug_fini()
71584 +{
71585 +       if (!qsnet_debug_running) return;
71586 +
71587 +       remove_proc_entry ("assfail_mode", qsnet_procfs_config);
71588 +
71589 +       spin_lock_destroy (&qsnet_debug_lock);
71590 +       kcondvar_destroy  (&qsnet_debug_wait);
71591 +       
71592 +       if (qsnet_debug_buffer_ptr)
71593 +               FREE_DEBUG_BUFFER (qsnet_debug_buffer_ptr);
71594 +
71595 +       qsnet_debug_buffer_ptr     = NULL;
71596 +       qsnet_debug_lost_lines = 0;     
71597 +       qsnet_debug_running    = 0;     
71598 +}
71599 +
71600 +void
71601 +qsnet_debug_disable(int val)
71602 +{
71603 +       qsnet_debug_disabled = val;
71604 +}
71605 +
71606 +void
71607 +qsnet_debug_alloc()
71608 +{
71609 +       caddr_t ptr;
71610 +       unsigned long flags;
71611 +
71612 +       if (!qsnet_debug_running) return;
71613 +
71614 +       if (qsnet_debug_buffer_ptr == NULL)
71615 +       {
71616 +               ALLOC_DEBUG_BUFFER (ptr);
71617 +
71618 +               if (ptr != NULL)
71619 +               {
71620 +                       spin_lock_irqsave (&qsnet_debug_lock, flags);
71621 +                       if (qsnet_debug_buffer_ptr == NULL)
71622 +                       {
71623 +                               qsnet_debug_buffer_ptr = ptr;
71624 +                               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71625 +                       }
71626 +                       else
71627 +                       {
71628 +                               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71629 +
71630 +                               FREE_DEBUG_BUFFER (ptr);
71631 +                       }
71632 +               }
71633 +       }
71634 +       
71635 +}
71636 +
71637 +static void 
71638 +qsnet_prefix_debug(unsigned int mode, char *prefix, char *buffer) 
71639 +{
71640 +       /* assumes caller has lock */
71641 +
71642 +       int  prefixlen = strlen(prefix);
71643 +       char pref[QSNET_DEBUG_PREFIX_MAX_SIZE];
71644 +       int  prefix_done = 0;
71645 +
71646 +       if (!qsnet_debug_running) return;
71647 +
71648 +       if (qsnet_debug_disabled)
71649 +               return;
71650 +
71651 +       if (prefixlen >= QSNET_DEBUG_PREFIX_MAX_SIZE) 
71652 +       {
71653 +               strncpy(pref,prefix,QSNET_DEBUG_PREFIX_MAX_SIZE -2);
71654 +               strcpy (&pref[QSNET_DEBUG_PREFIX_MAX_SIZE-5],"... ");
71655 +
71656 +               prefix = pref;
71657 +                prefixlen = strlen(prefix);
71658 +       }
71659 +
71660 +#ifdef CONFIG_MPSAS
71661 +       {
71662 +               char *p;
71663 +#define TRAP_PUTCHAR_B                 (0x17a - 256)
71664 +#define SAS_PUTCHAR(c)                 do {\
71665 +                       register int o0 asm ("o0") = (c);\
71666 +\
71667 +                       asm volatile ("ta %0; nop" \
71668 +                                     : /* no outputs */\
71669 +                                     : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\
71670 +                                     : /* clobbered */ "o0");\
71671 +\
71672 +                       if (o0 == '\n') {\
71673 +                               o0 = '\r';\
71674 +\
71675 +                               asm volatile ("ta %0; nop" \
71676 +                                             : /* no outputs */\
71677 +                                             : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\
71678 +                                             : /* clobbered */ "o0");\
71679 +                       }\
71680 +               } while(0)
71681 +
71682 +               for (p = prefix; *p; p++)
71683 +                       SAS_PUTCHAR (*p);
71684 +
71685 +               for (p = buffer; *p; p++)
71686 +                       SAS_PUTCHAR (*p);
71687 +       }
71688 +#else
71689 +       if (mode & QSNET_DEBUG_BUFFER)
71690 +       {
71691 +               if (qsnet_debug_buffer_ptr == NULL)
71692 +                       qsnet_debug_lost_lines++;
71693 +               else
71694 +               {                   
71695 +                       caddr_t base = &qsnet_debug_buffer_ptr[qsnet_debug_line_size * qsnet_debug_back];
71696 +                       caddr_t lim  = base + qsnet_debug_line_size - 2;
71697 +                       caddr_t p;
71698 +               
71699 +                       p = buffer; 
71700 +                       prefix_done = 0;
71701 +                       while (*p) 
71702 +                       {
71703 +                               /* sort out prefix */
71704 +                               if ( prefix_done++ ) 
71705 +                               {
71706 +                                       int i;
71707 +                                       for(i=0;i<prefixlen;i++)
71708 +                                               base[i] = ' ';
71709 +                                       /* memset(base,' ',prefixlen); */
71710 +                               }
71711 +                               else
71712 +                                       strcpy(base,prefix);
71713 +                               base += prefixlen; /* move the base on */
71714 +
71715 +                               /* copy data */
71716 +                               for ( ; *p && (base < lim); )
71717 +                                       *base++ = *p++;
71718 +
71719 +                               /* if line split then add \n */
71720 +                               if ((base == lim) && (*base != '\n'))
71721 +                               {
71722 +                                       char *ptr;
71723 +                                       int   count;
71724 +
71725 +                                       *base = '\n';
71726 +                                       /* we added a \n cos it was end of line put next char was \n */
71727 +                                       if (*p == '\n') 
71728 +                                               p++;
71729 +                                       else
71730 +                                       {
71731 +                                               /* lets see if we can back track and find a white space to break on */
71732 +                                               ptr = base-1;
71733 +                                               count = 1;
71734 +                                               while ( ( !isspace(*ptr) ) && ( count < QSNET_DEBUG_MAX_WORDWRAP ))
71735 +                                               {
71736 +                                                       count++;
71737 +                                                       ptr--;
71738 +                                               }
71739 +
71740 +                                               if ( isspace(*ptr) ) 
71741 +                                               {
71742 +                                                       /* found somewhere to wrap to */
71743 +                                                       p -= (count-1); /* need to loose the white space */
71744 +                                                       base = ptr;
71745 +                                                       *base = '\n';
71746 +                                               }
71747 +                                       }
71748 +                                       base++;
71749 +                               }
71750 +                               *base = '\0';
71751 +
71752 +                               /* move on pointers */
71753 +                               qsnet_debug_back = (++qsnet_debug_back == qsnet_debug_num_lines) ? 0 : qsnet_debug_back;            
71754 +                               if (qsnet_debug_back == qsnet_debug_front)
71755 +                               {
71756 +                                       qsnet_debug_lost_lines++;
71757 +                                       qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
71758 +                               }
71759 +                               base  = &qsnet_debug_buffer_ptr[qsnet_debug_line_size * qsnet_debug_back];
71760 +                               lim  =  base + qsnet_debug_line_size - 2;
71761 +                       }
71762 +                       kcondvar_wakeupone (&qsnet_debug_wait, &qsnet_debug_lock);
71763 +               }
71764 +       }
71765 +
71766 +       if (mode & QSNET_DEBUG_CONSOLE)
71767 +       {
71768 +               int     remaining = QSNET_DEBUG_CONSOLE_WIDTH - prefixlen;
71769 +               caddr_t p;
71770 +               char    line[QSNET_DEBUG_CONSOLE_WIDTH +2];
71771 +               int     len;
71772 +           
71773 +               strcpy (pref,prefix);
71774 +               prefix_done = 0;
71775 +
71776 +               p = buffer;
71777 +               while ( *p )
71778 +               {
71779 +                       /* use the prefix only once */
71780 +                       if  ( prefix_done++ > 0 ) 
71781 +                               {
71782 +                                       int i;
71783 +                                       for(i=0;i<prefixlen;i++)
71784 +                                               pref[i] = ' ';
71785 +                                       /* memset(perf,' ',prefixlen); */
71786 +                               }       
71787 +
71788 +                       len=strlen(p);
71789 +                       if (len > remaining) len = remaining;
71790 +                 
71791 +                       strncpy(line, p, len);
71792 +                       line[len] = 0;
71793 +                       p += len;
71794 +                   
71795 +                       /* word wrap */
71796 +                       if ((len == remaining) && *p && !isspace(*p))
71797 +                       {
71798 +                               /* lets see if we can back track and find a white space to break on */
71799 +                               char * ptr = &line[len-1];
71800 +                               int    count = 1;
71801 +
71802 +                               while ( ( !isspace(*ptr) ) && ( count < QSNET_DEBUG_MAX_WORDWRAP ))
71803 +                               {
71804 +                                       count++;
71805 +                                       ptr--;
71806 +                               }
71807 +
71808 +                               if ( isspace(*ptr) ) 
71809 +                               {
71810 +                                       /* found somewhere to wrap to */
71811 +                                       p -= (count-1); /* need to loose the white space */
71812 +                                       len -= count;
71813 +                               }               
71814 +                       }
71815 +
71816 +                       if (line[len-1] != '\n' ) 
71817 +                       {
71818 +                               line[len] = '\n';
71819 +                               line[len+1] = 0;
71820 +                       }
71821 +
71822 +                       /* we put a \n in so dont need another one next */
71823 +                       if ( *p == '\n')
71824 +                               p++;
71825 +
71826 +#if defined(DIGITAL_UNIX)
71827 +                       {
71828 +                               char *pr;
71829 +
71830 +                               for (pr = pref; *pr; pr++)
71831 +                                       cnputc (*pr);
71832 +
71833 +                               for (pr = line; *pr; pr++)
71834 +                                       cnputc (*pr); 
71835 +                       }
71836 +#elif defined(LINUX)
71837 +                       printk("%s%s",pref,line);
71838 +#endif
71839 +               }
71840 +       }
71841 +#endif /* CONFIG_MPSAS */
71842 +}
71843 +
71844 +void
71845 +qsnet_vdebugf (unsigned int mode, char *prefix, char *fmt, va_list ap)
71846 +{
71847 +       unsigned long flags;
71848 +
71849 +       if (!qsnet_debug_running) return;
71850 +
71851 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
71852 +
71853 +       qsnet_debug_buffer_space[0] = '\0';
71854 +
71855 +#if defined(DIGITAL_UNIX)
71856 +       prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap);
71857 +#elif defined(LINUX)
71858 +       vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap);
71859 +#endif
71860 +
71861 +       if (prefix == NULL)
71862 +               printk ("qsnet_vdebugf: prefix==NULL\n");
71863 +       else
71864 +               qsnet_prefix_debug(mode, prefix, qsnet_debug_buffer_space);
71865 +
71866 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71867 +}
71868 +
71869 +void kqsnet_debugf(char *fmt,...)
71870 +{
71871 +       if ( kqsnet_debug_running ) {
71872 +               va_list ap;
71873 +               char string[20];
71874 +               
71875 +               sprintf (string, "mm=%p:", current->mm);
71876 +               va_start(ap, fmt);
71877 +               qsnet_vdebugf(QSNET_DEBUG_BUFFER, string, fmt, ap);
71878 +               va_end(ap);
71879 +       }       
71880 +}
71881 +void 
71882 +qsnet_debugf(unsigned int mode, char *fmt,...)
71883 +{
71884 +       va_list       ap;
71885 +       unsigned long flags;
71886 +
71887 +       if (!qsnet_debug_running) return;
71888 +
71889 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
71890 +
71891 +       qsnet_debug_buffer_space[0] = '\0';
71892 +
71893 +       va_start (ap, fmt);
71894 +#if defined(DIGITAL_UNIX)
71895 +       prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap);
71896 +#elif defined(LINUX)
71897 +       vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap);
71898 +#endif
71899 +       va_end (ap);
71900 +
71901 +       qsnet_prefix_debug(mode, "", qsnet_debug_buffer_space); 
71902 +
71903 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71904 +}
71905 +
71906 +int
71907 +qsnet_debug_buffer (caddr_t ubuffer, int len)
71908 +{
71909 +       caddr_t buffer, ptr, base;
71910 +       int     remain, len1;
71911 +       unsigned long flags;
71912 +       static  char qsnet_space[65536];
71913 +
71914 +       if (!qsnet_debug_running) return (0);
71915 +
71916 +       if (len < qsnet_debug_line_size)
71917 +               return (-1);
71918 +
71919 +       if (len > (qsnet_debug_line_size * qsnet_debug_num_lines))
71920 +               len = qsnet_debug_line_size * qsnet_debug_num_lines;
71921 +    
71922 +       if ( len > 65536 ) {
71923 +               KMEM_ZALLOC (buffer, caddr_t, len, 1);
71924 +       } else 
71925 +               buffer = qsnet_space;
71926 +
71927 +       if (buffer == NULL)
71928 +               return (-1);
71929 +
71930 +       if (qsnet_debug_buffer_ptr == NULL)
71931 +               qsnet_debug_alloc();
71932 +
71933 +       if (qsnet_debug_buffer_ptr == NULL)
71934 +       {
71935 +               if ( len > 65536 )
71936 +                       KMEM_FREE (buffer, len);
71937 +               return (-1);
71938 +       }
71939 +
71940 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
71941 +    
71942 +       while (!qsnet_debug_lost_lines && (qsnet_debug_back == qsnet_debug_front))
71943 +               if (kcondvar_waitsig (&qsnet_debug_wait, &qsnet_debug_lock, &flags) == 0)
71944 +                       break;
71945 +    
71946 +       ptr    = buffer;
71947 +       remain = len;
71948 +
71949 +       if (qsnet_debug_lost_lines)
71950 +       {
71951 +               qsnet_debug_lost_lines = 0;
71952 +               strcpy (ptr, "Debug Buffer has overflowed!!\n");
71953 +               len1 = strlen (ptr);
71954 +
71955 +               remain -= len1;
71956 +               ptr    += len1;
71957 +       }
71958 +
71959 +       while (qsnet_debug_front != qsnet_debug_back)
71960 +       {
71961 +               /* copy the line from DebugFront */
71962 +               base = &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size];
71963 +
71964 +               len1 = strlen (base);
71965 +
71966 +               if (len1 > remain)
71967 +                       break;
71968 +       
71969 +               bcopy (base, ptr, len1);
71970 +       
71971 +               ptr += len1;
71972 +               remain -= len1;
71973 +
71974 +               qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
71975 +       }
71976 +
71977 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
71978 +
71979 +       len1 = ptr - buffer;
71980 +
71981 +       if (len1 != 0 && copyout (buffer, ubuffer, len1))
71982 +               len1 = -1;
71983 +
71984 +       if ( len > 65536 )
71985 +               KMEM_FREE (buffer, len);
71986 +   
71987 +       return (len1);
71988 +}
71989 +
71990 +void
71991 +qsnet_debug_buffer_on() 
71992 +{
71993 +       if (qsnet_debug_buffer_ptr == NULL)
71994 +               qsnet_debug_alloc();
71995 +}
71996 +
71997 +void 
71998 +qsnet_debug_buffer_clear()
71999 +{
72000 +       unsigned long flags;
72001 +
72002 +       qsnet_debug_buffer_on();
72003 +       
72004 +       if (qsnet_debug_buffer_ptr != NULL){
72005 +               spin_lock_irqsave (&qsnet_debug_lock, flags);
72006 +               qsnet_debug_front      = 0;
72007 +               qsnet_debug_back       = 0;
72008 +               qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Clear","");
72009 +               spin_unlock_irqrestore (&qsnet_debug_lock, flags);      
72010 +       }
72011 +}
72012 +
72013 +void 
72014 +qsnet_debug_buffer_mark(char *str)
72015 +{
72016 +       unsigned long flags;    
72017 +
72018 +       qsnet_debug_buffer_on();
72019 +
72020 +       if (qsnet_debug_buffer_ptr != NULL) {
72021 +               spin_lock_irqsave (&qsnet_debug_lock, flags);
72022 +               qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Mark",str);
72023 +               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
72024 +       }
72025 +}
72026 +int
72027 +qsnet_debug_dump ()
72028 +{
72029 +       unsigned long flags;
72030 +
72031 +       if (!qsnet_debug_running) return (0);
72032 +
72033 +       if (qsnet_debug_buffer_ptr == NULL)
72034 +               qsnet_debug_alloc();
72035 +
72036 +       if (qsnet_debug_buffer_ptr == NULL)
72037 +               return (-1);
72038 +
72039 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
72040 +
72041 +       while (qsnet_debug_front != qsnet_debug_back)
72042 +       {
72043 +               printk ("%s", &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size]);
72044 +
72045 +               qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
72046 +       }
72047 +
72048 +       if (qsnet_debug_lost_lines)
72049 +               printk ("\n**** Debug buffer has lost %d lines\n****\n",qsnet_debug_lost_lines);
72050 +
72051 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
72052 +
72053 +       return (0);
72054 +}
72055 +
72056 +int
72057 +qsnet_debug_kmem (void *handle)
72058 +{
72059 +       if (!qsnet_debug_running) return (0);
72060 +
72061 +#ifdef KMEM_DEBUG
72062 +       qsnet_kmem_display(handle);
72063 +#endif
72064 +       return (0);
72065 +}
72066 +
72067 +int
72068 +qsnet_assfail (char *ex, const char *func, char *file, int line)
72069 +{
72070 +       qsnet_debugf (QSNET_DEBUG_BUFFER, "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
72071 +
72072 +       printk (KERN_EMERG "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
72073 +
72074 +       if (panicstr)
72075 +               return (0);
72076 +
72077 +       if (qsnet_assfail_mode & 1)                             /* return to BUG() */
72078 +               return 1;
72079 +
72080 +       if (qsnet_assfail_mode & 2)
72081 +               panic ("qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
72082 +       if (qsnet_assfail_mode & 4)
72083 +               qsnet_debug_disable (1);
72084 +
72085 +       return 0;
72086 +
72087 +}
72088 +
72089 +
72090 +/*
72091 + * Local variables:
72092 + * c-file-style: "linux"
72093 + * End:
72094 + */
72095 Index: linux-2.6.5-7.191/drivers/net/qsnet/qsnet/i686_mmx.c
72096 ===================================================================
72097 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/qsnet/i686_mmx.c   2004-02-23 16:02:56.000000000 -0500
72098 +++ linux-2.6.5-7.191/drivers/net/qsnet/qsnet/i686_mmx.c        2005-07-28 14:52:52.917668248 -0400
72099 @@ -0,0 +1,99 @@
72100 +/*
72101 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
72102 + * 
72103 + *    For licensing information please see the supplied COPYING file
72104 + *
72105 + */
72106 +
72107 +#ident "@(#)$Id: i686_mmx.c,v 1.11 2004/01/05 12:08:25 mike Exp $"
72108 +/*      $Source: /cvs/master/quadrics/qsnet/i686_mmx.c,v $*/
72109 +
72110 +#include <qsnet/kernel.h>
72111 +
72112 +#if defined(LINUX_I386)
72113 +
72114 +#include <linux/config.h>
72115 +#include <linux/sched.h>
72116 +#include <asm/processor.h>
72117 +#include <asm/i387.h>
72118 +
72119 +int mmx_disabled = 0;
72120 +
72121 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
72122 +/* These functions are lifted from arch/i386/kernel/i387.c
72123 + * and MUST be kept in step with the kernel (currently 2.4.17)
72124 + * alternatively we should export the kernel_fpu_begin() function
72125 + */
72126 +static inline void __save_init_fpu( struct task_struct *tsk )
72127 +{
72128 +       if ( cpu_has_fxsr ) {
72129 +               asm volatile( "fxsave %0 ; fnclex"
72130 +                             : "=m" (tsk->thread.i387.fxsave) );
72131 +       } else {
72132 +               asm volatile( "fnsave %0 ; fwait"
72133 +                             : "=m" (tsk->thread.i387.fsave) );
72134 +       }
72135 +       tsk->flags &= ~PF_USEDFPU;
72136 +}
72137 +#if defined(MODULE)
72138 +void kernel_fpu_begin(void)
72139 +{
72140 +       struct task_struct *tsk = current;
72141 +
72142 +       if (tsk->flags & PF_USEDFPU) {
72143 +               __save_init_fpu(tsk);
72144 +               return;
72145 +       }
72146 +       clts();
72147 +}
72148 +#endif
72149 +#endif
72150 +
72151 +extern inline int
72152 +mmx_preamble(void)
72153 +{
72154 +    if (mmx_disabled || in_interrupt())
72155 +       return (0);
72156 +
72157 +    kernel_fpu_begin();
72158 +
72159 +    return (1);
72160 +}
72161 +
72162 +extern inline void
72163 +mmx_postamble(void)
72164 +{
72165 +    kernel_fpu_end();
72166 +}
72167 +
72168 +extern u64
72169 +qsnet_readq (volatile u64 *ptr)
72170 +{
72171 +    u64 value;
72172 +
72173 +    if (! mmx_preamble())
72174 +       value = *ptr;
72175 +    else
72176 +    {
72177 +       asm volatile ("movq (%0), %%mm0\n"
72178 +                     "movq %%mm0, (%1)\n"
72179 +                     : : "r" (ptr), "r" (&value) : "memory");
72180 +       mmx_postamble();
72181 +    }
72182 +    return (value);
72183 +}
72184 +
72185 +void
72186 +qsnet_writeq(u64 value, volatile u64 *ptr)
72187 +{
72188 +    if (! mmx_preamble())
72189 +       *ptr = value;
72190 +    else
72191 +    {
72192 +       asm volatile ("movq (%0), %%mm0\n"
72193 +                     "movq %%mm0, (%1)\n"
72194 +                     : : "r" (&value), "r" (ptr) : "memory");
72195 +       mmx_postamble();
72196 +    }
72197 +}
72198 +#endif
72199 Index: linux-2.6.5-7.191/drivers/net/qsnet/qsnet/kernel_linux.c
72200 ===================================================================
72201 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/qsnet/kernel_linux.c       2004-02-23 16:02:56.000000000 -0500
72202 +++ linux-2.6.5-7.191/drivers/net/qsnet/qsnet/kernel_linux.c    2005-07-28 14:52:52.919667944 -0400
72203 @@ -0,0 +1,856 @@
72204 +/*
72205 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
72206 + * 
72207 + *    For licensing information please see the supplied COPYING file
72208 + *
72209 + */
72210 +
72211 +#ident "@(#)$Id: kernel_linux.c,v 1.71.2.3 2004/11/04 11:03:47 david Exp $"
72212 +/*      $Source: /cvs/master/quadrics/qsnet/kernel_linux.c,v $*/
72213 +
72214 +#include <qsnet/kernel.h>
72215 +#include <qsnet/ctrl_linux.h>
72216 +#include <qsnet/kpte.h>
72217 +
72218 +#include <linux/sysctl.h>
72219 +#include <linux/init.h>
72220 +#include <linux/module.h>
72221 +#include <linux/vmalloc.h>
72222 +
72223 +#include <qsnet/procfs_linux.h>
72224 +
72225 +#include <linux/smp.h>         /* for smp_call_function() prototype */
72226 +#include <linux/smp_lock.h>
72227 +#include <linux/mm.h>
72228 +
72229 +#include <linux/highmem.h>
72230 +
72231 +extern int mmx_disabled;
72232 +extern int qsnet_debug_line_size;
72233 +extern int qsnet_debug_num_lines;
72234 +
72235 +gid_t                 qsnet_procfs_gid;
72236 +struct proc_dir_entry *qsnet_procfs_root;
72237 +struct proc_dir_entry *qsnet_procfs_config;
72238 +
72239 +MODULE_AUTHOR("Quadrics Ltd.");
72240 +MODULE_DESCRIPTION("QsNet Kernel support code");
72241 +
72242 +MODULE_LICENSE("GPL");
72243 +
72244 +#if defined(LINUX_I386)
72245 +MODULE_PARM(mmx_disabled, "i");
72246 +#endif
72247 +
72248 +MODULE_PARM(qsnet_debug_line_size, "i");
72249 +MODULE_PARM(qsnet_debug_num_lines, "i");
72250 +
72251 +MODULE_PARM(qsnet_procfs_gid, "i");
72252 +
72253 +#ifdef KMEM_DEBUG
72254 +EXPORT_SYMBOL(qsnet_kmem_alloc_debug);
72255 +EXPORT_SYMBOL(qsnet_kmem_free_debug);
72256 +#else
72257 +EXPORT_SYMBOL(qsnet_kmem_alloc);
72258 +EXPORT_SYMBOL(qsnet_kmem_free);
72259 +#endif
72260 +
72261 +EXPORT_SYMBOL(qsnet_kmem_display);
72262 +EXPORT_SYMBOL(kmem_to_phys);
72263 +
72264 +EXPORT_SYMBOL(cpu_hold_all);
72265 +EXPORT_SYMBOL(cpu_release_all);
72266 +
72267 +#if defined(LINUX_I386)
72268 +EXPORT_SYMBOL(qsnet_readq);
72269 +EXPORT_SYMBOL(qsnet_writeq);
72270 +#endif
72271 +
72272 +/* debug.c */
72273 +EXPORT_SYMBOL(qsnet_debugf);
72274 +EXPORT_SYMBOL(kqsnet_debugf);
72275 +EXPORT_SYMBOL(qsnet_vdebugf);
72276 +EXPORT_SYMBOL(qsnet_debug_buffer);
72277 +EXPORT_SYMBOL(qsnet_debug_alloc);
72278 +EXPORT_SYMBOL(qsnet_debug_dump);
72279 +EXPORT_SYMBOL(qsnet_debug_kmem);
72280 +EXPORT_SYMBOL(qsnet_debug_disable);
72281 +
72282 +EXPORT_SYMBOL(qsnet_assfail);
72283 +
72284 +EXPORT_SYMBOL(qsnet_procfs_gid);
72285 +EXPORT_SYMBOL(qsnet_procfs_root);
72286 +
72287 +static int qsnet_open    (struct inode *ino, struct file *fp);
72288 +static int qsnet_release (struct inode *ino, struct file *fp);
72289 +static int qsnet_ioctl   (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
72290 +
72291 +static struct file_operations qsnet_ioctl_fops = 
72292 +{
72293 +       ioctl:   qsnet_ioctl,
72294 +       open:    qsnet_open,
72295 +       release: qsnet_release,
72296 +};
72297 +
72298 +static int
72299 +qsnet_open (struct inode *inode, struct file *fp)
72300 +{
72301 +       MOD_INC_USE_COUNT;
72302 +       fp->private_data = NULL;
72303 +       return (0);
72304 +}
72305 +
72306 +static int
72307 +qsnet_release (struct inode *inode, struct file *fp)
72308 +{
72309 +       MOD_DEC_USE_COUNT;
72310 +       return (0);
72311 +}
72312 +
72313 +static int 
72314 +qsnet_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
72315 +{
72316 +       int res=0;
72317 +
72318 +       switch (cmd) 
72319 +       {
72320 +       case QSNETIO_DEBUG_KMEM:
72321 +       {
72322 +               QSNETIO_DEBUG_KMEM_STRUCT args;
72323 +
72324 +               if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_KMEM_STRUCT)))
72325 +                       return (-EFAULT);
72326 +
72327 +               /* doesnt use handle as a pointer */
72328 +               qsnet_kmem_display(args.handle);
72329 +               break;
72330 +       }
72331 +
72332 +       case QSNETIO_DEBUG_DUMP : 
72333 +       {
72334 +               res = qsnet_debug_dump();
72335 +               break;
72336 +       }
72337 +
72338 +       case QSNETIO_DEBUG_BUFFER :
72339 +       {
72340 +               QSNETIO_DEBUG_BUFFER_STRUCT args;
72341 +
72342 +               if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT)))
72343 +                       return (-EFAULT);
72344 +
72345 +               /* qsnet_debug_buffer uses copyout */
72346 +               if ((res = qsnet_debug_buffer (args.addr, args.len)) != -1)
72347 +               {
72348 +                       args.len = res;
72349 +                       if (copy_to_user ((void *) arg, &args, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT)))
72350 +                               return (-EFAULT);
72351 +                       res = 0;
72352 +               }
72353 +               break;
72354 +       }
72355 +       default:
72356 +               res = EINVAL;
72357 +               break;
72358 +       }
72359 +
72360 +       return ((res == 0) ? 0 : -res);
72361 +}
72362 +
72363 +#ifdef KMEM_DEBUG
72364 +static int qsnet_kmem_open    (struct inode *ino, struct file *fp);
72365 +static int qsnet_kmem_release (struct inode *ino, struct file *fp);
72366 +static ssize_t qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos);
72367 +
72368 +static struct file_operations qsnet_kmem_fops = 
72369 +{
72370 +       open:    qsnet_kmem_open,
72371 +       release: qsnet_kmem_release,
72372 +       read:    qsnet_kmem_read,
72373 +};
72374 +
72375 +typedef struct qsnet_private_space
72376 +{
72377 +       char * space;
72378 +       int    size;
72379 +       struct qsnet_private_space *next;
72380 +} QSNET_PRIVATE_SPACE;
72381 +
72382 +typedef struct qsnet_private  
72383 +{
72384 +       QSNET_PRIVATE_SPACE *space_chain;
72385 +        QSNET_PRIVATE_SPACE *current_space;
72386 +       int                  current_pos;
72387 +
72388 +} QSNET_PRIVATE;
72389 +
72390 +#define QSNET_KMEM_DEBUG_LINE_SIZE ((int)512)
72391 +#define QSNET_PRIVATE_PAGE_SIZE    ((int)(4*1024))
72392 +
72393 +static int qsnet_kmem_fill(QSNET_PRIVATE *pd);
72394 +
72395 +void
72396 +destroy_chain(QSNET_PRIVATE * pd)
72397 +{
72398 +       QSNET_PRIVATE_SPACE *mem, *next;
72399 +       
72400 +       if (pd == NULL) return;
72401 +
72402 +       for(mem = pd->space_chain ; mem != NULL; )
72403 +       {
72404 +               next = mem->next; 
72405 +               if ( mem->space ) 
72406 +                       kfree ( mem->space);
72407 +               kfree(mem);
72408 +               mem = next;
72409 +       }
72410 +       kfree (pd);
72411 +}
72412 +
72413 +QSNET_PRIVATE *
72414 +make_chain(int len)
72415 +{
72416 +       QSNET_PRIVATE       * pd;
72417 +       QSNET_PRIVATE_SPACE * mem;
72418 +       int                   i;
72419 +
72420 +       /* make the private data block */
72421 +       if ((pd = kmalloc (sizeof (QSNET_PRIVATE), GFP_KERNEL)) == NULL)
72422 +               return NULL;
72423 +       pd->space_chain = NULL;
72424 +
72425 +       /* first make the holders */
72426 +       for(i=0;i<len;i++)
72427 +       {
72428 +               if ((mem = kmalloc (sizeof (QSNET_PRIVATE_SPACE), GFP_KERNEL)) == NULL)
72429 +               {
72430 +                       destroy_chain(pd);
72431 +                       return (NULL);
72432 +               }
72433 +               mem->next  = pd->space_chain;
72434 +               mem->size  = 0;
72435 +               mem->space = 0;
72436 +               pd->space_chain = mem;
72437 +
72438 +               /* now add the space */
72439 +               if ((mem->space = kmalloc (QSNET_PRIVATE_PAGE_SIZE, GFP_KERNEL)) == NULL)
72440 +               {
72441 +                       destroy_chain(pd);
72442 +                       return (NULL);
72443 +               }                       
72444 +
72445 +               mem->space[0] = 0;
72446 +
72447 +       }
72448 +
72449 +       pd->current_space = pd->space_chain;
72450 +       pd->current_pos   = 0;
72451 +
72452 +       return pd;
72453 +}
72454 +
72455 +static int
72456 +qsnet_kmem_open (struct inode *inode, struct file *fp)
72457 +{
72458 +       MOD_INC_USE_COUNT;
72459 +       fp->private_data = NULL;
72460 +       return (0);
72461 +}
72462 +
72463 +static int
72464 +qsnet_kmem_release (struct inode *inode, struct file *fp)
72465 +{
72466 +       if ( fp->private_data )
72467 +       {
72468 +               QSNET_PRIVATE * pd = (QSNET_PRIVATE *) fp->private_data;
72469 +
72470 +               /* free the space */
72471 +               if (pd->space_chain)
72472 +                       kfree (pd->space_chain);        
72473 +
72474 +               /* free struct */
72475 +               kfree (pd);
72476 +       }
72477 +       MOD_DEC_USE_COUNT;
72478 +       return (0);
72479 +}
72480 +
72481 +static ssize_t
72482 +qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos)
72483 +{
72484 +       QSNET_PRIVATE * pd = (QSNET_PRIVATE *) file->private_data;
72485 +       int             error;
72486 +       int             output_count;
72487 +       int             num_of_links=10;
72488 +
72489 +       /* make a buffer to output count bytes in */
72490 +       if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
72491 +               return (error);
72492 +
72493 +       if ( pd == NULL) 
72494 +       {
72495 +               /* first time */
72496 +
72497 +               /* ok we have to guess at how much space we are going to need  */
72498 +               /* if it fails we up the space and carry try again             */
72499 +               /* we have to do it this way as we cant get more memory whilst */
72500 +               /* holding the lock                                            */
72501 +               if ((pd = make_chain(num_of_links)) == NULL)
72502 +                       return (-ENOMEM);       
72503 +
72504 +               while ( qsnet_kmem_fill(pd) ) 
72505 +               {
72506 +                       destroy_chain(pd);
72507 +                       num_of_links += 10;
72508 +                       if ((pd = make_chain(num_of_links)) == NULL)
72509 +                               return (-ENOMEM);       
72510 +               }
72511 +
72512 +               /* we have the space and filled it */
72513 +               file->private_data = (void *)pd;        
72514 +       }
72515 +               
72516 +       /* output buffer */
72517 +       if ( pd->current_pos >= pd->current_space->size )
72518 +               return (0); /* finished */
72519 +
72520 +       output_count = pd->current_space->size - pd->current_pos;
72521 +       if ( output_count > count ) 
72522 +               output_count = count;
72523 +
72524 +       copy_to_user(buf, (pd->current_space->space + pd->current_pos), output_count);
72525 +
72526 +       pd->current_pos += output_count;
72527 +       ppos            += output_count;
72528 +
72529 +       /* just check to see if we have finished the current space */
72530 +       if ( pd->current_pos >= pd->current_space->size )
72531 +       {
72532 +               if ( pd->current_space->next )
72533 +               {
72534 +                       pd->current_space = pd->current_space->next;
72535 +                       pd->current_pos   = 0;
72536 +               }
72537 +       }       
72538 +
72539 +       return (output_count);
72540 +}
72541 +#endif /* KMEM_DEBUG */
72542 +
72543 +static int
72544 +proc_write_qsnetdebug(struct file *file, const char *buffer,
72545 +                     unsigned long count, void *data)
72546 +{
72547 +       char    tmpbuf[128];
72548 +       int     res;
72549 +       
72550 +       if (count > sizeof (tmpbuf)-1)
72551 +               return (-EINVAL);
72552 +       
72553 +       MOD_INC_USE_COUNT;
72554 +       
72555 +       if (copy_from_user (tmpbuf, buffer, count))
72556 +               res = -EFAULT;
72557 +       else 
72558 +       {
72559 +               tmpbuf[count] = '\0';   
72560 +               
72561 +               if (tmpbuf[count-1] == '\n')
72562 +                       tmpbuf[count-1] = '\0';
72563 +               
72564 +               if (! strcmp (tmpbuf, "on"))
72565 +                       qsnet_debug_buffer_on();
72566 +
72567 +               if (! strcmp (tmpbuf, "clear"))
72568 +                       qsnet_debug_buffer_clear();
72569 +
72570 +               if (! strncmp (tmpbuf, "mark",4))
72571 +                       qsnet_debug_buffer_mark( &tmpbuf[4] );
72572 +               
72573 +               res = count;
72574 +       }
72575 +       
72576 +       MOD_DEC_USE_COUNT;
72577 +       
72578 +       return (res);
72579 +}
72580 +
72581 +static int
72582 +proc_read_qsnetdebug(char *page, char **start, off_t off,
72583 +                    int count, int *eof, void *data)
72584 +{
72585 +       int len = sprintf (page, "echo command > /proc/qsnet/config/qsnetdebug\ncommand = on | off | clear | mark text\n");
72586 +       return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
72587 +}
72588 +
72589 +#include "quadrics_version.h"
72590 +extern int kqsnet_debug_running;
72591 +static char       quadrics_version[] = QUADRICS_VERSION;
72592 +
72593 +static int __init qsnet_init(void)
72594 +{
72595 +       struct proc_dir_entry *p;
72596 +
72597 +       if ((qsnet_procfs_root = proc_mkdir ("qsnet", 0)) == NULL)
72598 +       {
72599 +               printk ("qsnet: failed to create /proc/qsnet \n");
72600 +               return (-ENXIO);
72601 +       }
72602 +       
72603 +       if ((p = create_proc_entry ("ioctl", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_root)) == NULL)
72604 +       {
72605 +               printk ("qsnet: failed to register /proc/qsnet/ioctl\n");
72606 +               return (-ENXIO);
72607 +       }
72608 +       p->proc_fops = &qsnet_ioctl_fops;
72609 +       p->owner     = THIS_MODULE;
72610 +       p->data      = NULL;
72611 +       p->gid       = qsnet_procfs_gid;
72612 +
72613 +       qsnet_proc_register_str (qsnet_procfs_root, "version", quadrics_version, S_IRUGO);
72614 +
72615 +       if ((qsnet_procfs_config = proc_mkdir ("config", qsnet_procfs_root)) == NULL)
72616 +       {
72617 +               printk ("qsnet: failed to create /proc/qsnet/config \n");
72618 +               return (-ENXIO);
72619 +       }
72620 +
72621 +#ifdef KMEM_DEBUG
72622 +       if ((p = create_proc_entry ("kmem_debug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL)
72623 +       {
72624 +               printk ("qsnet: failed to register /proc/qsnet/config/kmem_debug\n");
72625 +               return (-ENXIO);
72626 +       }
72627 +       p->proc_fops = &qsnet_kmem_fops;
72628 +       p->owner     = THIS_MODULE;
72629 +       p->data      = NULL;
72630 +       p->gid       = qsnet_procfs_gid;
72631 +#endif         
72632 +      
72633 +       qsnet_debug_init(); 
72634 +
72635 +       qsnet_proc_register_int (qsnet_procfs_config, "kqsnet_debug_running", &kqsnet_debug_running, 0);
72636 +
72637 +       if ((p = create_proc_entry ("qsnetdebug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL)
72638 +       {
72639 +               printk ("qsnet: failed to register /proc/qsnet/config/qsnetdebug\n");
72640 +               return (-ENXIO);
72641 +       }
72642 +       p->read_proc  = proc_read_qsnetdebug;
72643 +       p->write_proc = proc_write_qsnetdebug;
72644 +       p->owner      = THIS_MODULE;
72645 +       p->data       = NULL;
72646 +       p->gid        = qsnet_procfs_gid;
72647 +       
72648 +       return (0);
72649 +}
72650 +
72651 +static void __exit qsnet_exit(void)
72652 +{
72653 +#ifdef KMEM_DEBUG
72654 +       qsnet_kmem_display(0);
72655 +#endif
72656 +       qsnet_debug_fini();
72657 +
72658 +       remove_proc_entry ("qsnetdebug",           qsnet_procfs_config);
72659 +       remove_proc_entry ("kqsnet_debug_running", qsnet_procfs_config);
72660 +#ifdef KMEM_DEBUG
72661 +       remove_proc_entry ("kmem_debug",           qsnet_procfs_config);
72662 +#endif
72663 +       remove_proc_entry ("config",               qsnet_procfs_root);
72664 +
72665 +       remove_proc_entry ("version", qsnet_procfs_root);
72666 +       remove_proc_entry ("ioctl",   qsnet_procfs_root);
72667 +
72668 +       remove_proc_entry ("qsnet", 0);
72669 +}
72670 +
72671 +/* Declare the module init and exit functions */
72672 +module_init(qsnet_init);
72673 +module_exit(qsnet_exit);
72674 +
72675 +#ifdef KMEM_DEBUG
72676 +/*
72677 + * Kernel memory allocation.  We maintain our own list of allocated mem
72678 + * segments so we can free them on module cleanup.
72679 + * 
72680 + * We use kmalloc for allocations less than one page in size; vmalloc for
72681 + * larger sizes.
72682 + */
72683 +
72684 +typedef struct {
72685 +       struct list_head list;
72686 +       void            *ptr;
72687 +       int             len;
72688 +       int             used_vmalloc;
72689 +       void            *owner;
72690 +       void            *caller;
72691 +       unsigned int     time;
72692 +       int              line;
72693 +       char             filename[20];
72694 +} kmalloc_t;
72695 +
72696 +static LIST_HEAD(kmalloc_head);
72697 +
72698 +static spinlock_t      kmalloc_lock = SPIN_LOCK_UNLOCKED;
72699 +
72700 +/*
72701 + * Kernel memory allocation.  We use kmalloc for allocations less 
72702 + * than one page in size; vmalloc for larger sizes.
72703 + */
72704 +
72705 +static int
72706 +qsnet_kmem_fill(QSNET_PRIVATE *pd)
72707 +{
72708 +       kmalloc_t *kp;
72709 +       struct list_head *lp;
72710 +       unsigned long flags;
72711 +       char str[QSNET_KMEM_DEBUG_LINE_SIZE];
72712 +       QSNET_PRIVATE_SPACE * current_space;
72713 +       int                   current_pos;
72714 +       int                   len;
72715 +       current_space = pd->space_chain;
72716 +       current_pos   = 0;
72717 +       
72718 +       
72719 +       current_space->space[0] = 0;    
72720 +       spin_lock_irqsave(&kmalloc_lock, flags);
72721 +       for (lp = kmalloc_head.next; lp != &kmalloc_head;  lp = lp->next) {
72722 +               kp = list_entry(lp, kmalloc_t, list);
72723 +               
72724 +               /* make the next line */
72725 +               sprintf(str,"%p %d %d %p %p %u %d %s\n",
72726 +                       kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->filename);
72727 +               len = strlen(str);
72728 +               
72729 +               /* does it fit on the current page */
72730 +               if ( (current_pos + len + 1) >=  QSNET_PRIVATE_PAGE_SIZE)
72731 +               {
72732 +                       /* move onto next page */
72733 +                       if ((current_space = current_space->next) == NULL)
72734 +                       {
72735 +                               /* run out of space !!!! */
72736 +                               spin_unlock_irqrestore(&kmalloc_lock, flags);
72737 +                               return (1);
72738 +                       }
72739 +                       current_space->space[0] = 0;    
72740 +                       current_pos = 0;
72741 +               }
72742 +               strcat( current_space->space + current_pos, str);
72743 +               current_pos += len;
72744 +
72745 +               /* remember how much we wrote to this page */
72746 +               current_space->size = current_pos;
72747 +
72748 +       }
72749 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
72750 +
72751 +       return (0);
72752 +}
72753 +
72754 +void * 
72755 +qsnet_kmem_alloc_debug(int len, int cansleep, int zerofill, char *file, int line)
72756 +{
72757 +       void *new;
72758 +       unsigned long flags;
72759 +       kmalloc_t *kp;
72760 +
72761 +       if (len < PAGE_SIZE || !cansleep)
72762 +               new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC);
72763 +       else
72764 +               new = vmalloc(len);
72765 +
72766 +       if (len >= PAGE_SIZE)
72767 +               ASSERT(PAGE_ALIGNED((uintptr_t) new));
72768 +
72769 +       if (new && zerofill)
72770 +               memset(new,0,len);
72771 +
72772 +       /* record allocation */
72773 +       kp = kmalloc(sizeof(kmalloc_t), cansleep ? GFP_KERNEL : GFP_ATOMIC);
72774 +       ASSERT(kp != NULL);
72775 +       kp->len = len;
72776 +       kp->ptr = new;
72777 +       kp->used_vmalloc = (len >= PAGE_SIZE || cansleep);
72778 +       kp->owner  = current;
72779 +       kp->caller = __builtin_return_address(0);
72780 +       kp->time = lbolt;
72781 +       kp->line = line;
72782 +       len = strlen(file);
72783 +
72784 +       if (len > 18) 
72785 +               strcpy(kp->filename,&file[len-18]);
72786 +       else
72787 +               strcpy(kp->filename,file);
72788 +
72789 +       spin_lock_irqsave(&kmalloc_lock, flags);
72790 +       list_add(&kp->list, &kmalloc_head);
72791 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
72792 +
72793 +       return new;
72794 +}
72795 +
72796 +void 
72797 +qsnet_kmem_free_debug(void *ptr, int len, char *file, int line)
72798 +{
72799 +       unsigned long flags;
72800 +       kmalloc_t *kp;
72801 +       struct list_head *lp;
72802 +
72803 +       spin_lock_irqsave(&kmalloc_lock, flags);
72804 +       for (lp = kmalloc_head.next; lp != &kmalloc_head; lp = lp->next) {
72805 +               kp = list_entry(lp, kmalloc_t, list);
72806 +               if (kp->ptr == ptr) {
72807 +                       if (kp->len != len)
72808 +                               printk("qsnet_kmem_free_debug(%p) ptr %p len %d mismatch: expected %d caller %p owner %p (%s:%d)\n",
72809 +                                      current, ptr, len, kp->len, __builtin_return_address(0), kp->caller, file, line);
72810 +                       list_del(lp);
72811 +                       kfree(kp); /* free off descriptor */
72812 +                       break;
72813 +               }
72814 +       }
72815 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
72816 +
72817 +       if (lp == &kmalloc_head) /* segment must be found */
72818 +       {
72819 +               printk( "qsnet_kmem_free_debug(%p) ptr %p len %d not found: caller %p (%s:%d)\n",
72820 +                       current, ptr, len, __builtin_return_address(0), file, line);
72821 +       }
72822 +
72823 +       if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) 
72824 +               vfree (ptr);
72825 +       else
72826 +               kfree (ptr);
72827 +}
72828 +
72829 +#else /* !KMEM_DEBUG */
72830 +
72831 +void * 
72832 +qsnet_kmem_alloc(int len, int cansleep, int zerofill)
72833 +{
72834 +       void *new;
72835 +
72836 +       if (len < PAGE_SIZE || !cansleep)
72837 +               new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC);
72838 +       else
72839 +               new = vmalloc(len);
72840 +
72841 +       if (len >= PAGE_SIZE)
72842 +               ASSERT(PAGE_ALIGNED((unsigned long) new));
72843 +
72844 +       if (new && zerofill)
72845 +               memset(new,0,len);
72846 +
72847 +       return new;
72848 +}
72849 +
72850 +void 
72851 +qsnet_kmem_free(void *ptr, int len)
72852 +{
72853 +       if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) 
72854 +               vfree (ptr);
72855 +       else
72856 +               kfree (ptr);
72857 +}
72858 +#endif /* !KMEM_DEBUG */
72859 +
72860 +void
72861 +qsnet_kmem_display(void *handle)
72862 +{
72863 +#ifdef KMEM_DEBUG
72864 +       kmalloc_t *kp;
72865 +       struct list_head *lp;
72866 +       unsigned long flags;
72867 +       int count = 0, totsize = 0;
72868 +
72869 +       spin_lock_irqsave(&kmalloc_lock, flags);
72870 +       for (lp = kmalloc_head.next; lp != &kmalloc_head;  lp = lp->next) {
72871 +               kp = list_entry(lp, kmalloc_t, list);
72872 +
72873 +               if (!handle || handle == kp->owner)
72874 +               {
72875 +                       printk("qsnet_kmem_display(%p): mem %p len %d unfreed caller %p (%p) \n",
72876 +                              handle, kp->ptr, kp->len, kp->caller, kp->owner);
72877 +                   
72878 +                       count++;
72879 +                       totsize += kp->len;
72880 +               }
72881 +       }
72882 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
72883 +
72884 +       printk("qsnet_kmem_display(%p): %d bytes left in %d objects\n", handle, totsize, count);
72885 +#endif
72886 +}
72887 +
72888 +physaddr_t
72889 +kmem_to_phys(void *ptr)
72890 +{
72891 +       virtaddr_t virt = (virtaddr_t) ptr;
72892 +       physaddr_t phys;
72893 +       pte_t     *pte;
72894 +
72895 +       if ((virt >= VMALLOC_START && virt < VMALLOC_END))
72896 +       {
72897 +               pte = find_pte_kernel(virt);
72898 +               ASSERT(pte && !pte_none(*pte));
72899 +               phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1));
72900 +       }
72901 +#if defined(PKMAP_BASE)
72902 +       else if (virt >= PKMAP_BASE && virt < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
72903 +       {
72904 +               pte = find_pte_kernel(virt);
72905 +               ASSERT(pte && !pte_none(*pte));
72906 +               phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1));
72907 +       }
72908 +#endif
72909 +#if defined(__ia64)
72910 +       else if (virt >= __IA64_UNCACHED_OFFSET && virt < PAGE_OFFSET)
72911 +       {
72912 +               /* ia64 non-cached KSEG */
72913 +               phys = ((physaddr_t) ptr - __IA64_UNCACHED_OFFSET);
72914 +       }
72915 +#endif
72916 +       else /* otherwise it's KSEG */
72917 +       {
72918 +               phys = __pa(virt);
72919 +       }
72920 +           
72921 +#if defined(CONFIG_ALPHA_GENERIC) || (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
72922 +       /* 
72923 +        * with TS_BIAS as bit 40 - the tsunami pci space is mapped into
72924 +        * the kernel at 0xfffff500.00000000 however we need to convert
72925 +        * this to the true physical address 0x00000800.00000000.
72926 +        *
72927 +        * there is no need for PHYS_TWIDDLE since we knew we'd get a kernel
72928 +        * virtual address already and handled this with __pa().
72929 +        */
72930 +       if (phys & (1ul << 40)) {
72931 +               phys &= ~(1ul << 40);   /*   clear bit 40 (kseg I/O select) */
72932 +               phys |= (1ul << 43);    /*   set   bit 43 (phys I/O select) */
72933 +       }
72934 +#endif
72935 +       return phys;
72936 +}
72937 +
72938 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
72939 +
72940 +EXPORT_SYMBOL(pci_resource_size);
72941 +EXPORT_SYMBOL(pci_get_base_address);
72942 +EXPORT_SYMBOL(pci_base_to_kseg);
72943 +
72944 +
72945 +/*
72946 + * PCI stuff.  
72947 + *
72948 + * XXX pci_base_to_kseg() and pci_kseg_to_phys() are problematic
72949 + * in that they may not work on non-Tsunami (DS20, ES40, etc) 
72950 + * architectures, and may not work in non-zero PCI bus numbers.
72951 + */
72952 +
72953 +unsigned long 
72954 +pci_get_base_address(struct pci_dev *pdev, int index)
72955 +{
72956 +       unsigned long base;
72957 +
72958 +       ASSERT(index >= 0 && index <= 5);
72959 +       /* borrowed in part from drivers/scsi/sym53c8xx.c */
72960 +       base = pdev->base_address[index++];
72961 +
72962 +#if BITS_PER_LONG > 32
72963 +       if ((base & 0x7) == 0x4)
72964 +               base |= (((unsigned long)pdev->base_address[index]) << 32);
72965 +#endif
72966 +       return base;
72967 +}
72968 +
72969 +unsigned long 
72970 +pci_resource_size(struct pci_dev *pdev, int index)
72971 +{
72972 +       u32 addr, mask, size;
72973 +
72974 +       static u32 bar_addr[] = {
72975 +               PCI_BASE_ADDRESS_0, 
72976 +               PCI_BASE_ADDRESS_1, 
72977 +               PCI_BASE_ADDRESS_2,
72978 +               PCI_BASE_ADDRESS_3, 
72979 +               PCI_BASE_ADDRESS_4, 
72980 +               PCI_BASE_ADDRESS_5, 
72981 +       };
72982 +       ASSERT(index >= 0 && index <= 5);
72983 +
72984 +       /* algorithm from Rubini book */
72985 +       pci_read_config_dword (pdev,    bar_addr[index], &addr);
72986 +       pci_write_config_dword(pdev,    bar_addr[index], ~0);
72987 +       pci_read_config_dword (pdev,    bar_addr[index], &mask);
72988 +       pci_write_config_dword(pdev,    bar_addr[index], addr);
72989 +
72990 +       mask &= PCI_BASE_ADDRESS_MEM_MASK;
72991 +       size = ~mask + 1;
72992 +       return size;
72993 +}
72994 +
72995 +/*
72996 + * Convert BAR register value to KSEG address.
72997 + */
72998 +void *
72999 +pci_base_to_kseg(u64 baddr, int bus)
73000 +{
73001 +       u64 kseg;
73002 +
73003 +       /* XXX tsunami specific */
73004 +       baddr &= ~(u64)0x100000000;  /* mask out hose bit */
73005 +       kseg = TSUNAMI_MEM(bus) + baddr;
73006 +       return (void *)kseg; 
73007 +}
73008 +
73009 +#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,0) */
73010 +
73011 +/*
73012 + * Spin the other CPU's in an SMP system.
73013 + * smp_call_function() needed to be exported to modules.  It will be
73014 + * papered over in <linux/smp.h> if running on a non-SMP box.
73015 + */
73016 +static spinlock_t hold_lock = SPIN_LOCK_UNLOCKED;
73017 +
73018 +#if 0
73019 +static void cpu_hold(void *unused)
73020 +{
73021 +       spin_lock(&hold_lock);
73022 +       spin_unlock(&hold_lock);
73023 +}
73024 +#endif
73025 +
73026 +void cpu_hold_all(void)
73027 +{
73028 +       spin_lock(&hold_lock);
73029 +
73030 +#if 0
73031 +       {
73032 +               int res;
73033 +               int retries = 10; 
73034 +           
73035 +               /* XXXXX: cannot call smp_call_function() from interrupt context */
73036 +           
73037 +               do {
73038 +                       /* only request blocking retry if not in interrupt context */
73039 +                       res = smp_call_function(cpu_hold, NULL, !in_interrupt(), 0);
73040 +                       if (res)
73041 +                               mdelay(5);
73042 +               } while (res && retries--);
73043 +           
73044 +               if (res)
73045 +                       printk("cpu_hold_all: IPI timeout\n");
73046 +       }
73047 +#endif
73048 +}
73049 +
73050 +void cpu_release_all(void)
73051 +{
73052 +       spin_unlock(&hold_lock);
73053 +}
73054 +
73055 +/*
73056 + * Local variables:
73057 + * c-file-style: "linux"
73058 + * End:
73059 + */
73060 Index: linux-2.6.5-7.191/drivers/net/qsnet/qsnet/Makefile
73061 ===================================================================
73062 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/qsnet/Makefile     2004-02-23 16:02:56.000000000 -0500
73063 +++ linux-2.6.5-7.191/drivers/net/qsnet/qsnet/Makefile  2005-07-28 14:52:52.919667944 -0400
73064 @@ -0,0 +1,15 @@
73065 +#
73066 +# Makefile for Quadrics QsNet
73067 +#
73068 +# Copyright (c) 2002-2004 Quadrics Ltd
73069 +#
73070 +# File: drivers/net/qsnet/qsnet/Makefile
73071 +#
73072 +
73073 +
73074 +#
73075 +
73076 +obj-$(CONFIG_QSNET)    += qsnet.o
73077 +qsnet-objs     := debug.o kernel_linux.o i686_mmx.o
73078 +
73079 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
73080 Index: linux-2.6.5-7.191/drivers/net/qsnet/qsnet/Makefile.conf
73081 ===================================================================
73082 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/qsnet/Makefile.conf        2004-02-23 16:02:56.000000000 -0500
73083 +++ linux-2.6.5-7.191/drivers/net/qsnet/qsnet/Makefile.conf     2005-07-28 14:52:52.919667944 -0400
73084 @@ -0,0 +1,10 @@
73085 +# Flags for generating QsNet Linux Kernel Makefiles
73086 +MODNAME                =       qsnet.o
73087 +MODULENAME     =       qsnet
73088 +KOBJFILES      =       debug.o kernel_linux.o i686_mmx.o
73089 +EXPORT_KOBJS   =       kernel_linux.o
73090 +CONFIG_NAME    =       CONFIG_QSNET
73091 +SGALFC         =       
73092 +# EXTRALINES START
73093 +
73094 +# EXTRALINES END
73095 Index: linux-2.6.5-7.191/drivers/net/qsnet/qsnet/qsnetkmem_linux.c
73096 ===================================================================
73097 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/qsnet/qsnetkmem_linux.c    2004-02-23 16:02:56.000000000 -0500
73098 +++ linux-2.6.5-7.191/drivers/net/qsnet/qsnet/qsnetkmem_linux.c 2005-07-28 14:52:52.920667792 -0400
73099 @@ -0,0 +1,325 @@
73100 +/*
73101 + *    Copyright (c) 2003 by Quadrics Ltd.
73102 + * 
73103 + *    For licensing information please see the supplied COPYING file
73104 + *
73105 + */
73106 +
73107 +#ident "@(#)$Id: qsnetkmem_linux.c,v 1.3 2003/08/13 10:03:27 fabien Exp $"
73108 +/*      $Source: /cvs/master/quadrics/qsnet/qsnetkmem_linux.c,v $*/
73109 +
73110 +/* macro macros */
73111 +#define MACRO_BEGIN     do {
73112 +#define MACRO_END       } while (0)
73113 +#define offsetof(T,F) ((int )&(((T *)0)->F))
73114 +
73115 +#include <stdio.h>
73116 +#include <stdlib.h>
73117 +#include <ctype.h>
73118 +#include <sys/types.h>
73119 +#include <errno.h>
73120 +#include <unistd.h>
73121 +#include <string.h>
73122 +#include <qsnet/config.h>
73123 +#include <qsnet/list.h>
73124 +#include <qsnet/procfs_linux.h>
73125 +#include <signal.h>
73126 +#include <sys/wait.h>
73127 +
73128 +#define LIST_HEAD_INIT(name) { &(name), &(name) }
73129 +
73130 +#define LIST_HEAD(name) \
73131 +       struct list_head name = LIST_HEAD_INIT(name)
73132 +
73133 +typedef struct {
73134 +       struct list_head list;
73135 +       void            *ptr;
73136 +       int             len;
73137 +       int             used_vmalloc;
73138 +       void            *owner;
73139 +       void            *caller;
73140 +       unsigned int     time;
73141 +       int              mark;
73142 +       int              line;
73143 +       char             file[256];
73144 +       
73145 +} kmalloc_t;
73146 +
73147 +
73148 +static LIST_HEAD(current_kmem);
73149 +static LIST_HEAD(stored_kmem);
73150 +
73151 +void
73152 +count_kmem(struct list_head * list, long * count, long * size )
73153 +{
73154 +       long              c,s;
73155 +       struct list_head *tmp;
73156 +       kmalloc_t        *kmem_ptr = NULL;
73157 +
73158 +
73159 +       c = s = 0L;
73160 +
73161 +       list_for_each(tmp, list) {
73162 +               kmem_ptr = list_entry(tmp, kmalloc_t , list);
73163 +               c++;
73164 +               s += kmem_ptr->len;
73165 +       }       
73166 +
73167 +       *count = c;
73168 +       *size  = s;
73169 +}
73170 +
73171 +void
73172 +clear_kmem(struct list_head * list)
73173 +{
73174 +       struct list_head *tmp,*tmp2;
73175 +       kmalloc_t        *kmem_ptr = NULL;
73176 +
73177 +       list_for_each_safe(tmp, tmp2, list) {
73178 +               kmem_ptr = list_entry(tmp, kmalloc_t , list);
73179 +               list_del_init(&kmem_ptr->list);
73180 +               free( kmem_ptr );
73181 +       }
73182 +}
73183 +
73184 +void 
73185 +move_kmem(struct list_head * dest, struct list_head *src)
73186 +{
73187 +       struct list_head *tmp,*tmp2;
73188 +       kmalloc_t        *kp= NULL;
73189 +
73190 +       list_for_each_safe(tmp, tmp2, src) {
73191 +               kp = list_entry(tmp, kmalloc_t , list);
73192 +               list_del_init(&kp->list);
73193 +
73194 +/*
73195 +               printf("mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
73196 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
73197 +*/                 
73198 +
73199 +               list_add_tail(&kp->list, dest);
73200 +       }
73201 +}
73202 +
73203 +void
73204 +read_kmem(struct list_head * list)
73205 +{
73206 +       FILE      * fd;
73207 +       char        line[1024];
73208 +       int         line_size = 100;
73209 +       char      * rep;
73210 +       kmalloc_t * kp;
73211 +
73212 +       clear_kmem(list);
73213 +
73214 +       fd = fopen(QSNET_PROCFS_KMEM_DEBUG,"r");
73215 +       if ( fd == NULL) 
73216 +       {
73217 +               printf("No Kmem Debug\n");
73218 +               return;
73219 +       }
73220 +
73221 +       rep = fgets(line,line_size, fd);
73222 +
73223 +       while ( rep != NULL ) 
73224 +       {
73225 +               kp = malloc(sizeof(kmalloc_t));
73226 +
73227 +               sscanf(line,"%p %d %d %p %p %u %d %s\n",
73228 +                      &kp->ptr, &kp->len, &kp->used_vmalloc, &kp->caller, &kp->owner, &kp->time, &kp->line, &kp->file[0]);
73229 +
73230 +/*
73231 +               printf(">>%s<<\n",line);
73232 +               printf("%p %d %d %p %p %u %d %s\n",
73233 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->file);
73234 +*/
73235 +
73236 +               list_add_tail(&kp->list, list);
73237 +
73238 +               rep = fgets(line,line_size, fd);
73239 +       }
73240 +       fclose(fd);
73241 +}
73242 +
73243 +void
73244 +mark_kmem(struct list_head * list, int mark)
73245 +{
73246 +       struct list_head *tmp;
73247 +       kmalloc_t        *kp = NULL;
73248 +
73249 +       list_for_each(tmp, list) {
73250 +               kp = list_entry(tmp, kmalloc_t , list);
73251 +
73252 +               kp->mark = mark;
73253 +       }
73254 +}
73255 +
73256 +kmalloc_t *
73257 +find_kmem(kmalloc_t * value, struct list_head * list)
73258 +{
73259 +       struct list_head *tmp;
73260 +       kmalloc_t        *kp = NULL;
73261 +
73262 +       
73263 +       list_for_each(tmp, list) {
73264 +               kp = list_entry(tmp, kmalloc_t , list);
73265 +               if ( (kp->ptr == value->ptr)
73266 +                    && (kp->len == value->len)
73267 +                    && (kp->used_vmalloc  == value->used_vmalloc )
73268 +                    && (kp->owner  == value->owner )
73269 +                    && (kp->caller  == value->caller )
73270 +                    && (kp->time  == value->time )
73271 +                    && (kp->line  == value->line )
73272 +                    && !(strcmp(kp->file,value->file) ))
73273 +                       return kp;
73274 +       }       
73275 +       return NULL;
73276 +}
73277 +
73278 +void 
73279 +diff_kmem(struct list_head *curr, struct list_head *stored)
73280 +{
73281 +       struct list_head *tmp;
73282 +       kmalloc_t        *kp = NULL;
73283 +       long              c,s;
73284 +
73285 +       mark_kmem(stored,  0);
73286 +       mark_kmem(curr,    0);
73287 +       
73288 +       list_for_each(tmp, stored) {
73289 +               kp = list_entry(tmp, kmalloc_t , list);
73290 +               if (find_kmem( kp, curr) != NULL) 
73291 +                       kp->mark = 1;
73292 +       }
73293 +       
73294 +       list_for_each(tmp, curr) {
73295 +               kp = list_entry(tmp, kmalloc_t , list);
73296 +               if (find_kmem( kp, stored) != NULL) 
73297 +                       kp->mark = 1;
73298 +       }               
73299 +
73300 +       c=s=0L;
73301 +       list_for_each(tmp, stored) {
73302 +               kp = list_entry(tmp, kmalloc_t , list);
73303 +               if (kp->mark != 1)
73304 +               {
73305 +                       printf("-- mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
73306 +                              kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
73307 +                       c++;
73308 +                       s+= kp->len;
73309 +               }
73310 +       }
73311 +       printf("-- %4ld %10ld \n",c,s);
73312 +       
73313 +       c=s=0L;
73314 +       list_for_each(tmp, curr) {
73315 +               kp = list_entry(tmp, kmalloc_t , list);
73316 +               if (kp->mark != 1)
73317 +               {
73318 +                       printf("++ mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
73319 +                              kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
73320 +                       c++;
73321 +                       s+= kp->len;
73322 +               }
73323 +       }               
73324 +       printf("++ %4ld %10ld \n",c,s);
73325 +}
73326 +
73327 +
73328 +void
73329 +print_kmem(struct list_head * list)
73330 +{
73331 +       struct list_head *tmp;
73332 +       kmalloc_t        *kp = NULL;
73333 +
73334 +       list_for_each(tmp, list) {
73335 +               kp = list_entry(tmp, kmalloc_t , list);
73336 +
73337 +               printf("mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
73338 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
73339 +                   
73340 +       }
73341 +}
73342 +
73343 +void 
73344 +print_cmds()
73345 +{
73346 +       long c,s;
73347 +
73348 +       printf("q : quits \n");
73349 +       printf("r : read\n");
73350 +       printf("c : print current\n");
73351 +       printf("o : print stored\n");
73352 +       printf("s : store\n");
73353 +
73354 +       count_kmem(&current_kmem, &c, &s );
73355 +       printf("\ncurrent : %4ld %10ld\n", c , s);
73356
73357 +       count_kmem(&stored_kmem, &c, &s );
73358 +       printf("store   : %4ld %10ld\n", c , s);
73359
73360 +}
73361 +
73362 +int
73363 +main()
73364 +{
73365 +       char            line[128];
73366 +       int             line_size=127;
73367 +       int             len;
73368 +
73369 +
73370 +       while (1)
73371 +       {
73372 +               
73373 +               printf(">> ");
73374 +               fgets(line,line_size, stdin);
73375 +       
73376 +               
73377 +               len = strlen( line ) -1;
73378 +               if ( len ) 
73379 +               {
73380 +                       switch ( tolower(line[0]) ) 
73381 +                       {
73382 +                       case 'q':
73383 +                               exit(0);
73384 +
73385 +                       case 'r' :
73386 +                               read_kmem(&current_kmem);
73387 +                               break;
73388 +
73389 +                       case 'c' :
73390 +                               print_kmem(&current_kmem);
73391 +                               break;
73392 +
73393 +                       case 'o' :
73394 +                               print_kmem(&stored_kmem);
73395 +                               break;
73396 +
73397 +                       case 's' :
73398 +                               clear_kmem(&stored_kmem);
73399 +                               move_kmem(&stored_kmem, &current_kmem);
73400 +                               break;
73401 +
73402 +                       case 'd' :
73403 +                               diff_kmem(&current_kmem, &stored_kmem);
73404 +                               break;
73405 +
73406 +                       default:
73407 +                               print_cmds();   
73408 +                       }
73409 +
73410 +               
73411 +                       
73412 +               }
73413 +               else
73414 +                       print_cmds();
73415 +       }
73416 +
73417 +}
73418 +
73419 +
73420 +/*
73421 + * Local variables:
73422 + * c-file-style: "linux"
73423 + * End:
73424 + */
73425 Index: linux-2.6.5-7.191/drivers/net/qsnet/qsnet/quadrics_version.h
73426 ===================================================================
73427 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/qsnet/quadrics_version.h   2004-02-23 16:02:56.000000000 -0500
73428 +++ linux-2.6.5-7.191/drivers/net/qsnet/qsnet/quadrics_version.h        2005-07-28 14:52:52.920667792 -0400
73429 @@ -0,0 +1 @@
73430 +#define QUADRICS_VERSION "4.31qsnet"
73431 Index: linux-2.6.5-7.191/drivers/net/qsnet/rms/Makefile
73432 ===================================================================
73433 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/rms/Makefile       2004-02-23 16:02:56.000000000 -0500
73434 +++ linux-2.6.5-7.191/drivers/net/qsnet/rms/Makefile    2005-07-28 14:52:52.920667792 -0400
73435 @@ -0,0 +1,15 @@
73436 +#
73437 +# Makefile for Quadrics QsNet
73438 +#
73439 +# Copyright (c) 2002-2004 Quadrics Ltd
73440 +#
73441 +# File: drivers/net/qsnet/rms/Makefile
73442 +#
73443 +
73444 +
73445 +#
73446 +
73447 +obj-$(CONFIG_RMS)      += rms.o
73448 +rms-objs       := rms_kern.o rms_kern_Linux.o
73449 +
73450 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
73451 Index: linux-2.6.5-7.191/drivers/net/qsnet/rms/Makefile.conf
73452 ===================================================================
73453 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/rms/Makefile.conf  2004-02-23 16:02:56.000000000 -0500
73454 +++ linux-2.6.5-7.191/drivers/net/qsnet/rms/Makefile.conf       2005-07-28 14:52:52.921667640 -0400
73455 @@ -0,0 +1,10 @@
73456 +# Flags for generating QsNet Linux Kernel Makefiles
73457 +MODNAME                =       rms.o
73458 +MODULENAME     =       rms
73459 +KOBJFILES      =       rms_kern.o rms_kern_Linux.o
73460 +EXPORT_KOBJS   =       
73461 +CONFIG_NAME    =       CONFIG_RMS
73462 +SGALFC         =       
73463 +# EXTRALINES START
73464 +
73465 +# EXTRALINES END
73466 Index: linux-2.6.5-7.191/drivers/net/qsnet/rms/quadrics_version.h
73467 ===================================================================
73468 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/rms/quadrics_version.h     2004-02-23 16:02:56.000000000 -0500
73469 +++ linux-2.6.5-7.191/drivers/net/qsnet/rms/quadrics_version.h  2005-07-28 14:52:52.921667640 -0400
73470 @@ -0,0 +1 @@
73471 +#define QUADRICS_VERSION "4.31qsnet"
73472 Index: linux-2.6.5-7.191/drivers/net/qsnet/rms/rms_kern.c
73473 ===================================================================
73474 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/rms/rms_kern.c     2004-02-23 16:02:56.000000000 -0500
73475 +++ linux-2.6.5-7.191/drivers/net/qsnet/rms/rms_kern.c  2005-07-28 14:52:52.923667336 -0400
73476 @@ -0,0 +1,1757 @@
73477 +/*
73478 + * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
73479 + *
73480 + * For licensing information please see the supplied COPYING file
73481 + *
73482 + * rms_kern.c:    RMS kernel module
73483 + *
73484 + * $Source: /cvs/master/quadrics/rmsmod/rms_kern.c,v $
73485 + */
73486 +
73487 +#ident "@(#)$Id: rms_kern.c,v 1.62.2.4 2005/01/18 11:05:45 duncan Exp $"
73488 +
73489 +#include <stddef.h>
73490 +#include <qsnet/kernel.h>
73491 +#include <qsnet/autoconf.h>
73492 +#include <rms/rmscall.h>
73493 +
73494 +/*
73495 + * extend stats added in version 5
73496 + */
73497 +#define RMS_MODVERSION 5
73498 +
73499 +#if defined(SOLARIS)
73500 +
73501 +#define CURUID() CURPROC()->p_cred->cr_uid
73502 +#define RMS_NCPUS() 4
73503 +#define PROC_STRUCT proc
73504 +
73505 +#include <sys/time.h>
73506 +
73507 +#elif defined(LINUX)
73508 +
73509 +#ifdef PROCESS_ACCT 
73510 +#define TIMEVAL_TO_MSEC(tv) ((tv)->tv_sec * 1000 + (tv)->tv_usec / 1000)
73511 +#define TIMEVAL_TO_CT(tv)   ((tv)->tv_sec * HZ + (tv)->tv_usec / (1000000L / HZ))
73512 +#endif
73513 +
73514 +#ifdef RSS_ATOMIC
73515 +#define PROC_RSS(proc) ((proc)->mm ? atomic_read(&(proc)->mm->rss) : 0)
73516 +#else
73517 +#define PROC_RSS(proc) ((proc)->mm ? (proc)->mm->rss : 0)
73518 +#endif
73519 +
73520 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
73521 +#      define  RMS_NCPUS()     smp_num_cpus
73522 +#else
73523 +#      define RMS_NCPUS()      num_online_cpus()
73524 +#endif
73525 +
73526 +#define CURUID()       CURPROC()->uid
73527 +#define p_pid          pid
73528 +#define PROC_STRUCT    task_struct
73529 +
73530 +/* care needed with conversion to millisecs on 32-bit Linux */
73531 +#ifdef LINUX
73532 +#ifdef LINUX_I386
73533 +#define CT_TO_MSEC(x)  ct_to_msec(x)
73534 +
73535 +uint64_t ct_to_msec(clock_t t)
73536 +{
73537 +    uint64_t msecs;
73538 +    if (t < 2000000)
73539 +    {
73540 +       t = (1000 * t)/HZ;
73541 +       msecs = t;
73542 +    }
73543 +    else
73544 +    {
73545 +       t = t / HZ;
73546 +       msecs = t * 1000;
73547 +    }
73548 +    return(msecs);
73549 +}
73550 +
73551 +#else
73552 +#define CT_TO_MSEC(x)  (((x) * 1000)/HZ)
73553 +#endif
73554 +#endif
73555 +
73556 +#ifndef FALSE
73557 +#define FALSE          (0)
73558 +#define TRUE           (!FALSE)
73559 +#endif
73560 +
73561 +#include <linux/time.h>
73562 +#include <linux/proc_fs.h>
73563 +#include <linux/ptrack.h>
73564 +
73565 +#include <linux/module.h>
73566 +
73567 +#elif defined(DIGITAL_UNIX)
73568 +
73569 +#define CURUID() CURPROC()->p_ruid
73570 +extern  int ncpus;
73571 +#define RMS_NCPUS() ncpus
73572 +#define PROC_STRUCT proc
73573 +#define TIMEVAL_TO_MSEC(tv) ((tv)->tv_sec * 1000 + (tv)->tv_usec / 1000)
73574 +
73575 +#include <sys/time.h>
73576 +
73577 +#else
73578 +#error cannot determine operating system
73579 +#endif
73580 +
73581 +int shm_cleanup(void);
73582 +
73583 +struct cap_desc {
73584 +
73585 +    struct cap_desc *next;
73586 +    int              index;    /* index of capability in program */
73587 +    ELAN_CAPABILITY  cap;      /* elan capability */
73588 +
73589 +};
73590 +
73591 +struct proc_desc {
73592 +    
73593 +    struct proc_desc    *next;
73594 +    struct PROC_STRUCT  *proc;
73595 +    struct prg_desc     *program;      /* controlling program         */
73596 +    int                  mycap;                /* index of my capability      */
73597 +    int                  myctx;                /* context number for process  */
73598 +    int                  flags;
73599 +    int                  vp;           /* elan virtual process number */
73600 +};
73601 +
73602 +struct prg_desc {
73603 +    
73604 +    struct prg_desc  *next;            
73605 +    int               id;      /* program id                          */
73606 +    int               flags;   /* program status flags                */
73607 +    uid_t             uid;     /* user id                             */
73608 +    int               ncpus;   /* number of cpus allocated to program */
73609 +    int               nprocs;  /* number of processes in program      */
73610 +    struct proc_desc *pdescs;  /* processes in this program           */
73611 +    int               ncaps;   /* number of capabilities              */
73612 +    struct cap_desc  *caps;    /* elan capabilities                   */
73613 +    char             *corepath;        /* core path for parallel program      */
73614 +    int               psid;    /* processor set id                    */
73615 +
73616 +    uint64_t       cutime;     /* user time accumulated by children   */
73617 +    uint64_t       cstime;     /* system time accumulated by children */
73618 +    uint64_t       start_time; /* time program created                */
73619 +    uint64_t       end_time;   /* time last process exited            */
73620 +    uint64_t       sched_time; /* last time job was scheduled         */
73621 +    uint64_t       accum_atime;        /* allocated time last deschedule      */
73622 +    uint64_t       memint;     /* accumulated memory integral         */
73623 +    uint64_t       ebytes;     /* data transferred by the Elan(s)     */
73624 +    uint64_t       exfers;     /* number of Elan data transfers       */
73625 +    long           maxrss;     /* maximum size to date                */
73626 +    long           majflt;
73627 +    
73628 +#ifdef LINUX
73629 +    struct proc_dir_entry *proc_entry;
73630 +#endif
73631 +
73632 +};
73633 +
73634 +#if defined(LINUX)
73635 +static int rms_ptrack_callback (void *arg, int phase, struct task_struct *child);
73636 +#else
73637 +static void rms_xd_callback(void *arg, int phase, void *ctask);
73638 +static void rms_xa_callback (void *arg, int phase, void *ctask);
73639 +#endif
73640 +
73641 +static void prgsignal(struct prg_desc *program, int signo);
73642 +static uint64_t gettime(void);
73643 +static void freeProgram(struct prg_desc *program);
73644 +
73645 +static struct prg_desc *programs = 0;
73646 +
73647 +kmutex_t rms_lock;
73648 +
73649 +int rms_init(void)
73650 +{
73651 +    kmutex_init (&rms_lock);
73652 +
73653 +    DBG(printk("rms: initialising\n"));
73654 +
73655 +    return(ESUCCESS);
73656 +}
73657 +
73658 +int rms_reconfigure(void)
73659 +{
73660 +    return(ESUCCESS);
73661 +}
73662 +
73663 +int rms_programs_registered(void)
73664 +{
73665 +    /*
73666 +    ** Called when trying to unload rms.mod will not succeed
73667 +    ** if programs registered
73668 +    */
73669
73670 +   struct prg_desc *program, **pp;
73671 +
73672 +   kmutex_lock(&rms_lock);
73673 +
73674 +   for (program = programs; program; program = program->next)
73675 +   {
73676 +       if (program->nprocs != 0)
73677 +       {
73678 +            kmutex_unlock(&rms_lock);
73679 +            return(EBUSY);
73680 +       }
73681 +   }
73682 +
73683 +   /*
73684 +   ** We have traversed the programs list and no processes registered
73685 +   ** Now free the memory
73686 +   */
73687 +       
73688 +    pp = &programs;
73689 +    while ((program = *pp) != NULL)
73690 +    {
73691 +        *pp = program->next;
73692 +        freeProgram(program);
73693 +    }
73694 +    kmutex_unlock(&rms_lock);
73695 +   
73696 +    return(ESUCCESS);
73697 +
73698 +}
73699 +
73700 +int rms_fini(void)
73701 +{
73702 +    /*
73703 +     * don't allow an unload if there are programs registered
73704 +     */
73705 +    if (rms_programs_registered())
73706 +        return(EBUSY);
73707 +
73708 +    kmutex_destroy (&rms_lock);
73709 +
73710 +    DBG(printk("rms: removed\n"));
73711 +
73712 +    return(ESUCCESS);
73713 +}
73714 +
73715 +#ifdef LINUX
73716 +
73717 +extern struct proc_dir_entry *rms_procfs_programs;
73718 +
73719 +/*
73720 + * display one pid per line if there isn't enough space 
73721 + * for another pid then add "...\n" and stop 
73722 + */
73723 +int pids_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
73724 +{
73725 +    struct prg_desc *program = (struct prg_desc *)data;
73726 +    struct proc_desc *pdesc;
73727 +    char *ptr = page;
73728 +    int bytes = 0, nb;
73729 +
73730 +    kmutex_lock(&rms_lock);
73731 +    
73732 +    for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73733 +    {
73734 +       if (bytes > count - 15)
73735 +       {
73736 +           bytes += sprintf(ptr,"...\n");
73737 +           break;
73738 +       }
73739 +        nb = sprintf(ptr, "%d %d\n", pdesc->proc->p_pid, pdesc->vp);
73740 +       bytes += nb;
73741 +       ptr += nb;
73742 +    }
73743 +    kmutex_unlock(&rms_lock);
73744 +    
73745 +    return(bytes);
73746 +}
73747 +
73748 +int status_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
73749 +{
73750 +    struct prg_desc *program = (struct prg_desc *)data;
73751 +    int bytes;
73752 +    if (program->flags & PRG_KILLED)
73753 +       bytes = sprintf(page, "killed\n");
73754 +    else
73755 +       bytes = sprintf(page, "running\n");
73756 +    return(bytes);
73757 +}
73758 +
73759 +void rms_create_proc_entry(struct prg_desc *program)
73760 +{
73761 +    struct proc_dir_entry *p;
73762 +    char name[32];
73763 +
73764 +    if (rms_procfs_programs)
73765 +    {
73766 +       sprintf(name,"%d", program->id);
73767 +       if ((program->proc_entry = proc_mkdir(name, rms_procfs_programs)) != NULL)
73768 +       {
73769 +           if ((p = create_proc_entry ("pids", S_IRUGO, program->proc_entry)) != NULL)
73770 +           {
73771 +               p->owner = THIS_MODULE;
73772 +               p->data = program;
73773 +               p->read_proc = pids_callback;
73774 +           }
73775 +           if ((p = create_proc_entry ("status", S_IRUGO, program->proc_entry)) != NULL)
73776 +           {
73777 +               p->owner = THIS_MODULE;
73778 +               p->data = program;
73779 +               p->read_proc = status_callback;
73780 +           }
73781 +       }
73782 +    }
73783 +}
73784 +
73785 +void rms_remove_proc_entry(struct prg_desc *program)
73786 +{
73787 +    char name[32];
73788 +    if (rms_procfs_programs)
73789 +    {
73790 +       if (program->proc_entry)
73791 +       {
73792 +           remove_proc_entry ("pids", program->proc_entry);
73793 +           remove_proc_entry ("status", program->proc_entry);
73794 +       }
73795 +       sprintf(name,"%d", program->id);
73796 +       remove_proc_entry (name, rms_procfs_programs);
73797 +    }
73798 +}
73799 +
73800 +#endif
73801 +
73802 +/*
73803 + * find a program from its index/pid
73804 + *
73805 + * Duncan:  make the lookup more efficient for large numbers of programs/processes
73806 + */
73807 +static struct prg_desc *findProgram(const int id)
73808 +{
73809 +    struct prg_desc *program;
73810 +    for (program = programs; program; program = program->next)
73811 +       if (program->id == id)
73812 +           return(program);
73813 +    return(0);
73814 +}
73815 +
73816 +static struct proc_desc *findProcess(const int pid)
73817 +{
73818 +    struct prg_desc *program;
73819 +    struct proc_desc *pdesc;
73820 +    for (program = programs; program; program = program->next)
73821 +       for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
73822 +           if (pdesc->proc->p_pid == pid)
73823 +               return(pdesc);
73824 +    return(0);
73825 +}
73826 +
73827 +static void freeProgram(struct prg_desc *program)
73828 +{
73829 +    struct proc_desc *pdesc;
73830 +    struct cap_desc *cdesc;
73831 +
73832 +#ifdef LINUX
73833 +    rms_remove_proc_entry(program);
73834 +#endif
73835 +
73836 +    while ((pdesc = program->pdescs) != NULL)
73837 +    {
73838 +       program->pdescs = pdesc->next;
73839 +       KMEM_FREE(pdesc, sizeof(struct proc_desc));
73840 +    }
73841 +
73842 +    while ((cdesc = program->caps) != NULL)
73843 +    {
73844 +       program->caps = cdesc->next;
73845 +       KMEM_FREE(cdesc, sizeof(struct cap_desc));
73846 +    }
73847 +
73848 +    if (program->corepath)
73849 +       KMEM_FREE(program->corepath, MAXCOREPATHLEN + 1);
73850 +
73851 +    KMEM_FREE(program, sizeof(struct prg_desc));
73852 +
73853 +#ifdef LINUX
73854 +    MOD_DEC_USE_COUNT;
73855 +#endif
73856 +}
73857 +
73858 +/*
73859 + * rms_prgcreate
73860 + *
73861 + * create a new program description
73862 + */
73863 +int rms_prgcreate(int id, uid_t uid, int cpus)
73864 +{
73865 +    struct prg_desc *program;
73866 +    struct proc_desc *pdesc;
73867 +    
73868 +    DBG(printk("rms_prgcreate :: program %d pid %d uid %d cpus %d\n", id, CURPROC()->p_pid, uid, cpus));
73869 +    
73870 +    /*
73871 +     * parallel programs are created as root by the rmsd as it forks the loader
73872 +     */
73873 +    if (CURUID())
73874 +       return(EACCES);
73875 +    
73876 +    /*
73877 +     * program ids must be unique
73878 +     */
73879 +    kmutex_lock(&rms_lock);
73880 +    program = findProgram(id);
73881 +    kmutex_unlock(&rms_lock);
73882 +    if (program)
73883 +       return(EINVAL);
73884 +
73885 +    /*
73886 +     * create a new program description
73887 +     */
73888 +    KMEM_ALLOC(program, struct prg_desc *, sizeof(struct prg_desc), TRUE);
73889 +    if (!program)
73890 +       return(ENOMEM);
73891 +
73892 +    program->id = id;
73893 +    program->flags = PRG_RUNNING;
73894 +    program->ncpus = cpus;
73895 +    program->nprocs = 1;
73896 +    program->uid = uid;
73897 +    program->ncaps = 0;
73898 +    program->caps = 0;
73899 +    program->corepath = 0;
73900 +    program->psid = 0;
73901 +    program->start_time = program->sched_time = gettime();
73902 +    program->end_time = 0;
73903 +    program->accum_atime = 0;
73904 +    program->cutime = 0;
73905 +    program->cstime = 0;
73906 +    program->maxrss = 0;
73907 +    program->memint = 0;
73908 +    program->majflt = 0;
73909 +    program->ebytes = 0;
73910 +    program->exfers = 0;
73911 +
73912 +    KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE);
73913 +    if (!pdesc)
73914 +       return(ENOMEM);
73915 +
73916 +    pdesc->proc = CURPROC();
73917 +    pdesc->next = 0;
73918 +    pdesc->mycap = ELAN_CAP_UNINITIALISED;
73919 +    pdesc->myctx = ELAN_CAP_UNINITIALISED;
73920 +    pdesc->vp = -1;            /* rmsloader */
73921 +    pdesc->program = program;
73922 +    program->pdescs = pdesc;
73923 +    
73924 +#ifdef LINUX
73925 +    rms_create_proc_entry(program);
73926 +#endif
73927 +    
73928 +    kmutex_lock(&rms_lock);
73929 +
73930 +#if defined(LINUX)
73931 +    if (ptrack_register (rms_ptrack_callback, NULL) != 0)
73932 +    {
73933 +       kmutex_unlock(&rms_lock);
73934 +        KMEM_FREE(pdesc,sizeof(struct proc_desc));
73935 +        KMEM_FREE(program,sizeof(struct prg_desc));
73936 +       return(ENOMEM);
73937 +    }
73938 +#else
73939 +    /*
73940 +     * install a fork handler
73941 +     */
73942 +    if (HANDLER_REGISTER((void *)(unsigned long)rms_xa_callback, NULL, XA_FORK | XA_EXIT | XA_IOF | XA_KOF | XA_KOE) == NULL)
73943 +    {
73944 +       kmutex_unlock(&rms_lock);
73945 +        KMEM_FREE(pdesc,sizeof(struct proc_desc));
73946 +        KMEM_FREE(program,sizeof(struct prg_desc));
73947 +       return(ENOMEM);
73948 +    }
73949 +#endif
73950 +
73951 +    program->next = programs;
73952 +    programs = program;
73953 +    
73954 +#ifdef LINUX
73955 +    MOD_INC_USE_COUNT;
73956 +#endif
73957 +    
73958 +    kmutex_unlock(&rms_lock);
73959 +    return(ESUCCESS);
73960 +}
73961 +
73962 +
73963 +/*
73964 + * rms_prgdestroy
73965 + *
73966 + * destroy a program description
73967 + */
73968 +int rms_prgdestroy(int id)
73969 +{
73970 +    struct prg_desc *program, **pp;
73971 +    int status = ESRCH;
73972 +
73973 +    /*
73974 +     * parallel programs are created and destroyed by the rmsd
73975 +     */
73976 +    if (CURUID())
73977 +       return(EACCES);
73978 +
73979 +    kmutex_lock(&rms_lock);
73980 +    
73981 +    pp = &programs;
73982 +    while ((program = *pp) != NULL)
73983 +    {
73984 +       if (program->id == id)
73985 +       {
73986 +           if (program->nprocs == 0)
73987 +           {
73988 +               DBG(printk("rms_prgdestro :: removing program %d\n", program->id));
73989 +               *pp = program->next;
73990 +               freeProgram(program);
73991 +               status = ESUCCESS;
73992 +           }
73993 +           else
73994 +           {
73995 +               DBG(printk("rms_prgdestro :: failed to remove program %d: %d\n", program->id, program->nprocs));
73996 +               status = ECHILD;
73997 +               pp = &program->next;
73998 +           }
73999 +       }
74000 +       else
74001 +           pp = &program->next;
74002 +    }
74003 +    
74004 +    kmutex_unlock(&rms_lock);
74005 +    return(status);
74006 +}
74007 +
74008 +/*
74009 + * rms_prgids
74010 + */
74011 +int rms_prgids(int maxids, int *prgids, int *nprgs)
74012 +{
74013 +    struct prg_desc *program;
74014 +    int count = 0, *buf, *bufp;
74015 +    int status = ESUCCESS;
74016 +
74017 +    if (maxids < 1)
74018 +        return(EINVAL);
74019 +
74020 +    kmutex_lock(&rms_lock);
74021 +
74022 +    for (program = programs; program; program = program->next)
74023 +        count++;
74024 +    count = MIN(count, maxids);
74025 +
74026 +    if (count > 0)
74027 +    {
74028 +        KMEM_ALLOC(buf, int *, count * sizeof(int), TRUE);
74029 +       if (buf)
74030 +       {                  
74031 +           for (program = programs, bufp=buf; bufp < buf + count; 
74032 +                program = program->next)
74033 +               *bufp++ = program->id;
74034 +       
74035 +           if (copyout(buf, prgids, sizeof(int) * count))
74036 +               status = EFAULT;
74037 +
74038 +           KMEM_FREE(buf, count * sizeof(int));
74039 +       }
74040 +       else
74041 +           status = ENOMEM;
74042 +    }
74043 +    
74044 +    if (copyout(&count, nprgs, sizeof(int)))
74045 +       status = EFAULT;
74046 +
74047 +    kmutex_unlock(&rms_lock);
74048 +    
74049 +    return(status);
74050 +}
74051 +
74052 +/*
74053 + * rms_prginfo
74054 + */
74055 +int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs)
74056 +{
74057 +    struct prg_desc *program;
74058 +    struct proc_desc *pdesc;
74059 +    pid_t *pidp, *buf;
74060 +    int status = ESUCCESS;
74061 +
74062 +    kmutex_lock(&rms_lock);
74063 +
74064 +    if ((program = findProgram(id)) != NULL)
74065 +    {
74066 +       if (program->nprocs > 0)
74067 +       {
74068 +           KMEM_ALLOC(buf, pid_t *, program->nprocs * sizeof(pid_t), TRUE);
74069 +           if (buf)
74070 +           {
74071 +               for (pidp = buf, pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74072 +                   *pidp++ = pdesc->proc->p_pid;
74073 +               
74074 +               if (copyout(buf, pids, sizeof(pid_t) * MIN(program->nprocs, maxpids)))
74075 +                   status = EFAULT;
74076 +               
74077 +               KMEM_FREE(buf, program->nprocs * sizeof(pid_t));
74078 +           }
74079 +           else
74080 +               status = ENOMEM;
74081 +       }
74082 +       
74083 +       if (copyout(&program->nprocs, nprocs, sizeof(int)))
74084 +           status = EFAULT;
74085 +    }
74086 +    else
74087 +       status = ESRCH;
74088 +
74089 +    kmutex_unlock(&rms_lock);
74090 +    
74091 +    return(status);
74092 +}
74093 +
74094 +/*
74095 + * rmsmod always used to use psignal but this doesn't work
74096 + * on Linux 2.6.7 so we have changed to kill_proc
74097 + */
74098 +static void prgsignal(struct prg_desc *program, int signo)
74099 +{
74100 +    struct proc_desc *pdesc;
74101 +    for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74102 +       kill_proc(pdesc->proc->p_pid, signo, 1);
74103 +}
74104 +
74105 +
74106 +int rms_prgsignal(int id, int signo)
74107 +{
74108 +    struct prg_desc *program;
74109 +    int status = ESUCCESS;
74110 +    
74111 +    kmutex_lock(&rms_lock);
74112 +    
74113 +    if ((program = findProgram(id)) != NULL)
74114 +    {
74115 +       if (CURUID() == 0 || CURUID() == program->uid)
74116 +       {
74117 +           prgsignal(program, signo);
74118 +           if (signo == SIGKILL)
74119 +               program->flags |= PRG_KILLED;
74120 +       }
74121 +       else
74122 +           status = EACCES;
74123 +    }
74124 +    else
74125 +       status = ESRCH;
74126 +    
74127 +    kmutex_unlock(&rms_lock);
74128 +    
74129 +    return(status);
74130 +}
74131 +
74132 +int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap)
74133 +{
74134 +    struct prg_desc *program;
74135 +    struct cap_desc *cdesc;
74136 +    int status = ESUCCESS;
74137 +
74138 +    if (cap == NULL)
74139 +        return(EINVAL);
74140 +
74141 +    kmutex_lock(&rms_lock);
74142 +    if ((program = findProgram(id)) != NULL)
74143 +    {
74144 +       KMEM_ALLOC(cdesc, struct cap_desc *, sizeof(struct cap_desc), TRUE);
74145 +       if (cdesc)
74146 +       {
74147 +           cdesc->index = index;
74148 +           if (copyin(cap, &cdesc->cap, sizeof(ELAN_CAPABILITY)))
74149 +           {
74150 +               KMEM_FREE(cdesc, sizeof(struct cap_desc));
74151 +               status = EFAULT;
74152 +           }
74153 +           else
74154 +           {
74155 +               DBG(printk("rms_prgaddcap :: program %d index %d context %d<-->%d\n",
74156 +                          program->id, index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext));
74157 +               cdesc->next = program->caps;
74158 +               program->caps = cdesc;
74159 +               program->ncaps++;
74160 +           }
74161 +       }
74162 +       else
74163 +           status = ENOMEM;
74164 +    }
74165 +    else
74166 +       status = ESRCH;
74167 +
74168 +    kmutex_unlock(&rms_lock);
74169 +    return(status);
74170 +}
74171 +
74172 +static uint64_t gettime(void)
74173 +{
74174 +    uint64_t now;
74175 +
74176 +#if defined(SOLARIS)
74177 +    timespec_t tv;
74178 +    gethrestime(&tv);
74179 +    now = tv.tv_sec * 1000 + tv.tv_nsec / 1000000;
74180 +#elif defined(LINUX)
74181 +    struct timeval tv;
74182 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17)
74183 +    get_fast_time(&tv);
74184 +#else
74185 +    do_gettimeofday(&tv);
74186 +#endif
74187 +    now = tv.tv_sec * 1000 + tv.tv_usec / 1000;
74188 +#elif defined(DIGITAL_UNIX)
74189 +    struct timeval tv;
74190 +    microtime(&tv);
74191 +    now = tv.tv_sec * 1000 + tv.tv_usec / 1000;
74192 +#endif
74193 +
74194 +    return(now);
74195 +}
74196 +
74197 +#ifdef DIGITAL_UNIX
74198 +
74199 +int rms_getrusage(struct proc_desc *pdesc, struct rusage *ru)
74200 +{
74201 +    task_t   task;
74202 +    thread_t thread;
74203 +    
74204 +    if (!pdesc->proc)
74205 +       return(-1);
74206 +    
74207 +    /*
74208 +     * locking required unless called from the current proc
74209 +     */
74210 +    if (pdesc->proc != CURPROC())
74211 +    {
74212 +       if (!P_REF(pdesc->proc))
74213 +           return(-1);
74214 +       
74215 +       task = proc_to_task(pdesc->proc);
74216 +       if (!task) 
74217 +       {
74218 +           P_UNREF(pdesc->proc);
74219 +           DBG(printk("rms_getrusage :: process (%d) has no task\n", pdesc->proc->p_pid));
74220 +           return(-1);
74221 +       }
74222 +
74223 +       task_reference(task);
74224 +       task_lock(task);
74225 +       
74226 +       if (!queue_empty(&task->thread_list))
74227 +           thread = (thread_t) queue_first(&task->thread_list);
74228 +       else 
74229 +       {
74230 +           task_unlock(task);
74231 +           task_deallocate(task);
74232 +           P_UNREF(pdesc->proc);
74233 +           return(-1);
74234 +       }
74235 +       
74236 +       thread_reference(thread);
74237 +       task_unlock(task);
74238 +    }
74239 +    
74240 +    *ru = proc_to_utask(pdesc->proc)->uu_ru;
74241 +    task_get_rusage(ru, proc_to_task(pdesc->proc));
74242 +    
74243 +    if (pdesc->proc != CURPROC())
74244 +    {
74245 +       task_deallocate(task);
74246 +       thread_deallocate(thread);
74247 +       P_UNREF(pdesc->proc);
74248 +    }
74249 +    return(0);
74250 +}
74251 +
74252 +#endif
74253 +
74254 +/*
74255 + * new stats collection interface, 64-bit with addition of Elan stats
74256 + */
74257 +int rms_prggetstats(int id, prgstats_t *stats)
74258 +{
74259 +#ifdef DIGITAL_UNIX
74260 +    long ruixrss, ruidrss, ruisrss, rumaxrss, rumajflt;
74261 +#endif
74262 +    struct prg_desc *program = 0;
74263 +    struct proc_desc *pdesc;
74264 +    int status = ESUCCESS;
74265 +    prgstats_t totals;
74266 +    uint64_t now = gettime();
74267 +#if defined(SOLARIS)
74268 +    clock_t utime, stime;
74269 +#elif defined(LINUX)
74270 +    uint64_t utime, stime;
74271 +#endif
74272 +
74273 +    long maxrss;
74274 +
74275 +    kmutex_lock(&rms_lock);
74276 +    
74277 +    if (id < 0)
74278 +    {
74279 +       if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74280 +           program = pdesc->program;
74281 +    }
74282 +    else
74283 +       program = findProgram(id);
74284 +
74285 +    if (program)
74286 +    {
74287 +       if (CURUID() == 0 || CURUID() == program->uid)
74288 +       {
74289 +           totals.flags = program->flags;
74290 +           totals.ncpus = program->ncpus;
74291 +           maxrss = 0;
74292 +
74293 +           if (program->nprocs > 0)
74294 +               totals.etime = now - program->start_time;
74295 +           else
74296 +               totals.etime = program->end_time - program->start_time;
74297 +           
74298 +           totals.atime = program->accum_atime;
74299 +           if (program->flags & PRG_RUNNING)
74300 +               totals.atime += program->ncpus * (now - program->sched_time);
74301 +           
74302 +#if defined(SOLARIS)
74303 +           utime = stime = 0;
74304 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74305 +           {
74306 +               utime += pdesc->proc->p_utime;
74307 +               stime += pdesc->proc->p_stime;
74308 +           }
74309 +           totals.utime = TICK_TO_MSEC(utime);
74310 +           totals.stime = TICK_TO_MSEC(stime);
74311 +
74312 +#elif defined(LINUX)
74313 +           utime = stime = 0;
74314 +           totals.memint = program->memint;
74315 +           totals.pageflts = program->majflt;
74316 +
74317 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74318 +           {
74319 +#ifdef  PROCESS_ACCT
74320 +       DBG(printk("rms_prggetsta :: process %d utime %ld clks stime %ld clks\n", 
74321 +                               pdesc->proc->p_pid, TIMEVAL_TO_CT(&pdesc->proc->utime),
74322 +                               TIMEVAL_TO_CT(&pdesc->proc->stime)));                 
74323 +       utime += TIMEVAL_TO_CT(&pdesc->proc->utime);                  
74324 +       stime += TIMEVAL_TO_CT(&pdesc->proc->stime);                  
74325 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
74326 +               DBG(printk("rms_prggetsta :: process %d utime %ld clks stime %ld clks\n", 
74327 +                          pdesc->proc->p_pid, pdesc->proc->times.tms_utime, 
74328 +                          pdesc->proc->times.tms_stime));
74329 +               utime += pdesc->proc->times.tms_utime;
74330 +               stime += pdesc->proc->times.tms_stime;
74331 +#else
74332 +               DBG(printk("rms_prggetsta :: process %d utime %ld clks stime %ld clks\n", 
74333 +                          pdesc->proc->p_pid, pdesc->proc->utime, pdesc->proc->stime));
74334 +               utime += pdesc->proc->utime;
74335 +               stime += pdesc->proc->stime;
74336 +#endif
74337 +
74338 +               totals.pageflts += pdesc->proc->maj_flt; 
74339 +
74340 +               maxrss += PROC_RSS(pdesc->proc) >> (20 - PAGE_SHIFT);
74341 +           }
74342 +
74343 +           /* convert user and system times to millisecs */
74344 +           totals.utime = CT_TO_MSEC(utime);
74345 +           totals.stime = CT_TO_MSEC(stime);
74346 +           
74347 +#elif defined(DIGITAL_UNIX)
74348 +           totals.utime = totals.stime = 0;
74349 +           totals.memint = program->memint;
74350 +           totals.pageflts = program->majflt;
74351 +
74352 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74353 +           {
74354 +               struct rusage ru;
74355 +               if (rms_getrusage(pdesc, &ru) < 0)
74356 +                   continue;
74357 +               
74358 +               totals.utime += TIMEVAL_TO_MSEC(&ru.ru_utime);
74359 +               totals.stime += TIMEVAL_TO_MSEC(&ru.ru_stime);
74360 +               
74361 +               /* convert maxrss to megabytes */
74362 +               rumaxrss = ru.ru_maxrss >> 10;
74363 +               rumajflt = ru.ru_majflt;
74364 +               totals.pageflts += rumajflt;
74365 +               
74366 +               /*
74367 +                * memory integrals are still broken in 5.1
74368 +                */
74369 +               
74370 +#ifdef FIXED_MEMINIT
74371 +               
74372 +               /* convert from pages * clock ticks to Mbytes * secs */
74373 +               ruixrss = (ru.ru_ixrss >> (20 - PAGE_SHIFT)) / hz;
74374 +               ruidrss = (ru.ru_idrss >> (20 - PAGE_SHIFT)) / hz;
74375 +               ruisrss = (ru.ru_isrss >> (20 - PAGE_SHIFT)) / hz;
74376 +               
74377 +               DBG(printk("rms_prggetsta :: process %d mem %d int %d %d %d flt %d\n", pdesc->proc->p_pid, 
74378 +                          rumaxrss, ruixrss, ruidrss, ruisrss, rumajflt));
74379 +               
74380 +               totals.memint += ruixrss + ruidrss + ruisrss;
74381 +#else
74382 +               DBG(printk("rms_prggetsta :: process %d mem %d flt %d\n", pdesc->proc->p_pid, rumaxrss, rumajflt));
74383 +               totals.memint = 0;
74384 +#endif
74385 +               maxrss += rumaxrss;
74386 +           }
74387 +#endif /* DIGITAL_UNIX */
74388 +
74389 +           if (maxrss > program->maxrss)
74390 +               program->maxrss = maxrss;
74391 +           
74392 +           totals.utime += program->cutime;
74393 +           totals.stime += program->cstime;
74394 +           totals.mem = program->maxrss;
74395 +           totals.ebytes = program->ebytes;
74396 +           totals.exfers = program->exfers;
74397 +
74398 +           DBG(printk("rms_prggetsta :: program %d mem %d flt %d\n", program->id, totals.mem, totals.pageflts));
74399 +           
74400 +           if (copyout(&totals, stats, sizeof(prgstats_t)))
74401 +               status = EFAULT;
74402 +       }
74403 +       else
74404 +           status = EACCES;
74405 +    }
74406 +    else
74407 +       status = ESRCH;
74408 +    
74409 +    kmutex_unlock(&rms_lock);
74410 +    return(status);
74411 +}
74412 +
74413 +/*
74414 + * preserve the old stats collection interface
74415 + */
74416 +
74417 +int rms_prggetoldstats(int id, prgstats_old_t *stats)
74418 +{
74419 +#ifdef DIGITAL_UNIX
74420 +    long ruixrss, ruidrss, ruisrss, rumaxrss, rumajflt;
74421 +#endif
74422 +    struct prg_desc *program = 0;
74423 +    struct proc_desc *pdesc;
74424 +    int status = ESUCCESS;
74425 +    prgstats_old_t totals;
74426 +    uint64_t now = gettime();
74427 +#if defined(SOLARIS) || defined(LINUX)
74428 +    clock_t utime, stime;
74429 +#endif
74430 +    long maxrss;
74431 +
74432 +    kmutex_lock(&rms_lock);
74433 +    
74434 +    if (id < 0)
74435 +    {
74436 +       if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74437 +           program = pdesc->program;
74438 +    }
74439 +    else
74440 +       program = findProgram(id);
74441 +
74442 +    if (program)
74443 +    {
74444 +       if (CURUID() == 0 || CURUID() == program->uid)
74445 +       {
74446 +           totals.flags = program->flags;
74447 +           totals.ncpus = program->ncpus;
74448 +           maxrss = 0;
74449 +
74450 +           if (program->nprocs > 0)
74451 +               totals.etime = now - program->start_time;
74452 +           else
74453 +               totals.etime = program->end_time - program->start_time;
74454 +           
74455 +           totals.atime = program->accum_atime;
74456 +           if (program->flags & PRG_RUNNING)
74457 +               totals.atime += program->ncpus * (now - program->sched_time);
74458 +           
74459 +#if defined(SOLARIS)
74460 +           utime = stime = 0;
74461 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74462 +           {
74463 +               utime += pdesc->proc->p_utime;
74464 +               stime += pdesc->proc->p_stime;
74465 +           }
74466 +           totals.utime = TICK_TO_MSEC(utime);
74467 +           totals.stime = TICK_TO_MSEC(stime);
74468 +
74469 +#elif defined(LINUX)
74470 +           utime = stime = 0;
74471 +           totals.memint = program->memint;
74472 +           totals.pageflts = program->majflt;
74473 +
74474 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74475 +           {
74476 +#ifdef  PROCESS_ACCT
74477 +       DBG(printk("rms_getoldsta :: process %d utime %ld clks stime %ld clks\n", 
74478 +                               pdesc->proc->p_pid, TIMEVAL_TO_CT(&pdesc->proc->utime),
74479 +                               TIMEVAL_TO_CT(&pdesc->proc->stime)));                 
74480 +       utime += TIMEVAL_TO_CT(&pdesc->proc->utime);                  
74481 +       stime += TIMEVAL_TO_CT(&pdesc->proc->stime);                  
74482 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
74483 +               DBG(printk("rms_getoldsta :: process %d utime %ld clks stime %ld clks\n", 
74484 +                          pdesc->proc->p_pid, pdesc->proc->times.tms_utime, 
74485 +                          pdesc->proc->times.tms_stime));
74486 +               utime += pdesc->proc->times.tms_utime;
74487 +               stime += pdesc->proc->times.tms_stime;
74488 +#else
74489 +               DBG(printk("rms_getoldsta :: process %d utime %ld clks stime %ld clks\n", 
74490 +                          pdesc->proc->p_pid, pdesc->proc->utime, pdesc->proc->stime));
74491 +               utime += pdesc->proc->utime;
74492 +               stime += pdesc->proc->stime;
74493 +#endif
74494 +
74495 +               totals.pageflts += pdesc->proc->maj_flt; 
74496 +               maxrss += PROC_RSS(pdesc->proc) >> (20 - PAGE_SHIFT);
74497 +           }
74498 +
74499 +           /* convert user and system times to millisecs */
74500 +           totals.utime = CT_TO_MSEC(utime);
74501 +           totals.stime = CT_TO_MSEC(stime);
74502 +           
74503 +#elif defined(DIGITAL_UNIX)
74504 +           totals.utime = totals.stime = 0;
74505 +           totals.memint = program->memint;
74506 +           totals.pageflts = program->majflt;
74507 +
74508 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
74509 +           {
74510 +               struct rusage ru;
74511 +               if (rms_getrusage(pdesc, &ru) < 0)
74512 +                   continue;
74513 +               
74514 +               totals.utime += TIMEVAL_TO_MSEC(&ru.ru_utime);
74515 +               totals.stime += TIMEVAL_TO_MSEC(&ru.ru_stime);
74516 +               
74517 +               /* convert maxrss to megabytes */
74518 +               rumaxrss = ru.ru_maxrss >> 10;
74519 +               rumajflt = ru.ru_majflt;
74520 +               totals.pageflts += rumajflt;
74521 +               
74522 +               /*
74523 +                * memory integrals are still broken in 5.1
74524 +                */
74525 +               
74526 +#ifdef FIXED_MEMINIT
74527 +               
74528 +               /* convert from pages * clock ticks to Mbytes * secs */
74529 +               ruixrss = (ru.ru_ixrss >> (20 - PAGE_SHIFT)) / hz;
74530 +               ruidrss = (ru.ru_idrss >> (20 - PAGE_SHIFT)) / hz;
74531 +               ruisrss = (ru.ru_isrss >> (20 - PAGE_SHIFT)) / hz;
74532 +               
74533 +               DBG(printk("rms_getoldsta :: process %d mem %d int %d %d %d flt %d\n", pdesc->proc->p_pid, 
74534 +                          rumaxrss, ruixrss, ruidrss, ruisrss, rumajflt));
74535 +               
74536 +               totals.memint += ruixrss + ruidrss + ruisrss;
74537 +#else
74538 +               DBG(printk("rms_getoldsta :: process %d mem %d flt %d\n", pdesc->proc->p_pid, rumaxrss, rumajflt));
74539 +               totals.memint = 0;
74540 +#endif
74541 +               maxrss += rumaxrss;
74542 +           }
74543 +#endif /* DIGITAL_UNIX */
74544 +
74545 +           if (maxrss > program->maxrss)
74546 +               program->maxrss = maxrss;
74547 +           
74548 +           totals.utime += program->cutime;
74549 +           totals.stime += program->cstime;
74550 +           totals.mem = program->maxrss;
74551 +           
74552 +           DBG(printk("rms_getoldsta :: program %d mem %d flt %d\n", program->id, totals.mem, totals.pageflts));
74553 +           
74554 +           if (copyout(&totals, stats, sizeof(prgstats_old_t)))
74555 +               status = EFAULT;
74556 +       }
74557 +       else
74558 +           status = EACCES;
74559 +    }
74560 +    else
74561 +       status = ESRCH;
74562 +    
74563 +    kmutex_unlock(&rms_lock);
74564 +    return(status);
74565 +}
74566 +
74567 +
74568 +int rms_prgsuspend(int id)
74569 +{
74570 +    struct prg_desc *program;
74571 +    int status = ESUCCESS;
74572 +
74573 +    kmutex_lock(&rms_lock);
74574 +    
74575 +    if ((program = findProgram(id)) != NULL)
74576 +    {
74577 +       if (CURUID() == 0 || CURUID() == program->uid)
74578 +       {
74579 +           program->flags &= ~PRG_RUNNING;
74580 +           program->flags |=  PRG_SUSPEND;
74581 +           program->accum_atime += program->ncpus * (gettime() - program->sched_time);
74582 +
74583 +           /* suspend/resume just use signals for now */
74584 +           prgsignal(program, SIGSTOP);
74585 +       }
74586 +       else
74587 +           status = EACCES;
74588 +    }
74589 +    else
74590 +       status = ESRCH;
74591 +
74592 +    kmutex_unlock(&rms_lock);
74593 +    return(status);
74594 +}
74595 +
74596 +int rms_prgresume(int id)
74597 +{
74598 +    struct prg_desc *program;
74599 +    int status = ESUCCESS;
74600 +
74601 +    kmutex_lock(&rms_lock);
74602 +    
74603 +    if ((program = findProgram(id)) != NULL)
74604 +    {
74605 +       if (CURUID() == 0 || CURUID() == program->uid)
74606 +       {
74607 +           program->flags &= ~PRG_SUSPEND;
74608 +           program->flags |=  PRG_RUNNING;
74609 +           program->sched_time = gettime();
74610 +           prgsignal(program, SIGCONT);
74611 +       }
74612 +       else
74613 +           status = EACCES;
74614 +    }
74615 +    else
74616 +       status = ESRCH;
74617 +
74618 +    kmutex_unlock(&rms_lock);
74619 +    return(status);
74620 +}
74621 +
74622 +
74623 +int rms_ncaps(int *ncaps)
74624 +{
74625 +    struct proc_desc *pdesc;
74626 +    int status = ESUCCESS;
74627 +    
74628 +    kmutex_lock(&rms_lock);
74629 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74630 +    {
74631 +       if (copyout(&pdesc->program->ncaps, ncaps, sizeof(int)))
74632 +           status = EFAULT;
74633 +    }
74634 +    else
74635 +       status = ESRCH;
74636 +
74637 +    kmutex_unlock(&rms_lock);
74638 +    return(status);
74639 +}
74640 +
74641 +int rms_getprgid(pid_t pid, int *id)
74642 +{
74643 +    struct proc_desc *pdesc;
74644 +    int status = ESUCCESS;
74645 +    
74646 +    if (pid == 0)
74647 +       pid = CURPROC()->p_pid;
74648 +    
74649 +    kmutex_lock(&rms_lock);
74650 +    if ((pdesc = findProcess(pid)) != NULL)
74651 +    {
74652 +       if (copyout(&pdesc->program->id, id, sizeof(int)))
74653 +           status = EFAULT;
74654 +    }
74655 +    else
74656 +       status = ESRCH;
74657 +
74658 +    kmutex_unlock(&rms_lock);
74659 +    return(status);
74660 +}
74661 +
74662 +int rms_setcap(int index, int ctx)
74663 +{
74664 +    struct proc_desc *pdesc;
74665 +    struct cap_desc *cdesc;
74666 +    int status = EINVAL;
74667 +    
74668 +    DBG(printk("rms_setcap    :: process %d cap %d ctx %d\n",CURPROC()->p_pid,index,ctx));
74669 +
74670 +    kmutex_lock(&rms_lock);
74671 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74672 +    {
74673 +       for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next)
74674 +           if (cdesc->index == index && 0 <= ctx && ctx <= (cdesc->cap.cap_highcontext - cdesc->cap.cap_lowcontext + 1))
74675 +           {
74676 +               pdesc->mycap = index;
74677 +               pdesc->myctx = cdesc->cap.cap_lowcontext + ctx;
74678 +               status = ESUCCESS;
74679 +           }
74680 +    }
74681 +    else
74682 +       status = ESRCH;
74683 +
74684 +    kmutex_unlock(&rms_lock);
74685 +    return(status);
74686 +}
74687 +
74688 +
74689 +int rms_mycap(int *index)
74690 +{
74691 +    struct proc_desc *pdesc;
74692 +    int status = ESUCCESS;
74693 +    
74694 +    DBG(printk("rms_mycap :: process %d\n", CURPROC()->p_pid));
74695 +    
74696 +    kmutex_lock(&rms_lock);
74697 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74698 +    {
74699 +       DBG(printk("rms_mycap :: found process %d mycap = %d\n", CURPROC()->p_pid, pdesc->mycap));
74700 +       if (copyout(&pdesc->mycap, index, sizeof(int)))
74701 +           status = EFAULT;
74702 +    }
74703 +    else
74704 +       status = ESRCH;
74705 +
74706 +    kmutex_unlock(&rms_lock);
74707 +    return(status);
74708 +}
74709 +
74710 +int rms_getcap(int index, ELAN_CAPABILITY *cap)
74711 +{
74712 +    struct proc_desc *pdesc;
74713 +    struct cap_desc *cdesc;
74714 +    int status = ESUCCESS;
74715 +    
74716 +    kmutex_lock(&rms_lock);
74717 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
74718 +    {
74719 +       for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next)
74720 +           if (cdesc->index == index)
74721 +               break;
74722 +       
74723 +       if (cdesc)
74724 +       {
74725 +           /* tell each process about its own context */
74726 +           cdesc->cap.cap_mycontext = pdesc->myctx;
74727 +           
74728 +           if (copyout(&cdesc->cap, cap, ELAN_CAP_SIZE(&cdesc->cap)))
74729 +               status = EFAULT;
74730 +           
74731 +           DBG(printk("rms_getcap    :: program %d index %d context %d<-->%d\n", pdesc->program->id, 
74732 +                      cdesc->index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext));
74733 +       }
74734 +       else
74735 +           status = EINVAL;
74736 +    }
74737 +    else
74738 +       status = ESRCH;
74739 +    
74740 +    kmutex_unlock(&rms_lock);
74741 +    return(status);
74742 +}
74743 +
74744 +
74745 +static int
74746 +rms_fork_callback (struct PROC_STRUCT *curproc, struct PROC_STRUCT *child)
74747 +{
74748 +    struct prg_desc *program;
74749 +    struct proc_desc *parent;
74750 +    struct proc_desc *pdesc = NULL;
74751 +
74752 +    kmutex_lock(&rms_lock);
74753 +    
74754 +    DBG(printk("rms_fork_func :: phase is fork pid %d child %d\n", curproc->p_pid, child->p_pid));
74755 +
74756 +    /*
74757 +     * find the process that forked
74758 +     */
74759 +    if ((parent = findProcess(curproc->p_pid)) != NULL)
74760 +    {
74761 +       program = parent->program;
74762 +       
74763 +       DBG(printk("rms_fork_func :: program is %d flags %d\n", program->id, program->flags));
74764 +       
74765 +       /*
74766 +        * processes can be blocked in fork while prgsignal is in progress
74767 +        * so check to see if the PRG_KILLED flag is set
74768 +        */
74769 +       if (program->flags & PRG_KILLED)
74770 +           DBG(printk("rms_fork_func :: fork handler called after program killed\n"));
74771 +       else
74772 +       {
74773 +           /*
74774 +            * create a new process description and add to program
74775 +            */
74776 +           KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE);
74777 +           if (pdesc)
74778 +           {
74779 +               pdesc->next = program->pdescs;
74780 +               program->pdescs = pdesc;
74781 +               pdesc->proc = child;
74782 +               pdesc->mycap = parent->mycap;
74783 +               pdesc->myctx = parent->myctx;
74784 +               pdesc->program = program;
74785 +               pdesc->vp = -1;              /* assigned by elaninitdone */
74786 +               program->nprocs++;
74787 +           }
74788 +           else
74789 +               printk("rms_fork_func :: memory allocation failed\n");
74790 +       }
74791 +    }
74792 +    else
74793 +       DBG(printk("rms_fork_func :: no program\n"));
74794 +    
74795 +    kmutex_unlock (&rms_lock);
74796 +
74797 +    return pdesc == NULL;
74798 +}
74799 +
74800 +static void
74801 +rms_exit_callback (struct PROC_STRUCT *curproc)
74802 +{
74803 +    struct prg_desc *program;
74804 +    struct proc_desc *pdesc, **pdescp, *p;
74805 +#ifdef DIGITAL_UNIX
74806 +    struct rusage ru;
74807 +#endif
74808 +    long maxrss;
74809 +
74810 +    kmutex_lock(&rms_lock);
74811 +    
74812 +    DBG(printk("rms_exit_func :: process %d exiting\n", curproc->p_pid));
74813 +
74814 +    /*
74815 +     * find the process that exited and accumulate 
74816 +     * resource usage in its parent program
74817 +     */
74818 +    for (program = programs, pdesc = 0; program && !pdesc; program = program->next)
74819 +    {
74820 +       pdescp = &program->pdescs;
74821 +       while ((pdesc = *pdescp) != NULL)
74822 +       {
74823 +           if (pdesc->proc->p_pid == curproc->p_pid)
74824 +           {
74825 +               /*
74826 +                * keep track of the resources used
74827 +                */
74828 +#if defined(SOLARIS)
74829 +               program->cutime += TICK_TO_MSEC(pdesc->proc->p_utime);
74830 +               program->cstime += TICK_TO_MSEC(pdesc->proc->p_stime);
74831 +               
74832 +#elif defined(LINUX)
74833 +#ifdef PROCESS_ACCT
74834 +       DBG(printk("rms_exit_func :: process %d exit utime %ld clks stime %ld clks\n",
74835 +                               pdesc->proc->p_pid,
74836 +                               TIMEVAL_TO_CT(&pdesc->proc->utime),                
74837 +                               TIMEVAL_TO_CT(&pdesc->proc->stime)));              
74838 +       program->cutime += TIMEVAL_TO_MSEC(&pdesc->proc->utime);      
74839 +       program->cstime += TIMEVAL_TO_MSEC(&pdesc->proc->stime);      
74840 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)                           
74841 +               DBG(printk("rms_exit_func :: process %d exit utime %ld clks stime %ld clks\n", 
74842 +                          pdesc->proc->p_pid, pdesc->proc->times.tms_utime, 
74843 +                          pdesc->proc->times.tms_stime));
74844 +               
74845 +               program->cutime += CT_TO_MSEC(pdesc->proc->times.tms_utime);
74846 +               program->cstime += CT_TO_MSEC(pdesc->proc->times.tms_stime);
74847 +#else
74848 +               DBG(printk("rms_exit_func :: process %d exit utime %ld clks stime %ld clks\n", 
74849 +                          pdesc->proc->p_pid, pdesc->proc->utime, pdesc->proc->stime));
74850 +               
74851 +               program->cutime += CT_TO_MSEC(pdesc->proc->utime);
74852 +               program->cstime += CT_TO_MSEC(pdesc->proc->stime);
74853 +#endif
74854 +               program->majflt += pdesc->proc->maj_flt;
74855 +               maxrss = PROC_RSS(pdesc->proc) >> (20 - PAGE_SHIFT);
74856 +               
74857 +#elif defined(DIGITAL_UNIX)
74858 +               if (rms_getrusage(pdesc, &ru) == 0)
74859 +               {
74860 +                   program->cutime += TIMEVAL_TO_MSEC(&ru.ru_utime);
74861 +                   program->cstime += TIMEVAL_TO_MSEC(&ru.ru_stime);
74862 +                   program->majflt += ru.ru_majflt;
74863 +                   
74864 +                   /* convert maxrss to megabytes */
74865 +                   maxrss = ru.ru_maxrss >> 10;
74866 +               }
74867 +#endif
74868 +               
74869 +               /*
74870 +                * shared memory segment cleanup
74871 +                */
74872 +#if defined(DIGITAL_UNIX)
74873 +               rms_shmcleanup(-1);
74874 +#elif defined(LINUX)
74875 +               shm_cleanup();
74876 +#endif 
74877 +               
74878 +               /* 
74879 +                * remove process from program 
74880 +                */
74881 +               *pdescp = pdesc->next;
74882 +               KMEM_FREE(pdesc, sizeof(struct proc_desc));
74883 +               program->nprocs--;
74884 +               
74885 +               /*
74886 +                * update the memory high water mark for the program
74887 +                */
74888 +               for (p = program->pdescs; p; p = p->next)
74889 +               {
74890 +#if defined(DIGITAL_UNIX)
74891 +                   if (rms_getrusage(p, &ru) < 0)
74892 +                       continue;
74893 +                   
74894 +                   /* convert maxrss to megabytes */
74895 +                   maxrss += ru.ru_maxrss >> 10;
74896 +                   
74897 +#elif defined(LINUX)                   
74898 +                   maxrss += PROC_RSS(p->proc) >> (20 - PAGE_SHIFT);
74899 +#endif
74900 +               }
74901 +               if (maxrss > program->maxrss)
74902 +                   program->maxrss = maxrss;
74903 +               
74904 +               DBG(printk("rms_exit_func :: program %d procs %d mem %ld\n", program->id, program->nprocs, program->maxrss));
74905 +               
74906 +               /*
74907 +                * final update to the program if this is the last process
74908 +                */
74909 +               if (program->nprocs == 0)
74910 +               {
74911 +                   program->end_time = gettime();
74912 +                   program->flags &= ~PRG_RUNNING;
74913 +                   program->accum_atime += program->ncpus * (program->end_time - program->sched_time);
74914 +                   DBG(printk("rms_exit_func :: last process has gone\n"));
74915 +               }
74916 +               break;
74917 +           }
74918 +           else
74919 +               pdescp = &pdesc->next;
74920 +       }
74921 +    }
74922 +    kmutex_unlock  (&rms_lock);
74923 +}
74924 +
74925 +#if defined(LINUX)
74926 +static int
74927 +rms_ptrack_callback (void *arg, int phase, struct task_struct *child)
74928 +{
74929 +    switch (phase)
74930 +    {
74931 +    case PTRACK_PHASE_CLONE:
74932 +       if (rms_fork_callback (current, child))
74933 +           return PTRACK_DENIED;
74934 +       else
74935 +           return PTRACK_INNHERIT;
74936 +
74937 +    case PTRACK_PHASE_CLONE_FAIL:
74938 +       DBG(printk("rms_fork_func :: fork failed pid %d child %d\n", current->p_pid, child->p_pid));
74939 +       rms_exit_callback(child);
74940 +       break;
74941 +
74942 +    case PTRACK_PHASE_EXIT:
74943 +       rms_exit_callback(current);
74944 +       break;
74945 +    }
74946 +    return PTRACK_FINISHED;
74947 +}
74948 +
74949 +#else
74950 +
74951 +static void
74952 +rms_xa_callback (void *arg, int phase, void *ctask)
74953 +{
74954 +    switch (phase)
74955 +    {
74956 +    case XA_FORK:
74957 +       if (rms_fork_callback (CURPROC(), (struct PROC_STRUCT *)task_to_proc(ctask)))
74958 +           psignal(task_to_proc(ctask), SIGKILL);
74959 +       break;
74960 +    case XA_EXIT:
74961 +       rms_exit_callback (CURPROC());
74962 +       break;
74963 +    }
74964 +}
74965 +
74966 +#endif
74967 +
74968 +#ifdef DIGITAL_UNIX
74969 +
74970 +/*
74971 + * NB: These functions will only work on steelos.
74972 + */
74973 +
74974 +/*
74975 + * rms_setcorepath
74976 + *
74977 + * set a path at which to dump core if the task aborts  
74978 + *
74979 + * enhanced core file names must be enabled for this to work
74980 + */
74981 +int rms_setcorepath(char *corepath)
74982 +{
74983 +    int    length;
74984 +    char  *path;
74985 +    int    status; 
74986 +    struct proc_desc *pdesc;
74987 +    
74988 +    /* 
74989 +     * access restricted - we don't want users moving
74990 +     * their corepath and generating a huge I/O load
74991 +     */
74992 +    if (CURUID())
74993 +       return(EACCES);
74994 +    
74995 +    if (!(pdesc = findProcess(CURPROC()->p_pid)))
74996 +       return(ESRCH);
74997 +    
74998 +    if (pdesc->program->corepath)
74999 +       return(EEXIST);
75000 +    
75001 +    KMEM_ALLOC(path, char *, MAXCOREPATHLEN + 1, TRUE);
75002 +    if (path == 0)
75003 +       return(ENOMEM);
75004 +    
75005 +    if (copyinstr(corepath, path, MAXCOREPATHLEN, &length))
75006 +       return(EFAULT);
75007 +    
75008 +    path[length] = 0;
75009 +    status = add_corepath(path);
75010 +    
75011 +    DBG(printk("rms_setcorepa :: id %d corepath %s status %d\n", pdesc->program->id, path, status));
75012 +    
75013 +    if (status == ESUCCESS)
75014 +       pdesc->program->corepath = path;
75015 +    else
75016 +       KMEM_FREE(path, MAXCOREPATHLEN + 1);
75017 +    
75018 +    return(status);
75019 +}
75020 +
75021 +static int find_corepath(pid_t pid, char *path, int len)
75022 +{
75023 +    struct proc *procp;
75024 +    struct utask *utask;
75025 +    int status = ESUCCESS;
75026 +
75027 +    procp = pfind(pid);
75028 +    if (procp == NULL)
75029 +        return(ENOENT);
75030 +    
75031 +    utask = proc_to_utask(procp);
75032 +    
75033 +    if (utask->uu_coredir)
75034 +        bcopy(utask->uu_coredir,path,len);
75035 +    else
75036 +        status = ENOENT;
75037 +    
75038 +    /* pfind takes out a reference */
75039 +    P_UNREF(procp);
75040 +
75041 +    return(status);
75042 +}
75043 +
75044 +int rms_getcorepath(pid_t pid, char *corepath, int maxlen)
75045 +{
75046 +    char src[MAXCOREPATHLEN];
75047 +    int len;
75048 +    int status;
75049 +    
75050 +    if (maxlen < 2)
75051 +       return(EINVAL);
75052 +    
75053 +    len = MIN(maxlen, MAXCOREPATHLEN);
75054 +    
75055 +    status = find_corepath(pid, src, len);
75056 +    
75057 +    if (status == ESUCCESS)
75058 +        len = strlen(src)+1;
75059 +    else if (status == ENOENT) 
75060 +    {
75061 +       len = 2;
75062 +       src[0] = '.';
75063 +        src[1] = '\0';
75064 +        status = ESUCCESS;
75065 +    }
75066 +    
75067 +    if (copyout(src, corepath, len))
75068 +       return(EFAULT);
75069 +    
75070 +    return(status);
75071 +}
75072 +
75073 +#endif
75074 +
75075 +/*
75076 + * rms_elaninitdone - mark a process as having successfully completed elan initialisation
75077 + */
75078 +int rms_elaninitdone(int vp)
75079 +{
75080 +    int status = ESUCCESS;
75081 +    struct proc_desc *pdesc;
75082 +    
75083 +    DBG(printk("rms_elaninit  :: process %d vp %d\n", CURPROC()->p_pid, vp));
75084 +    
75085 +    kmutex_lock(&rms_lock);
75086 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
75087 +       pdesc->vp = vp;
75088 +    else
75089 +       status = ESRCH;
75090 +    kmutex_unlock(&rms_lock);
75091 +    return(status);
75092 +}
75093 +
75094 +
75095 +/*
75096 + * rms_prgelanpids - return the ids of processes that have completed elan initialisation
75097 + */
75098 +int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids)
75099 +{
75100 +    struct prg_desc *program;
75101 +    struct proc_desc *pdesc;
75102 +    pid_t *pidbuf;
75103 +    int status = ESUCCESS, count = 0, *vpbuf;
75104 +    
75105 +    DBG(printk("rms_elanpids  :: process %d id %d\n", CURPROC()->p_pid, id));
75106 +    
75107 +    kmutex_lock(&rms_lock);
75108 +    
75109 +    if ((program = findProgram(id)) != NULL)
75110 +    {
75111 +       if (program->nprocs > 0)
75112 +       {
75113 +           KMEM_ALLOC(pidbuf, pid_t *, program->nprocs * sizeof(pid_t), TRUE);
75114 +           KMEM_ALLOC(vpbuf, int *, program->nprocs * sizeof(int), TRUE);
75115 +           if (pidbuf && vpbuf)
75116 +           {
75117 +               for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
75118 +                   if (pdesc->vp >= 0)
75119 +                   {
75120 +                       pidbuf[count] = pdesc->proc->p_pid;
75121 +                       vpbuf[count] = pdesc->vp;
75122 +                       count++;
75123 +                   }
75124 +           
75125 +               if (count > 0 && (copyout(pidbuf, pids, sizeof(pid_t) * MIN(count, maxpids)) ||
75126 +                                 copyout(vpbuf, vps, sizeof(int) * MIN(count, maxpids))))
75127 +                   status = EFAULT;
75128 +               
75129 +               KMEM_FREE(pidbuf, program->nprocs * sizeof(pid_t));
75130 +               KMEM_FREE(vpbuf, program->nprocs * sizeof(int));
75131 +           }
75132 +           else
75133 +               status = ENOMEM;
75134 +       }
75135 +
75136 +       if (copyout(&count, npids, sizeof(int)))
75137 +           status = EFAULT;
75138 +    }
75139 +    else
75140 +       status = ESRCH;
75141 +
75142 +    kmutex_unlock(&rms_lock);
75143 +    
75144 +    return(status);
75145 +
75146 +}
75147 +
75148 +int rms_setpset(int psid)
75149 +{
75150 +    struct prg_desc *program;
75151 +    struct proc_desc *pdesc;
75152 +    int status = ESUCCESS;
75153 +
75154 +    if (CURUID())
75155 +       return(EACCES);
75156 +
75157 +    kmutex_lock(&rms_lock);
75158 +    
75159 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
75160 +    {
75161 +       program = pdesc->program;
75162 +       program->psid = psid;
75163 +    }
75164 +    else
75165 +       status = ESRCH;
75166 +
75167 +    kmutex_unlock(&rms_lock);
75168 +    return(status);
75169 +}
75170 +
75171 +
75172 +int rms_getpset(int id, int *psid)
75173 +{
75174 +    struct prg_desc *program;
75175 +    int status = ESUCCESS;
75176 +    
75177 +    kmutex_lock(&rms_lock);
75178 +    if ((program = findProgram(id)) != NULL)
75179 +    {
75180 +       if (copyout(&program->psid, psid, sizeof(int)))
75181 +           status = EFAULT;
75182 +    }
75183 +    else
75184 +       status = ESRCH;
75185 +    
75186 +    kmutex_unlock(&rms_lock);
75187 +    return(status);
75188 +}
75189 +
75190 +int
75191 +rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers)
75192 +{
75193 +    struct prg_desc *program;
75194 +    int status = ESUCCESS;
75195 +    
75196 +    DBG(printk("rms_setelanst :: process %d id %d\n", CURPROC()->p_pid, id));
75197 +
75198 +    kmutex_lock(&rms_lock);
75199 +    if ((program = findProgram(id)) != NULL)
75200 +    {
75201 +       if (CURUID() == 0 || CURUID() == program->uid)
75202 +       {
75203 +           program->ebytes = ebytes;
75204 +           program->exfers = exfers;
75205 +       }
75206 +       else
75207 +           status = EACCES;
75208 +    }
75209 +    else
75210 +       status = ESRCH;
75211 +    
75212 +    kmutex_unlock(&rms_lock);
75213 +    return(status);
75214 +}
75215 +
75216 +rms_modversion()
75217 +{
75218 +    return(RMS_MODVERSION);
75219 +}
75220 +
75221 +
75222 +/*
75223 + * Local variables:
75224 + * c-file-style: "stroustrup"
75225 + * End:
75226 + */
75227 +
75228 +
75229 +
75230 +
75231 +
75232 +
75233 +
75234 Index: linux-2.6.5-7.191/drivers/net/qsnet/rms/rms_kern_Linux.c
75235 ===================================================================
75236 --- linux-2.6.5-7.191.orig/drivers/net/qsnet/rms/rms_kern_Linux.c       2004-02-23 16:02:56.000000000 -0500
75237 +++ linux-2.6.5-7.191/drivers/net/qsnet/rms/rms_kern_Linux.c    2005-07-28 14:52:52.924667184 -0400
75238 @@ -0,0 +1,430 @@
75239 +/*
75240 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
75241 + *
75242 + *    For licensing information please see the supplied COPYING file
75243 + *
75244 + */
75245 +
75246 +#ident "$Id: rms_kern_Linux.c,v 1.20 2004/05/14 08:55:57 duncan Exp $"
75247 +/*      $Source: /cvs/master/quadrics/rmsmod/rms_kern_Linux.c,v $*/
75248 +
75249 +#include <qsnet/kernel.h>
75250 +
75251 +#include <linux/sysctl.h>
75252 +#include <linux/init.h>
75253 +#include <linux/module.h>
75254 +#include <linux/proc_fs.h>
75255 +
75256 +#include <rms/rmscall.h>
75257 +#include <rms/rmsio.h>
75258 +
75259 +MODULE_AUTHOR("Quadrics Ltd");
75260 +MODULE_DESCRIPTION("RMS support module");
75261 +MODULE_LICENSE("GPL");
75262 +
75263 +int rms_debug = 0;
75264 +
75265 +ctl_table rms_table[] = {
75266 +    {
75267 +       .ctl_name = 1,
75268 +       .procname = "rms_debug",
75269 +       .data     = &rms_debug,
75270 +       .maxlen   = sizeof(int),
75271 +       .mode     = 0644,
75272 +       .child    = NULL,
75273 +       .proc_handler = &proc_dointvec,
75274 +    },
75275 +    {0}
75276 +};
75277 +
75278 +ctl_table rms_root_table[] = {
75279 +    {
75280 +       .ctl_name = CTL_DEBUG,
75281 +       .procname = "rms",
75282 +       .data     = NULL,
75283 +       .maxlen   = 0,
75284 +       .mode     = 0555,
75285 +       .child    = rms_table,
75286 +    },
75287 +    {0}
75288 +};
75289 +
75290 +static struct ctl_table_header *rms_sysctl_header;
75291 +
75292 +static int rms_open (struct inode *ino, struct file *fp);
75293 +static int rms_release (struct inode *ino, struct file *fp);
75294 +static int rms_ioctl (struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg);
75295 +
75296 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
75297 +static int
75298 +rms_ioctl32_cmds[] =
75299 +{
75300 +    RMSIO_GETPRGID32,
75301 +    RMSIO_GETCAP32
75302 +};
75303 +
75304 +static int      rms_ioctl32 (unsigned int fd, unsigned int cmd, 
75305 +                            unsigned long arg, struct file *file);
75306 +#endif
75307 +
75308 +static struct file_operations rms_fops =
75309 +{
75310 +    .owner   = THIS_MODULE,
75311 +    .ioctl   = rms_ioctl,
75312 +    .open    = rms_open,
75313 +    .release = rms_release,
75314 +};
75315 +
75316 +struct proc_dir_entry *rms_procfs_programs;
75317 +static struct proc_dir_entry *rms_procfs_root;
75318 +
75319 +int version_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
75320 +{
75321 +    return(sprintf(page, "$Id: rms_kern_Linux.c,v 1.20 2004/05/14 08:55:57 duncan Exp $\n"));
75322 +}
75323 +
75324 +static int __init rms_start(void)
75325 +{
75326 +    struct proc_dir_entry *p;
75327 +    int res;
75328 +
75329 +    if ((rms_sysctl_header = register_sysctl_table(rms_root_table, 1)) == 0)
75330 +    {
75331 +       printk ("rmsmod: failed to register sysctl table\n");
75332 +       return (-ENXIO);
75333 +    }
75334 +    
75335 +    if ((rms_procfs_root = proc_mkdir("rms",  NULL)) == NULL ||
75336 +       (rms_procfs_programs = proc_mkdir("programs",  rms_procfs_root)) == NULL ||
75337 +       (p = create_proc_entry ("control", S_IRUGO, rms_procfs_root)) == NULL)
75338 +    {
75339 +       unregister_sysctl_table (rms_sysctl_header);
75340 +       printk ("rmsmod: failed to register /proc/rms\n");
75341 +       return (-ENXIO);
75342 +    }
75343 +    p->proc_fops = &rms_fops;
75344 +    p->owner     = THIS_MODULE;
75345 +    p->data      = NULL;
75346 +
75347 +    if ((p = create_proc_entry ("version", S_IRUGO, rms_procfs_root)) != NULL)
75348 +    {
75349 +       p->owner = THIS_MODULE;
75350 +       p->data = NULL;
75351 +       p->read_proc = version_callback;
75352 +    }
75353 +
75354 +    if ((res = rms_init()) != ESUCCESS)
75355 +    {
75356 +       remove_proc_entry ("programs", rms_procfs_root);
75357 +       remove_proc_entry ("control", rms_procfs_root);
75358 +       remove_proc_entry ("rms", NULL);
75359 +       unregister_sysctl_table (rms_sysctl_header);
75360 +       return (-res);
75361 +    }
75362 +
75363 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
75364 +    lock_kernel();
75365 +    {
75366 +       extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
75367 +       register int i;
75368 +       for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++)
75369 +           register_ioctl32_conversion (rms_ioctl32_cmds[i], rms_ioctl32);
75370 +    }
75371 +    unlock_kernel();
75372 +#endif
75373 +    return (0);
75374 +}
75375 +
75376 +static void __exit rms_exit(void)
75377 +{
75378 +    rms_fini();
75379 +
75380 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
75381 +    lock_kernel();
75382 +    {
75383 +       extern void unregister_ioctl32_conversion(unsigned int cmd);
75384 +       register int i;
75385 +
75386 +       for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++)
75387 +           unregister_ioctl32_conversion (rms_ioctl32_cmds[i]);
75388 +    }
75389 +    unlock_kernel();
75390 +#endif
75391 +
75392 +    remove_proc_entry ("version", rms_procfs_root);
75393 +    remove_proc_entry ("programs", rms_procfs_root);
75394 +    remove_proc_entry ("control", rms_procfs_root);
75395 +    remove_proc_entry ("rms", NULL);
75396 +    unregister_sysctl_table(rms_sysctl_header);
75397 +}
75398 +
75399 +/* Declare the module init and exit functions */
75400 +module_init(rms_start);
75401 +module_exit(rms_exit);
75402 +
75403 +static int
75404 +rms_open (struct inode *inode, struct file *fp)
75405 +{
75406 +    MOD_INC_USE_COUNT;
75407 +    fp->private_data = NULL;
75408 +
75409 +    return (0);
75410 +}
75411 +
75412 +static int
75413 +rms_release (struct inode *inode, struct file *fp)
75414 +{
75415 +    MOD_DEC_USE_COUNT;
75416 +    return (0);
75417 +}
75418 +
75419 +static int 
75420 +rms_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
75421 +{
75422 +    int res;
75423 +
75424 +    switch (cmd) 
75425 +    {
75426 +/* no corepath support in Linux yet */
75427 +#if 0
75428 +    case RMSIO_SETCOREPATH:
75429 +       res = rms_setcorepath((caddr_t)arg);
75430 +       break;
75431 +       
75432 +    case RMSIO_GETCOREPATH:
75433 +    {
75434 +       RMSIO_GETCOREPATH_STRUCT args;
75435 +
75436 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75437 +           return (-EFAULT);
75438 +
75439 +       res = rms_getcorepath(args.pid, args.corepath, args.maxlen);
75440 +       break;
75441 +    }
75442 +#endif
75443 +       
75444 +    case RMSIO_PRGCREATE:
75445 +    {
75446 +       RMSIO_PRGCREATE_STRUCT args;
75447 +
75448 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75449 +           return (-EFAULT);
75450 +
75451 +       res = rms_prgcreate(args.id, args.uid, args.cpus);
75452 +       break;
75453 +    }
75454 +
75455 +    case RMSIO_PRGDESTROY:
75456 +       res = rms_prgdestroy(arg);
75457 +       break;
75458 +       
75459 +    case RMSIO_PRGIDS:
75460 +    {
75461 +       RMSIO_PRGIDS_STRUCT args;
75462 +       
75463 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75464 +           return (-EFAULT);
75465 +
75466 +       res = rms_prgids(args.maxids, args.prgids, args.nprgs);
75467 +       break;
75468 +    }
75469 +
75470 +    case RMSIO_PRGINFO:
75471 +    {
75472 +       RMSIO_PRGINFO_STRUCT args;
75473 +       
75474 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75475 +           return (-EFAULT);
75476 +
75477 +       res = rms_prginfo(args.id, args.maxpids, args.pids, args.nprocs);
75478 +       break;
75479 +    }
75480 +       
75481 +    case RMSIO_PRGSIGNAL:
75482 +    {
75483 +       RMSIO_PRGSIGNAL_STRUCT args;
75484 +
75485 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75486 +           return (-EFAULT);
75487 +
75488 +       res = rms_prgsignal(args.id, args.signo);
75489 +       break;
75490 +    }
75491 +       
75492 +    case RMSIO_PRGADDCAP:
75493 +    {
75494 +       RMSIO_PRGADDCAP_STRUCT args;
75495 +
75496 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75497 +           return (-EFAULT);
75498 +
75499 +       res = rms_prgaddcap(args.id, args.index, args.cap);
75500 +       break;
75501 +    }
75502 +
75503 +    case RMSIO_SETCAP:
75504 +    {
75505 +       RMSIO_SETCAP_STRUCT args;
75506 +
75507 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75508 +           return (-EFAULT);
75509 +
75510 +       res = rms_setcap(args.index, args.ctx);
75511 +       break;
75512 +    }
75513 +       
75514 +    case RMSIO_NCAPS:
75515 +       res = rms_ncaps((int *)arg);
75516 +       break;
75517 +       
75518 +    case RMSIO_GETPRGID:
75519 +    {
75520 +       RMSIO_GETPRGID_STRUCT args;
75521 +
75522 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75523 +           return (-EFAULT);
75524 +
75525 +       res = rms_getprgid(args.pid, args.id);
75526 +       break;
75527 +    }
75528 +
75529 +    case RMSIO_GETMYCAP:
75530 +       res = rms_mycap((int *)arg);
75531 +       break;
75532 +       
75533 +    case RMSIO_GETCAP:
75534 +    {
75535 +       RMSIO_GETCAP_STRUCT args;
75536 +
75537 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75538 +           return (-EFAULT);
75539 +
75540 +       res = rms_getcap(args.index, args.cap);
75541 +       break;
75542 +    }
75543 +
75544 +    case RMSIO_PRGGETSTATS:
75545 +    {
75546 +       RMSIO_PRGGETSTATS_STRUCT args;
75547 +
75548 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75549 +           return (-EFAULT);
75550 +
75551 +       res = rms_prggetoldstats(args.id, args.stats);
75552 +       break;
75553 +    }
75554 +
75555 +    case RMSIO_PRGGETSTATS2:
75556 +    {
75557 +       RMSIO_PRGGETSTATS2_STRUCT args;
75558 +
75559 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75560 +           return (-EFAULT);
75561 +
75562 +       res = rms_prggetstats(args.id, args.stats);
75563 +       break;
75564 +    }
75565 +
75566 +    case RMSIO_PRGSUSPEND:
75567 +       res = rms_prgsuspend(arg);
75568 +       break;
75569 +       
75570 +    case RMSIO_PRGRESUME:
75571 +       res = rms_prgresume(arg);
75572 +       break;
75573 +
75574 +    case RMSIO_ELANINITDONE:
75575 +       res = rms_elaninitdone(arg);
75576 +       break;
75577 +
75578 +    case RMSIO_PRGELANPIDS:
75579 +    {
75580 +       RMSIO_PRGELANPIDS_STRUCT args;
75581 +
75582 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75583 +           return (-EFAULT);
75584 +
75585 +       res = rms_prgelanpids(args.id, args.maxpids, args.vps, args.pids, args.npids);
75586 +       break;
75587 +    }
75588 +
75589 +    case RMSIO_SETELANSTATS:
75590 +    {
75591 +       RMSIO_SETELANSTATS_STRUCT args;
75592 +       elanstats_t estats;
75593 +
75594 +       if (copy_from_user(&args, (void *)arg, sizeof(args)) ||
75595 +           copy_from_user(&estats, (void *)args.estats, sizeof(estats)))
75596 +           return(-EFAULT);
75597 +       
75598 +       res = rms_setelanstats(args.id, estats.ebytes, estats.exfers);
75599 +       break;
75600 +    }
75601 +
75602 +    case RMSIO_MODVERSION:
75603 +    {
75604 +       RMSIO_MODVERSION_STRUCT args;
75605 +       int version = rms_modversion();
75606 +       
75607 +       if (copy_from_user (&args, (void *)arg, sizeof (args)))
75608 +           return (-EFAULT);
75609 +       
75610 +       if (copyout(&version, args.version, sizeof(int)))
75611 +           res = EFAULT;
75612 +       else
75613 +           res = ESUCCESS;
75614 +
75615 +       break;
75616 +    }
75617 +
75618 +    default:
75619 +       res = EINVAL;
75620 +       break;
75621 +    }
75622 +
75623 +    return ((res == 0) ? 0 : -res);
75624 +}
75625 +
75626 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
75627 +static int
75628 +rms_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
75629 +{
75630 +    int res;
75631 +
75632 +    switch (cmd)
75633 +    {
75634 +    case RMSIO_GETPRGID32:
75635 +    {
75636 +       RMSIO_GETPRGID_STRUCT32 args;
75637 +
75638 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75639 +           return (-EFAULT);
75640 +
75641 +       res = rms_getprgid(args.pid, (int *)(unsigned long) args.idptr);
75642 +       break;
75643 +    }
75644 +       
75645 +    case RMSIO_GETCAP32:
75646 +    {
75647 +       RMSIO_GETCAP_STRUCT32 args;
75648 +
75649 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
75650 +           return (-EFAULT);
75651 +
75652 +       res = rms_getcap(args.index, (ELAN_CAPABILITY *)(unsigned long) args.capptr);
75653 +       break;
75654 +    }
75655 +
75656 +    default:
75657 +       return (sys_ioctl (fd, cmd, arg));
75658 +    }
75659 +
75660 +    return ((res == 0) ? 0 : -res);
75661 +}
75662 +#endif
75663 +
75664 +/*
75665 + * Local variables:
75666 + * c-file-style: "stroustrup"
75667 + * End:
75668 + */
75669 Index: linux-2.6.5-7.191/drivers/net/Kconfig
75670 ===================================================================
75671 --- linux-2.6.5-7.191.orig/drivers/net/Kconfig  2005-06-28 12:23:55.000000000 -0400
75672 +++ linux-2.6.5-7.191/drivers/net/Kconfig       2005-07-28 14:52:52.926666880 -0400
75673 @@ -2491,6 +2491,8 @@
75674  
75675  source "drivers/net/tokenring/Kconfig"
75676  
75677 +source "drivers/net/qsnet/Kconfig"
75678 +
75679  config NET_FC
75680         bool "Fibre Channel driver support"
75681         depends on NETDEVICES && SCSI && PCI
75682 Index: linux-2.6.5-7.191/drivers/net/Makefile
75683 ===================================================================
75684 --- linux-2.6.5-7.191.orig/drivers/net/Makefile 2005-06-28 12:23:50.000000000 -0400
75685 +++ linux-2.6.5-7.191/drivers/net/Makefile      2005-07-28 14:52:52.927666728 -0400
75686 @@ -196,3 +196,5 @@
75687  
75688  obj-$(CONFIG_NETCONSOLE) += netconsole.o
75689  obj-$(CONFIG_XPNET) += xpnet.o
75690 +
75691 +obj-$(CONFIG_QSNET) += qsnet/
75692 Index: linux-2.6.5-7.191/fs/exec.c
75693 ===================================================================
75694 --- linux-2.6.5-7.191.orig/fs/exec.c    2005-06-28 12:24:23.000000000 -0400
75695 +++ linux-2.6.5-7.191/fs/exec.c 2005-07-28 14:52:52.928666576 -0400
75696 @@ -65,6 +65,8 @@
75697  #include <linux/kmod.h>
75698  #endif
75699  
75700 +#include <linux/ptrack.h>
75701 +
75702  int core_uses_pid;
75703  char core_pattern[65] = "core";
75704  int suid_dumpable = 0;
75705 @@ -1202,6 +1204,9 @@
75706         if (retval < 0)
75707                 goto out;
75708  
75709 +       /* notify any ptrack callbacks of the process exec */
75710 +       ptrack_call_callbacks(PTRACK_PHASE_EXEC, NULL);
75711 +
75712         retval = search_binary_handler(&bprm,regs);
75713         if (retval >= 0) {
75714                 TRIG_EVENT(exec_hook, file->f_dentry->d_name.len,
75715 Index: linux-2.6.5-7.191/fs/select.c
75716 ===================================================================
75717 --- linux-2.6.5-7.191.orig/fs/select.c  2005-06-28 12:24:00.000000000 -0400
75718 +++ linux-2.6.5-7.191/fs/select.c       2005-07-28 14:52:52.928666576 -0400
75719 @@ -649,3 +649,4 @@
75720         }
75721         return -EIOCBRETRY;
75722  }
75723 +EXPORT_SYMBOL_GPL(sys_poll);
75724 Index: linux-2.6.5-7.191/fs/read_write.c
75725 ===================================================================
75726 --- linux-2.6.5-7.191.orig/fs/read_write.c      2005-06-28 12:24:09.000000000 -0400
75727 +++ linux-2.6.5-7.191/fs/read_write.c   2005-07-28 14:52:52.929666424 -0400
75728 @@ -339,6 +339,7 @@
75729  
75730         return ret;
75731  }
75732 +EXPORT_SYMBOL(sys_write);
75733  
75734  asmlinkage ssize_t sys_pread64(unsigned int fd, char __user *buf,
75735                              size_t count, loff_t pos)
75736 Index: linux-2.6.5-7.191/include/elan/bitmap.h
75737 ===================================================================
75738 --- linux-2.6.5-7.191.orig/include/elan/bitmap.h        2004-02-23 16:02:56.000000000 -0500
75739 +++ linux-2.6.5-7.191/include/elan/bitmap.h     2005-07-28 14:52:52.929666424 -0400
75740 @@ -0,0 +1,74 @@
75741 +/*
75742 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
75743 + *
75744 + *    For licensing information please see the supplied COPYING file
75745 + *
75746 + */
75747 +
75748 +#ifndef __QSNET_BITMAP_H
75749 +#define __QSNET_BITMAP_H
75750 +
75751 +#ident "$Id: bitmap.h,v 1.5 2004/01/20 17:32:15 david Exp $"
75752 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/bitmap.h,v $ */
75753 +
75754 +typedef unsigned int                   bitmap_t;
75755 +
75756 +#define BT_NBIPUL                      32                      /* n bits per bitmap_t */
75757 +#define BT_ULSHIFT                     5                       /* log 2 BT_NBIPUL to extract word index */
75758 +#define BT_ULMASK                      0x1f                    /* to extract bit index */
75759 +
75760 +#define BT_WIM(bitmap,bitindex)                ((bitmap)[(bitindex) >> BT_ULSHIFT])            /* word in map */
75761 +#define BT_BIW(bitindex)               (1 << ((bitindex) & BT_ULMASK))         /* bit in word */
75762 +
75763 +/* BT_BITOUL -- n bits to n words */
75764 +#define BT_BITOUL(nbits)               (((nbits) + BT_NBIPUL -1) / BT_NBIPUL)
75765 +
75766 +#define BT_TEST(bitmap,bitindex)       ((BT_WIM((bitmap), (bitindex)) & BT_BIW(bitindex)) ? 1 : 0)
75767 +#define BT_SET(bitmap,bitindex)                do { BT_WIM((bitmap), (bitindex)) |= BT_BIW(bitindex); } while (0)
75768 +#define BT_CLEAR(bitmap,bitindex)      do { BT_WIM((bitmap), (bitindex)) &= ~BT_BIW(bitindex); } while (0)
75769 +
75770 +/* return first free bit in the bitmap, or -1 for failure */
75771 +extern int  bt_freebit (bitmap_t *bitmap, int nbits);
75772 +
75773 +/* return the index of the lowest set bit in the bitmap or -1 for failure */
75774 +extern int bt_lowbit (bitmap_t *bitmap, int nbits);
75775 +
75776 +/* return the index of the next set/clear bit in the bitmap or -1 for failure */
75777 +extern int bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset);
75778 +
75779 +/* copy/zero/fill/compare a bit map */
75780 +extern void bt_copy (bitmap_t *a, bitmap_t *b, int nbits);
75781 +extern void bt_zero (bitmap_t *a, int nbits);
75782 +extern void bt_fill (bitmap_t *a, int nbits);
75783 +extern int  bt_cmp (bitmap_t *a, bitmap_t *b, int nbits);
75784 +
75785 +/* intersect bitmap 'a' with bitmap 'b' and return in 'a' */
75786 +extern void bt_intersect (bitmap_t *a, bitmap_t *b, int nbits);
75787 +
75788 +/* remove/add bitmap 'b' from bitmap 'a' */
75789 +extern void bt_remove (bitmap_t *a, bitmap_t *b, int nbits);
75790 +extern void bt_add (bitmap_t *a, bitmap_t *b, int nbits);
75791 +
75792 +/* check whether bitmap 'a' spans bitmap 'b' */
75793 +extern int  bt_spans (bitmap_t *a, bitmap_t *b, int nbits);
75794 +
75795 +/* copy [base,base+nbits-1] from 'a' to 'b' */
75796 +extern void bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits);
75797 +
75798 +/* find bits clear in 'a' and set in 'b', put result in 'c' */
75799 +extern void bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits);
75800 +
75801 +/* find bits set in 'a' and clear in 'b', put result in 'c' */
75802 +extern void bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits);
75803 +
75804 +/* return number of bits set in bitmap */
75805 +extern int  bt_nbits (bitmap_t *a, int nbits);
75806 +
75807 +
75808 +#endif /* __QSNET_BITMAP_H */
75809 +
75810 +/*
75811 + * Local variables:
75812 + * c-file-style: "linux"
75813 + * End:
75814 + */
75815 Index: linux-2.6.5-7.191/include/elan/capability.h
75816 ===================================================================
75817 --- linux-2.6.5-7.191.orig/include/elan/capability.h    2004-02-23 16:02:56.000000000 -0500
75818 +++ linux-2.6.5-7.191/include/elan/capability.h 2005-07-28 14:52:52.930666272 -0400
75819 @@ -0,0 +1,197 @@
75820 +/*
75821 + *    Copyright (c) 2003 by Quadrics Limited.
75822 + * 
75823 + *    For licensing information please see the supplied COPYING file
75824 + *
75825 + */
75826 +
75827 +#ident "@(#)$Id: capability.h,v 1.16 2004/07/20 10:15:33 david Exp $"
75828 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/capability.h,v $*/
75829 +
75830 +#ifndef __ELAN_CAPABILITY_H
75831 +#define __ELAN_CAPABILITY_H
75832 +
75833 +#include <elan/bitmap.h>
75834 +
75835 +/* Maximum number of rails */
75836 +#define ELAN_MAX_RAILS          (31)
75837 +/* Maximum number of virtual processes we support */
75838 +#define ELAN_MAX_VPS           (16384)
75839 +
75840 +/* Number of words in a bitmap capability */
75841 +#define ELAN_BITMAPSIZE                BT_BITOUL(ELAN_MAX_VPS)
75842 +
75843 +/* Guaranteed invalid values */
75844 +#define ELAN_INVALID_PROCESS   (0x7fffffff)            /* A GUARANTEED invalid process # */
75845 +#define ELAN_INVALID_NODE      (0xFFFF)
75846 +#define ELAN_INVALID_CONTEXT   (0xFFFF)
75847 +
75848 +/* Number of values in a user key */
75849 +#define ELAN_USERKEY_ENTRIES   4
75850 +
75851 +typedef void * ELAN_CAP_OWNER;
75852 +
75853 +/* 
75854 + * When used in userspace this is relative to the base of
75855 + * the capability but is an absolute location for kernel space.
75856 + */
75857 +typedef struct elan_location
75858 +{
75859 +       unsigned short loc_node;
75860 +       unsigned short loc_context;
75861 +} ELAN_LOCATION;
75862 +
75863 +typedef struct elan_userkey
75864 +{
75865 +       unsigned        key_values[ELAN_USERKEY_ENTRIES];
75866 +} ELAN_USERKEY;
75867 +
75868 +typedef struct elan_capability
75869 +{
75870 +       ELAN_USERKEY    cap_userkey;                            /* User defined protection */
75871 +
75872 +       int             cap_version;                            /* Version number */
75873 +       unsigned short  cap_type;                               /* Capability Type */
75874 +       unsigned short  cap_spare;                              /* spare was cap_elan_type */
75875 +
75876 +       int             cap_lowcontext;                         /* low context number in block */
75877 +       int             cap_highcontext;                        /* high context number in block */
75878 +       int             cap_mycontext;                          /* my context number */
75879 +    
75880 +       int             cap_lownode;                            /* low elan id of group */
75881 +       int             cap_highnode;                           /* high elan id of group */
75882 +
75883 +       unsigned int    cap_railmask;                           /* which rails this capability is valid for */
75884 +       
75885 +       bitmap_t        cap_bitmap[ELAN_BITMAPSIZE];            /* Bitmap of process to processor translation */
75886 +} ELAN_CAPABILITY;
75887 +
75888 +#define ELAN_CAP_UNINITIALISED         (-1)
75889 +
75890 +#define ELAN_CAP_VERSION_NUMBER                (0x00010002)
75891 +
75892 +#define ELAN_CAP_NUM_NODES(cap)                ((cap)->cap_highnode - (cap)->cap_lownode + 1)
75893 +#define ELAN_CAP_NUM_CONTEXTS(cap)     ((cap)->cap_highcontext - (cap)->cap_lowcontext + 1)
75894 +
75895 +/* using or defining our own MIN/MAX had conflicts with dunix so we define ELAN_ ones */
75896 +#define ELAN_MIN(a,b)  ((a) > (b) ? (b) : (a))
75897 +#define ELAN_MAX(a,b)  ((a) > (b) ? (a) : (b))
75898 +#define ELAN_CAP_BITMAPSIZE(cap)       (ELAN_MAX (ELAN_MIN (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap), ELAN_MAX_VPS), 0))
75899 +
75900 +#define ELAN_CAP_SIZE(cap)             (offsetof (ELAN_CAPABILITY, cap_bitmap[BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap))]))
75901 +#define ELAN_CAP_ENTRIES(cap)           (((cap)->cap_type & ELAN_CAP_TYPE_NO_BITMAP) ? ELAN_CAP_BITMAPSIZE((cap)) : bt_nbits((cap)->cap_bitmap, ELAN_CAP_BITMAPSIZE((cap))))
75902 +
75903 +#define ELAN_CAP_IS_RAIL_SET(cap,rail)  ((cap)->cap_railmask & (1<<rail))
75904 +
75905 +#define ELAN_CAP_KEY_MATCH(cap1,cap2)  ((cap1)->cap_userkey.key_values[0] == (cap2)->cap_userkey.key_values[0] && \
75906 +                                        (cap1)->cap_userkey.key_values[1] == (cap2)->cap_userkey.key_values[1] && \
75907 +                                        (cap1)->cap_userkey.key_values[2] == (cap2)->cap_userkey.key_values[2] && \
75908 +                                        (cap1)->cap_userkey.key_values[3] == (cap2)->cap_userkey.key_values[3])
75909 +
75910 +#define ELAN_CAP_TYPE_MATCH(cap1,cap2)  ((cap1)->cap_version           == (cap2)->cap_version           && \
75911 +                                        (cap1)->cap_type              == (cap2)->cap_type)
75912 +
75913 +#define ELAN_CAP_GEOM_MATCH(cap1,cap2) ((cap1)->cap_lowcontext        == (cap2)->cap_lowcontext        && \
75914 +                                        (cap1)->cap_highcontext       == (cap2)->cap_highcontext       && \
75915 +                                        (cap1)->cap_lownode           == (cap2)->cap_lownode           && \
75916 +                                        (cap1)->cap_highnode          == (cap2)->cap_highnode          && \
75917 +                                         (cap1)->cap_railmask          == (cap2)->cap_railmask          && \
75918 +                                        !bcmp (&(cap1)->cap_bitmap[0], &(cap2)->cap_bitmap[0],            \
75919 +                                               BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap1)*sizeof(bitmap_t))))
75920 +
75921 +#define ELAN_CAP_MATCH(cap1,cap2)      (ELAN_CAP_KEY_MATCH (cap1, cap2)  && \
75922 +                                        ELAN_CAP_TYPE_MATCH (cap1, cap2) && \
75923 +                                        ELAN_CAP_GEOM_MATCH (cap1, cap2))
75924 +
75925 +#define ELAN_CAP_VALID_MYCONTEXT(cap)   (    ((cap)->cap_lowcontext  != ELAN_CAP_UNINITIALISED)     \
75926 +                                         && ((cap)->cap_mycontext   != ELAN_CAP_UNINITIALISED)     \
75927 +                                         && ((cap)->cap_highcontext != ELAN_CAP_UNINITIALISED)     \
75928 +                                         && ((cap)->cap_lowcontext <= (cap)->cap_mycontext)        \
75929 +                                         && ((cap)->cap_mycontext <= (cap)->cap_highcontext)) 
75930 +
75931 +/*
75932 + * Definitions for type 
75933 + */
75934 +#define ELAN_CAP_TYPE_BLOCK            1               /* Block distribution */
75935 +#define ELAN_CAP_TYPE_CYCLIC           2               /* Cyclic distribution */
75936 +#define ELAN_CAP_TYPE_KERNEL           3               /* Kernel capability */
75937 +
75938 +#define ELAN_CAP_TYPE_MASK             (0xFFF)         /* Mask for type */
75939 +
75940 +/* OR these bits in for extra features */
75941 +#define ELAN_CAP_TYPE_HWTEST           (1 << 12)       /* Hardware test capability type */
75942 +#define ELAN_CAP_TYPE_MULTI_RAIL       (1 << 13)       /* "new" multi rail capability */
75943 +#define ELAN_CAP_TYPE_NO_BITMAP                (1 << 14)       /* don't use bit map */
75944 +#define ELAN_CAP_TYPE_BROADCASTABLE    (1 << 15)       /* broadcastable */
75945 +
75946 +
75947 +extern void          elan_nullcap     (ELAN_CAPABILITY *cap);
75948 +extern char         *elan_capability_string (ELAN_CAPABILITY *cap, char *str);
75949 +extern ELAN_LOCATION elan_vp2location (unsigned process, ELAN_CAPABILITY *cap);
75950 +extern int           elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap);
75951 +extern int           elan_nvps        (ELAN_CAPABILITY *cap);
75952 +extern int           elan_nlocal      (int node, ELAN_CAPABILITY *cap);
75953 +extern int           elan_maxlocal    (ELAN_CAPABILITY *cap);
75954 +extern int           elan_localvps    (int node, ELAN_CAPABILITY *cap, int *vps, int size);
75955 +extern int           elan_nrails      (ELAN_CAPABILITY *cap);
75956 +extern int           elan_rails       (ELAN_CAPABILITY *cap, int *rails);
75957 +extern int           elan_cap_overlap (ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2);
75958 +
75959 +/*
75960 + * capability creation/access fns provide for running
75961 + * new libelan code on old OS releases
75962 + */
75963 +extern int elan_lowcontext(ELAN_CAPABILITY *cap);
75964 +extern int elan_mycontext(ELAN_CAPABILITY *cap);
75965 +extern int elan_highcontext(ELAN_CAPABILITY *cap);
75966 +extern int elan_lownode(ELAN_CAPABILITY *cap);
75967 +extern int elan_highnode(ELAN_CAPABILITY *cap);
75968 +extern int elan_captype(ELAN_CAPABILITY *cap);
75969 +extern int elan_railmask(ELAN_CAPABILITY *cap);
75970 +
75971 +extern int elan_getenvCap (ELAN_CAPABILITY *cap, int index);
75972 +extern ELAN_CAPABILITY *elan_createCapability(void);
75973 +extern ELAN_CAPABILITY *elan_copyCapability(ELAN_CAPABILITY *from, int ctxShift);
75974 +extern int elan_generateCapability(char *string);
75975 +
75976 +typedef struct elan_cap_struct
75977 +{
75978 +       ELAN_CAP_OWNER   owner;
75979 +       ELAN_CAPABILITY  cap;
75980 +
75981 +       unsigned int     attached; /* count of people attached */
75982 +       unsigned int     active;   /* ie not being destroyed   */
75983 +} ELAN_CAP_STRUCT;
75984 +
75985 +#if ! defined(__KERNEL__)
75986 +extern void          elan_get_random_key(ELAN_USERKEY *key);
75987 +extern int           elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp);
75988 +#endif
75989 +
75990 +#if defined(__KERNEL__)
75991 +/* capability.c */
75992 +extern int elan_validate_cap  (ELAN_CAPABILITY *cap);
75993 +extern int elan_validate_map  (ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
75994 +
75995 +extern int elan_create_cap  (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
75996 +extern int elan_destroy_cap (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
75997 +extern int elan_create_vp   (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
75998 +extern int elan_destroy_vp  (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
75999 +
76000 +typedef        void (*ELAN_DESTROY_CB)(void *args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
76001 +
76002 +extern int elan_attach_cap  (ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB callback);
76003 +extern int elan_detach_cap  (ELAN_CAPABILITY *cap, unsigned int rail);
76004 +
76005 +extern int elan_get_caps    (uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps);
76006 +extern int elan_cap_dump    (void);
76007 +#endif /* __KERNEL__ */
76008 +
76009 +
76010 +#endif /* __ELAN_CAPABILITY_H */
76011 +
76012 +/*
76013 + * Local variables:
76014 + * c-file-style: "linux"
76015 + * End:
76016 + */
76017 Index: linux-2.6.5-7.191/include/elan/cm.h
76018 ===================================================================
76019 --- linux-2.6.5-7.191.orig/include/elan/cm.h    2004-02-23 16:02:56.000000000 -0500
76020 +++ linux-2.6.5-7.191/include/elan/cm.h 2005-07-28 14:52:52.931666120 -0400
76021 @@ -0,0 +1,412 @@
76022 +/*
76023 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
76024 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
76025 + *
76026 + *    For licensing information please see the supplied COPYING file
76027 + *
76028 + */
76029 +
76030 +#ifndef __ELAN_CM_H
76031 +#define __ELAN_CM_H
76032 +
76033 +#ident "@(#)$Id: cm.h,v 1.14.2.1 2004/11/12 10:54:50 mike Exp $"
76034 +/*      $Source: /cvs/master/quadrics/epmod/cm.h,v $*/
76035 +
76036 +#include <elan/statemap.h>
76037 +
76038 +#if defined(DIGITAL_UNIX)
76039 +/*
76040 + * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible
76041 + * for handling all PCI interrupts and "funneled" operations.  When a kernel thread
76042 + * is made runnable, the scheduler will choose which cpu it will run on at that time,
76043 + * and will only execute a higher priority thread from another cpu's run queue when 
76044 + * it becomes totally idle (apparently also including user processes).  Also the 
76045 + * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
76046 + * at "preemptable" places - so again have no guarantee on when they will execute if
76047 + * they happen to be queued on a "hogged" cpu. The combination of these mean that the Tru64
76048 + * is incapable of scheduling a high priority kernel  thread within a deterministic time
76049 + * of when it should have become runnable - wonderful.
76050 + *
76051 + * Hence the solution Compaq have proposed is to schedule a timeout onto all of the
76052 + * cpu's timeouts lists at the maximum frequency that we could want to execute code,
76053 + * then to handle the scheduling of work between these ourselves.  With a bit of luck
76054 + * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
76055 + * to do our important work.
76056 + *
76057 + * However ..... this still is not reliable, since timeouts under Tru64 are still 
76058 + * only run when the currently running kernel thread "co-operates" by calling one
76059 + * of a number of functions which is permitted to run the "lwc"s AND is not holding
76060 + * any spinlocks AND is running at IPL 0.   However Compaq are unable to provide
76061 + * any upper limit on the time between the "lwc"'s being run and so it is possible
76062 + * for all 4 cpus to not run them for an unbounded time.
76063 + *
76064 + * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to 
76065 + * hardclock() to "solve" this problem for Memory Channel.  However, since it
76066 + * is called within the clock interrupt it is not permissible to acquire any
76067 + * spinlocks, nor to run for "too long".  This means that it is not possible to
76068 + * call the heartbeat algorithm from this hook.  
76069 + *
76070 + * Our solution to these limitations is to use the hook to cause an elan interrupt 
76071 + * to be delivered, by issuing a mis-aligned SetEvent command - this causes the device 
76072 + * to trap and ep_cprocTrap() can then run the heartbeat code.  However there is a lock 
76073 + * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to 
76074 + * use a trylock and if we fail, then hope that when the interrupt is delivered again
76075 + * some time later we will succeed.
76076 + *
76077 + * However this only works if the kernel is able to respond to the Elan interrupt,
76078 + * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
76079 + * not been taken for more than an CM_TIMER_SCHEDULE_TIMEOUT interval.
76080 + *
76081 + * In fact this is exactly the mechanism that other operating systems use to
76082 + * execute timeouts, since the hardclock interrupt posts a low priority 
76083 + * "soft interrupt" which "pre-empts" the currently running thread and then
76084 + * executes the timeouts. To block timeouts you use splsoftclock() the same as 
76085 + * in Tru64.
76086 + */
76087 +#define PER_CPU_TIMEOUT                        TRUE
76088 +#endif
76089 +
76090 +
76091 +#define CM_SGMTS_PER_LEVEL             8                       /* maximum nodes in each segment */
76092 +#define CM_MAX_LEVELS                  6                       /* maximum depth of tree */
76093 +
76094 +/* message buffers/dmas/events etc */
76095 +#define CM_NUM_NODE_MSG_BUFFERS                (CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL) /* subordinates and leader */
76096 +#define CM_NUM_SPARE_MSG_BUFFERS       8                       /* spare msg buffers for non-connected nodes */
76097 +#define CM_NUM_MSG_BUFFERS             (CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
76098 +
76099 +#define CM_INPUTQ_ENTRIES              128                     /* # entries in input queue */
76100 +
76101 +#define CM_PERIODIC_DISCOVER_INTERVAL  (5000)          /* 5s (infrequent resolution of established leader conflicts) */
76102 +#define CM_URGENT_DISCOVER_INTERVAL    (50)            /* 0.05s (more frequently than heartbeats 'cause they don't retry) */
76103 +#define CM_HEARTBEAT_INTERVAL          (125)           /* 0.125s */
76104 +#define CM_TIMER_SCHEDULE_TIMEOUT      (4000)          /* 4s     Maximum time before a timer that's scheduled to run gets to run (eg blocked in interrupt handlers etc) */
76105 +#define CM_THREAD_SCHEDULE_TIMEOUT     (30000)         /* 30s    Maximum time before a thread that's scheduled to run gets to run */
76106 +#define CM_THREAD_RUNNING_TIMEOUT      (30000)         /* 30s    Don't expect the manager thread to be running longer than this */
76107 +
76108 +#ifdef PER_CPU_TIMEOUT
76109 +#define CM_PERCPU_TIMEOUT_INTERVAL     (50)            /* 0.05s (must be less than all above intervals) */
76110 +#define CM_PACEMAKER_INTERVAL          (500)           /* 0.5s */
76111 +
76112 +#define CM_HEARTBEAT_OVERDUE           (250)           /* 0.25s Maximum time a timeout can be overdue before taking extreme action */
76113 +#endif
76114 +
76115 +#define CM_P2P_DMA_RETRIES             31
76116 +
76117 +/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
76118 + * attempts to send one to be successfully received */
76119 +#define CM_P2P_MSG_RETRIES             8
76120 +
76121 +/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
76122 + * to send one to be successfully received. */
76123 +#define CM_BCAST_MSG_RETRIES           40
76124 +
76125 +/* Heartbeat timeout allows for a node stalling and still getting its
76126 + * heartbeat. The 2 is to allow for unsynchronised polling times. */
76127 +#define CM_HEARTBEAT_TIMEOUT           (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
76128 +
76129 +/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
76130 + * who don't see discovery are considered dead by their leader.  This
76131 + * ensures that by the time a node "discovers" it is a leader of a segment,
76132 + * the previous leader of that segment will have been deemed to be dead by
76133 + * its parent segment's leader */
76134 +#define CM_DISCOVER_TIMEOUT            (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
76135 +
76136 +#define CM_WAITING_TIMEOUT             (CM_DISCOVER_TIMEOUT * 100)
76137 +
76138 +/*
76139 + * Convert all timeouts specified in mS into "ticks"
76140 + */
76141 +#define MSEC2TICKS(MSEC)               (((MSEC)*HZ)/1000)
76142 +
76143 +
76144 +/* statemap entry */
76145 +typedef struct cm_state_entry
76146 +{
76147 +    int16_t           level;                   /* cluster level to apply to */
76148 +    int16_t          offset;                   /* from statemap_findchange() */
76149 +    uint16_t          seg[BT_NBIPUL/16];       /* ditto */
76150 +} CM_STATEMAP_ENTRY;
76151 +
76152 +/* offset is >= 0 for a change to apply and */
76153 +#define STATEMAP_NOMORECHANGES (-1)            /* end of a set of updates */
76154 +#define STATEMAP_RESET         (-2)            /* reset the target map */
76155 +#define STATEMAP_NOOP          (-3)            /* null token */
76156 +
76157 +/* CM message format */
76158 +typedef int8_t CM_SEQ;                         /* heartbeat sequence numbers; at least 2 bits, signed */
76159 +
76160 +/*
76161 + * The message header is received into the last 64 byte block of 
76162 + * the input queue and the Version *MUST* be the last word of the 
76163 + * block to ensure that we can see that the whole of the message
76164 + * has reached main memory after we've seen the input queue pointer
76165 + * have been updated.
76166 + */
76167 +typedef struct ep_cm_hdr
76168 +{
76169 +    uint32_t          Pad0;
76170 +    uint32_t          Pad1;
76171 +
76172 +    uint8_t           Type;
76173 +    uint8_t           Level;
76174 +    CM_SEQ            Seq;                     /* precision at least 2 bits each*/
76175 +    CM_SEQ            AckSeq;
76176 +    
76177 +    uint16_t          NumMaps;
76178 +    uint16_t          MachineId;
76179 +
76180 +    uint16_t          NodeId;
76181 +    uint16_t          Checksum;
76182 +
76183 +    uint32_t           Timestamp;
76184 +    uint32_t           ParamHash;
76185 +    uint32_t          Version;
76186 +} CM_HDR;
76187 +
76188 +#define CM_HDR_SIZE        sizeof (CM_HDR)
76189 +
76190 +typedef struct cm_msg
76191 +{
76192 +    union {
76193 +       CM_STATEMAP_ENTRY   Statemaps[1];               /* piggy-backed statemap updates start here */
76194 +       uint8_t             Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
76195 +    } Payload;
76196 +    
76197 +    CM_HDR                 Hdr;
76198 +} CM_MSG;
76199 +
76200 +/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
76201 +#define CM_MSG_MAXMAPS         (offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
76202 +#define CM_MSG_MAP(mapno)      (CM_MSG_MAXMAPS - (mapno) - 1)
76203 +
76204 +/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
76205 +#define CM_MSG_BASE(nmaps)     (nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
76206 +#define CM_MSG_SIZE(nmaps)     (sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
76207 +
76208 +#define CM_MSG_VERSION                         0xcad00005
76209 +#define CM_MSG_TYPE_RESOLVE_LEADER             0
76210 +#define CM_MSG_TYPE_DISCOVER_LEADER            1
76211 +#define CM_MSG_TYPE_NOTIFY                     2
76212 +#define CM_MSG_TYPE_DISCOVER_SUBORDINATE       3
76213 +#define CM_MSG_TYPE_IMCOMING                   4
76214 +#define CM_MSG_TYPE_HEARTBEAT                  5
76215 +#define CM_MSG_TYPE_REJOIN                     6
76216 +
76217 +/* CM machine segment */
76218 +typedef struct cm_sgmtMaps
76219 +{
76220 +    u_char       InputMapValid;                        /* Input map has been set */
76221 +    u_char       OutputMapValid;               /* Output map has been set */
76222 +    u_char       SentChanges;                  /* got an outstanding STATEMAP_NOMORECHANGES to send */
76223 +    statemap_t  *OutputMap;                    /* state to send */
76224 +    statemap_t  *InputMap;                     /* state received */
76225 +    statemap_t  *CurrentInputMap;              /* state being received */
76226 +} CM_SGMTMAPS;
76227 +
76228 +typedef struct cm_sgmt
76229 +{
76230 +   u_char       State;
76231 +   u_char       SendMaps;
76232 +   u_char       MsgAcked;
76233 +   CM_SEQ      MsgSeq;
76234 +   CM_SEQ      AckSeq;
76235 +   u_int       NodeId;
76236 +   long                UpdateTick;
76237 +   long                WaitingTick;
76238 +   uint32_t    Timestamp;
76239 +   CM_SGMTMAPS  Maps[CM_MAX_LEVELS];           /* Maps[i] == state for cluster level i */
76240 +   u_short      MsgNumber;                     /* msg buffer to use */
76241 +   u_short     NumMaps;                        /* # maps in message buffer */
76242 +   u_short      Level;
76243 +   u_short      Sgmt;
76244 +} CM_SGMT;
76245 +
76246 +#define CM_SGMT_ABSENT         0               /* no one there at all */
76247 +#define CM_SGMT_WAITING                1               /* waiting for subtree to connect */
76248 +#define CM_SGMT_COMING         2               /* expecting a subtree to reconnect */
76249 +#define CM_SGMT_PRESENT                3               /* connected */
76250 +
76251 +typedef struct cm_level
76252 +{
76253 +    int               SwitchLevel;
76254 +    u_int             MinNodeId;
76255 +    u_int              NumNodes;
76256 +    u_int              NumSegs;
76257 +    u_int              MySgmt;
76258 +   
76259 +    /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
76260 +    u_char             SubordinateMapValid[CM_MAX_LEVELS];
76261 +    statemap_t        *SubordinateMap[CM_MAX_LEVELS];
76262 +
76263 +    /* maps/flags for this cluster level */
76264 +    u_int              Online:1;                               /* I've gone online (seen myself running) */
76265 +    u_int             Restarting:1;                            /* driving my own restart bit */
76266 +    u_char            OfflineReasons;                          /* forced offline by broadcast */
76267 +
76268 +    u_char             GlobalMapValid;
76269 +    u_char             SubTreeMapValid;
76270 +    u_long            Connected;
76271 +
76272 +    statemap_t        *LocalMap;               /* state bits I drive */
76273 +    statemap_t        *SubTreeMap;             /* OR of my and my subtree states */
76274 +    statemap_t        *GlobalMap;              /* OR of all node states */
76275 +    statemap_t        *LastGlobalMap;          /* last map I saw */
76276 +    statemap_t        *TmpMap;                 /* scratchpad */
76277 +
76278 +    CM_SGMT           Sgmts[CM_SGMTS_PER_LEVEL];
76279 +} CM_LEVEL;
76280 +
76281 +#define CM_ROLE_LEADER_CANDIDATE       0
76282 +#define CM_ROLE_LEADER                 1
76283 +#define CM_ROLE_SUBORDINATE            2
76284 +
76285 +/* global status bits */
76286 +#define CM_GSTATUS_STATUS_MASK         0x03    /* bits nodes drive to broadcast their status */
76287 +#define CM_GSTATUS_ABSENT              0x00    /* Off the network */
76288 +#define CM_GSTATUS_STARTING            0x01    /* I'm waiting for everyone to see me online */
76289 +#define CM_GSTATUS_RUNNING              0x03   /* up and running */
76290 +#define CM_GSTATUS_CLOSING             0x02    /* I'm waiting for everyone to see me offline */
76291 +
76292 +#define CM_GSTATUS_ACK_MASK            0x0c    /* bits node drive to ack other status */
76293 +#define CM_GSTATUS_MAY_START           0x04    /* Everyone thinks I may not start */
76294 +#define CM_GSTATUS_MAY_RUN             0x08    /* Everyone thinks I may not run */
76295 +
76296 +#define CM_GSTATUS_RESTART             0x10    /* Someone thinks I should restart */
76297 +#define CM_GSTATUS_BITS                        5
76298 +
76299 +#define CM_GSTATUS_BASE(node)          ((node) * CM_GSTATUS_BITS)
76300 +
76301 +#if defined(PER_CPU_TIMEOUT)
76302 +typedef struct cm_timeout_data
76303 +{
76304 +    long               ScheduledAt;                            /* lbolt timeout was scheduled to run at */
76305 +
76306 +    unsigned long       EarlyCount;                            /* # times run early than NextRun */
76307 +    unsigned long      MissedCount;                            /* # times run on time - but someone else was running it */
76308 +    unsigned long       WastedCount;                           /* # times we failed to get the spinlock */
76309 +    unsigned long      WorkCount;                              /* # times we're the one running */
76310 +
76311 +    unsigned long      WorstDelay;                             /* worst scheduling delay */
76312 +    unsigned long      BestDelay;                              /* best scheduling delay */
76313 +
76314 +    unsigned long      WorstLockDelay;                         /* worst delay before getting rail->Lock */
76315 +
76316 +    unsigned long      WorstHearbeatDelay;                     /* worst delay before calling DoHeartbeatWork */
76317 +} CM_TIMEOUT_DATA;
76318 +#endif
76319 +
76320 +typedef struct cm_rail
76321 +{
76322 +    EP_RAIL          *Rail;                                    /* rail we're associated with */
76323 +    struct list_head   Link;                                   /*   and linked on the CM_SUBSYS */
76324 +
76325 +    uint32_t          ParamHash;                               /* hash of critical parameters */
76326 +    uint32_t           Timestamp;
76327 +    long              DiscoverStartTick;                       /* when discovery started */
76328 +
76329 +    unsigned int       NodeId;                                 /* my node id */
76330 +    unsigned int       NumNodes;                               /*   and number of nodes */
76331 +    unsigned int       NumLevels;                              /* number of levels computed from machine size */
76332 +    int                       BroadcastLevel;
76333 +    long              BroadcastLevelTick;
76334 +    unsigned int       TopLevel;                               /* level at which I'm not a leader */
76335 +    unsigned char      Role;                                   /* state at TopLevel */
76336 +
76337 +    EP_INPUTQ        *PolledQueue;                             /* polled input queue */
76338 +    EP_INPUTQ        *IntrQueue;                               /* intr input queue */
76339 +    EP_OUTPUTQ       *MsgQueue;                                /* message  */
76340 +    unsigned int       NextSpareMsg;                           /* next "spare" message buffer to use */
76341 +
76342 +    EP_CM_RAIL_STATS   Stats;                                  /* statistics */
76343 +
76344 +    kmutex_t          Mutex;
76345 +    spinlock_t        Lock;
76346 +    
76347 +    long              NextHeartbeatTime;                       /* next time to check/send heartbeats */
76348 +    long              NextDiscoverTime;                        /* next time to progress discovery  */
76349 +    long              NextRunTime;                             /* the earlier of the above two or intr requires inputq poll*/
76350 +
76351 +    unsigned int       OfflineReasons;                         /* forced offline by procfs/manager thread stuck */
76352 +
76353 +#if defined(PER_CPU_TIMEOUT)
76354 +    spinlock_t        HeartbeatTimeoutsLock;                   /* spinlock to sequentialise per-cpu timeouts */
76355 +    long              HeartbeatTimeoutsStarted;                /* bitmap of which timeouts have started */
76356 +    long              HeartbeatTimeoutsStopped;                /* bitmap of which timeouts have stopped */
76357 +    long              HeartbeatTimeoutsShouldStop;             /* flag to indicate timeouts should stop */
76358 +    kcondvar_t        HeartbeatTimeoutsWait;                   /* place to sleep waiting for timeouts to stop */
76359 +    long              HeartbeatTimeoutRunning;                 /* someone is running the timeout - don't try for the lock */
76360 +
76361 +    long              HeartbeatTimeoutOverdue;                 /* heartbeat seen as overdue - interrupt requested */
76362 +
76363 +    CM_TIMEOUT_DATA   *HeartbeatTimeoutsData;                  /* per timeout data */
76364 +#else
76365 +    struct timer_list  HeartbeatTimer;                         /* timer for heartbeat/discovery */
76366 +#endif
76367 +
76368 +    CM_LEVEL           Levels[CM_MAX_LEVELS];
76369 +} CM_RAIL;
76370 +
76371 +/* OfflineReasons (both per-rail and per-level) */
76372 +#define CM_OFFLINE_BROADCAST           (1 << 0)
76373 +#define CM_OFFLINE_PROCFS              (1 << 1)
76374 +#define CM_OFFLINE_MANAGER             (1 << 2)
76375 +
76376 +typedef struct cm_subsys
76377 +{
76378 +    EP_SUBSYS          Subsys;
76379 +    CM_RAIL            *Rails[EP_MAX_RAILS];
76380 +} CM_SUBSYS;
76381 +
76382 +extern int  MachineId;
76383 +
76384 +extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
76385 +extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
76386 +extern void cm_restart_comms (CM_RAIL *cmRail);
76387 +extern int  cm_init (EP_SYS *sys);
76388 +
76389 +extern void DisplayRail(EP_RAIL *rail);
76390 +extern void DisplaySegs (EP_RAIL *rail);
76391 +extern void DisplayStatus (EP_RAIL *rail);
76392 +
76393 +typedef struct proc_private
76394 +{
76395 +    struct nodeset_private *pr_next;
76396 +    EP_RAIL                *pr_rail;
76397 +    char                  *pr_data;
76398 +    int                     pr_data_len;
76399 +    unsigned               pr_off;
76400 +    unsigned               pr_len;
76401 +    DisplayInfo             pr_di;
76402 +} PROC_PRIVATE;
76403 +
76404 +extern void    proc_character_fill (long mode, char *fmt, ...);
76405 +extern int     proc_release (struct inode *inode, struct file *file);
76406 +extern ssize_t proc_read (struct file *file, char *buf, size_t count, loff_t *ppos);
76407 +
76408 +
76409 +extern void DisplayNodeMaps  (DisplayInfo *di, CM_RAIL *cmRail);
76410 +extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
76411 +extern void DisplayRailDo    (DisplayInfo *di, EP_RAIL *rail);
76412 +
76413 +extern int    cm_read_cluster(EP_RAIL *rail,char *page);
76414 +extern void   cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason);
76415 +
76416 +extern int    cm_svc_indicator_set      (EP_RAIL *rail, int svc_indicator);
76417 +extern int    cm_svc_indicator_clear    (EP_RAIL *rail, int svc_indicator);
76418 +extern int    cm_svc_indicator_is_set   (EP_RAIL *rail, int svc_indicator, int nodeId);
76419 +extern int    cm_svc_indicator_bitmap   (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
76420 +
76421 +/* cm_procfs.c */
76422 +extern void   cm_procfs_init (CM_SUBSYS *subsys);
76423 +extern void   cm_procfs_fini (CM_SUBSYS *subsys);
76424 +extern void   cm_procfs_rail_init (CM_RAIL *rail);
76425 +extern void   cm_procfs_rail_fini (CM_RAIL *rail);
76426 +
76427 +/*
76428 + * Local variables:
76429 + * c-file-style: "stroustrup"
76430 + * End:
76431 + */
76432 +#endif /* __ELAN_CM_H */
76433 +
76434 Index: linux-2.6.5-7.191/include/elan/compat.h
76435 ===================================================================
76436 --- linux-2.6.5-7.191.orig/include/elan/compat.h        2004-02-23 16:02:56.000000000 -0500
76437 +++ linux-2.6.5-7.191/include/elan/compat.h     2005-07-28 14:52:52.931666120 -0400
76438 @@ -0,0 +1,23 @@
76439 +/*
76440 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
76441 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
76442 + *
76443 + *    For licensing information please see the supplied COPYING file
76444 + *
76445 + */
76446 +
76447 +#ident "@(#)$Id: compat.h,v 1.1 2003/12/03 13:18:48 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
76448 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/compat.h,v $*/
76449 +
76450 +#ifndef __ELAN_COMPAT_H
76451 +#define __ELAN_COMPAT_H
76452 +
76453 +#define ELANMOD_STATS_MAP      ELAN_STATS_MAP
76454 +
76455 +#endif  /* __ELAN_COMPAT_H */
76456 +
76457 +/*
76458 + * Local variables:
76459 + * c-file-style: "stroustrup"
76460 + * End:
76461 + */
76462 Index: linux-2.6.5-7.191/include/elan/device.h
76463 ===================================================================
76464 --- linux-2.6.5-7.191.orig/include/elan/device.h        2004-02-23 16:02:56.000000000 -0500
76465 +++ linux-2.6.5-7.191/include/elan/device.h     2005-07-28 14:52:52.932665968 -0400
76466 @@ -0,0 +1,62 @@
76467 +/*
76468 + *    Copyright (c) 2003 by Quadrics Limited.
76469 + * 
76470 + *    For licensing information please see the supplied COPYING file
76471 + *
76472 + */
76473 +
76474 +#ident "@(#)$Id: device.h,v 1.5 2003/09/24 13:55:37 david Exp $"
76475 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/device.h,v $*/
76476 +
76477 +#ifndef __ELAN_DEVICE_H
76478 +#define __ELAN_DEVICE_H
76479 +
76480 +/* non-kernel headings */
76481 +typedef unsigned int ELAN_DEV_IDX;
76482 +
76483 +#if defined(__KERNEL__)
76484 +
76485 +/* device callbacks */
76486 +#define ELAN_DEV_OPS_VERSION ((u_int)1)
76487 +
76488 +typedef struct elan_dev_ops
76489 +{
76490 +       /* dev info */
76491 +       int (*get_position)          (void *user_data, ELAN_POSITION *position);
76492 +       int (*set_position)          (void *user_data, unsigned short nodeId, unsigned short numNodes);
76493 +
76494 +       /* cap */
76495 +
76496 +       u_int  ops_version;
76497 +} ELAN_DEV_OPS;
76498 +
76499 +typedef struct elan_dev_struct
76500 +{
76501 +       struct list_head node;
76502 +
76503 +       ELAN_DEV_IDX     devidx;
76504 +       ELAN_DEVINFO    *devinfo;
76505 +       void            *user_data;
76506 +       ELAN_DEV_OPS *ops;
76507 +} ELAN_DEV_STRUCT;
76508 +
76509 +/* device.c */
76510 +extern ELAN_DEV_IDX         elan_dev_register   (ELAN_DEVINFO    *devinfo, 
76511 +                                                   ELAN_DEV_OPS *ops,
76512 +                                                   void            *userdata);
76513 +extern int                  elan_dev_deregister (ELAN_DEVINFO *devinfo);
76514 +
76515 +extern ELAN_DEV_STRUCT * elan_dev_find       (ELAN_DEV_IDX devidx);
76516 +
76517 +extern ELAN_DEV_STRUCT * elan_dev_find_byrail(unsigned short deviceid, unsigned rail);
76518 +extern int                  elan_dev_dump       (void);
76519 +
76520 +#endif /* __KERNEL__ */
76521 +
76522 +#endif /* __ELAN_DEVICE_H */
76523 +
76524 +/*
76525 + * Local variables:
76526 + * c-file-style: "linux"
76527 + * End:
76528 + */
76529 Index: linux-2.6.5-7.191/include/elan/devinfo.h
76530 ===================================================================
76531 --- linux-2.6.5-7.191.orig/include/elan/devinfo.h       2004-02-23 16:02:56.000000000 -0500
76532 +++ linux-2.6.5-7.191/include/elan/devinfo.h    2005-07-28 14:52:52.932665968 -0400
76533 @@ -0,0 +1,92 @@
76534 +/*
76535 + *    Copyright (c) 2003 by Quadrics Limited.
76536 + * 
76537 + *    For licensing information please see the supplied COPYING file
76538 + *
76539 + */
76540 +
76541 +#ident "@(#)$Id: devinfo.h,v 1.11.2.1 2005/02/01 12:36:40 david Exp $"
76542 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.h,v $*/
76543 +
76544 +#ifndef __ELAN_DEVINFO_H
76545 +#define __ELAN_DEVINFO_H
76546 +
76547 +#define ELAN_MAX_LEVELS                        8                       /* maximum number of levels in switch network */
76548 +
76549 +typedef struct elan_position
76550 +{
76551 +       unsigned        pos_mode;                               /* mode we're operating in */
76552 +       unsigned        pos_nodeid;                             /* port this device connected to */
76553 +       unsigned        pos_levels;                             /* number of levels to top switch */
76554 +       unsigned        pos_nodes;                              /* number of nodes in the machine */
76555 +       unsigned        pos_random_disabled;                    /* levels at which "random" routing is not possible */
76556 +       unsigned char   pos_arity[ELAN_MAX_LEVELS];             /* number of downlinks per switch level */
76557 +} ELAN_POSITION;
76558 +
76559 +#define ELAN4_PARAM_PCI_PADDING_FLAGS          0               /* A bit field, representing good places to burst across the pci                      */
76560 +#define ELAN4_PARAM_EVENT_COPY_WIN             1               /* The num of cmds when it becomes quicker to send via event copy than write directly */
76561 +#define ELAN4_PARAM_WRITE_COMBINING            2               /* If set the device supports bursts accesses across the pci bus                      */
76562 +#define ELAN4_PARAM_DRIVER_FEATURES            11              /* device driver features */
76563 +#define ELAN4_PARAM_COUNT                      12
76564 +
76565 +/* values for ELAN4_PARAM_DRIVER_FEATURES, dev_features */
76566 +#define ELAN4_FEATURE_PCI_MAP          (1 << 0)                        /* must use pci mapping functions */
76567 +#define ELAN4_FEATURE_64BIT_READ       (1 << 1)                        /* must perform 64 bit PIO reads */
76568 +#define ELAN4_FEATURE_PIN_DOWN         (1 << 2)                        /* must pin down pages */
76569 +#define ELAN4_FEATURE_NO_WRITE_COMBINE (1 << 3)                        /* don't allow write combinig at all */
76570 +#define ELAN4_FEATURE_NO_IOPROC                (1 << 4)                        /* unpatched kernel or disabled by procfs */
76571 +#define ELAN4_FEATURE_NO_IOPROC_UPDATE (1 << 5)                        /* don't do coproc update xlation loading */
76572 +#define ELAN4_FEATURE_NO_PAGEFAULT     (1 << 6)                        /* don't do pagefaulting */
76573 +#define ELAN4_FEATURE_NO_PREFETCH      (1 << 7)                        /* don't allow prefetching of elan sdram/cports */
76574 +
76575 +typedef struct elan_params
76576 +{
76577 +       unsigned        values[ELAN4_PARAM_COUNT];
76578 +} ELAN_PARAMS;
76579 +
76580 +/* values for pos_mode */
76581 +#define ELAN_POS_UNKNOWN               0                       /* network position unknown */
76582 +#define ELAN_POS_MODE_SWITCHED         1                       /* connected to a switch */
76583 +#define ELAN_POS_MODE_LOOPBACK         2                       /* loopback connector */
76584 +#define ELAN_POS_MODE_BACKTOBACK       3                       /* cabled back-to-back to another node */
76585 +
76586 +typedef struct elan_devinfo
76587 +{
76588 +       unsigned short  dev_vendor_id;                          /* pci vendor id */
76589 +       unsigned short  dev_device_id;                          /* pci device id */
76590 +       unsigned char   dev_revision_id;                        /* pci revision id */
76591 +       unsigned char   dev_instance;                           /* device instance number */
76592 +       unsigned char   dev_rail;                               /* device rail number */
76593 +
76594 +       unsigned short  dev_driver_version;                     /* device driver version */
76595 +       unsigned short  dev_params_mask;                        /* mask for valid entries in dev_params array */
76596 +       ELAN_PARAMS     dev_params;                             /* device parametization */
76597 +
76598 +       unsigned        dev_num_down_links_value;               /* MRH hint as to machine size NEEDS coding XXXXX */
76599 +} ELAN_DEVINFO;
76600 +
76601 +#define PCI_VENDOR_ID_QUADRICS         0x14fc
76602 +#define PCI_DEVICE_ID_ELAN3            0x0000
76603 +#define   PCI_REVISION_ID_ELAN3_REVA   0x0000
76604 +#define   PCI_REVISION_ID_ELAN3_REVB   0x0001
76605 +#define PCI_DEVICE_ID_ELAN4            0x0001
76606 +#define   PCI_REVISION_ID_ELAN4_REVA   0x0000
76607 +#define   PCI_REVISION_ID_ELAN4_REVB   0x0001
76608 +
76609 +#if defined(__KERNEL__)
76610 +/* devinfo.c */
76611 +#include <elan/capability.h>
76612 +#include <elan/device.h>
76613 +extern int elan_get_devinfo  (ELAN_DEV_IDX devidx, ELAN_DEVINFO  *devinfo);
76614 +extern int elan_get_position (ELAN_DEV_IDX devidx, ELAN_POSITION *position);
76615 +extern int elan_set_position (ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes);
76616 +#endif /* __KERNEL__ */
76617 +
76618 +
76619 +#endif /* __ELAN_DEVINFO_H */
76620 +
76621 +/*
76622 + * Local variables:
76623 + * c-file-style: "linux"
76624 + * End:
76625 + */
76626 Index: linux-2.6.5-7.191/include/elan/elanmoddebug.h
76627 ===================================================================
76628 --- linux-2.6.5-7.191.orig/include/elan/elanmoddebug.h  2004-02-23 16:02:56.000000000 -0500
76629 +++ linux-2.6.5-7.191/include/elan/elanmoddebug.h       2005-07-28 14:52:52.932665968 -0400
76630 @@ -0,0 +1,63 @@
76631 +/*
76632 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
76633 + *
76634 + *    For licensing information please see the supplied COPYING file
76635 + *
76636 + */
76637 +
76638 +#ifndef _ELAN_DEBUG_H
76639 +#define _ELAN_DEBUG_H
76640 +
76641 +
76642 +#ident "$Id: elanmoddebug.h,v 1.5 2003/09/24 13:55:37 david Exp $"
76643 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmoddebug.h,v $ */
76644 +
76645 +#if defined(__KERNEL__)
76646 +
76647 +/* 0 | QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE */
76648 +extern int elan_debug_mode; 
76649 +extern int elan_debug_mask;
76650 +
76651 +#define ELAN_DBG_VP            0x00000001
76652 +#define ELAN_DBG_CAP            0x00000002
76653 +#define ELAN_DBG_CTRL           0x00000004
76654 +#define ELAN_DBG_SYS_FN         0x00000008
76655 +#define ELAN_DBG_ALL           0xffffffff
76656 +
76657 +
76658 +#if defined(DEBUG_PRINTF)
76659 +#  define ELAN_DEBUG0(m,fmt)                   ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt)             : (void)0)
76660 +#  define ELAN_DEBUG1(m,fmt,a)                 ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a)           : (void)0)
76661 +#  define ELAN_DEBUG2(m,fmt,a,b)               ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b)         : (void)0)
76662 +#  define ELAN_DEBUG3(m,fmt,a,b,c)             ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c)       : (void)0)
76663 +#  define ELAN_DEBUG4(m,fmt,a,b,c,d)           ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d)     : (void)0)
76664 +#  define ELAN_DEBUG5(m,fmt,a,b,c,d,e)         ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e)   : (void)0)
76665 +#  define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f)       ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e,f) : (void)0)
76666 +#ifdef __GNUC__
76667 +#  define ELAN_DEBUG(m,args...)                        ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode, ##args)         : (void)0)
76668 +#endif
76669 +
76670 +#else
76671 +
76672 +#  define ELAN_DEBUG0(m,fmt)                   (0)
76673 +#  define ELAN_DEBUG1(m,fmt,a)                 (0)
76674 +#  define ELAN_DEBUG2(m,fmt,a,b)               (0)
76675 +#  define ELAN_DEBUG3(m,fmt,a,b,c)             (0)
76676 +#  define ELAN_DEBUG4(m,fmt,a,b,c,d)           (0)
76677 +#  define ELAN_DEBUG5(m,fmt,a,b,c,d,e)         (0)
76678 +#  define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f)       (0)
76679 +#ifdef __GNUC__
76680 +#  define ELAN_DEBUG(m,args...)
76681 +#endif
76682 +
76683 +#endif /* DEBUG_PRINTF */
76684 +
76685 +
76686 +#endif /* __KERNEL__ */
76687 +#endif /* _ELAN_DEBUG_H */
76688 +
76689 +/*
76690 + * Local variables:
76691 + * c-file-style: "linux"
76692 + * End:
76693 + */
76694 Index: linux-2.6.5-7.191/include/elan/elanmod.h
76695 ===================================================================
76696 --- linux-2.6.5-7.191.orig/include/elan/elanmod.h       2004-02-23 16:02:56.000000000 -0500
76697 +++ linux-2.6.5-7.191/include/elan/elanmod.h    2005-07-28 14:52:52.932665968 -0400
76698 @@ -0,0 +1,59 @@
76699 +/*
76700 + *    Copyright (c) 2003 by Quadrics Limited.
76701 + * 
76702 + *    For licensing information please see the supplied COPYING file
76703 + *
76704 + */
76705 +
76706 +#ident "@(#)$Id: elanmod.h,v 1.10 2004/06/18 09:28:16 mike Exp $"
76707 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.h,v $*/
76708 +
76709 +#ifndef __ELAN_MOD_H
76710 +#define __ELAN_MOD_H
76711 +
76712 +#include <elan/devinfo.h>
76713 +#include <elan/device.h>
76714 +#include <elan/capability.h>
76715 +#include <elan/stats.h>
76716 +
76717 +#if defined(__KERNEL__)
76718 +
76719 +#include <elan/elanmoddebug.h>
76720 +
76721 +extern kmutex_t elan_mutex;
76722 +
76723 +/* elan_general.c */
76724 +extern int elan_init(void);
76725 +extern int elan_fini(void);
76726 +
76727 +/* return codes, -ve => errno, +ve => success */
76728 +#define ELAN_CAP_OK  (0)
76729 +#define ELAN_CAP_RMS (1)
76730 +
76731 +#define ELAN_USER_ATTACH    (1)
76732 +#define ELAN_USER_DETACH    (2)
76733 +#define ELAN_USER_P2P       (3)
76734 +#define ELAN_USER_BROADCAST (4)
76735 +
76736 +extern int elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use);
76737 +
76738 +#define ELAN_USER_BASE_CONTEXT_NUM     0x000                   /* first user allowable context */
76739 +#define ELAN_USER_TOP_CONTEXT_NUM      0x7FF                   /* last user allowable context */
76740 +
76741 +#define ELAN_RMS_BASE_CONTEXT_NUM      0x400                   /* reserved for RMS allocation */
76742 +#define ELAN_RMS_TOP_CONTEXT_NUM       0x7FF
76743 +
76744 +#define ELAN_USER_CONTEXT(ctx)         ((ctx) >= ELAN_USER_BASE_CONTEXT_NUM && \
76745 +                                        (ctx) <= ELAN_USER_TOP_CONTEXT_NUM)    
76746 +
76747 +#define ELAN_RMS_CONTEXT(ctx)          ((ctx) >= ELAN_RMS_BASE_CONTEXT_NUM && \
76748 +                                        (ctx) <= ELAN_RMS_TOP_CONTEXT_NUM)    
76749 +#endif /* __KERNEL__ */
76750 +
76751 +#endif /* __ELAN_MOD_H */
76752 +
76753 +/*
76754 + * Local variables:
76755 + * c-file-style: "linux"
76756 + * End:
76757 + */
76758 Index: linux-2.6.5-7.191/include/elan/elanmod_linux.h
76759 ===================================================================
76760 --- linux-2.6.5-7.191.orig/include/elan/elanmod_linux.h 2004-02-23 16:02:56.000000000 -0500
76761 +++ linux-2.6.5-7.191/include/elan/elanmod_linux.h      2005-07-28 14:52:52.933665816 -0400
76762 @@ -0,0 +1,140 @@
76763 +/*
76764 + *    Copyright (c) 2003 by Quadrics Ltd.
76765 + * 
76766 + *    For licensing information please see the supplied COPYING file
76767 + *
76768 + */
76769 +
76770 +#ident "@(#)$Id: elanmod_linux.h,v 1.6 2003/09/29 15:36:20 mike Exp $"
76771 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.h,v $*/
76772 +
76773 +#ifndef __ELAN_MOD_LINUX_H
76774 +#define __ELAN_MOD_LINUX_H
76775 +
76776 +#define ELANCRTL_USER_BASE             0x40
76777 +
76778 +/* stats */
76779 +typedef struct elanctrl_stats_get_next_struct
76780 +{
76781 +       ELAN_STATS_IDX   statidx; 
76782 +       ELAN_STATS_IDX  *next_statidx; /* return value */
76783 +} ELANCTRL_STATS_GET_NEXT_STRUCT;
76784 +#define ELANCTRL_STATS_GET_NEXT   _IOR   ('e', ELANCRTL_USER_BASE + 0,  ELANCTRL_STATS_GET_NEXT_STRUCT)
76785 +
76786 +typedef struct elanctrl_stats_find_index_struct
76787 +{
76788 +       caddr_t          block_name;
76789 +       ELAN_STATS_IDX  *statidx; /* return value */
76790 +       uint        *num_entries; /* return value */
76791 +} ELANCTRL_STATS_FIND_INDEX_STRUCT;
76792 +#define ELANCTRL_STATS_FIND_INDEX   _IOR   ('e', ELANCRTL_USER_BASE + 1,  ELANCTRL_STATS_FIND_INDEX_STRUCT)
76793 +
76794 +typedef struct elanctrl_stats_get_block_info_struct
76795 +{
76796 +       ELAN_STATS_IDX  statidx; 
76797 +       caddr_t       block_name; /* return value */
76798 +       uint        *num_entries; /* return value */
76799 +} ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT;
76800 +#define ELANCTRL_STATS_GET_BLOCK_INFO   _IOR   ('e', ELANCRTL_USER_BASE + 2, ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT)
76801 +
76802 +typedef struct elanctrl_stats_get_index_name_struct
76803 +{
76804 +       ELAN_STATS_IDX statidx; 
76805 +       uint           index;
76806 +       caddr_t        name; /* return value */
76807 +} ELANCTRL_STATS_GET_INDEX_NAME_STRUCT;
76808 +#define ELANCTRL_STATS_GET_INDEX_NAME   _IOR   ('e', ELANCRTL_USER_BASE + 3, ELANCTRL_STATS_GET_INDEX_NAME_STRUCT)
76809 +
76810 +typedef struct elanctrl_stats_clear_block_struct
76811 +{
76812 +       ELAN_STATS_IDX statidx; 
76813 +} ELANCTRL_STATS_CLEAR_BLOCK_STRUCT;
76814 +#define ELANCTRL_STATS_CLEAR_BLOCK   _IOR   ('e', ELANCRTL_USER_BASE + 4, ELANCTRL_STATS_CLEAR_BLOCK_STRUCT)
76815 +
76816 +typedef struct elanctrl_stats_get_block_struct
76817 +{
76818 +       ELAN_STATS_IDX statidx; 
76819 +       uint           entries;  
76820 +       ulong         *values; /* return values */
76821 +} ELANCTRL_STATS_GET_BLOCK_STRUCT;
76822 +#define ELANCTRL_STATS_GET_BLOCK        _IOR   ('e', ELANCRTL_USER_BASE + 5, ELANCTRL_STATS_GET_BLOCK_STRUCT)
76823 +
76824 +
76825 +typedef struct elanctrl_get_devinfo_struct
76826 +{
76827 +       ELAN_DEV_IDX  devidx; 
76828 +       ELAN_DEVINFO *devinfo; /* return values */
76829 +} ELANCTRL_GET_DEVINFO_STRUCT;
76830 +#define ELANCTRL_GET_DEVINFO        _IOR   ('e', ELANCRTL_USER_BASE + 6, ELANCTRL_GET_DEVINFO_STRUCT)
76831 +
76832 +typedef struct elanctrl_get_position_struct
76833 +{
76834 +       ELAN_DEV_IDX   devidx; 
76835 +       ELAN_POSITION *position; /* return values */
76836 +} ELANCTRL_GET_POSITION_STRUCT;
76837 +#define ELANCTRL_GET_POSITION        _IOR   ('e', ELANCRTL_USER_BASE + 7, ELANCTRL_GET_POSITION_STRUCT)
76838 +
76839 +typedef struct elanctrl_set_position_struct
76840 +{
76841 +       ELAN_DEV_IDX   devidx; 
76842 +       unsigned short nodeId;
76843 +       unsigned short numNodes;
76844 +} ELANCTRL_SET_POSITION_STRUCT;
76845 +#define ELANCTRL_SET_POSITION        _IOR   ('e', ELANCRTL_USER_BASE + 8, ELANCTRL_SET_POSITION_STRUCT)
76846 +
76847 +typedef struct elanctrl_create_cap_struct
76848 +{
76849 +       ELAN_CAPABILITY cap;
76850 +} ELANCTRL_CREATE_CAP_STRUCT;
76851 +#define ELANCTRL_CREATE_CAP             _IOW   ('e', ELANCRTL_USER_BASE + 9, ELANCTRL_CREATE_CAP_STRUCT)
76852 +
76853 +typedef struct elanctrl_destroy_cap_struct
76854 +{
76855 +       ELAN_CAPABILITY cap;
76856 +} ELANCTRL_DESTROY_CAP_STRUCT;
76857 +#define ELANCTRL_DESTROY_CAP             _IOW   ('e', ELANCRTL_USER_BASE + 10, ELANCTRL_DESTROY_CAP_STRUCT)
76858 +
76859 +typedef struct elanctrl_create_vp_struct
76860 +{
76861 +       ELAN_CAPABILITY cap;
76862 +       ELAN_CAPABILITY map;
76863 +} ELANCTRL_CREATE_VP_STRUCT;
76864 +#define ELANCTRL_CREATE_VP             _IOW   ('e', ELANCRTL_USER_BASE + 11, ELANCTRL_CREATE_VP_STRUCT)
76865 +
76866 +typedef struct elanctrl_destroy_vp_struct
76867 +{
76868 +       ELAN_CAPABILITY cap;
76869 +       ELAN_CAPABILITY map;
76870 +} ELANCTRL_DESTROY_VP_STRUCT;
76871 +#define ELANCTRL_DESTROY_VP          _IOW   ('e', ELANCRTL_USER_BASE + 12, ELANCTRL_DESTROY_VP_STRUCT)
76872 +
76873 +#define ELANCTRL_DEBUG_DUMP          _IO    ('e', ELANCRTL_USER_BASE + 13)
76874 +
76875 +typedef struct elanctrl_get_caps_struct
76876 +{
76877 +       uint            *number_of_results;
76878 +       uint             array_size;
76879 +       ELAN_CAP_STRUCT *caps;
76880 +} ELANCTRL_GET_CAPS_STRUCT;
76881 +#define ELANCTRL_GET_CAPS          _IOW   ('e', ELANCRTL_USER_BASE + 14, ELANCTRL_GET_CAPS_STRUCT)
76882 +
76883 +
76884 +typedef struct elanctrl_debug_buffer_struct
76885 +{
76886 +       caddr_t buffer;
76887 +       int     size;
76888 +} ELANCTRL_DEBUG_BUFFER_STRUCT;
76889 +#define ELANCTRL_DEBUG_BUFFER _IOW ('e', ELANCRTL_USER_BASE + 15, ELANCTRL_DEBUG_BUFFER_STRUCT)
76890 +
76891 +#define ELANMOD_PROCFS_IOCTL      "/proc/qsnet/elan/ioctl"
76892 +#define ELANMOD_PROCFS_VERSION    "/proc/qsnet/elan/version"
76893 +#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask"
76894 +#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode"
76895 +
76896 +#endif /* __ELAN_MOD_LINUX_H */
76897 +
76898 +/*
76899 + * Local variables:
76900 + * c-file-style: "linux"
76901 + * End:
76902 + */
76903 Index: linux-2.6.5-7.191/include/elan/elanmod_subsystem.h
76904 ===================================================================
76905 --- linux-2.6.5-7.191.orig/include/elan/elanmod_subsystem.h     2004-02-23 16:02:56.000000000 -0500
76906 +++ linux-2.6.5-7.191/include/elan/elanmod_subsystem.h  2005-07-28 14:52:52.933665816 -0400
76907 @@ -0,0 +1,138 @@
76908 +/*
76909 + *    Copyright (c) 2003 by Quadrics Limited.
76910 + * 
76911 + *    For licensing information please see the supplied COPYING file
76912 + *
76913 + */
76914 +
76915 +#ifndef __ELAN_SUBSYSTEM_H
76916 +#define __ELAN_SUBSYSTEM_H
76917 +
76918 +#include <sys/types.h>
76919 +#include <sys/param.h>
76920 +
76921 +#if defined( __KERNEL__) 
76922 +int elan_configure(
76923 +    cfg_op_t op,
76924 +    caddr_t  indata,
76925 +    ulong    indata_size,
76926 +    caddr_t  outdata,
76927 +    ulong    outdata_size);
76928 +#endif
76929 +
76930 +#define ELAN_KMOD_CODE(x)      ((x)+CFG_OP_SUBSYS_MIN)
76931 +#define ELAN_MAX_KMOD_CODES 100
76932 +
76933 +#define ELAN_SUBSYS "elan"
76934 +
76935 +#define ELAN_STATS_GET_NEXT    0x01
76936 +typedef struct {
76937 +       ELAN_STATS_IDX statidx;
76938 +       ELAN_STATS_IDX *next_statidx;   
76939 +} elan_stats_get_next_struct;
76940 +
76941 +
76942 +#define ELAN_STATS_FIND_INDEX   0x02
76943 +typedef struct {
76944 +       caddr_t          block_name;
76945 +       ELAN_STATS_IDX  *statidx; /* return value */
76946 +       uint        *num_entries; /* return value */
76947 +} elan_stats_find_index_struct;
76948 +
76949 +#define ELAN_STATS_GET_BLOCK_INFO  0x03
76950 +typedef struct {
76951 +       ELAN_STATS_IDX  statidx; 
76952 +       caddr_t       block_name; /* return value */
76953 +       uint        *num_entries; /* return value */
76954 +} elan_stats_get_block_info_struct;
76955 +
76956 +#define ELAN_STATS_GET_INDEX_NAME  0x04
76957 +typedef struct {
76958 +       ELAN_STATS_IDX statidx; 
76959 +       uint           index;
76960 +       caddr_t        name; /* return value */
76961 +} elan_stats_get_index_name_struct;
76962 +
76963 +#define ELAN_STATS_CLEAR_BLOCK  0x05
76964 +typedef struct {
76965 +       ELAN_STATS_IDX statidx; 
76966 +} elan_stats_clear_block_struct;
76967 +
76968 +#define ELAN_STATS_GET_BLOCK     0x06
76969 +typedef struct 
76970 +{
76971 +       ELAN_STATS_IDX statidx; 
76972 +       uint           entries;  
76973 +       ulong         *values; /* return values */
76974 +} elan_stats_get_block_struct;
76975 +
76976 +#define ELAN_GET_DEVINFO     0x07
76977 +typedef struct 
76978 +{
76979 +       ELAN_DEV_IDX  devidx; 
76980 +       ELAN_DEVINFO *devinfo; /* return values */
76981 +} elan_get_devinfo_struct;
76982 +
76983 +#define ELAN_GET_POSITION  0x08
76984 +typedef struct {
76985 +       ELAN_DEV_IDX   devidx; 
76986 +       ELAN_POSITION *position; /* return values */
76987 +} elan_get_position_struct;
76988 +
76989 +#define ELAN_SET_POSITION   0x09
76990 +typedef struct {
76991 +       ELAN_DEV_IDX   devidx; 
76992 +       unsigned short nodeId;
76993 +       unsigned short numNodes;
76994 +} elan_set_position_struct;
76995 +
76996 +#define ELAN_CREATE_CAP  0x0a
76997 +typedef struct {
76998 +       ELAN_CAPABILITY cap;
76999 +} elan_create_cap_struct;
77000 +
77001 +#define ELAN_DESTROY_CAP    0x0b
77002 +typedef struct {
77003 +       ELAN_CAPABILITY cap;
77004 +} elan_destroy_cap_struct;
77005 +
77006 +#define ELAN_CREATE_VP   0x0c
77007 +typedef struct {
77008 +       ELAN_CAPABILITY cap;
77009 +       ELAN_CAPABILITY map;
77010 +} elan_create_vp_struct;
77011 +
77012 +#define ELAN_DESTROY_VP    0x0d
77013 +typedef struct {
77014 +       ELAN_CAPABILITY cap;
77015 +       ELAN_CAPABILITY map;
77016 +} elan_destroy_vp_struct;
77017 +
77018 +
77019 +#define ELAN_DEBUG_DUMP   0x0e
77020 +
77021 +#define ELAN_GET_CAPS    0x0f
77022 +typedef struct {
77023 +       uint            *number_of_results;
77024 +       uint             array_size;
77025 +       ELAN_CAP_STRUCT *caps;
77026 +} elan_get_caps_struct;
77027 +
77028 +#define ELAN_DEBUG_BUFFER 0x10
77029 +typedef struct {
77030 +       caddr_t addr;
77031 +       int     len;
77032 +} elan_debug_buffer_struct;
77033 +
77034 +#define ELANMOD_PROCFS_IOCTL      "/proc/qsnet/elan/ioctl"
77035 +#define ELANMOD_PROCFS_VERSION    "/proc/qsnet/elan/version"
77036 +#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask"
77037 +#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode"
77038 +
77039 +#endif /* __ELAN_SUBSYSTEM_H */
77040 +
77041 +/*
77042 + * Local variables:
77043 + * c-file-style: "linux"
77044 + * End:
77045 + */
77046 Index: linux-2.6.5-7.191/include/elan/epcomms.h
77047 ===================================================================
77048 --- linux-2.6.5-7.191.orig/include/elan/epcomms.h       2004-02-23 16:02:56.000000000 -0500
77049 +++ linux-2.6.5-7.191/include/elan/epcomms.h    2005-07-28 14:52:52.935665512 -0400
77050 @@ -0,0 +1,635 @@
77051 +/*
77052 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
77053 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
77054 + *
77055 + *    For licensing information please see the supplied COPYING file
77056 + *
77057 + */
77058 +
77059 +#ifndef __ELAN_EPCOMMS_H
77060 +#define __ELAN_EPCOMMS_H
77061 +
77062 +#ident "$Id: epcomms.h,v 1.44.2.2 2004/11/12 10:54:50 mike Exp $"
77063 +/*      $Source: /cvs/master/quadrics/epmod/epcomms.h,v $ */
77064 +
77065 +#include <elan/kcomm.h>
77066 +#include <elan/bitmap.h>
77067 +
77068 +#define EPCOMMS_SUBSYS_NAME    "epcomms"
77069 +
77070 +/* message service numbers */
77071 +#define EP_MSG_SVC_EIP512              0x00                            /* Quadrics EIP services */
77072 +#define EP_MSG_SVC_EIP1K               0x01
77073 +#define EP_MSG_SVC_EIP2K               0x02
77074 +#define EP_MSG_SVC_EIP4K               0x03
77075 +#define EP_MSG_SVC_EIP8K               0x04
77076 +#define EP_MSG_SVC_EIP16K              0x05
77077 +#define EP_MSG_SVC_EIP32K              0x06
77078 +#define EP_MSG_SVC_EIP64K              0x07
77079 +#define EP_MSG_SVC_EIP128K             0x08
77080 +
77081 +#define EP_MSG_SVC_PFS                 0x09                            /* Quadrics PFS rpc service */
77082 +
77083 +#define EP_MSG_SVC_PORTALS_SMALL       0x10                            /* Lustre Portals */
77084 +#define EP_MSG_SVC_PORTALS_LARGE       0x11
77085 +
77086 +#define EP_MSG_NSVC                    0x40                            /* Max number of services */
77087 +
77088 +#define EP_MSGQ_ADDR(qnum)             (EP_EPCOMMS_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE)
77089 +
77090 +/*
77091 + * EP_ENVELOPE
77092 + *   Messages are sent by sending an envelope to the destination
77093 + *   describing the source buffers to transfer.  The receiving thread
77094 + *   then allocates a receive buffer and fetches the data by issuing
77095 + *   "get" dmas.
77096 + *
77097 + * NOTE:  envelopes are not explicitly converted to network byte order
77098 + *        since they are always transferred little endian as they are
77099 + *        copied to/from elan memory using word operations.
77100 + */
77101 +typedef struct ep_envelope
77102 +{
77103 +    uint32_t     Version;                                      /* Protocol version field */
77104 +
77105 +    EP_ATTRIBUTE  Attr;                                        /* Attributes */
77106 +
77107 +    EP_XID       Xid;                                          /* transaction id */
77108 +
77109 +    uint32_t     NodeId;                                       /* Source processor */
77110 +    uint32_t     Range;                                        /* range we're sending to (high << 16 | low) */
77111 +
77112 +    EP_ADDR      TxdRail;                                      /* address of per-rail txd */
77113 +    EP_NMD       TxdMain;                                      /* address of main memory portion of txd */
77114 +
77115 +    uint32_t      nFrags;                                      /* # fragments */
77116 +    EP_NMD       Frags[EP_MAXFRAG];                            /* network mapping handles of source data */
77117 +
77118 +    uint32_t      CheckSum;                                     /* holds the check sum value when active 
77119 +                                                                * must be after all members to be checksum'd
77120 +                                                                */
77121 +
77122 +    uint32_t     Pad[6];                                       /* Pad to 128 bytes */
77123 +} EP_ENVELOPE;
77124 +
77125 +#define EP_ENVELOPE_VERSION            0xdac10001
77126 +#define EP_ENVELOPE_SIZE               roundup (sizeof (EP_ENVELOPE), EP_BLK_SIZE)
77127 +
77128 +/*
77129 + * RPC payload - this small amount of data is transfered in
77130 + * the envelope for RPCs
77131 + */
77132 +typedef struct ep_payload
77133 +{
77134 +    uint32_t   Data[128/sizeof(uint32_t)];
77135 +} EP_PAYLOAD;
77136 +
77137 +#define EP_PAYLOAD_SIZE                        roundup (sizeof (EP_PAYLOAD), EP_BLK_SIZE)
77138 +
77139 +#define EP_INPUTQ_SIZE                 (EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE)
77140 +
77141 +/*
77142 + * EP_STATUSBLK
77143 + *   RPC completion transfers a status block to the client.
77144 + */
77145 +typedef struct ep_statusblk
77146 +{
77147 +    uint32_t   Data[128/sizeof(uint32_t)];
77148 +} EP_STATUSBLK;
77149 +
77150 +#define EP_STATUSBLK_SIZE              roundup (sizeof(EP_STATUSBLK), EP_BLK_SIZE)
77151 +
77152 +#define EP_RANGE(low,high)             ((high) << 16 | (low))
77153 +#define EP_RANGE_LOW(range)            ((range) & 0xFFFF)
77154 +#define EP_RANGE_HIGH(range)           (((range) >> 16) & 0xFFFF)
77155 +
77156 +/* return codes from functions, + 'res' parameter to txd callback, ep_rxd_status() */
77157 +typedef enum
77158 +{
77159 +    EP_SUCCESS         = 0,                                    /* message sent/received successfully */
77160 +    EP_RXD_PENDING     = -1,                                   /* rxd not completed by thread */
77161 +    EP_CONN_RESET      = -2,                                   /* virtual circuit reset */
77162 +    EP_NODE_DOWN       = -3,                                   /* node down - transmit not attempted */
77163 +    EP_MSG_TOO_BIG      = -4,                                  /* received message larger than buffer */
77164 +    EP_ENOMEM          = -5,                                   /* memory alloc failed */
77165 +    EP_EINVAL          = -6,                                   /* invalid parameters */
77166 +    EP_SHUTDOWN                = -7,                                   /* receiver is being shut down */
77167 +} EP_STATUS;
77168 +
77169 +/* forward declarations */
77170 +typedef struct ep_rxd          EP_RXD;
77171 +typedef struct ep_txd          EP_TXD;
77172 +typedef struct ep_rcvr_rail    EP_RCVR_RAIL;
77173 +typedef struct ep_rcvr         EP_RCVR;
77174 +typedef struct ep_xmtr_rail    EP_XMTR_RAIL;
77175 +typedef struct ep_xmtr         EP_XMTR;
77176 +typedef struct ep_comms_rail    EP_COMMS_RAIL;
77177 +typedef struct ep_comms_subsys  EP_COMMS_SUBSYS;
77178 +
77179 +typedef struct ep_rcvr_stats           EP_RCVR_STATS;
77180 +typedef struct ep_xmtr_stats           EP_XMTR_STATS;
77181 +typedef struct ep_rcvr_rail_stats      EP_RCVR_RAIL_STATS;
77182 +typedef struct ep_xmtr_rail_stats      EP_XMTR_RAIL_STATS;
77183 +
77184 +typedef void (EP_RXH)(EP_RXD *rxd);                            /* callback function from receive completion */
77185 +typedef void (EP_TXH)(EP_TXD *txd, void *arg, EP_STATUS res);  /* callback function from transmit completion  */
77186 +
77187 +/* Main memory portion shared descriptor */
77188 +typedef struct ep_rxd_main
77189 +{
77190 +    EP_ENVELOPE                Envelope;                               /* 128 byte aligned envelope */
77191 +    EP_PAYLOAD         Payload;                                /* 128 byte aligned payload */
77192 +    bitmap_t           Bitmap[BT_BITOUL(EP_MAX_NODES)];        /* broadcast bitmap */
77193 +    EP_STATUSBLK       StatusBlk;                              /* RPC status block to return */
77194 +    uint64_t           Next;                                   /* linked list when on active list (main address) */
77195 +    int32_t            Len;                                    /* Length of message received */
77196 +} EP_RXD_MAIN;
77197 +
77198 +#define EP_RXD_MAIN_SIZE       roundup (sizeof (EP_RXD_MAIN), EP_BLK_SIZE)
77199 +
77200 +/* Phases for message/rpc */
77201 +#ifndef __ELAN__
77202 +
77203 +/* Kernel memory portion of per-rail receive descriptor */
77204 +typedef struct ep_rxd_rail
77205 +{
77206 +    struct list_head    Link;                                  /* linked on freelist */
77207 +    EP_RCVR_RAIL       *RcvrRail;                              /* rvcr we're associated with */
77208 +    
77209 +    EP_RXD            *Rxd;                                    /* receive descriptor we're bound to */
77210 +} EP_RXD_RAIL;
77211 +
77212 +#define RXD_BOUND2RAIL(rxdRail,rcvrRail)       ((rxdRail) != NULL && ((EP_RXD_RAIL *) (rxdRail))->RcvrRail == (EP_RCVR_RAIL *) rcvrRail)
77213 +
77214 +struct ep_rxd
77215 +{
77216 +    struct list_head   Link;                                   /* linked on free/active list */
77217 +    EP_RCVR           *Rcvr;                                   /* owning receiver */
77218 +
77219 +    EP_RXD_MAIN               *RxdMain;                                /* shared main memory portion. */
77220 +    EP_NMD             NmdMain;                                /*  and network mapping descriptor */
77221 +
77222 +    EP_RXD_RAIL               *RxdRail;                                /* per-rail rxd we're bound to */
77223 +    
77224 +    EP_RXH            *Handler;                                /* completion function */
77225 +    void              *Arg;                                    /*    and arguement */
77226 +
77227 +    unsigned int       State;                                  /* RXD status (active,stalled,failed) */
77228 +
77229 +    EP_NMD             Data;                                   /* network mapping descriptor for user buffer */
77230 +
77231 +    int                        nFrags;                                 /* network mapping descriptor for put/get/complete */
77232 +    EP_NMD             Local[EP_MAXFRAG];
77233 +    EP_NMD             Remote[EP_MAXFRAG];
77234 +
77235 +    long               NextRunTime;                            /* time to resend failover/map requests */
77236 +    EP_XID             MsgXid;                                 /*   and transaction id */
77237 +
77238 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
77239 +    struct list_head   CheckSumLink;                           /* linked on check sum list */
77240 +#endif
77241 +};
77242 +
77243 +#define EP_NUM_RXD_PER_BLOCK   16
77244 +
77245 +/* rxd->State */
77246 +#define EP_RXD_FREE            0
77247 +
77248 +#define EP_RXD_RECEIVE_UNBOUND 1
77249 +#define EP_RXD_RECEIVE_ACTIVE  2
77250 +
77251 +#define EP_RXD_PUT_ACTIVE      3
77252 +#define EP_RXD_PUT_STALLED     4
77253 +#define EP_RXD_GET_ACTIVE      5
77254 +#define EP_RXD_GET_STALLED     6
77255 +
77256 +#define EP_RXD_COMPLETE_ACTIVE 7
77257 +#define EP_RXD_COMPLETE_STALLED        8
77258 +
77259 +#define EP_RXD_RPC_IN_PROGRESS 9
77260 +#define EP_RXD_COMPLETED       10      
77261 +
77262 +#define EP_RXD_BEEN_ABORTED    11                              /* rxd was aborted while in a private state */
77263 +
77264 +typedef struct ep_rxd_block
77265 +{
77266 +    struct list_head   Link;
77267 +
77268 +    EP_NMD             NmdMain;
77269 +
77270 +    EP_RXD             Rxd[EP_NUM_RXD_PER_BLOCK];
77271 +} EP_RXD_BLOCK;
77272 +
77273 +struct ep_rcvr_rail_stats 
77274 +{
77275 +    EP_STATS_COUNT rx;
77276 +    EP_STATS_COUNT rx_len;
77277 +};
77278 +
77279 +struct ep_rcvr_rail
77280 +{
77281 +    EP_RCVR           *Rcvr;                                   /* associated receiver */
77282 +    EP_COMMS_RAIL      *CommsRail;                             /* comms rail */
77283 +
77284 +    struct proc_dir_entry *procfs_root;                         /* root of this rcvr_rail's procfs entry */
77285 +    EP_RCVR_RAIL_STATS     stats;                               /* generic rcvr_rail stats */
77286 +};
77287 +
77288 +struct ep_rcvr_stats
77289 +{
77290 +    EP_STATS_COUNT rx;
77291 +    EP_STATS_COUNT rx_len;
77292 +};
77293 +
77294 +struct ep_rcvr
77295 +{
77296 +    struct list_head  Link;                                    /* queued on subsystem */
77297 +    EP_COMMS_SUBSYS  *Subsys;                                  /* kernel comms subsystem */
77298 +    EP_SERVICE        Service;                                 /* service number */
77299 +
77300 +    unsigned int      InputQueueEntries;                       /* # entries on receive queue */
77301 +
77302 +    EP_RAILMASK              RailMask;                                 /* bitmap of which rails are available */
77303 +    EP_RCVR_RAIL     *Rails[EP_MAX_RAILS];
77304 +
77305 +    spinlock_t       Lock;                                     /* spinlock for rails/receive lists */
77306 +
77307 +    struct list_head  ActiveDescList;                          /* List of pending/active receive descriptors */
77308 +
77309 +    EP_XID_CACHE      XidCache;                                        /* XID cache (protected by Lock) */
77310 +
77311 +    struct list_head  FreeDescList;                            /* List of free receive descriptors */
77312 +    unsigned int      FreeDescCount;                           /*   and number on free list */
77313 +    unsigned int      TotalDescCount;                           /*   total number created */
77314 +    spinlock_t       FreeDescLock;                             /*   and lock for free list */
77315 +    kcondvar_t       FreeDescSleep;                            /*   with place to sleep for rx desc */
77316 +    int                      FreeDescWanted;                           /*   and flag */
77317 +    struct list_head  DescBlockList;
77318 +
77319 +    unsigned int      ForwardRxdCount;                         /* count of rxd's being forwarded */
77320 +    unsigned int      CleanupWaiting;                          /* waiting for cleanup */
77321 +    kcondvar_t       CleanupSleep;                             /*   and place to sleep */
77322 +
77323 +    struct proc_dir_entry *procfs_root;                         /* place where this rcvr's proc entry is */
77324 +    EP_RCVR_STATS          stats;                                    
77325 +};
77326 +
77327 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
77328 +#define EP_ENVELOPE_CHECK_SUM      (1<<31)
77329 +extern uint32_t ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags);
77330 +#endif
77331 +
77332 +#endif /* ! __ELAN__ */
77333 +
77334 +typedef struct ep_txd_main
77335 +{
77336 +    EP_STATUSBLK      StatusBlk;                               /* RPC status block */
77337 +    bitmap_t          Bitmap[BT_BITOUL(EP_MAX_NODES)];         /* broadcast bitmap */
77338 +} EP_TXD_MAIN;
77339 +
77340 +#define EP_TXD_MAIN_SIZE       roundup (sizeof (EP_TXD_MAIN), EP_BLK_SIZE)
77341 +
77342 +#ifndef __ELAN__
77343 +typedef struct ep_txd_rail
77344 +{
77345 +    struct list_head  Link;                                    /* linked on freelist */
77346 +    EP_XMTR_RAIL     *XmtrRail;                                        /* xmtr we're associated with */
77347 +
77348 +    EP_TXD          *Txd;                                      /* txd we're bound to */
77349 +} EP_TXD_RAIL;
77350 +
77351 +#define TXD_BOUND2RAIL(rxdRail,xmtrRail)       ((txdRail) != NULL && ((EP_TXD_RAIL *) (txdRail))->XmtrRail == (EP_XMTR_RAIL *) xmtrRail)
77352 +
77353 +struct ep_txd
77354 +{
77355 +    struct list_head  Link;                                    /* linked on free/active list */
77356 +    EP_XMTR         *Xmtr;                                     /* service we're associated with */
77357 +
77358 +    EP_TXD_MAIN             *TxdMain;                                  /* shared main memory portion */
77359 +    EP_NMD           NmdMain;                                  /*   and network mapping descriptor */
77360 +
77361 +    EP_TXD_RAIL      *TxdRail;                                 /* per-rail txd for this phase */
77362 +
77363 +    EP_TXH          *Handler;                                  /* completion function */
77364 +    void            *Arg;                                      /*    and arguement */
77365 +    
77366 +    unsigned short    NodeId;                                  /* node transmit is to. */
77367 +    EP_SERVICE        Service;                                 /*    and seervice */
77368 +
77369 +    long              TimeStamp;                                 /* time we where created at, to find sends taking too long */
77370 +    long             RetryTime;
77371 +    EP_BACKOFF       Backoff;
77372 +
77373 +    EP_ENVELOPE              Envelope;                                 /* envelope for transmit */
77374 +    EP_PAYLOAD       Payload;                                  /* payload for transmit */
77375 +};
77376 +
77377 +#define EP_NUM_TXD_PER_BLOCK   16
77378 +
77379 +/* "phase" parameter to BindTxd */
77380 +#define EP_TXD_PHASE_ACTIVE            1
77381 +#define EP_TXD_PHASE_PASSIVE           2
77382 +
77383 +typedef struct ep_txd_block
77384 +{
77385 +    struct list_head   Link;
77386 +    EP_NMD             NmdMain;
77387 +    EP_TXD             Txd[EP_NUM_TXD_PER_BLOCK];              /* transmit descriptors */
77388 +} EP_TXD_BLOCK;
77389 +
77390 +struct ep_xmtr_rail_stats
77391 +{
77392 +    EP_STATS_COUNT tx;
77393 +    EP_STATS_COUNT tx_len;
77394 +};
77395 +
77396 +struct ep_xmtr_rail
77397 +{
77398 +    EP_COMMS_RAIL      *CommsRail;                             /* associated comms rail */
77399 +    EP_XMTR           *Xmtr;                                   /* associated transmitter */
77400 +
77401 +    struct proc_dir_entry *procfs_root;                         /* place where this xmtr's proc entry is */
77402 +
77403 +    EP_XMTR_RAIL_STATS     stats;
77404 +};
77405 +
77406 +struct ep_xmtr_stats
77407 +{
77408 +    EP_STATS_COUNT tx;
77409 +    EP_STATS_COUNT tx_len;
77410 +};
77411 +
77412 +struct ep_xmtr
77413 +{
77414 +    struct list_head  Link;                                    /* Linked on subsys */
77415 +    EP_COMMS_SUBSYS  *Subsys;                                  /* kernel comms subsystem */
77416 +
77417 +    EP_RAILMASK              RailMask;                                 /* bitmap of which rails are available */
77418 +    EP_XMTR_RAIL     *Rails[EP_MAX_RAILS];                     /* per-rail state */
77419 +
77420 +    spinlock_t       Lock;                                     /* lock for active descriptor list */
77421 +
77422 +    struct list_head  ActiveDescList;                          /* list of active transmit descriptors */
77423 +
77424 +    EP_XID_CACHE      XidCache;                                        /* XID cache (protected by Lock) */
77425 +
77426 +    struct list_head  FreeDescList;                            /* List of free receive descriptors */
77427 +    unsigned int      FreeDescCount;                           /*   and number on free list */
77428 +    unsigned int      TotalDescCount;
77429 +    spinlock_t       FreeDescLock;                             /*   and lock for free list */
77430 +    kcondvar_t       FreeDescSleep;                            /*   with place to sleep for rx desc */
77431 +    int                      FreeDescWanted;                           /*   and flag */
77432 +    struct list_head  DescBlockList;
77433 +
77434 +    struct proc_dir_entry *procfs_root;                         /* place where this rcvr's proc entry is */
77435 +    EP_XMTR_STATS          stats;   
77436 +};
77437 +
77438 +/* forward descriptor */
77439 +#define EP_TREE_ARITY          3
77440 +
77441 +typedef struct ep_fwd_desc
77442 +{
77443 +    struct list_head    Link;                                  /* linked on forward/free lists */
77444 +    EP_RXD            *Rxd;                                    /* rxd to forward */
77445 +    EP_NMD             Data;                                   /* nmd of subset of receive buffer */
77446 +    unsigned           NumChildren;                            /*   number of places we're forwarding */
77447 +    unsigned           Children[EP_TREE_ARITY];
77448 +} EP_FWD_DESC;
77449 +
77450 +typedef struct ep_comms_ops
77451 +{
77452 +    void            (*DelRail) (EP_COMMS_RAIL *rail);
77453 +    void            (*DisplayRail) (EP_COMMS_RAIL *rail);
77454 +
77455 +    struct {
77456 +       void         (*AddRail) (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
77457 +       void         (*DelRail) (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
77458 +
77459 +       long         (*Check) (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
77460 +
77461 +       int          (*QueueRxd) (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
77462 +       void         (*RpcPut)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
77463 +       void         (*RpcGet)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
77464 +       void         (*RpcComplete)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
77465 +
77466 +       EP_RXD      *(*StealRxd)(EP_RCVR_RAIL *rcvrRail);
77467 +
77468 +       void         (*DisplayRcvr) (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
77469 +       void         (*DisplayRxd)  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
77470 +
77471 +       void         (*FillOutRailStats) (EP_RCVR_RAIL *rcvr_rail, char *str);
77472 +
77473 +    } Rcvr;
77474 +
77475 +    struct {
77476 +       void         (*AddRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail);
77477 +       void         (*DelRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail);
77478 +
77479 +       long         (*Check) (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
77480 +       
77481 +       int          (*BindTxd) (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
77482 +       void         (*UnbindTxd) (EP_TXD *txd, unsigned int phase);
77483 +       int          (*PollTxd) (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
77484 +
77485 +       void         (*DisplayXmtr) (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
77486 +       void         (*DisplayTxd)  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
77487 +
77488 +       int          (*CheckTxdState) (EP_TXD *txd);
77489 +
77490 +       void         (*FillOutRailStats) (EP_XMTR_RAIL *xmtr_rail, char *str);
77491 +
77492 +    } Xmtr;
77493 +} EP_COMMS_OPS;
77494 +
77495 +#define EP_RAIL_OP(commsRail, Which)   (commsRail)->Ops.Which
77496 +#define EP_RCVR_OP(rcvrRail, Which)    (rcvrRail)->CommsRail->Ops.Rcvr.Which
77497 +#define EP_XMTR_OP(xmtrRail, Which)    (xmtrRail)->CommsRail->Ops.Xmtr.Which
77498 +
77499 +/* "how" parameter to PollTxd */
77500 +#define POLL_TX_LIST           0
77501 +#define ENABLE_TX_CALLBACK     1
77502 +#define DISABLE_TX_CALLBACK    2
77503 +
77504 +struct ep_comms_rail
77505 +{
77506 +    struct list_head   Link;                                   /* Linked on subsys */
77507 +    EP_RAIL           *Rail;                                   /* kernel comms rail */
77508 +    EP_COMMS_SUBSYS    *Subsys;
77509 +    EP_COMMS_OPS        Ops;
77510 +
77511 +    EP_COMMS_RAIL_STATS Stats;                                 /* statistics */
77512 +};
77513 +
77514 +struct ep_comms_subsys
77515 +{
77516 +    EP_SUBSYS          Subsys;                                 /* is a kernel comms subsystem */
77517 +
77518 +    kmutex_t           Lock;                                   /* global lock */
77519 +
77520 +    EP_COMMS_STATS     Stats;                                  /* statistics */
77521 +
77522 +    struct list_head   Rails;                                  /* list of all rails */
77523 +
77524 +    struct list_head    Receivers;                             /* list of receivers */
77525 +    struct list_head   Transmitters;                           /* and transmitters */
77526 +
77527 +    /* forward/allocator thread */
77528 +    EP_KTHREAD         Thread;                                 /* place thread sleeps */
77529 +
77530 +    /* message passing "broadcast" forward lists */
77531 +    spinlock_t         ForwardDescLock;                        /* Lock for broadcast forwarding */
77532 +    struct list_head    ForwardDescList;                       /* List of rxd's to forward */
77533 +
77534 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
77535 +    spinlock_t         CheckSumDescLock;                       /* Lock for CheckSums */
77536 +    struct list_head    CheckSumDescList;                      /* List of rxd's to be CheckSumed */
77537 +#endif
77538 +
77539 +    EP_XMTR           *ForwardXmtr;                            /* and transmitter to forward with */
77540 +};
77541 +
77542 +/* epcomms.c subsystem initialisation */
77543 +extern unsigned int   epcomms_forward_limit;
77544 +
77545 +extern int           ep_comms_init (EP_SYS *sys);
77546 +extern void           ep_comms_display (EP_SYS *sys, char *how);
77547 +extern EP_RAILMASK    ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE service);
77548 +
77549 +/* epcomms_elan3.c */
77550 +extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail);
77551 +
77552 +/* epcomms_elan4.c */
77553 +extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail);
77554 +
77555 +/* epcommsTx.c */
77556 +extern int            TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail);
77557 +extern void          FreeTxd (EP_XMTR *xmtr, EP_TXD *txd);
77558 +
77559 +extern unsigned int   ep_txd_lowat;
77560 +extern long           ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime);
77561 +extern void           ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr);
77562 +extern void           ep_xmtr_flush_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail);
77563 +extern void           ep_xmtr_reloc_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail);
77564 +
77565 +extern void           ep_xmtr_fillout_stats      (EP_XMTR      *xmtr,      char *str);
77566 +extern void           ep_xmtr_rail_fillout_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
77567 +
77568 +extern void           ep_xmtr_txd_stat (EP_XMTR *xmtr, EP_TXD *txd);
77569 +
77570 +/* epcommsRx.c */
77571 +extern EP_RXD        *StealRxdFromOtherRail (EP_RCVR *rcvr);
77572 +
77573 +extern unsigned int   ep_rxd_lowat;
77574 +extern long          ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime);
77575 +extern void           ep_rcvr_flush_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail);
77576 +extern void           ep_rcvr_reloc_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail);
77577 +extern void           ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full);
77578 +
77579 +extern long           ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime);
77580 +
77581 +extern void           ep_rcvr_fillout_stats      (EP_RCVR      *rcvr,      char *str);
77582 +extern void           ep_rcvr_rail_fillout_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
77583 +
77584 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
77585 +extern void           ep_csum_rxds    (EP_COMMS_SUBSYS *subsys);
77586 +extern void           ep_rxd_queue_csum (EP_RXD *rxd);
77587 +#endif
77588 +
77589 +extern void           ep_rxd_received     (EP_RXD *rxd);
77590 +extern void           ep_rxd_received_now (EP_RXD *rxd);
77591 +
77592 +/* ep_procfs.c */
77593 +extern struct proc_dir_entry *ep_procfs_root;
77594 +
77595 +extern void ep_procfs_rcvr_xmtr_init(void);
77596 +extern void ep_procfs_rcvr_xmtr_fini(void);
77597 +
77598 +extern void ep_procfs_rcvr_add(EP_RCVR *rcvr);
77599 +extern void ep_procfs_rcvr_del(EP_RCVR *rcvr);
77600 +
77601 +extern void ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail);
77602 +extern void ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail);
77603 +
77604 +extern void ep_procfs_xmtr_add(EP_XMTR *xmtr);
77605 +extern void ep_procfs_xmtr_del(EP_XMTR *xmtr);
77606 +
77607 +extern void ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail);
77608 +extern void ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail);
77609 +
77610 +
77611 +/* Public Interface */
77612 +
77613 +
77614 +/* epcomms.c message xmtr functions */
77615 +extern EP_XMTR       *ep_alloc_xmtr (EP_SYS *sys);
77616 +extern void           ep_free_xmtr (EP_XMTR *xmtr);
77617 +
77618 +extern EP_STATUS      ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
77619 +                                          EP_TXH *handler, void *arg, EP_PAYLOAD *payload,
77620 +                                          EP_NMD *nmd, int nFrag);
77621 +extern EP_STATUS      ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, 
77622 +                                           EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, 
77623 +                                           EP_PAYLOAD *payload, EP_NMD *nmd, int nFrag);
77624 +extern EP_STATUS      ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
77625 +                                      EP_TXH *handler, void *arg, EP_PAYLOAD *payload,
77626 +                                      EP_NMD *nmd, int nFrag);
77627 +extern EP_STATUS      ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
77628 +                                           EP_TXH *handler, void *arg, EP_ENVELOPE *env, EP_PAYLOAD *payload, 
77629 +                                           bitmap_t *bitmap, EP_NMD *nmd, int nFrags);
77630 +
77631 +/* epcomms.c functions for use with polled transmits */
77632 +extern int            ep_poll_transmits (EP_XMTR *xmtr);
77633 +extern int            ep_enable_txcallbacks (EP_XMTR *xmtr);
77634 +extern int            ep_disable_txcallbacks (EP_XMTR *xmtr);
77635 +
77636 +/* epcomms.c message rcvr functions */
77637 +extern EP_RCVR       *ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvelopes);
77638 +extern void          ep_free_rcvr (EP_RCVR *rcvr);
77639 +
77640 +extern EP_STATUS      ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr);
77641 +extern void          ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr);
77642 +extern EP_STATUS      ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags);
77643 +extern EP_STATUS      ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags);
77644 +extern EP_STATUS      ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, 
77645 +                                      EP_NMD *from, EP_NMD *to, int nFrags);
77646 +extern void          ep_complete_receive (EP_RXD *rxd);
77647 +
77648 +/* railhints.c */
77649 +extern int            ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails);
77650 +extern int            ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId);
77651 +extern EP_RAILMASK    ep_xmtr_availrails (EP_XMTR *xmtr);
77652 +extern EP_RAILMASK    ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId);
77653 +extern int            ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails);
77654 +extern EP_RAILMASK    ep_rcvr_availrails (EP_RCVR *rcvr);
77655 +extern EP_RAILMASK    ep_rxd_railmask (EP_RXD *rxd);
77656 +
77657 +/* epcomms.c functions for accessing fields of rxds */
77658 +extern void          *ep_rxd_arg(EP_RXD *rxd);
77659 +extern int            ep_rxd_len(EP_RXD *rxd);
77660 +extern EP_STATUS      ep_rxd_status(EP_RXD *rxd);
77661 +extern int            ep_rxd_isrpc(EP_RXD *rxd);
77662 +extern EP_ENVELOPE   *ep_rxd_envelope(EP_RXD *rxd);
77663 +extern EP_PAYLOAD    *ep_rxd_payload(EP_RXD *rxd);
77664 +extern int            ep_rxd_node(EP_RXD *rxd);
77665 +extern EP_STATUSBLK  *ep_rxd_statusblk(EP_RXD *rxd);
77666 +
77667 +/* functions for accessing fields of txds */
77668 +extern int            ep_txd_node(EP_TXD *txd);
77669 +extern EP_STATUSBLK  *ep_txd_statusblk(EP_TXD *txd);
77670 +
77671 +/* functions for controlling how many processes are using module */
77672 +extern void              ep_mod_dec_usecount (void);
77673 +extern void              ep_mod_inc_usecount (void);
77674 +
77675 +extern EP_RAILMASK ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId);
77676 +extern int ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
77677 +
77678 +#endif /* ! __ELAN__ */
77679 +/*
77680 + * Local variables:
77681 + * c-file-style: "stroustrup"
77682 + * End:
77683 + */
77684 +#endif /* __ELAN_EPCOMMS_H */
77685 +
77686 Index: linux-2.6.5-7.191/include/elan/epsvc.h
77687 ===================================================================
77688 --- linux-2.6.5-7.191.orig/include/elan/epsvc.h 2004-02-23 16:02:56.000000000 -0500
77689 +++ linux-2.6.5-7.191/include/elan/epsvc.h      2005-07-28 14:52:52.935665512 -0400
77690 @@ -0,0 +1,36 @@
77691 +/*
77692 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
77693 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
77694 + *
77695 + *    For licensing information please see the supplied COPYING file
77696 + *
77697 + */
77698 +
77699 +#ifndef __ELAN_EPSVC_H
77700 +#define __ELAN_EPSVC_H
77701 +
77702 +#ident "@(#)$Id: epsvc.h,v 1.9 2004/02/13 10:03:27 david Exp $"
77703 +/*      $Source: /cvs/master/quadrics/epmod/epsvc.h,v $ */
77704 +
77705 +
77706 +#define EP_SVC_NUM_INDICATORS       8
77707 +#define EP_SVC_INDICATOR_MAX_NAME  32
77708 +
77709 +#define EP_SVC_EIP     0
77710 +#define EP_SVC_NAMES   {"eip", "1", "2", "3", "4", "5", "6", "7"};
77711 +
77712 +#if defined(__KERNEL__)
77713 +extern int         ep_svc_indicator_set      (EP_SYS *epsys, int svc_indicator);
77714 +extern int         ep_svc_indicator_clear    (EP_SYS *epsys, int svc_indicator);
77715 +extern int         ep_svc_indicator_is_set   (EP_SYS *epsys, int svc_indicator, int nodeId);
77716 +extern int         ep_svc_indicator_bitmap   (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
77717 +extern EP_RAILMASK ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId);
77718 +#endif
77719 +
77720 +#endif /* __ELAN_EPSVC_H */
77721 +
77722 +/*
77723 + * Local variables:
77724 + * c-file-style: "stroustrup"
77725 + * End:
77726 + */
77727 Index: linux-2.6.5-7.191/include/elan/kalloc.h
77728 ===================================================================
77729 --- linux-2.6.5-7.191.orig/include/elan/kalloc.h        2004-02-23 16:02:56.000000000 -0500
77730 +++ linux-2.6.5-7.191/include/elan/kalloc.h     2005-07-28 14:52:52.935665512 -0400
77731 @@ -0,0 +1,108 @@
77732 +/*
77733 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
77734 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
77735 + *
77736 + *    For licensing information please see the supplied COPYING file
77737 + *
77738 + */
77739 +
77740 +#ifndef __ELAN3_KALLOC_H
77741 +#define __ELAN3_KALLOC_H
77742 +
77743 +#ident "$Id: kalloc.h,v 1.11 2004/05/19 10:23:59 david Exp $"
77744 +/*      $Source: /cvs/master/quadrics/epmod/kalloc.h,v $ */
77745 +
77746 +#include <elan/rmap.h>
77747 +
77748 +/*
77749 + * Memory allocator
77750 + */
77751 +#define LN2_MIN_SIZE   6                                       /* 64 bytes */
77752 +#define LN2_MAX_SIZE   16                                      /* 64k bytes */
77753 +#define NUM_FREELISTS  (LN2_MAX_SIZE-LN2_MIN_SIZE + 1)
77754 +#define MIN_SIZE       (1 << LN2_MIN_SIZE)
77755 +#define MAX_SIZE       (1 << LN2_MAX_SIZE)
77756 +
77757 +#define HASHSHIFT      LN2_MAX_SIZE
77758 +#define NHASH          32
77759 +#define HASH(addr)     (((addr) >> HASHSHIFT) & (NHASH-1))
77760 +
77761 +typedef enum
77762 +{
77763 +    EP_ALLOC_TYPE_PRIVATE_SDRAM,
77764 +    EP_ALLOC_TYPE_PRIVATE_MAIN,
77765 +    EP_ALLOC_TYPE_SHARED_MAIN,
77766 +} EP_ALLOC_TYPE;
77767 +
77768 +typedef struct ep_pool
77769 +{
77770 +    EP_NMH               Handle;                               /* network mapping handle */
77771 +
77772 +    struct list_head     HashBase;                             /* linked on hash lists */
77773 +    struct list_head     HashTop;                              /* linked on hash lists */
77774 +
77775 +    struct list_head     Link[NUM_FREELISTS];                  /* linked on free lists */
77776 +    bitmap_t            *Bitmaps[NUM_FREELISTS];               /* bitmaps for each size */
77777 +
77778 +    union {
77779 +       sdramaddr_t     Sdram;
77780 +       unsigned long   Ptr;
77781 +    } Buffer;
77782 +} EP_POOL;
77783 +
77784 +typedef struct ep_alloc
77785 +{
77786 +    spinlock_t      Lock;
77787 +    
77788 +    EP_ALLOC_TYPE    Type;
77789 +    unsigned int     Perm;
77790 +
77791 +    EP_RMAP         *ResourceMap;
77792 +
77793 +    struct list_head HashBase[NHASH];
77794 +    struct list_head HashTop[NHASH];
77795 +    struct list_head Freelists[NUM_FREELISTS];
77796 +
77797 +    union {
77798 +       struct {
77799 +           EP_SYS             *System;
77800 +           struct list_head    Rails;
77801 +       } Shared;
77802 +       
77803 +       struct {
77804 +           EP_RAIL            *Rail;
77805 +       } Private;
77806 +    } Data;
77807 +} EP_ALLOC;
77808 +
77809 +extern void            ep_display_alloc (EP_ALLOC *alloc);
77810 +
77811 +extern void            ep_alloc_init (EP_RAIL *rail);
77812 +extern void            ep_alloc_fini (EP_RAIL *rail);
77813 +
77814 +extern sdramaddr_t     ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr);
77815 +extern void            ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr);
77816 +
77817 +extern sdramaddr_t     ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp);
77818 +extern void            ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size);
77819 +extern void           *ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addr);
77820 +extern void            ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size);
77821 +
77822 +extern sdramaddr_t     ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr);
77823 +extern void            *ep_elan2main (EP_RAIL *rail, EP_ADDR addr);
77824 +
77825 +extern void            ep_shared_alloc_init (EP_SYS *sys);
77826 +extern void            ep_shared_alloc_fini (EP_SYS *sys);
77827 +extern int             ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail);
77828 +extern void            ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail);
77829 +
77830 +extern void           *ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd);
77831 +extern void            ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd);
77832 +
77833 +#endif /* __ELAN_KALLOC_H */
77834 +
77835 +/*
77836 + * Local variables:
77837 + * c-file-style: "stroustrup"
77838 + * End:
77839 + */
77840 Index: linux-2.6.5-7.191/include/elan/kcomm.h
77841 ===================================================================
77842 --- linux-2.6.5-7.191.orig/include/elan/kcomm.h 2004-02-23 16:02:56.000000000 -0500
77843 +++ linux-2.6.5-7.191/include/elan/kcomm.h      2005-07-28 14:52:52.937665208 -0400
77844 @@ -0,0 +1,839 @@
77845 +/*
77846 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
77847 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
77848 + *
77849 + *    For licensing information please see the supplied COPYING file
77850 + *
77851 + */
77852 +
77853 +#ifndef __ELAN_KCOMM_H
77854 +#define __ELAN_KCOMM_H
77855 +
77856 +#ident "$Id: kcomm.h,v 1.71.2.8 2004/12/14 10:19:14 mike Exp $"
77857 +/*      $Source: /cvs/master/quadrics/epmod/kcomm.h,v $*/
77858 +#define EP_KCOMM_MAJOR_VERSION         3
77859 +#define EP_KCOMM_MINOR_VERSION         1
77860 +
77861 +#define EP_PROTOCOL_VERSION            1                       /* CM/KCOMM protocol revision */
77862 +
77863 +#define EP_MAX_NODES                   2048                    /* Max nodes we support */
77864 +#define EP_MAX_RAILS                   16                      /* max number of rails (we use an unsigned short for bitmaps !) */
77865 +#define EP_MAXFRAG                     4                       /* max number of fragments */
77866 +
77867 +#define EP_BLK_SIZE                    64                      /* align objects for elan access */
77868 +
77869 +/* Elan virtual address space */
77870 +#define EP_SYSTEM_QUEUE_BASE           0x00010000              /* Base address for system queues */
77871 +#define EP_MSGSYS_QUEUE_BASE           0x00020000              /* Base address for msgsys queues */
77872 +#define EP_EPCOMMS_QUEUE_BASE          0x00030000              /* Base address for message queues */
77873 +#define EP_DVMA_BASE                   0x10000000              /* elan address range for dvma mapping. */
77874 +#define EP_DVMA_TOP                    0xE0000000
77875 +
77876 +#define EP_SHARED_BASE                 0xE0000000              /* shared main/elan allocators */
77877 +#define EP_SHARED_TOP                  0xF0000000
77878 +
77879 +#define EP_PRIVATE_BASE                        0xF0000000              /* private main/elan allocators */
77880 +#define EP_PRIVATE_TOP                 0xF8000000
77881 +
77882 +#define EP_DVMA_RMAP_SIZE              1024                    /* size of resource map for dvma address space */
77883 +#define EP_SHARED_RMAP_SIZE            1024                    /* size of resource map for shared address space */
77884 +#define EP_PRIVATE_RMAP_SIZE           1024                    /* size of resource map for private address space */
77885 +
77886 +/* Input queue descriptors fit into 64 bytes */
77887 +#define EP_QUEUE_DESC_SIZE             64
77888 +
77889 +/* Timeouts for checking network position */
77890 +#define EP_POSITION_TIMEOUT            (4*HZ)          /* 1s   time to notice CheckNetworkPosition changes */
77891 +#define EP_WITHDRAW_TIMEOUT            (2*HZ)          /* 2s   time before withdrawing from unreachable nodes */
77892 +
77893 +/* Time to try again due to resource failure (eg malloc etc) */
77894 +#define RESOURCE_RETRY_TIME            (HZ/20)
77895 +
77896 +/* Time to retransmit message when send failed */
77897 +#define MSGBUSY_RETRY_TIME             (HZ/20)
77898 +
77899 +/* Time between retransmits of messages network flush requests */
77900 +#define MESSAGE_RETRY_TIME             (HZ/5)
77901 +
77902 +/* time to hold the context filter up to ensure that the
77903 + * next packet of a dma is guaranteed to get nacked (8mS) */
77904 +#define NETWORK_ERROR_TIMEOUT          (1 + roundup (HZ * 8 / 1000, 1))
77905 +
77906 +/* Time between retransmits of message failover requests */
77907 +#define FAILOVER_RETRY_TIME            (HZ/5)
77908 +
77909 +/* compute earliest time */
77910 +#define SET_NEXT_RUN_TIME(nextRunTime, time) \
77911 +do { \
77912 +    if ((nextRunTime) == 0 || AFTER(nextRunTime, (time)))\
77913 +       (nextRunTime) = (time);\
77914 +} while (0)
77915 +
77916 +/* DMA retry backoff/priorities/issue rings */
77917 +#define EP_NUM_BACKOFF                 8
77918 +#define EP_RETRY_STABALISING            0
77919 +#define EP_RETRY_BASE                  1
77920 +
77921 +#define EP_RETRY_CRITICAL              EP_RETRY_BASE
77922 +#define EP_RETRY_HIGH_PRI              (EP_RETRY_CRITICAL + 1)
77923 +#define EP_RETRY_HIGH_PRI_TIME         (1)
77924 +#define EP_RETRY_HIGH_PRI_RETRY                (EP_RETRY_HIGH_PRI + 1)
77925 +#define EP_RETRY_HIGH_PRI_RETRY_TIME   (2)
77926 +#define EP_RETRY_LOW_PRI               (EP_RETRY_HIGH_PRI_RETRY + EP_NUM_BACKOFF)
77927 +#define EP_RETRY_LOW_PRI_TIME          (2)
77928 +#define EP_RETRY_LOW_PRI_RETRY         (EP_RETRY_LOW_PRI + 1)
77929 +#define EP_RETRY_LOW_PRI_RETRY_TIME    (4)
77930 +#define EP_RETRY_ANONYMOUS             (EP_RETRY_LOW_PRI_RETRY + EP_NUM_BACKOFF)
77931 +#define EP_RETRY_ANONYMOUS_TIME                (10)
77932 +#define EP_RETRY_NETERR                        (EP_RETRY_ANONYMOUS + EP_NUM_BACKOFF)
77933 +#define EP_RETRY_NETERR_TIME           (10)
77934 +#define EP_NUM_RETRIES                 (EP_RETRY_NETERR + 1)
77935 +
77936 +typedef unsigned short EP_SERVICE;
77937 +
77938 +/* EP_ATTRIBUTE 32 bits 
77939 + *
77940 + * 0-2
77941 + *   for initial call :-
77942 + *     0 (0x1) EP_NO_ALLOC                             used once
77943 + *     1 (0x2) EP_NO_SLEEP                             used once
77944 + *     2 (0x4) EP_NOT_MYSELF                           used once
77945 + *
77946 + *   when stored and transmitted :-
77947 + *     0 (0x0) EP_MULTICAST                            envelope
77948 + *     1 (0x2) EP_RPC                                  envelope
77949 + *     2 (0x4) EP_HAS_PAYLOAD                          envelope
77950 + *
77951 + * 3-11
77952 + *     3   (0x08) EP_PREFRAIL_SET                      preserved
77953 + *     4-7 (0xf0) Pref Rail
77954 + *     8  (0x100) EP_NO_INTERRUPT
77955 + *     9  (0x200) EP_NO_FAILOVER
77956 + *
77957 + *    10 (0x400) EP_INTERRUPT_ENABLED                  internal
77958 + *    11 (0x800) EP_TXD_STABALISING                    internal
77959 + *
77960 + * 12-13 Not Used.
77961 + * 
77962 + * 14-15 (0xC000) Data Type.                           passed in
77963 + *    00 none. 
77964 + *    01 Service Indicator.
77965 + *    10 TimeOut.
77966 + *    11 RailMask
77967 + *          
77968 + * 16-31 (0x10000)  Data.  Service Indicator, TimeOut, RailMask, Pref Rail.
77969 + *         
77970 +*/
77971 +
77972 +typedef uint32_t EP_ATTRIBUTE;
77973 +
77974 +#define EP_LOCAL_ATTR_MASK 0x07
77975 +#define EP_CLEAR_LOCAL_ATTR(ATTR)  ( (ATTR) & ~EP_LOCAL_ATTR_MASK )
77976 +
77977 +#define EP_NO_ALLOC      0x01  /* Don't call allocators if no free descriptors */
77978 +#define EP_NO_SLEEP      0x02  /* Don't sleep if no free descriptors */
77979 +#define EP_NOT_MYSELF    0x04  /* Don't send multicast to me */
77980 +
77981 +#define EP_MULTICAST         0x01      /* Message is a multicast */
77982 +#define EP_RPC               0x02      /* Wait for RPC reply */
77983 +#define EP_HAS_PAYLOAD_BIT   0x04      /* transfer payload */
77984 +
77985 +
77986 +#define EP_PREFRAIL_SET  0x08  /* preferred rail is set (otherwise pick one from the NMDs) */
77987 +
77988 +#define EP_PREFRAIL_SHIFT  (4)
77989 +#define EP_PREFRAIL_MASK   0xf0
77990 +#define EP_IS_PREFRAIL_SET(ATTR)      (((ATTR) &  EP_PREFRAIL_SET) != 0)
77991 +#define EP_CLEAR_PREFRAIL(ATTR)       (((ATTR) & ~EP_PREFRAIL_SET) & ~EP_PREFRAIL_MASK) 
77992 +#define EP_SET_PREFRAIL(ATTR,RAIL)    (EP_CLEAR_PREFRAIL(ATTR) | (((RAIL) <<  EP_PREFRAIL_SHIFT ) &  EP_PREFRAIL_MASK ) |  EP_PREFRAIL_SET)
77993 +
77994 +
77995 +#define EP_ATTR2PREFRAIL(ATTR)            (((ATTR) & EP_PREFRAIL_MASK) >> EP_PREFRAIL_SHIFT)
77996 +
77997 +
77998 +#define EP_INTERRUPT_ENABLED 0x400     /* event interrupt enabled on EP_NO_INTERRUPT */
77999 +#define EP_TXD_STABALISING   0x800      /* flag to indicate this is attempting to stabilise */
78000 +
78001 +#define EP_IS_MULTICAST(ATTR)                 (((ATTR) &  EP_MULTICAST) != 0)
78002 +#define EP_SET_MULTICAST(ATTR)                ( (ATTR) |  EP_MULTICAST)
78003 +#define EP_CLEAR_MULTICAST(ATTR)              ( (ATTR) & ~EP_MULTICAST)
78004 +
78005 +#define EP_IS_RPC(ATTR)                       (((ATTR) &  EP_RPC) != 0)
78006 +#define EP_SET_RPC(ATTR)                      ( (ATTR) |  EP_RPC)
78007 +#define EP_CLEAR_RPC(ATTR)                    ( (ATTR) & ~EP_RPC)
78008 +
78009 +#define EP_HAS_PAYLOAD(ATTR)                  (((ATTR) &  EP_HAS_PAYLOAD_BIT) != 0)
78010 +#define EP_SET_HAS_PAYLOAD(ATTR)              ( (ATTR) |  EP_HAS_PAYLOAD_BIT)
78011 +#define EP_CLEAR_HAS_PAYLOAD(ATTR)            ( (ATTR) & ~EP_HAS_PAYLOAD_BIT)
78012 +
78013 +#define EP_IS_INTERRUPT_ENABLED(ATTR)         (((ATTR) &  EP_INTERRUPT_ENABLED) != 0)
78014 +#define EP_SET_INTERRUPT_ENABLED(ATTR)        ( (ATTR) |  EP_INTERRUPT_ENABLED)
78015 +#define EP_CLEAR_INTERRUPT_ENABLED(ATTR)      ( (ATTR) & ~EP_INTERRUPT_ENABLED)
78016 +
78017 +#define EP_IS_TXD_STABALISING(ATTR)           (((ATTR) &  EP_TXD_STABALISING) != 0)
78018 +#define EP_SET_TXD_STABALISING(ATTR)          ( (ATTR) |  EP_TXD_STABALISING)
78019 +#define EP_CLEAR_TXD_STABALISING(ATTR)        ( (ATTR) & ~EP_TXD_STABALISING)
78020 +
78021 +#define EP_NO_INTERRUPT      0x100     /* Don't generate completion interrupt (tx) */
78022 +#define EP_NO_FAILOVER       0x200     /* don't attempt rail failover, just abort */
78023 +
78024 +#define EP_IS_NO_INTERRUPT(ATTR)    (((ATTR) &  EP_NO_INTERRUPT) != 0)
78025 +#define EP_SET_NO_INTERRUPT(ATTR)   ( (ATTR) |  EP_NO_INTERRUPT)
78026 +#define EP_CLEAR_NO_INTERRUPT(ATTR) ( (ATTR) & ~EP_NO_INTERRUPT)
78027 +
78028 +#define EP_IS_NO_FAILOVER(ATTR)    (((ATTR) &  EP_NO_FAILOVER) != 0)
78029 +#define EP_SET_NO_FAILOVER(ATTR)   ( (ATTR) |  EP_NO_FAILOVER)
78030 +#define EP_CLEAR_NO_FAILOVER(ATTR) ( (ATTR) & ~EP_NO_FAILOVER)
78031 +
78032 +#define EP_TYPE_MASK           0xC000
78033 +#define EP_TYPE_SVC_INDICATOR  0x4000
78034 +#define EP_TYPE_TIMEOUT        0x8000
78035 +#define EP_TYPE_RAILMASK       0xC000
78036 +
78037 +#define EP_ATTR2TYPE(ATTR)        ( (ATTR) & EP_TYPE_MASK )
78038 +
78039 +#define EP_IS_SVC_INDICATOR(ATTR) (EP_ATTR2TYPE(ATTR) == EP_TYPE_SVC_INDICATOR)
78040 +#define EP_IS_TIMEOUT(ATTR)       (EP_ATTR2TYPE(ATTR) == EP_TYPE_TIMEOUT)
78041 +#define EP_IS_RAILMASK(ATTR)      (EP_ATTR2TYPE(ATTR) == EP_TYPE_RAILMASK)
78042 +#define EP_IS_NO_TYPE(ATTR)       (EP_ATTR2TYPE(ATTR) == 0)
78043 +
78044 +#define EP_DATA_SHIFT          (16)
78045 +#define EP_DATA_MASK           0xffff0000
78046 +
78047 +#define EP_ATTR2DATA(ATTR)     (((ATTR) & EP_DATA_MASK) >> EP_DATA_SHIFT)
78048 +#define EP_DATA2ATTR(DATA)     (((DATA) <<  EP_DATA_SHIFT) & EP_DATA_MASK)
78049 +
78050 +#define EP_CLEAR_DATA(ATTR)    (((ATTR) & ~EP_TYPE_MASK) & ~EP_DATA_MASK)
78051 +#define EP_SET_DATA(ATTR,TYPE,DATA)   (EP_CLEAR_DATA(ATTR) | ((TYPE) & EP_TYPE_MASK) | (((DATA) <<  EP_DATA_SHIFT) & EP_DATA_MASK))
78052 +
78053 +#define EP_DEFAULT_TIMEOUT     (HZ*30)
78054 +
78055 +#if !defined(offsetof)
78056 +#define offsetof(s, m)         (unsigned long)(&(((s *)0)->m))
78057 +#endif
78058 +#if !defined(roundup)
78059 +#define roundup(x, y)          ((((x)+((y)-1))/(y))*(y))
78060 +#endif
78061 +
78062 +/* 
78063 + * Message transaction ID's - these are unique 64 bit
78064 + * numbers which include the initial rail number.
78065 + */
78066 +typedef struct ep_xid
78067 +{
78068 +    uint32_t   Generation;
78069 +    uint32_t   Handle;
78070 +    uint64_t   Unique;
78071 +} EP_XID;
78072 +
78073 +#define EP_INVALIDATE_XID(xid) ((xid).Generation = (xid).Handle = (xid).Unique = 0)
78074 +
78075 +#define EP_XID_INVALID(xid)    ((xid).Generation == 0 && (xid).Handle == 0 && (xid).Unique == 0)
78076 +#define EP_XIDS_MATCH(a,b)     ((a).Generation == (b).Generation && (a).Handle == (b).Handle && (a).Unique == (b).Unique)
78077 +
78078 +typedef struct ep_backoff
78079 +{
78080 +    unsigned char      type;
78081 +    unsigned char      indx;
78082 +    unsigned short     count;
78083 +} EP_BACKOFF;
78084 +
78085 +/* values for "type" */
78086 +#define EP_BACKOFF_FREE                0
78087 +#define EP_BACKOFF_ENVELOPE    1
78088 +#define EP_BACKOFF_FETCH       2
78089 +#define EP_BACKOFF_DATA                3
78090 +#define EP_BACKOFF_DONE                4
78091 +#define EP_BACKOFF_STABILISE   5
78092 +
78093 +#ifndef __ELAN__
78094 +
78095 +/* forward declaration of types */
78096 +typedef struct ep_rail EP_RAIL;
78097 +typedef struct ep_sys  EP_SYS;
78098 +
78099 +#include <elan/nmh.h>
78100 +#include <elan/kmap.h>
78101 +#include <elan/statemap.h>
78102 +#include <elan/kalloc.h>
78103 +#include <elan/kthread.h>
78104 +#include <elan/kcomm_stats.h>
78105 +#include <elan/devinfo.h>
78106 +
78107 +typedef struct ep_callback
78108 +{
78109 +    struct ep_callback *Next;
78110 +    void              (*Routine)(void *, statemap_t *);
78111 +    void              *Arg;
78112 +} EP_CALLBACK;
78113 +
78114 +#define EP_CB_FLUSH_FILTERING          0
78115 +#define EP_CB_FLUSH_FLUSHING           1
78116 +#define EP_CB_PASSIVATED               2
78117 +#define EP_CB_FAILOVER                 3
78118 +#define EP_CB_DISCONNECTING            4
78119 +#define EP_CB_DISCONNECTED             5
78120 +#define EP_CB_NODESET                  6
78121 +#define EP_CB_COUNT                    7
78122 +
78123 +#endif /* !defined(__ELAN__) */
78124 +
78125 +/* Small unreliable system message queues */
78126 +#define EP_SYSTEMQ_INTR                        0                       /* input queue for cluster membership generating an interrupt */
78127 +#define EP_SYSTEMQ_POLLED              1                       /* input queue for cluster membership polled on clock tick */
78128 +#define EP_SYSTEMQ_MANAGER             2                       /* input queue for manager messages */
78129 +#define EP_NUM_SYSTEMQ                 64
78130 +
78131 +#define EP_SYSTEMQ_ADDR(qnum)          (EP_SYSTEM_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE)
78132 +#define EP_SYSTEMQ_DESC(base,qnum)     ((base) + (qnum) * EP_QUEUE_DESC_SIZE)
78133 +
78134 +#define EP_SYSTEMQ_MSG_ALIGN           64                      /* message sizes aligned to 64 byte boundaries */
78135 +#define EP_SYSTEMQ_MSG_MAX             (4*64)                  /* max message size */
78136 +
78137 +/* Special flag for Version field to indicate message not
78138 + * seen in main memory yet and time limit to poll for it */
78139 +#define EP_SYSTEMQ_UNRECEIVED                  0xdeadbabe
78140 +#define EP_SYSTEMQ_UNRECEIVED_TLIMIT           16384                   /* 1023 uS */
78141 +
78142 +#ifndef __ELAN__
78143 +
78144 +typedef void (EP_INPUTQ_HANDLER) (EP_RAIL *rail, void *arg, void *msg);
78145 +typedef void (EP_INPUTQ_CALLBACK) (EP_RAIL *rail, void *arg);
78146 +
78147 +typedef struct ep_inputq
78148 +{
78149 +    unsigned long      q_hidden;                               /* implementation hidden as ep3 or ep4 */
78150 +} EP_INPUTQ;
78151 +
78152 +typedef struct ep_outputq
78153 +{
78154 +    unsigned long      q_hidden;                               /* implementation hidden as ep3 or ep4 */
78155 +} EP_OUTPUTQ;
78156 +
78157 +/* returned values for ep_outputq_state */
78158 +#define EP_OUTPUTQ_BUSY                0
78159 +#define EP_OUTPUTQ_FAILED      1
78160 +#define EP_OUTPUTQ_FINISHED    2
78161 +
78162 +typedef struct ep_switch
78163 +{
78164 +    unsigned    present:1;
78165 +    unsigned   invalid:1;
78166 +    unsigned   link:3;
78167 +    unsigned   bcast:3;
78168 +    unsigned   lnr;
78169 +} EP_SWITCH;
78170 +
78171 +/*
78172 + * Network error fixup, flush, relocation messges
78173 + */
78174 +typedef struct ep_map_nmd_body
78175 +{
78176 +    uint32_t           nFrags;
78177 +    EP_RAILMASK                Railmask;
78178 +    EP_NMD             Nmd[EP_MAXFRAG];
78179 +} EP_MAP_NMD_BODY;
78180 +
78181 +typedef struct ep_failover_body
78182 +{
78183 +    EP_XID             Xid;
78184 +    EP_RAILMASK                Railmask;
78185 +} EP_FAILOVER_BODY;
78186 +
78187 +typedef struct ep_failover_txd
78188 +{
78189 +    EP_XID             Xid;
78190 +    uint32_t           Rail;
78191 +    EP_ADDR            TxdRail;
78192 +} EP_FAILOVER_TXD;
78193 +
78194 +typedef uint64_t EP_NETERR_COOKIE;
78195 +
78196 +#define EP_PANIC_STRLEN                31
78197 +
78198 +typedef struct ep_node_state
78199 +{
78200 +    unsigned char       State;
78201 +    unsigned char       NetworkErrorState;
78202 +    EP_RAILMASK         Railmask;
78203 +} EP_NODE_STATE;
78204 +
78205 +#define EP_MANAGER_MSG_SIZE            (2 * EP_SYSTEMQ_MSG_ALIGN)
78206 +
78207 +typedef struct ep_manager_msg_hdr
78208 +{
78209 +    EP_XID             Xid;                                    /* Message transaction id */
78210 +
78211 +    uint16_t           NodeId;                                 /* Originating node number */
78212 +    uint16_t           DestId;                                 /* destination node id */
78213 +
78214 +    uint16_t           Checksum;                               /* Message checksum */
78215 +    uint8_t            Rail;                                   /* Rail message associated with */
78216 +    uint8_t            Type;                                   /* Message type */
78217 +
78218 +    uint32_t           Pad;                                    /* pad to 32 bytes */
78219 +
78220 +    uint32_t           Version;                                /* Message Version */
78221 +} EP_MANAGER_MSG_HDR;
78222 +
78223 +typedef union ep_manager_msg_body
78224 +{
78225 +    unsigned char       Space[EP_MANAGER_MSG_SIZE - sizeof (EP_MANAGER_MSG_HDR)];
78226 +
78227 +    EP_NETERR_COOKIE   Cookies[2];                             /* EP_MSG_TYPE_NETERR */
78228 +    EP_MAP_NMD_BODY    MapNmd;                                 /* EP_MSG_TYPE_MAP_NMD */
78229 +    EP_FAILOVER_BODY   Failover;                               /* EP_MSG_TYPE_FAILOVER_REQUEST */
78230 +    EP_FAILOVER_TXD    FailoverTxd;                            /* EP_MSG_TYPE_FAILOVER_RESPONSE */
78231 +    unsigned char       PanicReason[EP_PANIC_STRLEN+1];                /* EP_MSG_TYPE_REMOTE_PANIC */
78232 +    EP_NODE_STATE       NodeState;                              /* EP_MSG_TYPE_GET_NODE_STATE_RESPONSE */   
78233 +    EP_SERVICE          Service;                                /* EP_MSG_TYPE_GET_NODE_STATE */
78234 +} EP_MANAGER_MSG_BODY;
78235 +
78236 +typedef struct ep_manager_msg
78237 +{
78238 +    EP_MANAGER_MSG_BODY Body;
78239 +    EP_MANAGER_MSG_HDR  Hdr;
78240 +} EP_MANAGER_MSG;
78241 +
78242 +#define EP_MANAGER_MSG_VERSION                         0xcad01000
78243 +#define EP_MANAGER_MSG_TYPE_REMOTE_PANIC               0x00
78244 +#define EP_MANAGER_MSG_TYPE_NETERR_REQUEST             0x01
78245 +#define EP_MANAGER_MSG_TYPE_NETERR_RESPONSE            0x02
78246 +#define EP_MANAGER_MSG_TYPE_FLUSH_REQUEST              0x03
78247 +#define EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE             0x04
78248 +#define EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST            0x05
78249 +#define EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE           0x06
78250 +#define EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST           0x07
78251 +#define EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE          0x08
78252 +#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE              0x09
78253 +#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE     0x0a
78254 +
78255 +/* Message types which should only be sent when a rail is connected */
78256 +#define EP_MANAGER_MSG_TYPE_CONNECTED(type)            (((type) & 1) == 1)
78257 +
78258 +#define EP_MANAGER_OUTPUTQ_SLOTS       128                     /* # entries in outputq */
78259 +#define EP_MANAGER_INPUTQ_SLOTS                128                     /* # entries in inputq */
78260 +#define EP_MANAGER_OUTPUTQ_RETRIES     31                      /* # retries for manager messages */
78261 +
78262 +/* XID's are allocated from a cache, which doesn't
78263 + * require locking since it relies on the caller to
78264 + * manage the locking for us.
78265 + */
78266 +typedef struct ep_xid_cache
78267 +{
78268 +    struct list_head   Link;
78269 +
78270 +    uint32_t           Handle;                                 /* my XID cache handle */
78271 +    uint64_t           Current;                                /* range of XID.Unique we can allocate from */
78272 +    uint64_t           Last;
78273 +
78274 +    void             (*MessageHandler)(void *arg, EP_MANAGER_MSG *);
78275 +    void              *Arg;
78276 +} EP_XID_CACHE;
78277 +
78278 +#define EP_XID_CACHE_CHUNKS    (10000)
78279 +
78280 +typedef struct ep_node_rail
78281 +{
78282 +    struct list_head    Link;                                  /* can be linked on work lists */
78283 +
78284 +    unsigned char       State;                                 /* node connection state */
78285 +    unsigned char      NetworkErrorState;                      /* reasons for keeping the context filter up */
78286 +    unsigned char      MessageState;                           /* state of messages during passivate/relocate */
78287 +
78288 +    EP_XID             MsgXid;                                 /* neterr/flush transaction id */
78289 +    long               NextRunTime;                            /* time to drop context filter for destroyed dma packet, or to send next request */
78290 +    EP_NETERR_COOKIE   NetworkErrorCookies[2];                 /* identify cookie for destroyed atomic packet */
78291 +
78292 +    uint32_t           Cookie;                                 /* per-node network error cookie */
78293 +    spinlock_t         CookieLock;                             /* and spinlock for it. */
78294 +
78295 +    struct list_head    StalledDmas;                           /* list of stalled DMAs */
78296 +} EP_NODE_RAIL;
78297 +
78298 +#define EP_NODE_DISCONNECTED           0                       /* node is disconnected */
78299 +#define EP_NODE_CONNECTING             1                       /* awaiting connection */
78300 +#define EP_NODE_CONNECTED              2                       /* node is connected */
78301 +#define EP_NODE_LEAVING_CONNECTED      3                       /* node is starting to disconnect */
78302 +#define EP_NODE_LOCAL_PASSIVATE        4                       /* flushing context filter/run queues */
78303 +#define EP_NODE_REMOTE_PASSIVATE       5                       /* stalling for neterr flush */
78304 +#define EP_NODE_PASSIVATED             6                       /* relocating active/passive messages */
78305 +#define EP_NODE_DISCONNECTING          7                       /* entering disconncted - abort remaining comms */
78306 +#define EP_NODE_NUM_STATES             8
78307 +
78308 +#define EP_NODE_NETERR_ATOMIC_PACKET   (1 << 0)
78309 +#define EP_NODE_NETERR_DMA_PACKET      (1 << 1)
78310 +
78311 +#define EP_NODE_PASSIVE_MESSAGES       (1 << 0)
78312 +#define EP_NODE_ACTIVE_MESSAGES                (1 << 1)
78313 +
78314 +/*
78315 + * Kernel thread code is loaded as a table.
78316 + */
78317 +typedef struct ep_symbol
78318 +{
78319 +    char   *name;
78320 +    EP_ADDR value;
78321 +} EP_SYMBOL;
78322 +
78323 +typedef struct ep_code
78324 +{
78325 +    u_char        *text;
78326 +    u_int         text_size;
78327 +    u_char        *data;
78328 +    u_int         data_size;
78329 +    u_char        *rodata;
78330 +    u_int         rodata_size;
78331 +    EP_SYMBOL     *symbols;
78332 +    
78333 +    int                   ntext;
78334 +    sdramaddr_t    pptext;
78335 +    EP_ADDR       etext;
78336 +    sdramaddr_t   _stext;
78337 +    sdramaddr_t          _rodata;
78338 +
78339 +    int                   ndata;
78340 +    sdramaddr_t    ppdata;
78341 +    EP_ADDR       edata;
78342 +    sdramaddr_t   _sdata;
78343 +} EP_CODE;
78344 +
78345 +typedef struct ep_switchstate
78346 +{
78347 +    unsigned char       linkid;
78348 +    unsigned char       LNR;
78349 +    unsigned char       bcast;
78350 +    unsigned char       uplink;
78351 +} EP_SWITCHSTATE;
78352 +
78353 +typedef struct ep_rail_ops
78354 +{
78355 +    void       (*DestroyRail) (EP_RAIL *rail);
78356 +
78357 +    int        (*StartRail) (EP_RAIL *rail);
78358 +    void       (*StallRail) (EP_RAIL *rail);
78359 +    void       (*StopRail) (EP_RAIL *rail);
78360 +
78361 +    sdramaddr_t (*SdramAlloc) (EP_RAIL *rail, EP_ADDR addr, unsigned size);
78362 +    void        (*SdramFree) (EP_RAIL *rail, sdramaddr_t addr, unsigned size);
78363 +    void        (*SdramWriteb) (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
78364 +    
78365 +    void       (*KaddrMap) (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr);
78366 +    void       (*SdramMap) (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr);
78367 +    void       (*Unmap) (EP_RAIL *rail, EP_ADDR eaddr, unsigned len);
78368 +
78369 +    void       *(*DvmaReserve) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages);
78370 +    void       (*DvmaRelease) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages, void *private);
78371 +    void       (*DvmaSetPte) (EP_RAIL *rail, void *private, unsigned index, physaddr_t phys, unsigned int perm);
78372 +    physaddr_t (*DvmaReadPte) (EP_RAIL *rail, void *private, unsigned index);
78373 +    void       (*DvmaUnload)(EP_RAIL *rail, void *private, unsigned index, unsigned npages);
78374 +    void       (*FlushTlb) (EP_RAIL *rail);
78375 +
78376 +    int        (*ProbeRoute) (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, 
78377 +                              int *linkdown, int attempts, EP_SWITCH *lsw);
78378 +    void       (*PositionFound) (EP_RAIL *rail, ELAN_POSITION *pos);
78379 +    int                (*CheckPosition) (EP_RAIL *rail);
78380 +    void       (*NeterrFixup) (EP_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
78381 +
78382 +    void       (*LoadSystemRoute) (EP_RAIL *rail, unsigned int vp, unsigned int lowNode, unsigned int highNode);
78383 +
78384 +    void       (*LoadNodeRoute) (EP_RAIL *rail, unsigned nodeId);
78385 +    void       (*UnloadNodeRoute) (EP_RAIL *rail, unsigned nodeId);
78386 +    void       (*LowerFilter) (EP_RAIL *rail, unsigned nodeId);
78387 +    void       (*RaiseFilter) (EP_RAIL *rail, unsigned nodeId);
78388 +    void       (*NodeDisconnected) (EP_RAIL *rail, unsigned nodeId);
78389 +
78390 +    void       (*FlushFilters) (EP_RAIL *rail);
78391 +    void       (*FlushQueues) (EP_RAIL *rail);
78392 +
78393 +
78394 +    EP_INPUTQ  *(*AllocInputQ) (EP_RAIL *rail, unsigned qnum, unsigned slotSize, unsigned slotCount,
78395 +                               void (*callback)(EP_RAIL *rail, void *arg), void *arg);
78396 +    void       (*FreeInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
78397 +    void       (*EnableInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
78398 +    void       (*DisableInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
78399 +    int                (*PollInputQ) (EP_RAIL *rail, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
78400 +
78401 +    EP_OUTPUTQ *(*AllocOutputQ) (EP_RAIL *rail, unsigned slotSize, unsigned slotCount);
78402 +    void       (*FreeOutputQ) (EP_RAIL *rail, EP_OUTPUTQ *outputq);
78403 +    void       *(*OutputQMsg) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum);
78404 +    int         (*OutputQState) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum);
78405 +    int                (*OutputQSend) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum, unsigned size,
78406 +                               unsigned vp, unsigned qnum, unsigned retries);
78407 +
78408 +    void        (*FillOutStats) (EP_RAIL *rail, char *str);
78409 +    void       (*Debug) (EP_RAIL *rail);
78410 +
78411 +} EP_RAIL_OPS;
78412 +
78413 +#define ep_alloc_inputq(rail,qnum,slotSize,slotCount,callback,arg) \
78414 +       (rail)->Operations.AllocInputQ(rail,qnum,slotSize,slotCount,callback,arg)
78415 +#define ep_free_inputq(rail,inputq) \
78416 +       (rail)->Operations.FreeInputQ(rail,inputq)
78417 +#define ep_enable_inputq(rail,inputq) \
78418 +       (rail)->Operations.EnableInputQ(rail,inputq)
78419 +#define ep_disable_inputq(rail,inputq) \
78420 +       (rail)->Operations.DisableInputQ(rail,inputq)
78421 +#define ep_poll_inputq(rail,inputq,maxCount,handler,arg) \
78422 +       (rail)->Operations.PollInputQ(rail,inputq,maxCount,handler,arg)
78423 +#define ep_alloc_outputq(rail,slotSize,slotCount)\
78424 +       (rail)->Operations.AllocOutputQ(rail,slotSize,slotCount)
78425 +#define ep_free_outputq(rail,outputq)\
78426 +       (rail)->Operations.FreeOutputQ(rail,outputq)
78427 +#define ep_outputq_msg(rail,outputq,slotNum)\
78428 +       (rail)->Operations.OutputQMsg(rail,outputq,slotNum)
78429 +#define ep_outputq_state(rail,outputq,slotNum)\
78430 +       (rail)->Operations.OutputQState(rail,outputq,slotNum)
78431 +#define ep_outputq_send(rail,outputq,slotNum,size,vp,qnum,retries)\
78432 +       (rail)->Operations.OutputQSend(rail,outputq,slotNum,size,vp,qnum,retries)
78433 +
78434 +struct ep_rail
78435 +{
78436 +    EP_SYS            *System;                                 /* "system" we've attached to */
78437 +
78438 +    unsigned char      Number;                                 /* Rail number */
78439 +    unsigned char       State;                                 /* Rail state */
78440 +    char               Name[32];                               /* Rail name */
78441 +
78442 +    struct list_head    ManagerLink;                           /* linked on ManagedRails list */
78443 +
78444 +    ELAN_DEVINFO       Devinfo;                                /* Device information for this rail */
78445 +    ELAN_POSITION       Position;                              /* Position on switch device is connected to */
78446 +
78447 +    EP_RAIL_OPS                Operations;                             /* device specific operations */
78448 +    EP_RAIL_STATS      Stats;                                  /* statistics */
78449 +
78450 +    EP_ALLOC            ElanAllocator;                         /* per-rail elan memory allocator */
78451 +    EP_ALLOC            MainAllocator;                         /* per-rail main memory allocator */
78452 +
78453 +    unsigned           TlbFlushRequired;                       /* lazy TLB flushing */
78454 +
78455 +    int                SwitchBroadcastLevel;                   /* current switch level ok for broadcast */
78456 +    unsigned long       SwitchBroadcastLevelTick;
78457 +
78458 +    int                        SwitchProbeLevel;                       /* result of last switch probe */
78459 +    EP_SWITCHSTATE      SwitchState[ELAN_MAX_LEVELS];
78460 +    EP_SWITCHSTATE      SwitchLast[ELAN_MAX_LEVELS];
78461 +    unsigned long       SwitchProbeTick[ELAN_MAX_LEVELS];
78462 +    
78463 +    /* Node disconnecting/connecting state */
78464 +    EP_CALLBACK        *CallbackList[EP_CB_COUNT];             /* List of callbacks */
78465 +    kmutex_t           CallbackLock;                           /*   and lock for it. */
78466 +    unsigned           CallbackStep;                           /*  step through UpdateConnectionState. */
78467 +
78468 +    /* back pointer for cluster membership */
78469 +    void              *ClusterRail;
78470 +
78471 +    /* Per node state for message passing */
78472 +    EP_NODE_RAIL       *Nodes;                                 /* array of per-node state */
78473 +    statemap_t         *NodeSet;                               /* per-rail statemap of connected nodes */
78474 +    statemap_t        *NodeChangeMap;                          /* statemap of nodes to being connected/disconnected */
78475 +    statemap_t        *NodeChangeTmp;                          /*   and temporary copies */
78476 +
78477 +    struct list_head    NetworkErrorList;                      /* list of nodes resolving network errors */
78478 +    struct list_head    LocalPassivateList;                    /* list of nodes in state LOCAL_PASSIVATE */
78479 +    struct list_head    RemotePassivateList;                   /* list of nodes waiting for remote network error flush */
78480 +    struct list_head    PassivatedList;                                /* list of nodes performing message relocation */
78481 +    struct list_head    DisconnectingList;                     /* list of nodes transitioning to disconnected */
78482 +
78483 +    EP_XID_CACHE       XidCache;                               /* XID cache for node messages (single threaded access) */
78484 +
78485 +    /* Manager messages */
78486 +    EP_INPUTQ         *ManagerInputQ;
78487 +    EP_OUTPUTQ        *ManagerOutputQ;
78488 +    unsigned           ManagerOutputQNextSlot;
78489 +    spinlock_t         ManagerOutputQLock;
78490 +
78491 +    /* /proc entries */
78492 +    struct proc_dir_entry *ProcDir;
78493 +    struct proc_dir_entry *SvcIndicatorDir;
78494 +    int                    CallbackRegistered;
78495 +};
78496 +
78497 +/* values for State */
78498 +#define EP_RAIL_STATE_UNINITIALISED    0                       /* device uninitialised */
78499 +#define EP_RAIL_STATE_STARTED          1                       /* device started but network position unknown */
78500 +#define EP_RAIL_STATE_RUNNING          2                       /* device started and position known */
78501 +#define EP_RAIL_STATE_INCOMPATIBLE     3                       /* device started, but position incompatible */
78502 +
78503 +typedef struct ep_rail_entry
78504 +{
78505 +    struct list_head   Link;
78506 +    EP_RAIL           *Rail;
78507 +} EP_RAIL_ENTRY;
78508 +
78509 +typedef struct ep_subsys
78510 +{
78511 +    EP_SYS            *Sys;
78512 +
78513 +    struct list_head   Link;                                   /* Linked on sys->Subsystems */
78514 +    char              *Name;                                   /* Name to lookup */
78515 +    
78516 +    void              (*Destroy)    (struct ep_subsys *subsys, EP_SYS *sys);
78517 +
78518 +    int                       (*AddRail)    (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
78519 +    void              (*RemoveRail) (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
78520 +} EP_SUBSYS;
78521 +
78522 +typedef struct ep_node
78523 +{
78524 +    EP_RAILMASK                ConnectedRails;
78525 +} EP_NODE;
78526 +
78527 +struct ep_sys
78528 +{
78529 +    EP_RAIL         *Rails[EP_MAX_RAILS];                      /* array of all available devices */
78530 +
78531 +    kmutex_t        StartStopLock;                             /* lock for starting stopping rails */
78532 +
78533 +    ELAN_POSITION    Position;                                 /* primary node position */
78534 +
78535 +    EP_NMH_TABLE     MappingTable;                             /* Network mapping handle table */
78536 +
78537 +    EP_ALLOC        Allocator;                                 /* shared main memory allocator */
78538 +
78539 +    EP_DVMA_STATE    DvmaState;                                        /* dvma state */
78540 +
78541 +    kmutex_t        SubsysLock;                               /* lock on the Subsytems list */
78542 +    struct list_head Subsystems;                               /* list of subsystems */
78543 +
78544 +    /* device manager state */
78545 +    struct list_head ManagedRails;                             /* list of managed devices */
78546 +    EP_KTHREAD       ManagerThread;                            /* place for manager thread to sleep */
78547 +
78548 +    /* global node state */
78549 +    spinlock_t      NodeLock;                                  /* spinlock for node state (including per-device node state) */
78550 +    EP_NODE        *Nodes;                                     /* system wide node state */
78551 +    statemap_t      *NodeSet;                                  /* system wide nodeset */
78552 +    struct list_head NodesetCallbackList;                      /* list of "callbacks" */
78553 +
78554 +    /* Transaction Id */
78555 +    struct list_head XidCacheList;                             /* list of XID caches */
78556 +    uint32_t        XidGeneration;                             /* XID generation number (distinguishes reboots) */
78557 +    uint32_t        XidHandle;                                 /* XID handles (distinguishes XID caches) */
78558 +    uint64_t        XidNext;                                   /* next XID to prime cache */
78559 +    spinlock_t      XidLock;                                   /*   and it's spinlock  */
78560 +
78561 +    /* Shutdown/Panic */
78562 +    unsigned int     Shutdown;                                 /* node has shutdown/panic'd */
78563 +};
78564 +
78565 +#if defined(DEBUG_ASSERT)
78566 +extern int ep_assfail (EP_RAIL *rail, const char *string, const char *func, const char *file, const int line);
78567 +extern int sdram_assert;
78568 +extern int assfail_mode;
78569 +
78570 +#define EP_ASSERT(rail, EX)    do { \
78571 +    if (!(EX) && ep_assfail ((EP_RAIL *) (rail), #EX, __FUNCTION__, __FILE__, __LINE__)) { \
78572 +       BUG(); \
78573 +    } \
78574 +} while (0)
78575 +#define EP_ASSFAIL(rail,EX)    do { \
78576 +   if (ep_assfail ((EP_RAIL *) (rail), EX, __FUNCTION__, __FILE__, __LINE__)) { \
78577 +       BUG(); \
78578 +    } \
78579 +} while (0)
78580 +#define SDRAM_ASSERT(EX)       (sdram_assert ? (EX) : 1)
78581 +#else
78582 +#define EP_ASSERT(rail, EX)    ((void) 0)
78583 +#define EP_ASSFAIL(rail,str)   ((void) 0)
78584 +#define SDRAM_ASSERT(EX)       (1)
78585 +#endif
78586 +
78587 +/* conf_osdep.c */
78588 +extern EP_SYS    *ep_system(void);
78589 +extern void       ep_mod_dec_usecount (void);
78590 +extern void       ep_mod_inc_usecount (void);
78591 +
78592 +/* procfs_osdep.c */
78593 +extern struct proc_dir_entry *ep_procfs_root;
78594 +extern struct proc_dir_entry *ep_config_root;
78595 +
78596 +/* kcomm.c */
78597 +extern int        ep_sys_init (EP_SYS *sys);
78598 +extern void       ep_sys_fini (EP_SYS *sys);
78599 +extern void      ep_shutdown (EP_SYS *sys);
78600 +extern int        ep_init_rail (EP_SYS *sys, EP_RAIL *rail);
78601 +extern void       ep_destroy_rail (EP_RAIL *rail);
78602 +extern int        ep_start_rail (EP_RAIL *rail);
78603 +extern void       ep_stop_rail (EP_RAIL *rail);
78604 +
78605 +extern void       ep_connect_node (EP_RAIL *rail, int nodeId);
78606 +extern int        ep_disconnect_node (EP_RAIL *rail, int nodeId);
78607 +
78608 +extern EP_XID     ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache);
78609 +extern void       ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache);
78610 +extern void       ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache);
78611 +
78612 +extern int        ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body);
78613 +
78614 +extern void       ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason);
78615 +
78616 +extern void      ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys);
78617 +extern void      ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys);
78618 +extern EP_SUBSYS *ep_subsys_find (EP_SYS *sys, char *name);
78619 +
78620 +extern void       DisplayNodes (EP_RAIL *rail);
78621 +
78622 +extern void       ep_fillout_stats(EP_RAIL *rail, char *str);
78623 +
78624 +/* neterr.c */
78625 +extern void       ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie);
78626 +
78627 +/* kcomm_elan3.c */
78628 +extern unsigned int ep3_create_rails (EP_SYS *sys, unsigned int disabled);
78629 +
78630 +/* kcomm_elan4.c */
78631 +extern unsigned int ep4_create_rails (EP_SYS *sys, unsigned int disabled);
78632 +
78633 +/* probenetwork.c */
78634 +extern int       ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos);
78635 +extern void      CheckPosition (EP_RAIL *rail);
78636 +
78637 +extern uint16_t   CheckSum (char *msg, int nob);
78638 +
78639 +/* threadcode.c */
78640 +extern EP_ADDR    ep_symbol (EP_CODE *code, char *name);
78641 +extern int        ep_loadcode (EP_RAIL *rail, EP_CODE *code);
78642 +extern void       ep_unloadcode (EP_RAIL *rail, EP_CODE *code);
78643 +
78644 +/* Public interface */
78645 +/* debug.c */
78646 +extern int              ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int count, int off);
78647 +extern void             ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits);
78648 +
78649 +/* epcomms.c */
78650 +extern int              ep_waitfor_nodeid (EP_SYS *sys);
78651 +extern int              ep_nodeid (EP_SYS *sys);
78652 +extern int              ep_numnodes (EP_SYS *sys);
78653 +
78654 +/* railhints.c */
78655 +extern int              ep_pickRail(EP_RAILMASK railmask);
78656 +
78657 +/* support.c */
78658 +extern int              ep_register_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg);
78659 +extern void             ep_remove_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg);
78660 +extern void             ep_call_nodeset_callbacks (EP_SYS *sys, statemap_t *map);
78661 +
78662 +extern int              ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg);
78663 +extern void             ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg);
78664 +extern void             ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *);
78665 +extern unsigned int     ep_backoff (EP_BACKOFF *backoff, int type);
78666 +
78667 +#endif /* !__ELAN__ */
78668 +
78669 +typedef struct display_info {
78670 +    void (*func)(long, char *, ...);
78671 +    long arg;
78672 +} DisplayInfo;
78673 +
78674 +extern DisplayInfo di_ep_debug;
78675 +
78676 +
78677 +#endif /* __ELAN_KCOMM_H */
78678 +
78679 +/*
78680 + * Local variables:
78681 + * c-file-style: "stroustrup"
78682 + * End:
78683 + */
78684 Index: linux-2.6.5-7.191/include/elan/kcomm_stats.h
78685 ===================================================================
78686 --- linux-2.6.5-7.191.orig/include/elan/kcomm_stats.h   2004-02-23 16:02:56.000000000 -0500
78687 +++ linux-2.6.5-7.191/include/elan/kcomm_stats.h        2005-07-28 14:52:52.937665208 -0400
78688 @@ -0,0 +1,153 @@
78689 +/*
78690 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78691 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78692 + *
78693 + *    For licensing information please see the supplied COPYING file
78694 + *
78695 + */
78696 +
78697 +#ifndef __EP_EPSTATS_H
78698 +#define __EP_EPSTATS_H
78699 +
78700 +#ident "$Id: kcomm_stats.h,v 1.4.8.1 2004/11/12 10:54:51 mike Exp $"
78701 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_stats.h,v $ */
78702 +
78703 +#define EP_BUCKET_SLOTS                        8
78704 +
78705 +#define BucketStat(obj,stat,size)      ((size) < 128   ? (obj)->Stats.stat[0]++ : \
78706 +                                        (size) < 512   ? (obj)->Stats.stat[1]++ : \
78707 +                                        (size) < 1024  ? (obj)->Stats.stat[2]++ : \
78708 +                                        (size) < 8192  ? (obj)->Stats.stat[3]++ : \
78709 +                                        (size) < 16384 ? (obj)->Stats.stat[4]++ : \
78710 +                                        (size) < 32768 ? (obj)->Stats.stat[5]++ : \
78711 +                                        (size) < 65536 ? (obj)->Stats.stat[6]++ : \
78712 +                                        (obj)->Stats.stat[7]++)
78713 +#define IncrStat(obj,stat)             ((obj)->Stats.stat++)
78714 +
78715 +
78716 +#define EP3_NUM_DMA_FAIL               11      /* NOTE - the same as EP_NUM_RETRIES */
78717 +
78718 +#define ADD_STAT(STATS,STAT,VALUE) { unsigned long now = lbolt;\
78719 +   STATS.STAT.total += VALUE; \
78720 +   if  ( ( now - STATS.STAT.last_time ) > HZ ) { \
78721 + STATS.STAT.last_per_sec = ( STATS.STAT.total - STATS.STAT.last_count)/ ( (( now - STATS.STAT.last_time ) + (HZ/2)) / HZ);\
78722 + STATS.STAT.last_time = now; \
78723 + STATS.STAT.last_count = STATS.STAT.total; \
78724 +   }} \
78725 +
78726 +#define INC_STAT(STATS,STAT) ADD_STAT(STATS,STAT,1)
78727 +
78728 +#define GET_STAT_PER_SEC(STATS, STAT) (  (( lbolt - STATS.STAT.last_time ) < (HZ * 5)) ? STATS.STAT.last_per_sec : 0 )
78729 +#define GET_STAT_TOTAL(STATS, STAT) ( STATS.STAT.total )
78730 +
78731 +struct ep_stats_count 
78732 +{
78733 +    unsigned long total;
78734 +    unsigned long last_time;
78735 +    unsigned long last_count;
78736 +    unsigned long last_per_sec;
78737 +};
78738 +
78739 +typedef struct ep_stats_count          EP_STATS_COUNT;
78740 +
78741 +typedef struct ep3_rail_stats
78742 +{
78743 +    unsigned long      IssueDmaFail[EP3_NUM_DMA_FAIL];
78744 +
78745 +    unsigned long      DmaQueueLength[EP_BUCKET_SLOTS];
78746 +    unsigned long      CprocDmaQueueOverflow;
78747 +    unsigned long      DprocDmaQueueOverflow;
78748 +    unsigned long      IprocDmaQueueOverflow;
78749 +    unsigned long      CprocEventQueueOverflow;
78750 +    unsigned long      DprocEventQueueOverflow;
78751 +    unsigned long      IprocEventQueueOverflow;
78752 +
78753 +    unsigned long      QueueingPacketTrap;
78754 +    unsigned long      DmaIdentifyTrap;
78755 +    unsigned long      ThreadIdentifyTrap;
78756 +    unsigned long      DmaPacketTrap;
78757 +} EP3_RAIL_STATS;
78758 +
78759 +typedef struct ep4_rail_stats
78760 +{
78761 +    unsigned long       somestatsgohere;
78762 +} EP4_RAIL_STATS;
78763 +
78764 +typedef struct ep_rail_stats
78765 +{
78766 +    unsigned long      SendMessageFailed;
78767 +    unsigned long      NeterrAtomicPacket;
78768 +    unsigned long       NeterrDmaPacket;
78769 +
78770 +    EP_STATS_COUNT      rx;
78771 +    EP_STATS_COUNT      rx_len;
78772 +
78773 +    EP_STATS_COUNT      tx;
78774 +    EP_STATS_COUNT      tx_len;
78775 +
78776 +} EP_RAIL_STATS;
78777 +
78778 +typedef struct ep_cm_rail_stats
78779 +{
78780 +    /* cluster membership statistics */
78781 +    unsigned long      HeartbeatsSent;
78782 +    unsigned long      HeartbeatsRcvd;
78783 +    
78784 +    unsigned long      RetryHeartbeat;
78785 +    unsigned long      RejoinRequest;
78786 +    unsigned long      RejoinTooSlow;
78787 +    unsigned long      LaunchMessageFail;
78788 +    unsigned long      MapChangesSent;
78789 +
78790 +    /* Heartbeat scheduling stats */
78791 +    unsigned long      HeartbeatOverdue;
78792 +} EP_CM_RAIL_STATS;
78793 +
78794 +typedef struct ep_comms_rail_stats
78795 +{
78796 +    /* kernel comms large message statistics */
78797 +    unsigned long      TxEnveEvent;
78798 +    unsigned long      TxDataEvent;
78799 +    unsigned long      TxDoneEvent;
78800 +    unsigned long      RxDoneEvent;
78801 +    unsigned long      MulticastTxDone;
78802 +    unsigned long      QueueReceive;
78803 +
78804 +    unsigned long      TxEnveRetry;
78805 +    unsigned long      TxDataRetry;
78806 +    unsigned long      TxDoneRetry;
78807 +    unsigned long      RxThrdEvent;
78808 +    unsigned long      RxDataRetry;
78809 +    unsigned long      RxDoneRetry;
78810 +    unsigned long      StallThread;
78811 +    unsigned long      ThrdWaiting;
78812 +    unsigned long      CompleteEnvelope;
78813 +
78814 +    unsigned long      NoFreeTxds;
78815 +    unsigned long      NoFreeRxds;
78816 +
78817 +    unsigned long      LockRcvrTrapped;
78818 +} EP_COMMS_RAIL_STATS;
78819 +
78820 +typedef struct ep_comms_stats
78821 +{
78822 +    unsigned long      DataXmit[8];
78823 +    unsigned long      McastXmit[8];
78824 +    unsigned long      RPCXmit[8];
78825 +    unsigned long      RPCPut[8];
78826 +    unsigned long      RPCGet[8];
78827 +    unsigned long      CompleteRPC[8];
78828 +    unsigned long      RxData[8];
78829 +    unsigned long      RxMcast[8];
78830 +
78831 +    unsigned long      NoFreeTxds;
78832 +    unsigned long      NoFreeRxds;
78833 +} EP_COMMS_STATS;
78834 +
78835 +#endif /* __EP_EPSTATS_H */
78836 +
78837 +/*
78838 + * Local variables:
78839 + * c-file-style: "stroustrup"
78840 + * End:
78841 + */
78842 Index: linux-2.6.5-7.191/include/elan/kmap.h
78843 ===================================================================
78844 --- linux-2.6.5-7.191.orig/include/elan/kmap.h  2004-02-23 16:02:56.000000000 -0500
78845 +++ linux-2.6.5-7.191/include/elan/kmap.h       2005-07-28 14:52:52.938665056 -0400
78846 @@ -0,0 +1,68 @@
78847 +/*
78848 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78849 + *
78850 + *    For licensing information please see the supplied COPYING file
78851 + *
78852 + */
78853 +
78854 +#ifndef __ELAN_KMAP_H
78855 +#define __ELAN_KMAP_H
78856 +
78857 +#ident "$Id: kmap.h,v 1.3.8.1 2004/12/14 10:19:14 mike Exp $"
78858 +/*      $Source: /cvs/master/quadrics/epmod/kmap.h,v $ */
78859 +
78860 +#include <elan/rmap.h>
78861 +
78862 +extern void ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t vaddr, unsigned long len, unsigned int perm, int ep_attr);
78863 +extern void ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr);
78864 +extern void ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len);
78865 +extern void ep_perrail_dvma_sync (EP_RAIL *rail);
78866 +
78867 +typedef struct ep_dvma_nmh
78868 +{
78869 +    EP_NMH             dvma_nmh;
78870 +    
78871 +    struct list_head   dvma_link;                              /* chained on ep_dvma_state */
78872 +    unsigned           dvma_perm;                              /* permissions for region */
78873 +
78874 +    spinlock_t         dvma_lock;
78875 +    EP_RAILMASK                dvma_railmask;                          /* bitmap of rails */
78876 +    EP_RAIL           *dvma_rails[EP_MAX_RAILS];               /* assoicated rails */
78877 +    void              *dvma_private[EP_MAX_RAILS];             /* pointers to rail private data */
78878 +    unsigned int        dvma_attrs[1];                         /* bitmap of which rails pages are loaded NOTE - max 32 rails */
78879 +} EP_DVMA_NMH;
78880 +
78881 +/* values for dvma_perm */
78882 +#define EP_PERM_EXECUTE                0
78883 +#define EP_PERM_READ           1
78884 +#define EP_PERM_WRITE          2
78885 +#define EP_PERM_ALL            3
78886 +
78887 +typedef struct ep_dvma_state
78888 +{
78889 +    kmutex_t           dvma_lock;
78890 +    struct list_head    dvma_handles;
78891 +    struct list_head    dvma_rails;
78892 +    EP_RMAP           *dvma_rmap;
78893 +} EP_DVMA_STATE;
78894 +
78895 +extern void    ep_dvma_init (EP_SYS *sys);
78896 +extern void    ep_dvma_fini (EP_SYS *sys);
78897 +extern EP_NMH *ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm);
78898 +extern void    ep_dvma_release (EP_SYS *sys, EP_NMH *nmh);
78899 +extern void    ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len, 
78900 +                            EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset);
78901 +extern void    ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd);
78902 +  
78903 +extern void    ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail);
78904 +extern int     ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail);
78905 +
78906 +extern uint16_t rolling_check_sum (char *msg, int nob, uint16_t sum);
78907 +
78908 +#endif /* __ELAN_KMAP_H */
78909 +
78910 +/*
78911 + * Local variables:
78912 + * c-file-style: "stroustrup"
78913 + * End:
78914 + */
78915 Index: linux-2.6.5-7.191/include/elan/kmsg.h
78916 ===================================================================
78917 --- linux-2.6.5-7.191.orig/include/elan/kmsg.h  2004-02-23 16:02:56.000000000 -0500
78918 +++ linux-2.6.5-7.191/include/elan/kmsg.h       2005-07-28 14:52:52.938665056 -0400
78919 @@ -0,0 +1,14 @@
78920 +/*
78921 + *    Copyright (c) 2003 by Quadrics Ltd.
78922 + *
78923 + *    For licensing information please see the supplied COPYING file
78924 + *
78925 + */
78926 +
78927 +#ifndef __ELAN_KMSG_H
78928 +#define __ELAN_KMSG_H
78929 +
78930 +#ident "@(#)$Id: kmsg.h,v 1.1 2003/09/23 13:55:12 david Exp $"
78931 +/*      $Source: /cvs/master/quadrics/epmod/kmsg.h,v $ */
78932 +
78933 +#endif /* __ELAN_KMSG_H */
78934 Index: linux-2.6.5-7.191/include/elan/kthread.h
78935 ===================================================================
78936 --- linux-2.6.5-7.191.orig/include/elan/kthread.h       2004-02-23 16:02:56.000000000 -0500
78937 +++ linux-2.6.5-7.191/include/elan/kthread.h    2005-07-28 14:52:52.938665056 -0400
78938 @@ -0,0 +1,53 @@
78939 +/*
78940 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78941 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78942 + *
78943 + *    For licensing information please see the supplied COPYING file
78944 + *
78945 + */
78946 +
78947 +#ifndef __ELAN3_KTHREAD_H
78948 +#define __ELAN3_KTHREAD_H
78949 +
78950 +#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
78951 +/*      $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/
78952 +
78953 +typedef struct ep_kthread
78954 +{
78955 +       kcondvar_t      wait;                                   /* place to sleep */
78956 +       spinlock_t      lock;                                   /* and lock */
78957 +       long            next_run;                               /* tick when thread should next run */
78958 +       long            running;                                /* tick when thread started to run */
78959 +       unsigned short  should_stall;
78960 +       unsigned char   state;
78961 +       unsigned int    started:1;
78962 +       unsigned int    should_stop:1;
78963 +       unsigned int    stopped:1;
78964 +} EP_KTHREAD;
78965 +
78966 +#define KT_STATE_SLEEPING              0
78967 +#define KT_STATE_SCHEDULED             1
78968 +#define KT_STATE_RUNNING               2
78969 +#define KT_STATE_STALLED               3
78970 +
78971 +#define AFTER(a, b)                    ((((long)(a)) - ((long)(b))) > 0)
78972 +#define BEFORE(a,b)                    ((((long)(a)) - ((long)(b))) < 0)
78973 +
78974 +extern void ep_kthread_init (EP_KTHREAD *kt);
78975 +extern void ep_kthread_destroy (EP_KTHREAD *kt);
78976 +extern void ep_kthread_started (EP_KTHREAD *kt);
78977 +extern void ep_kthread_stopped (EP_KTHREAD *kt);
78978 +extern int  ep_kthread_should_stall (EP_KTHREAD *kth);
78979 +extern int  ep_kthread_sleep (EP_KTHREAD *kth, long next_run);
78980 +extern void ep_kthread_schedule (EP_KTHREAD *kt, long when);
78981 +extern void ep_kthread_stall (EP_KTHREAD *kth);
78982 +extern void ep_kthread_resume (EP_KTHREAD *kt);
78983 +extern void ep_kthread_stop (EP_KTHREAD *kt);
78984 +extern int  ep_kthread_state (EP_KTHREAD *kt, long *time);
78985 +#endif /* __ELAN3_KTHREAD_H */
78986 +
78987 +/*
78988 + * Local variables:
78989 + * c-file-style: "linux"
78990 + * End:
78991 + */
78992 Index: linux-2.6.5-7.191/include/elan/nmh.h
78993 ===================================================================
78994 --- linux-2.6.5-7.191.orig/include/elan/nmh.h   2004-02-23 16:02:56.000000000 -0500
78995 +++ linux-2.6.5-7.191/include/elan/nmh.h        2005-07-28 14:52:52.939664904 -0400
78996 @@ -0,0 +1,95 @@
78997 +/*
78998 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78999 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
79000 + *
79001 + *    For licensing information please see the supplied COPYING file
79002 + *
79003 + */
79004 +
79005 +#ifndef __ELAN3_NMH_H
79006 +#define __ELAN3_NMH_H
79007 +
79008 +#ident "@(#)$Id: nmh.h,v 1.7 2004/01/06 10:29:55 david Exp $"
79009 +/*      $Source: /cvs/master/quadrics/epmod/nmh.h,v $*/
79010 +
79011 +
79012 +/* Forward declarations */
79013 +typedef struct ep_nmd          EP_NMD;
79014 +typedef struct ep_nmh_ops      EP_NMH_OPS;
79015 +typedef struct ep_nmh          EP_NMH;
79016 +
79017 +/* Railmask held in 16 bit field (packs with nodeId into NMD */
79018 +typedef uint16_t               EP_RAILMASK;
79019 +
79020 +#define EP_RAIL2RAILMASK(rnum) (1 << (rnum))
79021 +#define EP_RAILMASK_ALL                0xffff
79022 +
79023 +/* kernel comms elan network address */
79024 +typedef uint32_t               EP_ADDR;
79025 +
79026 +/* network mapping descriptor - this is returned to the user from a map operation,
79027 + * and is what is passed to all communication functions */
79028 +struct ep_nmd
79029 +{
79030 +    EP_ADDR    nmd_addr;                                       /* base address */
79031 +    uint32_t   nmd_len;                                        /* size in bytes */
79032 +    uint32_t   nmd_attr;                                       /* nodeid << 16 | railmask */
79033 +};
79034 +
79035 +#define EP_NMD_ATTR(nodeid,railmask)   (((nodeid) << 16) | (railmask))
79036 +#define EP_NMD_NODEID(nmd)             ((nmd)->nmd_attr >> 16)
79037 +#define EP_NMD_RAILMASK(nmd)           ((nmd)->nmd_attr & EP_RAILMASK_ALL)
79038 +
79039 +#if !defined(__ELAN__)
79040 +
79041 +struct ep_nmh_ops
79042 +{
79043 +    int           (*op_map_rails) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask);   /* add mappings to different rail(s) */
79044 +
79045 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
79046 +    uint16_t (*op_calc_check_sum) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum); /* calculates check sum              */
79047 +#endif
79048 +};
79049 +
79050 +struct ep_nmh
79051 +{
79052 +    EP_NMD          nmh_nmd;                                   /* public field */
79053 +    struct list_head nmh_link;                                 /* linked on hash table */
79054 +    EP_NMH_OPS     *nmh_ops;                                   /* operations to perform on object */
79055 +};
79056 +
79057 +#define EP_NMH_NUMHASH                 (32 - 11 + 1)           /* one hash table for each power of 2 above pagesize */
79058 +#define EP_NMH_HASHSIZE                        (64)                    /* max size of each hash table */
79059 +
79060 +typedef struct ep_nmh_table
79061 +{
79062 +    struct list_head *tbl_hash[EP_NMH_NUMHASH];
79063 +    unsigned         tbl_size[EP_NMH_NUMHASH];
79064 +} EP_NMH_TABLE;
79065 +
79066 +extern int         ep_nmh_init (EP_NMH_TABLE *tbl);
79067 +extern void        ep_nmh_fini (EP_NMH_TABLE *tbl);
79068 +
79069 +extern void        ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmd);
79070 +extern void        ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmd);
79071 +extern EP_NMH     *ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmh);
79072 +
79073 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
79074 +extern uint32_t    ep_nmd_calc_data_check_sum(EP_SYS *sys, EP_NMD *nmd, int nFrags);
79075 +#endif
79076 +
79077 +/* Public interface */
79078 +extern EP_RAILMASK ep_nmd2railmask (EP_NMD *frags, int nFrags);
79079 +extern void        ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len);
79080 +extern int        ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b);
79081 +extern int         ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask);
79082 +
79083 +#endif /* __ELAN__ */
79084 +
79085 +#endif /* __ELAN3_NMH_H */
79086 +
79087 +/*
79088 + * Local variables:
79089 + * c-file-style: "stroustrup"
79090 + * End:
79091 + */
79092 Index: linux-2.6.5-7.191/include/elan/rmap.h
79093 ===================================================================
79094 --- linux-2.6.5-7.191.orig/include/elan/rmap.h  2004-02-23 16:02:56.000000000 -0500
79095 +++ linux-2.6.5-7.191/include/elan/rmap.h       2005-07-28 14:52:52.939664904 -0400
79096 @@ -0,0 +1,49 @@
79097 +/*
79098 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79099 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
79100 + *
79101 + *    For licensing information please see the supplied COPYING file
79102 + *
79103 + */
79104 +
79105 +#ifndef __ELAN_RMAP_H
79106 +#define __ELAN_RMAP_H
79107 +
79108 +#ident "$Id: rmap.h,v 1.8 2004/05/19 10:24:40 david Exp $"
79109 +/*      $Source: /cvs/master/quadrics/epmod/rmap.h,v $ */
79110 +
79111 +
79112 +typedef struct ep_rmap_entry 
79113 +{
79114 +    size_t     m_size;
79115 +    u_long     m_addr;
79116 +} EP_RMAP_ENTRY;
79117 +
79118 +typedef struct ep_rmap 
79119 +{
79120 +    spinlock_t      m_lock;
79121 +    kcondvar_t       m_wait;
79122 +    u_int           m_size;
79123 +    u_int           m_free;
79124 +    u_int           m_want;
79125 +    char            *m_name;
79126 +    EP_RMAP_ENTRY    m_map[1];
79127 +} EP_RMAP;
79128 +
79129 +extern void         ep_display_rmap (EP_RMAP *map);
79130 +
79131 +extern void          ep_rmapinit (EP_RMAP *rmap, char *name, u_int mapsize);
79132 +extern unsigned long ep_rmalloc (EP_RMAP *rmap, size_t size, int cansleep);
79133 +extern unsigned long ep_rmalloc_constrained (EP_RMAP *mp, size_t size, unsigned long alo, unsigned long ahi, unsigned long align, int cansleep);
79134 +extern void          ep_rmfree (EP_RMAP *rmap, size_t size, unsigned long addr);
79135 +extern unsigned long ep_rmget (EP_RMAP *rmap, size_t size, unsigned long addr);
79136 +extern EP_RMAP      *ep_rmallocmap (size_t size, char *name, int cansleep);
79137 +extern void          ep_rmfreemap (EP_RMAP *map);
79138 +
79139 +#endif /* __ELAN3_RMAP_H */
79140 +
79141 +/*
79142 + * Local variables:
79143 + * c-file-style: "stroustrup"
79144 + * End:
79145 + */
79146 Index: linux-2.6.5-7.191/include/elan/statemap.h
79147 ===================================================================
79148 --- linux-2.6.5-7.191.orig/include/elan/statemap.h      2004-02-23 16:02:56.000000000 -0500
79149 +++ linux-2.6.5-7.191/include/elan/statemap.h   2005-07-28 14:52:52.939664904 -0400
79150 @@ -0,0 +1,52 @@
79151 +/*
79152 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79153 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
79154 + *
79155 + *    For licensing information please see the supplied COPYING file
79156 + *
79157 + */
79158 +
79159 +#ifndef __ELAN_STATEMAP_H
79160 +#define __ELAN_STATEMAP_H
79161 +
79162 +#ident "$Id: statemap.h,v 1.8 2003/10/07 13:22:38 david Exp $"
79163 +/*      $Source: /cvs/master/quadrics/epmod/statemap.h,v $ */
79164 +
79165 +#include <elan/bitmap.h>
79166 +
79167 +/******************************** global state bitmap stuff **********************************/
79168 +typedef struct
79169 +{
79170 +   unsigned int size;
79171 +   unsigned int nob;
79172 +   unsigned int changemap_nob;
79173 +   unsigned int bitmap_nob;
79174 +   bitmap_t    *changemap0;
79175 +   bitmap_t    *changemap1;
79176 +   bitmap_t    *changemap2;
79177 +   bitmap_t    *bitmap;
79178 +} statemap_t;
79179 +
79180 +extern bitmap_t              statemap_getseg (statemap_t *map, unsigned int offset);
79181 +extern void           statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg);
79182 +extern bitmap_t       statemap_getbits (statemap_t *map, unsigned int offset, int nbits);
79183 +extern void           statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits);
79184 +extern void           statemap_zero (statemap_t *map);
79185 +extern void           statemap_setmap (statemap_t *dst, statemap_t *src);
79186 +extern void           statemap_ormap (statemap_t *dst, statemap_t *src);
79187 +extern int           statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange);
79188 +extern int            statemap_changed (statemap_t *map);
79189 +extern void           statemap_reset (statemap_t *map);
79190 +extern void           statemap_copy (statemap_t *dst, statemap_t *src);
79191 +extern void           statemap_clearchanges (statemap_t *map);
79192 +extern bitmap_t      *statemap_tobitmap (statemap_t *map);
79193 +extern statemap_t    *statemap_create (int size);
79194 +extern void           statemap_destroy (statemap_t *map);
79195 +
79196 +#endif /* __ELAN_STATEMAP_H */
79197 +
79198 +/*
79199 + * Local variables:
79200 + * c-file-style: "stroustrup"
79201 + * End:
79202 + */
79203 Index: linux-2.6.5-7.191/include/elan/stats.h
79204 ===================================================================
79205 --- linux-2.6.5-7.191.orig/include/elan/stats.h 2004-02-23 16:02:56.000000000 -0500
79206 +++ linux-2.6.5-7.191/include/elan/stats.h      2005-07-28 14:52:52.939664904 -0400
79207 @@ -0,0 +1,85 @@
79208 +/*
79209 + *    Copyright (c) 2003 by Quadrics Limited.
79210 + * 
79211 + *    For licensing information please see the supplied COPYING file
79212 + *
79213 + */
79214 +
79215 +#ident "@(#)$Id: stats.h,v 1.5 2003/09/24 13:55:37 david Exp $"
79216 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/stats.h,v $*/
79217 +
79218 +#ifndef __ELAN_STATS_H
79219 +#define __ELAN_STATS_H
79220 +
79221 +
79222 +/* non-kernel headings */
79223 +#define ELAN_STATS_NAME_MAX_LEN ((uint)64)
79224 +typedef unsigned int ELAN_STATS_IDX;
79225 +
79226 +typedef struct elan_stats_map
79227 +{
79228 +       char entry_name[ELAN_STATS_NAME_MAX_LEN];
79229 +       int  index;
79230 +} ELAN_STATS_MAP;
79231 +
79232 +#if defined(__KERNEL__)
79233 +
79234 +/* stats callbacks */
79235 +#define ELAN_STATS_OPS_VERSION ((u_int)1)
79236 +typedef struct elan_stats_ops
79237 +{
79238 +       u_int  ops_version;
79239 +
79240 +       int (*elan_stats_get_name)    (void * arg, uint index,   caddr_t  name);
79241 +       int (*elan_stats_get_block)   (void * arg, uint entries, ulong   *values);
79242 +       int (*elan_stats_clear_block) (void * arg);
79243 +
79244 +} ELAN_STATS_OPS;
79245 +
79246 +typedef struct elan_stats_struct
79247 +{
79248 +       struct list_head   node;
79249 +
79250 +       ELAN_STATS_IDX     statidx;
79251 +       char               block_name[ELAN_STATS_NAME_MAX_LEN];
79252 +       uint               num_entries;
79253 +       ELAN_STATS_OPS *ops;
79254 +       void              *arg;
79255 +
79256 +} ELAN_STATS_STRUCT;
79257 +
79258 +/* stats.c */
79259 +extern int                   elan_stats_register   (ELAN_STATS_IDX    *statidx, 
79260 +                                                      char              *block_name, 
79261 +                                                      uint               num_entries,
79262 +                                                      ELAN_STATS_OPS *ops,
79263 +                                                      void              *arg);
79264 +
79265 +extern int                   elan_stats_deregister  (ELAN_STATS_IDX  statidx);
79266 +extern ELAN_STATS_STRUCT *elan_stats_find        (ELAN_STATS_IDX  statidx);
79267 +extern ELAN_STATS_STRUCT *elan_stats_find_by_name(caddr_t         block_name);
79268 +extern ELAN_STATS_STRUCT *elan_stats_find_next   (ELAN_STATS_IDX  statidx);
79269 +
79270 +
79271 +/* elan_stats.c */
79272 +extern int elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_statidx);
79273 +
79274 +extern int elan_stats_find_index     (caddr_t  block_name, ELAN_STATS_IDX *statidx, uint *num_entries);
79275 +
79276 +extern int elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t block_name, uint *num_entries);
79277 +
79278 +extern int elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name);
79279 +
79280 +extern int elan_stats_get_block      (ELAN_STATS_IDX statidx, uint entries, ulong   *values);
79281 +
79282 +extern int elan_stats_clear_block    (ELAN_STATS_IDX statidx);
79283 +
79284 +#endif /* __KERNEL__ */
79285 +
79286 +#endif /* __ELAN_STATS_H */
79287 +
79288 +/*
79289 + * Local variables:
79290 + * c-file-style: "linux"
79291 + * End:
79292 + */
79293 Index: linux-2.6.5-7.191/include/elan3/compat.h
79294 ===================================================================
79295 --- linux-2.6.5-7.191.orig/include/elan3/compat.h       2004-02-23 16:02:56.000000000 -0500
79296 +++ linux-2.6.5-7.191/include/elan3/compat.h    2005-07-28 14:52:52.940664752 -0400
79297 @@ -0,0 +1,177 @@
79298 +/*
79299 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79300 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
79301 + *
79302 + *    For licensing information please see the supplied COPYING file
79303 + *
79304 + */
79305 +
79306 +#ident "@(#)$Id: compat.h,v 1.4 2004/06/09 09:07:03 mike Exp $ $Name: QSNETMODULES-4-31_20050321 $"
79307 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/compat.h,v $*/
79308 +
79309 +#ifndef __ELAN3_COMPAT_H
79310 +#define __ELAN3_COMPAT_H
79311 +
79312 +/* compatibility header to allow Eagle branch QSNETLIBS 
79313 + * to compile against head kernel */
79314 +
79315 +#define ELAN_EAGLE_COMPAT
79316 +
79317 +/* vmseg.h */
79318 +#define ELAN_FLAGSTATS                         ELAN3_FLAGSTATS
79319 +
79320 +/* uregs.h */
79321 +#define ELAN_STATS_NAME                                ELAN3_STATS_NAME
79322 +#define elan3_stats_names                      elan_stats_names
79323 +
79324 +/* spinlock.h */
79325 +#define ELAN_SPINLOCK                          ELAN3_SPINLOCK
79326 +#define ELAN_SPINLOCK_MAIN                     ELAN3_SPINLOCK_MAIN
79327 +#define ELAN_SPINLOCK_ELAN                     ELAN3_SPINLOCK_ELAN
79328 +#define ELAN_ME_SPINENTER                      ELAN3_ME_SPINENTER
79329 +#define ELAN_ME_FORCEENTER                     ELAN3_ME_FORCEENTER
79330 +#define ELAN_ME_SPINEXIT                       ELAN3_ME_SPINEXIT
79331 +#define ELAN_SPINENTER                         ELAN3_SPINENTER
79332 +#define ELAN_SPINEXIT                          ELAN3_SPINEXIT
79333 +#define elan3_me_spinblock                     elan_me_spinblock
79334 +#define elan3_spinenter                                elan_spinenter
79335 +
79336 +/* elanio.h */
79337 +#define ELANIO_CONTROL_PATHNAME                        ELAN3IO_CONTROL_PATHNAME
79338 +#define ELANIO_USER_PATHNAME                   ELAN3IO_USER_PATHNAME
79339 +#define ELANIO_SDRAM_PATHNAME                  ELAN3IO_SDRAM_PATHNAME
79340 +#define ELANIO_MAX_PATHNAMELEN                 ELAN3IO_MAX_PATHNAMELEN
79341 +
79342 +#define ELANIO_SET_BOUNDARY_SCAN               ELAN3IO_SET_BOUNDARY_SCAN
79343 +#define ELANIO_CLEAR_BOUNDARY_SCAN             ELAN3IO_CLEAR_BOUNDARY_SCAN
79344 +#define ELANIO_READ_LINKVAL                    ELAN3IO_READ_LINKVAL
79345 +#define ELANIO_WRITE_LINKVAL                   ELAN3IO_WRITE_LINKVAL
79346 +#define ELANIO_SET_DEBUG_STRUCT                        ELAN3IO_SET_DEBUG_STRUCT
79347 +#define ELANIO_SET_DEBUG                       ELAN3IO_SET_DEBUG
79348 +#define ELANIO_DEBUG_BUFFER_STRUCT             ELAN3IO_DEBUG_BUFFER_STRUCT
79349 +#define ELANIO_DEBUG_BUFFER                    ELAN3IO_DEBUG_BUFFER
79350 +#define ELANIO_NETERR_SERVER_STRUCT            ELAN3IO_NETERR_SERVER_STRUCT
79351 +#define ELANIO_NETERR_SERVER                   ELAN3IO_NETERR_SERVER
79352 +#define ELANIO_NETERR_FIXUP                    ELAN3IO_NETERR_FIXUP
79353 +
79354 +#define ELANIO_FREE                            ELAN3IO_FREE
79355 +#define ELANIO_ATTACH                          ELAN3IO_ATTACH
79356 +#define ELANIO_DETACH                          ELAN3IO_DETACH
79357 +#define ELANIO_ADDVP_STRUCT                    ELAN3IO_ADDVP_STRUCT
79358 +#define ELANIO_ADDVP                           ELAN3IO_ADDVP
79359 +#define ELANIO_REMOVEVP                                ELAN3IO_REMOVEVP
79360 +#define ELANIO_BCASTVP_STRUCT                  ELAN3IO_BCASTVP_STRUCT
79361 +#define ELANIO_BCASTVP                         ELAN3IO_BCASTVP
79362 +#define ELANIO_LOAD_ROUTE_STRUCT               ELAN3IO_LOAD_ROUTE_STRUCT
79363 +#define ELANIO_LOAD_ROUTE                      ELAN3IO_LOAD_ROUTE
79364 +#define ELANIO_PROCESS                         ELAN3IO_PROCESS
79365 +#define ELANIO_SETPERM_STRUCT                  ELAN3IO_SETPERM_STRUCT
79366 +#define ELANIO_SETPERM                         ELAN3IO_SETPERM
79367 +#define ELANIO_CLEARPERM_STRUCT                        ELAN3IO_CLEARPERM_STRUCT
79368 +#define ELANIO_CLEARPERM                       ELAN3IO_CLEARPERM
79369 +#define ELANIO_CHANGEPERM_STRUCT               ELAN3IO_CHANGEPERM_STRUCT
79370 +#define ELANIO_CHANGEPERM                      ELAN3IO_CHANGEPERM
79371 +#define ELANIO_HELPER_THREAD                   ELAN3IO_HELPER_THREAD
79372 +#define ELANIO_WAITCOMMAND                     ELAN3IO_WAITCOMMAND
79373 +#define ELANIO_BLOCK_INPUTTER                  ELAN3IO_BLOCK_INPUTTER
79374 +#define ELANIO_SET_FLAGS                       ELAN3IO_SET_FLAGS
79375 +#define ELANIO_WAITEVENT                       ELAN3IO_WAITEVENT
79376 +#define ELANIO_ALLOC_EVENTCOOKIE               ELAN3IO_ALLOC_EVENTCOOKIE
79377 +#define ELANIO_FREE_EVENTCOOKIE                        ELAN3IO_FREE_EVENTCOOKIE
79378 +#define ELANIO_ARM_EVENTCOOKIE                 ELAN3IO_ARM_EVENTCOOKIE
79379 +#define ELANIO_WAIT_EVENTCOOKIE                        ELAN3IO_WAIT_EVENTCOOKIE
79380 +#define ELANIO_SWAPSPACE                       ELAN3IO_SWAPSPACE
79381 +#define ELANIO_EXCEPTION_SPACE                 ELAN3IO_EXCEPTION_SPACE
79382 +#define ELANIO_GET_EXCEPTION                   ELAN3IO_GET_EXCEPTION
79383 +#define ELANIO_UNLOAD_STRUCT                   ELAN3IO_UNLOAD_STRUCT
79384 +#define ELANIO_UNLOAD                          ELAN3IO_UNLOAD
79385 +#define ELANIO_GET_ROUTE_STRUCT                        ELAN3IO_GET_ROUTE_STRUCT
79386 +#define ELANIO_GET_ROUTE                       ELAN3IO_GET_ROUTE
79387 +#define ELANIO_RESET_ROUTE_STRUCT              ELAN3IO_RESET_ROUTE_STRUCT
79388 +#define ELANIO_RESET_ROUTE                     ELAN3IO_RESET_ROUTE
79389 +#define ELANIO_CHECK_ROUTE_STRUCT              ELAN3IO_CHECK_ROUTE_STRUCT
79390 +#define ELANIO_CHECK_ROUTE                     ELAN3IO_CHECK_ROUTE
79391 +#define ELANIO_VP2NODEID_STRUCT                        ELAN3IO_VP2NODEID_STRUCT
79392 +#define ELANIO_VP2NODEID                       ELAN3IO_VP2NODEID
79393 +#define ELANIO_SET_SIGNAL                      ELAN3IO_SET_SIGNAL
79394 +#define ELANIO_PROCESS_2_LOCATION_STRUCT       ELAN3IO_PROCESS_2_LOCATION_STRUCT
79395 +#define ELANIO_PROCESS_2_LOCATION              ELAN3IO_PROCESS_2_LOCATION
79396 +#define ELANIO_GET_DEVINFO_STRUCT              ELAN3IO_GET_DEVINFO_STRUCT
79397 +#define ELANIO_GET_DEVINFO                     ELAN3IO_GET_DEVINFO
79398 +#define ELANIO_GET_POSITION_STRUCT             ELAN3IO_GET_POSITION_STRUCT
79399 +#define ELANIO_GET_POSITION                    ELAN3IO_GET_POSITION
79400 +#define ELANIO_STATS_STRUCT                    ELAN3IO_STATS_STRUCT
79401 +#define ELANIO_STATS                           ELAN3IO_STATS
79402 +#  define ELAN_SYS_STATS_DEVICE                        ELAN3_SYS_STATS_DEVICE
79403 +#  define ELAN_SYS_STATS_ELAN3MMU              ELAN3_SYS_STATS_MMU
79404 +
79405 +#define ELANIO_OFF_FLAG_PAGE                   ELAN3IO_OFF_FLAG_PAGE
79406 +#define ELANIO_OFF_UREG_PAGE                   ELAN3IO_OFF_UREG_PAGE
79407 +#define ELANIO_OFF_COMMAND_PAGE                        ELAN3IO_OFF_COMMAND_PAGE
79408 +
79409 +
79410 +/* elanvp.h */
79411 +#define ELAN_ROUTE_SUCCESS                     ELAN3_ROUTE_SUCCESS
79412 +#define ELAN_ROUTE_SYSCALL_FAILED              ELAN3_ROUTE_SYSCALL_FAILED
79413 +#define ELAN_ROUTE_INVALID                     ELAN3_ROUTE_INVALID
79414 +#define ELAN_ROUTE_TOO_LONG                    ELAN3_ROUTE_TOO_LONG
79415 +#define ELAN_ROUTE_LOAD_FAILED                 ELAN3_ROUTE_LOAD_FAILED
79416 +#define ELAN_ROUTE_PROC_RANGE                  ELAN3_ROUTE_PROC_RANGE
79417 +#define ELAN_ROUTE_INVALID_LEVEL               ELAN3_ROUTE_INVALID_LEVEL
79418 +#define ELAN_ROUTE_OCILATES                    ELAN3_ROUTE_OCILATES
79419 +#define ELAN_ROUTE_WRONG_DEST                  ELAN3_ROUTE_WRONG_DEST
79420 +#define ELAN_ROUTE_TURN_LEVEL                  ELAN3_ROUTE_TURN_LEVEL
79421 +#define ELAN_ROUTE_NODEID_UNKNOWN              ELAN3_ROUTE_NODEID_UNKNOWN
79422 +
79423 +/* elandev.h */
79424 +#define ELAN_STATS                             ELAN3_STATS
79425 +#define ELAN_STATS_VERSION                     ELAN3_STATS_VERSION
79426 +
79427 +/* perm.h */
79428 +#define ELAN_PERM_NOREMOTE                     ELAN3_PERM_NOREMOTE
79429 +#define ELAN_PERM_LOCAL_READ                   ELAN3_PERM_LOCAL_READ
79430 +#define ELAN_PERM_REMOTEALL                    ELAN3_PERM_REMOTEALL
79431 +
79432 +/* threadsyscall.h */
79433 +#define ELAN_ABORT_TRAPNUM                     ELAN3_ABORT_TRAPNUM
79434 +#define ELAN_ELANCALL_TRAPNUM                  ELAN3_ELANCALL_TRAPNUM
79435 +#define ELAN_SYSCALL_TRAPNUM                   ELAN3_SYSCALL_TRAPNUM
79436 +#define ELAN_SYS_close                         ELAN3_SYS_close
79437 +#define ELAN_SYS_getpid                                ELAN3_SYS_getpid
79438 +#define ELAN_SYS_ioctl                         ELAN3_SYS_ioctl
79439 +#define ELAN_SYS_kill                          ELAN3_SYS_kill
79440 +#define ELAN_SYS_lseek                         ELAN3_SYS_lseek
79441 +#define ELAN_SYS_mmap                          ELAN3_SYS_mmap
79442 +#define ELAN_SYS_munmap                                ELAN3_SYS_munmap
79443 +#define ELAN_SYS_open                          ELAN3_SYS_open
79444 +#define ELAN_SYS_poll                          ELAN3_SYS_poll
79445 +#define ELAN_SYS_read                          ELAN3_SYS_read
79446 +#define ELAN_SYS_write                         ELAN3_SYS_write
79447 +#define ELAN_T_SYSCALL_CODE                    ELAN3_T_SYSCALL_CODE
79448 +#define ELAN_T_SYSCALL_ERRNO                   ELAN3_T_SYSCALL_ERRNO
79449 +
79450 +/* elansyscall.h */
79451 +#define ELAN_SYS_FLAG_DMA_BADVP                        ELAN3_SYS_FLAG_DMA_BADVP
79452 +#define ELAN_SYS_FLAG_THREAD_BADVP             ELAN3_SYS_FLAG_THREAD_BADVP
79453 +#define ELAN_SYS_FLAG_DMAFAIL                  ELAN3_SYS_FLAG_DMAFAIL
79454 +#define ELAN_SYS_FLAG_NETERR                   ELAN3_SYS_FLAG_NETERR
79455 +
79456 +/* intrinsics.h */
79457 +#define elan_copy64w                           elan3_copy64w
79458 +#define elan_read64dw                          elan3_read64dw
79459 +#define elan_write64dw                         elan3_write64dw
79460 +
79461 +#ifndef ELAN_POLL_EVENT
79462 +#define ELAN_POLL_EVENT                                ELAN3_POLL_EVENT
79463 +#endif
79464 +#ifndef ELAN_WAIT_EVENT
79465 +#define ELAN_WAIT_EVENT                                ELAN3_WAIT_EVENT
79466 +#endif
79467 +
79468 +#endif /* __ELAN3_COMPAT_H */
79469 +/*
79470 + * Local variables:
79471 + * c-file-style: "stroustrup"
79472 + * End:
79473 + */
79474 +
79475 Index: linux-2.6.5-7.191/include/elan3/dma.h
79476 ===================================================================
79477 --- linux-2.6.5-7.191.orig/include/elan3/dma.h  2004-02-23 16:02:56.000000000 -0500
79478 +++ linux-2.6.5-7.191/include/elan3/dma.h       2005-07-28 14:52:52.941664600 -0400
79479 @@ -0,0 +1,213 @@
79480 +/*
79481 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79482 + *
79483 + *    For licensing information please see the supplied COPYING file
79484 + *
79485 + */
79486 +
79487 +#ifndef __ELAN3_DMA_H
79488 +#define __ELAN3_DMA_H
79489 +
79490 +#ident "$Id: dma.h,v 1.38 2002/08/21 12:43:27 david Exp $"
79491 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/dma.h,v $ */
79492 +
79493 +#include <elan3/e3types.h>
79494 +#include <elan3/events.h>
79495 +
79496 +/* Alignment for a DMA descriptor */
79497 +#define E3_DMA_ALIGN           (32)
79498 +
79499 +/* The maximum size a DMA can be (i.e. < 2GB) */
79500 +#define E3_MAX_DMA_SIZE                0x7fffffff
79501 +
79502 +/* This macro returns TRUE if a fixup for the ELAN_REVB_BUG_2 problem is required 
79503 + * i.e. if the DMA begins in the last 64-bytes of a page and its size causes it to enter the
79504 + * next page, hence causing the Elan to issue 2 (64-byte) block reads to different pages.
79505 + * See GNAT hw-elan3/3263
79506 + */
79507 +#define E3_DMA_REVB_BUG_2(SIZE, ADDR, PAGESIZE)        \
79508 +       ( (((int) (ADDR) & (PAGESIZE-64)) == (PAGESIZE-64)) && (-(((int) (ADDR) | ~(PAGESIZE-1))) < (SIZE)) )
79509 +
79510 +/* There is a point where a dma runs quicker from main memory than
79511 + * when running from sdram and having to copy all the data down
79512 + * first.
79513 + */
79514 +#define E3_DMA_SDRAM_CUTOFF    128
79515 +
79516 +typedef union _e3_DmaType
79517 +{
79518 +    E3_uint32 type;
79519 +    struct
79520 +    {
79521 +#if defined(__LITTLE_ENDIAN__)
79522 +       E3_uint32 dataType:2;   /* Bits 0 to 1   */
79523 +       E3_uint32 direction:3;  /* Bit  4 to 2   */
79524 +       E3_uint32 opCode:4;     /* Bits 5 to 8   */
79525 +       E3_uint32 failCount:6;  /* Bits 9 to 14  */
79526 +       E3_uint32 isRemote:1;   /* Bit  15       */
79527 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
79528 +       E3_uint32 :3;           /* Bits 29 to 31 */
79529 +#else
79530 +       E3_uint32 :3;           /* Bits 29 to 31 */
79531 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
79532 +       E3_uint32 isRemote:1;   /* Bit  15       */
79533 +       E3_uint32 failCount:6;  /* Bits 9 to 14  */
79534 +       E3_uint32 opCode:4;     /* Bits 5 to 8   */
79535 +       E3_uint32 direction:3;  /* Bit  4 to 2   */
79536 +       E3_uint32 dataType:2;   /* Bits 0 to 1   */
79537 +#endif
79538 +    } s;
79539 +} E3_DmaType;
79540 +
79541 +#define E3_DMA_CONTEXT_MASK    (ALL_CONTEXT_BITS << 16)
79542 +
79543 +#define E3_DMA_CONTEXT(type)   (((type) >> 16) & ALL_CONTEXT_BITS)
79544 +#define E3_DMA_ISREMOTE(type)  (((type) >> 15) & 1)
79545 +#define E3_DMA_FAILCOUNT(type) (((type) >> 9) & 0x3F)
79546 +#define E3_DMA_OPCODE(type)    (((type) >> 5) & 0xF)
79547 +#define E3_DMA_DIRECTION(type) (((type) >> 2) & 0x7)
79548 +#define EP_DMA_DATATYPE(type)  (((type) >> 0) & 0x3)
79549 +
79550 +#define E3_DMA_TYPE(dataType, direction, opCode, failCount) \
79551 +    (((dataType) & 0x3) | (((direction) & 7) << 2) | (((opCode) & 0xF) << 5) | (((failCount) & 0x3F) << 9))
79552 +
79553 +
79554 +typedef union _e3_CookieVProc
79555 +{
79556 +    E3_uint32 cookie_vproc;
79557 +    struct
79558 +    {
79559 +#if defined(__LITTLE_ENDIAN__)
79560 +       E3_uint32 vproc:16;                     /* Bit  15 to 0  */
79561 +       E3_uint32 cookie:16;                    /* Bits 31 to 16 */
79562 +#else
79563 +       E3_uint32 cookie:16;                    /* Bits 31 to 16 */
79564 +       E3_uint32 vproc:16;                     /* Bit  15 to 0  */
79565 +#endif
79566 +    } s;
79567 +} E3_CookieVProc;
79568 +
79569 +#define E3_DMA_COOKIE_PROC(Cookie, VProc)  (((VProc) & 0xffff) | (((Cookie) << 16)))
79570 +
79571 +#define DMA_COOKIE_MASK                (0xffff0000)
79572 +#define DMA_PROCESS_MASK       (0x0000ffff)
79573 +
79574 +/* We use the bottom bit of the cookie to
79575 + * distinguish main/thread generated cookies
79576 + */
79577 +#define DMA_COOKIE_THREAD      (0x01 << 16)
79578 +
79579 +/* We use the next bit of the cookie to
79580 + * distinguish locally/remotely generated cookies 
79581 + */
79582 +#define DMA_COOKIE_REMOTE      (0x02 << 16)
79583 +
79584 +/* Assign and increment cookie (NB: we have reserved the bottom two bits)
79585 + */
79586 +#define DMA_COOKIE(COOKIE, VPROC)       ((((COOKIE) += (0x4 << 16)) & DMA_COOKIE_MASK) | VPROC)
79587 +#define DMA_REMOTE_COOKIE(COOKIE, VPROC) ((((COOKIE) += (0x4 << 16)) & DMA_COOKIE_MASK) | DMA_COOKIE_REMOTE | VPROC)
79588 +
79589 +#define DMA_COOKIE_REFRESH(COOKIEVP, COOKIE)                           \
79590 +do {                                                                   \
79591 +       COOKIEVP &= ~DMA_COOKIE_MASK;           /* Clear cookie */      \
79592 +       COOKIEVP |= DMA_COOKIE(COOKIE,0);       /* Assign new cookie */ \
79593 +} while (0)
79594 +
79595 +typedef struct e3_dma
79596 +{
79597 +    E3_DmaType         dma_u;
79598 +    E3_uint32          dma_size;
79599 +    E3_Addr            dma_source;
79600 +    E3_Addr            dma_dest;
79601 +    E3_Addr            dma_destEvent;
79602 +    E3_CookieVProc     dma_destCookieProc;
79603 +    E3_Addr            dma_srcEvent;
79604 +    E3_CookieVProc     dma_srcCookieProc;
79605 +} E3_DMA;
79606 +
79607 +
79608 +/*
79609 + * Word-swapped version of DMA descriptor.
79610 + * This is used by the UltraSPARC code to format the descriptor
79611 + * in main memory before block-copying it down to Elan SDRAM.
79612 + * In the process it does a dword (64-bit) conversion and so swaps
79613 + * the word order on a double-word pair basis
79614 + */
79615 +typedef struct e3_dma_swapped
79616 +{
79617 +    E3_uint32          dma_size;
79618 +    E3_DmaType         dma_u;
79619 +    E3_Addr            dma_dest;
79620 +    E3_Addr            dma_source;
79621 +    E3_CookieVProc     dma_destCookieProc;
79622 +    E3_Addr            dma_destEvent;
79623 +    E3_CookieVProc     dma_srcCookieProc;
79624 +    E3_Addr            dma_srcEvent;
79625 +} E3_DMA_SWAPPED;
79626 +
79627 +/* Define a Main memory structure for DMA desc based on Endianess of machine */
79628 +#if defined(__LITTLE_ENDIAN__)
79629 +#define E3_DMA_MAIN E3_DMA
79630 +#else
79631 +#define E3_DMA_MAIN E3_DMA_SWAPPED
79632 +#endif
79633 +
79634 +#define dma_type        dma_u.type
79635 +#define dma_failCount    dma_u.s.failCount
79636 +#define dma_isRemote     dma_u.s.isRemote
79637 +#define dma_opCode       dma_u.s.opCode
79638 +#define dma_direction    dma_u.s.direction
79639 +#define dma_dataType     dma_u.s.dataType
79640 +#define dma_queueContext dma_u.s.Context
79641 +
79642 +#define dma_destCookieVProc   dma_destCookieProc.cookie_vproc
79643 +#define dma_destVProc        dma_destCookieProc.s.vproc
79644 +#define dma_destCookie       dma_destCookieProc.s.cookie
79645 +#define dma_srcCookieVProc    dma_srcCookieProc.cookie_vproc
79646 +#define dma_srcVProc         dma_srcCookieProc.s.vproc
79647 +#define dma_srcCookie        dma_srcCookieProc.s.cookie
79648 +
79649 +/*
79650 + * Values for dma_opCode
79651 + */
79652 +#define DMA_NORMAL                     0
79653 +#define DMA_QUEUED                     1
79654 +#define DMA_NORMAL_BROADCAST           2
79655 +#define DMA_QUEUED_BROADCAST           3
79656 +#define DMA_NORMAL_UNSAFE              4
79657 +#define DMA_QUEUED_UNSAFE              5
79658 +#define DMA_NORMAL_BROADCAST_UNSAFE    6
79659 +#define DMA_QUEUED_BROADCAST_UNSAFE    7
79660 +
79661 +/*
79662 + * Values for dma_direction
79663 + */
79664 +#define DMA_WRITE              0
79665 +#define DMA_READ_REQUEUE       1
79666 +#define DMA_READ               3
79667 +#define DMA_READ_BROADCAST     7
79668 +
79669 +/*
79670 + * Values for dma_dataType
79671 + */
79672 +#define DMA_BYTE               0
79673 +#define DMA_HALFWORD           1
79674 +#define DMA_WORD               2
79675 +#define DMA_DOUBLE             3
79676 +
79677 +/* OUT OF DATE ?
79678 +  #define DMA_OPCODE_SHIFT     3
79679 +  #define DMA_FAILCOUNT_SHIFT  9
79680 +*/
79681 +#define DMA_TYPE_ISREMOTE      (1 << 15)
79682 +#define DMA_TYPE_READ          (3 << 2)
79683 +#define DMA_TYPE_READ_REQUEUE  (1 << 2)
79684 +#define DMA_TYPE_DIRECTION_MASK        (3 << 2)
79685 +
79686 +#endif /* __ELAN3_DMA_H */
79687 +
79688 +/*
79689 + * Local variables:
79690 + * c-file-style: "stroustrup"
79691 + * End:
79692 + */
79693 Index: linux-2.6.5-7.191/include/elan3/e3types.h
79694 ===================================================================
79695 --- linux-2.6.5-7.191.orig/include/elan3/e3types.h      2004-02-23 16:02:56.000000000 -0500
79696 +++ linux-2.6.5-7.191/include/elan3/e3types.h   2005-07-28 14:52:52.941664600 -0400
79697 @@ -0,0 +1,82 @@
79698 +/*
79699 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79700 + *
79701 + *    For licensing information please see the supplied COPYING file
79702 + *
79703 + */
79704 +
79705 +#ifndef __ELAN3_E3TYPES_H
79706 +#define __ELAN3_E3TYPES_H
79707 +
79708 +#ident "$Id: e3types.h,v 1.18 2002/08/09 11:23:33 addy Exp $"
79709 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/e3types.h,v $ */
79710 +
79711 +#include <qsnet/config.h>
79712 +/*
79713 + * "flip" values for correctly indexing into
79714 + * block data which was copied from the Elan
79715 + * using 64 bit accesses.
79716 + */
79717 +#if defined(__LITTLE_ENDIAN__)
79718 +#  define ByteEndianFlip  0
79719 +#  define ShortEndianFlip 0
79720 +#  define WordEndianFlip  0
79721 +#else
79722 +#  define ByteEndianFlip  7
79723 +#  define ShortEndianFlip 3
79724 +#  define WordEndianFlip  1
79725 +#endif
79726 +
79727 +
79728 +#ifndef _ASM
79729 +
79730 +typedef signed int        E3_int;
79731 +typedef unsigned int              E3_uint;
79732 +
79733 +typedef signed char       E3_int8;
79734 +typedef unsigned char     E3_uint8;
79735 +
79736 +typedef signed short      E3_int16;
79737 +typedef unsigned short            E3_uint16;
79738 +
79739 +typedef signed int        E3_int32;
79740 +typedef unsigned int              E3_uint32;
79741 +
79742 +#ifdef __ELAN3__
79743 +typedef signed long long   E3_int64;
79744 +typedef unsigned long long E3_uint64;
79745 +#ifdef _MAIN_LP64
79746 +/* NOTE: If the Main is 64-bit we declare the Elan thread's
79747 + * E3_uintptr to be 64-bits too
79748 + */
79749 +typedef unsigned long long E3_uintptr;
79750 +#else
79751 +typedef unsigned long      E3_uintptr;
79752 +#endif
79753 +
79754 +#else
79755 +
79756 +#ifdef _LP64
79757 +typedef signed long        E3_int64;
79758 +typedef unsigned long      E3_uint64;
79759 +typedef unsigned long      E3_uintptr;
79760 +#else /* _ILP32 */
79761 +typedef signed long long   E3_int64;
79762 +typedef unsigned long long E3_uint64;
79763 +typedef unsigned long      E3_uintptr;
79764 +#endif
79765 +
79766 +#endif /* __ELAN3__ */
79767 +
79768 +/* 32-bit Elan3 address */
79769 +typedef E3_uint32         E3_Addr;
79770 +
79771 +#endif /* _ASM */
79772 +
79773 +#endif /* __ELAN3_E3TYPES_H */
79774 +
79775 +/*
79776 + * Local variables:
79777 + * c-file-style: "stroustrup"
79778 + * End:
79779 + */
79780 Index: linux-2.6.5-7.191/include/elan3/elan3mmu.h
79781 ===================================================================
79782 --- linux-2.6.5-7.191.orig/include/elan3/elan3mmu.h     2004-02-23 16:02:56.000000000 -0500
79783 +++ linux-2.6.5-7.191/include/elan3/elan3mmu.h  2005-07-28 14:52:52.942664448 -0400
79784 @@ -0,0 +1,346 @@
79785 +/*
79786 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79787 + *
79788 + *    For licensing information please see the supplied COPYING file
79789 + *
79790 + */
79791 +
79792 +#ifndef __ELAN3_ELAN3MMU_H
79793 +#define __ELAN3_ELAN3MMU_H
79794 +
79795 +#ident "$Id: elan3mmu.h,v 1.40.2.1 2004/12/14 10:19:48 mike Exp $"
79796 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu.h,v $*/
79797 +
79798 +
79799 +#include <elan3/pte.h>
79800 +
79801 +#ifdef __cplusplus
79802 +extern "C" {
79803 +#endif
79804 +
79805 +typedef struct elan3mmu_global_stats
79806 +{
79807 +    int                version;
79808 +    int                pteload;
79809 +    int                pteunload;
79810 +    int                ptereload;
79811 +
79812 +    int                streamable_alloc;
79813 +    int                streamable_free;
79814 +    int                streamable_alloc_failed;
79815 +
79816 +    int                num_ptbl_level[4]; /* number of level N  ptbls */
79817 +
79818 +    int                create_ptbl_failed;                     /* count of ptbl creation failure */
79819 +
79820 +    int         lX_alloc_l3;                           /* count of l3 ptbls used as lX */
79821 +    int         lX_freed_l3;                           /* count of lX ptbls freed as l3 */
79822 +
79823 +    int                l2_alloc_l3;                            /* count of l3 ptbls used as l2 */
79824 +    int                l2_freed_l3;                            /* count of l2 ptbls freed as l3 */
79825 +
79826 +    int                stolen_ptbls;                           /* count of l3 ptbls stolen */
79827 +} ELAN3MMU_GLOBAL_STATS;
79828 +
79829 +#define ELAN3MMU_STATS_VERSION         1
79830 +
79831 +#define ELAN3MMU_STAT(what)            (elan3mmu_global_stats.what++)
79832 +#define ELAN3MMU_SET_STAT(what,count)  (elan3mmu_global_stats.what = count)
79833 +
79834 +#ifdef __KERNEL__
79835 +
79836 +#define ELAN3_PT_SHIFT (ELAN3_L2_SHIFT + 2)
79837 +
79838 +typedef struct elan3_ptbl
79839 +{
79840 +    struct elan3_ptbl   *ptbl_parent;                          /* Parent page table, or next on freelist */
79841 +    struct elan3mmu     *ptbl_elan3mmu;                        /* elan3mmu we're allocated for */
79842 +    E3_Addr              ptbl_base;                            /* Virtual address we're mapping */
79843 +    u_char               ptbl_index;                           /* Index in ptbl group */
79844 +    u_char               ptbl_valid;                           /* Number of valid entries */
79845 +    u_char               ptbl_flags;                           /* Flags, defined below. */
79846 +    u_char               ptbl_spare;
79847 +} ELAN3_PTBL;
79848 +
79849 +#define ptbl_next      ptbl_parent                             /* Parent pointer is next pointer when on free list */
79850 +
79851 +#define PTBL_LEVEL_X            0x00
79852 +#define PTBL_LEVEL_1           0x01
79853 +#define PTBL_LEVEL_2           0x02
79854 +#define PTBL_LEVEL_3           0x03
79855 +#define PTBL_LEVEL_MASK                0x03
79856 +#define PTBL_LOCKED            0x04                            /* Page table is locked,  protects all fields */
79857 +#define PTBL_KEEP              0x08                            /* This ptbl is not to be stolen */
79858 +#define PTBL_ALLOCED           0x10                            /* This ptbl has been allocated, and is not free */
79859 +#define PTBL_GROUPED           0x20                            /* This ptbl is a member of a group of ptbls */
79860 +#define PTBL_KERNEL            0x80                            /* This ptbl is allocated for the kernel */
79861 +
79862 +#define PTBL_LEVEL(flags)      ((flags) & PTBL_LEVEL_MASK)
79863 +#define PTBL_IS_LOCKED(flags)  (((flags) & (PTBL_LOCKED|PTBL_ALLOCED)) == (PTBL_LOCKED|PTBL_ALLOCED))
79864 +
79865 +#if ELAN3_PAGE_SHIFT == 13
79866 +#  define PTBL_GROUP_SIZE      8192                            /* page table groups are 8k bytes */
79867 +#  define PTBLS_PER_GROUP_L1   8                               /* Number of level 1 tables in a group */
79868 +#  define PTBLS_PER_GROUP_L2   32                              /*   ... level 2 */
79869 +#  define PTBLS_PER_GROUP_L3   32                              /*   ... level 3 */
79870 +#  define PTBLS_PER_GROUP_LX   32                              /*   ... level X */
79871 +#  define PTBLS_PER_GROUP_MAX  32                              /*  max of l1,l2,l3,lX */
79872 +#else
79873 +#  define PTBL_GROUP_SIZE      4096                            /* page table groups are 4k bytes */
79874 +#  define PTBLS_PER_GROUP_L1   4                               /* Number of level 1 tables in a group */
79875 +#  define PTBLS_PER_GROUP_L2   16                              /*   ... level 2 */
79876 +#  define PTBLS_PER_GROUP_L3   8                               /*   ... level 3 */
79877 +#  define PTBLS_PER_GROUP_LX   16                              /*   ... level X */
79878 +#  define PTBLS_PER_GROUP_MAX  16                              /*  max of l1,l2,l3,lX */
79879 +#endif
79880 +
79881 +#define HMES_PER_GROUP         (PTBLS_PER_GROUP_L3*ELAN3_L3_ENTRIES)
79882 +
79883 +#if ELAN3_PAGE_SHIFT == 13
79884 +#  define PTBLS_PER_PTBL_L1    4                               /* 256 PTPs */
79885 +#  define PTBLS_PER_PTBL_L2    1                               /* 64 PTPs */
79886 +#  define PTBLS_PER_PTBL_L3    1                               /* 32 PTEs */
79887 +#else
79888 +#  define PTBLS_PER_PTBL_L1    4                               /* 256 PTPs */
79889 +#  define PTBLS_PER_PTBL_L2    1                               /* 64 PTPs */
79890 +#  define PTBLS_PER_PTBL_L3    2                               /* 64 PTEs */
79891 +#endif
79892 +
79893 +#define ELAN3_LX_ENTRIES     (32) 
79894 +#define PTBLS_PER_PTBL_LX   (1)        
79895 +
79896 +#define L1_VA_PER_PTBL (ELAN3_L1_SIZE*(ELAN3_L1_ENTRIES/PTBLS_PER_PTBL_L1))    /* 4 ptbl for L1 */
79897 +#define L2_VA_PER_PTBL (ELAN3_L2_SIZE*(ELAN3_L2_ENTRIES/PTBLS_PER_PTBL_L2))    /* 1 ptbl for L2 */
79898 +#define L3_VA_PER_PTBL (ELAN3_L3_SIZE*(ELAN3_L3_ENTRIES/PTBLS_PER_PTBL_L3))    /* 1 ptbl for L3 */
79899 +
79900 +typedef struct elan3_ptbl_gr
79901 +{
79902 +    struct elan3_ptbl_gr *pg_next;                             /* Next in list. */
79903 +    int                         pg_level;                              /* Level PG allocated for */
79904 +    sdramaddr_t                 pg_addr;                               /* sdram offset of ptes/ptps */    
79905 +    ELAN3_PTBL          pg_ptbls[PTBLS_PER_GROUP_MAX];         /* The actual page tables */
79906 +} ELAN3_PTBL_GR;
79907 +
79908 +
79909 +/*
79910 + * The elan3mmu structure is the mmu dependent hardware address translation
79911 + * structure linked to the address space structure to show the translations
79912 + * provided by the elan for an address space.
79913 + *
79914 + * We also have a doubly linked list of 'regions' which allow the 
79915 + * elan3mmu code to determine the access permissions for the elan 
79916 + * dependant on the virtual address that the translation is being
79917 + * loaded at.
79918 + */
79919 +
79920 +typedef struct elan3mmu_rgn
79921 +{
79922 +    struct elan3mmu_rgn *rgn_mnext;                            /* Doubly linked list of regions */
79923 +    struct elan3mmu_rgn *rgn_mprev;                            /*   sorted on main address */ 
79924 +    caddr_t             rgn_mbase;                             /* main address of base of region */
79925 +
79926 +    struct elan3mmu_rgn *rgn_enext;                            /* Doubly linked list of regions */
79927 +    struct elan3mmu_rgn *rgn_eprev;                            /*   sorted on elan address */
79928 +    E3_Addr             rgn_ebase;                             /* elan address of base of region */
79929 +
79930 +    u_int               rgn_len;                               /* length of region */
79931 +    u_int               rgn_perm;                              /* elan access permission */
79932 +} ELAN3MMU_RGN;
79933 +
79934 +typedef struct elan3mmu
79935 +{
79936 +    spinlock_t             elan3mmu_lock;                      /* spinlock lock for regions */
79937 +    ELAN3MMU_RGN           *elan3mmu_mrgns;                    /* Doubly linked list of memory regions */
79938 +    ELAN3MMU_RGN          *elan3mmu_mtail;                     /* Last memory region on list */
79939 +    ELAN3MMU_RGN          *elan3mmu_mrgnlast;                  /* Last region 'hit' */
79940 +
79941 +    ELAN3MMU_RGN           *elan3mmu_ergns;                    /* Doubly linked list of memory regions */
79942 +    ELAN3MMU_RGN          *elan3mmu_etail;                     /* Last memory region on list */
79943 +    ELAN3MMU_RGN          *elan3mmu_ergnlast;                  /* Last region 'hit' */
79944 +
79945 +    struct elan3_dev        *elan3mmu_dev;                     /* Elan device we're using. */
79946 +    struct elan3_ctxt     *elan3mmu_ctxt;                      /* Elan ctxt we're associated with */
79947 +
79948 +    sdramaddr_t             elan3mmu_ctp;                      /* Context table entry for our context */
79949 +    ELAN3_PTBL            *elan3mmu_l1ptbl;                    /* Level 1 Page table (first of 4) */
79950 +
79951 +    spinlock_t             elan3mmu_lXptbl_lock;               /* spinlock for level X table list */
79952 +    ELAN3_PTBL              *elan3mmu_lXptbl;                    /* Level X Page table list         */
79953 +
79954 +#ifdef LINUX
79955 +    struct mm_struct       *elan3mmu_coproc_mm;                        /* Linux mm we're mapping */
79956 +#endif
79957 +} ELAN3MMU;
79958 +
79959 +_NOTE(LOCK_ORDER(elan3mmu::elan3mmu_lock elan3_dev::IntrLock))
79960 +
79961 +_NOTE(MUTEX_PROTECTS_DATA(elan3mmu::elan3mmu_lock,
79962 +                         elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
79963 +                         elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
79964 +/* protected by dev->IntrLock for read by device driver */
79965 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
79966 +                                elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
79967 +
79968 +_NOTE(SCHEME_PROTECTS_DATA("only set to valid region", 
79969 +                          elan3mmu::elan3mmu_ergnlast elan3mmu::elan3mmu_mrgnlast))
79970 +
79971 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
79972 +                         elan3mmu::elan3mmu_l1ptbl 
79973 +                         elan3mmu::elan3mmu_ctp 
79974 +                         elan3mmu::elan3mmu_dev))
79975 +
79976 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_l1ptbl
79977 +                                elan3mmu::elan3mmu_ctp 
79978 +                                elan3mmu::elan3mmu_dev))
79979 +
79980 +/*
79981 + * Macros for accessing ptes/ptbls/ptbl_grs
79982 + */
79983 +
79984 +#define OFFSETOF(object,member)        /* calculate offset of structure member */ \
79985 +       ((size_t) (&(((object *)0)->member)))
79986 +#define PTBL_TO_GR(ptbl)       /* convert ptbl to ptbl group */ \
79987 +       ((ELAN3_PTBL_GR *) ((caddr_t) ((ptbl) - (ptbl)->ptbl_index) - OFFSETOF(ELAN3_PTBL_GR,pg_ptbls[0])))
79988 +#define PTBL_TO_PTADDR(ptbl)   /* convert ptbl to a ptp pointing at it */ \
79989 +        (PTBL_TO_GR(ptbl)->pg_addr + ((ptbl)->ptbl_index<<ELAN3_PT_SHIFT))
79990 +#define PTE_TO_HME(ptbl,pte)   /* convert pte to corresponding hme */ \
79991 +        (PTBL_TO_GR(ptbl)->pg_hmes + ((pte) - (ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr))
79992 +#define HME_TO_PTE(ptebl,hme)  /* convert hme to corresponding pte */ \
79993 +        ((ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr + ((hme) - (PTBL_TO_GR(ptbl)->pg_hmes)))
79994 +
79995 +
79996 +/* Flags for lock_ptbl */
79997 +#define LK_PTBL_NOWAIT         0x1
79998 +#define LK_PTBL_FAILOK         0x2
79999 +
80000 +/* Return values for lock_ptbl */
80001 +#define LK_PTBL_OK             0x0
80002 +#define LK_PTBL_MISMATCH       0x1
80003 +#define LK_PTBL_FAILED         0x2
80004 +
80005 +/* Flags for elan3mmu_ptesync */
80006 +#define        NO_MLIST_LOCK   0
80007 +#define        MLIST_LOCKED    1
80008 +
80009 +/* Flags for elan3mmu_pteload */
80010 +#define PTE_LOAD               0x00
80011 +#define PTE_LOAD_LOCK          0x01                            /* translation should be locked */
80012 +#define PTE_LOAD_NOSYNC                0x02                            /* ref/mod bits should not be sync'ed to page */
80013 +#define PTE_NO_SLEEP            0x04                            /* true if we cant sleep */
80014 +#define PTE_NO_STEAL           0x08                            /* true if we don't want to steal ptbls */
80015 +
80016 +#define PTE_LOAD_ENDIAN_MASK   0x10                            /* mask for endian-ness */
80017 +#define PTE_LOAD_LITTLE_ENDIAN 0x00                            /* translation is to little-endian memory */
80018 +#define PTE_LOAD_BIG_ENDIAN    0x10                            /* translation is to big-endian memory */
80019 +
80020 +
80021 +/* Flags for elan3mmu_unload */
80022 +#define PTE_UNLOAD             0x00
80023 +#define PTE_UNLOAD_UNLOCK      0x01
80024 +#define PTE_UNLOAD_NOFLUSH     0x02
80025 +#define PTE_UNLOAD_NOSYNC      0x04
80026 +
80027 +extern int         elan3mmu_debug;
80028 +#ifdef DEBUG_PRINTF
80029 +#  define HAT_PRINTF0(n,msg)            ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg)             : (void) 0)
80030 +#  define HAT_PRINTF1(n,msg,a)          ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a)           : (void) 0)
80031 +#  define HAT_PRINTF2(n,msg,a,b)        ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b)         : (void) 0)
80032 +#  define HAT_PRINTF3(n,msg,a,b,c)      ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c)       : (void) 0)
80033 +#  define HAT_PRINTF4(n,msg,a,b,c,d)    ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d)     : (void) 0)
80034 +#  define HAT_PRINTF5(n,msg,a,b,c,d,e)  ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e)   : (void) 0)
80035 +#  define HAT_PRINTF6(n,msg,a,b,c,d,e,f) ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e,f) : (void) 0)
80036 +#  ifdef LINUX
80037 +#    define HAT_PRINTF(n,args...)        ((elan3mmu_debug & n) ? (void) elan3_debugf(NULL, DBG_HAT, ##args) : (void) 0)
80038 +#  endif
80039 +#else
80040 +#  define HAT_PRINTF0(n,msg)
80041 +#  define HAT_PRINTF1(n,msg,a)
80042 +#  define HAT_PRINTF2(n,msg,a,b)
80043 +#  define HAT_PRINTF3(n,msg,a,b,c)
80044 +#  define HAT_PRINTF4(n,msg,a,b,c,d)
80045 +#  define HAT_PRINTF5(n,msg,a,b,c,d,e)
80046 +#  define HAT_PRINTF6(n,msg,a,b,c,d,e,f)
80047 +#  ifdef LINUX
80048 +#    define HAT_PRINTF(n,args...)
80049 +#  endif
80050 +#endif
80051 +
80052 +/* elan3mmu_generic.c */
80053 +extern ELAN3MMU_GLOBAL_STATS elan3mmu_global_stats;
80054 +
80055 +extern void         elan3mmu_init (void);
80056 +extern void         elan3mmu_fini (void);
80057 +
80058 +extern ELAN3MMU           *elan3mmu_alloc (struct elan3_ctxt *ctxt);
80059 +extern void        elan3mmu_free (ELAN3MMU *elan3mmu);
80060 +
80061 +extern void          elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp);
80062 +extern int          elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask);
80063 +extern void         elan3mmu_detach (ELAN3_DEV *dev, int ctx);
80064 +
80065 +extern ELAN3MMU_RGN *elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu, E3_Addr addr, int tail);
80066 +extern int           elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn);
80067 +extern ELAN3MMU_RGN *elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr);
80068 +extern ELAN3MMU_RGN *elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr);
80069 +extern ELAN3MMU_RGN *elan3mmu_findrgn_main (ELAN3MMU *elan3mmu, caddr_t addr, int tail);
80070 +extern int           elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn);
80071 +extern ELAN3MMU_RGN *elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr);
80072 +extern ELAN3MMU_RGN *elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr);
80073 +
80074 +extern int          elan3mmu_setperm (ELAN3MMU *elan3mmu, caddr_t maddr, E3_Addr eaddr, u_int len, u_int perm);
80075 +extern void         elan3mmu_clrperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len);
80076 +extern int          elan3mmu_checkperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int access);
80077 +extern caddr_t      elan3mmu_mainaddr (ELAN3MMU *elan3mmu, E3_Addr addr);
80078 +extern E3_Addr      elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr);
80079 +
80080 +extern void          elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr);
80081 +extern void          elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *);
80082 +extern void          elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *);
80083 +
80084 +extern void          elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr);
80085 +extern void         elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int flags);
80086 +extern void         elan3mmu_sync (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int clearflag);
80087 +extern void         elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock);
80088 +extern void         elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock);
80089 +extern sdramaddr_t   elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level);
80090 +extern sdramaddr_t   elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr, int *level, ELAN3_PTBL **pptbl, spinlock_t **plock, unsigned long *flags);
80091 +extern sdramaddr_t   elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr, int level, ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags);
80092 +extern void         elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int flags);
80093 +extern int           elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int flags, E3_Addr addr, spinlock_t **pl2lock, unsigned long *lock_flags);
80094 +extern int           elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int flags, E3_Addr addr, spinlock_t **pl3lock, unsigned long *lock_flags);
80095 +
80096 +extern void          elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
80097 +extern void          elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
80098 +extern void          elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
80099 +
80100 +extern int          elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags);
80101 +extern int           elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags);
80102 +extern void         elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
80103 +
80104 +/* elan3mmu_osdep.c */
80105 +extern void         elan3mmu_init_osdep (void);
80106 +extern void         elan3mmu_fini_osdep (void);
80107 +extern void         elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu);
80108 +extern void         elan3mmu_free_osdep (ELAN3MMU *elan3mmu);
80109 +extern ELAN3_PTE     elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm);
80110 +extern ELAN3_PTE     elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu);
80111 +
80112 +#if defined (DIGITAL_UNIX)
80113 +#  include <elan3/elan3mmu_dunix.h>
80114 +#elif defined (LINUX)
80115 +#  include <elan3/elan3mmu_linux.h>
80116 +#endif
80117 +
80118 +#endif /* __KERNEL__ */
80119 +
80120 +#ifdef __cplusplus
80121 +}
80122 +#endif
80123 +
80124 +#endif /* __ELAN3_ELAN3MMU_H */
80125 +
80126 +/*
80127 + * Local variables:
80128 + * c-file-style: "stroustrup"
80129 + * End:
80130 + */
80131 Index: linux-2.6.5-7.191/include/elan3/elan3mmu_linux.h
80132 ===================================================================
80133 --- linux-2.6.5-7.191.orig/include/elan3/elan3mmu_linux.h       2004-02-23 16:02:56.000000000 -0500
80134 +++ linux-2.6.5-7.191/include/elan3/elan3mmu_linux.h    2005-07-28 14:52:52.942664448 -0400
80135 @@ -0,0 +1,39 @@
80136 +/*
80137 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80138 + *
80139 + *    For licensing information please see the supplied COPYING file
80140 + *
80141 + */
80142 +
80143 +#ifndef __ELAN3_MMU_LINUX_H
80144 +#define __ELAN3_MMU_LINUX_H
80145 +
80146 +#ident "$Id: elan3mmu_linux.h,v 1.12 2003/09/24 13:57:24 david Exp $"
80147 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu_linux.h,v $*/
80148 +
80149 +/* XXX copy of elan3mmu_dunix.h */
80150 +
80151 +#define ALLOC_ELAN3MMU(ptr,cansleep)           KMEM_ALLOC(ptr, ELAN3MMU *, sizeof (ELAN3MMU), cansleep)
80152 +#define ALLOC_PTBL_GR(ptr,cansleep)            KMEM_ALLOC(ptr, ELAN3_PTBL_GR *, sizeof (ELAN3_PTBL_GR), cansleep)
80153 +#define ALLOC_ELAN3MMU_RGN(ptr,cansleep)       KMEM_ALLOC(ptr, ELAN3MMU_RGN *, sizeof (ELAN3MMU_RGN), cansleep)
80154 +#define ALLOC_HMENTS(ptr,cansleep)             KMEM_ALLOC(ptr, ELAN3_HMENT *, sizeof (ELAN3_HMENT), cansleep)
80155 +
80156 +#define FREE_ELAN3MMU(ptr)                     KMEM_FREE(ptr,sizeof (ELAN3MMU))
80157 +#define FREE_PTBL_GR(ptr)                      KMEM_FREE(ptr,sizeof (ELAN3_PTBL_GR))
80158 +#define FREE_ELAN3MMU_RGN(ptr)                 KMEM_FREE(ptr,sizeof (ELAN3MMU_RGN))
80159 +#define FREE_HMENTS(ptr)                       KMEM_FREE(ptr,sizeof (ELAN3_HMENT))
80160 +
80161 +extern void         elan3mmu_init_osdep(void);
80162 +extern void         elan3mmu_fini_osdep(void);
80163 +
80164 +extern void          elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
80165 +extern void          elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
80166 +extern void          elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu);
80167 +
80168 +#endif
80169 +
80170 +/*
80171 + * Local variables:
80172 + * c-file-style: "stroustrup"
80173 + * End:
80174 + */
80175 Index: linux-2.6.5-7.191/include/elan3/elan3ops.h
80176 ===================================================================
80177 --- linux-2.6.5-7.191.orig/include/elan3/elan3ops.h     2004-02-23 16:02:56.000000000 -0500
80178 +++ linux-2.6.5-7.191/include/elan3/elan3ops.h  2005-07-28 14:52:52.942664448 -0400
80179 @@ -0,0 +1,42 @@
80180 +/*
80181 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80182 + *
80183 + *    For licensing information please see the supplied COPYING file
80184 + *
80185 + */
80186 +
80187 +/* $Id: elan3ops.h,v 1.3 2003/09/24 13:57:24 david Exp $ */
80188 +/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3ops.h,v $ */
80189 +
80190 +#ifndef _ELAN3_OPS_H
80191 +#define _ELAN3_OPS_H
80192 +
80193 +int get_position          (void *arg, ELAN_POSITION *position);
80194 +int set_position          (void *arg, unsigned short nodeId, unsigned short numNodes);
80195 +
80196 +int elan3mod_create_cap   (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
80197 +int elan3mod_destroy_cap  (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
80198 +
80199 +int elan3mod_create_vp    (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
80200 +int elan3mod_destroy_vp   (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
80201 +
80202 +int elan3mod_attach_cap   (void *arg_ctxt, ELAN_CAPABILITY *cap);
80203 +int elan3mod_detach_cap   (void *arg_ctxt);
80204 +
80205 +extern ELAN_DEV_OPS elan3_dev_ops;
80206 +
80207 +int stats_get_index_name  (void *arg, uint index, caddr_t name);
80208 +int stats_get_block       (void *arg, uint entries, ulong *value);
80209 +int stats_clear_block     (void *arg);
80210 +
80211 +int elan3_register_dev_stats   (ELAN3_DEV * dev);
80212 +void elan3_deregister_dev_stats (ELAN3_DEV * dev);
80213 +
80214 +
80215 +#endif /* _ELAN3_OPS_H */
80216 +
80217 +/*
80218 + * Local variables:
80219 + * c-file-style: "linux"
80220 + * End:
80221 + */
80222 Index: linux-2.6.5-7.191/include/elan3/elanctxt.h
80223 ===================================================================
80224 --- linux-2.6.5-7.191.orig/include/elan3/elanctxt.h     2004-02-23 16:02:56.000000000 -0500
80225 +++ linux-2.6.5-7.191/include/elan3/elanctxt.h  2005-07-28 14:52:52.944664144 -0400
80226 @@ -0,0 +1,856 @@
80227 +/*
80228 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80229 + *
80230 + *    For licensing information please see the supplied COPYING file
80231 + *
80232 + */
80233 +
80234 +#ifndef _ELAN3_ELANCTXT_H
80235 +#define _ELAN3_ELANCTXT_H
80236 +
80237 +#ident "$Id: elanctxt.h,v 1.81 2003/09/24 13:57:24 david Exp $"
80238 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanctxt.h,v $*/
80239 +
80240 +#ifdef __cplusplus
80241 +extern "C" {
80242 +#endif
80243 +
80244 +#include <elan3/elanregs.h>
80245 +#include <elan3/vmseg.h>
80246 +
80247 +#define BumpUserStat(ctxt, stat)       ((ctxt)->FlagPage->stat++)
80248 +
80249 +#if defined(__LITTLE_ENDIAN__)
80250 +
80251 +typedef union _CProcTrapBuf
80252 +{
80253 +    E3_uint64 Align64;
80254 +    struct
80255 +    {
80256 +       E3_uint32 Areg;
80257 +       E3_uint32 Breg;
80258 +    } r;
80259 +    struct
80260 +    {
80261 +       E3_uint32 Addr;
80262 +       E3_uint32 ContextType;
80263 +    } s;
80264 +} CProcTrapBuf_BE;
80265 +
80266 +typedef E3_EventInt        E3_EventInt_BE;
80267 +typedef E3_IprocTrapHeader E3_IprocTrapHeader_BE;
80268 +typedef E3_IprocTrapData   E3_IprocTrapData_BE;
80269 +typedef E3_FaultSave      E3_FaultSave_BE;
80270 +
80271 +typedef union
80272 +{
80273 +    E3_uint64  Align64;
80274 +    E3_DMA      s;
80275 +} E3_DMA_BE;
80276 +
80277 +typedef E3_ThreadQueue     E3_ThreadQueue_BE;
80278 +
80279 +#else
80280 +
80281 +/* "Big-Endian" data structures copied by 64 bit loads, these are 32 bit word flipped */
80282 +/* from the corresponding data structure. */
80283 +
80284 +typedef union _CProcTrapBuf
80285 +{
80286 +    E3_uint64 Align64;
80287 +    struct
80288 +    {
80289 +       E3_uint32 Breg;
80290 +       E3_uint32 Areg;
80291 +    } r;
80292 +    struct
80293 +    {
80294 +       E3_uint32 ContextType;
80295 +       E3_uint32 Addr;
80296 +    } s;
80297 +} CProcTrapBuf_BE;
80298 +
80299 +typedef union _E3_EventInt_BE
80300 +{
80301 +    E3_uint64    Align64;
80302 +    struct {
80303 +       E3_uint32 EventContext; /* Bits 16 to 28 */
80304 +       E3_uint32 IntCookie;
80305 +    } s;
80306 +} E3_EventInt_BE;
80307 +
80308 +typedef union _E3_IprocTrapHeader_BE
80309 +{
80310 +   E3_uint64            Align64;
80311 +
80312 +   struct
80313 +   {
80314 +      E3_uint32                 TrAddr;
80315 +      E3_TrTypeCntx     TrTypeCntx;
80316 +      union
80317 +      {
80318 +        E3_IProcStatus_Reg u_IProcStatus;
80319 +        E3_uint32          u_TrData1;
80320 +      } ipsotd;
80321 +      E3_uint32                 TrData0;
80322 +   } s;
80323 +} E3_IprocTrapHeader_BE;
80324 +
80325 +typedef E3_IprocTrapData E3_IprocTrapData_BE;
80326 +
80327 +typedef union _E3_FaultSave_be
80328 +{
80329 +    E3_uint64                  Align64;
80330 +    struct {
80331 +       volatile E3_uint32      FaultContext;
80332 +       E3_FaultStatusReg       FSR;
80333 +       volatile E3_uint32      EventAddress;
80334 +       volatile E3_uint32      FaultAddress;
80335 +    } s;
80336 +} E3_FaultSave_BE;
80337 +
80338 +typedef union _e3_dma_be
80339 +{
80340 +    E3_uint64          Align64;
80341 +    struct {
80342 +       E3_uint32       dma_size;
80343 +       E3_DmaType      dma_u;
80344 +       E3_Addr         dma_dest;
80345 +       E3_Addr         dma_source;
80346 +       E3_CookieVProc  dma_destCookieProc;
80347 +       E3_Addr         dma_destEvent;
80348 +       E3_CookieVProc  dma_srcCookieProc;
80349 +       E3_Addr         dma_srcEvent;
80350 +    } s;
80351 +} E3_DMA_BE;
80352 +
80353 +typedef union _E3_ThreadQueue_BE
80354 +{
80355 +   E3_uint64   Align64;
80356 +   struct
80357 +   {
80358 +       /* copied by 64 bit copy from elan to main */
80359 +       E3_uint32 :3;           /* Bits 29 to 31 */
80360 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
80361 +       E3_uint32 :16;          /* Bits 0  to 15 */
80362 +       E3_Addr  Thread;        /* Bits 32 to 63 */
80363 +   } s;
80364 +} E3_ThreadQueue_BE;
80365 +
80366 +#endif /* defined(__LITTLE_ENDIAN__) */
80367 +
80368 +typedef struct neterr_msg
80369 +{
80370 +    E3_uint32          Rail;                                   /* Rail error received on */
80371 +    ELAN_CAPABILITY    SrcCapability;                          /* Capability of source of packet */
80372 +    ELAN_CAPABILITY    DstCapability;                          /* Capability of dest of packet */
80373 +
80374 +    E3_uint32          DstProcess;                             /* Virtual Process of dest of packet */
80375 +    E3_Addr            CookieAddr;                             /* Cookie Address (or NULL for DMA) */
80376 +    E3_uint32          CookieVProc;                            /* Cookie and VP (identifies DMA) */
80377 +    E3_uint32          NextCookie;                             /* Next Cookie value (for thread) */
80378 +    E3_uint32          WaitForEop;                             /* Wait for EOP transaction */
80379 +} NETERR_MSG;
80380 +
80381 +#ifdef __KERNEL__
80382 +
80383 +/*
80384 + * Associated with each input channel can be a network error
80385 + * resolver structure, which can be queued on the network 
80386 + * error resolver threads to perform RPCs to the other kernels
80387 + * when a network error occurs with an identify transaction
80388 + * included
80389 + */
80390 +typedef struct neterr_resolver
80391 +{
80392 +    struct neterr_resolver *Next;
80393 +
80394 +    spinlock_t             Lock;
80395 +
80396 +    struct elan3_ctxt       *Ctxt;
80397 +    ELAN_LOCATION          Location;
80398 +
80399 +    int                            Completed;
80400 +    int                            Status;
80401 +    long                   Timestamp;
80402 +
80403 +    NETERR_MSG             Message;
80404 +} NETERR_RESOLVER;
80405 +
80406 +
80407 +typedef struct neterr_fixup
80408 +{
80409 +    struct neterr_fixup           *Next;
80410 +
80411 +    kcondvar_t             Wait;
80412 +    int                            Completed;
80413 +    int                            Status;
80414 +
80415 +    NETERR_MSG             Message;
80416 +} NETERR_FIXUP;
80417 +
80418 +#endif /* __KERNEL__ */
80419 +
80420 +/* Each of the following structures must be padded to a whole */
80421 +/* number of 64 bit words since the kernel uses 64 bit load/stores */
80422 +/* to transfer the elan register state. */
80423 +typedef struct command_trap
80424 +{
80425 +    E3_Status_Reg      Status;                                 /* 4  bytes */
80426 +    E3_uint32          Pad;                                    /* 4  bytes */
80427 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
80428 +    CProcTrapBuf_BE            TrapBuf;                                /* 8  bytes */
80429 +} COMMAND_TRAP;
80430 +
80431 +typedef struct thread_trap
80432 +{
80433 +    E3_uint32          Registers[32];                          /* 128 bytes */
80434 +#define REG_GLOBALS    0
80435 +#define REG_OUTS       8
80436 +#define REG_LOCALS     16
80437 +#define REG_INS                24
80438 +
80439 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
80440 +    E3_FaultSave_BE     DataFaultSave;                         /* 16 bytes */
80441 +    E3_FaultSave_BE     InstFaultSave;                         /* 16 bytes */
80442 +    E3_FaultSave_BE     OpenFaultSave;                         /* 16 bytes */
80443 +    
80444 +    E3_Status_Reg      Status;                                 /* 4 bytes */
80445 +
80446 +    E3_Addr            pc;                                     /* 4 bytes */
80447 +    E3_Addr            npc;                                    /* 4 bytes */
80448 +    E3_Addr            StartPC;                                /* 4 bytes */
80449 +    E3_Addr            sp;                                     /* 4 bytes */
80450 +    E3_uint32          mi;                                     /* 4 bytes */
80451 +    E3_TrapBits                TrapBits;                               /* 4 bytes */
80452 +    E3_DirtyBits       DirtyBits;                              /* 4 bytes */
80453 +} THREAD_TRAP;
80454 +
80455 +typedef struct dma_trap
80456 +{
80457 +    E3_DMA_BE          Desc;                                   /* 32 bytes */
80458 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
80459 +    E3_FaultSave_BE    Data0;                                  /* 16 bytes */
80460 +    E3_FaultSave_BE    Data1;                                  /* 16 bytes */
80461 +    E3_FaultSave_BE    Data2;                                  /* 16 bytes */
80462 +    E3_FaultSave_BE    Data3;                                  /* 16 bytes */
80463 +    E3_Status_Reg      Status;                                 /* 4 bytes */
80464 +    E3_DmaInfo         PacketInfo;                             /* 4 bytes */
80465 +} DMA_TRAP;
80466 +
80467 +typedef struct input_trap
80468 +{
80469 +    E3_uint32             State;                               /* 4 bytes */
80470 +    E3_Status_Reg         Status;                              /* 4 bytes */
80471 +    E3_FaultSave_BE       FaultSave;                           /* 16 bytes */
80472 +    
80473 +    u_int                 NumTransactions;                     /* 4 bytes */
80474 +    u_int                 Overflow;                            /* 4 bytes */
80475 +    u_int                 AckSent;                             /* 4 bytes */
80476 +    u_int                 BadTransaction;                      /* 4 bytes */
80477 +
80478 +    E3_IprocTrapHeader_BE *TrappedTransaction;                 /* 4 bytes */
80479 +    E3_IprocTrapData_BE   *TrappedDataBuffer;                  /* 4 bytes */
80480 +    E3_IprocTrapHeader_BE *WaitForEopTransaction;              /* 4 bytes */
80481 +    E3_IprocTrapData_BE   *WaitForEopDataBuffer;               /* 4 bytes */
80482 +    E3_IprocTrapHeader_BE *DmaIdentifyTransaction;             /* 4 bytes */
80483 +    E3_IprocTrapHeader_BE *ThreadIdentifyTransaction;          /* 4 bytes */
80484 +    E3_Addr               LockQueuePointer;                    /* 4 bytes */
80485 +    E3_Addr               UnlockQueuePointer;                  /* 4 bytes */
80486 +
80487 +    E3_IprocTrapHeader_BE  Transactions[MAX_TRAPPED_TRANS];    /* n * 8 bytes */
80488 +    E3_IprocTrapData_BE           DataBuffers[MAX_TRAPPED_TRANS];      /* n * 64 bytes */
80489 +} INPUT_TRAP;
80490 +
80491 +typedef struct input_fault_save
80492 +{
80493 +    struct input_fault_save *Next;
80494 +    E3_Addr                 Addr;
80495 +    E3_uint32               Count;
80496 +} INPUT_FAULT_SAVE;
80497 +
80498 +#define NUM_INPUT_FAULT_SAVE   32
80499 +#define MIN_INPUT_FAULT_PAGES  8
80500 +#define MAX_INPUT_FAULT_PAGES  128
80501 +
80502 +typedef E3_uint32 EVENT_COOKIE;
80503 +
80504 +#ifdef __KERNEL__
80505 +
80506 +typedef struct event_cookie_entry
80507 +{
80508 +    struct event_cookie_entry *ent_next;
80509 +    struct event_cookie_entry *ent_prev;
80510 +
80511 +    spinlock_t                ent_lock;
80512 +    unsigned                  ent_ref;
80513 +
80514 +    EVENT_COOKIE              ent_cookie;
80515 +    EVENT_COOKIE              ent_fired;
80516 +    kcondvar_t                ent_wait;
80517 +} EVENT_COOKIE_ENTRY;
80518 +
80519 +typedef struct event_cookie_table
80520 +{
80521 +    struct event_cookie_table *tbl_next;
80522 +    struct event_cookie_table *tbl_prev;
80523 +
80524 +    unsigned long              tbl_task;
80525 +    unsigned long              tbl_handle;
80526 +
80527 +    spinlock_t                tbl_lock;
80528 +    unsigned                  tbl_ref;
80529 +    EVENT_COOKIE_ENTRY        *tbl_entries;
80530 +} EVENT_COOKIE_TABLE;
80531 +
80532 +#define NBYTES_PER_SMALL_ROUTE 8
80533 +#define NBYTES_PER_LARGE_ROUTE 16
80534 +
80535 +#define ROUTE_BLOCK_SIZE       ELAN3_PAGE_SIZE
80536 +#define NROUTES_PER_BLOCK      (ROUTE_BLOCK_SIZE/NBYTES_PER_LARGE_ROUTE)
80537 +
80538 +typedef struct elan3_routes
80539 +{
80540 +    struct elan3_routes                *Next;                                  /* Can be chained together */
80541 +
80542 +    sdramaddr_t                         Routes;                                /* sdram offset of route entries */
80543 +    bitmap_t                    Bitmap[BT_BITOUL(NROUTES_PER_BLOCK)];  /* Bitmap of which entries are used */
80544 +} ELAN3_ROUTES; 
80545 +
80546 +
80547 +typedef struct elan3_route_table
80548 +{
80549 +    spinlock_t          Lock;                          /* Route lock */
80550 +    sdramaddr_t                 Table;                         /* Kernel address for route table */
80551 +    u_int               Size;                          /* # entries in route table */
80552 +
80553 +    ELAN3_ROUTES       *LargeRoutes;                   /* Large routes */
80554 +} ELAN3_ROUTE_TABLE;
80555 +
80556 +typedef struct elan3_vpseg
80557 +{
80558 +    struct elan3_vpseg         *Next;
80559 +    int                                 Process;                       /* Virtual process */
80560 +    int                                 Entries;                       /*  and # processes */
80561 +    int                                 Type;                          /* Type of cookie */
80562 +
80563 +    union
80564 +    {
80565 +       
80566 +       ELAN_CAPABILITY Capability;                     /* Capability of remote segment */
80567 +#  define SegCapability                SegUnion.Capability
80568 +       struct {
80569 +           u_short             LowProc;                        /* Base process number */
80570 +           u_short             HighProc;                       /*   and high process number */
80571 +#  define SegLowProc           SegUnion.BROADCAST.LowProc
80572 +#  define SegHighProc          SegUnion.BROADCAST.HighProc
80573 +       } BROADCAST;
80574 +    } SegUnion;
80575 +} ELAN3_VPSEG;
80576 +
80577 +#define ELAN3_VPSEG_UNINT      0                               /* Uninitialised */
80578 +#define ELAN3_VPSEG_P2P                1                               /* Point to Point */
80579 +#define ELAN3_VPSEG_BROADCAST  2                               /* Broadcast */
80580 +
80581 +#define NUM_LISTS      7                                       /* Number of "swap" lists */
80582 +
80583 +typedef struct elan3_ctxt
80584 +{
80585 +    struct elan3_ctxt    *Next;                                        /* can be queued on a task */
80586 +    struct elan3_ctxt    *Prev;
80587 +
80588 +    CtxtHandle          Handle;                                /* user handle */
80589 +    int                         RefCnt;                                /* reference count */
80590 +
80591 +    ELAN3MMU           *Elan3mmu;                              /* elan3mmu allocated for Elan translations */
80592 +
80593 +    struct elan3_ops     *Operations;                          /* User supplied helper functions */
80594 +    void               *Private;                               /* Users private pointer */
80595 +
80596 +    int                         Status;                                /* Status (guarded by dev_mutex) */
80597 +    int                         OthersState;                           /* State of halt queueing for dma/thread */
80598 +    int                         LwpCount;                              /* Number of lwp's running */
80599 +
80600 +    ELAN3_DEV          *Device;                                /* Elan device */
80601 +
80602 +    ELAN_CAPABILITY     Capability;                            /* Capability I've attached as */
80603 +    ELAN_POSITION       Position;                              /* Position when I was created */
80604 +    
80605 +    ELAN3_VPSEG                *VpSegs;                                /* List of virtual process segments */
80606 +    ELAN3_ROUTE_TABLE    *RouteTable;
80607 +
80608 +    krwlock_t           VpLock;                                /* Reader/writer lock for vp list */
80609 +    kmutex_t            SwapListsLock;                         /* mutex to lock swap lists */
80610 +    kmutex_t            CmdLock;                               /* mutex to lock trapped dma command */
80611 +    kmutex_t            CmdPortLock;                           /* mutex to load/unload commandport xlation */
80612 +
80613 +    kcondvar_t          Wait;                                  /* Condition variable to sleep on */
80614 +    kcondvar_t          CommandPortWait;                       /* Condition variable to wait for commandport */
80615 +    kcondvar_t          LwpWait;                               /* Condition variable to wait for lwps to stop */
80616 +    kcondvar_t          HaltWait;                              /* Condition variable to wait for halt */
80617 +    int                         Halted;                                /*  and flag for halt cv */
80618 +
80619 +    caddr_t             CommandPageMapping;                    /* user virtual address for command page mapping */
80620 +    ioaddr_t             CommandPage;                          /* Elan command port mapping page */
80621 +    DeviceMappingHandle  CommandPageHandle;                    /* DDI Handle */
80622 +    ioaddr_t            CommandPort;                           /* Elan command port */
80623 +    void               *CommandPortItem;                       /* Item we're re-issuing to commandport */
80624 +
80625 +    ELAN3_FLAGSTATS      *FlagPage;                            /* Page visible to user process */
80626 +
80627 +    COMMAND_TRAP       *CommandTraps;                          /* Command port traps */
80628 +    ELAN3_SPLIT_QUEUE     CommandTrapQ;
80629 +                                                                  
80630 +    CProcTrapBuf_BE    *Commands;                              /* Overflowed commands */
80631 +    ELAN3_QUEUE           CommandQ;
80632 +
80633 +    THREAD_TRAP                *ThreadTraps;                           /* Thread processor traps */
80634 +    ELAN3_QUEUE                 ThreadTrapQ;
80635 +    
80636 +    DMA_TRAP           *DmaTraps;                              /* Dma processor trapped */
80637 +    ELAN3_QUEUE                 DmaTrapQ;
80638 +
80639 +    INPUT_TRAP          Input0Trap;                            /* Inputter channel 0 trap */
80640 +    INPUT_TRAP          Input1Trap;                            /* Inputter channel 1 trap */
80641 +    NETERR_RESOLVER    *Input0Resolver;                        /* Inputter channel 0 network error resolver */
80642 +    NETERR_RESOLVER    *Input1Resolver;                        /* Inputter channel 1 network error resolver */
80643 +
80644 +    INPUT_FAULT_SAVE    InputFaults[NUM_INPUT_FAULT_SAVE];     /* stored writeblock addresses */
80645 +    INPUT_FAULT_SAVE    *InputFaultList;                       /* organized in list for LRU */
80646 +    spinlock_t          InputFaultLock;                        /* and lock for list */
80647 +
80648 +    kmutex_t            NetworkErrorLock;
80649 +    NETERR_FIXUP       *NetworkErrorFixups;
80650 +
80651 +    EVENT_COOKIE        *EventCookies;                         /* Event cookies. */
80652 +    ELAN3_QUEUE                 EventCookieQ;
80653 +
80654 +    E3_Addr            *SwapThreads;                           /* Swapped Thread Queue */
80655 +    ELAN3_QUEUE                 SwapThreadQ;
80656 +
80657 +    E3_DMA_BE          *SwapDmas;                              /* Swapped Dmas Queue */
80658 +    ELAN3_QUEUE                 SwapDmaQ;
80659 +
80660 +    int                         ItemCount[NUM_LISTS];                  /* Count of items on each swap list */
80661 +    int                         inhibit;                               /* if set lwp not to reload translations */
80662 +
80663 +    int                  Disabled;
80664 +} ELAN3_CTXT;
80665 +
80666 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock, 
80667 +                         elan3_ctxt::Status elan3_ctxt::OthersState
80668 +                         elan3_ctxt::CommandTrapQ elan3_ctxt::CommandQ elan3_ctxt::ThreadTrapQ elan3_ctxt::DmaTrapQ 
80669 +                         elan3_ctxt::Input0Trap elan3_ctxt::Input1Trap elan3_ctxt::EventCookieQ elan3_ctxt::SwapThreadQ 
80670 +                         elan3_ctxt::SwapDmaQ elan3_ctxt::CommandPortItem elan3_ctxt::LwpCount))
80671 +_NOTE(MUTEX_PROTECTS_DATA(elan3_ctxt::SwapListsLock, 
80672 +                         elan3_ctxt::ItemCount))
80673 +_NOTE(RWLOCK_PROTECTS_DATA(elan3_ctxt::VpLock, 
80674 +                          elan3_ctxt::VpSegs elan3_vpseg::Next elan3_vpseg::Process 
80675 +                          elan3_vpseg::Entries elan3_vpseg::Type))
80676 +
80677 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_ctxt::ItemCount elan3_ctxt::Status elan3_ctxt::CommandPortItem))
80678 +
80679 +_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock elan3_ctxt::CmdLock elan3_dev::IntrLock))
80680 +_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock as::a_lock))                        /* implicit by pagefault */
80681 +
80682 +#define CTXT_DETACHED                          (1 << 0)                /* Context is detached. */
80683 +#define CTXT_NO_LWPS                           (1 << 1)                /* No lwp's to handle faults */
80684 +#define CTXT_EXITING                           (1 << 2)                /* User process is exiting */
80685 +
80686 +#define CTXT_SWAPPING_OUT                      (1 << 3)                /* Context is swapping out */
80687 +#define CTXT_SWAPPED_OUT                       (1 << 4)                /* Context is swapped out */
80688 +
80689 +#define CTXT_SWAP_FREE                         (1 << 5)                /* Swap buffer is free */
80690 +#define CTXT_SWAP_VALID                                (1 << 6)                /* Swap buffer has queue entries in it */
80691 +
80692 +#define CTXT_DMA_QUEUE_FULL                    (1 << 7)                /* Dma trap queue is full */
80693 +#define CTXT_THREAD_QUEUE_FULL                 (1 << 8)                /* Thread trap queue is full */
80694 +#define CTXT_EVENT_QUEUE_FULL                  (1 << 9)                /* Event interrupt queue is full */
80695 +#define CTXT_COMMAND_OVERFLOW_ERROR            (1 << 10)               /* Trap queue overflow */
80696 +
80697 +#define CTXT_SWAP_WANTED                       (1 << 11)               /* Some one wanted to swap */
80698 +#define CTXT_WAITING_SWAPIN                    (1 << 12)               /* Someone waiting on swapin */
80699 +
80700 +#define CTXT_WAITING_COMMAND                   (1 << 13)               /* swgelan waiting on command port */
80701 +#define CTXT_COMMAND_MAPPED_MAIN               (1 << 14)               /* segelan has mapped command port */
80702 +
80703 +#define CTXT_QUEUES_EMPTY                      (1 << 15)               /* dma/thread run queues are empty */
80704 +#define CTXT_QUEUES_EMPTYING                   (1 << 16)               /* dma/thread run queues are being emptied */
80705 +
80706 +#define CTXT_USER_FILTERING                    (1 << 17)               /* user requested context filter */
80707 +
80708 +#define CTXT_KERNEL                            (1 << 18)               /* context is a kernel context */
80709 +#define CTXT_COMMAND_MAPPED_ELAN               (1 << 19)               /* command port is mapped for elan */
80710 +#define CTXT_FIXUP_NETERR                      (1 << 20)               /* fixing up a network error */
80711 +
80712 +
80713 +#define CTXT_SWAPPED_REASONS           (CTXT_NO_LWPS   |               \
80714 +                                        CTXT_DETACHED  |               \
80715 +                                        CTXT_EXITING   |               \
80716 +                                        CTXT_FIXUP_NETERR)
80717 +
80718 +#define CTXT_OTHERS_REASONS            (CTXT_EVENT_QUEUE_FULL  |       \
80719 +                                        CTXT_DMA_QUEUE_FULL    |       \
80720 +                                        CTXT_THREAD_QUEUE_FULL |       \
80721 +                                        CTXT_COMMAND_OVERFLOW_ERROR |  \
80722 +                                        CTXT_SWAPPED_REASONS)
80723 +
80724 +#define CTXT_INPUTTER_REASONS          (CTXT_USER_FILTERING |          \
80725 +                                        CTXT_OTHERS_REASONS)
80726 +
80727 +#define CTXT_COMMAND_MAPPED            (CTXT_COMMAND_MAPPED_MAIN |     \
80728 +                                        CTXT_COMMAND_MAPPED_ELAN)
80729 +
80730 +#define CTXT_IS_KERNEL(ctxt)           ((ctxt)->Status & CTXT_KERNEL)
80731 +
80732 +/*
80733 + * State values for ctxt_inputterState/ctxt_commandportStats
80734 + */
80735 +#define CTXT_STATE_OK                  0
80736 +#define CTXT_STATE_TRAPPED             1               /* Inputter channel 0 trapped */
80737 +#define CTXT_STATE_RESOLVING           2               /* An LWP is resolving the trap */
80738 +#define CTXT_STATE_NEEDS_RESTART       3               /* The trapped packet needs to be executed */
80739 +#define CTXT_STATE_NETWORK_ERROR       4               /* We're waiting on an RPC for the identify transaction */
80740 +#define CTXT_STATE_EXECUTING           5               /* An LWP is executing the trapped packet */
80741 +
80742 +/*
80743 + * State values for OthersState.
80744 + */
80745 +#define CTXT_OTHERS_RUNNING            0
80746 +#define CTXT_OTHERS_HALTING            1
80747 +#define CTXT_OTHERS_SWAPPING           2
80748 +#define CTXT_OTHERS_HALTING_MORE       3
80749 +#define CTXT_OTHERS_SWAPPING_MORE      4
80750 +#define CTXT_OTHERS_SWAPPED            5
80751 +
80752 +typedef struct elan3_ops
80753 +{
80754 +    u_int  Version;
80755 +
80756 +    int         (*Exception)   (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap);
80757 +
80758 +    /* swap item list functions */
80759 +    int  (*GetWordItem)                (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep);
80760 +    int  (*GetBlockItem)       (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep);
80761 +    void (*PutWordItem)                (ELAN3_CTXT *ctxt, int list, E3_Addr value);
80762 +    void (*PutBlockItem)       (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr);
80763 +    void (*PutbackItem)                (ELAN3_CTXT *ctxt, int list, void *item);
80764 +    void (*FreeWordItem)       (ELAN3_CTXT *ctxt, void *item);
80765 +    void (*FreeBlockItem)      (ELAN3_CTXT *ctxt, void *item);
80766 +    int  (*CountItems)         (ELAN3_CTXT *ctxt, int list);
80767 +
80768 +    /* event interrupt cookie */
80769 +    int  (*Event)              (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
80770 +
80771 +    /* swapin/swapout functions. */
80772 +    void (*Swapin)             (ELAN3_CTXT *ctxt);
80773 +    void (*Swapout)            (ELAN3_CTXT *ctxt);
80774 +
80775 +    /* Free of private data */
80776 +    void (*FreePrivate)                (ELAN3_CTXT *ctxt);
80777 +
80778 +    /* Fixup a network error */
80779 +    int  (*FixupNetworkError)  (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef);
80780 +
80781 +    /* Interrupt handler trap interface */
80782 +    int  (*DProcTrap)          (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
80783 +    int  (*TProcTrap)          (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
80784 +    int         (*IProcTrap)           (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
80785 +    int         (*CProcTrap)           (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
80786 +    int  (*CProcReissue)        (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *TrapBuf);
80787 +
80788 +    /* User memory access functions */
80789 +    int              (*StartFaultCheck)(ELAN3_CTXT *ctxt);
80790 +    void      (*EndFaultCheck)  (ELAN3_CTXT *ctxt);
80791 +
80792 +    E3_uint8  (*Load8)         (ELAN3_CTXT *ctxt, E3_Addr addr);
80793 +    void      (*Store8)                (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
80794 +    E3_uint16 (*Load16)                (ELAN3_CTXT *ctxt, E3_Addr addr);
80795 +    void      (*Store16)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
80796 +    E3_uint32 (*Load32)                (ELAN3_CTXT *ctxt, E3_Addr addr);
80797 +    void      (*Store32)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
80798 +    E3_uint64 (*Load64)                (ELAN3_CTXT *ctxt, E3_Addr addr);
80799 +    void      (*Store64)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
80800 +
80801 +} ELAN3_OPS;
80802 +
80803 +#define ELAN3_OPS_VERSION      0xdeef0001
80804 +
80805 +/*
80806 + * Flags for ops_event.
80807 + */
80808 +#define OP_INTR                        0               /* Called from interrupt handler */
80809 +#define OP_LWP                 1               /* Called from "lwp" */
80810 +
80811 +/*
80812 + * Return codes for "ops" functions.
80813 + */
80814 +#define OP_DEFER               0               /* Defer to next lower interrupt */
80815 +#define OP_IGNORE              1               /* No event handler, so ignore it */
80816 +#define OP_HANDLED             2               /* Handled event (resume thread) */
80817 +#define OP_FAILED              3               /* Failed */
80818 +
80819 +#define ELAN3_CALL_OP(ctxt,fn)                         ((ctxt)->Operations && (ctxt)->Operations->fn) ? (ctxt)->Operations->fn 
80820 +
80821 +#define ELAN3_OP_EXCEPTION(ctxt,type,proc,trap,ap)     (ELAN3_CALL_OP(ctxt,Exception)    (ctxt,type,proc,trap,ap)      : OP_IGNORE)
80822 +#define ELAN3_OP_GET_WORD_ITEM(ctxt,list,itemp,valuep) (ELAN3_CALL_OP(ctxt,GetWordItem)  (ctxt,list,itemp,valuep)      : 0)
80823 +#define ELAN3_OP_GET_BLOCK_ITEM(ctxt,list,itemp,valuep)        (ELAN3_CALL_OP(ctxt,GetBlockItem) (ctxt,list,itemp,valuep)      : 0)
80824 +#define ELAN3_OP_PUT_WORD_ITEM(ctxt,list,value)                (ELAN3_CALL_OP(ctxt,PutWordItem)  (ctxt,list,value)             : (void)0)
80825 +#define ELAN3_OP_PUT_BLOCK_ITEM(ctxt,list,ptr)         (ELAN3_CALL_OP(ctxt,PutBlockItem) (ctxt,list,ptr)               : (void)0)
80826 +#define ELAN3_OP_PUTBACK_ITEM(ctxt,list,item)          (ELAN3_CALL_OP(ctxt,PutbackItem)  (ctxt,list,item)              : (void)0)
80827 +#define ELAN3_OP_FREE_WORD_ITEM(ctxt,item)             (ELAN3_CALL_OP(ctxt,FreeWordItem) (ctxt,item)                   : (void)0)
80828 +#define ELAN3_OP_FREE_BLOCK_ITEM(ctxt,item)            (ELAN3_CALL_OP(ctxt,FreeBlockItem)(ctxt,item)                   : (void)0)
80829 +#define ELAN3_OP_COUNT_ITEMS(ctxt,list)                        (ELAN3_CALL_OP(ctxt,CountItems)(ctxt,list)                      : 0)
80830 +#define ELAN3_OP_EVENT(ctxt,cookie,flag)               (ELAN3_CALL_OP(ctxt,Event)(ctxt,cookie,flag)                    : OP_IGNORE)
80831 +#define ELAN3_OP_SWAPIN(ctxt)                          (ELAN3_CALL_OP(ctxt,Swapin)(ctxt)                               : (void)0)
80832 +#define ELAN3_OP_SWAPOUT(ctxt)                         (ELAN3_CALL_OP(ctxt,Swapout)(ctxt)                              : (void)0)
80833 +#define ELAN3_OP_FREE_PRIVATE(ctxt)                    (ELAN3_CALL_OP(ctxt,FreePrivate)(ctxt)                          : (void)0)
80834 +#define ELAN3_OP_FIXUP_NETWORK_ERROR(ctxt, nef)                (ELAN3_CALL_OP(ctxt,FixupNetworkError)(ctxt,nef)                        : OP_FAILED)
80835 +
80836 +#define ELAN3_OP_DPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,DProcTrap)(ctxt,trap)                       : OP_DEFER)
80837 +#define ELAN3_OP_TPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,TProcTrap)(ctxt,trap)                       : OP_DEFER)
80838 +#define ELAN3_OP_IPROC_TRAP(ctxt, trap, chan)          (ELAN3_CALL_OP(ctxt,IProcTrap)(ctxt,trap,chan)                  : OP_DEFER)
80839 +#define ELAN3_OP_CPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,CProcTrap)(ctxt,trap)                       : OP_DEFER)
80840 +#define ELAN3_OP_CPROC_REISSUE(ctxt,tbuf)              (ELAN3_CALL_OP(ctxt,CProcReissue)(ctxt, tbuf)                   : OP_DEFER)
80841 +
80842 +#define ELAN3_OP_START_FAULT_CHECK(ctxt)               (ELAN3_CALL_OP(ctxt,StartFaultCheck)(ctxt)                      : 0)
80843 +#define ELAN3_OP_END_FAULT_CHECK(ctxt)                 (ELAN3_CALL_OP(ctxt,EndFaultCheck)(ctxt)                                : (void)0)
80844 +#define ELAN3_OP_LOAD8(ctxt,addr)                      (ELAN3_CALL_OP(ctxt,Load8)(ctxt,addr)                           : 0)
80845 +#define ELAN3_OP_STORE8(ctxt,addr,val)                 (ELAN3_CALL_OP(ctxt,Store8)(ctxt,addr,val)                      : (void)0)
80846 +#define ELAN3_OP_LOAD16(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load16)(ctxt,addr)                          : 0)
80847 +#define ELAN3_OP_STORE16(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store16)(ctxt,addr,val)                     : (void)0)
80848 +#define ELAN3_OP_LOAD32(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load32)(ctxt,addr)                          : 0)
80849 +#define ELAN3_OP_STORE32(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store32)(ctxt,addr,val)                     : (void)0)
80850 +#define ELAN3_OP_LOAD64(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load64)(ctxt,addr)                          : 0)
80851 +#define ELAN3_OP_STORE64(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store64)(ctxt,addr,val)                     : (void)0)
80852 +
80853 +#endif /* __KERNEL__ */
80854 +
80855 +/* "list" argument to ops functions */
80856 +#define LIST_DMA_PTR           0
80857 +#define LIST_DMA_DESC          1
80858 +#define LIST_THREAD                    2
80859 +#define LIST_COMMAND           3
80860 +#define LIST_SETEVENT          4
80861 +#define LIST_FREE_WORD         5
80862 +#define LIST_FREE_BLOCK                6
80863 +
80864 +#define MAX_LISTS              7
80865 +
80866 +#if defined(__KERNEL__) && MAX_LISTS != NUM_LISTS
80867 +#  error Check NUM_LISTS == MAX_LISTS
80868 +#endif
80869 +
80870 +/*
80871 + * Values for the 'type' field to PostException().
80872 + */
80873 +#define EXCEPTION_INVALID_ADDR         1               /* FaultArea, res */
80874 +#define EXCEPTION_UNIMP_INSTR          2               /* instr */
80875 +#define EXCEPTION_INVALID_PROCESS      3               /* proc, res */
80876 +#define EXCEPTION_SIMULATION_FAILED    4               /* */
80877 +#define EXCEPTION_UNIMPLEMENTED                5               /* */
80878 +#define EXCEPTION_SWAP_FAULT           6               /* */
80879 +#define EXCEPTION_SWAP_FAILED          7               /* */
80880 +#define EXCEPTION_BAD_PACKET           8               /* */
80881 +#define EXCEPTION_FAULTED              9               /* addr */
80882 +#define EXCEPTION_QUEUE_OVERFLOW       10              /* FaultArea, TrapType */
80883 +#define EXCEPTION_COMMAND_OVERFLOW     11              /* count */
80884 +#define EXCEPTION_DMA_RETRY_FAIL       12              /* */
80885 +#define EXCEPTION_CHAINED_EVENT                13              /* EventAddr */
80886 +#define EXCEPTION_THREAD_KILLED                14              /* */
80887 +#define EXCEPTION_CANNOT_SAVE_THREAD   15
80888 +#define EXCEPTION_BAD_SYSCALL          16              /* */
80889 +#define EXCEPTION_DEBUG                        17
80890 +#define EXCEPTION_BAD_EVENT            18              /* */
80891 +#define EXCEPTION_NETWORK_ERROR                19              /* rvp */
80892 +#define EXCEPTION_BUS_ERROR            20
80893 +#define EXCEPTION_COOKIE_ERROR         21
80894 +#define EXCEPTION_PACKET_TIMEOUT       22
80895 +#define EXCEPTION_BAD_DMA              23              /* */
80896 +#define EXCEPTION_ENOMEM               24
80897 +
80898 +/*
80899 + * Values for the 'proc' field to ElanException().
80900 + */
80901 +#define COMMAND_PROC                   1
80902 +#define THREAD_PROC                    2
80903 +#define DMA_PROC                       3
80904 +#define INPUT_PROC                     4
80905 +#define EVENT_PROC                     5
80906 +
80907 +/* Flags to IssueDmaCommand */
80908 +#define ISSUE_COMMAND_FOR_CPROC                1
80909 +#define ISSUE_COMMAND_CANT_WAIT                2
80910 +
80911 +/* Return code from IssueDmaCommand.*/
80912 +#define ISSUE_COMMAND_OK               0
80913 +#define ISSUE_COMMAND_TRAPPED          1
80914 +#define ISSUE_COMMAND_RETRY            2
80915 +#define ISSUE_COMMAND_WAIT             3
80916 +
80917 +#ifdef __KERNEL__
80918 +
80919 +extern ELAN3_CTXT *elan3_alloc(ELAN3_DEV *dev, int kernel);
80920 +extern void       elan3_free      (ELAN3_CTXT *ctxt);
80921 +
80922 +extern int        elan3_attach    (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
80923 +extern int         elan3_doattach  (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
80924 +extern void       elan3_detach    (ELAN3_CTXT *ctxt);
80925 +extern void        elan3_dodetach  (ELAN3_CTXT *ctxt);
80926 +
80927 +extern int        elan3_addvp     (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap);
80928 +extern int        elan3_removevp  (ELAN3_CTXT *ctxt, int process);
80929 +extern int        elan3_addbcastvp(ELAN3_CTXT *ctxt, int process, int base, int count);
80930 +
80931 +extern int         elan3_process   (ELAN3_CTXT *ctxt);
80932 +
80933 +extern int        elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits);
80934 +extern int        elan3_check_route(ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError);
80935 +
80936 +extern int        elan3_lwp       (ELAN3_CTXT *ctxt);
80937 +
80938 +extern void       elan3_swapin (ELAN3_CTXT *ctxt, int reason);
80939 +extern void       elan3_swapout (ELAN3_CTXT *ctxt, int reason);
80940 +extern int         elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages);
80941 +extern void        elan3_block_inputter (ELAN3_CTXT *ctxt, int block);
80942 +
80943 +
80944 +extern E3_Addr     elan3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack, int stackSize, int nargs, ...);
80945 +
80946 +extern void       SetInputterState (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
80947 +extern void       SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
80948 +extern void        UnloadCommandPageMapping (ELAN3_CTXT *ctxt);
80949 +extern void       StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
80950 +
80951 +extern int        HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags);
80952 +extern int        RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags);
80953 +extern int         CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags);
80954 +extern int        IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int flags);
80955 +extern int        IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int flags);
80956 +extern int         WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int flags);
80957 +extern void       FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, 
80958 +                                  E3_FaultSave_BE *FaultSaveArea, int flags);
80959 +extern int        SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress);
80960 +extern void       ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr,int flags);
80961 +extern int         SetEventsNeedRestart (ELAN3_CTXT *ctxt);
80962 +extern void        RestartSetEvents (ELAN3_CTXT *ctxt);
80963 +extern int        RunEventType (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType);
80964 +extern void        WakeupLwp (ELAN3_DEV *dev, void *arg);
80965 +extern void       QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie);
80966 +extern int         WaitForCommandPort (ELAN3_CTXT *ctxt);
80967 +
80968 +extern int        ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...);
80969 +
80970 +/* context_osdep.c */
80971 +extern int        LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr elanAddr, int len, int protFault, int writeable);
80972 +extern void       LoadCommandPortTranslation (ELAN3_CTXT *ctxt);
80973 +
80974 +#if defined(DIGITAL_UNIX)
80975 +/* seg_elan.c */
80976 +extern caddr_t    elan3_segelan3_create (ELAN3_CTXT *ctxt);
80977 +extern void       elan3_segelan3_destroy (ELAN3_CTXT *ctxt);
80978 +extern int         elan3_segelan3_map (ELAN3_CTXT *ctxt);
80979 +extern void        elan3_segelan3_unmap (ELAN3_CTXT *ctxt);
80980 +
80981 +/* seg_elanmem.c */
80982 +extern int        elan3_segelanmem_create (ELAN3_DEV *dev, unsigned object, unsigned off, vm_offset_t *addrp, int len);
80983 +#endif /* defined(DIGITAL_UNIX) */
80984 +
80985 +/* route_table.c */
80986 +extern ELAN3_ROUTE_TABLE *AllocateRouteTable (ELAN3_DEV *dev, int size);
80987 +extern void              FreeRouteTable  (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl);
80988 +extern int               LoadRoute       (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp, int ctxnum, int nflits, E3_uint16 *flits);
80989 +extern int               GetRoute        (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits);
80990 +extern void             InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
80991 +extern void             ValidateRoute   (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
80992 +extern void             ClearRoute      (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
80993 +
80994 +extern int               GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri);
80995 +extern int               GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive);
80996 +extern int               GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive);
80997 +
80998 +/* virtual_process.c */
80999 +extern ELAN_LOCATION  ProcessToLocation     (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap);
81000 +extern int           ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process);
81001 +extern caddr_t        CapabilityString      (ELAN_CAPABILITY *cap);
81002 +extern void           UnloadVirtualProcess  (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
81003 +
81004 +extern int           elan3_get_route   (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits);
81005 +extern int           elan3_reset_route (ELAN3_CTXT *ctxt, int process);
81006 +
81007 +/* cproc.c */
81008 +extern int       NextCProcTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
81009 +extern void      ResolveCProcTrap (ELAN3_CTXT *ctxt);
81010 +extern int       RestartCProcTrap (ELAN3_CTXT *ctxt);
81011 +
81012 +/* iproc.c */
81013 +extern void       InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap);
81014 +extern void      ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvp);
81015 +extern int       RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap);
81016 +extern char      *IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData *datap);
81017 +extern void       SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck);
81018 +
81019 +/* tproc.c */
81020 +extern int       NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
81021 +extern void      ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
81022 +extern int       TProcNeedsRestart (ELAN3_CTXT *ctxt);
81023 +extern void      RestartTProcItems (ELAN3_CTXT *ctxt);
81024 +extern E3_Addr    SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction);
81025 +extern void       ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer);
81026 +
81027 +/* tprocinsts.c */
81028 +extern int        RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal);
81029 +
81030 +/* tproc_osdep.c */
81031 +extern int        ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip);
81032 +extern int       ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip);
81033 +
81034 +/* dproc.c */
81035 +extern int       NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
81036 +extern void      ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
81037 +extern int       DProcNeedsRestart (ELAN3_CTXT *ctxt);
81038 +extern void      RestartDProcItems (ELAN3_CTXT *ctxt);
81039 +extern void       RestartDmaDesc (ELAN3_CTXT *ctxt, E3_DMA_BE *desc);
81040 +extern void       RestartDmaTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
81041 +extern void      RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr);
81042 +
81043 +/* network_error.c */
81044 +extern void       InitialiseNetworkErrorResolver (void);
81045 +extern void       FinaliseNetworkErrorResolver (void);
81046 +extern int        QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp);
81047 +extern void      FreeNetworkErrorResolver (NETERR_RESOLVER *rvp);
81048 +extern void       CancelNetworkErrorResolver (NETERR_RESOLVER *rvp);
81049 +extern int       ExecuteNetworkErrorFixup (NETERR_MSG *msg);
81050 +extern void      CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status);
81051 +
81052 +extern int        AddNeterrServerSyscall (int elanId, void *configp, void *addrp, char *namep);
81053 +
81054 +/* eventcookie.c */
81055 +extern void                cookie_init(void);
81056 +extern void                cookie_fini(void);
81057 +extern EVENT_COOKIE_TABLE *cookie_alloc_table (unsigned long task, unsigned long handle);
81058 +extern void                cookie_free_table (EVENT_COOKIE_TABLE *tbl);
81059 +extern int                 cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
81060 +extern int                 cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
81061 +extern int                 cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
81062 +extern int                 cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
81063 +extern int                 cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
81064 +
81065 +/* routecheck.c */
81066 +extern int elan3_route_check          (ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNode);
81067 +extern int elan3_route_broadcast_check(ELAN3_CTXT *ctxt, E3_uint16 *flitsA, int lowNode, int highNode);
81068 +
81069 +
81070 +#endif /* __KERNEL__ */
81071 +
81072 +#ifdef __cplusplus
81073 +}
81074 +#endif
81075 +
81076 +#endif /* _ELAN3_ELANCTXT_H */
81077 +
81078 +/*
81079 + * Local variables:
81080 + * c-file-style: "stroustrup"
81081 + * End:
81082 + */
81083 Index: linux-2.6.5-7.191/include/elan3/elandebug.h
81084 ===================================================================
81085 --- linux-2.6.5-7.191.orig/include/elan3/elandebug.h    2004-02-23 16:02:56.000000000 -0500
81086 +++ linux-2.6.5-7.191/include/elan3/elandebug.h 2005-07-28 14:52:52.945663992 -0400
81087 @@ -0,0 +1,106 @@
81088 +/*
81089 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81090 + *
81091 + *    For licensing information please see the supplied COPYING file
81092 + *
81093 + */
81094 +
81095 +#ifndef _ELAN3_ELANDEBUG_H
81096 +#define _ELAN3_ELANDEBUG_H
81097 +
81098 +#ident "$Id: elandebug.h,v 1.38 2003/09/24 13:57:24 david Exp $"
81099 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandebug.h,v $ */
81100 +
81101 +#if defined(__KERNEL__)
81102 +
81103 +extern u_int elan3_debug;
81104 +extern u_int elan3_debug_console;
81105 +extern u_int elan3_debug_buffer;
81106 +extern u_int elan3_debug_ignore_dev;
81107 +extern u_int elan3_debug_ignore_kcomm;
81108 +extern u_int elan3_debug_ignore_ctxt;
81109 +extern u_int elan3_debug_display_ctxt;
81110 +
81111 +#define DBG_CONFIG     0x00000001                      /* Module configuration */
81112 +#define DBG_HAT                0x00000002
81113 +#define DBG_FN         0x00000004
81114 +#define DBG_SEG                0x00000008
81115 +#define DBG_INTR       0x00000010
81116 +#define DBG_LWP                0x00000020
81117 +#define DBG_FAULT      0x00000040
81118 +#define DBG_EVENT      0x00000080
81119 +#define DBG_CPROC      0x00000100
81120 +#define DBG_TPROC      0x00000200
81121 +#define DBG_DPROC      0x00000400
81122 +#define DBG_IPROC      0x00000800
81123 +#define DBG_SWAP       0x00001000
81124 +#define DBG_CMD                0x00002000
81125 +#define DBG_VP         0x00004000
81126 +#define DBG_SYSCALL    0x00008000
81127 +#define DBG_BSCAN      0x00010000
81128 +#define DBG_LINKERR    0x00020000
81129 +#define DBG_NETERR     0x00040000
81130 +#define DBG_NETRPC     0x00080000
81131 +#define DBG_EVENTCOOKIE 0x00100000
81132 +#define DBG_SDRAM      0x00200000
81133 +
81134 +#define DBG_EP         0x10000000
81135 +#define DBG_EPCONSOLE  0x20000000
81136 +
81137 +#define DBG_EIP                0x40000000
81138 +#define DBG_EIPFAIL    0x80000000
81139 +
81140 +#define DBG_ALL                0xffffffff
81141 +
81142 +/* values to pass as "ctxt" rather than a "ctxt" pointer */
81143 +#define DBG_DEVICE     ((void *) 0)
81144 +#define DBG_KCOMM      ((void *) 1)
81145 +#define DBG_ICS                ((void *) 2)
81146 +#define DBG_USER       ((void *) 3)
81147 +#define DBG_NTYPES     64
81148 +
81149 +#if defined(DEBUG_PRINTF)
81150 +#  define DBG(m,fn)                            ((elan3_debug&(m)) ? (void)(fn) : (void)0)
81151 +#  define PRINTF0(ctxt,m,fmt)                  ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt)             : (void)0)
81152 +#  define PRINTF1(ctxt,m,fmt,a)                        ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a)           : (void)0)
81153 +#  define PRINTF2(ctxt,m,fmt,a,b)              ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b)         : (void)0)
81154 +#  define PRINTF3(ctxt,m,fmt,a,b,c)            ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c)       : (void)0)
81155 +#  define PRINTF4(ctxt,m,fmt,a,b,c,d)          ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d)     : (void)0)
81156 +#  define PRINTF5(ctxt,m,fmt,a,b,c,d,e)                ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e)   : (void)0)
81157 +#  define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f)      ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e,f) : (void)0)
81158 +#ifdef __GNUC__
81159 +#  define PRINTF(ctxt,m,args...)               ((elan3_debug&(m)) ? elan3_debugf(ctxt,m, ##args)         : (void)0)
81160 +#endif
81161 +
81162 +#else
81163 +
81164 +#  define DBG(m, fn)                           do { ; } while (0)
81165 +#  define PRINTF0(ctxt,m,fmt)                  do { ; } while (0)
81166 +#  define PRINTF1(ctxt,m,fmt,a)                        do { ; } while (0)
81167 +#  define PRINTF2(ctxt,m,fmt,a,b)              do { ; } while (0)
81168 +#  define PRINTF3(ctxt,m,fmt,a,b,c)            do { ; } while (0)
81169 +#  define PRINTF4(ctxt,m,fmt,a,b,c,d)          do { ; } while (0)
81170 +#  define PRINTF5(ctxt,m,fmt,a,b,c,d,e)                do { ; } while (0)
81171 +#  define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f)      do { ; } while (0)
81172 +#ifdef __GNUC__
81173 +#  define PRINTF(ctxt,m,args...)               do { ; } while (0)
81174 +#endif
81175 +
81176 +#endif /* DEBUG_PRINTF */
81177 +
81178 +#ifdef __GNUC__
81179 +extern void       elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...)
81180 +    __attribute__ ((format (printf,3,4)));
81181 +#else
81182 +extern void       elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...);
81183 +#endif
81184 +
81185 +
81186 +#endif /* __KERNEL__ */
81187 +#endif /* _ELAN3_ELANDEBUG_H */
81188 +
81189 +/*
81190 + * Local variables:
81191 + * c-file-style: "stroustrup"
81192 + * End:
81193 + */
81194 Index: linux-2.6.5-7.191/include/elan3/elandev.h
81195 ===================================================================
81196 --- linux-2.6.5-7.191.orig/include/elan3/elandev.h      2004-02-23 16:02:56.000000000 -0500
81197 +++ linux-2.6.5-7.191/include/elan3/elandev.h   2005-07-28 14:52:52.946663840 -0400
81198 @@ -0,0 +1,581 @@
81199 +/*
81200 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81201 + *
81202 + *    For licensing information please see the supplied COPYING file
81203 + *
81204 + */
81205 +
81206 +#ifndef __ELAN3_ELANDEV_H
81207 +#define __ELAN3_ELANDEV_H
81208 +
81209 +#ident "$Id: elandev.h,v 1.74.2.2 2004/12/10 11:10:19 mike Exp $"
81210 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev.h,v $ */
81211 +
81212 +#include <elan/bitmap.h>
81213 +#include <elan/devinfo.h>
81214 +#include <elan/stats.h>
81215 +
81216 +#if defined(DIGITAL_UNIX)
81217 +#  include <elan3/elandev_dunix.h>
81218 +#elif defined(LINUX)
81219 +#  include <elan3/elandev_linux.h>
81220 +#elif defined(SOLARIS)
81221 +#  include <elan3/elandev_solaris.h>
81222 +#endif
81223 +
81224 +#ifndef TRUE
81225 +#  define TRUE 1
81226 +#endif
81227 +#ifndef FALSE
81228 +#  define FALSE 0
81229 +#endif
81230 +
81231 +/*
81232 + * Elan base address registers defined as follows :
81233 + */
81234 +#define ELAN3_BAR_SDRAM                0
81235 +#define ELAN3_BAR_COMMAND_PORT 1
81236 +#define ELAN3_BAR_REGISTERS    2
81237 +#define ELAN3_BAR_EBUS         3
81238 +
81239 +/* Macro to generate 'offset' to mmap "mem" device */
81240 +#define OFF_TO_SPACE(off)      ((off) >> 28)
81241 +#define OFF_TO_OFFSET(off)     ((off) & 0x0FFFFFFF)
81242 +#define GEN_OFF(space,off)     (((space) << 28) | ((off) & 0x0FFFFFFF))
81243 +
81244 +#ifdef __KERNEL__
81245 +
81246 +/*
81247 + * Elan EBUS is configured as follows :
81248 + */
81249 +#define ELAN3_EBUS_ROM_OFFSET          0x000000                /* rom */
81250 +#define ELAN3_EBUS_INTPAL_OFFSET       0x180000                /* interrupt pal (write only) */
81251 +
81252 +#define ELAN3_EBUS_ROM_SIZE            0x100000
81253 +
81254 +/*
81255 + * Elan SDRAM is arranged as follows :
81256 + */
81257 +#define ELAN3_TANDQ_SIZE               0x0020000               /* Trap And Queue Size */
81258 +#define ELAN3_CONTEXT_SIZE             0x0010000               /* Context Table Size */
81259 +#define ELAN3_COMMAND_TRAP_SIZE                0x0010000               /* Command Port Trap Size */
81260 +
81261 +#ifdef MPSAS
81262 +#define ELAN3_LN2_NUM_CONTEXTS 8                               /* Support 256 contexts */
81263 +#else
81264 +#define ELAN3_LN2_NUM_CONTEXTS 12                              /* Support 4096 contexts */
81265 +#endif
81266 +#define ELAN3_NUM_CONTEXTS     (1 << ELAN3_LN2_NUM_CONTEXTS)   /* Entries in context table */
81267 +
81268 +#define ELAN3_SDRAM_NUM_BANKS  4                               /* Elan supports 4 Banks of Sdram */
81269 +#define ELAN3_SDRAM_BANK_SHIFT 26                              /* each of which can be 64 mbytes ? */
81270 +#define ELAN3_SDRAM_BANK_SIZE  (1 << ELAN3_SDRAM_BANK_SHIFT)
81271 +
81272 +#define ELAN3_MAX_CACHE_SIZE   (64 * 1024)                     /* Maximum cache size */
81273 +#define ELAN3_CACHE_SIZE       (64 * 4 * E3_CACHELINE_SIZE)    /* Elan3 has 8K cache */
81274 +
81275 +#ifndef offsetof
81276 +#define offsetof(s, m)         (size_t)(&(((s *)0)->m))
81277 +#endif
81278 +
81279 +/*
81280 + * circular queue and macros to access members.
81281 + */
81282 +typedef struct
81283 +{
81284 +    u_int      q_back;                 /* Next free space */
81285 +    u_int      q_front;                /* First object to remove */
81286 +    u_int      q_size;                 /* Size of queue */
81287 +    u_int      q_count;                /* Current number of entries */
81288 +    u_int      q_slop;                 /* FULL <=> (count+slop) == size */
81289 +} ELAN3_QUEUE;
81290 +
81291 +typedef struct 
81292 +{
81293 +    u_int      q_back;                 /* Next free space */
81294 +    u_int      q_middle;               /* Middle pointer */
81295 +    u_int      q_front;                /* First object to remove */
81296 +    u_int      q_size;                 /* Size of queue */
81297 +    u_int      q_count;                /* Current number of entries */
81298 +    u_int      q_slop;                 /* FULL <=> (count+slop) == size */
81299 +} ELAN3_SPLIT_QUEUE;
81300 +
81301 +#define ELAN3_QUEUE_INIT(q,num,slop)   ((q).q_size = (num), (q).q_slop = (slop)+1, (q).q_front = (q).q_back = 0, (q).q_count = 0)
81302 +#define ELAN3_QUEUE_FULL(q)            ((q).q_count == ((q).q_size - (q).q_slop))
81303 +#define ELAN3_QUEUE_REALLY_FULL(q)     ((q).q_count == (q).q_size - 1)
81304 +#define ELAN3_QUEUE_EMPTY(q)           ((q).q_count == 0)
81305 +#define ELAN3_QUEUE_FRONT_EMPTY(q)     ((q).q_front == (q).q_middle)
81306 +#define ELAN3_QUEUE_BACK_EMPTY(q)      ((q).q_middle == (q).q_back)
81307 +#define ELAN3_QUEUE_ADD(q)             ((q).q_back = ((q).q_back+1) % (q).q_size, (q).q_count++)
81308 +#define ELAN3_QUEUE_REMOVE(q)          ((q).q_front = ((q).q_front+1) % (q).q_size, (q).q_count--)
81309 +#define ELAN3_QUEUE_ADD_FRONT(q)               ((q).q_front = ((q).q_front-1) % (q).q_size, (q).q_count++)
81310 +#define ELAN3_QUEUE_CONSUME(q)         ((q).q_middle = ((q).q_middle+1) % (q).q_size)
81311 +#define ELAN3_QUEUE_FRONT(q,qArea)     (&(qArea)[(q).q_front])
81312 +#define ELAN3_QUEUE_MIDDLE(q,qArea)    (&(qArea)[(q).q_middle])
81313 +#define ELAN3_QUEUE_BACK(q,qArea)      (&(qArea)[(q).q_back])
81314 +
81315 +#define SDRAM_MIN_BLOCK_SHIFT  10
81316 +#define SDRAM_NUM_FREE_LISTS   17                              /* allows max 64Mb block */
81317 +#define SDRAM_MIN_BLOCK_SIZE   (1 << SDRAM_MIN_BLOCK_SHIFT)
81318 +#define SDRAM_MAX_BLOCK_SIZE   (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1))
81319 +#define SDRAM_FREELIST_TRIGGER 32
81320 +
81321 +typedef struct elan3_sdram_bank
81322 +{
81323 +    u_int              Size;                                   /* Size of bank of memory */
81324 +
81325 +    ioaddr_t           Mapping;                                /* Where mapped in the kernel */
81326 +    DeviceMappingHandle Handle;                                        /* and mapping handle */
81327 +
81328 +    struct elan3_ptbl_gr **PtblGroups;
81329 +    
81330 +    bitmap_t          *Bitmaps[SDRAM_NUM_FREE_LISTS];
81331 +} ELAN3_SDRAM_BANK;
81332 +
81333 +typedef struct elan3_haltop
81334 +{
81335 +    struct elan3_haltop          *Next;                                /* Chain to next in list. */
81336 +    E3_uint32             Mask;                                /* Interrupt mask to see before calling function */
81337 +    
81338 +    void                (*Function)(void *, void *);           /* Function to call */
81339 +    void                 *Arguement;                           /* Argument to pass to function */
81340 +} ELAN3_HALTOP;
81341 +
81342 +#define HALTOP_BATCH   32
81343 +
81344 +#endif /* __KERNEL__ */
81345 +
81346 +typedef struct elan3_stats
81347 +{
81348 +    u_long     Version;                                        /* version field */
81349 +    u_long     Interrupts;                                     /* count of elan interrupts */
81350 +    u_long     TlbFlushes;                                     /* count of tlb flushes */
81351 +    u_long     InvalidContext;                                 /* count of traps with invalid context */
81352 +    u_long     ComQueueHalfFull;                               /* count of interrupts due to com queue being half full */
81353 +
81354 +    u_long     CProcTraps;                                     /* count of cproc traps */
81355 +    u_long     DProcTraps;                                     /* count of dproc traps */
81356 +    u_long     TProcTraps;                                     /* count of tproc traps */
81357 +    u_long     IProcTraps;                                     /* count of iproc traps */
81358 +    u_long     EventInterrupts;                                /* count of event interrupts */
81359 +
81360 +    u_long     PageFaults;                                     /* count of elan page faults */
81361 +
81362 +    /* inputter related */
81363 +    u_long     EopBadAcks;                                     /* count of EOP_BAD_ACKs */
81364 +    u_long     EopResets;                                      /* count of EOP_ERROR_RESET */
81365 +    u_long      InputterBadLength;                             /* count of BadLength */
81366 +    u_long      InputterCRCDiscards;                           /* count of CRC_STATUS_DISCARD */
81367 +    u_long      InputterCRCErrors;                             /* count of CRC_STATUS_ERROR */
81368 +    u_long      InputterCRCBad;                                        /* count of CRC_STATUS_BAD */
81369 +    u_long     DmaNetworkErrors;                               /* count of errors in dma data */
81370 +    u_long     DmaIdentifyNetworkErrors;                       /* count of errors after dma identify */
81371 +    u_long     ThreadIdentifyNetworkErrors;                    /* count of errors after thread identify */
81372 +
81373 +    /* dma related */
81374 +    u_long     DmaRetries;                                     /* count of dma retries (due to retry fail count) */    
81375 +    u_long     DmaOutputTimeouts;                              /* count of dma output timeouts */
81376 +    u_long     DmaPacketAckErrors;                             /* count of dma packet ack errors */
81377 +
81378 +    /* thread related */
81379 +    u_long     ForcedTProcTraps;                               /* count of forced tproc traps */
81380 +    u_long     TrapForTooManyInsts;                            /* count of too many instruction traps */
81381 +    u_long     ThreadOutputTimeouts;                           /* count of thread output timeouts */
81382 +    u_long       ThreadPacketAckErrors;                                /* count of thread packet ack errors */
81383 +
81384 +    /* link related */
81385 +    u_long     LockError;                                      /* count of RegPtr->Exts.LinkErrorTypes:LS_LockError */
81386 +    u_long     DeskewError;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_DeskewError */
81387 +    u_long     PhaseError;                                     /* count of RegPtr->Exts.LinkErrorTypes:LS_PhaseError */
81388 +    u_long     DataError;                                      /* count of RegPtr->Exts.LinkErrorTypes:LS_DataError */
81389 +    u_long     FifoOvFlow0;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow0 */
81390 +    u_long     FifoOvFlow1;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow1 */
81391 +    u_long     LinkErrorValue;                                 /* link error value on data error */
81392 +
81393 +    /* memory related */
81394 +    u_long     CorrectableErrors;                              /* count of correctable ecc errors */
81395 +    u_long     UncorrectableErrors;                            /* count of uncorrectable ecc errors */
81396 +    u_long       MultipleErrors;                                       /* count of multiple ecc errors */
81397 +    u_long     SdramBytesFree;                                 /* count of sdram bytes free */
81398 +    
81399 +    /* Interrupt related */
81400 +    u_long     LongestInterrupt;                               /* length of longest interrupt in ticks */
81401 +
81402 +    u_long     EventPunts;                                     /* count of punts of event interrupts to thread */
81403 +    u_long     EventRescheds;                                  /* count of reschedules of event interrupt thread */
81404 +} ELAN3_STATS;
81405 +
81406 +#define ELAN3_STATS_VERSION    (ulong)2
81407 +#define ELAN3_NUM_STATS                (sizeof (ELAN3_STATS)/sizeof (u_long))
81408 +
81409 +#define ELAN3_STATS_DEV_FMT   "elan3_stats_dev_%d"
81410 +
81411 +#ifdef __KERNEL__
81412 +
81413 +#define BumpStat(dev,stat)     ((dev)->Stats.stat++)
81414 +
81415 +typedef struct elan3_level_ptbl_block
81416 +{
81417 +    spinlock_t             PtblLock;                           /* Page table freelist lock */
81418 +    int                            PtblTotal;                          /* Count of level N page tables allocated */
81419 +    int                            PtblFreeCount;                      /* Count of free level N page tables */
81420 +    struct elan3_ptbl     *PtblFreeList;                       /* Free level N page tables */
81421 +    struct elan3_ptbl_gr          *PtblGroupList;                      /* List of Groups of level N page tables */
81422 +} ELAN3_LEVEL_PTBL_BLOCK;
81423
81424 +typedef struct elan3_dev
81425 +{
81426 +    ELAN3_DEV_OSDEP        Osdep;                              /* OS specific entries */
81427 +    int                            Instance;                           /* Device number */
81428 +    ELAN_DEVINFO            Devinfo;                           
81429 +    ELAN_POSITION          Position;                           /* position in switch network (for user code) */
81430 +    ELAN_DEV_IDX           DeviceIdx;                          /* device index registered with elanmod */
81431 +
81432 +    int                            ThreadsShouldStop;                  /* flag that kernel threads should stop */
81433 +
81434 +    spinlock_t             IntrLock;
81435 +    spinlock_t             TlbLock;
81436 +    spinlock_t             CProcLock;
81437 +    kcondvar_t             IntrWait;                           /* place event interrupt thread sleeps */
81438 +    unsigned               EventInterruptThreadStarted:1;      /* event interrupt thread started */
81439 +    unsigned               EventInterruptThreadStopped:1;      /* event interrupt thread stopped */
81440 +    
81441 +    DeviceMappingHandle            RegHandle;                          /* DDI Handle */
81442 +    ioaddr_t               RegPtr;                             /* Elan Registers */
81443 +
81444 +    volatile E3_uint32     InterruptMask;                      /* copy of RegPtr->InterruptMask */
81445 +    volatile E3_uint32     Event_Int_Queue_FPtr;               /* copy of RegPtr->Event_Int_Queue_FPtr */
81446 +    volatile E3_uint32      SchCntReg;                         /* copy of RegPtr->SchCntReg */
81447 +    volatile E3_uint32      Cache_Control_Reg;                 /* true value for RegPtr->Cache_Control_Reg */
81448 +    
81449 +    ELAN3_SDRAM_BANK       SdramBanks[ELAN3_SDRAM_NUM_BANKS];  /* Elan sdram banks */
81450 +    spinlock_t             SdramLock;                          /* Sdram allocator */
81451 +    sdramaddr_t                    SdramFreeLists[SDRAM_NUM_FREE_LISTS];
81452 +    unsigned               SdramFreeCounts[SDRAM_NUM_FREE_LISTS];
81453 +               
81454 +    sdramaddr_t                    TAndQBase;                          /* Trap and Queue area */
81455 +    sdramaddr_t                    ContextTable;                       /* Elan Context Table */
81456 +    u_int                  ContextTableSize;                   /* # entries in context table */
81457 +
81458 +    struct elan3_ctxt      **CtxtTable;                         /* array of ctxt pointers or nulls */
81459 +
81460 +    sdramaddr_t                    CommandPortTraps[2];                /* Command port trap overflow */
81461 +    int                            CurrentCommandPortTrap;             /* Which overflow queue we're using */
81462 +    
81463 +    u_int                  HaltAllCount;                       /* Count of reasons to halt context 0 queues */
81464 +    u_int                  HaltNonContext0Count;               /* Count of reasons to halt non-context 0 queues */
81465 +    u_int                  HaltDmaDequeueCount;                /* Count of reasons to halt dma from dequeuing */
81466 +    u_int                  HaltThreadCount;                    /* Count of reasons to halt the thread processor */
81467 +    u_int                  FlushCommandCount;                  /* Count of reasons to flush command queues */
81468 +    u_int                  DiscardAllCount;                    /* Count of reasons to discard context 0 */
81469 +    u_int                  DiscardNonContext0Count;            /* Count of reasons to discard non context 0 */
81470 +
81471 +    struct thread_trap    *ThreadTrap;                         /* Thread Processor trap space */
81472 +    struct dma_trap       *DmaTrap;                            /* DMA Processor trap space */
81473 +
81474 +    spinlock_t             FreeHaltLock;                       /* Lock for haltop free list */
81475 +    ELAN3_HALTOP                  *FreeHaltOperations;                 /* Free list of haltops */
81476 +    u_int                  NumHaltOperations;                  /* Number of haltops allocated */
81477 +    u_int                  ReservedHaltOperations;             /* Number of haltops reserved */
81478 +
81479 +    ELAN3_HALTOP                  *HaltOperations;                     /* List of operations to call */
81480 +    ELAN3_HALTOP                 **HaltOperationsTailpp;               /* Pointer to last "next" pointer in list */
81481 +    E3_uint32              HaltOperationsMask;                 /* Or of all bits in list of operations */
81482 +
81483 +    physaddr_t             SdramPhysBase;                      /* Physical address of SDRAM */
81484 +    physaddr_t             SdramPhysMask;                      /* and mask of significant bits */ 
81485 +    
81486 +    physaddr_t             PciPhysBase;                        /* physical address of local PCI segment */
81487 +    physaddr_t             PciPhysMask;                        /* and mask of significant bits */
81488 +
81489 +    long                   ErrorTime;                          /* lbolt at last error (link,ecc etc) */
81490 +    long                   ErrorsPerTick;                      /* count of errors for this tick */
81491 +    timer_fn_t             ErrorTimeoutId;                     /* id of timeout when errors masked out */
81492 +    timer_fn_t             DmaPollTimeoutId;                   /* id of timeout to poll for "bad" dmas */
81493 +    int                            FilterHaltQueued;
81494 +
81495 +    /*
81496 +     * HAT layer specific entries.
81497 +     */
81498 +    ELAN3_LEVEL_PTBL_BLOCK   Level[4];
81499 +    spinlock_t             PtblGroupLock;                      /* Lock for Page Table group lists */
81500 +    struct elan3_ptbl_gr    *Level3PtblGroupHand;              /* Hand for ptbl stealing */
81501 +
81502 +    /*
81503 +     * Per-Context Information structures.
81504 +     */
81505 +    struct elan3_info     *Infos;                              /* List of "infos" for this device */
81506 +
81507 +    char                    LinkShutdown;                       /* link forced into reset by panic/shutdown/dump */
81508 +
81509 +    /*
81510 +     * Device statistics.
81511 +     */
81512 +    ELAN3_STATS                    Stats;
81513 +    ELAN_STATS_IDX          StatsIndex;
81514 +
81515 +    struct {
81516 +       E3_Regs            *RegPtr;
81517 +       char               *Sdram[ELAN3_SDRAM_NUM_BANKS];
81518 +    } PanicState;
81519 +} ELAN3_DEV;
81520 +
81521 +#define ELAN3_DEV_CTX_TABLE(dev,ctxtn) ( (dev)->CtxtTable[ (ctxtn) &  MAX_ROOT_CONTEXT_MASK] )
81522 +
81523 +/* macros for accessing dev->RegPtr.Tags/Sets. */
81524 +#define write_cache_tag(dev,what,val)  writeq (val, dev->RegPtr + offsetof (E3_Regs, Tags.what))
81525 +#define read_cache_tag(dev,what)       readq (dev->RegPtr + offsetof (E3_Regs, Tags.what))
81526 +#define write_cache_set(dev,what,val)  writeq (val, dev->RegPtr + offsetof (E3_Regs, Sets.what))
81527 +#define read_cache_set(dev,what)       readq (dev->RegPtr + offsetof (E3_Regs, Sets.what))
81528 +
81529 +/* macros for accessing dev->RegPtr.Regs. */
81530 +#define write_reg64(dev,what,val)      writeq (val, dev->RegPtr + offsetof (E3_Regs, Regs.what))
81531 +#define write_reg32(dev,what,val)      writel (val, dev->RegPtr + offsetof (E3_Regs, Regs.what))
81532 +#define read_reg64(dev,what)           readq (dev->RegPtr + offsetof (E3_Regs, Regs.what))
81533 +#define read_reg32(dev,what)           readl (dev->RegPtr + offsetof (E3_Regs, Regs.what))
81534 +
81535 +/* macros for accessing dev->RegPtr.uRegs. */
81536 +#define write_ureg64(dev,what,val)     writeq (val, dev->RegPtr + offsetof (E3_Regs, URegs.what))
81537 +#define write_ureg32(dev,what,val)     writel (val, dev->RegPtr + offsetof (E3_Regs, URegs.what))
81538 +#define read_ureg64(dev,what)          readq (dev->RegPtr + offsetof (E3_Regs, URegs.what))
81539 +#define read_ureg32(dev,what)          readl (dev->RegPtr + offsetof (E3_Regs, URegs.what))
81540 +
81541 +/* macros for accessing dma descriptor/thread regs */
81542 +#define copy_dma_regs(dev, desc) \
81543 +MACRO_BEGIN \
81544 +    register int i;  \
81545 +    for (i = 0; i < sizeof (E3_DMA)/sizeof(E3_uint64); i++) \
81546 +       ((E3_uint64 *) desc)[i] = readq (dev->RegPtr + offsetof (E3_Regs, Regs.Dma_Desc) + i*sizeof (E3_uint64)); \
81547 +MACRO_END
81548 +
81549 +#define copy_thread_regs(dev, regs) \
81550 +MACRO_BEGIN \
81551 +    register int i;  \
81552 +    for (i = 0; i < (32*sizeof (E3_uint32))/sizeof(E3_uint64); i++) \
81553 +       ((E3_uint64 *) regs)[i] = readq (dev->RegPtr + offsetof (E3_Regs, Regs.Globals[0]) + i*sizeof (E3_uint64)); \
81554 +MACRO_END
81555 +
81556 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock, 
81557 +                         _E3_DataBusMap::Exts _E3_DataBusMap::Input_Context_Fil_Flush
81558 +                         elan3_dev::CurrentCommandPortTrap elan3_dev::HaltAllCount elan3_dev::HaltDmaDequeueCount
81559 +                         elan3_dev::FlushCommandCount elan3_dev::DiscardAllCount elan3_dev::DiscardNonContext0Count
81560 +                         elan3_dev::HaltOperations elan3_dev::HaltOperationsMask))
81561 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::TlbLock, 
81562 +                         _E3_DataBusMap::Cache_Control_Reg))
81563 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock, 
81564 +                         elan3_dev::Infos elan3_dev::InfoTable))
81565 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::FreeHaltLock, 
81566 +                         elan3_dev::FreeHaltOperations elan3_dev::NumHaltOperations elan3_dev::ReservedHaltOperations))
81567 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PageFreeListLock, 
81568 +                         elan3_dev::PageFreeList elan3_dev::PageFreeListSize))
81569 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level1PtblLock,
81570 +                         elan3_dev::Level1PtblTotal elan3_dev::Level1PtblFreeCount elan3_dev::Level1PtblFreeList))
81571 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level2PtblLock,
81572 +                         elan3_dev::Level2PtblTotal elan3_dev::Level2PtblFreeCount elan3_dev::Level2PtblFreeList))
81573 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level3PtblLock,
81574 +                         elan3_dev::Level3PtblTotal elan3_dev::Level3PtblFreeCount elan3_dev::Level3PtblFreeList))
81575 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PtblGroupLock,
81576 +                         elan3_dev::Level1PtblGroupList elan3_dev::Level2PtblGroupList elan3_dev::Level3PtblGroupList))
81577 +
81578 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_dev::InfoTable elan3_dev::Level1PtblFreeList
81579 +                                elan3_dev::Level2PtblFreeList elan3_dev::Level3PtblFreeList))
81580 +
81581 +_NOTE(LOCK_ORDER(elan3_dev::InfoLock elan3_dev::IntrLock))
81582 +_NOTE(LOCK_ORDER(as::a_lock elan3_dev::InfoLock))
81583 +_NOTE(LOCK_ORDER(as::a_lock elan3_dev::IntrLock))
81584 +
81585 +#define SET_INT_MASK(dev,Mask)         MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev)->InterruptMask = (Mask)));  mmiob(); MACRO_END
81586 +#define ENABLE_INT_MASK(dev, bits)     MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask |= (bits)))); mmiob(); MACRO_END
81587 +#define DISABLE_INT_MASK(dev, bits)    MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask &= ~(bits)))); mmiob(); MACRO_END
81588 +
81589 +#define INIT_SCHED_STATUS(dev, val) \
81590 +MACRO_BEGIN \
81591 +       (dev)->SchCntReg = (val); \
81592 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
81593 +       mmiob(); \
81594 +MACRO_END
81595 +
81596 +#define SET_SCHED_STATUS(dev, val) \
81597 +MACRO_BEGIN \
81598 +       ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \
81599 +       (dev)->SchCntReg |= (val); \
81600 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
81601 +        mmiob (); \
81602 +MACRO_END
81603 +
81604 +#define CLEAR_SCHED_STATUS(dev, val) \
81605 +MACRO_BEGIN \
81606 +       ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \
81607 +       (dev)->SchCntReg &= ~(val); \
81608 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
81609 +        mmiob(); \
81610 +MACRO_END
81611 +
81612 +#define MODIFY_SCHED_STATUS(dev, SetBits, ClearBits) \
81613 +MACRO_BEGIN \
81614 +       ASSERT ((((SetBits)|(ClearBits)) & HaltStopAndExtTestMask) == ((SetBits)|(ClearBits))); \
81615 +       (dev)->SchCntReg = (((dev)->SchCntReg | (SetBits)) & ~(ClearBits)); \
81616 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
81617 +       mmiob(); \
81618 +MACRO_END
81619 +
81620 +#define PULSE_SCHED_STATUS(dev, RestartBits) \
81621 +MACRO_BEGIN \
81622 +       ASSERT (((RestartBits) & HaltStopAndExtTestMask) == 0); \
81623 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg | (RestartBits)); \
81624 +       mmiob(); \
81625 +MACRO_END
81626 +
81627 +#define SET_SCHED_LINK_VALUE(dev, enabled, val) \
81628 +MACRO_BEGIN \
81629 +       (dev)->SchCntReg = (((dev)->SchCntReg & HaltAndStopMask) | ((enabled) ? LinkBoundaryScan : 0) | LinkSetValue(val, 0)); \
81630 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
81631 +       mmiob(); \
81632 +MACRO_END
81633 +
81634 +#ifdef DEBUG_ASSERT
81635 +#  define ELAN3_ASSERT(dev, EX)        ((void)((EX) || elan3_assfail(dev, #EX, __FILE__, __LINE__)))
81636 +#else
81637 +#  define ELAN3_ASSERT(dev, EX)
81638 +#endif
81639 +
81640 +/* elandev_generic.c */
81641 +extern int        InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort);
81642 +extern void       FinaliseElan (ELAN3_DEV *dev);
81643 +extern int        InterruptHandler (ELAN3_DEV *dev);
81644 +extern void       PollForDmaHungup (void *arg);
81645 +
81646 +extern int        SetLinkBoundaryScan (ELAN3_DEV *dev);
81647 +extern void       ClearLinkBoundaryScan (ELAN3_DEV *dev);
81648 +extern int        WriteBoundaryScanValue (ELAN3_DEV *dev, int value);
81649 +extern int        ReadBoundaryScanValue(ELAN3_DEV *dev, int link);
81650 +
81651 +extern int        ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency);
81652 +
81653 +extern struct elan3_ptbl_gr *ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset);
81654 +extern void       ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, struct elan3_ptbl_gr *ptg);
81655 +
81656 +extern void       ElanFlushTlb (ELAN3_DEV *dev);
81657 +
81658 +extern void       SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp);
81659 +extern void      FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op);
81660 +extern int       ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep);
81661 +extern void      ReleaseHaltOperations (ELAN3_DEV *dev, int count);
81662 +extern void      ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend);
81663 +extern void      QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp,
81664 +                                     E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement);
81665 +
81666 +extern int        ComputePosition (ELAN_POSITION *pos, unsigned NodeId, unsigned NumNodes, unsigned numDownLinksVal);
81667 +
81668 +extern caddr_t   MiToName (int mi);
81669 +extern void      ElanBusError (ELAN3_DEV *dev);
81670 +
81671 +extern void      TriggerLsa (ELAN3_DEV *dev);
81672 +
81673 +extern ELAN3_DEV  *elan3_device (int instance);
81674 +extern int       DeviceRegisterSize (ELAN3_DEV *dev, int rnumber, int *sizep);
81675 +extern int       MapDeviceRegister (ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp, int offset, 
81676 +                                    int len, DeviceMappingHandle *handlep);
81677 +extern void       UnmapDeviceRegister (ELAN3_DEV *dev, DeviceMappingHandle *handlep);
81678 +
81679 +
81680 +/* sdram.c */
81681 +/* sdram accessing functions - define 4 different types for 8,16,32,64 bit accesses */
81682 +extern unsigned char      elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t ptr);
81683 +extern unsigned short     elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t ptr);
81684 +extern unsigned int       elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t ptr);
81685 +extern unsigned long long elan3_sdram_readq (ELAN3_DEV *dev, sdramaddr_t ptr);
81686 +extern void               elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned char val);
81687 +extern void               elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned short val);
81688 +extern void               elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned int val);
81689 +extern void               elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned long long val);
81690 +
81691 +extern void              elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
81692 +extern void              elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
81693 +extern void              elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
81694 +extern void              elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
81695 +
81696 +extern void               elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
81697 +extern void               elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
81698 +extern void               elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
81699 +extern void               elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
81700 +extern void               elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
81701 +extern void               elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
81702 +extern void               elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
81703 +extern void               elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
81704 +
81705 +extern void              elan3_sdram_init (ELAN3_DEV *dev);
81706 +extern void               elan3_sdram_fini (ELAN3_DEV *dev);
81707 +extern void              elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top);
81708 +extern sdramaddr_t        elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes);
81709 +extern void               elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
81710 +extern physaddr_t         elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t addr);
81711 +
81712 +/* cproc.c */
81713 +extern void      HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Mask);
81714 +
81715 +/* iproc.c */
81716 +extern void      HandleIProcTrap (ELAN3_DEV *dev, int Channel, E3_uint32 Pend, sdramaddr_t FaultSaveOff, 
81717 +                                  sdramaddr_t TransactionsOff, sdramaddr_t DataOff);
81718 +
81719 +/* tproc.c */
81720 +extern int       HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits);
81721 +extern void      DeliverTProcTrap (ELAN3_DEV *dev, struct thread_trap *threadTrap, E3_uint32 Pend);
81722 +
81723 +/* dproc.c */
81724 +extern int       HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits);
81725 +extern void      DeliverDProcTrap (ELAN3_DEV *dev, struct dma_trap *dmaTrap, E3_uint32 Pend);
81726 +
81727 +#if defined(LINUX)
81728 +/* procfs_linux.h */
81729 +extern struct proc_dir_entry *elan3_procfs_root;
81730 +extern struct proc_dir_entry *elan3_config_root;
81731 +
81732 +extern void elan3_procfs_init(void);
81733 +extern void elan3_procfs_fini(void);
81734 +extern void elan3_procfs_device_init (ELAN3_DEV *dev);
81735 +extern void elan3_procfs_device_fini (ELAN3_DEV *dev);
81736 +#endif /* defined(LINUX) */
81737 +
81738 +/* elan3_osdep.c */
81739 +extern int        BackToBackMaster;
81740 +extern int        BackToBackSlave;
81741 +
81742 +#define ELAN_REG_REC_MAX (100)
81743 +#define ELAN_REG_REC(REG)  {                                         \
81744 +elan_reg_rec_file [elan_reg_rec_index] = __FILE__;                   \
81745 +elan_reg_rec_line [elan_reg_rec_index] = __LINE__;                   \
81746 +elan_reg_rec_reg  [elan_reg_rec_index] = REG;                        \
81747 +elan_reg_rec_cpu  [elan_reg_rec_index] = smp_processor_id();         \
81748 +elan_reg_rec_lbolt[elan_reg_rec_index] = lbolt;                      \
81749 +elan_reg_rec_index = ((elan_reg_rec_index+1) % ELAN_REG_REC_MAX);}
81750 +
81751 +extern char *    elan_reg_rec_file [ELAN_REG_REC_MAX];
81752 +extern int       elan_reg_rec_line [ELAN_REG_REC_MAX];
81753 +extern long      elan_reg_rec_lbolt[ELAN_REG_REC_MAX];
81754 +extern int       elan_reg_rec_cpu  [ELAN_REG_REC_MAX];
81755 +extern E3_uint32 elan_reg_rec_reg  [ELAN_REG_REC_MAX];
81756 +extern int       elan_reg_rec_index;
81757
81758 +#endif /* __KERNEL__ */
81759 +
81760 +
81761 +#define ELAN3_PROCFS_ROOT          "/proc/qsnet/elan3"
81762 +#define ELAN3_PROCFS_VERSION       "/proc/qsnet/elan3/version"
81763 +#define ELAN3_PROCFS_DEBUG         "/proc/qsnet/elan3/config/elandebug"
81764 +#define ELAN3_PROCFS_DEBUG_CONSOLE "/proc/qsnet/elan3/config/elandebug_console"
81765 +#define ELAN3_PROCFS_DEBUG_BUFFER  "/proc/qsnet/elan3/config/elandebug_buffer"
81766 +#define ELAN3_PROCFS_MMU_DEBUG     "/proc/qsnet/elan3/config/elan3mmu_debug"
81767 +#define ELAN3_PROCFS_PUNT_LOOPS    "/proc/qsnet/elan3/config/eventint_punt_loops"
81768 +
81769 +#define ELAN3_PROCFS_DEVICE_STATS_FMT    "/proc/qsnet/elan3/device%d/stats"
81770 +#define ELAN3_PROCFS_DEVICE_POSITION_FMT "/proc/qsnet/elan3/device%d/position"
81771 +#define ELAN3_PROCFS_DEVICE_NODESET_FMT  "/proc/qsnet/elan3/device%d/nodeset"
81772 +
81773 +#endif /* __ELAN3_ELANDEV_H */
81774 +
81775 +/*
81776 + * Local variables:
81777 + * c-file-style: "stroustrup"
81778 + * End:
81779 + */
81780 Index: linux-2.6.5-7.191/include/elan3/elandev_linux.h
81781 ===================================================================
81782 --- linux-2.6.5-7.191.orig/include/elan3/elandev_linux.h        2004-02-23 16:02:56.000000000 -0500
81783 +++ linux-2.6.5-7.191/include/elan3/elandev_linux.h     2005-07-28 14:52:52.946663840 -0400
81784 @@ -0,0 +1,74 @@
81785 +/*
81786 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81787 + *
81788 + *    For licensing information please see the supplied COPYING file
81789 + *
81790 + */
81791 +
81792 +#ifndef __ELANDEV_LINUX_H
81793 +#define __ELANDEV_LINUX_H
81794 +
81795 +#ident "$Id: elandev_linux.h,v 1.11.2.1 2005/03/07 16:27:42 david Exp $"
81796 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev_linux.h,v $*/
81797 +
81798 +#ifdef __KERNEL__
81799 +#include <linux/mm.h>
81800 +#include <linux/sched.h>
81801 +#include <linux/pci.h>
81802 +
81803 +#include <qsnet/autoconf.h>
81804 +
81805 +#if !defined(NO_COPROC)                                /* The older coproc kernel patch is applied */
81806 +#include <linux/coproc.h>
81807 +
81808 +#define ioproc_ops             coproc_ops_struct
81809 +#define ioproc_register_ops    register_coproc_ops
81810 +#define ioproc_unregister_ops  unregister_coproc_ops
81811 +
81812 +#define IOPROC_MM_STRUCT_ARG   1
81813 +#define IOPROC_PATCH_APPLIED   1
81814 +
81815 +#elif !defined(NO_IOPROC)                      /* The new ioproc kernel patch is applied */
81816 +#include <linux/ioproc.h>
81817 +
81818 +#define IOPROC_PATCH_APPLIED   1
81819 +#endif
81820 +#endif
81821 +
81822 +#define ELAN3_MAJOR              60
81823 +#define ELAN3_NAME               "elan3"
81824 +#define ELAN3_MAX_CONTROLLER     16                      /* limited to 4 bits */
81825
81826 +#define ELAN3_MINOR_DEVNUM(m)    ((m) & 0x0f)            /* card number */
81827 +#define ELAN3_MINOR_DEVFUN(m)    (((m) >> 4) & 0x0f)     /* function */
81828 +#define ELAN3_MINOR_CONTROL      0                       /* function values */
81829 +#define ELAN3_MINOR_MEM          1
81830 +#define ELAN3_MINOR_USER               2
81831
81832 +typedef void                   *DeviceMappingHandle;
81833 +
81834 +/* task and ctxt handle types */
81835 +typedef struct mm_struct       *TaskHandle;
81836 +typedef int                    CtxtHandle;
81837
81838 +#define ELAN3_MY_TASK_HANDLE() (current->mm)
81839 +#define KERNEL_TASK_HANDLE()   (get_kern_mm())
81840
81841 +/*
81842 + * OS-dependent component of ELAN3_DEV struct.
81843 + */
81844 +typedef struct elan3_dev_osdep
81845 +{
81846 +       struct pci_dev  *pci;                   /* PCI config data */
81847 +       int             ControlDeviceOpen;      /* flag to indicate control */
81848 +                                               /*   device open */
81849 +       struct proc_dir_entry *procdir;
81850 +} ELAN3_DEV_OSDEP;
81851 +
81852 +#endif /* __ELANDEV_LINUX_H */
81853 +
81854 +/*
81855 + * Local variables:
81856 + * c-file-style: "stroustrup"
81857 + * End:
81858 + */
81859 Index: linux-2.6.5-7.191/include/elan3/elanio.h
81860 ===================================================================
81861 --- linux-2.6.5-7.191.orig/include/elan3/elanio.h       2004-02-23 16:02:56.000000000 -0500
81862 +++ linux-2.6.5-7.191/include/elan3/elanio.h    2005-07-28 14:52:52.947663688 -0400
81863 @@ -0,0 +1,226 @@
81864 +/*
81865 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81866 + *
81867 + *    For licensing information please see the supplied COPYING file
81868 + *
81869 + */
81870 +
81871 +#ifndef __ELAN3_ELAN3IO_H
81872 +#define __ELAN3_ELAN3IO_H
81873 +
81874 +#ident "$Id: elanio.h,v 1.19 2003/12/08 15:40:26 mike Exp $"
81875 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanio.h,v $*/
81876 +
81877 +#define ELAN3IO_CONTROL_PATHNAME       "/dev/elan3/control%d"
81878 +#define ELAN3IO_MEM_PATHNAME   "/dev/elan3/mem%d"
81879 +#define ELAN3IO_USER_PATHNAME  "/dev/elan3/user%d"
81880 +#define ELAN3IO_SDRAM_PATHNAME   "/dev/elan3/sdram%d"
81881 +#define ELAN3IO_MAX_PATHNAMELEN        32
81882 +
81883 +/* ioctls on /dev/elan3/control */
81884 +#define ELAN3IO_CONTROL_BASE           0
81885 +
81886 +#define ELAN3IO_SET_BOUNDARY_SCAN      _IO   ('e', ELAN3IO_CONTROL_BASE + 0)
81887 +#define ELAN3IO_CLEAR_BOUNDARY_SCAN    _IO   ('e', ELAN3IO_CONTROL_BASE + 1)
81888 +#define ELAN3IO_READ_LINKVAL           _IOWR ('e', ELAN3IO_CONTROL_BASE + 2, E3_uint32)
81889 +#define ELAN3IO_WRITE_LINKVAL          _IOWR ('e', ELAN3IO_CONTROL_BASE + 3, E3_uint32)
81890 +
81891 +typedef struct elanio_set_debug_struct
81892 +{
81893 +    char       what[32];
81894 +    u_long     value;
81895 +} ELAN3IO_SET_DEBUG_STRUCT;
81896 +#define ELAN3IO_SET_DEBUG              _IOW  ('e', ELAN3IO_CONTROL_BASE + 4, ELAN3IO_SET_DEBUG_STRUCT)
81897 +
81898 +typedef struct elanio_debug_buffer_struct
81899 +{
81900 +    caddr_t    addr;
81901 +    size_t     len;
81902 +} ELAN3IO_DEBUG_BUFFER_STRUCT;
81903 +#define ELAN3IO_DEBUG_BUFFER           _IOWR ('e', ELAN3IO_CONTROL_BASE + 5, ELAN3IO_DEBUG_BUFFER_STRUCT)
81904 +
81905 +typedef struct elanio_neterr_server_struct
81906 +{
81907 +    u_int              elanid;
81908 +    void              *addr;
81909 +    char              *name;
81910 +} ELAN3IO_NETERR_SERVER_STRUCT;
81911 +#define ELAN3IO_NETERR_SERVER          _IOW  ('e', ELAN3IO_CONTROL_BASE + 6, ELAN3IO_NETERR_SERVER_STRUCT)
81912 +#define ELAN3IO_NETERR_FIXUP           _IOWR ('e', ELAN3IO_CONTROL_BASE + 7, NETERR_MSG)
81913 +
81914 +typedef struct elanio_set_position_struct
81915 +{
81916 +    u_int              device;
81917 +    unsigned short      nodeId;
81918 +    unsigned short      numNodes;
81919 +} ELAN3IO_SET_POSITION_STRUCT;
81920 +#define ELAN3IO_SET_POSITION           _IOW ('e', ELAN3IO_CONTROL_BASE + 8, ELAN3IO_SET_POSITION_STRUCT)
81921 +
81922 +#if defined(LINUX)
81923 +
81924 +/* ioctls on /dev/elan3/sdram */
81925 +#define ELAN3IO_SDRAM_BASE             20
81926 +
81927 +/* ioctls on /dev/elan3/user */
81928 +#define ELAN3IO_USER_BASE              30
81929 +
81930 +#define ELAN3IO_FREE                   _IO  ('e', ELAN3IO_USER_BASE + 0)
81931 +
81932 +#define ELAN3IO_ATTACH                 _IOWR('e', ELAN3IO_USER_BASE + 1, ELAN_CAPABILITY)
81933 +#define ELAN3IO_DETACH                 _IO  ('e', ELAN3IO_USER_BASE + 2)
81934 +
81935 +typedef struct elanio_addvp_struct
81936 +{
81937 +    u_int              process;
81938 +    ELAN_CAPABILITY     capability;
81939 +} ELAN3IO_ADDVP_STRUCT;
81940 +#define ELAN3IO_ADDVP                  _IOWR('e', ELAN3IO_USER_BASE + 3, ELAN3IO_ADDVP_STRUCT)
81941 +#define ELAN3IO_REMOVEVP                       _IOW ('e', ELAN3IO_USER_BASE + 4, int)
81942 +
81943 +typedef struct elanio_bcastvp_struct
81944 +{
81945 +    u_int              process;
81946 +    u_int              lowvp;
81947 +    u_int              highvp;
81948 +} ELAN3IO_BCASTVP_STRUCT;
81949 +#define ELAN3IO_BCASTVP                        _IOW ('e', ELAN3IO_USER_BASE + 5, ELAN3IO_BCASTVP_STRUCT)
81950 +
81951 +typedef struct elanio_loadroute_struct
81952 +{
81953 +    u_int              process;
81954 +    E3_uint16          flits[MAX_FLITS];
81955 +} ELAN3IO_LOAD_ROUTE_STRUCT;
81956 +#define ELAN3IO_LOAD_ROUTE             _IOW ('e', ELAN3IO_USER_BASE + 6, ELAN3IO_LOAD_ROUTE_STRUCT)
81957 +
81958 +#define ELAN3IO_PROCESS                        _IO  ('e', ELAN3IO_USER_BASE + 7)
81959 +
81960 +typedef struct elanio_setperm_struct
81961 +{
81962 +    caddr_t            maddr;
81963 +    E3_Addr            eaddr;
81964 +    size_t             len;
81965 +    int                        perm;
81966 +} ELAN3IO_SETPERM_STRUCT;
81967 +#define ELAN3IO_SETPERM                        _IOW ('e', ELAN3IO_USER_BASE + 8, ELAN3IO_SETPERM_STRUCT)
81968 +
81969 +typedef struct elanio_clearperm_struct
81970 +{
81971 +    E3_Addr            eaddr;
81972 +    size_t             len;
81973 +} ELAN3IO_CLEARPERM_STRUCT;
81974 +#define ELAN3IO_CLEARPERM              _IOW ('e', ELAN3IO_USER_BASE + 9, ELAN3IO_CLEARPERM_STRUCT)
81975 +
81976 +typedef struct elanio_changeperm_struct
81977 +{
81978 +    E3_Addr            eaddr;
81979 +    size_t             len;
81980 +    int                        perm;
81981 +} ELAN3IO_CHANGEPERM_STRUCT;
81982 +#define ELAN3IO_CHANGEPERM             _IOW ('e', ELAN3IO_USER_BASE + 10, ELAN3IO_CHANGEPERM_STRUCT)
81983 +
81984 +
81985 +#define ELAN3IO_HELPER_THREAD          _IO  ('e', ELAN3IO_USER_BASE + 11)
81986 +#define ELAN3IO_WAITCOMMAND            _IO  ('e', ELAN3IO_USER_BASE + 12)
81987 +#define ELAN3IO_BLOCK_INPUTTER         _IOW ('e', ELAN3IO_USER_BASE + 13, int)
81988 +#define ELAN3IO_SET_FLAGS              _IOW ('e', ELAN3IO_USER_BASE + 14, int)
81989 +
81990 +#define ELAN3IO_WAITEVENT              _IOW ('e', ELAN3IO_USER_BASE + 15, E3_Event)
81991 +#define ELAN3IO_ALLOC_EVENTCOOKIE      _IOW ('e', ELAN3IO_USER_BASE + 16, EVENT_COOKIE)
81992 +#define ELAN3IO_FREE_EVENTCOOKIE               _IOW ('e', ELAN3IO_USER_BASE + 17, EVENT_COOKIE)
81993 +#define ELAN3IO_ARM_EVENTCOOKIE                _IOW ('e', ELAN3IO_USER_BASE + 18, EVENT_COOKIE)
81994 +#define ELAN3IO_WAIT_EVENTCOOKIE               _IOW ('e', ELAN3IO_USER_BASE + 19, EVENT_COOKIE)
81995 +
81996 +#define ELAN3IO_SWAPSPACE              _IOW ('e', ELAN3IO_USER_BASE + 20, SYS_SWAP_SPACE)
81997 +#define ELAN3IO_EXCEPTION_SPACE                _IOW ('e', ELAN3IO_USER_BASE + 21, SYS_EXCEPTION_SPACE)
81998 +#define ELAN3IO_GET_EXCEPTION          _IOR ('e', ELAN3IO_USER_BASE + 22, SYS_EXCEPTION)
81999 +
82000 +typedef struct elanio_unload_struct
82001 +{
82002 +    void       *addr;
82003 +    size_t      len;
82004 +} ELAN3IO_UNLOAD_STRUCT;
82005 +#define ELAN3IO_UNLOAD                 _IOW ('e', ELAN3IO_USER_BASE + 23, ELAN3IO_UNLOAD_STRUCT)
82006 +
82007 +
82008 +
82009 +typedef struct elanio_getroute_struct
82010 +{
82011 +    u_int              process;
82012 +    E3_uint16          flits[MAX_FLITS];
82013 +} ELAN3IO_GET_ROUTE_STRUCT;
82014 +#define ELAN3IO_GET_ROUTE              _IOW ('e', ELAN3IO_USER_BASE + 24, ELAN3IO_GET_ROUTE_STRUCT)
82015 +
82016 +typedef struct elanio_resetroute_struct
82017 +{
82018 +    u_int              process;
82019 +} ELAN3IO_RESET_ROUTE_STRUCT;
82020 +#define ELAN3IO_RESET_ROUTE            _IOW ('e', ELAN3IO_USER_BASE + 25, ELAN3IO_RESET_ROUTE_STRUCT)
82021 +
82022 +typedef struct elanio_checkroute_struct
82023 +{
82024 +    u_int              process;
82025 +    E3_uint32           routeError;
82026 +    E3_uint16          flits[MAX_FLITS];
82027 +} ELAN3IO_CHECK_ROUTE_STRUCT;
82028 +#define ELAN3IO_CHECK_ROUTE            _IOW ('e', ELAN3IO_USER_BASE + 26, ELAN3IO_CHECK_ROUTE_STRUCT)
82029 +
82030 +typedef struct elanio_vp2nodeId_struct
82031 +{
82032 +    u_int              process;
82033 +    unsigned short      nodeId;
82034 +    ELAN_CAPABILITY    cap;
82035 +} ELAN3IO_VP2NODEID_STRUCT;
82036 +#define ELAN3IO_VP2NODEID      _IOWR('e', ELAN3IO_USER_BASE + 27, ELAN3IO_VP2NODEID_STRUCT)
82037 +
82038 +#define ELAN3IO_SET_SIGNAL     _IOW ('e', ELAN3IO_USER_BASE + 28, int)
82039 +
82040 +typedef struct elanio_process_2_location_struct
82041 +{
82042 +    u_int              process;
82043 +    ELAN_LOCATION       loc;
82044 +} ELAN3IO_PROCESS_2_LOCATION_STRUCT;
82045 +#define ELAN3IO_PROCESS_2_LOCATION     _IOW ('e', ELAN3IO_USER_BASE + 29, ELAN3IO_PROCESS_2_LOCATION_STRUCT)
82046 +
82047 +
82048 +
82049 +/* ioctls on all device */
82050 +#define ELAN3IO_GENERIC_BASE           100
82051 +typedef struct elanio_get_devinfo_struct
82052 +{
82053 +    ELAN_DEVINFO *devinfo;
82054 +} ELAN3IO_GET_DEVINFO_STRUCT;
82055 +#define ELAN3IO_GET_DEVINFO            _IOR ('e', ELAN3IO_GENERIC_BASE + 0, ELAN_DEVINFO)
82056 +
82057 +typedef struct elanio_get_position_struct
82058 +{
82059 +    ELAN_POSITION *position;
82060 +} ELAN3IO_GET_POSITION_STRUCT;
82061 +#define ELAN3IO_GET_POSITION             _IOR ('e', ELAN3IO_GENERIC_BASE + 1, ELAN_POSITION)
82062 +
82063 +typedef struct elanio_stats_struct
82064 +{
82065 +    int                which;
82066 +    void       *ptr;
82067 +} ELAN3IO_STATS_STRUCT;
82068 +#define ELAN3IO_STATS                  _IOR ('e', ELAN3IO_GENERIC_BASE + 2, ELAN3IO_STATS_STRUCT)
82069 +#  define ELAN3_SYS_STATS_DEVICE       0
82070 +#  define ELAN3_SYS_STATS_MMU          1
82071 +
82072 +/* offsets on /dev/elan3/control */
82073 +
82074 +/* offsets on /dev/elan3/mem */
82075 +
82076 +/* page numbers on /dev/elan3/user */
82077 +#define ELAN3IO_OFF_COMMAND_PAGE               0
82078 +#define ELAN3IO_OFF_FLAG_PAGE          1
82079 +#define ELAN3IO_OFF_UREG_PAGE          2
82080 +
82081 +#endif /* LINUX */
82082 +
82083 +#endif /* __ELAN3_ELAN3IO_H */
82084 +
82085 +/*
82086 + * Local variables:
82087 + * c-file-style: "stroustrup"
82088 + * End:
82089 + */
82090 Index: linux-2.6.5-7.191/include/elan3/elanregs.h
82091 ===================================================================
82092 --- linux-2.6.5-7.191.orig/include/elan3/elanregs.h     2004-02-23 16:02:56.000000000 -0500
82093 +++ linux-2.6.5-7.191/include/elan3/elanregs.h  2005-07-28 14:52:52.949663384 -0400
82094 @@ -0,0 +1,1063 @@
82095 +/*
82096 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82097 + *
82098 + *    For licensing information please see the supplied COPYING file
82099 + *
82100 + */
82101 +
82102 +/*
82103 + * Header file for internal slave mapping of the ELAN3 registers
82104 + */
82105 +
82106 +#ifndef _ELAN3_ELANREGS_H
82107 +#define _ELAN3_ELANREGS_H
82108 +
82109 +#ident "$Id: elanregs.h,v 1.87 2004/04/22 12:27:21 david Exp $"
82110 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanregs.h,v $*/
82111 +
82112 +#include <elan3/e3types.h>
82113 +#include <elan3/dma.h>
82114 +#include <elan3/elanuregs.h>
82115 +
82116 +#define MAX_ROOT_CONTEXT_MASK  0xfff
82117 +#define SYS_CONTEXT_BIT                0x1000
82118 +#define ALL_CONTEXT_BITS       (MAX_ROOT_CONTEXT_MASK | SYS_CONTEXT_BIT)
82119 +#define ROOT_TAB_OFFSET(Cntxt) (((Cntxt) & MAX_ROOT_CONTEXT_MASK) << 4)
82120 +#define CLEAR_SYS_BIT(Cntxt)   ((Cntxt) & ~SYS_CONTEXT_BIT)
82121 +
82122 +#define E3_CACHELINE_SIZE      (32)
82123 +#define E3_CACHE_SIZE          (8192)
82124 +
82125 +typedef volatile struct _E3_CacheSets
82126 +{
82127 +   E3_uint64   Set0[256];      /* 2k bytes per set */
82128 +   E3_uint64   Set1[256];      /* 2k bytes per set */
82129 +   E3_uint64   Set2[256];      /* 2k bytes per set */
82130 +   E3_uint64   Set3[256];      /* 2k bytes per set */
82131 +} E3_CacheSets;
82132 +
82133 +typedef union e3_cache_tag
82134 +{
82135 +   E3_uint64   Value;
82136 +   struct {
82137 +#if defined(__LITTLE_ENDIAN__)
82138 +       E3_uint32 pad2:8;               /* Undefined value when read */
82139 +       E3_uint32 LineError:1;          /* A line error has occured */
82140 +       E3_uint32 Modified:1;           /* Cache data is modified */
82141 +       E3_uint32 FillPending:1;                /* Pipelined fill occuring*/
82142 +       E3_uint32 AddrTag27to11:17;     /* Tag address bits 27 to 11 */
82143 +       E3_uint32 pad1:4;               /* Undefined value when read */
82144 +       E3_uint32 pad0;                 /* Undefined value when read */
82145 +#else
82146 +       E3_uint32 pad0;                 /* Undefined value when read */
82147 +       E3_uint32 pad1:4;               /* Undefined value when read */
82148 +       E3_uint32 AddrTag27to11:17;     /* Tag address bits 27 to 11 */
82149 +       E3_uint32 FillPending:1;                /* Pipelined fill occuring*/
82150 +       E3_uint32 Modified:1;           /* Cache data is modified */
82151 +       E3_uint32 LineError:1;          /* A line error has occured */
82152 +       E3_uint32 pad2:8;               /* Undefined value when read */
82153 +#endif
82154 +   } s;
82155 +} E3_CacheTag;
82156 +
82157 +#define E3_NumCacheLines       64
82158 +#define E3_NumCacheSets                4
82159 +
82160 +typedef volatile struct _E3_CacheTags
82161 +{
82162 +   E3_CacheTag Tags[E3_NumCacheLines][E3_NumCacheSets];        /* 2k bytes per set */
82163 +} E3_CacheTags;
82164 +
82165 +typedef union E3_IProcStatus_Reg
82166 +{
82167 +    E3_uint32 Status;
82168 +    struct
82169 +    {
82170 +#if defined(__LITTLE_ENDIAN__)
82171 +       E3_uint32 TrapType:8;           /* iprocs trap ucode address */
82172 +       E3_uint32 SuspendAddr:8;        /* iprocs suspend address */
82173 +       E3_uint32 EopType:2;            /* Type of Eop Received */
82174 +       E3_uint32 QueueingPacket:1;     /* receiving a queueing packet */
82175 +       E3_uint32 AckSent:1;            /* a packet ack has been sent */
82176 +       E3_uint32 Reject:1;             /* a packet nack has been sent */
82177 +       E3_uint32 CrcStatus:2;          /* Crc Status value */
82178 +       E3_uint32 BadLength:1;          /* Eop was received in a bad place */
82179 +       E3_uint32 Chan1:1;              /* This packet received on v chan1 */
82180 +       E3_uint32 First:1;              /* This is the first transaction in the packet */
82181 +       E3_uint32 Last:1;               /* This is the last transaction in the packet */
82182 +       E3_uint32 Unused:2;
82183 +       E3_uint32 WakeupFunction:3;     /* iprocs wakeup function */
82184 +#else
82185 +       E3_uint32 WakeupFunction:3;     /* iprocs wakeup function */
82186 +       E3_uint32 Unused:2;
82187 +       E3_uint32 Last:1;               /* This is the last transaction in the packet */
82188 +       E3_uint32 First:1;              /* This is the first transaction in the packet */
82189 +       E3_uint32 Chan1:1;              /* This packet received on v chan1 */
82190 +       E3_uint32 BadLength:1;          /* Eop was received in a bad place */
82191 +       E3_uint32 CrcStatus:2;          /* Crc Status value */
82192 +       E3_uint32 Reject:1;             /* a packet nack has been sent */
82193 +       E3_uint32 AckSent:1;            /* a packet ack has been sent */
82194 +       E3_uint32 QueueingPacket:1;     /* receiving a queueing packet */
82195 +       E3_uint32 EopType:2;            /* Type of Eop Received */
82196 +       E3_uint32 SuspendAddr:8;        /* iprocs suspend address */
82197 +       E3_uint32 TrapType:8;           /* iprocs trap ucode address */
82198 +#endif
82199 +    } s;
82200 +} E3_IProcStatus_Reg;
82201 +
82202 +#define CRC_STATUS_GOOD    (0 << 21)
82203 +#define CRC_STATUS_DISCARD (1 << 21)
82204 +#define CRC_STATUS_ERROR   (2 << 21)
82205 +#define CRC_STATUS_BAD     (3 << 21)
82206 +
82207 +#define CRC_MASK          (3 << 21)
82208 +
82209 +#define EOP_GOOD          (1 << 16)
82210 +#define EOP_BADACK        (2 << 16)
82211 +#define EOP_ERROR_RESET           (3 << 16)
82212 +
82213 +#define E3_IPS_LastTrans       (1 << 26)
82214 +#define E3_IPS_FirstTrans      (1 << 25)
82215 +#define E3_IPS_VChan1          (1 << 24)
82216 +#define E3_IPS_BadLength       (1 << 23)
82217 +#define E3_IPS_CrcMask         (3 << 21)
82218 +#define E3_IPS_Rejected                (1 << 20)
82219 +#define E3_IPS_AckSent         (1 << 19)
82220 +#define E3_IPS_QueueingPacket  (1 << 18)
82221 +#define E3_IPS_EopType         (3 << 16)
82222 +
82223 +typedef union E3_Status_Reg
82224 +{
82225 +    E3_uint32 Status;
82226 +    struct
82227 +    {
82228 +#if defined(__LITTLE_ENDIAN__)
82229 +       E3_uint32 TrapType:8;           /* procs trap ucode address */
82230 +       E3_uint32 SuspendAddr:8;        /* procs suspend address */
82231 +       E3_uint32 Context:13;           /* procs current context */
82232 +       E3_uint32 WakeupFunction:3;     /* procs wakeup function */
82233 +#else
82234 +       E3_uint32 WakeupFunction:3;     /* procs wakeup function */
82235 +       E3_uint32 Context:13;           /* procs current context */
82236 +       E3_uint32 SuspendAddr:8;        /* procs suspend address */
82237 +       E3_uint32 TrapType:8;           /* procs trap ucode address */
82238 +#endif
82239 +    } s;
82240 +} E3_Status_Reg;
82241 +
82242 +/* values for WakeupFunction */
82243 +#define SleepOneTick                   0
82244 +#define WakeupToSendTransOrEop         1
82245 +#define SleepOneTickThenRunnable       2
82246 +#define WakeupNever                    4
82247 +/* extra dma wakeup functions */
82248 +#define WakupeToSendTransOrEop         1
82249 +#define WakeupForPacketAck             3
82250 +#define WakeupToSendTrans              5
82251 +/* extra thread wakup function */
82252 +#define WakeupStopped                  3
82253 +/* extra cproc wakup function */
82254 +#define WakeupSetEvent                 3
82255 +
82256 +#define GET_STATUS_CONTEXT(Ptr)      ((Ptr.Status >> 16) & 0x1fff)
82257 +#define GET_STATUS_SUSPEND_ADDR(Ptr) ((Ptr.Status >> 8) & 0xff)
82258 +#define GET_STATUS_TRAPTYPE(Ptr)     ((E3_uint32)(Ptr.Status & 0xff))
82259 +
82260 +/*
82261 + * Interrupt register bits
82262 + */
82263 +#define INT_PciMemErr                  (1<<15) /* Pci memory access error */
82264 +#define INT_SDRamInt                   (1<<14) /* SDRam ECC interrupt */
82265 +#define INT_EventInterrupt             (1<<13) /* Event Interrupt */
82266 +#define INT_LinkError                  (1<<12) /* Link Error */
82267 +#define INT_ComQueue                   (1<<11) /* a comm queue half full */
82268 +#define INT_TProcHalted                        (1<<10) /* Tproc Halted */
82269 +#define INT_DProcHalted                        (1<<9) /* Dmas Halted */
82270 +#define INT_DiscardingNonSysCntx       (1<<8) /* Inputters Discarding Non-SysCntx */
82271 +#define INT_DiscardingSysCntx          (1<<7) /* Inputters Discarding SysCntx */
82272 +#define INT_TProc                      (1<<6) /* tproc interrupt */
82273 +#define INT_CProc                      (1<<5) /* cproc interrupt */
82274 +#define INT_DProc                      (1<<4) /* dproc interrupt */
82275 +#define INT_IProcCh1NonSysCntx         (1<<3) /* iproc non-SysCntx interrupt */
82276 +#define INT_IProcCh1SysCntx            (1<<2) /* iproc SysCntx interrupt */
82277 +#define INT_IProcCh0NonSysCntx         (1<<1) /* iproc non-SysCntx interrupt */
82278 +#define INT_IProcCh0SysCntx            (1<<0) /* iproc SysCntx interrupt */
82279 +
82280 +#define INT_Inputters          (INT_IProcCh0SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh1SysCntx | INT_IProcCh1NonSysCntx)
82281 +#define INT_Discarding         (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)
82282 +#define INT_Halted             (INT_DProcHalted | INT_TProcHalted)
82283 +#define INT_ErrorInterrupts    (INT_PciMemErr | INT_SDRamInt | INT_LinkError)
82284 +
82285 +/*
82286 + * Link state bits.
82287 + */
82288 +#define LS_LinkNotReady        (1 << 0) /* Link is in reset or recovering from an error */
82289 +#define LS_Locked      (1 << 1) /* Linkinput PLL is locked */
82290 +#define LS_LockError   (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
82291 +#define LS_DeskewError (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
82292 +#define LS_PhaseError  (1 << 4) /* Linkinput Phase alignment error. */
82293 +#define LS_DataError   (1 << 5) /* Received value was neither good data or a token. */
82294 +#define LS_FifoOvFlow0 (1 << 6) /* Channel 0 input fifo overflowed. */
82295 +#define LS_FifoOvFlow1 (1 << 7) /* Channel 1 input fifo overflowed. */
82296 +
82297 +/*
82298 + * Link State Constant defines, used for writing to LinkSetValue
82299 + */
82300 +
82301 +#define LRS_DataDel0           0x0
82302 +#define LRS_DataDel1           0x1
82303 +#define LRS_DataDel2           0x2
82304 +#define LRS_DataDel3           0x3
82305 +#define LRS_DataDel4           0x4
82306 +#define LRS_DataDel5           0x5
82307 +#define LRS_DataDel6           0x6
82308 +#define LRS_DataDel7           0x7
82309 +#define LRS_DataDel8           0x8
82310 +#define LRS_PllDelValue                0x9
82311 +#define LRS_ClockEven          0xA
82312 +#define LRS_ClockOdd           0xB
82313 +#define LRS_ErrorLSW           0xC
82314 +#define LRS_ErrorMSW           0xD
82315 +#define LRS_FinCoarseDeskew    0xE
82316 +#define LRS_LinkInValue                0xF
82317 +#define LRS_NumLinkDels         0x10
82318 +
82319 +#define LRS_Pllfast             0x40
82320
82321 +union Sched_Status
82322 +{
82323 +    E3_uint32 Status;
82324 +    struct
82325 +    {
82326 +#if defined(__LITTLE_ENDIAN__)
82327 +       E3_uint32 StopNonSysCntxs:1;
82328 +       E3_uint32 FlushCommandQueues:1;
82329 +       E3_uint32 HaltDmas:1;
82330 +       E3_uint32 HaltDmaDequeue:1;
82331 +       E3_uint32 HaltThread:1;
82332 +       E3_uint32 CProcStop:1;
82333 +       E3_uint32 DiscardSysCntxIn:1;
82334 +       E3_uint32 DiscardNonSysCntxIn:1;
82335 +       E3_uint32 RestartCh0SysCntx:1;
82336 +       E3_uint32 RestartCh0NonSysCntx:1;
82337 +       E3_uint32 RestartCh1SysCntx:1;
82338 +       E3_uint32 RestartCh1NonSysCntx:1;
82339 +       E3_uint32 RestartDProc:1;
82340 +       E3_uint32 RestartTProc:1;
82341 +       E3_uint32 RestartCProc:1;
82342 +       E3_uint32 ClearLinkErrorInt:1;
82343 +       E3_uint32 :3;
82344 +       E3_uint32 LinkSetValue:10; 
82345 +       E3_uint32 FixLinkDelays:1;
82346 +       E3_uint32 LinkBoundaryScan:1;
82347 +#else
82348 +       E3_uint32 LinkBoundaryScan:1;
82349 +       E3_uint32 FixLinkDelays:1;
82350 +       E3_uint32 LinkSetValue:10; 
82351 +       E3_uint32 :3;
82352 +       E3_uint32 ClearLinkErrorInt:1;
82353 +       E3_uint32 RestartCProc:1;
82354 +       E3_uint32 RestartTProc:1;
82355 +       E3_uint32 RestartDProc:1;
82356 +       E3_uint32 RestartCh1NonSysCntx:1;
82357 +       E3_uint32 RestartCh1SysCntx:1;
82358 +       E3_uint32 RestartCh0NonSysCntx:1;
82359 +       E3_uint32 RestartCh0SysCntx:1;
82360 +       E3_uint32 DiscardNonSysCntxIn:1;
82361 +       E3_uint32 DiscardSysCntxIn:1;
82362 +       E3_uint32 CProcStop:1;
82363 +       E3_uint32 HaltThread:1;
82364 +       E3_uint32 HaltDmaDequeue:1;
82365 +       E3_uint32 HaltDmas:1;
82366 +       E3_uint32 FlushCommandQueues:1;
82367 +       E3_uint32 StopNonSysCntxs:1;
82368 +#endif
82369 +    } s;
82370 +};
82371 +
82372 +#define LinkBoundaryScan       ((E3_uint32) 1<<31) /* Clears the link error interrupt */
82373 +#define FixLinkDelays          ((E3_uint32) 1<<30) /* Clears the link error interrupt */
82374 +#define LinkSetValue(Val, OldVal) ((E3_uint32) (((Val) & 0x3ff) << 20) | ((OldVal) & ((~0x3ff) << 20)))
82375 +
82376 +#define ClearLinkErrorInt      ((E3_uint32) 1<<16) /* Clears the link error interrupt */
82377 +#define RestartCProc           ((E3_uint32) 1<<15) /* Clears command proc interrupt */
82378 +#define RestartTProc           ((E3_uint32) 1<<14) /* Clears thread interrupt */
82379 +#define RestartDProc           ((E3_uint32) 1<<13) /* Clears dma0 interrupt */
82380 +#define RestartCh1NonSysCntx   ((E3_uint32) 1<<12) /* Clears interrupt */
82381 +#define RestartCh1SysCntx      ((E3_uint32) 1<<11) /* Clears interrupt */
82382 +#define RestartCh0NonSysCntx   ((E3_uint32) 1<<10) /* Clears interrupt */
82383 +#define RestartCh0SysCntx      ((E3_uint32) 1<<9) /* Clears interrupt */
82384 +#define CProcStopped           ((E3_uint32) 1<<9) /* Read value only */
82385 +
82386 +#define TraceSetEvents         ((E3_uint32) 1<<8)
82387 +#define DiscardNonSysCntxIn    ((E3_uint32) 1<<7)
82388 +#define DiscardSysCntxIn       ((E3_uint32) 1<<6)
82389 +#define CProcStop              ((E3_uint32) 1<<5) /* Will empty all the command port queues. */
82390 +#define HaltThread             ((E3_uint32) 1<<4) /* Will stop the thread proc and clear the tproc command queue */
82391 +#define HaltDmaDequeue         ((E3_uint32) 1<<3) /* Will stop the dmaers starting new dma's. */
82392 +#define HaltDmas               ((E3_uint32) 1<<2) /* Will stop the dmaers and clear the dma command queues */
82393 +#define FlushCommandQueues     ((E3_uint32) 1<<1) /* Causes the command ports to be flushed. */
82394 +#define StopNonSysCntxs                ((E3_uint32) 1<<0) /* Prevents a non-SysCntx from starting. */
82395 +
82396 +/* Initial value of schedule status register */
82397 +#define LinkResetToken         0x00F
82398 +
82399 +#define Sched_Initial_Value    (LinkBoundaryScan | (LinkResetToken << 20) | \
82400 +                                DiscardSysCntxIn | DiscardNonSysCntxIn | HaltThread | HaltDmas)
82401 +
82402 +#define StopDmaQueues       (HaltDmaDequeue | HaltDmas | \
82403 +                             DiscardNonSysCntxIn | DiscardSysCntxIn)
82404 +#define CheckDmaQueueStopped (INT_DiscardingNonSysCntx | INT_DiscardingSysCntx | INT_DProcHalted)
82405 +
82406 +#define HaltStopAndExtTestMask 0xfff001ff
82407 +#define HaltAndStopMask                0x000001ff
82408 +
82409 +
82410 +#define DmaComQueueNotEmpty    (1<<0)
82411 +#define ThreadComQueueNotEmpty (1<<1)
82412 +#define EventComQueueNotEmpty  (1<<2)
82413 +#define DmaComQueueHalfFull    (1<<3)
82414 +#define ThreadComQueueHalfFull (1<<4)
82415 +#define EventComQueueHalfFull  (1<<5)
82416 +#define DmaComQueueError       (1<<6)
82417 +#define ThreadComQueueError    (1<<7)
82418 +#define EventComQueueError     (1<<8)
82419 +
82420 +#define ComQueueNotEmpty       (DmaComQueueNotEmpty | ThreadComQueueNotEmpty | EventComQueueNotEmpty)
82421 +#define ComQueueError          (DmaComQueueError | ThreadComQueueError | EventComQueueError)
82422 +
82423 +typedef union _E3_DmaInfo
82424 +{
82425 +    E3_uint32  Value;
82426 +    struct
82427 +    {
82428 +#if defined(__LITTLE_ENDIAN__)
82429 +       E3_uint32 DmaOutputOpen:1;      /* The packet is currently open */
82430 +       E3_uint32 :7;
82431 +       E3_uint32 TimeSliceCount:2;     /* Time left to timeslice */
82432 +       E3_uint32 UseRemotePriv:1;      /* Set for remote read dmas */
82433 +       E3_uint32 DmaLastPacket:1;      /* Set for the last packet of a dma */
82434 +       E3_uint32 PacketAckValue:2;     /* Packet ack type. Valid if AckBufferValid set. */
82435 +       E3_uint32 PacketTimeout:1;      /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
82436 +       E3_uint32 AckBufferValid:1;     /* Packet ack is valid. */
82437 +       E3_uint32 :16;                  /* read as Zero */
82438 +#else
82439 +       E3_uint32 :16;                  /* read as Zero */
82440 +       E3_uint32 AckBufferValid:1;     /* Packet ack is valid. */
82441 +       E3_uint32 PacketTimeout:1;      /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
82442 +       E3_uint32 PacketAckValue:2;     /* Packet ack type. Valid if AckBufferValid set. */
82443 +       E3_uint32 DmaLastPacket:1;      /* Set for the last packet of a dma */
82444 +       E3_uint32 UseRemotePriv:1;      /* Set for remote read dmas */
82445 +       E3_uint32 TimeSliceCount:2;     /* Time left to timeslice */
82446 +       E3_uint32 :7;
82447 +       E3_uint32 DmaOutputOpen:1;      /* The packet is currently open */
82448 +#endif
82449 +    } s;
82450 +} E3_DmaInfo;
82451 +
82452 +typedef volatile struct _E3_DmaRds
82453 +{
82454 +   E3_uint32           DMA_Source4to0AndTwoReads;
82455 +   E3_uint32           pad13;
82456 +   E3_uint32           DMA_BytesToRead;
82457 +   E3_uint32           pad14;
82458 +   E3_uint32           DMA_MinusPacketSize;
82459 +   E3_uint32           pad15;
82460 +   E3_uint32           DMA_MaxMinusPacketSize;
82461 +   E3_uint32           pad16;
82462 +   E3_uint32           DMA_DmaOutputOpen;
82463 +   E3_uint32           pad16a;
82464 +   E3_DmaInfo          DMA_PacketInfo;
82465 +   E3_uint32           pad17[7];
82466 +   E3_uint32           IProcTrapBase;
82467 +   E3_uint32           pad18;
82468 +   E3_uint32           IProcBlockTrapBase;
82469 +   E3_uint32           pad19[11];
82470 +} E3_DmaRds;
82471 +   
82472 +typedef volatile struct _E3_DmaWrs
82473 +{
82474 +   E3_uint64           pad0;
82475 +   E3_uint64           LdAlignment;
82476 +   E3_uint64           ResetAckNLdBytesToWr;
82477 +   E3_uint64           SetAckNLdBytesToWr;
82478 +   E3_uint64           LdBytesToRd;
82479 +   E3_uint64           LdDmaType;
82480 +   E3_uint64           SendRoutes;
82481 +   E3_uint64           SendEop;
82482 +   E3_uint64           pad1[8];
82483 +} E3_DmaWrs;
82484 +
82485 +typedef volatile struct _E3_Exts
82486 +{
82487 +   E3_uint32           CurrContext;                            /* 0x12a00 */
82488 +   E3_uint32           pad0;
82489 +   E3_Status_Reg       DProcStatus;                            /* 0x12a08 */
82490 +   E3_uint32           pad1;
82491 +   E3_Status_Reg       CProcStatus;                            /* 0x12a10 */
82492 +   E3_uint32           pad2;
82493 +   E3_Status_Reg       TProcStatus;                            /* 0x12a18 */
82494 +   E3_uint32           pad3;
82495 +   E3_IProcStatus_Reg  IProcStatus;                            /* 0x12a20 */
82496 +   E3_uint32           pad4[3];
82497 +
82498 +   E3_uint32           IProcTypeContext;                       /* 0x12a30 */
82499 +   E3_uint32           pad5;
82500 +   E3_uint32           IProcTransAddr;                         /* 0x12a38 */
82501 +   E3_uint32           pad6;
82502 +   E3_uint32           IProcCurrTransData0;                    /* 0x12a40 */
82503 +   E3_uint32           pad7;
82504 +   E3_uint32           IProcCurrTransData1;                    /* 0x12a48 */
82505 +   E3_uint32           pad8;
82506 +
82507 +   E3_uint32           SchCntReg;                              /* 0x12a50 */
82508 +   E3_uint32           pad9;
82509 +   E3_uint32           InterruptReg;                           /* 0x12a58 */
82510 +   E3_uint32           pad10;
82511 +   E3_uint32           InterruptMask;                          /* 0x12a60 */
82512 +   E3_uint32           pad11;
82513 +   E3_uint32           LinkErrorTypes;                         /* 0x12a68 */
82514 +   E3_uint32           pad12[3];
82515 +   E3_uint32           LinkState;      /* a read here returens the DataDel value for the */
82516 +                                       /* link that has just been defined by a write to */
82517 +                                       /* Regs.Exts.SchCntReg.LinkSetValue */
82518 +   E3_uint32           pad13;
82519 +
82520 +   union                                                       /* 0x12a80 */
82521 +   {
82522 +      E3_DmaWrs                DmaWrs;
82523 +      E3_DmaRds                DmaRds;
82524 +   } Dmas;
82525 +} E3_Exts;
82526 +
82527 +typedef union com_port_entry
82528 +{
82529 +    E3_uint64  type;
82530 +    struct
82531 +    {
82532 +       E3_uint32 Address;              /* Command VAddr */
82533 +#if defined(__LITTLE_ENDIAN__)
82534 +       E3_uint32 Context0Issue:1;      /* Issue was for context 0 */
82535 +       E3_uint32 EventNotCommand:1;    /* Issue address bit 3 */
82536 +       E3_uint32 RemoteDesc:1;         /* Issue address bit 5 */
82537 +       E3_uint32 :13;                  /* read as Zero */
82538 +       E3_uint32 Context:12;           /* Command Context */
82539 +       E3_uint32 :4;                   /* read as Zero */
82540 +#else
82541 +       E3_uint32 :4;                   /* read as Zero */
82542 +       E3_uint32 Context:12;           /* Command Context */
82543 +       E3_uint32 :13;                  /* read as Zero */
82544 +       E3_uint32 RemoteDesc:1;         /* Issue address bit 5 */
82545 +       E3_uint32 EventNotCommand:1;    /* Issue address bit 3 */
82546 +       E3_uint32 Context0Issue:1;      /* Issue was for context 0 */
82547 +#endif
82548 +    } s;
82549 +} E3_ComPortEntry;
82550 +
82551 +/* control reg bits */
82552 +#define CONT_MMU_ENABLE                (1 << 0) /* bit 0 enables mmu */
82553 +#define CONT_ENABLE_8K_PAGES   (1 << 1) /* When set smallest page is 8k instead of 4k. */
82554 +#define CONT_EN_ALL_SETS       (1 << 2) /* enable cache */
82555 +#define CONT_CACHE_LEVEL0      (1 << 3) /* cache context table */
82556 +#define CONT_CACHE_LEVEL1      (1 << 4) /* cache up level 1 PTD/PTE */
82557 +#define CONT_CACHE_LEVEL2      (1 << 5) /* cache up level 2 PTD/PTE */
82558 +#define CONT_CACHE_LEVEL3      (1 << 6) /* cache up level 3 PTD/PTE */
82559 +#define CONT_CACHE_TRAPS       (1 << 7) /* cache up traps */
82560 +#define CONT_CACHE_LEV0_ROUTES (1 << 8) /* cache up small routes */
82561 +#define CONT_CACHE_LEV1_ROUTES (1 << 9) /* cache up large routes */
82562 +#define CONT_CACHE_ALL         (CONT_CACHE_LEVEL0 | CONT_CACHE_LEVEL1 | CONT_CACHE_LEVEL2 | \
82563 +                                CONT_CACHE_LEVEL3 | CONT_CACHE_TRAPS | \
82564 +                                CONT_CACHE_LEV0_ROUTES | CONT_CACHE_LEV1_ROUTES)
82565 +
82566 +#define CONT_SYNCHRONOUS       (1 << 10) /* PCI running sync */
82567 +#define CONT_SER               (1 << 11) /* Single bit output (Elan1 SER bit) */
82568 +#define CONT_SIR               (1 << 12) /* Writing 1 resets elan. */
82569 +
82570 +#define CONT_PSYCHO_MODE       (1 << 13) /* Enables all the perversion required by psycho */
82571 +#define CONT_ENABLE_ECC                (1 << 14) /* Enables error detecting on the ECC */
82572 +#define CONT_SDRAM_TESTING     (1 << 15) /* Switches to test mode for checking EEC data bits */
82573 +
82574 +/* defines SDRam CasLatency. Once set will not change again unless reset is reasserted. */
82575 +/* 1 = Cas Latency is 3, 0 = Cas Latency is 2 */
82576 +#define CAS_LATENCY_2          (0 << 16)
82577 +#define CAS_LATENCY_3          (1 << 16)
82578 +#define REFRESH_RATE_2US       (0 << 17) /* defines 2us SDRam Refresh rate. */
82579 +#define REFRESH_RATE_4US       (1 << 17) /* defines 4us SDRam Refresh rate. */
82580 +#define REFRESH_RATE_8US       (2 << 17) /* defines 8us SDRam Refresh rate. */
82581 +#define REFRESH_RATE_16US      (3 << 17) /* defines 16us SDRam Refresh rate. */
82582 +
82583 +#define CONT_PCI_ERR           (1 << 19) /* Read 1 if PCI Error */
82584 +#define CONT_CLEAR_PCI_ERROR   (1 << 19) /* Clears a PCI error. */
82585 +
82586 +/* Will cause the PCI error bit to become set. This is used to force the threads proc
82587 +   and the uProc to start to stall. */
82588 +#define CONT_SET_PCI_ERROR     (1 << 20)
82589 +
82590 +/* Writes SDram control reg when set. Also starts SDram memory system refreshing. */
82591 +#define SETUP_SDRAM            (1 << 21)
82592 +
82593 +/* Flushes the tlb */
82594 +#define MMU_FLUSH              (1 << 22)
82595 +/* and read back when it's finished */
82596 +#define MMU_FLUSHED            (1 << 0)
82597 +
82598 +/* Clears any ECC error detected by SDRam interface */
82599 +#define CLEAR_SDRAM_ERROR      (1 << 23)
82600 +
82601 +#define ECC_ADDR_MASK          0x0ffffff8
82602 +#define ECC_UE_MASK            0x1 
82603 +#define ECC_CE_MASK            0x2
82604 +#define ECC_ME_MASK            0x4 
82605 +#define ECC_SYN_MASK           0xff
82606 +
82607 +/* define page table entry bit fields */
82608 +#define TLB_PageSizeBits       (3 << 0)
82609 +#define TLB_ACCBits            (7 << 2)
82610 +#define TLB_LocalBit           (1 << 5)
82611 +#define TLB_PCI64BitTargetBit  (1 << 6)
82612 +#define TLB_PCIBigEndianBit    (1 << 7)
82613 +
82614 +#define TLB_ModifiedBit                (1 << 55)
82615 +#define TLB_ReferencedBit      (1 << 63)
82616 +
82617 +/* Used to read values from the tlb. */
82618 +#define TLB_TlbReadCntBitsSh   56
82619 +#define TLB_UseSelAddrSh       (1ULL << 60)
82620 +#define TLB_WriteTlbLine       (1ULL << 61)
82621 +
82622 +#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \
82623 +                             ((E3_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh))
82624 +
82625 +typedef union _E3_CacheContReg
82626 +{
82627 +    E3_uint32 ContReg;
82628 +    struct
82629 +    {
82630 +#if defined(__LITTLE_ENDIAN__)
82631 +       E3_uint32 MMU_Enable:1;         /* wr 1 to enable the MMU */
82632 +       E3_uint32 Set8kPages:1;         /* wr 1 smallest page is 8k. */
82633 +       E3_uint32 EnableAllSets:1;      /* wr 1 All the cache sets are enabled */
82634 +       E3_uint32 Cache_Level0:1;       /* wr 1 lev0 page tabs will be cached */
82635 +       E3_uint32 Cache_Level1:1;       /* wr 1 lev1 page tabs will be cached */
82636 +       E3_uint32 Cache_Level2:1;       /* wr 1 lev2 page tabs will be cached */
82637 +       E3_uint32 Cache_Level3:1;       /* wr 1 lev3 page tabs will be cached */
82638 +       E3_uint32 Cache_Traps:1;        /* wr 1 trap info will be cached */
82639 +       E3_uint32 Cache_Lev0_Routes:1;  /* wr 1 small routes will be cached */
82640 +       E3_uint32 Cache_Lev1_Routes:1;  /* wr 1 big routes will be cached */
82641 +       E3_uint32 PCI_Synchronous:1;    /* Pci and sys clocks are running synchronously*/
82642 +       E3_uint32 SER:1;                /* 1 bit output port */
82643 +       E3_uint32 SIR:1;                /* write 1 will reset elan */
82644 +       E3_uint32 PsychoMode:1;         /* Enables psycho perversion mode. */
82645 +       E3_uint32 CasLatency:1;         /* 1=cas latency=3, 0=cas latency=2 */
82646 +       E3_uint32 RefreshRate:2;        /* 0=2us, 1=4us, 2=8us, 3=16us */
82647 +       E3_uint32 Pci_Err:1;            /* pci error. Write 1 clears err */
82648 +       E3_uint32 Set_Pci_Error:1;      /* Will simulate a Pci error */
82649 +       E3_uint32 StartSDRam:1;         /* Starts the sdram subsystem */
82650 +       E3_uint32 FlushTlb:1;           /* Flush the contents of the tlb */
82651 +       E3_uint32 :11;
82652 +#else
82653 +       E3_uint32 :11;
82654 +       E3_uint32 FlushTlb:1;           /* Flush the contents of the tlb */
82655 +       E3_uint32 StartSDRam:1;         /* Starts the sdram subsystem */
82656 +       E3_uint32 Set_Pci_Error:1;      /* Will simulate a Pci error */
82657 +       E3_uint32 Pci_Err:1;            /* pci error. Write 1 clears err */
82658 +       E3_uint32 RefreshRate:2;        /* 0=2us, 1=4us, 2=8us, 3=16us */
82659 +       E3_uint32 CasLatency:1;         /* 1=cas latency=3, 0=cas latency=2 */
82660 +       E3_uint32 PsychoMode:1;         /* Enables psycho perversion mode. */
82661 +       E3_uint32 SIR:1;                /* write 1 will reset elan */
82662 +       E3_uint32 SER:1;                /* 1 bit output port */
82663 +       E3_uint32 PCI_Synchronous:1;    /* Pci and sys clocks are running synchronously*/
82664 +       E3_uint32 Cache_Lev1_Routes:1;  /* wr 1 big routes will be cached */
82665 +       E3_uint32 Cache_Lev0_Routes:1;  /* wr 1 small routes will be cached */
82666 +       E3_uint32 Cache_Traps:1;        /* wr 1 trap info will be cached */
82667 +       E3_uint32 Cache_Level3:1;       /* wr 1 lev3 page tabs will be cached */
82668 +       E3_uint32 Cache_Level2:1;       /* wr 1 lev2 page tabs will be cached */
82669 +       E3_uint32 Cache_Level1:1;       /* wr 1 lev1 page tabs will be cached */
82670 +       E3_uint32 Cache_Level0:1;       /* wr 1 lev0 page tabs will be cached */
82671 +       E3_uint32 EnableAllSets:1;      /* wr 1 All the cache sets are enabled */
82672 +       E3_uint32 Set8kPages:1;         /* wr 1 smallest page is 8k. */
82673 +       E3_uint32 MMU_Enable:1;         /* wr 1 to enable the MMU */
82674 +#endif
82675 +    } s;
82676 +} E3_CacheContReg;
82677 +
82678 +typedef union _E3_TrapBits
82679 +{
82680 +    volatile E3_uint32 Bits;
82681 +    struct
82682 +    {
82683 +#if defined(__LITTLE_ENDIAN__)
82684 +       E3_uint32 ForcedTProcTrap:1;     /* The threads proc has been halted */
82685 +       E3_uint32 InstAccessException:1; /* An instruction access exception */
82686 +       E3_uint32 Unimplemented:1;       /* Unimplemented instruction executed */
82687 +       E3_uint32 DataAccessException:1; /* A data access exception */  
82688 +
82689 +       E3_uint32 ThreadTimeout:1;       /* The threads outputer has timed out */
82690 +       E3_uint32 OpenException:1;       /* Invalid sequence of open, sendtr or close */
82691 +       E3_uint32 OpenRouteFetch:1;      /* Fault while fetching routes for previous open*/
82692 +       E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
82693 +       
82694 +       E3_uint32 PacketAckValue:2;      /* Packet ack type. Valid if AckBufferValid set. */
82695 +       E3_uint32 PacketTimeout:1;       /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
82696 +
82697 +       E3_uint32 AckBufferValid:1;      /* The PacketAckValue bits are valid */
82698 +       E3_uint32 OutputWasOpen:1;       /* The output was open when tproc trapped */
82699 +       E3_uint32 TProcDeschedule:2;     /* The reason the tproc stopped running. */
82700 +       E3_uint32 :17;
82701 +#else
82702 +       E3_uint32 :17;
82703 +       E3_uint32 TProcDeschedule:2;     /* The reason the tproc stopped running. */
82704 +       E3_uint32 OutputWasOpen:1;       /* The output was open when tproc trapped */
82705 +       E3_uint32 AckBufferValid:1;      /* The PacketAckValue bits are valid */
82706 +       
82707 +       E3_uint32 PacketTimeout:1;       /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
82708 +       E3_uint32 PacketAckValue:2;      /* Packet ack type. Valid if AckBufferValid set. */
82709 +       
82710 +       E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
82711 +       E3_uint32 OpenRouteFetch:1;      /* Fault while fetching routes for previous open*/
82712 +       E3_uint32 OpenException:1;       /* Invalid sequence of open, sendtr or close */
82713 +       E3_uint32 ThreadTimeout:1;       /* The threads outputer has timed out */
82714 +
82715 +       E3_uint32 DataAccessException:1; /* A data access exception */
82716 +       E3_uint32 Unimplemented:1;       /* Unimplemented instruction executed */
82717 +       E3_uint32 InstAccessException:1; /* An instruction access exception */
82718 +       E3_uint32 ForcedTProcTrap:1;     /* The threads proc has been halted */
82719 +#endif
82720 +    } s;
82721 +} E3_TrapBits;
82722 +
82723 +typedef union _E3_DirtyBits
82724 +{
82725 +    volatile E3_uint32 Bits;
82726 +    struct
82727 +    {
82728 +#if defined(__LITTLE_ENDIAN__)
82729 +       E3_uint32 GlobalsDirty:8;
82730 +       E3_uint32 OutsDirty:8;          /* will always read as dirty. */
82731 +       E3_uint32 LocalsDirty:8;
82732 +       E3_uint32 InsDirty:8;
82733 +#else
82734 +       E3_uint32 InsDirty:8;
82735 +       E3_uint32 LocalsDirty:8;
82736 +       E3_uint32 OutsDirty:8;          /* will always read as dirty. */
82737 +       E3_uint32 GlobalsDirty:8;
82738 +#endif
82739 +    } s;
82740 +} E3_DirtyBits;
82741 +
82742 +#define E3_TProcDescheduleMask    0x6000
82743 +#define E3_TProcDescheduleWait    0x2000
82744 +#define E3_TProcDescheduleSuspend 0x4000
82745 +#define E3_TProcDescheduleBreak   0x6000
82746 +
82747 +#define E3_TrapBitsMask          0x7fff
82748 +
82749 +#define ThreadRestartFromTrapBit       1
82750 +#define ThreadReloadAllRegs            2
82751 +
82752 +#define E3_PAckOk      0
82753 +#define E3_PAckTestFail        1
82754 +#define E3_PAckDiscard 2
82755 +#define E3_PAckError   3
82756 +
82757 +typedef volatile struct _E3_DataBusMap
82758 +{
82759 +   E3_uint64            Dma_Alignment_Port[8];         /* 0x00002800 */
82760 +   E3_uint32            pad0[0x30];                    /* 0x00002840 */
82761 +
82762 +   E3_uint32            Input_Trans0_Data[0x10];       /* 0x00002900 */
82763 +   E3_uint32            Input_Trans1_Data[0x10];
82764 +   E3_uint32            Input_Trans2_Data[0x10];
82765 +   E3_uint32            Input_Trans3_Data[0x10];
82766 +
82767 +/* this is the start of the exts directly addressable from the ucode. */
82768 +   E3_Exts              Exts;                          /* 0x00002a00 */
82769 +
82770 +/* this is the start of the registers directly addressable from the ucode. */
82771 +   E3_DMA               Dma_Desc;                      /* 0x00002b00 */
82772 +
82773 +   E3_uint32            Dma_Last_Packet_Size;          /* 0x00002b20 */
82774 +   E3_uint32            Dma_This_Packet_Size;          /* 0x00002b24 */
82775 +   E3_uint32            Dma_Tmp_Source;                /* 0x00002b28 */
82776 +   E3_uint32            Dma_Tmp_Dest;                  /* 0x00002b2c */
82777 +
82778 +   E3_Addr              Thread_SP_Save_Ptr;    /* points to the thread desched save word. */
82779 +   E3_uint32            Dma_Desc_Size_InProg;          /* 0x00002b34 */
82780 +
82781 +   E3_uint32            Thread_Desc_SP;                /* 0x00002b38 */
82782 +   E3_uint32            Thread_Desc_Context;           /* 0x00002b3c */
82783 +
82784 +   E3_uint32            uCode_TMP[0x10];               /* 0x00002b40 */
82785 +
82786 +   E3_uint32            TProc_NonSysCntx_FPtr;         /* 0x00002b80 */
82787 +   E3_uint32            TProc_NonSysCntx_BPtr;         /* 0x00002b84 */
82788 +   E3_uint32            TProc_SysCntx_FPtr;            /* 0x00002b88 */
82789 +   E3_uint32            TProc_SysCntx_BPtr;            /* 0x00002b8c */
82790 +   E3_uint32            DProc_NonSysCntx_FPtr;         /* 0x00002b90 */
82791 +   E3_uint32            DProc_NonSysCntx_BPtr;         /* 0x00002b94 */
82792 +   E3_uint32            DProc_SysCntx_FPtr;            /* 0x00002b98 */
82793 +   E3_uint32            DProc_SysCntx_BPtr;            /* 0x00002b9c */
82794 +
82795 +   E3_uint32            Input_Trap_Base;               /* 0x00002ba0 */
82796 +   E3_uint32            Input_Queue_Offset;            /* 0x00002ba4 */
82797 +   E3_uint32            CProc_TrapSave_Addr;           /* 0x00002ba8 */
82798 +   E3_uint32            Input_Queue_Addr;              /* 0x00002bac */
82799 +   E3_uint32            uCode_TMP10;                   /* 0x00002bb0 */
82800 +   E3_uint32            uCode_TMP11;                   /* 0x00002bb4 */
82801 +   E3_uint32            Event_Trace_Ptr;               /* 0x00002bb8 */
82802 +   E3_uint32            Event_Trace_Mask;              /* 0x00002bbc */
82803 +
82804 +   E3_ComPortEntry      DmaComQueue[3];                /* 0x00002bc0 */
82805 +
82806 +   E3_uint32            Event_Int_Queue_FPtr;          /* 0x00002bd8 */
82807 +   E3_uint32            Event_Int_Queue_BPtr;          /* 0x00002bdc */
82808 +
82809 +   E3_ComPortEntry      ThreadComQueue[2];             /* 0x00002be0 */
82810 +   E3_ComPortEntry      SetEventComQueue[2];           /* 0x00002bf0 */
82811 +
82812 +   E3_uint32            pad1[96];                      /* 0x00002c00 */
82813 +   E3_uint32            ComQueueStatus;                /* 0x00002d80 */
82814 +   E3_uint32            pad2[31];                      /* 0x00002d84 */
82815 +
82816 +/* These are the internal registers of the threads proc. */
82817 +   E3_uint32            Globals[8];                    /* 0x00002e00 */
82818 +   E3_uint32            Outs[8];
82819 +   E3_uint32            Locals[8];
82820 +   E3_uint32            Ins[8];
82821 +
82822 +   E3_uint32            pad3[16];
82823 +
82824 +   E3_uint32            IBufferReg[4];
82825 +
82826 +   E3_uint32            ExecuteNPC;
82827 +   E3_uint32            ExecutePC;
82828 +
82829 +   E3_uint32            StartPC;
82830 +   E3_uint32            pad4;
82831 +
82832 +   E3_uint32            StartnPC;
82833 +   E3_uint32            pad5;
82834 +
82835 +   E3_TrapBits          TrapBits;
82836 +   E3_DirtyBits                 DirtyBits;
82837 +   E3_uint64            LoadDataReg;
82838 +   E3_uint64            StoreDataReg;
82839 +
82840 +   E3_uint32            ECC_STATUS0;
82841 +   E3_uint32            ECC_STATUS1;
82842 +   E3_uint32            pad6[0xe];
82843 +
82844 +/* Pci slave port regs */
82845 +   E3_uint32            PciSlaveReadCache[0x10];
82846 +
82847 +   E3_uint32            Fault_Base_Ptr;
82848 +   E3_uint32            pad7;
82849 +   E3_uint32            Context_Ptr;
82850 +   E3_uint32            pad8;
82851 +   E3_uint32            Input_Context_Filter;      /* write only, No data */
82852 +   E3_uint32            Input_Context_Fil_Flush;   /* write only, No data */
82853 +   E3_CacheContReg      Cache_Control_Reg;
82854 +   E3_uint32            pad9;
82855 +
82856 +   E3_uint64            Tlb_Line_Value;
82857 +   
82858 +   E3_uint32            Walk_Datareg1;
82859 +   E3_uint32            Walk_VAddr_Tab_Base;
82860 +   E3_uint32            Walk_Datareg;
82861 +   E3_uint32            Walk_ContextReg;
82862 +   E3_uint32            Walk_FaultAddr;
82863 +   E3_uint32            Walk_EventAddr;
82864 +
82865 +/* outputers output cont ext registers. */
82866 +   E3_uint64            Dma_Route_012345_Context;
82867 +   E3_uint64            pad10;
82868 +   E3_uint64            Dma_Route_01234567;
82869 +   E3_uint64            Dma_Route_89ABCDEF;
82870 +
82871 +   E3_uint64            Thread_Route_012345_Context;
82872 +   E3_uint64            pad11;
82873 +   E3_uint64            Thread_Route_01234567;
82874 +   E3_uint64            Thread_Route_89ABCDEF;
82875 +} E3_DataBusMap;
82876 +
82877 +typedef volatile struct _E3_Regs
82878 +{
82879 +   E3_CacheSets                  Sets;                         /* 0x00000000 */
82880 +   E3_CacheTags                  Tags;                         /* 0x00002000 */
82881 +   E3_DataBusMap         Regs;                         /* 0x00002800 */
82882 +   E3_uint32             pad1[0x400];
82883 +   E3_User_Regs          URegs;
82884 +} E3_Regs;
82885 +
82886 +#define MAX_TRAPPED_TRANS      16
82887 +#define TRANS_DATA_WORDS       16
82888 +#define TRANS_DATA_BYTES       64
82889 +
82890 +/*
82891 + * Event interrupt
82892 + */
82893 +typedef volatile union _E3_EventInt
82894 +{
82895 +   E3_uint64    ForceAlign;
82896 +   struct {
82897 +       E3_uint32 IntCookie;
82898 +       E3_uint32 EventContext; /* Bits 16 to 28 */
82899 +    } s;
82900 +} E3_EventInt;
82901 +
82902 +#define GET_EVENT_CONTEXT(Ptr) ((Ptr->s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK)
82903 +
82904 +typedef volatile union _E3_ThreadQueue
82905 +{
82906 +   E3_uint64   ForceAlign;
82907 +   struct
82908 +   {
82909 +       E3_Addr  Thread;
82910 +#if defined(__LITTLE_ENDIAN__)
82911 +       E3_uint32 :16;          /* Bits 0  to 15 */
82912 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
82913 +       E3_uint32 :3;           /* Bits 29 to 31 */
82914 +#else
82915 +       E3_uint32 :3;           /* Bits 29 to 31 */
82916 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
82917 +       E3_uint32 :16;          /* Bits 0  to 15 */
82918 +#endif
82919 +   } s;
82920 +} E3_ThreadQueue;
82921 +
82922 +typedef volatile union _E3_FaultStatusReg
82923 +{
82924 +   E3_uint32 Status;
82925 +   struct
82926 +   {
82927 +#if defined(__LITTLE_ENDIAN__)
82928 +      E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */
82929 +      E3_uint32 AccSize:4;     /* Access size. See below for different types. Bits 3 to 6 */
82930 +      E3_uint32 WrAcc:1;       /* Access was a write. Bit 7 */
82931 +      E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
82932 +      E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
82933 +      E3_uint32 RdLine:1;      /* Access was a dma read line. Bit 11 */
82934 +      E3_uint32 RdMult:1;      /* Access was a dma read multiple. Bit 12 */
82935 +      E3_uint32 Walking:1;     /* The fault occurred when walking. Bit 13 */
82936 +      E3_uint32 Level:2;       /* Page table level when the fault occurred. Bits 14 to 15 */
82937 +      E3_uint32 ProtFault:1;   /* A protection fault occurred. Bit 16 */
82938 +      E3_uint32 FaultPte:2;    /* Page table type when the fault occurred. Bits 17 to 18 */
82939 +      E3_uint32 AlignmentErr:1;        /* Address alignment did not match the access size. Bit 19 */
82940 +      E3_uint32 VProcSizeErr:1;        /* VProc number is out of range. Bit 20 */
82941 +      E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
82942 +      E3_uint32 :10;           /* Bits 22 to 31 */
82943 +#else
82944 +      E3_uint32 :10;           /* Bits 22 to 31 */
82945 +      E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
82946 +      E3_uint32 VProcSizeErr:1;        /* VProc number is out of range. Bit 20 */
82947 +      E3_uint32 AlignmentErr:1;        /* Address alignment did not match the access size. Bit 19 */
82948 +      E3_uint32 FaultPte:2;    /* Page table type when the fault occurred. Bits 17 to 18 */
82949 +      E3_uint32 ProtFault:1;   /* A protection fault occurred. Bit 16 */
82950 +      E3_uint32 Level:2;       /* Page table level when the fault occurred. Bits 14 to 15 */
82951 +      E3_uint32 Walking:1;     /* The fault occurred when walking. Bit 13 */
82952 +      E3_uint32 RdMult:1;      /* Access was a dma read multiple. Bit 12 */
82953 +      E3_uint32 RdLine:1;      /* Access was a dma read line. Bit 11 */
82954 +      E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
82955 +      E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
82956 +      E3_uint32 WrAcc:1;       /* Access was a write. Bit 7 */
82957 +      E3_uint32 AccSize:4;     /* Access size. See below for different types. Bits 3 to 6 */
82958 +      E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */
82959 +#endif
82960 +   } s;
82961 +} E3_FaultStatusReg;
82962 +
82963 +typedef union _E3_FaultSave
82964 +{
82965 +   E3_uint64            ForceAlign;
82966 +   struct {
82967 +      E3_FaultStatusReg         FSR;
82968 +      volatile E3_uint32 FaultContext;
82969 +      volatile E3_uint32 FaultAddress;
82970 +      volatile E3_uint32 EventAddress;
82971 +   } s;
82972 +} E3_FaultSave;
82973 +
82974 +/* MMU fault status reg bit positions. */
82975 +#define FSR_WritePermBit       0       /* 1=Write access perm, 0=Read access perm */
82976 +#define FSR_RemotePermBit      1       /* 1=Remote access perm, 0=local access perm */
82977 +#define FSR_EventPermBit       2       /* 1=Event access perm, 0=data access perm */
82978 +#define FSR_Size0Bit           3
82979 +#define FSR_Size1Bit           4
82980 +#define FSR_Size2Bit           5
82981 +#define FSR_Size3Bit           6
82982 +#define FSR_WriteAccBit                7       /* 1=Write access, 0=Read access. */
82983 +#define FSR_NonAllocBit                8       /* 1=Do not fill cache with this data */
82984 +#define FSR_BlkDataTy0Bit      9
82985 +#define FSR_BlkDataTy1Bit      10
82986 +#define FSR_ReadLineBit                11
82987 +#define FSR_ReadMultipleBit    12
82988 +
82989 +#define FSR_PermMask           (0xf << FSR_WritePermBit)
82990 +#define FSR_SizeMask           (0xf << FSR_Size0Bit)
82991 +#define FSR_AccTypeMask                (3 << FSR_WriteAccBit)
82992 +#define FSR_BlkDataTyMask      (3 << FSR_BlkDataTy0Bit)
82993 +#define FSR_PciAccTyMask       (3 << FSR_ReadLineBit)
82994 +#define FSR_Walking            (0x1 << 13)
82995 +#define FSR_Level_Mask         (0x3 << 14)
82996 +#define FSR_ProtFault          (0x1 << 16)
82997 +#define FSR_FaultPTEType       (0x2 << 17)
82998 +#define FSR_AddrSizeError      (0x1 << 19)
82999 +#define FSR_VProcSizeError     (0x1 << 20)
83000 +#define FSR_WalkBadData                (0x1 << 21)
83001 +
83002 +#define FSR_PermRead           0
83003 +#define FSR_PermWrite          1
83004 +#define FSR_PermRemoteRead     2
83005 +#define FSR_PermRemoteWrite    3
83006 +#define FSR_PermEventRd                4
83007 +#define FSR_PermEventWr                5
83008 +#define FSR_PermRemoteEventRd  6
83009 +#define FSR_PermRemoteEventWr  7
83010 +
83011 +/* AT size values for each access type */
83012 +#define FSR_Word               (0x0 << FSR_Size0Bit)
83013 +#define FSR_DWord              (0x1 << FSR_Size0Bit)
83014 +#define FSR_QWord              (0x2 << FSR_Size0Bit)
83015 +#define FSR_Block32            (0x3 << FSR_Size0Bit)
83016 +#define FSR_ReservedBlock      (0x6 << FSR_Size0Bit)
83017 +#define FSR_Block64            (0x7 << FSR_Size0Bit)
83018 +#define FSR_GetCntxFilter      (0x8 << FSR_Size0Bit)
83019 +#define FSR_QueueDWord         (0x9 << FSR_Size0Bit)
83020 +#define FSR_RouteFetch         (0xa << FSR_Size0Bit)
83021 +#define FSR_QueueBlock         (0xb << FSR_Size0Bit)
83022 +#define FSR_Block32PartWrite   (0xe << FSR_Size0Bit)
83023 +#define FSR_Block64PartWrite   (0xf << FSR_Size0Bit)
83024 +
83025 +#define FSR_AllocRead          (0 << FSR_WriteAccBit)
83026 +#define FSR_AllocWrite         (1 << FSR_WriteAccBit)
83027 +#define FSR_NonAllocRd         (2 << FSR_WriteAccBit)
83028 +#define FSR_NonAllocWr         (3 << FSR_WriteAccBit)
83029 +
83030 +#define FSR_TypeByte           (0 << FSR_BlkDataTy0Bit)
83031 +#define FSR_TypeHWord          (1 << FSR_BlkDataTy0Bit)
83032 +#define FSR_TypeWord           (2 << FSR_BlkDataTy0Bit)
83033 +#define FSR_TypeDWord          (3 << FSR_BlkDataTy0Bit)
83034 +
83035 +typedef union E3_TrTypeCntx
83036 +{
83037 +   E3_uint32 TypeContext;
83038 +   struct
83039 +   {
83040 +#if defined(__LITTLE_ENDIAN__)
83041 +      E3_uint32 Type:16;               /* Transaction type field */
83042 +      E3_uint32 Context:13;            /* Transaction context */
83043 +      E3_uint32 TypeCntxInvalid:1;     /* Bit  29 */
83044 +      E3_uint32 StatusRegValid:1;      /* Bit  30 */
83045 +      E3_uint32 LastTrappedTrans:1;    /* Bit  31 */
83046 +#else
83047 +      E3_uint32 LastTrappedTrans:1;    /* Bit  31 */
83048 +      E3_uint32 StatusRegValid:1;      /* Bit  30 */
83049 +      E3_uint32 TypeCntxInvalid:1;     /* Bit  29 */
83050 +      E3_uint32 Context:13;            /* Transaction context */
83051 +      E3_uint32 Type:16;               /* Transaction type field */
83052 +#endif
83053 +   } s;
83054 +} E3_TrTypeCntx;
83055 +
83056 +#define GET_TRAP_TYPE(Ptr)    (Ptr.TypeContext & 0xfff)
83057 +#define GET_TRAP_CONTEXT(Ptr) ((Ptr.TypeContext >> 16) & 0x1fff)
83058 +
83059 +/* Words have been swapped for big endian access when fetched with dword access from elan.*/
83060 +typedef union _E3_IprocTrapHeader
83061 +{
83062 +   E3_uint64   forceAlign;
83063 +
83064 +   struct
83065 +   {
83066 +      E3_TrTypeCntx     TrTypeCntx;
83067 +      E3_uint32                 TrAddr;
83068 +      E3_uint32                 TrData0;
83069 +      union
83070 +      {
83071 +        E3_IProcStatus_Reg u_IProcStatus;
83072 +        E3_uint32          u_TrData1;
83073 +      } ipsotd;
83074 +   } s;
83075 +} E3_IprocTrapHeader;
83076 +
83077 +#define IProcTrapStatus ipsotd.u_IProcStatus
83078 +#define TrData1                ipsotd.u_TrData1
83079 +
83080 +typedef struct E3_IprocTrapData
83081 +{
83082 +   E3_uint32 TrData[TRANS_DATA_WORDS];
83083 +} E3_IprocTrapData;
83084 +
83085 +/*
83086 + * 64 kbytes of elan local memory. Must be aligned on a 64k boundary
83087 + */
83088 +#define E3_NonSysCntxQueueSize 0x400
83089 +#define E3_SysCntxQueueSize    0x100
83090 +
83091 +typedef struct _E3_TrapAndQueue
83092 +{
83093 +   E3_DMA              NonSysCntxDmaQueue[E3_NonSysCntxQueueSize];                     /* 0x000000 */
83094 +   E3_DMA              SysCntxDmaQueue[E3_SysCntxQueueSize];                           /* 0x008000 */
83095 +   E3_EventInt         EventIntQueue[E3_NonSysCntxQueueSize];                          /* 0x00A000 */
83096 +   E3_ThreadQueue      NonSysCntxThreadQueue[E3_NonSysCntxQueueSize];                  /* 0x00C000 */  
83097 +   E3_ThreadQueue      SysCntxThreadQueue[E3_SysCntxQueueSize];                        /* 0x00E000 */
83098 +   E3_FaultSave                IProcSysCntx;                                                   /* 0x00E800 */
83099 +   E3_Addr             Thread_SP_Save;                                                 /* 0x00E810 */
83100 +   E3_uint32           dummy0[3];                                                      /* 0x00E814 */
83101 +   E3_FaultSave                ThreadProcData;                                                 /* 0x00E820 */
83102 +   E3_FaultSave                ThreadProcInst;                                                 /* 0x00E830 */
83103 +   E3_FaultSave                dummy1[2];                                                      /* 0x00E840 */  
83104 +   E3_FaultSave                ThreadProcOpen;                                                 /* 0x00E860 */
83105 +   E3_FaultSave                dummy2;                                                         /* 0x00E870 */
83106 +   E3_FaultSave                IProcNonSysCntx;                                                /* 0x00E880 */
83107 +   E3_FaultSave                DProc;                                                          /* 0x00E890 */
83108 +   E3_FaultSave                CProc;                                                          /* 0x00E8A0 */
83109 +   E3_FaultSave                TProc;                                                          /* 0x00E8B0 */
83110 +   E3_FaultSave                DProcData0;                                                     /* 0x00E8C0 */
83111 +   E3_FaultSave                DProcData1;                                                     /* 0x00E8D0 */
83112 +   E3_FaultSave                DProcData2;                                                     /* 0x00E8E0 */
83113 +   E3_FaultSave                DProcData3;                                                     /* 0x00E8F0 */
83114 +   E3_uint32           dummy3[0xc0];                                                   /* 0x00E900 */
83115 +   E3_IprocTrapHeader  VCh0_C0_TrHead[MAX_TRAPPED_TRANS];
83116 +   E3_IprocTrapHeader  VCh0_NonC0_TrHead[MAX_TRAPPED_TRANS];
83117 +   E3_IprocTrapHeader  VCh1_C0_TrHead[MAX_TRAPPED_TRANS];
83118 +   E3_IprocTrapHeader  VCh1_NonC0_TrHead[MAX_TRAPPED_TRANS];
83119 +   E3_IprocTrapData    VCh0_C0_TrData[MAX_TRAPPED_TRANS];
83120 +   E3_IprocTrapData    VCh0_NonC0_TrData[MAX_TRAPPED_TRANS];
83121 +   E3_IprocTrapData    VCh1_C0_TrData[MAX_TRAPPED_TRANS];
83122 +   E3_IprocTrapData    VCh1_NonC0_TrData[MAX_TRAPPED_TRANS];
83123 +   E3_uint64           DmaOverflowQueueSpace[0x1000];
83124 +   E3_uint64           ThreadOverflowQueueSpace[0x800];
83125 +   E3_uint64           EventOverflowQueueSpace[0x800];
83126 +} E3_TrapAndQueue;
83127 +
83128 +
83129 +typedef struct _E3_ContextControlBlock 
83130 +{
83131 +   E3_uint32   rootPTP;
83132 +   E3_uint32   filter;
83133 +   E3_uint32   VPT_ptr;
83134 +   E3_uint32   VPT_mask;
83135 +} E3_ContextControlBlock;
83136 +
83137 +#define E3_CCB_CNTX0           (0x20000000)
83138 +#define E3_CCB_DISCARD_ALL     (0x40000000)
83139 +#define E3_CCB_ACKOK_ALL       (0x80000000)
83140 +#define E3_CCB_MASK            (0xc0000000)
83141 +
83142 +#define E3_NUM_CONTEXT_0       (0x20)
83143 +
83144 +/* Macros to manipulate event queue pointers */
83145 +/*     generate index in EventIntQueue */
83146 +#define E3_EVENT_INTQ_INDEX(fptr)      (((fptr) & 0x1fff) >> 3)
83147 +/*     generate next fptr */
83148 +#define E3_EVENT_INTQ_NEXT(fptr)       ((((fptr) + 8) & ~0x4000) | 0x2000)
83149 +
83150 +
83151 +#endif /* notdef _ELAN3_ELANREGS_H */
83152 +
83153 +/*
83154 + * Local variables:
83155 + * c-file-style: "stroustrup"
83156 + * End:
83157 + */
83158 Index: linux-2.6.5-7.191/include/elan3/elansyscall.h
83159 ===================================================================
83160 --- linux-2.6.5-7.191.orig/include/elan3/elansyscall.h  2004-02-23 16:02:56.000000000 -0500
83161 +++ linux-2.6.5-7.191/include/elan3/elansyscall.h       2005-07-28 14:52:52.949663384 -0400
83162 @@ -0,0 +1,124 @@
83163 +/*
83164 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83165 + *
83166 + *    For licensing information please see the supplied COPYING file
83167 + *
83168 + */
83169 +
83170 +#ifndef __ELAN3_ELANSYSCALL_H
83171 +#define __ELAN3_ELANSYSCALL_H
83172 +
83173 +#ident "$Id: elansyscall.h,v 1.34 2004/06/07 13:50:06 mike Exp $"
83174 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elansyscall.h,v $*/
83175 +
83176 +#ifdef __cplusplus
83177 +extern "C" {
83178 +#endif
83179 +
83180 +#ifndef _ASM
83181 +
83182 +typedef struct sys_word_item
83183 +{
83184 +    struct sys_word_item *Next;
83185 +    E3_uint32            Value;
83186 +} SYS_WORD_ITEM;
83187 +
83188 +typedef struct sys_block_item
83189 +{
83190 +    struct sys_block_item *Next;
83191 +    E3_uint32            *Pointer;
83192 +} SYS_BLOCK_ITEM;
83193 +
83194 +typedef struct sys_swap_space
83195 +{
83196 +    int                 Magic;
83197 +    void       *ItemListsHead[MAX_LISTS];
83198 +    void       **ItemListsTailp[MAX_LISTS];
83199 +} SYS_SWAP_SPACE;
83200 +
83201 +typedef struct sys_exception
83202 +{
83203 +    int                        Type;
83204 +    int                        Proc;
83205 +    u_long             Res;
83206 +    u_long             Value;
83207 +    E3_FaultSave_BE    FaultArea;
83208 +    
83209 +    union
83210 +    {
83211 +       DMA_TRAP        Dma;
83212 +       THREAD_TRAP     Thread;
83213 +       COMMAND_TRAP    Command;
83214 +       INPUT_TRAP      Input;
83215 +    }                  Union;
83216 +} SYS_EXCEPTION;
83217 +
83218 +typedef struct sys_exception_space
83219 +{
83220 +    struct sys_exception_space *Next;
83221 +    int                                Magic;
83222 +    int                                Front;
83223 +    int                                Back;
83224 +    int                                Count;
83225 +    int                                Overflow;
83226 +    SYS_EXCEPTION              Exceptions[1];
83227 +} SYS_EXCEPTION_SPACE;
83228 +
83229 +#ifdef __KERNEL__
83230 +
83231 +typedef struct sys_ctxt
83232 +{
83233 +    SYS_SWAP_SPACE      *Swap;
83234 +    SYS_EXCEPTION_SPACE *Exceptions;
83235 +    kmutex_t            Lock;
83236 +
83237 +    spinlock_t          WaitLock;
83238 +    kcondvar_t          NetworkErrorWait;
83239 +
83240 +    int                         Armed;
83241 +    int                         Backoff;
83242 +    long                Time;
83243 +
83244 +    u_long              Flags;
83245 +    int                  signal;
83246 +
83247 +    EVENT_COOKIE_TABLE  *Table;
83248 +} SYS_CTXT;
83249 +
83250 +extern SYS_CTXT *sys_init (ELAN3_CTXT *ctxt);
83251 +extern int       sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event);
83252 +extern void      sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t ptr, int size, 
83253 +                                  E3_FaultSave_BE *, u_long res, u_long value);
83254 +extern int       sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex);
83255 +
83256 +/* returns -ve error or ELAN_CAP_OK or ELAN_CAP_RMS */
83257 +/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */
83258 +extern int  elan3_validate_cap (ELAN3_DEV *dev, ELAN_CAPABILITY *cap ,int use);
83259 +
83260 +#endif /* __KERNEL__ */
83261 +
83262 +#endif /* _ASM */
83263 +
83264 +/* values for "Flags" */
83265 +#define ELAN3_SYS_FLAG_DMA_BADVP               1
83266 +#define ELAN3_SYS_FLAG_THREAD_BADVP    2
83267 +#define ELAN3_SYS_FLAG_DMAFAIL         4
83268 +#define ELAN3_SYS_FLAG_NETERR          8
83269 +
83270 +#define SYS_SWAP_MAGIC         0xB23C52DF
83271 +#define SYS_EXCEPTION_MAGIC    0xC34D63E0
83272 +
83273 +#define EXCEPTION_GLOBAL_STRING        "elan3_exceptions"
83274 +#define EXCEPTION_ABORT_STRING  "elan3_abortstring"
83275 +
83276 +#ifdef __cplusplus
83277 +}
83278 +#endif
83279 +
83280 +#endif /* __ELAN3_ELANSYSCALL_H */
83281 +
83282 +/*
83283 + * Local variables:
83284 + * c-file-style: "stroustrup"
83285 + * End:
83286 + */
83287 Index: linux-2.6.5-7.191/include/elan3/elanuregs.h
83288 ===================================================================
83289 --- linux-2.6.5-7.191.orig/include/elan3/elanuregs.h    2004-02-23 16:02:56.000000000 -0500
83290 +++ linux-2.6.5-7.191/include/elan3/elanuregs.h 2005-07-28 14:52:52.950663232 -0400
83291 @@ -0,0 +1,295 @@
83292 +/*
83293 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83294 + *
83295 + *    For licensing information please see the supplied COPYING file
83296 + *
83297 + */
83298 +
83299 +#ifndef __ELAN3_ELANUREGS_H
83300 +#define __ELAN3_ELANUREGS_H
83301 +
83302 +#ident "$Id: elanuregs.h,v 1.10 2003/09/24 13:57:24 david Exp $"
83303 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanuregs.h,v $*/
83304 +
83305 +#ifdef __cplusplus
83306 +extern "C" {
83307 +#endif
83308 +
83309 +/*
83310 + * Statistic control reg values
83311 + * Each 4-bit nibble of the control word specifies what statistic
83312 + * is to be recorded in each of the 8 statistic counters
83313 + */
83314 +
83315 +/* Count reg 0 */
83316 +#define STC_INPUT_TRANSACTIONS         0
83317 +#define STP_DMA_EOP_WAIT_ACK           1
83318 +#define STP_THREAD_RUNNING             2
83319 +#define STP_UCODE_WAIT_MEM             3
83320 +#define STC_CACHE_WRITE_BACKS          4
83321 +#define STC_PCI_SLAVE_READS            5
83322 +#define STC_REG0_UNUSED6               6
83323 +#define STP_REG0_UNUSED7               7
83324 +
83325 +#define STATS_REG0_NAMES {             \
83326 +         "STC_INPUT_TRANSACTIONS",     \
83327 +         "STP_DMA_EOP_WAIT_ACK",       \
83328 +         "STP_THREAD_RUNNING",         \
83329 +         "STP_UCODE_WAIT_MEM",         \
83330 +         "STC_CACHE_WRITE_BACKS",      \
83331 +         "STC_PCI_SLAVE_READS",        \
83332 +         "STC_REG0_UNUSED6",           \
83333 +         "STP_REG0_UNUSED7"            \
83334 +}
83335 +
83336 +/* Count reg 1 */
83337 +#define STC_INPUT_WRITE_BLOCKS         (0 << 4)
83338 +#define STP_DMA_DATA_TRANSMITTING      (1 << 4)
83339 +#define STP_THEAD_WAITING_INST         (2 << 4)
83340 +#define STC_REG1_UNUSED3               (3 << 4)
83341 +#define STP_FETCHING_ROUTES            (4 << 4)
83342 +#define STC_REG1_UNUSED5               (5 << 4)
83343 +#define STC_PCI_SLAVE_WRITES           (6 << 4)
83344 +#define STP_PCI_SLAVE_READ_WAITING     (7 << 4)
83345 +
83346 +#define STATS_REG1_NAMES {             \
83347 +      "STC_INPUT_WRITE_BLOCKS",                \
83348 +         "STP_DMA_DATA_TRANSMITTING",  \
83349 +         "STP_THEAD_WAITING_INST",     \
83350 +         "STC_REG1_UNUSED3",           \
83351 +         "STP_FETCHING_ROUTES",        \
83352 +         "STC_REG1_UNUSED5",           \
83353 +         "STC_PCI_SLAVE_WRITES",       \
83354 +         "STP_PCI_SLAVE_READ_WAITING"  \
83355 +}
83356 +
83357 +/* Count reg 2 */
83358 +#define STC_INPUT_PKTS                 (0 << 8)
83359 +#define STP_DMA_WAITING_MEM            (1 << 8)
83360 +#define STP_THREAD_WAIT_OPEN_PKT       (2 << 8)
83361 +#define STC_REG2_UNUSED3               (3 << 8)
83362 +#define STC_ROUTE_FETCHES              (4 << 8)
83363 +#define STC_CACHE_NON_ALLOC_MISSES     (5 << 8)
83364 +#define STC_REG2_UNUSED6               (6 << 8)
83365 +#define STP_PCI_SLAVE_WRITE_WAITING    (7 << 8)
83366 +
83367 +#define STATS_REG2_NAMES {             \
83368 +      "STC_INPUT_PKTS",                        \
83369 +         "STP_DMA_WAITING_MEM",        \
83370 +         "STP_THREAD_WAIT_OPEN_PKT",   \
83371 +         "STC_REG2_UNUSED3",           \
83372 +         "STC_ROUTE_FETCHES",          \
83373 +         "STC_CACHE_NON_ALLOC_MISSES", \
83374 +         "STC_REG2_UNUSED6",           \
83375 +         "STP_PCI_SLAVE_WRITE_WAITING" \
83376 +}
83377 +
83378 +/* Count reg 3 */
83379 +#define STC_INPUT_PKTS_REJECTED                (0 << 12)
83380 +#define STP_DMA_WAIT_NETWORK_BUSY      (1 << 12)
83381 +#define STP_THREAD_WAIT_PACK           (2 << 12)
83382 +#define STP_UCODE_BLOCKED_UCODE                (3 << 12)
83383 +#define STC_TLB_HITS                   (4 << 12)
83384 +#define STC_REG3_UNUSED5               (5 << 12)
83385 +#define STC_PCI_MASTER_READS           (6 << 12)
83386 +#define STP_PCI_MASTER_WRITE_WAITING   (7 << 12)
83387 +
83388 +#define STATS_REG3_NAMES {             \
83389 +      "STC_INPUT_PKTS_REJECTED",       \
83390 +         "STP_DMA_WAIT_NETWORK_BUSY",  \
83391 +         "STP_THREAD_WAIT_PACK",       \
83392 +         "STP_UCODE_BLOCKED_UCODE",    \
83393 +         "STC_TLB_HITS",               \
83394 +         "STC_REG3_UNUSED5",           \
83395 +         "STC_PCI_MASTER_READS",       \
83396 +         "STP_PCI_MASTER_WRITE_WAITING"\
83397 +}
83398 +
83399 +/* Count reg 4 */
83400 +#define STP_INPUT_DATA_TRANSMITTING    (0 << 16)
83401 +#define STC_DMA_NON_CTX0_PKTS          (1 << 16)
83402 +#define STP_THREAD_EOP_WAIT_ACK                (2 << 16)
83403 +#define STP_UCODE_DPROC_RUNNING                (3 << 16)
83404 +#define STC_TLB_MEM_WALKS              (4 << 16)
83405 +#define STC_REG4_UNUSED5               (5 << 16)
83406 +#define STC_PCI_MASTER_WRITES          (6 << 16)
83407 +#define STP_PCI_MASTER_READ_WAITING    (7 << 16)
83408 +
83409 +#define STATS_REG4_NAMES {             \
83410 +      "STP_INPUT_DATA_TRANSMITTING",   \
83411 +         "STC_DMA_NON_CTX0_PKTS",      \
83412 +         "STP_THREAD_EOP_WAIT_ACK",    \
83413 +         "STP_UCODE_DPROC_RUNNING",    \
83414 +         "STC_TLB_MEM_WALKS",          \
83415 +         "STC_REG4_UNUSED5",           \
83416 +         "STC_PCI_MASTER_WRITES",      \
83417 +         "STP_PCI_MASTER_READ_WAITING" \
83418 +}
83419 +
83420 +/* Count reg 5 */
83421 +#define STP_INPUT_WAITING_NETWORK_DATA (0 << 20)
83422 +#define STC_DMA_NON_CTX0_PKTS_REJECTED (1 << 20)
83423 +#define STP_THREAD_WAITING_DATA                (2 << 20)
83424 +#define STP_UCODE_CPROC_RUNNING                (3 << 20)
83425 +#define STP_THREAD_TRANSMITTING_DATA   (4 << 20)
83426 +#define STP_PCI_WAITING_MAIN           (5 << 20)
83427 +#define STC_REG5_UNUSED6               (6 << 20)
83428 +#define STC_REG5_UNUSED7               (7 << 20)
83429 +
83430 +#define STATS_REG5_NAMES {                     \
83431 +      "STP_INPUT_WAITING_NETWORK_DATA",                \
83432 +         "STC_DMA_NON_CTX0_PKTS_REJECTED",     \
83433 +         "STP_THREAD_WAITING_DATA",            \
83434 +         "STP_UCODE_CPROC_RUNNING",            \
83435 +         "STP_THREAD_TRANSMITTING_DATA",       \
83436 +         "STP_PCI_WAITING_MAIN",               \
83437 +         "STC_REG5_UNUSED6",                   \
83438 +         "STC_REG5_UNUSED7"                    \
83439 +}
83440 +
83441 +/* Count reg 6 */
83442 +#define STP_INPUT_WAITING_MEMORY       (0 << 24)
83443 +#define STC_DMA_CTX0_PKTS              (1 << 24)
83444 +#define STP_THREAD_WAITING_MEMORY      (2 << 24)
83445 +#define STP_UCODE_TPROC_RUNNING                (3 << 24)
83446 +#define STC_CACHE_HITS                 (4 << 24)
83447 +#define STP_PCI_WAITING_ELAN           (5 << 24)
83448 +#define STC_REG6_UNUSED4               (6 << 24)
83449 +#define STC_REG6_UNUSED7               (7 << 24)
83450 +
83451 +#define STATS_REG6_NAMES {             \
83452 +      "STP_INPUT_WAITING_MEMORY",      \
83453 +         "STC_DMA_CTX0_PKTS",          \
83454 +         "STP_THREAD_WAITING_MEMORY",  \
83455 +         "STP_UCODE_TPROC_RUNNING",    \
83456 +         "STC_CACHE_HITS",             \
83457 +         "STP_PCI_WAITING_ELAN",       \
83458 +         "STC_REG6_UNUSED4",           \
83459 +         "STC_REG6_UNUSED7"            \
83460 +}
83461 +
83462 +/* Count reg 7 */
83463 +#define STC_INPUT_CTX_FILTER_FILL      (0 << 28)       
83464 +#define STC_DMA_CTX0_PKTS_REJECTED     (1 << 28)
83465 +#define STP_THREAD_WAIT_NETWORK_BUSY   (2 << 28)
83466 +#define STP_UCODE_IPROC_RUNNING                (3 << 28)
83467 +#define STP_TLB_MEM_WALKING            (4 << 28)
83468 +#define STC_CACHE_ALLOC_MISSES         (5 << 28)
83469 +#define STP_PCI_DATA_TRANSFER          (6 << 28)
83470 +#define STC_REG7_UNUSED7               (7 << 28)
83471 +
83472 +#define STATS_REG7_NAMES {             \
83473 +      "STC_INPUT_CTX_FILTER_FILL",     \
83474 +         "STC_DMA_CTX0_PKTS_REJECTED", \
83475 +         "STP_THREAD_WAIT_NETWORK_BUSY",\
83476 +         "STP_UCODE_IPROC_RUNNING",    \
83477 +         "STP_TLB_MEM_WALKING",        \
83478 +         "STC_CACHE_ALLOC_MISSES",     \
83479 +         "STP_PCI_DATA_TRANSFER",      \
83480 +         "STC_REG7_UNUSED7"            \
83481 +}
83482 +
83483 +#define STATS_REG_NAMES { \
83484 +    STATS_REG0_NAMES, \
83485 +    STATS_REG1_NAMES, \
83486 +    STATS_REG2_NAMES, \
83487 +    STATS_REG3_NAMES, \
83488 +    STATS_REG4_NAMES, \
83489 +    STATS_REG5_NAMES, \
83490 +    STATS_REG6_NAMES, \
83491 +    STATS_REG7_NAMES, \
83492 +}
83493 +
83494 +extern const char *elan3_stats_names[8][8];
83495 +
83496 +#define ELAN3_STATS_NAME(COUNT, CONTROL) (elan3_stats_names[(COUNT)][(CONTROL) & 7])
83497 +
83498 +typedef volatile union e3_StatsControl
83499 +{
83500 +   E3_uint32 StatsControl;
83501 +   struct
83502 +   {
83503 +#if defined(__LITTLE_ENDIAN__)
83504 +      E3_uint32 StatCont0:4;
83505 +      E3_uint32 StatCont1:4;
83506 +      E3_uint32 StatCont2:4;
83507 +      E3_uint32 StatCont3:4;
83508 +      E3_uint32 StatCont4:4;
83509 +      E3_uint32 StatCont5:4;
83510 +      E3_uint32 StatCont6:4;
83511 +      E3_uint32 StatCont7:4;
83512 +#else
83513 +      E3_uint32 StatCont7:4;
83514 +      E3_uint32 StatCont6:4;
83515 +      E3_uint32 StatCont5:4;
83516 +      E3_uint32 StatCont4:4;
83517 +      E3_uint32 StatCont3:4;
83518 +      E3_uint32 StatCont2:4;
83519 +      E3_uint32 StatCont1:4;
83520 +      E3_uint32 StatCont0:4;
83521 +#endif
83522 +   } s;
83523 +} E3_StatsControl;
83524 +
83525 +typedef volatile union e3_StatsCount
83526 +{
83527 +   E3_uint64    ClockStat; 
83528 +   struct
83529 +   {
83530 +       E3_uint32 ClockLSW;     /* read only */
83531 +       E3_uint32 StatsCount;
83532 +   } s;
83533 +} E3_StatsCount;
83534 +
83535 +typedef volatile union e3_clock
83536 +{
83537 +   E3_uint64 NanoSecClock;
83538 +   struct
83539 +   {
83540 +      E3_uint32 ClockLSW;
83541 +      E3_uint32 ClockMSW;
83542 +   } s;
83543 +} E3_Clock;
83544 +#define E3_TIME( X ) ((X).NanoSecClock)
83545 +
83546 +typedef volatile struct _E3_User_Regs
83547 +{
83548 +   E3_StatsCount       StatCounts[8];
83549 +   E3_StatsCount       InstCount;
83550 +   E3_uint32           pad0;
83551 +   E3_StatsControl     StatCont;
83552 +   E3_Clock            Clock;
83553 +   E3_uint32           pad1[0x7ea];
83554 +} E3_User_Regs;
83555 +
83556 +typedef volatile struct _E3_CommandPort 
83557 +{
83558 +   E3_Addr             PutDma;         /* 0x000 */
83559 +   E3_uint32           Pad1;
83560 +   E3_Addr             GetDma;         /* 0x008 */
83561 +   E3_uint32           Pad2;
83562 +   E3_Addr             RunThread;      /* 0x010 */
83563 +   E3_uint32           Pad3[3];
83564 +   E3_Addr             WaitEvent0;     /* 0x020 */
83565 +   E3_uint32           Pad4;
83566 +   E3_Addr             WaitEvent1;     /* 0x028 */
83567 +   E3_uint32           Pad5;
83568 +   E3_Addr             SetEvent;       /* 0x030 */
83569 +   E3_uint32           Pad6[3];
83570 +   E3_uint32           Pad7[0x7f0];    /* Fill out to an 8K page */
83571 +} E3_CommandPort;
83572 +/* Should have the new structures for the top four pages of the elan3 space */
83573 +
83574 +#define E3_COMMANDPORT_SIZE    (sizeof (E3_CommandPort))
83575 +
83576 +#ifdef __cplusplus
83577 +}
83578 +#endif
83579 +
83580 +#endif /* __ELAN3_ELANUREGS_H */
83581 +
83582 +/*
83583 + * Local variables:
83584 + * c-file-style: "stroustrup"
83585 + * End:
83586 + */
83587 Index: linux-2.6.5-7.191/include/elan3/elanvp.h
83588 ===================================================================
83589 --- linux-2.6.5-7.191.orig/include/elan3/elanvp.h       2004-02-23 16:02:56.000000000 -0500
83590 +++ linux-2.6.5-7.191/include/elan3/elanvp.h    2005-07-28 14:52:52.950663232 -0400
83591 @@ -0,0 +1,165 @@
83592 +/*
83593 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83594 + *
83595 + *    For licensing information please see the supplied COPYING file
83596 + *
83597 + */
83598 +
83599 +#ifndef _ELAN3_ELANVP_H
83600 +#define _ELAN3_ELANVP_H
83601 +
83602 +#ident "$Id: elanvp.h,v 1.45 2004/06/18 09:28:06 mike Exp $"
83603 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanvp.h,v $ */
83604 +
83605 +#include <elan3/e3types.h>
83606 +#include <elan/bitmap.h>
83607 +#include <elan/capability.h>
83608 +
83609 +#ifdef __cplusplus
83610 +extern "C" {
83611 +#endif
83612 +
83613 +/*
83614 + * Context number allocation.
83615 + * [0-31]      system contexts
83616 + * [32-63]     hardware test
83617 + * [64-1023]   available
83618 + * [1024-2047] RMS allocatable
83619 + * [2048-4095] kernel comms data contexts
83620 + */
83621 +#define ELAN3_KCOMM_CONTEXT_NUM                0x001                   /* old kernel comms context (system) */
83622 +#define ELAN3_CM_CONTEXT_NUM           0x002                   /* new cluster member ship comms context (system) */
83623 +#define ELAN3_MRF_CONTEXT_NUM          0x003                   /* multi-rail kernel comms context */
83624 +#define ELAN3_DMARING_BASE_CONTEXT_NUM 0x010                   /* 16 contexts for dma ring issue (system) */
83625 +#define ELAN3_DMARING_TOP_CONTEXT_NUM  0x01f
83626 +
83627 +#define ELAN3_HWTEST_BASE_CONTEXT_NUM  0x020                   /* reserved for hardware test */
83628 +#define ELAN3_HWTEST_TOP_CONTEXT_NUM   0x03f
83629 +
83630 +#define ELAN3_KCOMM_BASE_CONTEXT_NUM   0x800                   /* kernel comms data transfer contexts */
83631 +#define ELAN3_KCOMM_TOP_CONTEXT_NUM    0xfff
83632 +
83633 +#define ELAN3_HWTEST_CONTEXT(ctx)      ((ctx) >= ELAN3_HWTEST_BASE_CONTEXT_NUM && \
83634 +                                        (ctx) <= ELAN3_HWTEST_TOP_CONTEXT_NUM)    
83635 +
83636 +#define ELAN3_SYSTEM_CONTEXT(ctx)      (((ctx) & SYS_CONTEXT_BIT) != 0 || \
83637 +                                        (ctx) < E3_NUM_CONTEXT_0 || \
83638 +                                        (ctx) >= ELAN3_KCOMM_BASE_CONTEXT_NUM)
83639 +
83640 +/* Maximum number of virtual processes */
83641 +#define ELAN3_MAX_VPS          (16384)
83642 +
83643 +#define ELAN3_INVALID_PROCESS  (0x7fffffff)            /* A GUARANTEED invalid process # */
83644 +#define ELAN3_INVALID_NODE     (0xFFFF)
83645 +#define ELAN3_INVALID_CONTEXT  (0xFFFF)
83646 +
83647 +
83648 +
83649 +#if defined(__KERNEL__) && !defined(__ELAN3__)
83650 +
83651 +/*
83652 + * Contexts are accessible via Elan capabilities,
83653 + * for each context that can be "attached" to there
83654 + * is a ELAN3_CTXT_INFO structure created by its
83655 + * "owner".  This also "remembers" all remote 
83656 + * segments that have "blazed" a trail to it.
83657 + *
83658 + * If the "owner" goes away the soft info is 
83659 + * destroyed when it is no longer "attached" or 
83660 + * "referenced" by a remote segment.
83661 + *
83662 + * If the owner changes the capability, then 
83663 + * the soft info must be not "referenced" or 
83664 + * "attached" before a new process can "attach"
83665 + * to it.
83666 + */
83667 +
83668 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock,
83669 +                         elan3_info::Next elan3_info::Prev elan3_info::Device elan3_info::Owner
83670 +                         elan3_info::Capability elan3_info::AttachedCapability elan3_info::Context))
83671 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
83672 +                         elan3_info::Nacking elan3_info::Disabled))
83673 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_info::Context elan3_info::Device elan3_info::Capability))
83674 +
83675 +#endif /* __KERNEL__ */
83676 +
83677 +#define LOW_ROUTE_PRIORITY     0
83678 +#define HIGH_ROUTE_PRIORITY    1
83679 +
83680 +#define DEFAULT_ROUTE_TIMEOUT  3
83681 +#define DEFAULT_ROUTE_PRIORITY LOW_ROUTE_PRIORITY
83682 +
83683 +
83684 +/* a small route is 4 flits (8 bytes), a big route  */
83685 +/* is 8 flits (16 bytes) - each packed route is 4 bits */
83686 +/* so giving us a maximum of 28 as flit0 does not contain */
83687 +/* packed routes */
83688 +#define MAX_FLITS              8
83689 +#define MAX_PACKED             28
83690 +
83691 +/* bit definitions for 64 bit route pointer */
83692 +#define ROUTE_VALID            (1ULL << 63)
83693 +#define ROUTE_PTR              (1ULL << 62)
83694 +#define ROUTE_CTXT_SHIFT       48
83695 +#define ROUTE_PTR_MASK         ((1ull << ROUTE_CTXT_SHIFT)-1)
83696 +#define ROUTE_GET_CTXT          ((VAL >> ROUTE_CTXT_SHIFT) & 0x3fff )
83697 +
83698 +#define SMALL_ROUTE(flits, context)    (((E3_uint64) (flits)[0] <<  0) | ((E3_uint64) (flits)[1] << 16) | \
83699 +                                        ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (context) << ROUTE_CTXT_SHIFT) | \
83700 +                                        ROUTE_VALID)
83701 +
83702 +#define BIG_ROUTE_PTR(paddr, context)  ((E3_uint64) (paddr) | ((E3_uint64) context << ROUTE_CTXT_SHIFT) | ROUTE_VALID | ROUTE_PTR)
83703 +
83704 +#define BIG_ROUTE0(flits)              (((E3_uint64) (flits)[0] <<  0) | ((E3_uint64) (flits)[1] << 16) | \
83705 +                                        ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (flits)[3] << 48))
83706 +#define BIG_ROUTE1(flits)              (((E3_uint64) (flits)[4] <<  0) | ((E3_uint64) (flits)[5] << 16) | \
83707 +                                        ((E3_uint64) (flits)[6] << 32) | ((E3_uint64) (flits)[7] << 48))
83708 +
83709 +
83710 +/* defines for first flit of a route */
83711 +#define FIRST_HIGH_PRI         (1 << 15)
83712 +#define FIRST_AGE(Val)         ((Val) << 11)
83713 +#define FIRST_TIMEOUT(Val)     ((Val) << 9)
83714 +#define FIRST_PACKED(X)                ((X) << 7)
83715 +#define FIRST_ROUTE(Val)       (Val)
83716 +#define FIRST_ADAPTIVE         (0x30)
83717 +#define FIRST_BCAST_TREE       (0x20)
83718 +#define FIRST_MYLINK           (0x10)
83719 +#define FIRST_BCAST(Top, Bot)  (0x40 | ((Top) << 3) | (Bot))
83720 +
83721 +/* defines for 3 bit packed entries for subsequent flits */
83722 +#define PACKED_ROUTE(Val)      (8 | (Val))
83723 +#define PACKED_ADAPTIVE                (3)
83724 +#define PACKED_BCAST_TREE      (2)
83725 +#define PACKED_MYLINK          (1)
83726 +#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
83727 +#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
83728 +
83729 +/* ---------------------------------------------------------- 
83730 + * elan3_route functions 
83731 + * return ELAN3_ROUTE_xxx codes
83732 + * ---------------------------------------------------------- */
83733 +
83734 +#define ELAN3_ROUTE_SUCCESS        (0x00)
83735 +#define ELAN3_ROUTE_SYSCALL_FAILED (0x01)
83736 +#define ELAN3_ROUTE_INVALID        (0x02)
83737 +#define ELAN3_ROUTE_TOO_LONG       (0x04)
83738 +#define ELAN3_ROUTE_LOAD_FAILED    (0x08)
83739 +#define ELAN3_ROUTE_PROC_RANGE     (0x0f)
83740 +#define ELAN3_ROUTE_INVALID_LEVEL  (0x10)
83741 +#define ELAN3_ROUTE_OCILATES       (0x20)
83742 +#define ELAN3_ROUTE_WRONG_DEST     (0x40)
83743 +#define ELAN3_ROUTE_TURN_LEVEL     (0x80)
83744 +#define ELAN3_ROUTE_NODEID_UNKNOWN (0xf0)
83745 +
83746 +#ifdef __cplusplus
83747 +}
83748 +#endif
83749 +
83750 +#endif /* _ELAN3_ELANVP_H */
83751 +
83752 +/*
83753 + * Local variables:
83754 + * c-file-style: "stroustrup"
83755 + * End:
83756 + */
83757 Index: linux-2.6.5-7.191/include/elan3/events.h
83758 ===================================================================
83759 --- linux-2.6.5-7.191.orig/include/elan3/events.h       2004-02-23 16:02:56.000000000 -0500
83760 +++ linux-2.6.5-7.191/include/elan3/events.h    2005-07-28 14:52:52.951663080 -0400
83761 @@ -0,0 +1,183 @@
83762 +/*
83763 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83764 + *
83765 + *    For licensing information please see the supplied COPYING file
83766 + *
83767 + */
83768 +
83769 +#ifndef _ELAN3_EVENTS_H
83770 +#define _ELAN3_EVENTS_H
83771 +
83772 +#ident "$Id: events.h,v 1.45 2003/09/24 13:57:24 david Exp $"
83773 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/events.h,v $*/
83774 +
83775 +/*
83776 + * Alignments for events, event queues and blockcopy blocks.
83777 + */
83778 +#define E3_EVENT_ALIGN                 (8)
83779 +#define E3_QUEUE_ALIGN         (32)
83780 +#define E3_BLK_ALIGN           (64)
83781 +#define E3_BLK_SIZE            (64)
83782 +#define E3_BLK_PATTERN                 (0xfeedface)
83783 +
83784 +#define E3_EVENT_FREE          ((0 << 4) | EV_WCOPY)
83785 +#define E3_EVENT_PENDING       ((1 << 4) | EV_WCOPY)
83786 +#define E3_EVENT_ACTIVE                ((2 << 4) | EV_WCOPY)
83787 +#define E3_EVENT_FIRED         ((3 << 4) | EV_WCOPY)
83788 +#define E3_EVENT_FAILED                ((4 << 4) | EV_WCOPY)
83789 +#define E3_EVENT_DONE          ((5 << 4) | EV_WCOPY)
83790 +#define E3_EVENT_PRIVATE       ((6 << 4) | EV_WCOPY)
83791 +
83792 +/*
83793 + * Event values and masks
83794 + *
83795 + * Block Copy event    xxxxxxxxxxxxxxxx1
83796 + * Chained event       30 bit ptr ....0x
83797 + * Event interrupt     29 bit cookie 01x
83798 + * Dma event           28 bit ptr   011x
83799 + * thread event                28 bit ptr   111x
83800 + */
83801 +#define EV_CLEAR               (0x00000000)
83802 +#define EV_TYPE_BCOPY          (0x00000001)
83803 +#define EV_TYPE_CHAIN          (0x00000000)
83804 +#define EV_TYPE_EVIRQ          (0x00000002)
83805 +#define EV_TYPE_DMA            (0x00000006)
83806 +#define EV_TYPE_THREAD         (0x0000000e)
83807 +
83808 +#define EV_TYPE_BCOPY_BYTE     (0)
83809 +#define EV_TYPE_BCOPY_HWORD    (1)
83810 +#define EV_TYPE_BCOPY_WORD     (2)
83811 +#define EV_TYPE_BCOPY_DWORD    (3)
83812 +
83813 +/*
83814 + * Data type is in the lowest two bits of the Dest pointer.
83815 + */
83816 +#define EV_BCOPY_DTYPE_MASK    (3)
83817 +#define EV_WCOPY               (1)     /* [DestWord] = Source */
83818 +#define EV_BCOPY               (0)     /* [DestBlock] = [SourceBlock] */
83819 +
83820 +#define EV_TYPE_MASK           (0x0000000e)
83821 +#define EV_TYPE_MASK_BCOPY     (0x00000001)
83822 +#define EV_TYPE_MASK_CHAIN     (0x00000002)
83823 +#define EV_TYPE_MASK_EVIRQ     (0x00000006)
83824 +#define EV_TYPE_MASK_DMA       (0x0000000e)
83825 +#define EV_TYPE_MASK_THREAD    (0x0000000e)
83826 +#define EV_TYPE_MASK2          (0x0000000f)
83827 +
83828 +/*
83829 + * Min/Max size for Elan queue entries 
83830 + */
83831 +#define E3_QUEUE_MIN   E3_BLK_SIZE
83832 +#define E3_QUEUE_MAX   (E3_BLK_SIZE * 5)
83833 +
83834 +/*
83835 + * Elan queue state bits
83836 + */
83837 +#define E3_QUEUE_FULL  (1<<0)
83838 +#define E3_QUEUE_LOCKED        (1<<8)
83839 +
83840 +#ifndef _ASM
83841 +
83842 +typedef union _E3_Event
83843 +{
83844 +   E3_uint64   ev_Int64;
83845 +   struct {
83846 +      volatile E3_int32        u_Count;
83847 +      E3_uint32                u_Type;
83848 +   } ev_u;
83849 +} E3_Event;
83850 +
83851 +typedef union _E3_BlockCopyEvent
83852 +{
83853 +   E3_uint64 ev_ForceAlign;
83854 +   struct E3_BlockCopyEvent_u {
83855 +      volatile E3_int32        u_Count;
83856 +      E3_uint32                u_Type;
83857 +      E3_Addr          u_Source;
83858 +      E3_Addr          u_Dest;   /* lowest bits are the data type for endian conversion */
83859 +   } ev_u;
83860 +} E3_BlockCopyEvent;
83861 +
83862 +#define ev_Type   ev_u.u_Type
83863 +#define ev_Count  ev_u.u_Count
83864 +#define ev_Source ev_u.u_Source
83865 +#define ev_Dest   ev_u.u_Dest
83866 +
83867 +typedef union _E3_WaitEvent0
83868 +{
83869 +   E3_uint64            we_ForceAlign;
83870 +   struct {
83871 +       E3_Addr         u_EventLoc;
83872 +       E3_int32        u_WaitCount;
83873 +   } we_u;
83874 +} E3_WaitEvent0;
83875 +#define we_EventLoc we_u.u_EventLoc
83876 +#define we_WaitCount we_u.u_WaitCount
83877 +
83878 +typedef union _E3_Event_Blk
83879 +{
83880 +    E3_uint8  eb_Bytes[E3_BLK_SIZE];
83881 +    E3_uint32 eb_Int32[E3_BLK_SIZE/sizeof (E3_uint32)];
83882 +    E3_uint64 eb_Int64[E3_BLK_SIZE/sizeof (E3_uint64)];
83883 +} E3_Event_Blk;
83884 +
83885 +/* We make eb_done the last word of the blk
83886 + * so that we can guarantee the rest of the blk is
83887 + * correct when this value is set.
83888 + * However, when the TPORT code copies the envelope
83889 + * info into the blk, it uses a dword endian type.
83890 + * Thus we must correct for this when initialising
83891 + * the pattern in the Elan SDRAM blk (eeb_done)
83892 + */
83893 +#define eb_done eb_Int32[15]
83894 +#define eeb_done eb_Int32[15^WordEndianFlip]
83895 +
83896 +#define EVENT_WORD_READY(WORD) (*((volatile E3_uint32 *) WORD) != 0)
83897 +#define EVENT_BLK_READY(BLK) (((volatile E3_Event_Blk *) (BLK))->eb_done != 0)
83898 +#define EVENT_READY(EVENT)   (((volatile E3_Event *) (EVENT))->ev_Count <= 0)
83899 +
83900 +#define ELAN3_WAIT_EVENT (0)
83901 +#define ELAN3_POLL_EVENT (-1)
83902 +
83903 +#define SETUP_EVENT_TYPE(ptr,typeval) (((unsigned long)(ptr)) | (typeval))
83904 +
83905 +#define E3_RESET_BCOPY_BLOCK(BLK)                                                      \
83906 +       do {                                                                            \
83907 +               (BLK)->eb_done = 0;                                                     \
83908 +       } while (0)
83909 +
83910 +typedef struct e3_queue
83911 +{
83912 +   volatile E3_uint32  q_state;        /* queue is full=bit0, queue is locked=bit8 */
83913 +   volatile E3_Addr    q_bptr;         /* block aligned ptr to current back item */
83914 +   E3_uint32           q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
83915 +   E3_Addr             q_top;          /* block aligned ptr to last queue item */
83916 +   E3_Addr             q_base;         /* block aligned ptr to first queue item */
83917 +   volatile E3_Addr    q_fptr;         /* block aligned ptr to current front item */
83918 +   E3_Event            q_event;        /* queue event */
83919 +} E3_Queue;
83920 +
83921 +typedef struct e3_blockcopy_queue
83922 +{
83923 +   volatile E3_uint32  q_state;        /* queue is full=bit0, queue is locked=bit8 */
83924 +   volatile E3_Addr    q_bptr;         /* block aligned ptr to current back item */
83925 +   E3_uint32           q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
83926 +   E3_Addr             q_top;          /* block aligned ptr to last queue item */
83927 +   E3_Addr             q_base;         /* block aligned ptr to first queue item */
83928 +   volatile E3_Addr    q_fptr;         /* block aligned ptr to current front item */
83929 +   E3_BlockCopyEvent   q_event;        /* queue event */
83930 +   E3_uint32           q_pad[6];
83931 +} E3_BlockCopyQueue;
83932 +
83933 +#define E3_QUEUE_EVENT_OFFSET  24
83934 +#define QUEUE_FULL(Q)          ((Q)->q_state & E3_QUEUE_FULL)          
83935 +
83936 +#endif /* ! _ASM */
83937 +
83938 +#endif /* _ELAN3_EVENTS_H */
83939 +
83940 +/*
83941 + * Local variables:
83942 + * c-file-style: "stroustrup"
83943 + * End:
83944 + */
83945 Index: linux-2.6.5-7.191/include/elan3/intrinsics.h
83946 ===================================================================
83947 --- linux-2.6.5-7.191.orig/include/elan3/intrinsics.h   2004-02-23 16:02:56.000000000 -0500
83948 +++ linux-2.6.5-7.191/include/elan3/intrinsics.h        2005-07-28 14:52:52.952662928 -0400
83949 @@ -0,0 +1,320 @@
83950 +/*
83951 + *    Copyright (c) 2003 by Quadrics Limited.
83952 + * 
83953 + *    For licensing information please see the supplied COPYING file
83954 + *
83955 + */
83956 +
83957 +#ifndef _ELAN3_INTRINSICS_H
83958 +#define _ELAN3_INTRINSICS_H
83959 +
83960 +#ident "$Id: intrinsics.h,v 1.35 2003/09/24 13:57:24 david Exp $"
83961 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/intrinsics.h,v $ */
83962 +
83963 +#include <elan3/e3types.h>
83964 +#include <elan3/events.h>
83965 +
83966 +/* 
83967 + * This file contains definitions of the macros for accessing the QSW
83968 + * specific instructions, as if they were functions.
83969 + * The results from the function 
83970 + */
83971 +
83972 +#define C_ACK_OK       0                       /* return from c_close() */
83973 +#define C_ACK_TESTFAIL 1                       /* return from c_close() */
83974 +#define C_ACK_DISCARD  2                       /* return from c_close() */
83975 +#define C_ACK_ERROR    3                       /* return from c_close() */
83976 +
83977 +/*
83978 + * Elan asi's for tproc block accesses
83979 + */
83980 +#define EASI_BYTE      0
83981 +#define EASI_HALF      1
83982 +#define EASI_WORD      2
83983 +#define EASI_DOUBLE    3
83984 +
83985 +#if defined(__ELAN3__) && !defined (_ASM)
83986 +
83987 +extern inline void c_abort(void) 
83988 +{
83989 +    asm volatile (".word 0x0000                ! die you thread you " : : );
83990 +}
83991 +
83992 +extern inline void c_suspend(void) 
83993 +{
83994 +    asm volatile (
83995 +       "set 1f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
83996 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
83997 +       "bne 1f                         ! RevB bug fix. jump to other alignment\n"
83998 +       "nop                            ! RevB bug fix. delay slot\n"
83999 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
84000 +       "suspend                        ! do the real suspend\n"
84001 +       "1: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
84002 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
84003 +       "suspend                        ! do the real suspend\n" : : );
84004 +}
84005 +
84006 +extern inline int c_close(void) 
84007 +{
84008 +    register int rc asm("o0");
84009 +
84010 +    asm volatile ("close %0" : "=r" (rc) : );
84011 +
84012 +    return (rc);
84013 +}
84014 +
84015 +extern inline int c_close_cookie(volatile E3_uint32 *cookiep, E3_uint32 next)
84016 +{
84017 +    register int rc asm("o0");
84018 +
84019 +    asm volatile ("close       %0              ! close the packet\n"
84020 +                 "bz,a         1f              ! ack received\n"
84021 +                 "st           %1, [%2]        ! update cookie on ack\n"
84022 +                 "1:                           ! label for not-ack\n"
84023 +                 : "=r" (rc) : "r" (next), "r" (cookiep));
84024 +
84025 +    return (rc);
84026 +}
84027 +
84028 +extern inline void c_break_busywait(void)
84029 +{
84030 +    asm volatile (
84031 +       "breaktest                      ! test to see if break necessary\n"
84032 +       "bpos 1f                        ! no other thread ready\n"
84033 +       "nop                            ! delay slot\n"
84034 +       "sub     %%sp,3*8*4,%%sp        ! Space to save the registers\n"
84035 +       "stblock %%g0,[%%sp+0]          ! save the globals\n"
84036 +       "stblock %%i0,[%%sp+8*4]        ! save the ins\n"
84037 +       "stblock %%l0,[%%sp+16*4]       ! save the locals\n"
84038 +       "set 2f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
84039 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
84040 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
84041 +       "nop                            ! RevB bug fix. delay slot\n"
84042 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
84043 +       "break                          ! do the real break\n"
84044 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
84045 +       " ldblock [%%sp+16*4],%%l0      ! RevB bug fix. restore locals in delay slot\n"
84046 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
84047 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
84048 +       "break                          ! do the real break\n"
84049 +       "ldblock [%%sp+16*4],%%l0       ! restore locals\n"
84050 +       "4: ldblock [%%sp+8*4], %%i0    ! restore ins\n"
84051 +       "ldblock [%%sp+0],%%g0          ! restore globals\n"
84052 +       "add     %%sp,3*8*4,%%sp        ! restore stack pointer\n"
84053 +       "1: " : : );
84054 +}
84055 +
84056 +extern inline void c_break(void)
84057 +{
84058 +    asm volatile (
84059 +       "breaktest                      ! test to see if break necessary\n"
84060 +       "bne 1f                         ! haven't exceeded our inst count yet\n"
84061 +       "nop                            ! delay slot\n"
84062 +       "sub     %%sp,3*8*4,%%sp        ! Space to save the registers\n"
84063 +       "stblock %%g0,[%%sp+0]          ! save the globals\n"
84064 +       "stblock %%i0,[%%sp+8*4]        ! save the ins\n"
84065 +       "stblock %%l0,[%%sp+16*4]       ! save the locals\n"
84066 +       "set 2f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
84067 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
84068 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
84069 +       "nop                            ! RevB bug fix. delay slot\n"
84070 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
84071 +       "break                          ! do the real break\n"
84072 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
84073 +       " ldblock [%%sp+16*4],%%l0      ! RevB bug fix. restore locals in delay slot\n"
84074 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
84075 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
84076 +       "break                          ! do the real break\n"
84077 +       "ldblock [%%sp+16*4],%%l0       ! restore locals\n"
84078 +       "4: ldblock [%%sp+8*4], %%i0    ! restore ins\n"
84079 +       "ldblock [%%sp+0],%%g0          ! restore globals\n"
84080 +       "add     %%sp,3*8*4,%%sp        ! restore stack pointer\n"
84081 +       "1: " : : );
84082 +}
84083 +
84084 +extern inline void c_open( const int arg ) 
84085 +{
84086 +    asm volatile ("open %0" : : "r" (arg) );
84087 +    asm volatile ("nop; nop; nop; nop");
84088 +    asm volatile ("nop; nop; nop; nop");
84089 +    asm volatile ("nop; nop; nop; nop");
84090 +    asm volatile ("nop; nop; nop; nop");
84091 +    asm volatile ("nop; nop; nop; nop");
84092 +    asm volatile ("nop; nop; nop; nop");
84093 +}
84094 +
84095 +extern inline void c_waitevent( volatile E3_Event *const ptr,
84096 +                               const int count) 
84097 +{
84098 +    register volatile E3_Event *a_unlikely asm("o0") = ptr;
84099 +    register int a_very_unlikely asm("o1") = count;
84100 +
84101 +    asm volatile (
84102 +        "sub     %%sp,1*8*4,%%sp       ! Space to save the registers\n"
84103 +        "stblock %%i0,[%%sp+0]         ! save the ins\n"
84104 +       "set    2f, %%i7                ! RevB bug fix. get address of the wakeup inst\n"
84105 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
84106 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
84107 +       "nop                            ! RevB bug fix. delay slot\n"
84108 +       "ldd [%%i7],%%i4                ! RevB bug fix. data fetch of instructions\n"
84109 +        "waitevent                     ! do the business\n"
84110 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
84111 +        "  ldblock [%%sp+0],%%i0       ! RevB bug fix. restore ins in delay slot\n"
84112 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
84113 +       "ldd [%%i7],%%i4                ! RevB bug fix. data fetch of instructions\n"
84114 +        "waitevent                     ! do the business\n"
84115 +        "ldblock [%%sp+0],%%i0         ! restore ins\n"
84116 +        "4: add     %%sp,1*8*4,%%sp    ! restore stack pointer\n"
84117 +        : /* no outputs */
84118 +        : /* inputs */ "r" (a_unlikely), "r" (a_very_unlikely)
84119 +        : /* clobbered */ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
84120 +                         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7" );
84121 +
84122 +}
84123 +
84124 +#define c_sendtrans0(type,dest)                        \
84125 +       asm volatile ("sendtrans %0, %%g0, %1" : : "i" (type), "r" (dest))
84126 +
84127 +#define c_sendtrans1(type,dest,arg)            \
84128 +       asm volatile ("sendtrans %0, %2, %1" : : "i" (type), "r" (dest), "r" (arg))
84129 +
84130 +#define c_sendtrans2(type,dest,arg1,arg2)      \
84131 +       do {                                    \
84132 +            register const unsigned long a_unlikely_1 asm("o4") = arg1;                        \
84133 +            register const unsigned long a_unlikely_2 asm("o5") = arg2;                        \
84134 +            asm volatile ("sendtrans %0, %2, %1"                                       \
84135 +                : : "i" (type), "r" (dest), "r" (a_unlikely_1), "r" (a_unlikely_2));   \
84136 +       } while(0)
84137 +
84138 +#define c_sendmem(type,dest,ptr)               \
84139 +       asm volatile ("sendtrans %0, [%2], %1" : : "i" (type), "r" (dest), "r" (ptr))
84140 +
84141 +/* Copy a single 64-byte block (src blk is read using a BYTE endian type) */
84142 +extern inline void elan3_copy64b(void *src, void *dst)
84143 +{
84144 +    /* Copy 64 bytes using ldblock/stblock
84145 +     * We save and restore the locals/ins because if we don't gcc
84146 +     * really makes a bad job of optimisising the rest of the thread code!
84147 +     *
84148 +     * We force the parameters in g5, g6 so that they aren't
84149 +     * trashed by the loadblk32 into the locals/ins
84150 +     */
84151 +    register void *tmp1 asm("g5") = src;
84152 +    register void *tmp2 asm("g6") = dst;
84153 +
84154 +    asm volatile (
84155 +       "and     %%sp,63,%%g7           ! Calculate stack alignment\n"
84156 +       "sub     %%sp,2*8*4,%%sp        ! Space to save the registers\n"
84157 +       "sub     %%sp,%%g7,%%sp         ! align stack\n" 
84158 +       "stblock64 %%l0,[%%sp]          ! save the locals and ins\n"
84159 +       "ldblock64a [%0]%2,%%l0         ! load 64-byte block into locals/ins\n"
84160 +       "stblock64a %%l0,[%1]%2         ! store 64-byte block from local/ins\n"
84161 +       "ldblock64 [%%sp],%%l0          ! restore locals and ins\n"
84162 +       "add     %%sp,%%g7, %%sp        ! undo alignment\n"
84163 +       "add     %%sp,2*8*4,%%sp        ! restore stack pointer\n"
84164 +       : /* outputs */
84165 +       : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_BYTE)
84166 +       : /* clobbered */ "g5", "g6", "g7" );
84167 +}
84168 +
84169 +/* Copy a single 64-byte block (src blk is read using a WORD endian type) */
84170 +extern inline void elan3_copy64w(void *src, void *dst)
84171 +{
84172 +    /* Copy 64 bytes using ldblock/stblock
84173 +     * We save and restore the locals/ins because if we don't gcc
84174 +     * really makes a bad job of optimisising the rest of the thread code!
84175 +     *
84176 +     * We force the parameters in g5, g6 so that they aren't
84177 +     * trashed by the loadblk32 into the locals/ins
84178 +     */
84179 +    register void *tmp1 asm("g5") = src;
84180 +    register void *tmp2 asm("g6") = dst;
84181 +
84182 +    asm volatile (
84183 +       "and     %%sp,63,%%g7           ! Calculate stack alignment\n"
84184 +       "sub     %%sp,2*8*4,%%sp        ! Space to save the registers\n"
84185 +       "sub     %%sp,%%g7,%%sp         ! align stack\n" 
84186 +       "stblock64 %%l0,[%%sp]          ! save the locals and ins\n"
84187 +       "ldblock64a [%0]%2,%%l0         ! load 64-byte block into locals/ins\n"
84188 +       "stblock64a %%l0,[%1]%2         ! store 64-byte block from local/ins\n"
84189 +       "ldblock64 [%%sp],%%l0          ! restore locals and ins\n"
84190 +       "add     %%sp,%%g7, %%sp        ! undo alignment\n"
84191 +       "add     %%sp,2*8*4,%%sp        ! restore stack pointer\n"
84192 +       : /* outputs */
84193 +       : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_WORD)
84194 +       : /* clobbered */ "g5", "g6", "g7" );
84195 +}
84196 +
84197 +/* Read a 64-bit value with a WORD (32-bit) endian type */
84198 +extern inline E3_uint64 elan3_read64w( volatile E3_uint64 *const ptr )
84199 +{
84200 +    E3_uint64 result;
84201 +
84202 +    asm volatile (
84203 +       "ldblock8a [%1]%2, %0\n"
84204 +       : /* outputs */ "=r" (result)
84205 +       : /* inputs */ "r" (ptr), "n" (EASI_WORD) );
84206 +
84207 +    return( result );
84208 +}
84209 +
84210 +/* Read a 64-bit value with a DOUBLEWORD (64-bit) endian type */
84211 +extern inline E3_uint64 elan3_read64dw( volatile E3_uint64 *const ptr )
84212 +{
84213 +    E3_uint64 result;
84214 +
84215 +    asm volatile (
84216 +       "ldblock8a [%1]%2, %0\n"
84217 +       : /* outputs */ "=r" (result)
84218 +       : /* inputs */ "r" (ptr), "n" (EASI_DOUBLE) );
84219 +
84220 +    return( result );
84221 +}
84222 +
84223 +/* Write a 32-bit value with a WORD (32-bit) endian type */
84224 +extern inline void elan3_write64w( volatile E3_uint64 *const ptr, E3_uint64 value )
84225 +{
84226 +    asm volatile (
84227 +       "stblock8a %1, [%0]%2\n"
84228 +       : /* no outputs */
84229 +       : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_WORD) );
84230 +}
84231 +
84232 +/* Write a 64-bit value with a DOUBLEWORD (64-bit) endian type */
84233 +extern inline void elan3_write64dw( volatile E3_uint64 *const ptr, E3_uint64 value )
84234 +{
84235 +    asm volatile (
84236 +       "stblock8a %1, [%0]%2\n"
84237 +       : /* no outputs */
84238 +       : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_DOUBLE) );
84239 +}
84240 +
84241 +extern inline E3_uint32 c_swap(volatile E3_uint32 *source, E3_uint32 result)
84242 +{
84243 +   asm volatile("swap [%1],%0\n"
84244 +               : "=r" (result)
84245 +               : "r" (source) ,"0" (result)
84246 +               : "memory");
84247 +   return result;
84248 +}
84249 +
84250 +extern inline E3_uint32 c_swap_save(volatile E3_uint32 *source, const E3_uint32 result)
84251 +{
84252 +   register E3_uint32 a_unlikely;
84253 +   asm volatile("" : "=r" (a_unlikely) : );
84254 +
84255 +   asm volatile("mov %2,%0; swap [%1],%0\n"
84256 +               : "=r" (a_unlikely)
84257 +               : "r" (source) ,"r" (result), "0" (a_unlikely)
84258 +               : "memory");
84259 +   return a_unlikely;
84260 +}
84261 +#endif /* (__ELAN3__) && !(_ASM) */
84262 +
84263 +#endif /* _ELAN3_INTRINSICS_H */
84264 +
84265 +/*
84266 + * Local variables:
84267 + * c-file-style: "stroustrup"
84268 + * End:
84269 + */
84270 Index: linux-2.6.5-7.191/include/elan3/minames.h
84271 ===================================================================
84272 --- linux-2.6.5-7.191.orig/include/elan3/minames.h      2004-02-23 16:02:56.000000000 -0500
84273 +++ linux-2.6.5-7.191/include/elan3/minames.h   2005-07-28 14:52:52.952662928 -0400
84274 @@ -0,0 +1,256 @@
84275 +{MI_WaitForRemoteDescRead,     "MI_WaitForRemoteDescRead"},
84276 +{MI_WaitForRemoteDescRead2,    "MI_WaitForRemoteDescRead2"},
84277 +{MI_WaitForRemoteDescRead2_seq1,       "MI_WaitForRemoteDescRead2_seq1"},
84278 +{MI_SendRemoteDmaRoutes,       "MI_SendRemoteDmaRoutes"},
84279 +{MI_IProcTrapped,      "MI_IProcTrapped"},
84280 +{MI_DProcTrapped,      "MI_DProcTrapped"},
84281 +{MI_CProcTrapped,      "MI_CProcTrapped"},
84282 +{MI_TProcTrapped,      "MI_TProcTrapped"},
84283 +{MI_TestWhichDmaQueue, "MI_TestWhichDmaQueue"},
84284 +{MI_TestWhichDmaQueue_seq1,    "MI_TestWhichDmaQueue_seq1"},
84285 +{MI_InputRemoteDmaUpdateBPtr,  "MI_InputRemoteDmaUpdateBPtr"},
84286 +{MI_FixupQueueContextAndRemoteBit,     "MI_FixupQueueContextAndRemoteBit"},
84287 +{MI_FixupQueueContextAndRemoteBit_seq1,        "MI_FixupQueueContextAndRemoteBit_seq1"},
84288 +{MI_FixupQueueContextAndRemoteBit_seq2,        "MI_FixupQueueContextAndRemoteBit_seq2"},
84289 +{MI_FixupQueueContextAndRemoteBit_seq3,        "MI_FixupQueueContextAndRemoteBit_seq3"},
84290 +{MI_FixupQueueContextAndRemoteBit_seq4,        "MI_FixupQueueContextAndRemoteBit_seq4"},
84291 +{MI_RunDmaCommand,     "MI_RunDmaCommand"},
84292 +{MI_DoSendRemoteDmaDesc,       "MI_DoSendRemoteDmaDesc"},
84293 +{MI_DequeueNonSysCntxDma,      "MI_DequeueNonSysCntxDma"},
84294 +{MI_WaitForRemoteDescRead1,    "MI_WaitForRemoteDescRead1"},
84295 +{MI_RemoteDmaCommand,  "MI_RemoteDmaCommand"},
84296 +{MI_WaitForRemoteRoutes,       "MI_WaitForRemoteRoutes"},
84297 +{MI_DequeueSysCntxDma, "MI_DequeueSysCntxDma"},
84298 +{MI_ExecuteDmaDescriptorForQueue,      "MI_ExecuteDmaDescriptorForQueue"},
84299 +{MI_ExecuteDmaDescriptor1,     "MI_ExecuteDmaDescriptor1"},
84300 +{MI_ExecuteDmaDescriptor1_seq1,        "MI_ExecuteDmaDescriptor1_seq1"},
84301 +{MI_ExecuteDmaDescriptor1_seq2,        "MI_ExecuteDmaDescriptor1_seq2"},
84302 +{MI_ExecuteDmaDescriptor1_seq3,        "MI_ExecuteDmaDescriptor1_seq3"},
84303 +{MI_GetNewSizeInProg,  "MI_GetNewSizeInProg"},
84304 +{MI_GetNewSizeInProg_seq1,     "MI_GetNewSizeInProg_seq1"},
84305 +{MI_FirstBlockRead,    "MI_FirstBlockRead"},
84306 +{MI_ExtraFirstBlockRead,       "MI_ExtraFirstBlockRead"},
84307 +{MI_UnimplementedError,        "MI_UnimplementedError"},
84308 +{MI_UpdateDescriptor,  "MI_UpdateDescriptor"},
84309 +{MI_UpdateDescriptor_seq1,     "MI_UpdateDescriptor_seq1"},
84310 +{MI_UpdateDescriptor_seq2,     "MI_UpdateDescriptor_seq2"},
84311 +{MI_UpdateDescriptor_seq3,     "MI_UpdateDescriptor_seq3"},
84312 +{MI_UpdateDescriptor_seq4,     "MI_UpdateDescriptor_seq4"},
84313 +{MI_UpdateDescriptor_seq5,     "MI_UpdateDescriptor_seq5"},
84314 +{MI_GetNextSizeInProg, "MI_GetNextSizeInProg"},
84315 +{MI_DoStopThisDma,     "MI_DoStopThisDma"},
84316 +{MI_DoStopThisDma_seq1,        "MI_DoStopThisDma_seq1"},
84317 +{MI_GenNewBytesToRead, "MI_GenNewBytesToRead"},
84318 +{MI_WaitForEventReadTy1,       "MI_WaitForEventReadTy1"},
84319 +{MI_WaitUpdateEvent,   "MI_WaitUpdateEvent"},
84320 +{MI_WaitUpdateEvent_seq1,      "MI_WaitUpdateEvent_seq1"},
84321 +{MI_DoSleepOneTickThenRunable, "MI_DoSleepOneTickThenRunable"},
84322 +{MI_RunEvent,  "MI_RunEvent"},
84323 +{MI_EnqueueThread,     "MI_EnqueueThread"},
84324 +{MI_CheckContext0,     "MI_CheckContext0"},
84325 +{MI_EnqueueDma,        "MI_EnqueueDma"},
84326 +{MI_CprocTrapping,     "MI_CprocTrapping"},
84327 +{MI_CprocTrapping_seq1,        "MI_CprocTrapping_seq1"},
84328 +{MI_WaitForRemoteRoutes1,      "MI_WaitForRemoteRoutes1"},
84329 +{MI_SetEventCommand,   "MI_SetEventCommand"},
84330 +{MI_DoSetEvent,        "MI_DoSetEvent"},
84331 +{MI_DoRemoteSetEventNowOrTrapQueueingDma,      "MI_DoRemoteSetEventNowOrTrapQueueingDma"},
84332 +{MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1, "MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1"},
84333 +{MI_SendRemoteDmaRoutes2,      "MI_SendRemoteDmaRoutes2"},
84334 +{MI_WaitForRemoteRoutes2,      "MI_WaitForRemoteRoutes2"},
84335 +{MI_WaitEventCommandTy0,       "MI_WaitEventCommandTy0"},
84336 +{MI_DequeueNonSysCntxDma2,     "MI_DequeueNonSysCntxDma2"},
84337 +{MI_WaitEventCommandTy1,       "MI_WaitEventCommandTy1"},
84338 +{MI_WaitEventCommandTy1_seq1,  "MI_WaitEventCommandTy1_seq1"},
84339 +{MI_DequeueNonSysCntxThread,   "MI_DequeueNonSysCntxThread"},
84340 +{MI_DequeueSysCntxDma1,        "MI_DequeueSysCntxDma1"},
84341 +{MI_DequeueSysCntxThread,      "MI_DequeueSysCntxThread"},
84342 +{MI_TestNonSysCntxDmaQueueEmpty,       "MI_TestNonSysCntxDmaQueueEmpty"},
84343 +{MI_TestNonSysCntxDmaQueueEmpty_seq1,  "MI_TestNonSysCntxDmaQueueEmpty_seq1"},
84344 +{MI_TestNonSysCntxDmaQueueEmpty_seq2,  "MI_TestNonSysCntxDmaQueueEmpty_seq2"},
84345 +{MI_RunThreadCommand,  "MI_RunThreadCommand"},
84346 +{MI_SetEventWaitForLastAcess,  "MI_SetEventWaitForLastAcess"},
84347 +{MI_SetEventReadWait,  "MI_SetEventReadWait"},
84348 +{MI_SetEventReadWait_seq1,     "MI_SetEventReadWait_seq1"},
84349 +{MI_TestEventType,     "MI_TestEventType"},
84350 +{MI_TestEventType_seq1,        "MI_TestEventType_seq1"},
84351 +{MI_TestEventBit2,     "MI_TestEventBit2"},
84352 +{MI_DmaDescOrBlockCopyOrChainedEvent,  "MI_DmaDescOrBlockCopyOrChainedEvent"},
84353 +{MI_RunThread, "MI_RunThread"},
84354 +{MI_RunThread1,        "MI_RunThread1"},
84355 +{MI_RunThread1_seq1,   "MI_RunThread1_seq1"},
84356 +{MI_IncDmaSysCntxBPtr, "MI_IncDmaSysCntxBPtr"},
84357 +{MI_IncDmaSysCntxBPtr_seq1,    "MI_IncDmaSysCntxBPtr_seq1"},
84358 +{MI_IncDmaSysCntxBPtr_seq2,    "MI_IncDmaSysCntxBPtr_seq2"},
84359 +{MI_WaitForCntxDmaDescRead,    "MI_WaitForCntxDmaDescRead"},
84360 +{MI_FillInContext,     "MI_FillInContext"},
84361 +{MI_FillInContext_seq1,        "MI_FillInContext_seq1"},
84362 +{MI_WriteNewDescToQueue,       "MI_WriteNewDescToQueue"},
84363 +{MI_WriteNewDescToQueue_seq1,  "MI_WriteNewDescToQueue_seq1"},
84364 +{MI_TestForQueueWrap,  "MI_TestForQueueWrap"},
84365 +{MI_TestForQueueWrap_seq1,     "MI_TestForQueueWrap_seq1"},
84366 +{MI_TestQueueIsFull,   "MI_TestQueueIsFull"},
84367 +{MI_TestQueueIsFull_seq1,      "MI_TestQueueIsFull_seq1"},
84368 +{MI_TestQueueIsFull_seq2,      "MI_TestQueueIsFull_seq2"},
84369 +{MI_CheckPsychoShitFixup,      "MI_CheckPsychoShitFixup"},
84370 +{MI_PsychoShitFixupForcedRead, "MI_PsychoShitFixupForcedRead"},
84371 +{MI_PrepareDMATimeSlice,       "MI_PrepareDMATimeSlice"},
84372 +{MI_PrepareDMATimeSlice_seq1,  "MI_PrepareDMATimeSlice_seq1"},
84373 +{MI_TProcRestartFromTrapOrTestEventBit2,       "MI_TProcRestartFromTrapOrTestEventBit2"},
84374 +{MI_TProcRestartFromTrapOrTestEventBit2_seq1,  "MI_TProcRestartFromTrapOrTestEventBit2_seq1"},
84375 +{MI_WaitForGlobalsRead,        "MI_WaitForGlobalsRead"},
84376 +{MI_WaitForNPCRead,    "MI_WaitForNPCRead"},
84377 +{MI_EventInterrupt,    "MI_EventInterrupt"},
84378 +{MI_EventInterrupt_seq1,       "MI_EventInterrupt_seq1"},
84379 +{MI_EventInterrupt_seq2,       "MI_EventInterrupt_seq2"},
84380 +{MI_EventInterrupt_seq3,       "MI_EventInterrupt_seq3"},
84381 +{MI_TestSysCntxDmaQueueEmpty,  "MI_TestSysCntxDmaQueueEmpty"},
84382 +{MI_TestSysCntxDmaQueueEmpty_seq1,     "MI_TestSysCntxDmaQueueEmpty_seq1"},
84383 +{MI_TestIfRemoteDesc,  "MI_TestIfRemoteDesc"},
84384 +{MI_DoDmaLocalSetEvent,        "MI_DoDmaLocalSetEvent"},
84385 +{MI_DoDmaLocalSetEvent_seq1,   "MI_DoDmaLocalSetEvent_seq1"},
84386 +{MI_DoDmaLocalSetEvent_seq2,   "MI_DoDmaLocalSetEvent_seq2"},
84387 +{MI_DmaLoop1,  "MI_DmaLoop1"},
84388 +{MI_ExitDmaLoop,       "MI_ExitDmaLoop"},
84389 +{MI_ExitDmaLoop_seq1,  "MI_ExitDmaLoop_seq1"},
84390 +{MI_RemoteDmaTestPAckType,     "MI_RemoteDmaTestPAckType"},
84391 +{MI_PacketDiscardOrTestFailRecIfCCis0, "MI_PacketDiscardOrTestFailRecIfCCis0"},
84392 +{MI_PacketDiscardOrTestFailRecIfCCis0_seq1,    "MI_PacketDiscardOrTestFailRecIfCCis0_seq1"},
84393 +{MI_TestNackFailIsZero2,       "MI_TestNackFailIsZero2"},
84394 +{MI_TestNackFailIsZero3,       "MI_TestNackFailIsZero3"},
84395 +{MI_DmaFailCountError, "MI_DmaFailCountError"},
84396 +{MI_TestDmaForSysCntx, "MI_TestDmaForSysCntx"},
84397 +{MI_TestDmaForSysCntx_seq1,    "MI_TestDmaForSysCntx_seq1"},
84398 +{MI_TestDmaForSysCntx_seq2,    "MI_TestDmaForSysCntx_seq2"},
84399 +{MI_TestAeqB2, "MI_TestAeqB2"},
84400 +{MI_TestAeqB2_seq1,    "MI_TestAeqB2_seq1"},
84401 +{MI_GetNextDmaDescriptor,      "MI_GetNextDmaDescriptor"},
84402 +{MI_DequeueSysCntxDma2,        "MI_DequeueSysCntxDma2"},
84403 +{MI_InputSetEvent,     "MI_InputSetEvent"},
84404 +{MI_PutBackSysCntxDma, "MI_PutBackSysCntxDma"},
84405 +{MI_PutBackSysCntxDma_seq1,    "MI_PutBackSysCntxDma_seq1"},
84406 +{MI_PutBackSysCntxDma_seq2,    "MI_PutBackSysCntxDma_seq2"},
84407 +{MI_InputRemoteDma,    "MI_InputRemoteDma"},
84408 +{MI_InputRemoteDma_seq1,       "MI_InputRemoteDma_seq1"},
84409 +{MI_WaitOneTickForWakeup1,     "MI_WaitOneTickForWakeup1"},
84410 +{MI_SendRemoteDmaDesc, "MI_SendRemoteDmaDesc"},
84411 +{MI_InputLockQueue,    "MI_InputLockQueue"},
84412 +{MI_CloseTheTrappedPacketIfCCis1,      "MI_CloseTheTrappedPacketIfCCis1"},
84413 +{MI_CloseTheTrappedPacketIfCCis1_seq1, "MI_CloseTheTrappedPacketIfCCis1_seq1"},
84414 +{MI_PostDmaInterrupt,  "MI_PostDmaInterrupt"},
84415 +{MI_InputUnLockQueue,  "MI_InputUnLockQueue"},
84416 +{MI_WaitForUnLockDescRead,     "MI_WaitForUnLockDescRead"},
84417 +{MI_SendEOPforRemoteDma,       "MI_SendEOPforRemoteDma"},
84418 +{MI_LookAtRemoteAck,   "MI_LookAtRemoteAck"},
84419 +{MI_InputWriteBlockQueue,      "MI_InputWriteBlockQueue"},
84420 +{MI_WaitForSpStore,    "MI_WaitForSpStore"},
84421 +{MI_TProcNext, "MI_TProcNext"},
84422 +{MI_TProcStoppedRunning,       "MI_TProcStoppedRunning"},
84423 +{MI_InputWriteBlock,   "MI_InputWriteBlock"},
84424 +{MI_RunDmaOrDeqNonSysCntxDma,  "MI_RunDmaOrDeqNonSysCntxDma"},
84425 +{MI_ExecuteDmaDescriptorForRun,        "MI_ExecuteDmaDescriptorForRun"},
84426 +{MI_ConfirmQueueLock,  "MI_ConfirmQueueLock"},
84427 +{MI_DmaInputIdentify,  "MI_DmaInputIdentify"},
84428 +{MI_TProcStoppedRunning2,      "MI_TProcStoppedRunning2"},
84429 +{MI_TProcStoppedRunning2_seq1, "MI_TProcStoppedRunning2_seq1"},
84430 +{MI_TProcStoppedRunning2_seq2, "MI_TProcStoppedRunning2_seq2"},
84431 +{MI_ThreadInputIdentify,       "MI_ThreadInputIdentify"},
84432 +{MI_InputIdWriteAddrAndType3,  "MI_InputIdWriteAddrAndType3"},
84433 +{MI_IProcTrappedWriteStatus,   "MI_IProcTrappedWriteStatus"},
84434 +{MI_FinishTrappingEop, "MI_FinishTrappingEop"},
84435 +{MI_InputTestTrans,    "MI_InputTestTrans"},
84436 +{MI_TestAeqB3, "MI_TestAeqB3"},
84437 +{MI_ThreadUpdateNonSysCntxBack,        "MI_ThreadUpdateNonSysCntxBack"},
84438 +{MI_ThreadQueueOverflow,       "MI_ThreadQueueOverflow"},
84439 +{MI_RunContext0Thread, "MI_RunContext0Thread"},
84440 +{MI_RunContext0Thread_seq1,    "MI_RunContext0Thread_seq1"},
84441 +{MI_RunContext0Thread_seq2,    "MI_RunContext0Thread_seq2"},
84442 +{MI_RunDmaDesc,        "MI_RunDmaDesc"},
84443 +{MI_RunDmaDesc_seq1,   "MI_RunDmaDesc_seq1"},
84444 +{MI_RunDmaDesc_seq2,   "MI_RunDmaDesc_seq2"},
84445 +{MI_TestAeqB,  "MI_TestAeqB"},
84446 +{MI_WaitForNonCntxDmaDescRead, "MI_WaitForNonCntxDmaDescRead"},
84447 +{MI_DmaQueueOverflow,  "MI_DmaQueueOverflow"},
84448 +{MI_BlockCopyEvent,    "MI_BlockCopyEvent"},
84449 +{MI_BlockCopyEventReadBlock,   "MI_BlockCopyEventReadBlock"},
84450 +{MI_BlockCopyWaitForReadData,  "MI_BlockCopyWaitForReadData"},
84451 +{MI_InputWriteWord,    "MI_InputWriteWord"},
84452 +{MI_TraceSetEvents,    "MI_TraceSetEvents"},
84453 +{MI_TraceSetEvents_seq1,       "MI_TraceSetEvents_seq1"},
84454 +{MI_TraceSetEvents_seq2,       "MI_TraceSetEvents_seq2"},
84455 +{MI_InputWriteDoubleWd,        "MI_InputWriteDoubleWd"},
84456 +{MI_SendLockTransIfCCis1,      "MI_SendLockTransIfCCis1"},
84457 +{MI_WaitForDmaRoutes1, "MI_WaitForDmaRoutes1"},
84458 +{MI_LoadDmaContext,    "MI_LoadDmaContext"},
84459 +{MI_InputTestAndSetWord,       "MI_InputTestAndSetWord"},
84460 +{MI_InputTestAndSetWord_seq1,  "MI_InputTestAndSetWord_seq1"},
84461 +{MI_GetDestEventValue, "MI_GetDestEventValue"},
84462 +{MI_SendDmaIdentify,   "MI_SendDmaIdentify"},
84463 +{MI_InputAtomicAddWord,        "MI_InputAtomicAddWord"},
84464 +{MI_LoadBFromTransD0,  "MI_LoadBFromTransD0"},
84465 +{MI_ConditionalWriteBackCCTrue,        "MI_ConditionalWriteBackCCTrue"},
84466 +{MI_WaitOneTickForWakeup,      "MI_WaitOneTickForWakeup"},
84467 +{MI_SendFinalUnlockTrans,      "MI_SendFinalUnlockTrans"},
84468 +{MI_SendDmaEOP,        "MI_SendDmaEOP"},
84469 +{MI_GenLastAddrForPsycho,      "MI_GenLastAddrForPsycho"},
84470 +{MI_FailedAckIfCCis0,  "MI_FailedAckIfCCis0"},
84471 +{MI_FailedAckIfCCis0_seq1,     "MI_FailedAckIfCCis0_seq1"},
84472 +{MI_WriteDmaSysCntxDesc,       "MI_WriteDmaSysCntxDesc"},
84473 +{MI_TimesliceDmaQueueOverflow, "MI_TimesliceDmaQueueOverflow"},
84474 +{MI_DequeueNonSysCntxThread1,  "MI_DequeueNonSysCntxThread1"},
84475 +{MI_DequeueNonSysCntxThread1_seq1,     "MI_DequeueNonSysCntxThread1_seq1"},
84476 +{MI_TestThreadQueueEmpty,      "MI_TestThreadQueueEmpty"},
84477 +{MI_ClearThreadQueueIfCC,      "MI_ClearThreadQueueIfCC"},
84478 +{MI_DequeueSysCntxThread1,     "MI_DequeueSysCntxThread1"},
84479 +{MI_DequeueSysCntxThread1_seq1,        "MI_DequeueSysCntxThread1_seq1"},
84480 +{MI_TProcStartUpGeneric,       "MI_TProcStartUpGeneric"},
84481 +{MI_WaitForPCload2,    "MI_WaitForPCload2"},
84482 +{MI_WaitForNPCWrite,   "MI_WaitForNPCWrite"},
84483 +{MI_WaitForEventWaitAddr,      "MI_WaitForEventWaitAddr"},
84484 +{MI_WaitForWaitEventAccess,    "MI_WaitForWaitEventAccess"},
84485 +{MI_WaitForWaitEventAccess_seq1,       "MI_WaitForWaitEventAccess_seq1"},
84486 +{MI_WaitForWaitEventDesc,      "MI_WaitForWaitEventDesc"},
84487 +{MI_WaitForEventReadTy0,       "MI_WaitForEventReadTy0"},
84488 +{MI_SendCondTestFail,  "MI_SendCondTestFail"},
84489 +{MI_InputMoveToNextTrans,      "MI_InputMoveToNextTrans"},
84490 +{MI_ThreadUpdateSysCntxBack,   "MI_ThreadUpdateSysCntxBack"},
84491 +{MI_FinishedSetEvent,  "MI_FinishedSetEvent"},
84492 +{MI_EventIntUpdateBPtr,        "MI_EventIntUpdateBPtr"},
84493 +{MI_EventQueueOverflow,        "MI_EventQueueOverflow"},
84494 +{MI_MaskLowerSource,   "MI_MaskLowerSource"},
84495 +{MI_DmaLoop,   "MI_DmaLoop"},
84496 +{MI_SendNullSetEvent,  "MI_SendNullSetEvent"},
84497 +{MI_SendFinalSetEvent, "MI_SendFinalSetEvent"},
84498 +{MI_TestNackFailIsZero1,       "MI_TestNackFailIsZero1"},
84499 +{MI_DmaPacketTimedOutOrPacketError,    "MI_DmaPacketTimedOutOrPacketError"},
84500 +{MI_NextPacketIsLast,  "MI_NextPacketIsLast"},
84501 +{MI_TestForZeroLengthDma,      "MI_TestForZeroLengthDma"},
84502 +{MI_WaitForPCload,     "MI_WaitForPCload"},
84503 +{MI_ReadInIns, "MI_ReadInIns"},
84504 +{MI_WaitForInsRead,    "MI_WaitForInsRead"},
84505 +{MI_WaitForLocals,     "MI_WaitForLocals"},
84506 +{MI_WaitForOutsWrite,  "MI_WaitForOutsWrite"},
84507 +{MI_WaitForWaitEvWrBack,       "MI_WaitForWaitEvWrBack"},
84508 +{MI_WaitForLockRead,   "MI_WaitForLockRead"},
84509 +{MI_TestQueueLock,     "MI_TestQueueLock"},
84510 +{MI_InputIdWriteAddrAndType,   "MI_InputIdWriteAddrAndType"},
84511 +{MI_InputIdWriteAddrAndType2,  "MI_InputIdWriteAddrAndType2"},
84512 +{MI_ThreadInputIdentify2,      "MI_ThreadInputIdentify2"},
84513 +{MI_WriteIntoTrapArea0,        "MI_WriteIntoTrapArea0"},
84514 +{MI_GenQueueBlockWrAddr,       "MI_GenQueueBlockWrAddr"},
84515 +{MI_InputDiscardFreeLock,      "MI_InputDiscardFreeLock"},
84516 +{MI_WriteIntoTrapArea1,        "MI_WriteIntoTrapArea1"},
84517 +{MI_WriteIntoTrapArea2,        "MI_WriteIntoTrapArea2"},
84518 +{MI_ResetBPtrToBase,   "MI_ResetBPtrToBase"},
84519 +{MI_InputDoTrap,       "MI_InputDoTrap"},
84520 +{MI_RemoteDmaCntxt0Update,     "MI_RemoteDmaCntxt0Update"},
84521 +{MI_ClearQueueLock,    "MI_ClearQueueLock"},
84522 +{MI_IProcTrappedBlockWriteData,        "MI_IProcTrappedBlockWriteData"},
84523 +{MI_FillContextFilter, "MI_FillContextFilter"},
84524 +{MI_IProcTrapped4,     "MI_IProcTrapped4"},
84525 +{MI_RunSysCntxDma,     "MI_RunSysCntxDma"},
84526 +{MI_ChainedEventError, "MI_ChainedEventError"},
84527 +{MI_InputTrappingEOP,  "MI_InputTrappingEOP"},
84528 +{MI_CheckForRunIfZero, "MI_CheckForRunIfZero"},
84529 +{MI_TestForBreakOrSuspend,     "MI_TestForBreakOrSuspend"},
84530 +{MI_SwapForRunable,    "MI_SwapForRunable"},
84531 Index: linux-2.6.5-7.191/include/elan3/neterr_rpc.h
84532 ===================================================================
84533 --- linux-2.6.5-7.191.orig/include/elan3/neterr_rpc.h   2004-02-23 16:02:56.000000000 -0500
84534 +++ linux-2.6.5-7.191/include/elan3/neterr_rpc.h        2005-07-28 14:52:52.953662776 -0400
84535 @@ -0,0 +1,68 @@
84536 +/*
84537 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84538 + *
84539 + *    For licensing information please see the supplied COPYING file
84540 + *
84541 + */
84542 +
84543 +#ifndef __ELAN3_NETERR_RPC_H
84544 +#define __ELAN3_NETERR_RPC_H
84545 +
84546 +#ident "$Id: neterr_rpc.h,v 1.20 2003/06/26 16:05:22 fabien Exp $"
84547 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/neterr_rpc.h,v $*/
84548 +
84549 +#define NETERR_SERVICE "neterr-srv"
84550 +#define NETERR_PROGRAM ((u_long) 170002)
84551 +#define NETERR_VERSION ((u_long) 1)
84552 +
84553 +#define NETERR_NULL_RPC                0
84554 +#define NETERR_FIXUP_RPC       1
84555 +
84556 +/* network error rpc timeout */
84557 +#define NETERR_RPC_TIMEOUT     5
84558 +
84559 +/*
84560 + * XDR functions for Tru64 and Linux in userspace. 
84561 + *  NB Linux kernelspace xdr routines are in network_error.
84562 + *     and *must* be kept consistent.
84563 + */
84564 +#if defined(DIGITAL_UNIX) || !defined(__KERNEL__)
84565 +bool_t
84566 +xdr_capability (XDR *xdrs, void *arg)
84567 +{
84568 +    ELAN_CAPABILITY *cap = (ELAN_CAPABILITY *) arg;
84569 +
84570 +    return (xdr_opaque (xdrs, (caddr_t) &cap->cap_userkey, sizeof (cap->cap_userkey)) &&
84571 +           xdr_int (xdrs, &cap->cap_version) &&
84572 +           xdr_u_short (xdrs, &cap->cap_type) &&
84573 +           xdr_int (xdrs, &cap->cap_lowcontext) &&
84574 +           xdr_int (xdrs, &cap->cap_highcontext) &&
84575 +           xdr_int (xdrs, &cap->cap_mycontext) &&
84576 +           xdr_int (xdrs, &cap->cap_lownode) &&
84577 +           xdr_int (xdrs, &cap->cap_highnode) &&
84578 +           xdr_u_int (xdrs, &cap->cap_railmask) &&
84579 +           xdr_opaque (xdrs, (caddr_t) &cap->cap_bitmap[0], sizeof (cap->cap_bitmap)));
84580 +}
84581 +
84582 +bool_t
84583 +xdr_neterr_msg (XDR *xdrs, void *req)
84584 +{
84585 +    NETERR_MSG *msg = (NETERR_MSG *) req;
84586 +
84587 +    return (xdr_u_int (xdrs, &msg->Rail) &&
84588 +           xdr_capability (xdrs, &msg->SrcCapability) &&
84589 +           xdr_capability (xdrs, &msg->DstCapability) &&
84590 +           xdr_u_int (xdrs, &msg->DstProcess) &&
84591 +           xdr_u_int (xdrs, &msg->CookieAddr) &&
84592 +           xdr_u_int (xdrs, &msg->CookieVProc) &&
84593 +           xdr_u_int (xdrs, &msg->NextCookie) &&
84594 +           xdr_u_int (xdrs, &msg->WaitForEop));
84595 +}
84596 +#endif /* DIGITAL_UNIX || !__KERNEL__ */
84597 +
84598 +/*
84599 + * Local variables:
84600 + * c-file-style: "stroustrup"
84601 + * End:
84602 + */
84603 +#endif /* __ELAN3_NETERR_RPC_H */
84604 Index: linux-2.6.5-7.191/include/elan3/perm.h
84605 ===================================================================
84606 --- linux-2.6.5-7.191.orig/include/elan3/perm.h 2004-02-23 16:02:56.000000000 -0500
84607 +++ linux-2.6.5-7.191/include/elan3/perm.h      2005-07-28 14:52:52.953662776 -0400
84608 @@ -0,0 +1,29 @@
84609 +/*
84610 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84611 + *
84612 + *    For licensing information please see the supplied COPYING file
84613 + *
84614 + */
84615 +
84616 +#ifndef __ELAN3_PERM_H
84617 +#define __ELAN3_PERM_H
84618 +
84619 +#ident "$Id: perm.h,v 1.7 2003/09/24 13:57:24 david Exp $"
84620 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/perm.h,v $*/
84621 +
84622 +#define ELAN3_PERM_NULL                0x00
84623 +#define ELAN3_PERM_LOCAL_READ  0x04
84624 +#define ELAN3_PERM_READ                0x08
84625 +#define ELAN3_PERM_NOREMOTE    0x0c
84626 +#define ELAN3_PERM_REMOTEREAD  0x10
84627 +#define ELAN3_PERM_REMOTEWRITE 0x14
84628 +#define ELAN3_PERM_REMOTEEVENT 0x18
84629 +#define ELAN3_PERM_REMOTEALL   0x1c
84630 +
84631 +#endif /* __ELAN3_PERM_H */
84632 +
84633 +/*
84634 + * Local variables:
84635 + * c-file-style: "stroustrup"
84636 + * End:
84637 + */
84638 Index: linux-2.6.5-7.191/include/elan3/pte.h
84639 ===================================================================
84640 --- linux-2.6.5-7.191.orig/include/elan3/pte.h  2004-02-23 16:02:56.000000000 -0500
84641 +++ linux-2.6.5-7.191/include/elan3/pte.h       2005-07-28 14:52:52.953662776 -0400
84642 @@ -0,0 +1,139 @@
84643 +/*
84644 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84645 + *
84646 + *    For licensing information please see the supplied COPYING file
84647 + *
84648 + */
84649 +
84650 +#ifndef __ELAN3_PTE_H
84651 +#define __ELAN3_PTE_H
84652 +
84653 +#ident "$Id: pte.h,v 1.26 2003/09/24 13:57:24 david Exp $"
84654 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/pte.h,v $*/
84655 +
84656 +#ifdef __cplusplus
84657 +extern          "C"
84658 +{
84659 +#endif
84660 +
84661 +#include <elan3/e3types.h>
84662 +#include <elan3/perm.h>
84663 +
84664 +typedef E3_uint64 ELAN3_PTE;
84665 +typedef E3_uint32 ELAN3_PTP;
84666 +
84667 +#define ELAN3_PTE_SIZE         (8)
84668 +#define ELAN3_PTP_SIZE         (4)
84669 +
84670 +#define ELAN3_PTE_REF          ((E3_uint64) 1 << 63)           /* 63      - referenced bit */
84671 +#define ELAN3_PTE_MOD          ((E3_uint64) 1 << 55)           /* 55      - modified bit */
84672 +#define ELAN3_RM_MASK          (ELAN3_PTE_REF | ELAN3_PTE_MOD)
84673 +
84674 +#define ELAN3_PTE_PFN_MASK     0x0000fffffffff000ull           /* [12:48] - Physical address */
84675 +
84676 +#define ELAN3_PTE_BIG_ENDIAN   0x80                            /* 7       - big endian */
84677 +#define ELAN3_PTE_64_BIT               0x40                            /* 6       - 64 bit pci address */
84678 +#define ELAN3_PTE_LOCAL                0x20                            /* 5       - local sdram */
84679 +
84680 +#define ELAN3_PTE_PERM_MASK    0x1c                            /* [2:4]   - Permissions */
84681 +#define ELAN3_PTE_PERM_SHIFT      2
84682 +
84683 +#define ELAN3_ET_MASK          0x3
84684 +#define ELAN3_ET_INVALID               0x0                                     /* [0:1] */
84685 +#define ELAN3_ET_PTP           0x1
84686 +#define ELAN3_ET_PTE           0x2
84687 +
84688 +#define ELAN3_INVALID_PTP      ((ELAN3_PTP) 0)
84689 +#define ELAN3_INVALID_PTE      ((ELAN3_PTE) 0)
84690 +
84691 +#define ELAN3_PTP_TYPE(ptp)    ((ptp) & ELAN3_ET_MASK)
84692 +#define ELAN3_PTE_TYPE(pte)    ((pte) & ELAN3_ET_MASK)
84693 +#define ELAN3_PTE_PERM(pte)    ((pte) & ELAN3_PTE_PERM_MASK)
84694 +#define ELAN3_PTE_VALID(pte)   (((pte) & ELAN3_ET_MASK) == ELAN3_ET_PTE)
84695 +#define ELAN3_PTE_ISREF(pte)   ((pte) & ELAN3_PTE_REF)
84696 +#define ELAN3_PTE_ISMOD(pte)   ((pte) & ELAN3_PTE_MOD)
84697 +#define ELAN3_PTE_WRITEABLE(pte)       (ELAN3_PERM_WRITEABLE(ELAN3_PTE_PERM(pte)))
84698 +
84699 +#define ELAN3_PERM_WRITEABLE(perm)     ((perm) == ELAN3_PERM_NOREMOTE || (perm) > ELAN3_PERM_REMOTEREAD)
84700 +#define ELAN3_PERM_REMOTE(perm)                ((perm) > ELAN3_PERM_NOREMOTE)
84701 +
84702 +#define ELAN3_PERM_READONLY(perm)      ((perm) == ELAN3_PERM_NOREMOTE ? ELAN3_PERM_LOCAL_READ : \
84703 +                                        (perm) > ELAN3_PERM_REMOTEREAD ? ELAN3_PERM_READ : (perm))
84704 +#if PAGE_SHIFT == 12
84705 +#  define ELAN3_PAGE_SHIFT     12
84706 +#else
84707 +#  define ELAN3_PAGE_SHIFT     13
84708 +#endif
84709 +
84710 +#define ELAN3_PAGE_SIZE                (1 << ELAN3_PAGE_SHIFT)
84711 +#define ELAN3_PAGE_OFFSET      (ELAN3_PAGE_SIZE-1)
84712 +#define ELAN3_PAGE_MASK                (~ELAN3_PAGE_OFFSET)
84713 +
84714 +#if ELAN3_PAGE_SHIFT == 13
84715 +#  define ELAN3_L3_SHIFT               5
84716 +#else
84717 +#  define ELAN3_L3_SHIFT               6
84718 +#endif
84719 +#define ELAN3_L2_SHIFT         6
84720 +#define ELAN3_L1_SHIFT         8
84721 +
84722 +/* Number of entries in a given level ptbl */
84723 +#define ELAN3_L3_ENTRIES               (1 << ELAN3_L3_SHIFT)
84724 +#define ELAN3_L2_ENTRIES               (1 << ELAN3_L2_SHIFT)
84725 +#define ELAN3_L1_ENTRIES               (1 << ELAN3_L1_SHIFT)
84726 +
84727 +/* Virtual address spanned by each entry */
84728 +#define ELAN3_L3_SIZE          (1 << (ELAN3_PAGE_SHIFT))
84729 +#define ELAN3_L2_SIZE          (1 << (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
84730 +#define ELAN3_L1_SIZE          (1 << (ELAN3_L3_SHIFT+ELAN3_L2_SHIFT+ELAN3_PAGE_SHIFT))
84731 +
84732 +/* Virtual address size of page table */
84733 +#define ELAN3_L1_PTSIZE          (ELAN3_L1_ENTRIES * ELAN3_L1_SIZE)
84734 +#define ELAN3_L3_PTSIZE                (ELAN3_L3_ENTRIES * ELAN3_L3_SIZE)
84735 +#define ELAN3_L2_PTSIZE                (ELAN3_L2_ENTRIES * ELAN3_L2_SIZE)
84736 +
84737 +/* Mask for offset into page table */
84738 +#define ELAN3_L1_PTOFFSET        ((ELAN3_L1_SIZE*ELAN3_L1_ENTRIES)-1)
84739 +#define ELAN3_L3_PTOFFSET      ((ELAN3_L3_SIZE*ELAN3_L3_ENTRIES)-1)
84740 +#define ELAN3_L2_PTOFFSET      ((ELAN3_L2_SIZE*ELAN3_L2_ENTRIES)-1)
84741 +
84742 +#define ELAN3_L1_INDEX(addr)   (((E3_Addr) (addr) & 0xFF000000) >> (ELAN3_L2_SHIFT+ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
84743 +#define ELAN3_L2_INDEX(addr)   (((E3_Addr) (addr) & 0x00FC0000) >> (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
84744 +#define ELAN3_L3_INDEX(addr)   (((E3_Addr) (addr) & 0x0003F000) >> ELAN3_PAGE_SHIFT)
84745 +
84746 +#define        ELAN3_L1_BASE(addr)     (((E3_Addr)(addr)) & 0x00000000)
84747 +#define        ELAN3_L2_BASE(addr)     (((E3_Addr)(addr)) & 0xFF000000)
84748 +#define        ELAN3_L3_BASE(addr)     (((E3_Addr)(addr)) & 0xFFFC0000)
84749 +
84750 +/* Convert a page table pointer entry to the PT */
84751 +#define PTP_TO_PT_PADDR(ptp)   ((E3_Addr)(ptp & 0xFFFFFFFC))
84752 +
84753 +#ifdef __KERNEL__
84754 +/*
84755 + * incompatible access for permission macro.
84756 + */
84757 +extern  u_char  elan3mmu_permissionTable[8];
84758 +#define ELAN3_INCOMPAT_ACCESS(perm,access) (! (elan3mmu_permissionTable[(perm)>>ELAN3_PTE_PERM_SHIFT] & (1 << (access))))
84759 +
84760 +#define elan3_readptp(dev, ptp)                (elan3_sdram_readl (dev, ptp))
84761 +#define elan3_writeptp(dev, ptp, value)        (elan3_sdram_writel (dev, ptp, value))
84762 +#define elan3_readpte(dev, pte)                (elan3_sdram_readq (dev, pte))
84763 +#define elan3_writepte(dev,pte, value) (elan3_sdram_writeq (dev, pte, value))
84764 +
84765 +#define elan3_invalidatepte(dev, pte)  (elan3_sdram_writel (dev, pte, 0))
84766 +#define elan3_modifypte(dev,pte,new)   (elan3_sdram_writel (dev, pte, (int) (new)))
84767 +#define elan3_clrref(dev,pte)          (elan3_sdram_writeb (dev, pte + 7, 0))
84768 +
84769 +#endif /* __KERNEL__ */
84770 +
84771 +#ifdef __cplusplus
84772 +}
84773 +#endif
84774 +
84775 +#endif /* __ELAN3_PTE_H */
84776 +
84777 +/*
84778 + * Local variables:
84779 + * c-file-style: "stroustrup"
84780 + * End:
84781 + */
84782 Index: linux-2.6.5-7.191/include/elan3/spinlock.h
84783 ===================================================================
84784 --- linux-2.6.5-7.191.orig/include/elan3/spinlock.h     2004-02-23 16:02:56.000000000 -0500
84785 +++ linux-2.6.5-7.191/include/elan3/spinlock.h  2005-07-28 14:52:52.954662624 -0400
84786 @@ -0,0 +1,195 @@
84787 +/*
84788 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84789 + *
84790 + *    For licensing information please see the supplied COPYING file
84791 + *
84792 + */
84793 +
84794 +#ifndef _ELAN3_SPINLOCK_
84795 +#define _ELAN3_SPINLOCK_
84796 +
84797 +#ident "$Id: spinlock.h,v 1.31 2003/09/24 13:57:24 david Exp $"
84798 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/spinlock.h,v $*/
84799 +
84800 +/*
84801 + * This spinlock is designed for main/elan processor interactions.
84802 + * The lock is split over Elan/Main memory in such a way that
84803 + * we don't end up busy-polling over the PCI.
84804 + * In the Elan memory we have two words; one is a sequence number
84805 + * and the other is a lock word for main.
84806 + * In main memory we have a copy of the sequence number which main polls when it is
84807 + * waiting for the Elan to drop the lock. Main polls this word until it becomes
84808 + * equal to the sequence number it sampled.
84809 + * The Elan drops the lock by writing the current sequence number to main memory.
84810 + * It is coded to always give priority to the Elan thread, and so when both go for the
84811 + * lock, main will back off first.
84812 + *
84813 + * 18/3/98
84814 + * This has been extended to avoid a starvation case where both the main and thread claim the
84815 + * lock and so both backoff (thread does a break). So now, main attempts to claim the
84816 + * lock by writing 'mainLock' then samples the 'sl_seq' and if it has the lock
84817 + * it sets 'mainGotLock'. The thread will now see the 'sl_mainLock' set, but will only
84818 + * backoff with a c_break_busywait() if 'mainGotLock' is set too.
84819 + */
84820 +typedef struct elan3_spinlock_elan {
84821 +    union {
84822 +       volatile E3_uint64      mainLocks;              /* main writes this dble word */
84823 +       struct {
84824 +           volatile E3_uint32  mainLock;               /* main wants a lock */
84825 +           volatile E3_uint32  mainGotLock;            /* main has the lock */
84826 +       } s;
84827 +    } sl_u;
84828 +    volatile E3_uint32         sl_seq;                 /* thread owns this word */
84829 +    volatile E3_uint32         sl_mainWait;            /* performance counter */
84830 +    volatile E3_uint32         sl_elanWait;            /* performance counter */
84831 +    volatile E3_uint32         sl_elanBusyWait;        /* performance counter */
84832 +    /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */
84833 +    E3_uint64                   sl_pad[5];             /* pad to 64-bytes */
84834 +} ELAN3_SPINLOCK_ELAN;
84835 +
84836 +#define sl_mainLocks sl_u.mainLocks
84837 +#define sl_mainLock  sl_u.s.mainLock
84838 +#define sl_mainGotLock sl_u.s.mainGotLock
84839 +
84840 +#define SL_MAIN_RECESSIVE      1
84841 +#define SL_MAIN_DOMINANT       2
84842 +
84843 +/* Declare this as a main memory cache block for efficiency */
84844 +typedef union elan3_spinlock_main {
84845 +    volatile E3_uint32         sl_seq;                 /* copy of seq number updated by Elan */
84846 +    volatile E3_uint32         sl_Int32[E3_BLK_SIZE/sizeof (E3_uint32)];
84847 +} ELAN3_SPINLOCK_MAIN;
84848 +
84849 +/* Main/Main or Elan/Elan lock word */
84850 +typedef volatile int   ELAN3_SPINLOCK;
84851 +
84852 +#ifdef __ELAN3__
84853 +
84854 +/* Main/Elan interlock */
84855 +
84856 +#define ELAN3_ME_SPINENTER(SLE,SL) do {\
84857 +                       asm volatile ("! elan3_spinlock store barrier");\
84858 +                       (SLE)->sl_seq++; \
84859 +                       if ((SLE)->sl_mainLock) \
84860 +                         elan3_me_spinblock(SLE, SL);\
84861 +                       asm volatile ("! elan3_spinlock store barrier");\
84862 +               } while (0)
84863 +#define ELAN3_ME_SPINEXIT(SLE,SL) do {\
84864 +                       asm volatile ("! elan3_spinlock store barrier");\
84865 +                       (SL)->sl_seq = (SLE)->sl_seq;\
84866 +                       asm volatile ("! elan3_spinlock store barrier");\
84867 +               } while (0)
84868 +
84869 +
84870 +/* Elan/Elan interlock */
84871 +#define ELAN3_SPINENTER(L)     do {\
84872 +                          asm volatile ("! store barrier");\
84873 +                          if (c_swap ((L), 1)) elan3_spinenter(L);\
84874 +                          asm volatile ("! store barrier");\
84875 +                       } while (0)
84876 +#define ELAN3_SPINEXIT(L)      do {\
84877 +                          asm volatile ("! store barrier");\
84878 +                          c_swap((L), 0);\
84879 +                          asm volatile ("! store barrier");\
84880 +                       } while (0)
84881 +
84882 +extern void elan3_me_spinblock (ELAN3_SPINLOCK_ELAN *sle, ELAN3_SPINLOCK_MAIN *sl);
84883 +extern void elan3_spinenter (ELAN3_SPINLOCK *l);
84884 +
84885 +#else                     
84886 +
84887 +/* Main/Elan interlock */
84888 +#ifdef DEBUG
84889 +#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\
84890 +                       register E3_int32 maxLoops = 0x7fffffff;        \
84891 +                       register E3_uint32 seq;\
84892 +                       elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84893 +                       MEMBAR_STORELOAD(); \
84894 +                       seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84895 +                       while (seq != (SL)->sl_seq) {\
84896 +                           elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \
84897 +                           while ((SL)->sl_seq == (seq-1) && maxLoops--) ; \
84898 +                           if (maxLoops < 0) { \
84899 +                               printf("Failed to get ME lock %lx/%lx seq %d sle_seq %d sl_seq %d\n", \
84900 +                                      SL, SLE, seq, \
84901 +                                      elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)), \
84902 +                                      (SL)->sl_seq); \
84903 +                           } \
84904 +                           elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84905 +                           MEMBAR_STORELOAD(); \
84906 +                           seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84907 +                       }\
84908 +                       elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84909 +                       MEMBAR_LOADLOAD();\
84910 +               } while (0)
84911 +#else
84912 +#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\
84913 +                       register E3_uint32 seq;\
84914 +                       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84915 +                       MEMBAR_STORELOAD(); \
84916 +                       seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84917 +                       while (seq != (SL)->sl_seq) {\
84918 +                           elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \
84919 +                           while ((SL)->sl_seq == (seq-1)) ; \
84920 +                           elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84921 +                           MEMBAR_STORELOAD(); \
84922 +                           seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84923 +                       }\
84924 +                       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84925 +                       MEMBAR_LOADLOAD();\
84926 +               } while (0)
84927 +#endif
84928 +#define ELAN3_ME_FORCEENTER(SDRAM,SLE,SL) do { \
84929 +       register E3_uint32 seq; \
84930 +       MEMBAR_STORELOAD(); \
84931 +       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_DOMINANT); \
84932 +       MEMBAR_STORELOAD(); \
84933 +       seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84934 +       while (seq != (SL)->sl_seq) \
84935 +       { \
84936 +               /* NOTE: we MUST call elan3_usecspin here for kernel comms */\
84937 +               while ((SL)->sl_seq == (seq)-1) \
84938 +                       elan3_usecspin (1); \
84939 +               seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84940 +       } \
84941 +       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84942 +       MEMBAR_LOADLOAD(); \
84943 +} while (0)
84944 +
84945 +#define ELAN3_ME_TRYENTER(SDRAM,SLE,SL,SEQ) do { \
84946 +    elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
84947 +    MEMBAR_STORELOAD(); \
84948 +    SEQ = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
84949 +} while (0)
84950 +
84951 +#define ELAN3_ME_CHECKENTER(SDRAM,SLE,SL,SEQ) do { \
84952 +    if ((SEQ) == ((SL)->sl_seq)) { \
84953 +        elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
84954 +        MEMBAR_LOADLOAD();\
84955 +    } \
84956 +    else ELAN3_ME_SPINENTER(SDRAM,SLE,SL); \
84957 +} while (0)
84958 +       
84959 +#define ELAN3_ME_SPINEXIT(SDRAM,SLE,SL) do {\
84960 +                       MEMBAR_STORESTORE(); \
84961 +                       elan3_write64_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLocks), 0); \
84962 +                       MEMBAR_STORESTORE(); \
84963 +               } while (0)
84964 +
84965 +
84966 +/* Main/Main */
84967 +#define ELAN3_SPINENTER(L)     do {\
84968 +                          while (c_swap ((L), 1)) ; \
84969 +                       } while (0)
84970 +#define ELAN3_SPINEXIT(L)      do {\
84971 +                          c_swap((L), 0);\
84972 +                       } while (0)
84973 +#endif /* __ELAN3__ */
84974 +
84975 +#endif /* _ELAN3_SPINLOCK_ */
84976 +
84977 +/*
84978 + * Local variables:
84979 + * c-file-style: "stroustrup"
84980 + * End:
84981 + */
84982 Index: linux-2.6.5-7.191/include/elan3/thread.h
84983 ===================================================================
84984 --- linux-2.6.5-7.191.orig/include/elan3/thread.h       2004-02-23 16:02:56.000000000 -0500
84985 +++ linux-2.6.5-7.191/include/elan3/thread.h    2005-07-28 14:52:52.954662624 -0400
84986 @@ -0,0 +1,137 @@
84987 +/*
84988 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84989 + *
84990 + *    For licensing information please see the supplied COPYING file
84991 + *
84992 + */
84993 +
84994 +#ifndef _ELAN3_THREAD_H
84995 +#define _ELAN3_THREAD_H
84996 +
84997 +#ident "$Id: thread.h,v 1.17 2002/08/09 11:23:34 addy Exp $"
84998 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/thread.h,v $*/
84999 +
85000 +/* Alignment for a stack frame */
85001 +#define E3_STACK_ALIGN         (64)
85002 +
85003 +typedef struct _E3_Frame {
85004 +    E3_uint32         fr_local[8];             /* saved locals (not used) */
85005 +    E3_uint32         fr_arg[6];               /* saved arguements o0 -> o5 */
85006 +    E3_Addr           fr_savefp;               /* saved frame pointer o6 */
85007 +    E3_Addr           fr_savepc;               /* saved program counter o7 */
85008 +    E3_Addr           fr_stret;                /* stuct return addr */
85009 +    E3_uint32         fr_argd[6];              /* arg dump area */
85010 +    E3_uint32         fr_argx[1];              /* array of args past the sixth */
85011 +} E3_Frame;
85012 +
85013 +typedef struct _E3_Stack {
85014 +    E3_uint32          Locals[8];
85015 +    E3_uint32          Ins[8];
85016 +    E3_uint32          Globals[8];
85017 +    E3_uint32          Outs[8];
85018 +} E3_Stack;
85019 +
85020 +typedef struct _E3_OutsRegs {
85021 +   E3_uint32   o[8];                           /* o6 == pc, o7 == fptr */
85022 +} E3_OutsRegs;
85023 +
85024 +/*
85025 + * "Magic" value for stack pointer to be ignored.
85026 + */
85027 +#define VanishingStackPointer  0x42
85028 +
85029 +
85030 +/*
85031 + * When the Elan traps the N & Z CC bits are held in the NPC
85032 + * and the V & C bits are in the PC
85033 + */
85034 +#define PSR_C_BIT      (1)
85035 +#define PSR_V_BIT      (2)
85036 +#define PSR_Z_BIT      (1)
85037 +#define PSR_N_BIT      (2)
85038 +#define CC_MASK                (3)
85039 +#define PC_MASK        (~3)
85040 +#define SP_MASK                (~3)
85041 +
85042 +/*
85043 + * Threads processor Opcodes.
85044 + */
85045 +#define OPCODE_MASK            (0xC1F80000)
85046 +#define OPCODE_IMM             (1 << 13)
85047 +
85048 +#define OPCODE_CLASS(instr)    ((instr) & 0xC0000000)
85049 +#define OPCODE_CLASS_0         0x00000000
85050 +#define OPCODE_CLASS_1         0x40000000
85051 +#define OPCODE_CLASS_2         0x80000000
85052 +#define OPCODE_CLASS_3         0xC0000000
85053 +
85054 +#define OPCODE_CPOP            0x81B00000
85055 +#define OPCODE_Ticc            0x81D00000
85056 +
85057 +#define OPCODE_FCODE_SHIFT     19
85058 +#define OPCODE_FCODE_MASK      0x1f
85059 +#define OPCODE_NOT_ALUOP       0x01000000
85060 +
85061 +#define OPCODE_SLL             0x81280000
85062 +#define OPCODE_SRL             0x81300000
85063 +#define OPCODE_SRA             0x81380000
85064 +
85065 +#define OPCODE_OPEN            0x81600000
85066 +#define OPCODE_CLOSE           0x81680000
85067 +#define OPCODE_BREAKTEST       0x81700000
85068 +
85069 +#define OPCODE_BREAK           0x81a00000
85070 +#define OPCODE_SUSPEND         0x81a80000
85071 +#define OPCODE_WAIT            0x81b00000
85072 +
85073 +#define OPCODE_JMPL            0x81c00000
85074 +
85075 +#define OPCODE_LD              0xC0000000
85076 +#define OPCODE_LDD             0xC0180000
85077 +
85078 +#define OPCODE_LDBLOCK16       0xC0900000
85079 +#define OPCODE_LDBLOCK32       0xC0800000
85080 +#define OPCODE_LDBLOCK64       0xC0980000
85081 +
85082 +#define OPCODE_ST              0xC0200000
85083 +#define OPCODE_STD             0xC0380000
85084 +
85085 +#define OPCODE_SWAP            0xC0780000
85086 +
85087 +#define OPCODE_STBLOCK16       0xC0b00000
85088 +#define OPCODE_STBLOCK32       0xC0a00000
85089 +#define OPCODE_STBLOCK64       0xC0b80000
85090 +
85091 +#define OPCODE_CLASS0_MASK     0xC1C00000
85092 +#define OPCODE_SETHI           0x01000000
85093 +#define OPCODE_BICC            0x00800000
85094 +#define OPCODE_SENDREG         0x01800000
85095 +#define OPCODE_SENDMEM         0x01c00000
85096 +
85097 +#define OPCODE_BICC_BN         0x00000000
85098 +#define OPCODE_BICC_BE         0x02000000
85099 +#define OPCODE_BICC_BLE                0x04000000
85100 +#define OPCODE_BICC_BL         0x06000000
85101 +#define OPCODE_BICC_BLEU       0x08000000
85102 +#define OPCODE_BICC_BCS                0x0A000000
85103 +#define OPCODE_BICC_BNEG       0x0C000000
85104 +#define OPCODE_BICC_BVS                0x0E000000
85105 +
85106 +#define OPCODE_BICC_MASK       0x0E000000
85107 +#define OPCODE_BICC_ANNUL      0x20000000
85108 +
85109 +#define INSTR_RS2(instr)       (((instr) >>  0) & 0x1F)
85110 +#define INSTR_RS1(instr)       (((instr) >> 14) & 0x1F)
85111 +#define INSTR_RD(instr)                (((instr) >> 25) & 0x1F)
85112 +#define INSTR_IMM(instr)       (((instr) & 0x1000) ? ((instr) & 0xFFF) | 0xFFFFF000 : (instr) & 0xFFF)
85113 +
85114 +#define Ticc_COND(instr)       INSTR_RD(instr)
85115 +#define Ticc_TA                        8
85116 +
85117 +#endif /* _ELAN3_THREAD_H */
85118 +
85119 +/*
85120 + * Local variables:
85121 + * c-file-style: "stroustrup"
85122 + * End:
85123 + */
85124 Index: linux-2.6.5-7.191/include/elan3/threadlinkage.h
85125 ===================================================================
85126 --- linux-2.6.5-7.191.orig/include/elan3/threadlinkage.h        2004-02-23 16:02:56.000000000 -0500
85127 +++ linux-2.6.5-7.191/include/elan3/threadlinkage.h     2005-07-28 14:52:52.955662472 -0400
85128 @@ -0,0 +1,103 @@
85129 +/*
85130 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
85131 + *
85132 + *    For licensing information please see the supplied COPYING file
85133 + *
85134 + */
85135 +
85136 +#ifndef __ELAN3_THREADLINKAGE_H
85137 +#define        __ELAN3_THREADLINKAGE_H
85138 +
85139 +#ident "$Id: threadlinkage.h,v 1.6 2002/08/09 11:23:34 addy Exp $"
85140 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadlinkage.h,v $*/
85141 +
85142 +#ifdef __cplusplus
85143 +extern "C" {
85144 +#endif
85145 +
85146 +#if defined(_ASM) || defined(__LANGUAGE_ASSEMBLY__)
85147 +
85148 +/*
85149 + * Macro to define weak symbol aliases. These are similar to the ANSI-C
85150 + *     #pragma weak name = _name
85151 + * except a compiler can determine type. The assembler must be told. Hence,
85152 + * the second parameter must be the type of the symbol (i.e.: function,...)
85153 + */
85154 +#define        ANSI_PRAGMA_WEAK(sym, stype)    \
85155 +       .weak   sym; \
85156 +       .type sym, #stype; \
85157 +/* CSTYLED */ \
85158 +sym    = _/**/sym
85159 +
85160 +/*
85161 + * ENTRY provides the standard procedure entry code
85162 + */
85163 +#define        ENTRY(x) \
85164 +       .section        ".text"; \
85165 +       .align  4; \
85166 +       .global x; \
85167 +x:
85168 +
85169 +/*
85170 + * ENTRY2 is identical to ENTRY but provides two labels for the entry point.
85171 + */
85172 +#define        ENTRY2(x, y) \
85173 +       .section        ".text"; \
85174 +       .align  4; \
85175 +       .global x, y; \
85176 +/* CSTYLED */ \
85177 +x:     ; \
85178 +y:
85179 +
85180 +
85181 +/*
85182 + * ALTENTRY provides for additional entry points.
85183 + */
85184 +#define        ALTENTRY(x) \
85185 +       .global x; \
85186 +x:
85187 +
85188 +/*
85189 + * DGDEF and DGDEF2 provide global data declarations.
85190 + *
85191 + * DGDEF provides a word aligned word of storage.
85192 + *
85193 + * DGDEF2 allocates "sz" bytes of storage with **NO** alignment.  This
85194 + * implies this macro is best used for byte arrays.
85195 + *
85196 + * DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
85197 + */
85198 +#define        DGDEF2(name, sz) \
85199 +       .section        ".data"; \
85200 +       .global name; \
85201 +       .size   name, sz; \
85202 +name:
85203 +
85204 +#define        DGDEF3(name, sz, algn) \
85205 +       .section        ".data"; \
85206 +       .align  algn; \
85207 +       .global name; \
85208 +       .size   name, sz; \
85209 +name:
85210 +
85211 +#define        DGDEF(name)     DGDEF3(name, 4, 4)
85212 +
85213 +/*
85214 + * SET_SIZE trails a function and set the size for the ELF symbol table.
85215 + */
85216 +#define        SET_SIZE(x) \
85217 +       .size   x, (.-x)
85218 +
85219 +#endif /* _ASM || __LANGUAGE_ASSEMBLY__ */
85220 +
85221 +#ifdef __cplusplus
85222 +}
85223 +#endif
85224 +
85225 +#endif /* __ELAN3_THREADLINKAGE_H */
85226 +
85227 +/*
85228 + * Local variables:
85229 + * c-file-style: "stroustrup"
85230 + * End:
85231 + */
85232 Index: linux-2.6.5-7.191/include/elan3/threadsyscall.h
85233 ===================================================================
85234 --- linux-2.6.5-7.191.orig/include/elan3/threadsyscall.h        2004-02-23 16:02:56.000000000 -0500
85235 +++ linux-2.6.5-7.191/include/elan3/threadsyscall.h     2005-07-28 14:52:52.955662472 -0400
85236 @@ -0,0 +1,64 @@
85237 +/*
85238 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
85239 + *
85240 + *    For licensing information please see the supplied COPYING file
85241 + *
85242 + */
85243 +
85244 +#ifndef __ELAN3_SYSCALL_H
85245 +#define __ELAN3_SYSCALL_H
85246 +
85247 +#ident "$Id: threadsyscall.h,v 1.12 2003/09/24 13:57:24 david Exp $"
85248 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadsyscall.h,v $*/
85249 +
85250 +/* 
85251 + * This file contains the system calls supported from the Elan.
85252 + */
85253 +#define ELAN3_DEBUG_TRAPNUM    5       /* thread debugging trap */
85254 +#define ELAN3_ABORT_TRAPNUM    6       /* bad abort trap */
85255 +#define ELAN3_ELANCALL_TRAPNUM 7       /* elansyscall trap */
85256 +#define ELAN3_SYSCALL_TRAPNUM  8       /* new syscall trap */
85257 +
85258 +#define ELAN3_T_SYSCALL_CODE   0       /* offsets in struct elan3_t_syscall */
85259 +#define ELAN3_T_SYSCALL_ERRNO  4
85260 +
85261 +#define ELAN3_SYS_open         1
85262 +#define ELAN3_SYS_close                2
85263 +#define ELAN3_SYS_write                3
85264 +#define ELAN3_SYS_read         4
85265 +#define ELAN3_SYS_poll         5
85266 +#define ELAN3_SYS_ioctl                6
85267 +#define ELAN3_SYS_lseek                7
85268 +#define ELAN3_SYS_mmap         8
85269 +#define ELAN3_SYS_munmap       9
85270 +#define ELAN3_SYS_kill         10
85271 +#define ELAN3_SYS_getpid       11
85272 +
85273 +#if !defined(SYS_getpid) && defined(__NR_getxpid) 
85274 +#define SYS_getpid __NR_getxpid                /* for linux */
85275 +#endif
85276 +
85277 +#if !defined(_ASM) && !defined(__LANGUAGE_ASSEMBLY__)
85278 +
85279 +extern int     elan3_t_open (const char *, int, ...);
85280 +extern ssize_t elan3_t_write (int, const void *, unsigned);
85281 +extern ssize_t elan3_t_read(int, void *, unsigned);
85282 +extern int     elan3_t_ioctl(int, int, ...);
85283 +extern int     elan3_t_close(int);
85284 +extern off_t   elan3_t_lseek(int filedes, off_t offset, int whence);
85285 +
85286 +extern caddr_t elan3_t_mmap(caddr_t, size_t, int, int, int, off_t);
85287 +extern int     elan3_t_munmap(caddr_t, size_t);
85288 +
85289 +extern int     elan3_t_getpid(void);
85290 +extern void    elan3_t_abort(char *str);
85291 +
85292 +#endif /* !_ASM && ! __LANGUAGE_ASSEMBLY__ */
85293 +
85294 +#endif /* __ELAN3_SYSCALL_H */
85295 +
85296 +/*
85297 + * Local variables:
85298 + * c-file-style: "stroustrup"
85299 + * End:
85300 + */
85301 Index: linux-2.6.5-7.191/include/elan3/trtype.h
85302 ===================================================================
85303 --- linux-2.6.5-7.191.orig/include/elan3/trtype.h       2004-02-23 16:02:56.000000000 -0500
85304 +++ linux-2.6.5-7.191/include/elan3/trtype.h    2005-07-28 14:52:52.955662472 -0400
85305 @@ -0,0 +1,116 @@
85306 +/*
85307 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
85308 + *
85309 + *    For licensing information please see the supplied COPYING file
85310 + *
85311 + */
85312 +
85313 +#ifndef _ELAN3_TRTYPE_H
85314 +#define _ELAN3_TRTYPE_H
85315 +
85316 +#ident "$Id: trtype.h,v 1.13 2002/08/09 11:23:34 addy Exp $"
85317 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/trtype.h,v $ */
85318 +
85319 +/*<15> ackNow  */
85320 +#define TR_SENDACK     (1 << 15)
85321 +
85322 +#define TR_SIZE_SHIFT  12
85323 +#define TR_SIZE_MASK   7
85324 +
85325 +/*<14:12> Size 0, 1, 2, 4, 8, 16, 32, 64  Double Words
85326 +          Bit 14 is forced to zero currently so that only size 0, 1, 2, 4 are
85327 +         allowed    */
85328 +
85329 +#define TR_SIZE0       (0 << TR_SIZE_SHIFT)
85330 +#define TR_SIZE1       (1 << TR_SIZE_SHIFT)
85331 +#define TR_SIZE2       (2 << TR_SIZE_SHIFT)
85332 +#define TR_SIZE4       (3 << TR_SIZE_SHIFT)
85333 +#define TR_SIZE8       (4 << TR_SIZE_SHIFT)
85334 +
85335 +#define TR_64_BIT_ADDR (1 << 11)
85336 +#define TR_LAST_TRANS  (1 << 10)
85337 +
85338 +#define TR_WRITEBLOCK_BIT      (1 << 9)
85339 +#define TR_WRITEBLOCK          (TR_WRITEBLOCK_BIT | TR_SIZE8)
85340 +
85341 +
85342 +#define TR_WRITEBLOCK_SIZE     64
85343 +
85344 +/*
85345 + * write-block
85346 + */
85347 +/*     WriteBlock      <8:7>   Data type
85348 +                       <6:0>   Part write size */
85349 +#define TR_TYPE_SHIFT  7
85350 +#define TR_TYPE_MASK   ((1 << 2) - 1)
85351 +
85352 +#define TR_TYPE_BYTE   0
85353 +#define TR_TYPE_SHORT  1
85354 +#define TR_TYPE_WORD   2
85355 +#define TR_TYPE_DWORD  3
85356 +
85357 +#define TR_PARTSIZE_MASK ((1 << 7) -1)
85358 +
85359 +#define TR_WAIT_FOR_EOP        (1 << 8)
85360 +
85361 +/*
85362 + * trace-route format 
85363 + */
85364 +#define TR_TRACEROUTE0_CHANID(val)             ((val) & 1)                     /* 0     Chan Id */
85365 +#define TR_TRACEROUTE0_LINKID(val)             (((val) >> 1) & 7)              /* 1:3   Link Id */
85366 +#define TR_TRACEROUTE0_REVID(val)              (((val) >> 4) & 7)              /* 4:6   Revision ID */
85367 +#define TR_TRACEROUTE0_BCAST_TOP_PIN(val)      (((val) >> 7) & 1)              /* 7     Broadcast Top Pin (REV B) */
85368 +#define TR_TRACEROUTE0_LNR(val)                        ((val) >> 8)                    /* 8:15  Global Link Not Ready */
85369 +
85370 +#define TR_TRACEROUTE1_PRIO(val)               ((val & 0xF))                   /* 0:3   Arrival Priority (REV A) */
85371 +#define TR_TRACEROUTE1_AGE(val)                        (((val) >> 4) & 0xF)            /* 4:7   Priority Held(Age) (REV A) */
85372 +#define TR_TRACEROUTE1_ROUTE_SELECTED(val)     ((val) & 0xFF)                  /* 0:7   Arrival age (REV B) */
85373 +#define TR_TRACEROUTE1_BCAST_TOP(val)          (((val) >> 8) & 7)              /* 8:10  Broadcast Top */
85374 +#define TR_TRACEROUTE1_ADAPT(val)              (((val) >> 12) & 3)             /* 12:13 This Adaptive Value (REV A) */
85375 +#define TR_TRACEROUTE1_BCAST_BOT(val)          (((val) >> 12) & 7)             /* 12:14 Broadcast Bottom (REV B) */
85376 +
85377 +#define TR_TRACEROUTE2_ARRIVAL_AGE(val)                ((val) & 0xF)                   /* 0:3   Arrival Age (REV B) */
85378 +#define TR_TRACEROUTE2_CURR_AGE(val)           (((val) >> 4) & 0xF)            /* 4:7   Current Age (REV B) */
85379 +#define TR_TRACEROUTE2_BUSY(val)               (((val) >> 8) & 0xFF)           /* 8:15  Busy (REV B) */
85380 +
85381 +#define TR_TRACEROUTE_SIZE     32
85382 +#define TR_TRACEROUTE_ENTRIES  (TR_TRACEROUTE_SIZE/2)
85383 +
85384 +/*
85385 + * non-write block
85386 + */
85387 +#define TR_OPCODE_MASK         (((1 << 8) - 1) |                       \
85388 +                                (TR_SIZE_MASK << TR_SIZE_SHIFT) |      \
85389 +                                TR_WRITEBLOCK_BIT)
85390 +
85391 +#define TR_NOP_TRANS           (0x0 | TR_SIZE0)
85392 +#define TR_SETEVENT            (0x0 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS)
85393 +#define TR_REMOTEDMA           (0x1 | TR_SIZE4 | TR_SENDACK | TR_LAST_TRANS)
85394 +#define TR_LOCKQUEUE           (0x2 | TR_SIZE0)
85395 +#define TR_UNLOCKQUEUE         (0x3 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS)
85396 +
85397 +#define TR_SENDDISCARD         (0x4 | TR_SIZE0)
85398 +#define TR_TRACEROUTE          (0x5 | TR_SIZE4)
85399 +
85400 +#define TR_DMAIDENTIFY         (0x6 | TR_SIZE0)
85401 +#define TR_THREADIDENTIFY      (0x7 | TR_SIZE1)
85402 +
85403 +#define TR_GTE                 (0x8 | TR_SIZE1)
85404 +#define TR_LT                  (0x9 | TR_SIZE1)
85405 +#define TR_EQ                  (0xA | TR_SIZE1)
85406 +#define TR_NEQ                 (0xB | TR_SIZE1)
85407 +
85408 +#define TR_WRITEWORD           (0xC | TR_SIZE1)
85409 +#define TR_WRITEDOUBLEWORD     (0xD | TR_SIZE1)
85410 +#define TR_TESTANDWRITE        (0xE | TR_SIZE1)
85411 +#define TR_ATOMICADDWORD       (0xF | TR_SIZE1 | TR_SENDACK | TR_LAST_TRANS)
85412 +#define TR_OPCODE_TYPE_MASK    0xff
85413 +
85414 +
85415 +#endif /* notdef _ELAN3_TRTYPE_H */
85416 +
85417 +/*
85418 + * Local variables:
85419 + * c-file-style: "stroustrup"
85420 + * End:
85421 + */
85422 Index: linux-2.6.5-7.191/include/elan3/urom_addrs.h
85423 ===================================================================
85424 --- linux-2.6.5-7.191.orig/include/elan3/urom_addrs.h   2004-02-23 16:02:56.000000000 -0500
85425 +++ linux-2.6.5-7.191/include/elan3/urom_addrs.h        2005-07-28 14:52:52.956662320 -0400
85426 @@ -0,0 +1,262 @@
85427 +#define MI_WaitForRemoteDescRead 0x0
85428 +#define MI_WaitForRemoteDescRead2 0x1
85429 +#define MI_WaitForRemoteDescRead2_seq1 0x2
85430 +#define MI_SendRemoteDmaRoutes 0x3
85431 +#define MI_IProcTrapped 0x4
85432 +#define MI_DProcTrapped 0x5
85433 +#define MI_CProcTrapped 0x6
85434 +#define MI_TProcTrapped 0x7
85435 +#define MI_TestWhichDmaQueue 0x8
85436 +#define MI_TestWhichDmaQueue_seq1 0x9
85437 +#define MI_InputRemoteDmaUpdateBPtr 0xa
85438 +#define MI_FixupQueueContextAndRemoteBit 0xb
85439 +#define MI_FixupQueueContextAndRemoteBit_seq1 0xc
85440 +#define MI_FixupQueueContextAndRemoteBit_seq2 0xd
85441 +#define MI_FixupQueueContextAndRemoteBit_seq3 0xe
85442 +#define MI_FixupQueueContextAndRemoteBit_seq4 0xf
85443 +#define MI_RunDmaCommand 0x10
85444 +#define MI_DoSendRemoteDmaDesc 0x11
85445 +#define MI_DequeueNonSysCntxDma 0x12
85446 +#define MI_WaitForRemoteDescRead1 0x13
85447 +#define MI_RemoteDmaCommand 0x14
85448 +#define MI_WaitForRemoteRoutes 0x15
85449 +#define MI_DequeueSysCntxDma 0x16
85450 +#define MI_ExecuteDmaDescriptorForQueue 0x17
85451 +#define MI_ExecuteDmaDescriptor1 0x18
85452 +#define MI_ExecuteDmaDescriptor1_seq1 0x19
85453 +#define MI_ExecuteDmaDescriptor1_seq2 0x1a
85454 +#define MI_ExecuteDmaDescriptor1_seq3 0x1b
85455 +#define MI_GetNewSizeInProg 0x1c
85456 +#define MI_GetNewSizeInProg_seq1 0x1d
85457 +#define MI_FirstBlockRead 0x1e
85458 +#define MI_ExtraFirstBlockRead 0x1f
85459 +#define MI_UnimplementedError 0x20
85460 +#define MI_UpdateDescriptor 0x21
85461 +#define MI_UpdateDescriptor_seq1 0x22
85462 +#define MI_UpdateDescriptor_seq2 0x23
85463 +#define MI_UpdateDescriptor_seq3 0x24
85464 +#define MI_UpdateDescriptor_seq4 0x25
85465 +#define MI_UpdateDescriptor_seq5 0x26
85466 +#define MI_GetNextSizeInProg 0x27
85467 +#define MI_DoStopThisDma 0x28
85468 +#define MI_DoStopThisDma_seq1 0x29
85469 +#define MI_GenNewBytesToRead 0x2a
85470 +#define MI_WaitForEventReadTy1 0x2b
85471 +#define MI_WaitUpdateEvent 0x2c
85472 +#define MI_WaitUpdateEvent_seq1 0x2d
85473 +#define MI_DoSleepOneTickThenRunable 0x2e
85474 +#define MI_RunEvent 0x2f
85475 +#define MI_EnqueueThread 0x30
85476 +#define MI_CheckContext0 0x31
85477 +#define MI_EnqueueDma 0x32
85478 +#define MI_CprocTrapping 0x33
85479 +#define MI_CprocTrapping_seq1 0x34
85480 +#define MI_WaitForRemoteRoutes1 0x35
85481 +#define MI_SetEventCommand 0x36
85482 +#define MI_DoSetEvent 0x37
85483 +#define MI_DoRemoteSetEventNowOrTrapQueueingDma 0x38
85484 +#define MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1 0x39
85485 +#define MI_SendRemoteDmaRoutes2 0x3a
85486 +#define MI_WaitForRemoteRoutes2 0x3b
85487 +#define MI_WaitEventCommandTy0 0x3c
85488 +#define MI_DequeueNonSysCntxDma2 0x3d
85489 +#define MI_WaitEventCommandTy1 0x3e
85490 +#define MI_WaitEventCommandTy1_seq1 0x3f
85491 +#define MI_DequeueNonSysCntxThread 0x40
85492 +#define MI_DequeueSysCntxDma1 0x41
85493 +#define MI_DequeueSysCntxThread 0x42
85494 +#define MI_TestNonSysCntxDmaQueueEmpty 0x43
85495 +#define MI_TestNonSysCntxDmaQueueEmpty_seq1 0x44
85496 +#define MI_TestNonSysCntxDmaQueueEmpty_seq2 0x45
85497 +#define MI_RunThreadCommand 0x46
85498 +#define MI_SetEventWaitForLastAcess 0x47
85499 +#define MI_SetEventReadWait 0x48
85500 +#define MI_SetEventReadWait_seq1 0x49
85501 +#define MI_TestEventType 0x4a
85502 +#define MI_TestEventType_seq1 0x4b
85503 +#define MI_TestEventBit2 0x4c
85504 +#define MI_DmaDescOrBlockCopyOrChainedEvent 0x4d
85505 +#define MI_RunThread 0x4e
85506 +#define MI_RunThread1 0x4f
85507 +#define MI_RunThread1_seq1 0x50
85508 +#define MI_IncDmaSysCntxBPtr 0x51
85509 +#define MI_IncDmaSysCntxBPtr_seq1 0x52
85510 +#define MI_IncDmaSysCntxBPtr_seq2 0x53
85511 +#define MI_WaitForCntxDmaDescRead 0x54
85512 +#define MI_FillInContext 0x55
85513 +#define MI_FillInContext_seq1 0x56
85514 +#define MI_WriteNewDescToQueue 0x57
85515 +#define MI_WriteNewDescToQueue_seq1 0x58
85516 +#define MI_TestForQueueWrap 0x59
85517 +#define MI_TestForQueueWrap_seq1 0x5a
85518 +#define MI_TestQueueIsFull 0x5b
85519 +#define MI_TestQueueIsFull_seq1 0x5c
85520 +#define MI_TestQueueIsFull_seq2 0x5d
85521 +#define MI_CheckPsychoShitFixup 0x5e
85522 +#define MI_PsychoShitFixupForcedRead 0x5f
85523 +#define MI_PrepareDMATimeSlice 0x60
85524 +#define MI_PrepareDMATimeSlice_seq1 0x61
85525 +#define MI_TProcRestartFromTrapOrTestEventBit2 0x62
85526 +#define MI_TProcRestartFromTrapOrTestEventBit2_seq1 0x63
85527 +#define MI_WaitForGlobalsRead 0x64
85528 +#define MI_WaitForNPCRead 0x65
85529 +#define MI_EventInterrupt 0x66
85530 +#define MI_EventInterrupt_seq1 0x67
85531 +#define MI_EventInterrupt_seq2 0x68
85532 +#define MI_EventInterrupt_seq3 0x69
85533 +#define MI_TestSysCntxDmaQueueEmpty 0x6a
85534 +#define MI_TestSysCntxDmaQueueEmpty_seq1 0x6b
85535 +#define MI_TestIfRemoteDesc 0x6c
85536 +#define MI_DoDmaLocalSetEvent 0x6d
85537 +#define MI_DoDmaLocalSetEvent_seq1 0x6e
85538 +#define MI_DoDmaLocalSetEvent_seq2 0x6f
85539 +#define MI_DmaLoop1 0x70
85540 +#define MI_ExitDmaLoop 0x71
85541 +#define MI_ExitDmaLoop_seq1 0x72
85542 +#define MI_RemoteDmaTestPAckType 0x73
85543 +#define MI_PacketDiscardOrTestFailRecIfCCis0 0x74
85544 +#define MI_PacketDiscardOrTestFailRecIfCCis0_seq1 0x75
85545 +#define MI_TestNackFailIsZero2 0x76
85546 +#define MI_TestNackFailIsZero3 0x77
85547 +#define MI_DmaFailCountError 0x78
85548 +#define MI_TestDmaForSysCntx 0x79
85549 +#define MI_TestDmaForSysCntx_seq1 0x7a
85550 +#define MI_TestDmaForSysCntx_seq2 0x7b
85551 +#define MI_TestAeqB2 0x7c
85552 +#define MI_TestAeqB2_seq1 0x7d
85553 +#define MI_GetNextDmaDescriptor 0x7e
85554 +#define MI_DequeueSysCntxDma2 0x7f
85555 +#define MI_InputSetEvent 0x80
85556 +#define MI_PutBackSysCntxDma 0x81
85557 +#define MI_PutBackSysCntxDma_seq1 0x82
85558 +#define MI_PutBackSysCntxDma_seq2 0x83
85559 +#define MI_InputRemoteDma 0x84
85560 +#define MI_InputRemoteDma_seq1 0x85
85561 +#define MI_WaitOneTickForWakeup1 0x86
85562 +#define MI_SendRemoteDmaDesc 0x87
85563 +#define MI_InputLockQueue 0x88
85564 +#define MI_CloseTheTrappedPacketIfCCis1 0x89
85565 +#define MI_CloseTheTrappedPacketIfCCis1_seq1 0x8a
85566 +#define MI_PostDmaInterrupt 0x8b
85567 +#define MI_InputUnLockQueue 0x8c
85568 +#define MI_WaitForUnLockDescRead 0x8d
85569 +#define MI_SendEOPforRemoteDma 0x8e
85570 +#define MI_LookAtRemoteAck 0x8f
85571 +#define MI_InputWriteBlockQueue 0x90
85572 +#define MI_WaitForSpStore 0x91
85573 +#define MI_TProcNext 0x92
85574 +#define MI_TProcStoppedRunning 0x93
85575 +#define MI_InputWriteBlock 0x94
85576 +#define MI_RunDmaOrDeqNonSysCntxDma 0x95
85577 +#define MI_ExecuteDmaDescriptorForRun 0x96
85578 +#define MI_ConfirmQueueLock 0x97
85579 +#define MI_DmaInputIdentify 0x98
85580 +#define MI_TProcStoppedRunning2 0x99
85581 +#define MI_TProcStoppedRunning2_seq1 0x9a
85582 +#define MI_TProcStoppedRunning2_seq2 0x9b
85583 +#define MI_ThreadInputIdentify 0x9c
85584 +#define MI_InputIdWriteAddrAndType3 0x9d
85585 +#define MI_IProcTrappedWriteStatus 0x9e
85586 +#define MI_FinishTrappingEop 0x9f
85587 +#define MI_InputTestTrans 0xa0
85588 +#define MI_TestAeqB3 0xa1
85589 +#define MI_ThreadUpdateNonSysCntxBack 0xa2
85590 +#define MI_ThreadQueueOverflow 0xa3
85591 +#define MI_RunContext0Thread 0xa4
85592 +#define MI_RunContext0Thread_seq1 0xa5
85593 +#define MI_RunContext0Thread_seq2 0xa6
85594 +#define MI_RunDmaDesc 0xa7
85595 +#define MI_RunDmaDesc_seq1 0xa8
85596 +#define MI_RunDmaDesc_seq2 0xa9
85597 +#define MI_TestAeqB 0xaa
85598 +#define MI_WaitForNonCntxDmaDescRead 0xab
85599 +#define MI_DmaQueueOverflow 0xac
85600 +#define MI_BlockCopyEvent 0xad
85601 +#define MI_BlockCopyEventReadBlock 0xae
85602 +#define MI_BlockCopyWaitForReadData 0xaf
85603 +#define MI_InputWriteWord 0xb0
85604 +#define MI_TraceSetEvents 0xb1
85605 +#define MI_TraceSetEvents_seq1 0xb2
85606 +#define MI_TraceSetEvents_seq2 0xb3
85607 +#define MI_InputWriteDoubleWd 0xb4
85608 +#define MI_SendLockTransIfCCis1 0xb5
85609 +#define MI_WaitForDmaRoutes1 0xb6
85610 +#define MI_LoadDmaContext 0xb7
85611 +#define MI_InputTestAndSetWord 0xb8
85612 +#define MI_InputTestAndSetWord_seq1 0xb9
85613 +#define MI_GetDestEventValue 0xba
85614 +#define MI_SendDmaIdentify 0xbb
85615 +#define MI_InputAtomicAddWord 0xbc
85616 +#define MI_LoadBFromTransD0 0xbd
85617 +#define MI_ConditionalWriteBackCCTrue 0xbe
85618 +#define MI_WaitOneTickForWakeup 0xbf
85619 +#define MI_SendFinalUnlockTrans 0xc0
85620 +#define MI_SendDmaEOP 0xc1
85621 +#define MI_GenLastAddrForPsycho 0xc2
85622 +#define MI_FailedAckIfCCis0 0xc3
85623 +#define MI_FailedAckIfCCis0_seq1 0xc4
85624 +#define MI_WriteDmaSysCntxDesc 0xc5
85625 +#define MI_TimesliceDmaQueueOverflow 0xc6
85626 +#define MI_DequeueNonSysCntxThread1 0xc7
85627 +#define MI_DequeueNonSysCntxThread1_seq1 0xc8
85628 +#define MI_TestThreadQueueEmpty 0xc9
85629 +#define MI_ClearThreadQueueIfCC 0xca
85630 +#define MI_DequeueSysCntxThread1 0xcb
85631 +#define MI_DequeueSysCntxThread1_seq1 0xcc
85632 +#define MI_TProcStartUpGeneric 0xcd
85633 +#define MI_WaitForPCload2 0xce
85634 +#define MI_WaitForNPCWrite 0xcf
85635 +#define MI_WaitForEventWaitAddr 0xd0
85636 +#define MI_WaitForWaitEventAccess 0xd1
85637 +#define MI_WaitForWaitEventAccess_seq1 0xd2
85638 +#define MI_WaitForWaitEventDesc 0xd3
85639 +#define MI_WaitForEventReadTy0 0xd4
85640 +#define MI_SendCondTestFail 0xd5
85641 +#define MI_InputMoveToNextTrans 0xd6
85642 +#define MI_ThreadUpdateSysCntxBack 0xd7
85643 +#define MI_FinishedSetEvent 0xd8
85644 +#define MI_EventIntUpdateBPtr 0xd9
85645 +#define MI_EventQueueOverflow 0xda
85646 +#define MI_MaskLowerSource 0xdb
85647 +#define MI_DmaLoop 0xdc
85648 +#define MI_SendNullSetEvent 0xdd
85649 +#define MI_SendFinalSetEvent 0xde
85650 +#define MI_TestNackFailIsZero1 0xdf
85651 +#define MI_DmaPacketTimedOutOrPacketError 0xe0
85652 +#define MI_NextPacketIsLast 0xe1
85653 +#define MI_TestForZeroLengthDma 0xe2
85654 +#define MI_WaitForPCload 0xe3
85655 +#define MI_ReadInIns 0xe4
85656 +#define MI_WaitForInsRead 0xe5
85657 +#define MI_WaitForLocals 0xe6
85658 +#define MI_WaitForOutsWrite 0xe7
85659 +#define MI_WaitForWaitEvWrBack 0xe8
85660 +#define MI_WaitForLockRead 0xe9
85661 +#define MI_TestQueueLock 0xea
85662 +#define MI_InputIdWriteAddrAndType 0xeb
85663 +#define MI_InputIdWriteAddrAndType2 0xec
85664 +#define MI_ThreadInputIdentify2 0xed
85665 +#define MI_WriteIntoTrapArea0 0xee
85666 +#define MI_GenQueueBlockWrAddr 0xef
85667 +#define MI_InputDiscardFreeLock 0xf0
85668 +#define MI_WriteIntoTrapArea1 0xf1
85669 +#define MI_WriteIntoTrapArea2 0xf2
85670 +#define MI_ResetBPtrToBase 0xf3
85671 +#define MI_InputDoTrap 0xf4
85672 +#define MI_RemoteDmaCntxt0Update 0xf5
85673 +#define MI_ClearQueueLock 0xf6
85674 +#define MI_IProcTrappedBlockWriteData 0xf7
85675 +#define MI_FillContextFilter 0xf8
85676 +#define MI_IProcTrapped4 0xf9
85677 +#define MI_RunSysCntxDma 0xfa
85678 +#define MI_ChainedEventError 0xfb
85679 +#define MI_InputTrappingEOP 0xfc
85680 +#define MI_CheckForRunIfZero 0xfd
85681 +#define MI_TestForBreakOrSuspend 0xfe
85682 +#define MI_SwapForRunable 0xff
85683 +
85684 +/*
85685 + * Local variables:
85686 + * c-file-style: "stroustrup"
85687 + * End:
85688 + */
85689 Index: linux-2.6.5-7.191/include/elan3/vmseg.h
85690 ===================================================================
85691 --- linux-2.6.5-7.191.orig/include/elan3/vmseg.h        2004-02-23 16:02:56.000000000 -0500
85692 +++ linux-2.6.5-7.191/include/elan3/vmseg.h     2005-07-28 14:52:52.956662320 -0400
85693 @@ -0,0 +1,75 @@
85694 +/*
85695 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
85696 + *
85697 + *    For licensing information please see the supplied COPYING file
85698 + *
85699 + */
85700 +
85701 +#ifndef _VM_SEG_ELAN3_H
85702 +#define _VM_SEG_ELAN3_H
85703 +
85704 +#ident "$Id: vmseg.h,v 1.20 2003/09/24 13:57:24 david Exp $"
85705 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vmseg.h,v $*/
85706 +
85707 +#include <elan3/elanuregs.h>
85708 +
85709 +/*
85710 + * This segment maps Elan registers,  it is fixed size and has 8K 
85711 + * pages split up as follows
85712 + *
85713 + *    ----------------------------------------
85714 + *    |    Performance Counters (read-only)  |
85715 + *    ----------------------------------------
85716 + *    |    Flag Page (read-only)            |
85717 + *    ----------------------------------------
85718 + *    |    Command Port                             |
85719 + *    ----------------------------------------
85720 + */
85721 +typedef volatile struct elan3_flagstats 
85722 +{
85723 +    u_int      CommandFlag;
85724 +    u_int      PageFaults;
85725 +    u_int      CProcTraps;
85726 +    u_int      DProcTraps;
85727 +    u_int      TProcTraps;
85728 +    u_int      IProcTraps;
85729 +    u_int      EopBadAcks;
85730 +    u_int      EopResets;
85731 +    u_int      DmaNetworkErrors;
85732 +    u_int      DmaIdentifyNetworkErrors;
85733 +    u_int      ThreadIdentifyNetworkErrors;
85734 +    u_int      DmaRetries;
85735 +    u_int      ThreadSystemCalls;
85736 +    u_int      ThreadElanCalls;
85737 +    u_int      LoadVirtualProcess;
85738 +} ELAN3_FLAGSTATS;
85739 +
85740 +#ifdef DIGITAL_UNIX
85741 +typedef volatile union elan3_flagpage
85742 +{
85743 +    u_char        Padding[8192];
85744 +    ELAN3_FLAGSTATS Stats;
85745 +} ELAN3_FLAGPAGE;
85746 +
85747 +typedef volatile struct elan3_vmseg
85748 +{
85749 +    E3_CommandPort CommandPort;
85750 +    ELAN3_FLAGPAGE  FlagPage;
85751 +    E3_User_Regs   UserRegs;
85752 +} ELAN3_VMSEG;
85753 +
85754 +#define SEGELAN3_SIZE   (sizeof (ELAN3_VMSEG))
85755 +
85756 +#define SEGELAN3_COMMAND_PORT  0
85757 +#define SEGELAN3_FLAG_PAGE     1
85758 +#define SEGELAN3_PERF_COUNTERS 2
85759 +
85760 +#endif /* DIGITAL_UNIX */
85761 +
85762 +#endif /* _VM_SEG_ELAN3_H */
85763 +
85764 +/*
85765 + * Local variables:
85766 + * c-file-style: "stroustrup"
85767 + * End:
85768 + */
85769 Index: linux-2.6.5-7.191/include/elan3/vpd.h
85770 ===================================================================
85771 --- linux-2.6.5-7.191.orig/include/elan3/vpd.h  2004-02-23 16:02:56.000000000 -0500
85772 +++ linux-2.6.5-7.191/include/elan3/vpd.h       2005-07-28 14:52:52.957662168 -0400
85773 @@ -0,0 +1,47 @@
85774 +/*
85775 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
85776 + *
85777 + *    For licensing information please see the supplied COPYING file
85778 + *
85779 + */
85780 +
85781 +#ident "$Id: vpd.h,v 1.5 2002/08/09 11:23:34 addy Exp $"
85782 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vpd.h,v $*/
85783 +
85784 +#ifndef __ELAN3_VPD_H
85785 +#define __ELAN3_VPD_H
85786 +
85787 +#define LARGE_RESOURCE_BIT                     0x80
85788 +
85789 +#define SMALL_RESOURCE_COMPATIBLE_DEVICE_ID    0x3
85790 +#define SMALL_RESOURCE_VENDOR_DEFINED          0xE
85791 +#define SMALL_RESOURCE_END_TAG                 0xF
85792 +
85793 +#define LARGE_RESOURCE_STRING                  0x2
85794 +#define LARGE_RESOURCE_VENDOR_DEFINED          0x4
85795 +#define LARGE_RESOURCE_VITAL_PRODUCT_DATA      0x10
85796 +
85797 +#define VPD_PART_NUMBER                        "PN"
85798 +#define VPD_FRU_PART_NUMBER            "FN"
85799 +#define VPD_EC_LEVEL                   "EC"
85800 +#define VPD_MANUFACTURE_ID             "MN"
85801 +#define VPD_SERIAL_NUMBER              "SN"
85802 +
85803 +#define VPD_LOAD_ID                    "LI"
85804 +#define VPD_ROM_LEVEL                  "RL"
85805 +#define VPD_ALTERABLE_ROM_LEVEL                "RM"
85806 +#define VPD_NETWORK_ADDRESS            "NA"
85807 +#define VPD_DEVICE_DRIVER_LEVEL                "DD"
85808 +#define VPD_DIAGNOSTIC_LEVEL           "DG"
85809 +#define VPD_LOADABLE_MICROCODE_LEVEL   "LL"
85810 +#define VPD_VENDOR_ID                  "VI"
85811 +#define VPD_FUNCTION_NUMBER            "FU"
85812 +#define VPD_SUBSYSTEM_VENDOR_ID                "SI"
85813 +
85814 +#endif /* __ELAN3_VPD_H */
85815 +
85816 +/*
85817 + * Local variables:
85818 + * c-file-style: "stroustrup"
85819 + * End:
85820 + */
85821 Index: linux-2.6.5-7.191/include/elan4/commands.h
85822 ===================================================================
85823 --- linux-2.6.5-7.191.orig/include/elan4/commands.h     2004-02-23 16:02:56.000000000 -0500
85824 +++ linux-2.6.5-7.191/include/elan4/commands.h  2005-07-28 14:52:52.957662168 -0400
85825 @@ -0,0 +1,247 @@
85826 +/*
85827 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
85828 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
85829 + *
85830 + *    For licensing information please see the supplied COPYING file
85831 + *
85832 + */
85833 +
85834 +#ifndef __ELAN4_COMMANDS_H
85835 +#define __ELAN4_COMMANDS_H
85836 +
85837 +#ident "$Id: commands.h,v 1.29 2004/06/16 15:45:02 addy Exp $"
85838 +/*      $Source: /cvs/master/quadrics/elan4hdr/commands.h,v $*/
85839 +
85840 +/*
85841 + * This header file describes the command format for the Elan 4
85842 + *    See CommandFormat.doc
85843 + */
85844 +
85845 +/*
85846 + * Number of channels in traced elanlib_trace.c
85847 + */
85848 +#define TRACE_MAX_CHANNELS     2
85849 +
85850 +/*
85851 + * Define encoding for the commands issued into the command queues
85852 + */
85853 +#define RUN_THREAD_CMD       0x00
85854 +#define OPEN_STEN_PKT_CMD    0x01
85855 +#define WRITE_DWORD_CMD      0x02
85856 +#define ADD_DWORD_CMD        0x03
85857 +#define COPY64_CMD           0x05
85858 +#define GUARD_CMD            0x06
85859 +#define SET_EVENT_CMD        0x07
85860 +#define SEND_TRANS_CMD       0x09
85861 +#define INTERRUPT_CMD        0x0d
85862 +#define RUN_DMA_CMD          0x0e
85863 +#define SET_EVENTN_CMD       0x0f
85864 +#define NOP_CMD                     0x17
85865 +#define MAKE_EXT_CLEAN_CMD   0x37
85866 +#define WAIT_EVENT_CMD       0x1f
85867 +
85868 +/*
85869 + * Define the portion of the data word the user is NOT
85870 + * allowed to use. This varies with Commmand type
85871 + */
85872 +#define RUN_THREAD_CMD_MASK       0x03
85873 +#define OPEN_STEN_PKT_CMD_MASK    0x0f
85874 +#define WRITE_DWORD_CMD_MASK      0x07
85875 +#define ADD_DWORD_CMD_MASK        0x07
85876 +#define COPY64_CMD_MASK           0x0f
85877 +#define GUARD_CMD_MASK            0x0f
85878 +#define SET_EVENT_CMD_MASK        0x1f
85879 +#define SEND_TRANS_CMD_MASK       0x1f
85880 +#define INTERRUPT_CMD_MASK        0x0f
85881 +#define RUN_DMA_CMD_MASK          0x0f
85882 +#define SET_EVENTN_CMD_MASK       0x1f
85883 +#define NOP_CMD_MASK             0x3f
85884 +#define MAKE_EXT_CLEAN_MASK      0x3f
85885 +#define WAIT_EVENT_CMD_MASK       0x1f
85886 +
85887 +#define COPY64_DATA_TYPE_SHIFT    0x4
85888 +#define COPY64_DTYPE_BYTE        (0 << COPY64_DATA_TYPE_SHIFT)
85889 +#define COPY64_DTYPE_SHORT       (1 << COPY64_DATA_TYPE_SHIFT)
85890 +#define COPY64_DTYPE_WORD        (2 << COPY64_DATA_TYPE_SHIFT)
85891 +#define COPY64_DTYPE_LONG        (3 << COPY64_DATA_TYPE_SHIFT)
85892 +
85893 +/*
85894 + * SET_EVENTN - word 1 has following form
85895 + * [63:5]      Event Address
85896 + * [4:0]       Part Set Value.
85897 + */
85898 +#define SET_EVENT_PART_SET_MASK      0x1f
85899 +
85900 +/* OPEN_STEN_PKT_CMD 
85901 + *   [63:32]   Vproc
85902 + *   [31]      Use Test
85903 + *   [30:28]   unused
85904 + *   [27:21]   Test Acceptable PAck code
85905 + *   [20:16]   Test Ack Channel Number
85906 + *   [15:9]    Acceptable PAck code
85907 + *   [8:4]     Ack Channel Number (1 bit on Elan4)
85908 + *   [3:0]     Command type
85909 + */
85910 +/* Acceptable PAck code */
85911 +#define PACK_OK                        (1 << 0)
85912 +#define PACK_TESTFAIL          (1 << 1)
85913 +#define PACK_DISCARD           (1 << 2)
85914 +#define RESTART_COUNT_ZERO     (1 << 3)
85915 +#define PACK_ERROR             (1 << 7)
85916 +#define PACK_TIMEOUT           (1 << 8)
85917 +
85918 +/*
85919 + *#ifndef USE_DIRTY_COMMANDS
85920 + *#define USE_DIRTY_COMMANDS
85921 + *#endif
85922 + */
85923 +#ifdef USE_DIRTY_COMMANDS
85924 +#define OPEN_PACKET_USED_MASK    0x00000000780f00e0ULL
85925 +#define SEND_TRANS_USED_MASK     0xffffffff0000fff0ULL
85926 +#define COPY64_WRITE_USED_MASK   0x000000000000000fULL
85927 +#define MAIN_INT_USED_MASK       0x0000000000003ff0ULL
85928 +#define GUARD_USED_MASK          0xfffffe007000fde0ULL
85929 +#define DMA_TYPESIZE_USED_MASK   0x000000000000fff0ULL
85930 +#define SETEVENTN_USED_MASK      0xffffffffffffffe0ULL
85931 +#define NOP_USED_MASK            0xffffffffffffffc0ULL
85932 +#define EXT_CLEAN_USED_MASK      0xffffffffffffffc0ULL
85933 +#define WAIT_CNT_TYPE_USED_MASK  0x00000000fffff800ULL
85934 +#else
85935 +#define OPEN_PACKET_USED_MASK    0x0ULL
85936 +#define SEND_TRANS_USED_MASK     0x0ULL
85937 +#define COPY64_WRITE_USED_MASK   0x0ULL
85938 +#define MAIN_INT_USED_MASK       0x0ULL
85939 +#define GUARD_USED_MASK          0x0ULL
85940 +#define DMA_TYPESIZE_USED_MASK   0x0ULL
85941 +#define SETEVENTN_USED_MASK      0x0ULL
85942 +#define NOP_USED_MASK            0x0ULL
85943 +#define EXT_CLEAN_USED_MASK      0x0ULL
85944 +#define WAIT_CNT_TYPE_USED_MASK  0x0ULL
85945 +#endif
85946 +
85947 +#define OPEN_PACKET(chan, code, vproc) \
85948 +       ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | OPEN_STEN_PKT_CMD)
85949 +
85950 +#define OPEN_PACKET_TEST(chan, code, vproc, tchan, tcode) \
85951 +       ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | \
85952 +        (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | (((E4_uint64) 1) << 31) | OPEN_STEN_PKT_CMD)
85953 +
85954 +/*
85955 + * GUARD_CMD
85956 + *   [63:41]   unused
85957 + *   [40]      Reset Restart Fail Count        // only performed if the Guard executes the next command.
85958 + *   [39:32]   New Restart Fail Count value
85959 + *   [31]      Use Test
85960 + *   [30:28]   unused
85961 + *   [27:21]   Test Acceptable PAck code
85962 + *   [20:16]   Test Ack Channel Number
85963 + *   [15:9]    unused
85964 + *   [8:4]     Ack Channel Number
85965 + *   [3:0]     Command type
85966 + */
85967 +/* GUARD_CHANNEL(chan)
85968 + */
85969 +#define GUARD_ALL_CHANNELS     ((1 << 9) | GUARD_CMD)
85970 +#define GUARD_CHANNEL(chan)    ((((chan) & 1) << 4) | GUARD_CMD)
85971 +#define GUARD_TEST(chan,code)  ((1ull << 31) | (((code) & 0x7f) << 21) | (((chan) & 1) << 16))
85972 +#define GUARD_RESET(count)     ((1ull << 40) | ((((E4_uint64) count) & 0xff) << 32))
85973 +
85974 +#define GUARD_CHANNEL_TEST(chan,tchan,tcode) \
85975 +       ((((chan) & 1) << 4) | (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | \
85976 +        (((E4_uint64) 1) << 31) | GUARD_CMD)
85977 +
85978 +/*
85979 + * SEND_TRANS_CMD
85980 + * [63:32]     unused
85981 + * [31:16]     transaction type
85982 + * [15:4]      unused
85983 + * [3:0]       Command type
85984 + */
85985 +#define SEND_TRANS(TransType)  (((TransType) << 16) | SEND_TRANS_CMD)
85986 +
85987 +/*
85988 + * Command port trace debug levels
85989 + */
85990 +#define TRACE_CMD_BUFFER       0x01
85991 +#define TRACE_CMD_TYPE         0x02
85992 +#define TRACE_CHANNEL_OPENS    0x04
85993 +#define TRACE_GUARDED_ATOMICS  0x08
85994 +#define TRACE_CMD_TIMEOUT      0x10
85995 +
85996 +/*
85997 + * Commands that should be preceeded by a GUARD_CMD.
85998 + */
85999 +#define IS_ATOMIC_CMD(cmd)                                                             \
86000 +   ((cmd) == RUN_THREAD_CMD || (cmd) == ADD_DWORD_CMD || (cmd) == INTERRUPT_CMD ||     \
86001 +    (cmd) == RUN_DMA_CMD    || (cmd) == SET_EVENT_CMD || (cmd) == SET_EVENTN_CMD ||    \
86002 +    (cmd) == WAIT_EVENT_CMD)
86003 +
86004 +#ifndef _ASM
86005 +
86006 +/*
86007 + * These structures are used to build event copy command streams. They are intended to be included
86008 + * in a larger structure to form a self documenting command sequence that can be easily coped and manipulated.
86009 + */
86010 +
86011 +typedef struct e4_runthreadcmd
86012 +{
86013 +   E4_Addr     PC;
86014 +   E4_uint64   r[6];
86015 +} E4_RunThreadCmd;
86016 +
86017 +typedef E4_uint64 E4_OpenCmd;
86018 +
86019 +typedef struct e4_writecmd
86020 +{
86021 +   E4_Addr     WriteAddr;
86022 +   E4_uint64   WriteValue;
86023 +} E4_WriteCmd;
86024 +
86025 +typedef struct e4_addcmd
86026 +{
86027 +   E4_Addr     AddAddr;
86028 +   E4_uint64   AddValue;
86029 +} E4_AddCmd;
86030 +
86031 +typedef struct e4_copycmd
86032 +{
86033 +   E4_Addr     SrcAddr;
86034 +   E4_Addr     DstAddr;
86035 +} E4_CopyCmd;
86036 +
86037 +typedef E4_uint64 E4_GaurdCmd;
86038 +typedef E4_uint64 E4_SetEventCmd;
86039 +
86040 +/*
86041 + * The data to this command must be declared as a vector after the use of this.
86042 + */
86043 +typedef struct e4_sendtranscmd
86044 +{
86045 +   E4_Addr     Type;
86046 +   E4_Addr     Addr;
86047 +} E4_SendTransCmd;
86048 +
86049 +typedef E4_uint64 E4_IntCmd;
86050 +
86051 +/* The normal Dma struct can be used here. */
86052 +
86053 +typedef struct e4_seteventncmd
86054 +{
86055 +   E4_Addr     Event;
86056 +   E4_Addr     SetCount;
86057 +} E4_SetEventNCmd;
86058 +
86059 +typedef E4_uint64 E4_NopCmd;
86060 +typedef E4_uint64 E4_MakeExtCleanCmd;
86061 +
86062 +typedef struct e4_waitcmd
86063 +{
86064 +   E4_Addr     ev_Event;
86065 +   E4_Addr     ev_CountType;
86066 +   E4_Addr     ev_Params[2];
86067 +} E4_WaitCmd;
86068 +
86069 +#endif /* _ASM */
86070 +
86071 +#endif /* __ELAN4_COMMANDS_H  */
86072 +
86073 Index: linux-2.6.5-7.191/include/elan4/debug.h
86074 ===================================================================
86075 --- linux-2.6.5-7.191.orig/include/elan4/debug.h        2004-02-23 16:02:56.000000000 -0500
86076 +++ linux-2.6.5-7.191/include/elan4/debug.h     2005-07-28 14:52:52.958662016 -0400
86077 @@ -0,0 +1,113 @@
86078 +/*
86079 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
86080 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86081 + * 
86082 + *    For licensing information please see the supplied COPYING file
86083 + *
86084 + */
86085 +
86086 +#ifndef _ELAN4_ELANDEBUG_H
86087 +#define _ELAN4_ELANDEBUG_H
86088 +
86089 +#ident "$Id: debug.h,v 1.19.6.1 2005/01/18 14:36:10 david Exp $"
86090 +/*      $Source: /cvs/master/quadrics/elan4mod/debug.h,v $ */
86091 +
86092 +/* values for "type" field - note a "ctxt" is permissible */
86093 +/* and BUFFER/CONSOLE are for explicit calls to elan4_debugf() */
86094 +#define DBG_DEVICE     ((void *) 0)
86095 +#define DBG_USER       ((void *) 1)
86096 +
86097 +#define DBG_BUFFER     ((void *) 62)
86098 +#define DBG_CONSOLE    ((void *) 63)
86099 +#define DBG_NTYPES     64
86100 +
86101 +/* values for "mode" field */
86102 +#define DBG_CONFIG     0x00000001
86103 +#define DBG_INTR       0x00000002
86104 +#define DBG_MAININT    0x00000004
86105 +#define DBG_SDRAM      0x00000008
86106 +#define DBG_MMU                0x00000010
86107 +#define DBG_REGISTER   0x00000020
86108 +#define DBG_CQ         0x00000040
86109 +#define DBG_NETWORK_CTX        0x00000080
86110 +
86111 +#define DBG_FLUSH      0x00000100
86112 +#define DBG_FILE       0x00000200
86113 +#define DBG_CONTROL    0x00000400
86114 +#define DBG_MEM                0x00000800
86115 +
86116 +#define DBG_PERM       0x00001000
86117 +#define DBG_FAULT      0x00002000
86118 +#define DBG_SWAP       0x00004000
86119 +#define DBG_TRAP       0x00008000
86120 +#define DBG_DDCQ       0x00010000
86121 +#define DBG_VP         0x00020000
86122 +#define DBG_RESTART    0x00040000
86123 +#define DBG_RESUME     0x00080000
86124 +#define DBG_CPROC      0x00100000
86125 +#define DBG_DPROC      0x00200000
86126 +#define DBG_EPROC      0x00400000
86127 +#define DBG_IPROC      0x00800000
86128 +#define DBG_TPROC      0x01000000
86129 +#define DBG_IOPROC     0x02000000
86130 +#define DBG_ROUTE      0x04000000
86131 +#define DBG_NETERR     0x08000000
86132 +
86133 +#define DBG_ALL                0x7FFFFFFF
86134 +
86135 +
86136 +#ifdef DEBUG_PRINTF
86137 +
86138 +#  define PRINTF0(type,m,fmt)                  ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt)                  : (void)0)
86139 +#  define PRINTF1(type,m,fmt,a)                        ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a)                : (void)0)
86140 +#  define PRINTF2(type,m,fmt,a,b)              ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b)              : (void)0)
86141 +#  define PRINTF3(type,m,fmt,a,b,c)            ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c)            : (void)0)
86142 +#  define PRINTF4(type,m,fmt,a,b,c,d)          ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d)          : (void)0)
86143 +#  define PRINTF5(type,m,fmt,a,b,c,d,e)                ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e)        : (void)0)
86144 +#  define PRINTF6(type,m,fmt,a,b,c,d,e,f)      ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f)      : (void)0)
86145 +#  define PRINTF7(type,m,fmt,a,b,c,d,e,f,g)    ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g)    : (void)0)
86146 +#  define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h)  ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h)  : (void)0)
86147 +#  define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i)        ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h,i): (void)0)
86148 +#ifdef __GNUC__
86149 +#  define PRINTF(type,m,args...)               ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m, ##args)              : (void)0)
86150 +#endif
86151 +#  define DBGCMD(type,m,cmd)                   ((elan4_debug&(m) || (type) == DBG_CONSOLE)  ? (void) (cmd) : (void) 0)
86152 +
86153 +#else
86154 +
86155 +#  define PRINTF0(type,m,fmt)                  (0)
86156 +#  define PRINTF1(type,m,fmt,a)                        (0)
86157 +#  define PRINTF2(type,m,fmt,a,b)              (0)
86158 +#  define PRINTF3(type,m,fmt,a,b,c)            (0)
86159 +#  define PRINTF4(type,m,fmt,a,b,c,d)          (0)
86160 +#  define PRINTF5(type,m,fmt,a,b,c,d,e)                (0)
86161 +#  define PRINTF6(type,m,fmt,a,b,c,d,e,f)      (0)
86162 +#  define PRINTF7(type,m,fmt,a,b,c,d,e,f,g)    (0)
86163 +#  define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h)  (0)
86164 +#  define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i)        (0)
86165 +#ifdef __GNUC__
86166 +#  define PRINTF(type,m,args...)
86167 +#endif
86168 +#  define DBGCMD(type,m,cmd)                   ((void) 0)
86169 +
86170 +#endif /* DEBUG_PRINTF */
86171 +
86172 +extern unsigned   elan4_debug;
86173 +extern unsigned   elan4_debug_toconsole;
86174 +extern unsigned   elan4_debug_tobuffer;
86175 +extern unsigned   elan4_debug_display_ctxt;
86176 +extern unsigned   elan4_debug_ignore_ctxt;
86177 +extern unsigned   elan4_debug_ignore_type;
86178 +
86179 +extern void      elan4_debug_init(void);
86180 +extern void      elan4_debug_fini(void);
86181 +extern void       elan4_debugf (void *type, int mode, char *fmt, ...);
86182 +extern int        elan4_debug_snapshot (caddr_t ubuffer, int len);
86183 +extern int       elan4_debug_display (void);
86184 +
86185 +/*
86186 + * Local variables:
86187 + * c-file-style: "stroustrup"
86188 + * End:
86189 + */
86190 +#endif /* _ELAN4_ELANDEBUG_H */
86191 Index: linux-2.6.5-7.191/include/elan4/device.h
86192 ===================================================================
86193 --- linux-2.6.5-7.191.orig/include/elan4/device.h       2004-02-23 16:02:56.000000000 -0500
86194 +++ linux-2.6.5-7.191/include/elan4/device.h    2005-07-28 14:52:52.960661712 -0400
86195 @@ -0,0 +1,811 @@
86196 +/*
86197 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
86198 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
86199 + * 
86200 + *    For licensing information please see the supplied COPYING file
86201 + *
86202 + */
86203 +
86204 +#ifndef __ELAN4_ELANDEV_H
86205 +#define __ELAN4_ELANDEV_H
86206 +
86207 +#ident "$Id: device.h,v 1.68.2.12 2005/03/09 12:00:08 addy Exp $"
86208 +/*      $Source: /cvs/master/quadrics/elan4mod/device.h,v $ */
86209 +
86210 +#include <elan/devinfo.h>
86211 +#include <elan/capability.h>
86212 +
86213 +#include <elan4/pci.h>
86214 +#include <elan4/sdram.h>
86215 +#include <elan4/dma.h>
86216 +#include <elan4/events.h>
86217 +#include <elan4/registers.h>
86218 +
86219 +#include <elan4/mmu.h>
86220 +#include <elan4/trap.h>
86221 +#include <elan4/stats.h>
86222 +#include <elan4/neterr.h>
86223 +
86224 +#ifdef CONFIG_MPSAS
86225 +#include <elan4/mpsas.h>
86226 +#endif
86227 +
86228 +#if defined(LINUX)
86229 +#include <elan4/device_Linux.h>
86230 +#elif defined(TRU64UNIX)
86231 +#include <elan4/device_OSF1.h>
86232 +#elif defined(SOLARIS)
86233 +#include <elan4/device_SunOS.h>
86234 +#endif
86235 +
86236 +/*
86237 + * Network context number allocation.
86238 + * [0]          neterr fixup system context
86239 + * [1]          kernel comms system context
86240 + * [2048-4095] kernel comms data contexts
86241 + */
86242 +#define ELAN4_NETERR_CONTEXT_NUM       0x00                    /* network error fixup context number */
86243 +#define ELAN4_KCOMM_CONTEXT_NUM                0x01                    /* kernel comms context number */
86244 +#define ELAN4_KCOMM_BASE_CONTEXT_NUM   0x800                   /* kernel comms data transfer contexts */
86245 +#define ELAN4_KCOMM_TOP_CONTEXT_NUM    0xfff
86246 +
86247 +#define ELAN4_SYSTEM_CONTEXT(ctx)  ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM)
86248 +
86249 +typedef void (ELAN4_HALTFN)(struct elan4_dev *dev, void *arg);
86250 +
86251 +typedef struct elan4_haltop
86252 +{
86253 +    struct list_head    op_link;                               /* chain on a list */
86254 +    E4_uint32          op_mask;                                /* Interrupt mask to see before calling function */
86255 +    
86256 +    ELAN4_HALTFN       *op_function;                           /* function to call */
86257 +    void              *op_arg;                                 /* argument to pass to function */
86258 +} ELAN4_HALTOP;
86259 +
86260 +typedef void (ELAN4_DMA_FLUSHFN)(struct elan4_dev *dev, void *arg, int qfull);
86261 +
86262 +typedef struct elan4_dma_flushop
86263 +{
86264 +    struct list_head    op_link;                               /* chain on a list */
86265 +    ELAN4_DMA_FLUSHFN  *op_function;                           /* function to call */
86266 +    void              *op_arg;                                 /* argument to pass to function */
86267 +} ELAN4_DMA_FLUSHOP;
86268 +
86269 +typedef void (ELAN4_INTFN)(struct elan4_dev *dev, void *arg);
86270 +
86271 +typedef struct elan4_intop
86272 +{
86273 +    struct list_head    op_link;                               /* chain on a list */
86274 +    ELAN4_INTFN        *op_function;                           /* function to call */
86275 +    void              *op_arg;                                 /* argument to pass to function */
86276 +    E4_uint64          op_cookie;                              /* and main interrupt cookie */
86277 +} ELAN4_INTOP;
86278 +
86279 +typedef struct elan4_eccerrs
86280 +{
86281 +    E4_uint64        EccStatus;
86282 +    E4_uint64        ConfigReg;
86283 +    E4_uint32        ErrorCount;
86284 +} ELAN4_ECCERRS;
86285 +
86286 +#define SDRAM_MIN_BLOCK_SHIFT  10
86287 +#define SDRAM_NUM_FREE_LISTS   19                                      /* allows max 256 Mb block */
86288 +#define SDRAM_MIN_BLOCK_SIZE   (1 << SDRAM_MIN_BLOCK_SHIFT)
86289 +#define SDRAM_MAX_BLOCK_SIZE   (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1))
86290 +
86291 +#if PAGE_SHIFT < 13
86292 +#define SDRAM_PAGE_SIZE                8192
86293 +#define SDRAM_PGOFF_OFFSET     1
86294 +#define SDRAM_PGOFF_MASK       (~SDRAM_PGOFF_OFFSET)
86295 +#else
86296 +#define SDRAM_PAGE_SIZE                PAGE_SIZE
86297 +#define SDRAM_PGOFF_OFFSET     0
86298 +#define SDRAM_PGOFF_MASK       (~SDRAM_PGOFF_OFFSET)
86299 +#endif
86300 +
86301 +typedef struct elan4_sdram
86302 +{
86303 +    sdramaddr_t        b_base;                                 /* offset in sdram bar */
86304 +    unsigned           b_size;                                 /* size of bank */
86305 +    ioaddr_t           b_ioaddr;                               /* ioaddr where mapped into the kernel */
86306 +    ELAN4_MAP_HANDLE   b_handle;                               /*    and mapping handle */
86307 +    bitmap_t          *b_bitmaps[SDRAM_NUM_FREE_LISTS];        /* buddy allocator bitmaps */
86308 +} ELAN4_SDRAM_BANK;
86309 +
86310 +/* command queue */
86311 +typedef struct elan4_cq 
86312 +{
86313 +    struct elan4_cqa    *cq_cqa;                                       /* command queue allocator this belongs to */
86314 +    unsigned            cq_idx;                                        /*  and which command queue this is */
86315 +
86316 +    sdramaddr_t                 cq_space;                                      /* sdram backing up command queue */
86317 +    unsigned            cq_size;                                       /* size value */
86318 +    unsigned            cq_perm;                                       /* permissions */
86319 +    ioaddr_t            cq_mapping;                                    /* mapping of command queue page */
86320 +    ELAN4_MAP_HANDLE    cq_handle;                                     /*    and mapping handle */
86321 +} ELAN4_CQ;
86322 +
86323 +/* cqtype flags to elan4_alloccq() */
86324 +#define CQ_Priority    (1 << 0)
86325 +#define CQ_Reorder     (1 << 1)
86326 +
86327 +/* command queues are allocated in chunks, so that all the
86328 + * command ports are in a single system page */
86329 +#define ELAN4_CQ_PER_CQA       MAX(1, (PAGESIZE/CQ_CommandMappingSize))
86330 +
86331 +/* maximum number of command queues per context */
86332 +#define ELAN4_MAX_CQA          (256 / ELAN4_CQ_PER_CQA)
86333 +
86334 +typedef struct elan4_cqa
86335 +{
86336 +    struct list_head   cqa_link;                                       /* linked together */
86337 +    bitmap_t           cqa_bitmap[BT_BITOUL(ELAN4_CQ_PER_CQA)];        /* bitmap of which are free */
86338 +    unsigned int        cqa_type;                                      /* allocation type */
86339 +    unsigned int       cqa_cqnum;                                      /* base cq number */
86340 +    unsigned int       cqa_ref;                                        /* "mappings" to a queue */
86341 +    unsigned int       cqa_idx;                                        /* index number */
86342 +    ELAN4_CQ           cqa_cq[ELAN4_CQ_PER_CQA];                       /* command queue entries */
86343 +} ELAN4_CQA;
86344 +
86345 +#define elan4_cq2num(cq)       ((cq)->cq_cqa->cqa_cqnum + (cq)->cq_idx)
86346 +#define elan4_cq2idx(cq)       ((cq)->cq_cqa->cqa_idx * ELAN4_CQ_PER_CQA + (cq)->cq_idx)
86347 +
86348 +typedef struct elan4_ctxt
86349 +{
86350 +    struct elan4_dev      *ctxt_dev;                                   /* device we're associated with */
86351 +    struct list_head       ctxt_link;                                  /* chained on device */
86352 +    
86353 +    struct elan4_trap_ops *ctxt_ops;                                   /* client specific operations */
86354 +
86355 +    signed                ctxt_num;                                    /* local context number */
86356 +
86357 +    struct list_head      ctxt_cqalist;                                /* link list of command queue allocators */
86358 +    bitmap_t              ctxt_cqamap[BT_BITOUL(ELAN4_MAX_CQA)];       /*   bitmap for allocating cqa_idx */
86359 +
86360 +    ELAN4_HASH_ENTRY     **ctxt_mmuhash[2];                            /* software hash tables */
86361 +    spinlock_t            ctxt_mmulock;                                /*   and spinlock. */
86362 +} ELAN4_CTXT;
86363 +
86364 +typedef struct elan4_trap_ops
86365 +{
86366 +    void       (*op_eproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status);
86367 +    void       (*op_cproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum);
86368 +    void       (*op_dproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit);
86369 +    void       (*op_tproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status);
86370 +    void       (*op_iproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit);
86371 +    void       (*op_interrupt)  (ELAN4_CTXT *ctxt, E4_uint64 cookie);
86372 +    void       (*op_neterrmsg)  (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg);
86373 +} ELAN4_TRAP_OPS;
86374 +
86375 +typedef struct elan4_route_table
86376 +{
86377 +    spinlock_t  tbl_lock;
86378 +    unsigned   tbl_size;
86379 +    sdramaddr_t tbl_entries;
86380 +} ELAN4_ROUTE_TABLE;
86381 +
86382 +#ifdef ELAN4_LARGE_PAGE_SUPPORT
86383 +#define NUM_HASH_TABLES                2
86384 +#else
86385 +#define NUM_HASH_TABLES                1
86386 +#endif
86387 +
86388 +#define DEV_STASH_ROUTE_COUNT 20
86389 +
86390 +typedef struct elan4_route_ringbuf {
86391 +    int start;
86392 +    int end;
86393 +    E4_VirtualProcessEntry routes[DEV_STASH_ROUTE_COUNT]; 
86394 +} ELAN4_ROUTE_RINGBUF;
86395 +
86396 +#define elan4_ringbuf_init(ringbuf) memset(&ringbuf, 0, sizeof(ELAN4_ROUTE_RINGBUF));
86397 +
86398 +typedef struct elan4_dev
86399 +{
86400 +    ELAN4_CTXT          dev_ctxt;                                      /* context for device operations */
86401 +
86402 +    ELAN4_DEV_OSDEP     dev_osdep;                                     /* OS specific entries */
86403 +
86404 +    int                         dev_instance;                                  /* device number */
86405 +    ELAN_DEVINFO        dev_devinfo;                                   /* device information (revision etc */
86406 +    ELAN_POSITION       dev_position;                                  /* position connected to switch */
86407 +    ELAN_DEV_IDX        dev_idx;                                       /* device idx registered with elanmod */
86408 +
86409 +    kmutex_t            dev_lock;                                      /* lock for device state/references */
86410 +    unsigned            dev_state;                                     /* device state */
86411 +    unsigned            dev_references;                                /*  # references */
86412 +
86413 +    ioaddr_t            dev_regs;                                      /* Mapping of device registers */
86414 +    ELAN4_MAP_HANDLE    dev_regs_handle;
86415 +    ioaddr_t            dev_rom;                                       /* Mapping of rom */
86416 +    ELAN4_MAP_HANDLE    dev_rom_handle;
86417 +    ioaddr_t            dev_i2c;                                       /* Mapping of I2C registers */
86418 +    ELAN4_MAP_HANDLE    dev_i2c_handle;
86419 +    
86420 +    E4_uint64           dev_sdram_cfg;                                 /* SDRAM config value (from ROM) */
86421 +    E4_uint64           dev_sdram_initial_ecc_val;                     /* power on ECC register value */
86422 +    int                         dev_sdram_numbanks;                            /* # banks of sdram */
86423 +    ELAN4_SDRAM_BANK    dev_sdram_banks[SDRAM_MAX_BANKS];              /* Mapping of sdram banks */
86424 +    spinlock_t          dev_sdram_lock;                                /* spinlock for buddy allocator */
86425 +    sdramaddr_t                 dev_sdram_freelists[SDRAM_NUM_FREE_LISTS];
86426 +    unsigned            dev_sdram_freecounts[SDRAM_NUM_FREE_LISTS];
86427 +
86428 +    sdramaddr_t                 dev_cacheflush_space;                          /* sdram reserved for cache flush operation */
86429 +
86430 +    sdramaddr_t                 dev_faultarea;                                 /* fault areas for each unit */
86431 +    sdramaddr_t                 dev_inputtraparea;                             /* trap area for trapped transactions */
86432 +    sdramaddr_t                 dev_ctxtable;                                  /* context table (E4_ContextControlBlock) */
86433 +    int                         dev_ctxtableshift;                             /* and size (in bits) */
86434 +
86435 +    E4_uint32           dev_syscontrol;                                /* copy of system control register */
86436 +    spinlock_t          dev_syscontrol_lock;                           /*   spinlock to sequentialise modifications */
86437 +    unsigned            dev_direct_map_pci_writes;                     /*   # counts for CONT_DIRECT_MAP_PCI_WRITES */
86438 +
86439 +    volatile E4_uint32  dev_intmask;                                   /* copy of interrupt mask register */
86440 +    spinlock_t          dev_intmask_lock;                              /*   spinlock to sequentialise modifications */
86441 +
86442 +    /* i2c section */
86443 +    spinlock_t          dev_i2c_lock;                                  /* spinlock for i2c operations */
86444 +    unsigned int         dev_i2c_led_disabled;                         /* count of reasons led auto update disabled */
86445 +
86446 +    /* mmu section */
86447 +    unsigned            dev_pagesizeval[NUM_HASH_TABLES];              /* page size value */
86448 +    unsigned            dev_pageshift[NUM_HASH_TABLES];                        /* pageshift in bits. */
86449 +    unsigned            dev_hashsize[NUM_HASH_TABLES];                 /* # entries in mmu hash table */
86450 +    sdramaddr_t                 dev_hashtable[NUM_HASH_TABLES];                /* mmu hash table */
86451 +    ELAN4_HASH_ENTRY   *dev_mmuhash[NUM_HASH_TABLES];                  /*   and software shadow */
86452 +    ELAN4_HASH_ENTRY   **dev_mmufree[NUM_HASH_TABLES];                 /*   and partially free blocks */
86453 +    ELAN4_HASH_ENTRY    *dev_mmufreelist;                              /*   and free blocks */
86454 +    spinlock_t           dev_mmulock;
86455 +    E4_uint16           dev_topaddr[4];                                /* top address values */
86456 +    unsigned char       dev_topaddrvalid;
86457 +    unsigned char       dev_topaddrmode;
86458 +    unsigned char       dev_pteval;                                    /* allow setting of relaxed order/dont snoop attributes */
86459 +
86460 +    unsigned            dev_rsvd_hashmask[NUM_HASH_TABLES];
86461 +    unsigned            dev_rsvd_hashval[NUM_HASH_TABLES];
86462 +
86463 +    /* run queues */
86464 +    sdramaddr_t                 dev_comqlowpri;                                /* CProc low & high pri run queues */
86465 +    sdramaddr_t                 dev_comqhighpri;
86466 +
86467 +    sdramaddr_t                 dev_dmaqlowpri;                                /* DProc,TProc,Interrupt queues */
86468 +    sdramaddr_t                 dev_dmaqhighpri;
86469 +    sdramaddr_t                 dev_threadqlowpri;
86470 +    sdramaddr_t                 dev_threadqhighpri;
86471 +    sdramaddr_t                 dev_interruptq;
86472 +
86473 +    E4_uint32           dev_interruptq_nfptr;                          /* cache next main interrupt fptr */
86474 +    struct list_head     dev_interruptq_list;                          /*   list of operations to call when space in interruptq*/
86475 +
86476 +    /* command queue section */
86477 +    sdramaddr_t                 dev_cqaddr;                                    /* SDRAM address of command queues */
86478 +    unsigned            dev_cqoffset;                                  /* offset for command queue alignment constraints */
86479 +    unsigned            dev_cqcount;                                   /* number of command queue descriptors */
86480 +    bitmap_t           *dev_cqamap;                                    /* bitmap for allocation */
86481 +    spinlock_t          dev_cqlock;                                    /* spinlock to protect bitmap */
86482 +    unsigned            dev_cqreorder;                                 /* offset for first re-ordering queue with mtrr */
86483 +
86484 +    /* halt operation section */
86485 +    struct list_head     dev_haltop_list;                              /* list of operations to call when units halted */
86486 +    E4_uint32           dev_haltop_mask;                               /* mask of which ones to halt */
86487 +    E4_uint32           dev_haltop_active;                             /* mask of which haltops are executing */
86488 +    spinlock_t          dev_haltop_lock;                               /*    and their spinlock */
86489 +
86490 +    struct {
86491 +       struct list_head list;                                          /* list of halt operations for DMAs */
86492 +       ELAN4_CQ        *cq;                                            /*   and command queue's */
86493 +       ELAN4_INTOP      intop;                                         /*   and main interrupt op */
86494 +       E4_uint64        status;                                        /*   status register (when waiting for intop)*/
86495 +    }                   dev_dma_flushop[2];
86496 +
86497 +    unsigned            dev_halt_all_count;                            /* count of reasons to halt all units */
86498 +    unsigned            dev_halt_lowpri_count;                         /* count of reasons to halt lowpri queues */
86499 +    unsigned            dev_halt_cproc_count;                          /* count of reasons to halt command processor */
86500 +    unsigned            dev_halt_dproc_count;                          /* count of reasons to halt dma processor */
86501 +    unsigned            dev_halt_tproc_count;                          /* count of reasons to halt thread processor */
86502 +    unsigned            dev_discard_all_count;                         /* count of reasons to discard all packets */
86503 +    unsigned            dev_discard_lowpri_count;                      /* count of reasons to discard non-system packets */
86504 +    unsigned            dev_discard_highpri_count;                     /* count of reasons to discard system packets */
86505 +
86506 +    E4_uint32           dev_schedstatus;                               /* copy of schedule status register */
86507 +
86508 +    /* local context allocation section */
86509 +    spinlock_t          dev_ctxlock;                                   /* spinlock to protect bitmap */
86510 +    bitmap_t           *dev_ctxmap;                                    /* bitmap for local context allocation */
86511 +
86512 +    spinlock_t          dev_ctxt_lock;                                 /* spinlock to protect context list */
86513 +    struct list_head     dev_ctxt_list;                                        /* linked list of contexts */
86514 +
86515 +    /* locks to sequentialise interrupt handling */
86516 +    spinlock_t          dev_trap_lock;                                 /* spinlock while handling a trap */
86517 +    spinlock_t          dev_requeue_lock;                              /* spinlock sequentialising cproc requeue */
86518 +
86519 +    /* error rate interrupt section */
86520 +    long                dev_error_time;                                /* lbolt at start of sampling period */
86521 +    unsigned            dev_errors_per_period;                         /* errors so far this sampling period */
86522 +    timer_fn_t          dev_error_timeoutid;                           /* timeout to re-enable error interrupts */
86523 +    timer_fn_t          dev_linkerr_timeoutid;                         /* timeout to clear link error led */
86524 +
86525 +    /* kernel threads */
86526 +    unsigned            dev_stop_threads:1;                            /* kernel threads should exit */
86527 +
86528 +    /* main interrupt thread */
86529 +    kcondvar_t          dev_mainint_wait;                              /* place for mainevent interrupt thread to sleep */
86530 +    spinlock_t          dev_mainint_lock;                              /*   and it's spinlock */
86531 +    unsigned            dev_mainint_started:1;
86532 +    unsigned            dev_mainint_stopped:1;
86533 +
86534 +    /* device context - this is used to flush insert cache/instruction cache/dmas & threads */
86535 +    ELAN4_CPROC_TRAP     dev_cproc_trap;                               /* space to extract cproc trap into */
86536 +
86537 +    struct list_head     dev_intop_list;                               /* list of main interrupt operations */
86538 +    spinlock_t          dev_intop_lock;                                /*   and spinlock */
86539 +    E4_uint64           dev_intop_cookie;                              /*   and next cookie to use */
86540 +
86541 +    spinlock_t          dev_flush_lock;                                /* spinlock for flushing */
86542 +    kcondvar_t          dev_flush_wait;                                /*  and place to sleep */
86543 +
86544 +    ELAN4_CQ           *dev_flush_cq[COMMAND_INSERTER_CACHE_ENTRIES];  /* command queues to flush the insert cache */
86545 +    ELAN4_INTOP          dev_flush_op[COMMAND_INSERTER_CACHE_ENTRIES]; /* and a main interrupt operation for each one */
86546 +    unsigned            dev_flush_finished;                            /* flush command finished */
86547 +
86548 +    ELAN4_HALTOP        dev_iflush_haltop;                             /* halt operation for icache flush */
86549 +    unsigned            dev_iflush_queued:1;                           /* icache haltop queued */
86550 +
86551 +    ELAN4_ROUTE_TABLE   *dev_routetable;                               /* virtual process table (for dma queue flush)*/
86552 +    sdramaddr_t          dev_sdrampages[2];                            /* pages of sdram to hold suspend code sequence */
86553 +    E4_Addr             dev_tproc_suspend;                             /*  st8suspend instruction */
86554 +    E4_Addr             dev_tproc_space;                               /*     and target memory */
86555 +
86556 +    sdramaddr_t                 dev_neterr_inputq;                             /* network error input queue descriptor & event */
86557 +    sdramaddr_t                 dev_neterr_slots;                              /* network error message slots */
86558 +    ELAN4_CQ           *dev_neterr_msgcq;                              /* command queue for sending messages */
86559 +    ELAN4_CQ           *dev_neterr_intcq;                              /* command queue for message received interrupt */
86560 +    ELAN4_INTOP                 dev_neterr_intop;                              /*   and it's main interrupt operation */
86561 +    E4_uint64           dev_neterr_queued;                             /* # message queued in msgcq */
86562 +    spinlock_t           dev_neterr_lock;                              /*   and spinlock .... */
86563 +
86564 +    ELAN4_DEV_STATS     dev_stats;                                     /* device statistics */
86565 +    ELAN4_ECCERRS       dev_sdramerrs[30];                             /* last few sdram errors for procfs */
86566 +
86567 +    unsigned int       *dev_ack_errors;                                /* Map of source of dproc ack errors */
86568 +    ELAN4_ROUTE_RINGBUF  dev_ack_error_routes;
86569 +    unsigned int        *dev_dproc_timeout;                             /* Ditto dproc timeout errors */
86570 +    ELAN4_ROUTE_RINGBUF  dev_dproc_timeout_routes;
86571 +    unsigned int        *dev_cproc_timeout;                             /* Ditto cproc timeout errors */
86572 +    ELAN4_ROUTE_RINGBUF  dev_cproc_timeout_routes;
86573 +
86574 +    unsigned            dev_linkerr_signalled;                         /* linkerror signalled to switch controller */
86575 +
86576 +    struct list_head     dev_hc_list;                                   /* list of the allocated hash_chunks */
86577 +
86578 +    ELAN4_IPROC_TRAP     dev_iproc_trap;                               /* space for iproc trap */
86579 +} ELAN4_DEV;
86580 +
86581 +/* values for dev_state */
86582 +#define ELAN4_STATE_STOPPED            (1 << 0)                        /* device initialised but not started */
86583 +#define ELAN4_STATE_STARTING           (1 << 1)                        /* device in process of starting */
86584 +#define ELAN4_STATE_STARTED            (1 << 2)                        /* device started */
86585 +#define ELAN4_STATE_STOPPING           (1 << 3)                        /* device in process of stopping */
86586 +
86587 +extern __inline__ unsigned long long
86588 +__elan4_readq (ELAN4_DEV *dev, ioaddr_t addr)
86589 +{
86590 +#if defined(__i386)
86591 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
86592 +    {
86593 +       uint64_t save, rval;
86594 +       unsigned long flags;
86595 +    
86596 +       local_irq_save (flags);
86597 +       asm volatile ("sfence\n" \
86598 +                     "movq %%xmm0, %0\n" \
86599 +                     "sfence\n" \
86600 +                     "movq (%2), %%xmm0\n" \
86601 +                     "sfence\n" \
86602 +                     "movq %%xmm0, %1\n"
86603 +                     "sfence\n"
86604 +                     "movq %0, %%xmm0\n"
86605 +                     "sfence\n"
86606 +                     : "=m" (save), "=m" (rval) : "r" (addr) : "memory");
86607 +       
86608 +       local_irq_restore(flags);
86609 +       
86610 +       return rval;
86611 +    }
86612 +#endif
86613 +    return readq ((void *)addr);
86614 +}
86615 +
86616 +extern __inline__ unsigned int
86617 +__elan4_readl (ELAN4_DEV *dev, ioaddr_t addr)
86618 +{
86619 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
86620 +    {
86621 +       uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7));
86622 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffffffff);
86623 +    }
86624 +    return readl ((void *)addr);
86625 +}
86626 +
86627 +extern __inline__ unsigned int
86628 +__elan4_readw (ELAN4_DEV *dev, ioaddr_t addr)
86629 +{
86630 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
86631 +    {
86632 +       uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7));
86633 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffff);
86634 +    }
86635 +    return readw ((void *)addr);
86636 +}
86637 +
86638 +extern __inline__ unsigned int
86639 +__elan4_readb (ELAN4_DEV *dev, ioaddr_t addr)
86640 +{
86641 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
86642 +    {
86643 +       uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7));
86644 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xff);
86645 +    }
86646 +    return readb ((void *)addr);
86647 +}
86648 +
86649 +/* macros for accessing dev->dev_regs.Tags. */
86650 +#define write_tag(dev,what,val)                writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Tags.what)))
86651 +#define read_tag(dev,what)             __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, Tags.what))
86652 +
86653 +/* macros for accessing dev->dev_regs.Regs. */
86654 +#define write_reg64(dev,what,val)      writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Regs.what)))
86655 +#define write_reg32(dev,what,val)      writel (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Regs.what)))
86656 +#define read_reg64(dev,what)           __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, Regs.what))
86657 +#define read_reg32(dev,what)           __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, Regs.what))
86658 +
86659 +/* macros for accessing dev->dev_regs.uRegs. */
86660 +#define write_ureg64(dev,what,val)     writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, uRegs.what)))
86661 +#define write_ureg32(dev,what,val)     writel (val, (void *) (dev->dev_regs + offsetof (E4_Registers, uRegs.what)))
86662 +#define read_ureg64(dev,what)          __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, uRegs.what))
86663 +#define read_ureg32(dev,what)          __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, uRegs.what))
86664 +
86665 +/* macros for accessing dev->dev_i2c */
86666 +#define write_i2c(dev,what,val)                writeb (val, (void *) (dev->dev_i2c + offsetof (E4_I2C, what)))
86667 +#define read_i2c(dev,what)             __elan4_readb (dev, dev->dev_i2c + offsetof (E4_I2C, what))
86668 +
86669 +/* macros for accessing dev->dev_rom */
86670 +#define read_ebus_rom(dev,off)         __elan4_readb (dev, dev->dev_rom + off)
86671 +
86672 +/* PIO flush operations - ensure writes to registers/sdram are ordered */
86673 +#ifdef CONFIG_IA64_SGI_SN2
86674 +#define pioflush_reg(dev)              read_reg32(dev,InterruptReg)
86675 +#define pioflush_sdram(dev)            elan4_sdram_readl(dev, 0)
86676 +#else
86677 +#define pioflush_reg(dev)              mb()
86678 +#define pioflush_sdram(dev)            mb()
86679 +#endif
86680 +
86681 +/* macros for manipulating the interrupt mask register */
86682 +#define SET_INT_MASK(dev,value)        \
86683 +do { \
86684 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask = (value)); \
86685 +    pioflush_reg(dev);\
86686 +} while (0)
86687 +
86688 +#define CHANGE_INT_MASK(dev, value) \
86689 +do { \
86690 +    if ((dev)->dev_intmask != (value)) \
86691 +    {\
86692 +       write_reg32 (dev, InterruptMask, (dev)->dev_intmask = (value));\
86693 +       pioflush_reg(dev);\
86694 +    }\
86695 +} while (0)
86696 +
86697 +#define ENABLE_INT_MASK(dev,value) \
86698 +do { \
86699 +    unsigned long flags; \
86700 + \
86701 +    spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \
86702 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask |= (value)); \
86703 +    pioflush_reg(dev);\
86704 +    spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \
86705 +} while (0)
86706 +
86707 +#define DISABLE_INT_MASK(dev,value) \
86708 +do { \
86709 +    unsigned long flags; \
86710 + \
86711 +    spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \
86712 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask &= ~(value)); \
86713 +    pioflush_reg(dev);\
86714 +    spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \
86715 +} while (0)
86716 +
86717 +#define SET_SYSCONTROL(dev,what,value) \
86718 +do { \
86719 +    unsigned long flags; \
86720 +\
86721 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
86722 +    if ((dev)->what++ == 0) \
86723 +        write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol |= (value)); \
86724 +    pioflush_reg(dev);\
86725 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
86726 +} while (0)
86727 +
86728 +#define CLEAR_SYSCONTROL(dev,what,value) \
86729 +do { \
86730 +    unsigned long flags; \
86731 +\
86732 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
86733 +    if (--(dev)->what == 0)\
86734 +       write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol &= ~(value)); \
86735 +    pioflush_reg (dev); \
86736 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
86737 +} while (0)
86738 +
86739 +#define PULSE_SYSCONTROL(dev,value) \
86740 +do { \
86741 +    unsigned long flags; \
86742 +\
86743 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
86744 +    write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol | (value)); \
86745 +    pioflush_reg (dev); \
86746 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
86747 +} while (0)
86748 +
86749 +#define CHANGE_SYSCONTROL(dev,add,sub) \
86750 +do { \
86751 +    unsigned long flags; \
86752 +\
86753 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
86754 +    dev->dev_syscontrol |= (add);\
86755 +    dev->dev_syscontrol &= ~(sub);\
86756 +    write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol);\
86757 +    pioflush_reg (dev); \
86758 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
86759 +} while (0)
86760 +
86761 +#define SET_SCHED_STATUS(dev, value)\
86762 +do {\
86763 +    write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\
86764 +    pioflush_reg (dev);\
86765 +} while (0)
86766 +
86767 +#define CHANGE_SCHED_STATUS(dev, value)\
86768 +do {\
86769 +    if ((dev)->dev_schedstatus != (value))\
86770 +    {\
86771 +       write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\
86772 +       pioflush_reg (dev);\
86773 +    }\
86774 +} while (0)
86775 +
86776 +#define PULSE_SCHED_RESTART(dev,value)\
86777 +do {\
86778 +    write_reg32 (dev, SchedStatus.Restart, value);\
86779 +    pioflush_reg (dev);\
86780 +} while (0)
86781 +
86782 +/* device context elan address space */
86783 +#define DEVICE_TPROC_SUSPEND_ADDR              (0x1000000000000000ull)
86784 +#define DEVICE_TPROC_SPACE_ADDR                        (0x1000000000000000ull + SDRAM_PAGE_SIZE)
86785 +#if defined(__LITTLE_ENDIAN__)
86786 +#  define DEVICE_TPROC_SUSPEND_INSTR           0xd3f040c0 /* st64suspend %r16, [%r1] */
86787 +#else
86788 +#  define DEVICE_TPROC_SUSPEND_INSTR           0xc040f0d3 /* st64suspend %r16, [%r1] */
86789 +#endif
86790 +
86791 +#define DEVICE_NETERR_INPUTQ_ADDR              (0x2000000000000000ull)
86792 +#define DEVICE_NETERR_INTCQ_ADDR               (0x2000000000000000ull + SDRAM_PAGE_SIZE)
86793 +#define DEVICE_NETERR_SLOTS_ADDR               (0x2000000000000000ull + SDRAM_PAGE_SIZE*2)
86794 +
86795 +/*
86796 + * Interrupt operation cookie space
86797 + * [50:48]     type
86798 + * [47:0]      value
86799 + */
86800 +#define INTOP_PERSISTENT                       (0x1000000000000ull)
86801 +#define INTOP_ONESHOT                          (0x2000000000000ull)
86802 +#define INTOP_TYPE_MASK                                (0x3000000000000ull)
86803 +#define INTOP_VALUE_MASK                       (0x0ffffffffffffull)
86804 +
86805 +/* functions for accessing sdram - sdram.c */
86806 +extern unsigned char      elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t ptr);
86807 +extern unsigned short     elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t ptr);
86808 +extern unsigned int       elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t ptr);
86809 +extern unsigned long long elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t ptr);
86810 +extern void               elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned char val);
86811 +extern void               elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned short val);
86812 +extern void               elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned int val);
86813 +extern void               elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned long long val);
86814 +
86815 +extern void              elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
86816 +extern void              elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
86817 +extern void              elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
86818 +extern void              elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
86819 +
86820 +extern void               elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
86821 +extern void               elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
86822 +extern void               elan4_sdram_copyl_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
86823 +extern void               elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
86824 +extern void               elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
86825 +extern void               elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
86826 +extern void               elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
86827 +extern void               elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
86828 +
86829 +/* device.c - configuration */
86830 +extern unsigned int elan4_hash_0_size_val;
86831 +extern unsigned int elan4_hash_1_size_val;
86832 +extern unsigned int elan4_ctxt_table_shift;
86833 +extern unsigned int elan4_ln2_max_cqs;
86834 +extern unsigned int elan4_dmaq_highpri_size;
86835 +extern unsigned int elan4_threadq_highpri_size;
86836 +extern unsigned int elan4_dmaq_lowpri_size;
86837 +extern unsigned int elan4_threadq_lowpri_size;
86838 +extern unsigned int elan4_interruptq_size;
86839 +extern unsigned int elan4_mainint_punt_loops;
86840 +extern unsigned int elan4_mainint_resched_ticks;
86841 +extern unsigned int elan4_linkport_lock;
86842 +extern unsigned int elan4_eccerr_recheck;
86843 +
86844 +/* device.c */
86845 +extern void               elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg);
86846 +extern void               elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op);
86847 +extern void              elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op);
86848 +extern void              elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op);
86849 +extern void              elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op);
86850 +extern void              elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri);
86851 +extern void              elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op);
86852 +
86853 +extern int                elan4_1msi0 (ELAN4_DEV *dev);
86854 +
86855 +extern int                elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops);
86856 +extern void               elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt);
86857 +extern ELAN4_CTXT        *elan4_localctxt (ELAN4_DEV *dev, unsigned num);
86858 +extern ELAN4_CTXT        *elan4_networkctxt (ELAN4_DEV *dev, unsigned num);
86859 +
86860 +extern int                elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum);
86861 +extern void               elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum);
86862 +extern void              elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state);
86863 +extern void              elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl);
86864 +
86865 +extern ELAN4_CQA *        elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx);
86866 +extern void               elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx);
86867 +extern ELAN4_CQ          *elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned cqperm, unsigned cqtype);
86868 +extern void               elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq);
86869 +extern void               elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq);
86870 +extern void               elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq);
86871 +extern void               elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart);
86872 +
86873 +extern void              elan4_flush_icache (ELAN4_CTXT *ctxt);
86874 +extern void              elan4_flush_icache_halted (ELAN4_CTXT *ctxt);
86875 +
86876 +extern int                elan4_initialise_device (ELAN4_DEV *dev);
86877 +extern void               elan4_finalise_device (ELAN4_DEV *dev);
86878 +extern int                elan4_start_device (ELAN4_DEV *dev);
86879 +extern void               elan4_stop_device (ELAN4_DEV *dev);
86880 +
86881 +extern int               elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned aritiyval);
86882 +extern int               elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos);
86883 +extern int               elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos);
86884 +extern void              elan4_get_params   (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask);
86885 +extern void              elan4_set_params   (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask);
86886 +
86887 +
86888 +extern int                elan4_read_vpd(ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) ;
86889 +
86890 +
86891 +/* device_osdep.c */
86892 +extern unsigned int      elan4_pll_cfg;
86893 +extern int               elan4_pll_div;
86894 +extern int               elan4_mod45disable;
86895 +
86896 +extern int                elan4_pciinit (ELAN4_DEV *dev);
86897 +extern void               elan4_pcifini (ELAN4_DEV *dev);
86898 +extern void              elan4_updatepll (ELAN4_DEV *dev, unsigned int val);
86899 +extern void               elan4_pcierror (ELAN4_DEV *dev);
86900 +
86901 +extern ELAN4_DEV        *elan4_reference_device (int instance, int state);
86902 +extern void              elan4_dereference_device (ELAN4_DEV *dev);
86903 +
86904 +extern ioaddr_t           elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handlep);
86905 +extern void               elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handlep);
86906 +extern unsigned long      elan4_resource_len (ELAN4_DEV *dev, unsigned bar);
86907 +
86908 +extern void               elan4_configure_writecombining (ELAN4_DEV *dev);
86909 +extern void              elan4_unconfigure_writecombining (ELAN4_DEV *dev);
86910 +
86911 +/* i2c.c */
86912 +extern int               i2c_disable_auto_led_update (ELAN4_DEV *dev);
86913 +extern void              i2c_enable_auto_led_update (ELAN4_DEV *dev);
86914 +extern int               i2c_write (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
86915 +extern int               i2c_read (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
86916 +extern int               i2c_writereg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data);
86917 +extern int               i2c_readreg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data);
86918 +extern int               i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
86919 +
86920 +#if defined(__linux__)
86921 +/* procfs_Linux.c */
86922 +extern void              elan4_procfs_device_init (ELAN4_DEV *dev);
86923 +extern void              elan4_procfs_device_fini (ELAN4_DEV *dev);
86924 +extern void              elan4_procfs_init(void);
86925 +extern void              elan4_procfs_fini(void);
86926 +
86927 +extern struct proc_dir_entry *elan4_procfs_root;
86928 +extern struct proc_dir_entry *elan4_config_root;
86929 +#endif
86930 +
86931 +/* sdram.c */
86932 +extern void              elan4_sdram_init (ELAN4_DEV *dev);
86933 +extern void               elan4_sdram_fini (ELAN4_DEV *dev);
86934 +extern void               elan4_sdram_setup_delay_lines (ELAN4_DEV *dev, int factor);
86935 +extern int                elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
86936 +extern void               elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
86937 +extern void              elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
86938 +extern sdramaddr_t        elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes);
86939 +extern void               elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
86940 +extern void               elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t base, int nbytes);
86941 +extern char              *elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, E4_uint64 ConfigReg, char *str);
86942 +
86943 +/* traps.c */
86944 +extern void               elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap);
86945 +extern void               elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap);
86946 +extern void               elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap);
86947 +extern void               elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap);
86948 +extern void               elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap);
86949 +
86950 +
86951 +extern void               elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent);
86952 +extern void               elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum);
86953 +extern void               elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit);
86954 +extern void               elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap);
86955 +extern void               elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit);
86956 +extern void elan4_ringbuf_store(ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev);
86957 +extern int                cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq, int chan);
86958 +
86959 +extern void               elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap);
86960 +extern E4_uint64          elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq);
86961 +
86962 +/* mmu.c */
86963 +extern void               elan4mmu_flush_tlb (ELAN4_DEV *dev);
86964 +extern ELAN4_HASH_ENTRY  *elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp);
86965 +extern int                elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, E4_uint64 pte);
86966 +extern void               elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len);
86967 +extern void               elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt);
86968 +
86969 +extern ELAN4_HASH_CACHE  *elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep);
86970 +extern void               elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc);
86971 +extern void               elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte);
86972 +extern E4_uint64          elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx);
86973 +extern void               elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx);
86974 +
86975 +/* mmu_osdep.c */
86976 +extern int               elan4mmu_categorise_paddr (ELAN4_DEV *dev, physaddr_t *physp);
86977 +extern int                elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type);
86978 +extern E4_uint64          elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t paddr, unsigned perm);
86979 +extern physaddr_t        elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte);
86980 +
86981 +/* neterr.c */
86982 +extern int                elan4_neterr_init (ELAN4_DEV *dev);
86983 +extern void               elan4_neterr_destroy (ELAN4_DEV *dev);
86984 +extern int                elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg);
86985 +extern int                elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap);
86986 +
86987 +/* routetable.c */
86988 +extern ELAN4_ROUTE_TABLE *elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size);
86989 +extern void               elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl);
86990 +extern void               elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry);
86991 +extern void               elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry);
86992 +extern void               elan4_invalidate_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp);
86993 +extern int                elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctxnum,
86994 +                                               unsigned lowid, unsigned highid, unsigned options);
86995 +extern int               elan4_check_route (ELAN_POSITION *pos, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags);
86996 +
86997 +/* user.c */
86998 +extern int        __categorise_command (E4_uint64 command, int *cmdSize);
86999 +extern int        __whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize);
87000 +
87001 +/*
87002 + * Local variables:
87003 + * c-file-style: "stroustrup"
87004 + * End:
87005 + */
87006 +#endif /* __ELAN4_ELANDEV_H */
87007 Index: linux-2.6.5-7.191/include/elan4/device_Linux.h
87008 ===================================================================
87009 --- linux-2.6.5-7.191.orig/include/elan4/device_Linux.h 2004-02-23 16:02:56.000000000 -0500
87010 +++ linux-2.6.5-7.191/include/elan4/device_Linux.h      2005-07-28 14:52:52.960661712 -0400
87011 @@ -0,0 +1,117 @@
87012 +/*
87013 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
87014 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87015 + * 
87016 + *    For licensing information please see the supplied COPYING file
87017 + *
87018 + */
87019 +
87020 +#ifndef __ELAN4_ELANDEV_LINUX_H
87021 +#define __ELAN4_ELANDEV_LINUX_H
87022 +
87023 +#ident "$Id: device_Linux.h,v 1.19.2.1 2005/03/07 16:29:06 david Exp $"
87024 +/*      $Source: /cvs/master/quadrics/elan4mod/device_Linux.h,v $*/
87025 +
87026 +#include <qsnet/autoconf.h>
87027 +
87028 +#if !defined(NO_COPROC)                                /* The older coproc kernel patch is applied */
87029 +#include <linux/coproc.h>
87030 +
87031 +#define ioproc_ops             coproc_ops_struct
87032 +#define ioproc_register_ops    register_coproc_ops
87033 +#define ioproc_unregister_ops  unregister_coproc_ops
87034 +
87035 +#define IOPROC_MM_STRUCT_ARG   1
87036 +#define IOPROC_PATCH_APPLIED   1
87037 +
87038 +#elif !defined(NO_IOPROC)                      /* The new ioproc kernel patch is applied */
87039 +#include <linux/ioproc.h>
87040 +
87041 +#define IOPROC_PATCH_APPLIED   1
87042 +#endif
87043 +
87044 +
87045 +#if defined(MPSAS)
87046 +#include <elan4/mpsas.h>
87047 +#endif
87048 +
87049 +#if defined(CONFIG_DEVFS_FS)
87050 +#include <linux/devfs_fs_kernel.h>
87051 +#endif
87052 +
87053 +#define ELAN4_MAJOR              61
87054 +#define ELAN4_NAME               "elan4"
87055 +#define ELAN4_MAX_CONTROLLER     16           /* limited to 4 bits */
87056
87057 +/* OS dependant component of ELAN4_DEV struct */
87058 +typedef struct elan4_dev_osdep
87059 +{
87060 +    struct pci_dev       *pdev;                        /* PCI config data */
87061 +
87062 +    struct proc_dir_entry *procdir;
87063 +    struct proc_dir_entry *configdir;
87064 +    struct proc_dir_entry *statsdir;
87065 +
87066 +#if defined(CONFIG_DEVFS_FS)
87067 +    devfs_handle_t devfs_control;
87068 +    devfs_handle_t devfs_sdram;
87069 +    devfs_handle_t devfs_user;
87070 +#endif
87071 +
87072 +#if defined(CONFIG_MTRR)
87073 +    int                           sdram_mtrr;
87074 +    int                           regs_mtrr;
87075 +#endif
87076 +} ELAN4_DEV_OSDEP;
87077 +
87078 +/* /dev/elan/rmsX */
87079 +
87080 +/* /dev/elan4/controlX */
87081 +typedef struct control_private
87082 +{
87083 +    struct elan4_dev   *pr_dev;
87084 +    unsigned           pr_boundary_scan;
87085 +} CONTROL_PRIVATE;
87086 +
87087 +/* /dev/elan4/sdramX */
87088 +typedef struct mem_page
87089 +{
87090 +    struct mem_page *pg_next;
87091 +    sdramaddr_t      pg_addr;
87092 +    unsigned long    pg_pgoff;
87093 +    unsigned        pg_ref;
87094 +} MEM_PAGE;
87095 +
87096 +#define MEM_HASH_SIZE  32
87097 +#define MEM_HASH(pgoff)        ((pgoff) & (MEM_HASH_SIZE-1))
87098 +
87099 +typedef struct mem_private
87100 +{
87101 +    struct elan4_dev *pr_dev;
87102 +    MEM_PAGE         *pr_pages[MEM_HASH_SIZE];
87103 +    spinlock_t        pr_lock;
87104 +} MEM_PRIVATE;
87105 +
87106 +/* /dev/elan4/userX */
87107 +typedef struct user_private
87108 +{
87109 +    atomic_t         pr_ref;
87110 +    struct user_ctxt *pr_uctx;
87111 +    struct mm_struct *pr_mm;
87112 +
87113 +#if defined(IOPROC_PATCH_APPLIED)
87114 +    struct ioproc_ops pr_ioproc;
87115 +#endif
87116 +} USER_PRIVATE;
87117 +
87118 +/* No mapping handles on linux */
87119 +typedef void *ELAN4_MAP_HANDLE;
87120 +
87121 +#define ELAN4_TASK_HANDLE()    ((unsigned long) current->mm)
87122 +
87123 +/*
87124 + * Local variables:
87125 + * c-file-style: "stroustrup"
87126 + * End:
87127 + */
87128 +#endif /* __ELAN4_ELANDEV_LINUX_H */
87129 Index: linux-2.6.5-7.191/include/elan4/dma.h
87130 ===================================================================
87131 --- linux-2.6.5-7.191.orig/include/elan4/dma.h  2004-02-23 16:02:56.000000000 -0500
87132 +++ linux-2.6.5-7.191/include/elan4/dma.h       2005-07-28 14:52:52.960661712 -0400
87133 @@ -0,0 +1,82 @@
87134 +/*
87135 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87136 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87137 + *
87138 + *    For licensing information please see the supplied COPYING file
87139 + *
87140 + */
87141 +
87142 +#ifndef __ELAN4_DMA_H
87143 +#define __ELAN4_DMA_H
87144 +
87145 +#ident "$Id: dma.h,v 1.16 2003/09/04 12:39:17 david Exp $"
87146 +/*      $Source: /cvs/master/quadrics/elan4hdr/dma.h,v $*/
87147 +
87148 +#include <elan4/types.h>
87149 +
87150 +/* Alignment for a DMA descriptor */
87151 +#define E4_DMA_ALIGN           (64)
87152 +
87153 +/* Maximum size of a single DMA ((1 << 31)-1) */
87154 +#define E4_MAX_DMA_SIZE                (0x7fffffff)
87155 +
87156 +/* 
87157 + * dma_typeSize
87158 + *
87159 + * [63:32]     Size
87160 + * [31]                unused
87161 + * [30]                IsRemote
87162 + * [29]                QueueWrite
87163 + * [28]                ShmemWrite
87164 + * [27:26]     DataType
87165 + * [25]                Broadcast
87166 + * [24]                AlignPackets
87167 + * [23:16]     FailCount
87168 + * [15:14]     unused
87169 + * [13:0]      Context
87170 + */
87171 +
87172 +#define DMA_FailCount(val)     (((val) & 0xff) << 16)
87173 +#define DMA_AlignPackets       (1 << 24)
87174 +#define DMA_Broadcast          (1 << 25)
87175 +#define DMA_ShMemWrite         (1 << 28)
87176 +#define DMA_QueueWrite         (1 << 29)
87177 +#define DMA_IsRemote           (1 << 30)
87178 +#define DMA_Context(val)       ((unsigned) (val) & 0x3ff)
87179 +#define DMA_ContextMask                0x3fffull
87180 +#define Dma_TypeSizeMask       0xfffffffffff00000ull
87181 +
87182 +#define DMA_DataTypeByte       (E4_DATATYPE_BYTE  << 26)
87183 +#define DMA_DataTypeShort      (E4_DATATYPE_SHORT << 26)
87184 +#define DMA_DataTypeWord       (E4_DATATYPE_WORD  << 26)
87185 +#define DMA_DataTypeLong       (E4_DATATYPE_DWORD << 26)
87186 +
87187 +#define E4_DMA_TYPE_SIZE(size, dataType, flags, failCount)     \
87188 +    ((((E4_uint64)(size)) << 32) |  ((dataType) & DMA_DataTypeLong) | \
87189 +     (flags) | DMA_FailCount(failCount))
87190 +
87191 +typedef volatile struct e4_dma
87192 +{
87193 +    E4_uint64          dma_typeSize;
87194 +    E4_uint64          dma_cookie;
87195 +    E4_uint64          dma_vproc;
87196 +    E4_Addr            dma_srcAddr;
87197 +    E4_Addr            dma_dstAddr;
87198 +    E4_Addr            dma_srcEvent;
87199 +    E4_Addr            dma_dstEvent;
87200 +} E4_DMA;
87201 +
87202 +/* Same as above but padded to 64-bytes */
87203 +typedef volatile struct e4_dma64
87204 +{
87205 +    E4_uint64          dma_typeSize;
87206 +    E4_uint64          dma_cookie;
87207 +    E4_uint64          dma_vproc;
87208 +    E4_Addr            dma_srcAddr;
87209 +    E4_Addr            dma_dstAddr;
87210 +    E4_Addr            dma_srcEvent;
87211 +    E4_Addr            dma_dstEvent;
87212 +    E4_Addr            dma_pad;
87213 +} E4_DMA64;
87214 +
87215 +#endif /* __ELAN4_DMA_H */
87216 Index: linux-2.6.5-7.191/include/elan4/events.h
87217 ===================================================================
87218 --- linux-2.6.5-7.191.orig/include/elan4/events.h       2004-02-23 16:02:56.000000000 -0500
87219 +++ linux-2.6.5-7.191/include/elan4/events.h    2005-07-28 14:52:52.961661560 -0400
87220 @@ -0,0 +1,179 @@
87221 +/*
87222 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87223 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87224 + *
87225 + *    For licensing information please see the supplied COPYING file
87226 + *
87227 + */
87228 +
87229 +#ifndef __ELAN4_EVENTS_H
87230 +#define __ELAN4_EVENTS_H
87231 +
87232 +#ident "$Id: events.h,v 1.22 2004/06/23 11:07:18 addy Exp $"
87233 +/*      $Source: /cvs/master/quadrics/elan4hdr/events.h,v $*/
87234 +
87235 +#define E4_EVENT_ALIGN         32
87236 +#define E4_EVENTBLOCK_SIZE     64
87237 +
87238 +#ifndef _ASM
87239 +/*
87240 + * Event locations must be aligned to a 32 byte boundary. It is very much more efficent to place
87241 + * them in elan local memory but is not essential.
87242 + */
87243 +typedef struct _E4_Event
87244 +{
87245 +    volatile E4_uint64 ev_CountAndType;
87246 +    E4_uint64          ev_Params[2];
87247 +} E4_Event;
87248 +
87249 +/* Same as above but padded to correct Event alignment */
87250 +typedef struct _E4_Event32
87251 +{
87252 +    volatile E4_uint64 ev_CountAndType;
87253 +    E4_uint64          ev_Params[2];
87254 +    E4_uint64          ev_pad;
87255 +} E4_Event32;
87256 +
87257 +/*
87258 + * An E4_EVENTBLOCK_SIZE aligned block of Main or Elan memory
87259 + */
87260 +typedef union _E4_Event_Blk
87261 +{
87262 +    /* Padded to 64-bytes in case a cache-line write is more efficient */
87263 +    volatile E4_uint8  eb_unit8[E4_EVENTBLOCK_SIZE];
87264 +    volatile E4_uint32 eb_uint32[E4_EVENTBLOCK_SIZE/sizeof(E4_uint32)];
87265 +    volatile E4_uint64 eb_uint64[E4_EVENTBLOCK_SIZE/sizeof(E4_uint64)];
87266 +} E4_Event_Blk;
87267 +#define eb_done       eb_uint32[14]
87268 +#define eb_done_dword eb_uint64[7]
87269 +
87270 +#endif /* ! _ASM */
87271 +
87272 +/*
87273 + * ev_CountAndType
87274 + *  [63:31]   Count
87275 + *  [10]      CopyType
87276 + *  [9:8]     DataType
87277 + *  [7:0]     CopySize
87278 + */
87279 +#define E4_EVENT_TYPE_MASK     0x00000000ffffffffull
87280 +#define E4_EVENT_COUNT_MASK    0xffffffff00000000ull
87281 +#define E4_EVENT_COUNT_SHIFT   32
87282 +#define E4_EVENT_COPY_TYPE_MASK        (1 << 10)
87283 +#define E4_EVENT_DATA_TYPE_MASK        (3 << 8)
87284 +#define E4_EVENT_COPY_SIZE_MASK        (0xff)
87285 +
87286 +/* CopyType */
87287 +#define E4_EVENT_COPY          (0 << 10)
87288 +#define E4_EVENT_WRITE         (1 << 10)
87289 +
87290 +/* DataType */
87291 +#define E4_EVENT_DTYPE_BYTE    (0 << 8)
87292 +#define E4_EVENT_DTYPE_SHORT   (1 << 8)
87293 +#define E4_EVENT_DTYPE_WORD    (2 << 8)
87294 +#define E4_EVENT_DTYPE_LONG    (3 << 8)
87295 +
87296 +#define EVENT_COUNT(EventPtr)  ((E4_int32)(elan4_load64 (&(EventPtr)->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT))
87297 +#define EVENT_TYPE(EventPtr)   ((E4_uint32)(elan4_load64 (&(EventPtr)->ev_CountAndType) & E4_EVENT_TYPE_MASK))
87298 +
87299 +#define E4_WAITEVENT_COUNT_TYPE_VALUE(Count, EventType, DataType, CopySize) \
87300 +       (((E4_uint64)(Count) << E4_EVENT_COUNT_SHIFT) | (EventType) | (DataType) | (CopySize))
87301 +
87302 +#define E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize)     \
87303 +       ((EventType) | (DataType) | (CopySize))
87304 +
87305 +#define E4_EVENT_INIT_VALUE(InitialCount, EventType, DataType, CopySize)       \
87306 +       (((E4_uint64)(InitialCount) << E4_EVENT_COUNT_SHIFT) | E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize))
87307 +
87308 +#define ev_CopySource  ev_Params[0]
87309 +#define ev_CopyDest    ev_Params[1]
87310 +#define ev_WritePtr    ev_Params[0]
87311 +#define ev_WriteValue  ev_Params[1]
87312 +
87313 +#define EVENT_BLK_READY(BLK) ((BLK)->eb_done != 0)
87314 +#define EVENT_READY(EVENT)   ((E4_uint32)((((volatile E4_Event *) (EVENT))->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT) >= 0)
87315 +
87316 +#define ELAN_WAIT_EVENT (0)
87317 +#define ELAN_POLL_EVENT (-1)
87318 +
87319 +#define E4_BLK_PATTERN ((E4_uint32)0xfeedface)
87320 +
87321 +#define E4_INIT_COPY_EVENT(EVENT, BLK_ELAN, BLK, SIZE)                                                         \
87322 +       do {                                                                                            \
87323 +          elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, SIZE), &(EVENT)->ev_CountAndType); \
87324 +           elan4_store64 ((BLK_ELAN), &(EVENT)->ev_CopySource); \
87325 +          elan4_store64 ((BLK), &(EVENT)->ev_CopyDest); \
87326 +       } while (0)
87327 +
87328 +#define E4_INIT_WRITE_EVENT(EVENT, DWORD)                                                              \
87329 +       do {                                                                                            \
87330 +           elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType);  \
87331 +           elan4_store64 ((DWORD), &(EVENT)->ev_WritePtr); \
87332 +           elan4_store64 ((E4_Addr) (E4_BLK_PATTERN), &(EVENT)->ev_WriteValue); \
87333 +       } while (0)
87334 +
87335 +#define E4_RESET_BLK_EVENT(BLK)                                        \
87336 +       do {                                                            \
87337 +               (BLK)->eb_done = (0);                                   \
87338 +       } while (0)
87339 +
87340 +#define E4_PRIME_BLK_EVENT(EVENT, COUNT)                               \
87341 +       do {                                                            \
87342 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), &(EVENT)->ev_CountAndType);\
87343 +       } while (0)
87344 +
87345 +#define E4_PRIME_COPY_EVENT(EVENT, SIZE, COUNT)                                \
87346 +       do {                                                            \
87347 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, (SIZE >> 3)), &(EVENT)->ev_CountAndType);\
87348 +       } while (0)
87349 +
87350 +#define E4_PRIME_WRITE_EVENT(EVENT, COUNT)                                     \
87351 +       do {                                                                    \
87352 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType);\
87353 +       } while (0)
87354 +
87355 +#ifndef _ASM
87356 +
87357 +#define E4_INPUTQ_ALIGN                        32      /* Descriptor must be 32-byte aligned */
87358 +
87359 +typedef struct _E4_InputQueue
87360 +{
87361 +   volatile E4_Addr    q_bptr;         /* 64 bit aligned ptr to current back item */
87362 +   E4_Addr             q_fptr;         /* 64 bit aligned ptr to current front item */
87363 +   E4_uint64           q_control;      /* this defines the last item, item size, and offset back to the first item. */
87364 +   E4_Addr             q_event;        /* queue event */
87365 +} E4_InputQueue;
87366 +
87367 +#define E4_INPUTQ_LASTITEM_MASK        0x00000000ffffffffULL
87368 +#define E4_INPUTQ_ITEMSIZE_MASK                0x000000ff00000000ULL
87369 +#define E4_INPUTQ_LASTITEM_OFFSET_MASK 0xffffff0000000000ULL
87370 +#define E4_INPUTQ_LASTITEM_SHIFT       0
87371 +#define E4_INPUTQ_ITEMSIZE_SHIFT       32
87372 +#define E4_INPUTQ_LASTITEM_OFFSET_SHIFT        40
87373 +
87374 +/*
87375 + * Macro to initialise the InputQueue control word given the FirstItem, LastItem & ItemSize
87376 + * FirstItem and LastItem are 64 bit double word aligned elan addresses.
87377 + */
87378 +#define E4_InputQueueControl(FirstItem, LastItem, ItemSizeInBytes)\
87379 +   (((((E4_uint64)(LastItem)))                                                      & E4_INPUTQ_LASTITEM_MASK) |\
87380 +    ((((E4_uint64)(ItemSizeInBytes))        << (E4_INPUTQ_ITEMSIZE_SHIFT-3))        & E4_INPUTQ_ITEMSIZE_MASK)  |\
87381 +    ((((E4_uint64)((FirstItem)-(LastItem))) << (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3)) & E4_INPUTQ_LASTITEM_OFFSET_MASK))    
87382 +
87383 +/* 
87384 + * LastItemOffset is a sign extended -ve quantity with LastItemOffset[26:3] == q_control[63:40]
87385 + * we sign extend this by setting LastItemOffset[63:27] to be #one.
87386 + */
87387 +#define E4_InputQueueLastItemOffset(control)  ((((E4_int64) -1) << (64 - (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))) | \
87388 +                                              ((E4_int64) (((control) & E4_INPUTQ_LASTITEM_OFFSET_MASK) >> (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))))
87389 +#define E4_InputQueueItemSize(control)       (((control) & E4_INPUTQ_ITEMSIZE_MASK) >> (E4_INPUTQ_ITEMSIZE_SHIFT-3))
87390 +
87391 +/*
87392 + * Macro to increment the InputQ front pointer taking into account wrap 
87393 + */
87394 +#define E4_InputQueueFptrIncrement(Q, FirstItem, LastItem, ItemSizeInBytes) \
87395 +       ((Q)->q_fptr = ( ((Q)->q_fptr == (LastItem)) ? (FirstItem) : ((Q)->q_fptr + (ItemSizeInBytes))) )
87396 +
87397 +#endif /* _ASM */
87398 +
87399 +#endif /* __ELAN4_EVENTS_H */
87400 Index: linux-2.6.5-7.191/include/elan4/i2c.h
87401 ===================================================================
87402 --- linux-2.6.5-7.191.orig/include/elan4/i2c.h  2004-02-23 16:02:56.000000000 -0500
87403 +++ linux-2.6.5-7.191/include/elan4/i2c.h       2005-07-28 14:52:52.961661560 -0400
87404 @@ -0,0 +1,47 @@
87405 +/*
87406 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87407 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87408 + *
87409 + *    For licensing information please see the supplied COPYING file
87410 + *
87411 + */
87412 +
87413 +#ifndef _ELAN4_I2C_H
87414 +#define _ELAN4_I2C_H
87415 +
87416 +#ident "@(#)$Id: i2c.h,v 1.10 2003/12/02 16:11:22 lee Exp $ $Name: QSNETMODULES-4-31_20050321 $"
87417 +/*      $Source: /cvs/master/quadrics/elan4hdr/i2c.h,v $*/
87418 +
87419 +/* I2C address space - bits[7:1] */
87420 +#define I2C_LED_I2C_ADDR                       0x20
87421 +#define I2C_TEMP_ADDR                          0x48
87422 +#define I2C_EEPROM_ADDR                                0x50
87423 +
87424 +#define I2C_WRITE_ADDR(addr)                   ((addr) << 1 | 0)
87425 +#define I2C_READ_ADDR(addr)                    ((addr) << 1 | 1)
87426 +
87427 +/* I2C EEPROM appears as 8 I2C 256 byte devices */
87428 +#define I2C_24LC16B_BLOCKSIZE                  (256)
87429 +#define I2C_24LC16B_BLOCKADDR(addr)            ((addr) >> 8)
87430 +#define I2C_24LC16B_BLOCKOFFSET(addr)          ((addr) & 0xff)
87431 +
87432 +#define I2C_ELAN_EEPROM_PCI_BASEADDR           0       /* PCI config starts at addr 0 in the EEPROM */
87433 +#define I2C_ELAN_EEPROM_VPD_BASEADDR           256     /* VPD data start                            */
87434 +#define I2C_ELAN_EEPROM_PCI_SIZE               256     /* PCI data max size                         */
87435 +#define I2C_ELAN_EEPROM_VPD_SIZE               256     /* VPD data max size                         */
87436 +
87437 +#define I2C_ELAN_EEPROM_SIZE                   2048
87438 +
87439 +#define I2C_ELAN_EEPROM_DEVICE_ID              0xA0
87440 +#define I2C_ELAN_EEPROM_FAIL_LIMIT              8
87441 +
87442 +#define I2C_ELAN_EEPROM_ADDR_BLOCKSIZE_SHIFT   0x8
87443 +#define I2C_ELAN_EEPROM_ADDR_BLOCK_MASK                0x7
87444 +#define I2C_ELAN_EEPROM_ADDR_BLOCK_SHIFT       0x1
87445 +
87446 +/*
87447 + * Local variables:
87448 + * c-file-style: "stroustrup"
87449 + * End:
87450 + */
87451 +#endif /* _ELAN4_I2C_H */
87452 Index: linux-2.6.5-7.191/include/elan4/intcookie.h
87453 ===================================================================
87454 --- linux-2.6.5-7.191.orig/include/elan4/intcookie.h    2004-02-23 16:02:56.000000000 -0500
87455 +++ linux-2.6.5-7.191/include/elan4/intcookie.h 2005-07-28 14:52:52.961661560 -0400
87456 @@ -0,0 +1,62 @@
87457 +/*
87458 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
87459 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87460 + * 
87461 + *    For licensing information please see the supplied COPYING file
87462 + *
87463 + */
87464 +
87465 +#ident "@(#)$Id: intcookie.h,v 1.10 2004/08/09 14:02:37 daniel Exp $"
87466 +/*      $Source: /cvs/master/quadrics/elan4mod/intcookie.h,v $*/
87467 +
87468 +#ifndef __ELAN4_INTCOOKIE_H
87469 +#define __ELAN4_INTCOOKIE_H
87470 +
87471 +typedef E4_uint64 ELAN4_INTCOOKIE;
87472 +
87473 +#ifdef __KERNEL__
87474 +
87475 +typedef struct intcookie_entry
87476 +{
87477 +    struct intcookie_entry    *ent_next;
87478 +    struct intcookie_entry    *ent_prev;
87479 +
87480 +    spinlock_t                ent_lock;
87481 +    unsigned                  ent_ref;
87482 +
87483 +    ELAN4_INTCOOKIE           ent_cookie;
87484 +    ELAN4_INTCOOKIE           ent_fired;
87485 +    kcondvar_t                ent_wait;
87486 +} INTCOOKIE_ENTRY;
87487 +
87488 +typedef struct intcookie_table
87489 +{
87490 +    struct intcookie_table    *tbl_next;
87491 +    struct intcookie_table    *tbl_prev;
87492 +
87493 +    ELAN_CAPABILITY           *tbl_cap;
87494 +
87495 +    spinlock_t                tbl_lock;
87496 +    unsigned                  tbl_ref;
87497 +    INTCOOKIE_ENTRY           *tbl_entries;
87498 +} INTCOOKIE_TABLE;
87499 +
87500 +extern void                intcookie_init(void);
87501 +extern void                intcookie_fini(void);
87502 +extern INTCOOKIE_TABLE    *intcookie_alloc_table (ELAN_CAPABILITY *cap);
87503 +extern void                intcookie_free_table (INTCOOKIE_TABLE *tbl);
87504 +extern int                 intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
87505 +extern int                 intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
87506 +extern int                 intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
87507 +extern int                 intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie);
87508 +extern int                 intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
87509 +extern int                 intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
87510 +
87511 +#endif /* __KERNEL */
87512 +
87513 +/*
87514 + * Local variables:
87515 + * c-file-style: "stroustrup"
87516 + * End:
87517 + */
87518 +#endif /* __ELAN4_INTCOOKIE_H */
87519 Index: linux-2.6.5-7.191/include/elan4/ioctl.h
87520 ===================================================================
87521 --- linux-2.6.5-7.191.orig/include/elan4/ioctl.h        2004-02-23 16:02:56.000000000 -0500
87522 +++ linux-2.6.5-7.191/include/elan4/ioctl.h     2005-07-28 14:52:52.962661408 -0400
87523 @@ -0,0 +1,320 @@
87524 +/*
87525 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
87526 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87527 + * 
87528 + *    For licensing information please see the supplied COPYING file
87529 + *
87530 + */
87531 +
87532 +#ifndef __ELAN4_IOCTL_H
87533 +#define __ELAN4_IOCTL_H
87534 +
87535 +#ident "@(#)$Id: ioctl.h,v 1.27.6.2 2005/01/11 12:15:39 duncant Exp $"
87536 +/*      $Source: /cvs/master/quadrics/elan4mod/ioctl.h,v $*/
87537 +
87538 +#include <elan/devinfo.h>
87539 +#include <elan/capability.h>
87540 +
87541 +#include <elan4/dma.h>
87542 +#include <elan4/neterr.h>
87543 +#include <elan4/registers.h>
87544 +#include <elan4/intcookie.h>
87545 +
87546 +#define ELAN4IO_CONTROL_PATHNAME       "/dev/elan4/control%d"
87547 +#define ELAN4IO_USER_PATHNAME          "/dev/elan4/user%d"
87548 +#define ELAN4IO_SDRAM_PATHNAME         "/dev/elan4/sdram%d"
87549 +#define ELAN4IO_MAX_PATHNAMELEN        32
87550 +
87551 +/*
87552 + * NOTE - ioctl values 0->0x1f are defined for 
87553 + *        generic/control usage.
87554 + */
87555 +
87556 +/* Macro to generate 'offset' to mmap "control" device */
87557 +#define OFF_TO_BAR(off)                (((off) >> 28) & 0xF)
87558 +#define OFF_TO_OFFSET(off)     ((off) & 0x0FFFFFFF)
87559 +#define GEN_OFF(bar,off)       (((bar) << 28) | ((off) & 0x0FFFFFFF))
87560 +
87561 +/* Definiations for generic ioctls */
87562 +#define ELAN4IO_GENERIC_BASE           0x00
87563 +
87564 +typedef struct elan4io_stats_struct
87565 +{
87566 +    int                       which;
87567 +    unsigned long long ptr;                                    /* always pass pointer as 64 bit */
87568 +} ELAN4IO_STATS_STRUCT;
87569 +
87570 +#define ELAN4IO_STATS                  _IOR ('e', ELAN4IO_GENERIC_BASE + 0, ELAN4IO_STATS_STRUCT)
87571 +#define ELAN4IO_DEVINFO                        _IOR ('e', ELAN4IO_GENERIC_BASE + 1, ELAN_DEVINFO)
87572 +#define ELAN4IO_POSITION               _IOR ('e', ELAN4IO_GENERIC_BASE + 2, ELAN_POSITION)
87573 +
87574 +
87575 +/* 
87576 + * Definitions for /dev/elan4/controlX
87577 + */
87578 +#define ELAN4IO_CONTROL_BASE           0x20
87579 +
87580 +#define ELAN4IO_GET_POSITION           _IOR ('e', ELAN4IO_CONTROL_BASE + 0, ELAN_POSITION)
87581 +#define ELAN4IO_SET_POSITION           _IOW ('e', ELAN4IO_CONTROL_BASE + 1, ELAN_POSITION)
87582 +#define ELAN4IO_DEBUG_SNAPSHOT         _IOW ('e', ELAN4IO_CONTROL_BASE + 2, )
87583 +
87584 +typedef struct elan4io_params_mask_struct
87585 +{
87586 +    unsigned short     p_mask;
87587 +    ELAN_PARAMS                p_params;
87588 +} ELAN4IO_PARAMS_STRUCT;
87589 +#define ELAN4IO_GET_PARAMS             _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN4IO_PARAMS_STRUCT)
87590 +#define ELAN4IO_SET_PARAMS             _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN4IO_PARAMS_STRUCT)
87591 +
87592 +/* old versions - implicit p_mask == 3 */
87593 +#define ELAN4IO_OLD_GET_PARAMS         _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN_PARAMS)
87594 +#define ELAN4IO_OLD_SET_PARAMS         _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN_PARAMS)
87595 +
87596 +/*
87597 + * Definitions for /dev/elan4/userX
87598 + */
87599 +#define ELAN4IO_USER_BASE              0x40
87600 +
87601 +#define ELAN4IO_FREE                   _IO   ('e', ELAN4IO_USER_BASE + 0)
87602 +#define ELAN4IO_ATTACH                 _IOWR ('e', ELAN4IO_USER_BASE + 1, ELAN_CAPABILITY)
87603 +#define ELAN4IO_DETACH                 _IOWR ('e', ELAN4IO_USER_BASE + 2, ELAN_CAPABILITY)
87604 +#define ELAN4IO_BLOCK_INPUTTER         _IO   ('e', ELAN4IO_USER_BASE + 3)
87605 +
87606 +typedef struct elan4io_add_p2pvp_struct 
87607 +{
87608 +    unsigned        vp_process;
87609 +    ELAN_CAPABILITY vp_capability;
87610 +} ELAN4IO_ADD_P2PVP_STRUCT;
87611 +
87612 +#define ELAN4IO_ADD_P2PVP              _IOW  ('e', ELAN4IO_USER_BASE + 4, ELAN4IO_ADD_P2PVP_STRUCT)
87613 +
87614 +typedef struct elan4io_add_bcastvp_struct
87615 +{
87616 +    unsigned int       vp_process;
87617 +    unsigned int       vp_lowvp;
87618 +    unsigned int       vp_highvp;
87619 +} ELAN4IO_ADD_BCASTVP_STRUCT;
87620 +
87621 +#define ELAN4IO_ADD_BCASTVP            _IOW  ('e', ELAN4IO_USER_BASE + 5, ELAN4IO_ADD_BCASTVP_STRUCT)
87622 +
87623 +#define ELAN4IO_REMOVEVP               _IO   ('e', ELAN4IO_USER_BASE + 6)
87624 +
87625 +typedef struct elan4io_route_struct
87626 +{
87627 +    unsigned int          rt_process;
87628 +    unsigned int          rt_error;
87629 +    E4_VirtualProcessEntry rt_route;
87630 +} ELAN4IO_ROUTE_STRUCT;
87631 +
87632 +#define ELAN4IO_SET_ROUTE              _IOW  ('e', ELAN4IO_USER_BASE + 7, ELAN4IO_ROUTE_STRUCT)
87633 +#define ELAN4IO_RESET_ROUTE            _IOW  ('e', ELAN4IO_USER_BASE + 9, ELAN4IO_ROUTE_STRUCT)
87634 +#define ELAN4IO_GET_ROUTE              _IOWR ('e', ELAN4IO_USER_BASE + 8, ELAN4IO_ROUTE_STRUCT)
87635 +#define ELAN4IO_CHECK_ROUTE            _IOWR ('e', ELAN4IO_USER_BASE + 10, ELAN4IO_ROUTE_STRUCT)
87636 +
87637 +typedef struct elan4io_alloc_cq_struct
87638 +{
87639 +    unsigned int cq_size;                                      /* input: size of queue */
87640 +    unsigned int cq_perm;                                      /* input: requested permissions */
87641 +    unsigned int cq_type;                                      /* input: queue type */
87642 +    unsigned int cq_indx;                                      /* output: queue number */
87643 +} ELAN4IO_ALLOCCQ_STRUCT;
87644 +
87645 +#define ELAN4IO_ALLOCCQ                        _IOWR ('e', ELAN4IO_USER_BASE + 11, ELAN4IO_ALLOCCQ_STRUCT)
87646 +#define ELAN4IO_FREECQ                 _IOWR ('e', ELAN4IO_USER_BASE + 12, unsigned)
87647 +
87648 +#define ELAN4IO_CQ_TYPE_REORDER                1                       /* revb reordering command queue */
87649 +
87650 +typedef struct elan4io_perm_struct
87651 +{
87652 +    E4_Addr            ps_eaddr;
87653 +    E4_uint64          ps_len;
87654 +    unsigned long      ps_maddr;
87655 +    unsigned int       ps_perm;
87656 +} ELAN4IO_PERM_STRUCT;
87657 +
87658 +typedef struct elan4io_perm_struct32
87659 +{
87660 +    E4_Addr            ps_eaddr;
87661 +    E4_uint64          ps_len;
87662 +    unsigned int       ps_maddr;
87663 +    unsigned int       ps_perm;
87664 +} ELAN4IO_PERM_STRUCT32;
87665 +
87666 +#define ELAN4IO_SETPERM                        _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT)
87667 +#define ELAN4IO_SETPERM32              _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT32)
87668 +#define ELAN4IO_CLRPERM                        _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT)
87669 +#define ELAN4IO_CLRPERM32              _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT32)
87670 +
87671 +typedef struct elan4io_trapsig_struct
87672 +{
87673 +    int                ts_signo;
87674 +} ELAN4IO_TRAPSIG_STRUCT;
87675 +#define ELAN4IO_TRAPSIG                        _IOW  ('e', ELAN4IO_USER_BASE + 15, ELAN4IO_TRAPSIG_STRUCT)
87676 +
87677 +typedef struct elan4io_traphandler_struct
87678 +{
87679 +    unsigned int       th_nticks;                              /* number of ticks to sleep for next trap */
87680 +    unsigned int       th_proc;                                        /* elan processor involved */
87681 +    unsigned long      th_trapp;                               /* space to store trap */
87682 +} ELAN4IO_TRAPHANDLER_STRUCT;
87683 +
87684 +typedef struct elan4io_traphandler_struct32
87685 +{
87686 +    unsigned int       th_nticks;                              /* number of ticks to sleep for next trap */
87687 +    unsigned int       th_proc;                                        /* elan processor involved */
87688 +    unsigned int       th_trapp;                               /* space to store trap */
87689 +} ELAN4IO_TRAPHANDLER_STRUCT32;
87690 +
87691 +#define ELAN4IO_TRAPHANDLER            _IOW  ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT)
87692 +#define ELAN4IO_TRAPHANDLER32          _IOW  ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT32)
87693 +
87694 +typedef struct elan4io_required_mappings_struct
87695 +{
87696 +    E4_Addr    rm_upage_addr;                                  /* elan address of user page */
87697 +    E4_Addr    rm_trestart_addr;                               /* elan address of tproc restart trampoline */
87698 +} ELAN4IO_REQUIRED_MAPPINGS_STRUCT;
87699 +#define ELAN4IO_REQUIRED_MAPPINGS      _IOW  ('e', ELAN4IO_USER_BASE + 17, ELAN4IO_REQUIRED_MAPPINGS_STRUCT)
87700 +
87701 +typedef struct elan4io_resume_eproc_trap_struct
87702 +{
87703 +    E4_Addr             rs_addr;
87704 +} ELAN4IO_RESUME_EPROC_TRAP_STRUCT;
87705 +#define ELAN4IO_RESUME_EPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 18, ELAN4IO_RESUME_EPROC_TRAP_STRUCT)
87706 +
87707 +typedef struct elan4io_resume_cproc_trap_struct
87708 +{
87709 +    unsigned int       rs_indx;
87710 +} ELAN4IO_RESUME_CPROC_TRAP_STRUCT;
87711 +#define ELAN4IO_RESUME_CPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 19, ELAN4IO_RESUME_CPROC_TRAP_STRUCT)
87712 +
87713 +typedef struct elan4io_resume_dproc_trap_struct
87714 +{
87715 +    E4_DMA             rs_desc;
87716 +} ELAN4IO_RESUME_DPROC_TRAP_STRUCT;
87717 +#define ELAN4IO_RESUME_DPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 20, ELAN4IO_RESUME_DPROC_TRAP_STRUCT)
87718 +
87719 +typedef struct elan4io_resume_tproc_trap_struct
87720 +{
87721 +    E4_ThreadRegs      rs_regs;
87722 +} ELAN4IO_RESUME_TPROC_TRAP_STRUCT;
87723 +#define ELAN4IO_RESUME_TPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 21, ELAN4IO_RESUME_TPROC_TRAP_STRUCT)
87724 +
87725 +typedef struct elan4io_resume_iproc_trap_struct
87726 +{
87727 +    unsigned int       rs_channel;
87728 +    unsigned int       rs_trans;
87729 +    E4_IprocTrapHeader  rs_header;
87730 +    E4_IprocTrapData    rs_data;
87731 +} ELAN4IO_RESUME_IPROC_TRAP_STRUCT;
87732 +#define ELAN4IO_RESUME_IPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 22, ELAN4IO_RESUME_IPROC_TRAP_STRUCT)
87733 +
87734 +#define ELAN4IO_FLUSH_ICACHE           _IO   ('e', ELAN4IO_USER_BASE + 23)
87735 +#define ELAN4IO_STOP_CTXT              _IO   ('e', ELAN4IO_USER_BASE + 24)
87736 +
87737 +#define ELAN4IO_ALLOC_INTCOOKIE                _IOW  ('e', ELAN4IO_USER_BASE + 25, ELAN4_INTCOOKIE)
87738 +#define ELAN4IO_FREE_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 26, ELAN4_INTCOOKIE)
87739 +#define ELAN4IO_ARM_INTCOOKIE          _IOW  ('e', ELAN4IO_USER_BASE + 27, ELAN4_INTCOOKIE)
87740 +#define ELAN4IO_WAIT_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 28, ELAN4_INTCOOKIE)
87741 +
87742 +typedef struct elan4io_alloc_trap_queues_struct
87743 +{
87744 +    unsigned int       tq_ndproc_traps;
87745 +    unsigned int       tq_neproc_traps;
87746 +    unsigned int       tq_ntproc_traps;
87747 +    unsigned int       tq_nthreads;
87748 +    unsigned int       tq_ndmas;
87749 +} ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT;
87750 +#define ELAN4IO_ALLOC_TRAP_QUEUES      _IOW  ('e', ELAN4IO_USER_BASE + 29, ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT)
87751 +
87752 +typedef struct elan4io_neterr_msg_struct
87753 +{
87754 +    unsigned int       nm_vp;
87755 +    unsigned int       nm_nctx;
87756 +    unsigned int       nm_retries;
87757 +    unsigned int        nm_pad;
87758 +    ELAN4_NETERR_MSG    nm_msg;
87759 +} ELAN4IO_NETERR_MSG_STRUCT;
87760 +#define ELAN4IO_NETERR_MSG             _IOW ('e', ELAN4IO_USER_BASE + 30, ELAN4IO_NETERR_MSG_STRUCT)
87761 +
87762 +typedef struct elan4io_neterr_timer_struct 
87763 +{
87764 +    unsigned int       nt_usecs;
87765 +} ELAN4IO_NETERR_TIMER_STUCT;
87766 +
87767 +#define ELAN4IO_NETERR_TIMER           _IO  ('e', ELAN4IO_USER_BASE + 31)
87768 +
87769 +typedef struct elan4io_neterr_fixup_struct
87770 +{
87771 +    E4_uint64          nf_cookie;
87772 +    unsigned int       nf_waitforeop;
87773 +    unsigned int       nf_sten;
87774 +    unsigned int       nf_vp;
87775 +    unsigned int       nf_pad;
87776 +} ELAN4IO_NETERR_FIXUP_STRUCT;
87777 +
87778 +#define ELAN4IO_NETERR_FIXUP           _IOW ('e', ELAN4IO_USER_BASE + 32, ELAN4IO_NETERR_FIXUP_STRUCT)
87779 +
87780 +typedef struct elan4io_firecap_struct 
87781 +{
87782 +    ELAN_CAPABILITY     fc_capability;
87783 +    ELAN4_INTCOOKIE     fc_cookie;
87784 +} ELAN4IO_FIRECAP_STRUCT;
87785 +
87786 +#define ELAN4IO_FIRE_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 33, ELAN4IO_FIRECAP_STRUCT)
87787 +
87788 +#define ELAN4IO_ALLOC_INTCOOKIE_TABLE  _IOW  ('e', ELAN4IO_USER_BASE + 34, ELAN_CAPABILITY)
87789 +#define ELAN4IO_FREE_INTCOOKIE_TABLE   _IO   ('e', ELAN4IO_USER_BASE + 35)
87790 +
87791 +typedef struct elan4io_translation
87792 +{
87793 +    E4_Addr            tr_addr;
87794 +    unsigned long      tr_len;
87795 +    unsigned int       tr_access;
87796 +} ELAN4IO_TRANSLATION_STRUCT;
87797 +
87798 +#define ELAN4IO_LOAD_TRANSLATION       _IOW  ('e', ELAN4IO_USER_BASE + 36, ELAN4IO_TRANSLATION_STRUCT)
87799 +#define ELAN4IO_UNLOAD_TRANSLATION     _IOW  ('e', ELAN4IO_USER_BASE + 37, ELAN4IO_TRANSLATION_STRUCT)
87800 +
87801 +typedef struct elan4io_dumpcq_struct32
87802 +{
87803 +    E4_uint64 cq_space;      /* output: sdram addr of q, used to decode ptrs */
87804 +    E4_uint32 cq_size;       /* output: The real size of the command queue */
87805 +    E4_uint32 bufsize;       /* input: The size of the buffer to dump to */
87806 +    E4_uint32 cq_indx;       /* input: index of cq to dump */
87807 +    unsigned int buffer;     /* input: user address of rgs->buffer to dump to */
87808 +} ELAN4IO_DUMPCQ_STRUCT32;
87809 +
87810 +typedef struct elan4io_dumpcq_struct
87811 +{
87812 +    E4_uint64 cq_space;      /* output: sdram addr of q, used to decode ptrs */
87813 +    E4_uint32 cq_size;       /* output: The real size of the command queue */
87814 +    E4_uint32 bufsize;       /* input: The size of the buffer to dump to */
87815 +    E4_uint32 cq_indx;       /* input: index of cq to dump */
87816 +    unsigned long buffer;    /* input: user address of rgs->buffer to dump to */
87817 +} ELAN4IO_DUMPCQ_STRUCT;
87818 +
87819 +#define ELAN4IO_DUMPCQ                 _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT)
87820 +#define ELAN4IO_DUMPCQ32                       _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT32)
87821 +
87822 +/* mmap offsets - - we define the file offset space as follows:
87823 + *
87824 + * page 0 - 4095 - command queues
87825 + * page 4096    - device user registers
87826 + * page 4097    - flag page/user stats
87827 + * page 4098    - device stats
87828 + * page 4099     - tproc trampoline
87829 + */
87830 +
87831 +#define ELAN4_OFF_COMMAND_QUEUES       0
87832 +#define ELAN4_OFF_USER_REGS            4096
87833 +#define ELAN4_OFF_USER_PAGE            4097
87834 +#define ELAN4_OFF_DEVICE_STATS         4098
87835 +#define ELAN4_OFF_TPROC_TRAMPOLINE     4099
87836 +
87837 +
87838 +/*
87839 + * Local variables:
87840 + * c-file-style: "stroustrup"
87841 + * End:
87842 + */
87843 +#endif /* __ELAN4_IOCTL_H */
87844 Index: linux-2.6.5-7.191/include/elan4/mmu.h
87845 ===================================================================
87846 --- linux-2.6.5-7.191.orig/include/elan4/mmu.h  2004-02-23 16:02:56.000000000 -0500
87847 +++ linux-2.6.5-7.191/include/elan4/mmu.h       2005-07-28 14:52:52.963661256 -0400
87848 @@ -0,0 +1,94 @@
87849 +/*
87850 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
87851 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87852 + * 
87853 + *    For licensing information please see the supplied COPYING file
87854 + *
87855 + */
87856 +
87857 +#ident "@(#)$Id: mmu.h,v 1.11 2004/04/21 12:04:24 david Exp $"
87858 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu.h,v $*/
87859 +
87860 +
87861 +#ifndef __ELAN4_MMU_H
87862 +#define __ELAN4_MMU_H
87863 +
87864 +typedef struct elan4_hash_entry
87865 +{
87866 +    struct elan4_hash_entry    *he_next;
87867 +    struct elan4_hash_entry    *he_prev;
87868 +
87869 +    sdramaddr_t                         he_entry;
87870 +    
87871 +    struct elan4_hash_entry    *he_chain[2];
87872 +    E4_uint64                   he_tag[2];
87873 +    E4_uint32                   he_pte[2];
87874 +} ELAN4_HASH_ENTRY;
87875 +
87876 +#define ELAN4_HENT_CHUNKS      16              /* SDRAM_MIN_BLOCK_SIZE/sizeof (E4_HashTableEntry) */
87877 +
87878 +typedef struct elan4_hash_chunk
87879 +{
87880 +    struct list_head            hc_link;
87881 +    ELAN4_HASH_ENTRY           hc_hents[ELAN4_HENT_CHUNKS];
87882 +} ELAN4_HASH_CHUNK;
87883 +
87884 +typedef struct elan4_hash_cache
87885 +{
87886 +    E4_Addr           hc_start;
87887 +    E4_Addr           hc_end;
87888 +    int                      hc_tbl;
87889 +
87890 +    ELAN4_HASH_ENTRY *hc_hes[1];
87891 +} ELAN4_HASH_CACHE;
87892 +
87893 +/* 
87894 + * he_pte is really 4 bytes of pte "type" one for each pte
87895 + * entry - however we declare it as an "int" so we can
87896 + * easily determine that all 4 entries are invalid 
87897 + */
87898 +#define HE_SET_PTE(he,tagidx,pteidx,val)       (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx] = (val))
87899 +#define HE_GET_PTE(he,tagidx,pteidx)           (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx])
87900 +
87901 +/*
87902 + * he_tag has the following form :
87903 + *     [63:27] tag
87904 + *     [20:17]  pte valid
87905 + *     [16]     locked
87906 + *     [15]     copy
87907 + *     [14]     valid
87908 + *     [13:0]  context
87909 + */
87910 +
87911 +#define HE_TAG_VALID           (1 << 14)
87912 +#define HE_TAG_COPY            (1 << 15)
87913 +#define HE_TAG_LOCKED          (1 << 16)
87914 +
87915 +#define INVALID_CONTEXT                0
87916 +
87917 +extern u_char elan4_permtable[];
87918 +#define ELAN4_INCOMPAT_ACCESS(perm,access) ((elan4_permtable[(perm)] & (1 << (access))) == 0)
87919 +extern u_char elan4_permreadonly[];
87920 +#define ELAN4_PERM_READONLY(perm)        (elan4_permreadonly[(perm)])
87921 +
87922 +/* return code from elan4mmu_categorise_paddr */
87923 +#define ELAN4MMU_PADDR_SDRAM           0
87924 +#define ELAN4MMU_PADDR_COMMAND         1
87925 +#define ELAN4MMU_PADDR_LOCALPCI                2
87926 +#define ELAN4MMU_PADDR_PAGE            3
87927 +#define ELAN4MMU_PADDR_OTHER           4
87928 +
87929 +extern int elan4_debug_mmu;
87930 +
87931 +#ifdef DEBUG_PRINTF
87932 +#  define MPRINTF(ctxt,lvl,args...)    (elan4_debug_mmu > (lvl) ? elan4_debugf(ctxt,DBG_MMU, ##args) : (void)0)
87933 +#else
87934 +#  define MPRINTF(ctxt,lvl,args...)    ((void) 0)
87935 +#endif
87936 +
87937 +/*
87938 + * Local variables:
87939 + * c-file-style: "stroustrup"
87940 + * End:
87941 + */
87942 +#endif /* __ELAN4_MMU_H */
87943 Index: linux-2.6.5-7.191/include/elan4/neterr.h
87944 ===================================================================
87945 --- linux-2.6.5-7.191.orig/include/elan4/neterr.h       2004-02-23 16:02:56.000000000 -0500
87946 +++ linux-2.6.5-7.191/include/elan4/neterr.h    2005-07-28 14:52:52.963661256 -0400
87947 @@ -0,0 +1,40 @@
87948 +/*
87949 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87950 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
87951 + *
87952 + *    For licensing information please see the supplied COPYING file
87953 + *
87954 + */
87955 +
87956 +#ifndef __ELAN4_NETERR_H
87957 +#define __ELAN4_NETERR_H
87958 +
87959 +#ident "@(#)$Id: neterr.h,v 1.1 2004/01/19 14:38:34 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
87960 +/*      $Source: /cvs/master/quadrics/elan4mod/neterr.h,v $*/
87961 +
87962 +typedef struct elan4_neterr_msg
87963 +{
87964 +    E4_uint8           msg_type;
87965 +    E4_uint8           msg_waitforeop;
87966 +    E4_uint16          msg_context;                            /* network context # message sent to */
87967 +    E4_int16           msg_found;                              /* # cookie found (response) */
87968 +
87969 +    ELAN_LOCATION      msg_sender;                             /* nodeid/context # message sent from */
87970 +    E4_uint32          msg_pad;
87971 +
87972 +    E4_uint64          msg_cookies[6];                         /* 64 bit cookies from identify packets */
87973 +} ELAN4_NETERR_MSG;
87974 +
87975 +#define ELAN4_NETERR_MSG_SIZE          sizeof (ELAN4_NETERR_MSG)
87976 +#define ELAN4_NETERR_MSG_REQUEST       1
87977 +#define ELAN4_NETERR_MSG_RESPONSE      2
87978 +
87979 +#define ELAN4_NETERR_MAX_COOKIES       (sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies) / \
87980 +                                        sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies[0]))
87981 +
87982 +/*
87983 + * Local variables:
87984 + * c-file-style: "stroustrup"
87985 + * End:
87986 + */
87987 +#endif /* __ELAN4_NETERR_H */
87988 Index: linux-2.6.5-7.191/include/elan4/pci.h
87989 ===================================================================
87990 --- linux-2.6.5-7.191.orig/include/elan4/pci.h  2004-02-23 16:02:56.000000000 -0500
87991 +++ linux-2.6.5-7.191/include/elan4/pci.h       2005-07-28 14:52:52.964661104 -0400
87992 @@ -0,0 +1,227 @@
87993 +/*
87994 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87995 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
87996 + *
87997 + *    For licensing information please see the supplied COPYING file
87998 + *
87999 + */
88000 +
88001 +#ifndef __ELAN4_PCI_H 
88002 +#define __ELAN4_PCI_H
88003 +
88004 +#ident "$Id: pci.h,v 1.32 2003/09/04 12:39:17 david Exp $"
88005 +/*      $Source: /cvs/master/quadrics/elan4hdr/pci.h,v $*/
88006 +
88007 +/* Elan has 2 64 bit bars */
88008 +#define ELAN4_BAR_SDRAM                        0
88009 +#define ELAN4_BAR_REGISTERS            2
88010 +
88011 +#define PCI_VENDOR_ID_QUADRICS         0x14fc
88012 +#define PCI_DEVICE_ID_ELAN3            0x0000
88013 +#define   PCI_REVISION_ID_ELAN3_REVA   0x0000
88014 +#define   PCI_REVISION_ID_ELAN3_REVB   0x0001
88015 +#define PCI_DEVICE_ID_ELAN4            0x0001
88016 +#define   PCI_REVISION_ID_ELAN4_REVA   0x0000
88017 +#define   PCI_REVISION_ID_ELAN4_REVB   0x0001
88018 +
88019 +/* support standard pseudo bars */
88020 +#define ELAN4_PSEUDO_BAR_ROM           8
88021 +
88022 +/* Elan PCI control
88023 + configuration space register. ElanControlRegister */
88024 +#define PCI_ELAN_PARITY_ADDR_LO                0x40
88025 +#define PCI_ELAN_PARITY_ADDR_HI                0x44
88026 +#define PCI_ELAN_PARITY_TYPE           0x48
88027 +#define PCI_ELAN_CONTROL               0x4c
88028 +#define PCI_ELAN_PLL_CONTROL           0x50
88029 +#define PCI_ELAN_SPLIT_MESSAGE_ATTR    0x54
88030 +#define PCI_ELAN_SPLIT_MESSAGE_VALUE   0x54
88031 +#define PCI_ELAN_RAMBIST_FAILED                0x54
88032 +#define PCI_ELAN_TOPPHYSADDR(i)                (0x58 + ((i)<<1))
88033 +
88034 +/*
88038 + * [31]           PciM66EN             This is set if the bus is running in PCI2.3 - 66MHz mode.
88039 + * [30:28] InitPattern         This gives the PCI-X startup mode. See "Pci initialisation patterns" below.
88037 + * [27]           notBusIs64Bits       If set the bus is running 32 bits wide. If Clear it is a 64 bit bus.
88041 + * [26:24] RamBistCntl         Used to control the Elan4 RAM BIST. Not active if zero.
88039 + * [23]           RamBistFinished      Only used when performing the RAM BIST test.
88040 + * [22]           SelectSplitMessAttr  See ECTRL_SELECT_SPLIT_MESS_ATTR below.
88041 + * [21]           ReceivedSplitCompError See ECTRL_REC_SPLIT_COMP_MESSAGE below
88042 + * [20:16] WriteHighPriTime    Used with ReadHighPriTime to control the ratio of PCI master write to PCI master
88043 + *                             read bandwidth under heavy load. The high the value of WriteHighPriTime the longer
88044 + *                             the PCI write bursts will be allowed without interruption from a read transfer.
88045 + * [15]    DisableCouplingTest This is only used as part of the RAM BIST test. It effects the testing of the main
88046 + *                             cache tag RAMS.
88047 + * [14:13] Not used            Will read as zero.
88048 + * [12:8]  ReadHighPriTime     Used with WriteHighPriTime to control the ratio of PCI master write to PCI master
88049 + *                             read bandwidth under heavy load. The high the value of ReadHighPriTime the longer
88050 + *                             the PCI read bursts will be allowed without interruption from a write transfer.
88051 + * [7] EnableLatencyCountReset  This bit effect the behaviour of disconnects due to the removal of GNT# after the latency
88052 + *                             counter has expired. If set it will allow the latency counter to be reset each time the
88053 + *                             GNT# is reasserted. If asserted it should provided improved bandwidth on the PCI bus
88054 + *                             without increasing the maximum latency another device would have for access to the bus.
88055 + *                             It will increase the average latency of other devices.
88056 + * [6] ExtraMasterAddrBits     This bit used to control the physical PCI addresses generated by the MMU.
88057 + * [5] ReducedPciDecode                If set the PCI local memory BAR will decode 256Mbytes of PCI address space. If clear it
88058 + *                             will decode 2Gbyte of PCI address space.
88059 + * [4] ConfigInEBusRom         If set the constant values of the Elan4 PCI configuration space will be taken from the
88060 + *                             EEPROM. If clear the internal values will be used.
88061 + * [3] EnableRd2_2Bursts       This bit only effects the behaviour of burst reads when the PCI bus is operating in
88062 + *                             PCI-2.2 mode. It allows adjacent reads to be merged into longer bursts for higher
88063 + *                             performance.
88064 + * [2] SoftIntReset            If set this bit will cause the Elan4 to reset itself with the exception of the PCI
88065 + *                             configuation space. All internal state machines will be put into the reset state.
88066 + * [1] EnableWrBursts          This bit allows much longer PCI-X write bursts. If set it will stop the Elan4 from
88067 + *                             being completely PCI-X compliant as the Elan4 may request a long PCI-X write burst that
88068 + *                             it does not complete. However it should significantly increase the maximum PCI-X write
88069 + *                             bandwidth and is unlikely to cause problems with many PCI-X bridge chips.
88073 + * [0] InvertMSIPriority       This bit affects the way MSI interrupts are generated. It provides flexibility to generate
88074 + *                             the MSI interrupts in a different way to allow for different implementations of MSI
88072 + *                             logic and still give the correct priority of Elan4 interrupts.
88073 + *
88074 + *     {PciM66EN, InitPattern, notBusIs64Bits, RamBistCntl, RamBistFinished,
88075 + *      SelectSplitMessAttr, ReceivedSplitCompError, WriteHighPriTime,
88076 + *      DisableCouplingTest, 2'h0, ReadHighPriTime,
88077 + *      EnableLatencyCountReset, ExtraMasterAddrBits, ReducedPciDecode, ConfigInEBusRom,
88078 + *      EnableRd2_2Bursts, SoftIntReset, EnableWrBursts, InvertMSIPriority}
88079 + */
88080 +
88081 +#define ECTRL_INVERT_MSI_PRIO          (1 << 0)
88082 +#define ECTRL_ENABLE_WRITEBURSTS       (1 << 1)
88083 +#define ECTRL_SOFTWARE_INTERNAL_RESET  (1 << 2)
88084 +#define ECTRL_ENABLE_2_2READBURSTS     (1 << 3)
88085 +#define ECTRL_CONFIG_IN_EBUS_ROM       (1 << 4)
88086 +#define ECTRL_28_NOT_30_BIT_LOCAL_BAR  (1 << 5)
88087 +#define ECTRL_ExtraMasterAddrBits      (1 << 6)
88088 +#define ECTRL_ENABLE_LATENCY_RESET      (1 << 7)
88089 +#define ECTRL_DISABLE_COUPLING_TEST    (1 << 15)
88090 +
88091 +/*
88092 + * Ratio of the following two registers set the relative bandwidth given to intputer data
88093 + * versus other PCI pci traffic when scheduling new PCI master accesses.
88094 + */
88095 +#define ECTRL_OTHER_HIGH_PRI_TIME_SHIFT        (8)     /* Sets top 4 bits of 8 bit counter */
88096 +#define ECTRL_OTHER_HIGH_PRI_TIME_MASK (0x1f)
88097 +
88098 +
88099 +#define ECTRL_IPROC_HIGH_PRI_TIME_SHIFT        (16)    /* Sets top 4 bits of 8 bit counter */
88100 +#define ECTRL_IPROC_HIGH_PRI_TIME_MASK (0x1f)
88101 +
88102 +/*
88103 + * This is set if a split completion message is received.
88104 + * This will cause a PCI error interrupt.
88108 + * This error is cleared by writing a 1 to this bit.
88106 + */
88107 +#define ECTRL_REC_SPLIT_COMP_MESSAGE   (1 << 21)
88108 +/*
88109 + * This bit is used to select reading of either the Split message attribute value when
88110 + * set or the split completion message data value from 0x54 in the config space
88114 + * if the ECTRL_REC_SPLIT_COMP_MESSAGE bit is set. 0x54 returns the BistFailed flags
88112 + * if any of the BIST control bits are set (bits 26 to 24)
88113 + */
88114 +#define ECTRL_SELECT_SPLIT_MESS_ATTR   (1 << 22)
88115 +
88116 +// Internal RAM bist control bits.
88117 +// Three bits of state control the RAM BIST (Built in self test).
88118 +//
88119 +// These bits must not be set unless the ECTRL_SOFTWARE_INTERNAL_RESET bit has also been set!
88120 +//
88121 +// For a normal fast ram test assert ECTRL_BIST_FAST_TEST. 
88122 +// For a data retention test first write ECTRL_START_RETENTION_TEST then wait the retention period of
88123 +// at least 1ms and preferably much longer then write ECTRL_CONTINUE_RETENTION_TEST then wait
88127 +// again and finally write ECTRL_FINISH_RETENTION_TEST.
88125 +// 
88129 +// The read only bit ECTRL_BIST_FINISHED_TEST can be polled to check that the test has completed.
88127 +#define ECTRL_BIST_CTRL_SHIFT          (24)
88128 +#define ECTRL_BIST_CTRL_MASK           (7 << 24)
88129 +
88130 +#define ECTRL_BIST_FAST_TEST           ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)     // old scheme
88131 +#define ECTRL_START_RETENTION_TEST     ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
88132 +#define ECTRL_CONTINUE_RETENTION_TEST  ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
88133 +#define ECTRL_FINISH_RETENTION_TEST    ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
88134 +
88135 +#define ECTRL_BIST_KICK_OFF            ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)     // new scheme
88136 +#define ECTRL_BIST_MOVE_ON_ODD         ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
88137 +#define ECTRL_BIST_MOVE_ON_EVEN                ((5 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
88138 +#define ECTRL_BIST_SCREAM_THROUGH      ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
88139 +
88140 +#define ECTRL_CLEAR_BIST_TEST          (0 << 24)
88141 +#define ECTRL_BIST_FINISHED_TEST       (1 << 23)
88142 +
88143 +// Read only current PCI bus type.
88144 +#define ECTRL_RUNNING_32BIT_MODE       (1 << 27)
88145 +#define ECTRL_INITIALISATION_MODE      (7 << 28)
88146 +#define ECTRL_RUNNING_M66EN_MODE       (1 << 31)
88147 +
88148 +#define ECTRL_INIT_PATTERN_SHIFT       (28)
88149 +#define ECTRL_INIT_PATTERN_MASK                (0x7)
88150 +
88154 +// Pci initialisation patterns
88152 +#define Pci2_2                         (0 << 28)
88153 +#define PciX50To66MHz                  (1 << 28)
88154 +#define PciX66to100MHz                 (2 << 28)
88155 +#define PciX100to133MHz                        (3 << 28)
88156 +#define PciXReserved1                  (4 << 28)
88157 +#define PciXReserved2                  (5 << 28)
88158 +#define PciXReserved3                  (6 << 28)
88159 +#define PciXReserved4                  (7 << 28)
88160 +
88161 +/* Elan PCI pll and pad control configuration space register. ElanPllControlReg */
88162 +// This overrides the default PCI pll control settings.
88163 +#define PciPll_FeedForwardISel0                (1 << 0)        // Lsi name Z0
88164 +#define PciPll_FeedForwardISel1                (1 << 1)        // Lsi name Z1
88165 +#define PciPll_ChargePumpISel0         (1 << 2)        // Lsi name P0
88166 +#define PciPll_ChargePumpISel1         (1 << 3)        // Lsi name P1
88167 +#define PciPll_EnableAutoReset         (1 << 4)        // Lsi name ENARST
88168 +#define PciPll_RSEL200500              (1 << 5)        // Lsi name Range Select, 0: 100 - 250MHz, 1: 200 - 500MHz
88169 +#define PciPll_DivideFeedback          (1 << 6)        // Just used for test - This divides the shortcut feedback to the PCI PLL so that it can lock to the tester clock.
88170 +#define PciPll_CutFeedback             (1 << 7)        // Just used for test - This disables the shortcut feedback.
88171 +
88172 +// This overrides the default PCI BZ controler settings.
88173 +#define PciBZ_UPDI                     (0xf << 8)
88174 +#define PciBZ_WAIT_INT                 (0xf << 12)
88175 +
88176 +// This overrides the default Sys and SDRam pll control settings.
88177 +#define SysPll_FeedForwardISel0                (1 << 16)       // Lsi name P0     
88178 +#define SysPll_FeedForwardISel1                (1 << 17)       // Lsi name P1     
88179 +#define SysPll_ChargePumpISel0         (1 << 18)       // Lsi name Z0    
88180 +#define SysPll_ChargePumpISel1         (1 << 19)       // Lsi name Z1    
88181 +#define SysPll_EnableAutoReset         (1 << 20)       // Lsi name ENARST
88182 +#define SysPll_DivPhaseCompInBy2       (1 << 21)       // Lsi name NODIV (Should be DIV)
88183 +#define SysPll_PllTestClkSel           (1 << 22)       // If asserted the master clock source is not taken from the pll.
88184 +
88185 +#define Pll_ForceEBusADTristate                (1 << 23)       // Required to enable the testing of EnableAutoReset. Enables use of EBusAD[7] (rev A)
88186 +#define Pll_LinkErrDirectToSDA         (1 << 23)       // Access to link error flag for triggering (rev B)
88187 +
88188 +
88189 +#define ECTRL_SYS_CLOCK_RATIO_SHIFT    (24)
88190 +// Config: with 800MHz         Speeds are 266 200 160 133.
88191 +//         0 = 133/133 (1:1)   6:6     1
88192 +//        1 = 160/133 (6:5)    5:6     1.2
88193 +//         2 = 200/133 (3:2)   4:6     1.5
88194 +//        3 = 266/133 (2:1)    3:6     2
88195 +//        4 = 200/200 (1:1)    4:4     1
88196 +//        5 = 266/200 (4:3)    3:4     1.33
88197 +
88198 +// Config: with 600MHz         Speeds are 200 150 120 100
88199 +//         0 = 100/100 (1:1)   6:6     1
88200 +//        1 = 120/100 (6:5)    5:6     1.2
88201 +//         2 = 150/100 (3:2)   4:6     1.5
88202 +//        3 = 200/100 (2:1)    3:6     2
88203 +//        4 = 150/150 (1:1)    4:4     1
88204 +//        5 = 200/150 (4:3)    3:4     1.33
88205 +
88206 +#define ECTRL_SYS_CLOCK_RATIO_SHIFT    (24)
88207 +#define ECTRL_SYS_CLOCK_RATIO_1_1Slow  (0 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88208 +#define ECTRL_SYS_CLOCK_RATIO_6_5      (1 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88209 +#define ECTRL_SYS_CLOCK_RATIO_3_2      (2 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88210 +#define ECTRL_SYS_CLOCK_RATIO_2_1      (3 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88211 +#define ECTRL_SYS_CLOCK_RATIO_1_1Fast  (4 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88212 +#define ECTRL_SYS_CLOCK_RATIO_4_3      (5 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88213 +#define ECTRL_SYS_CLOCK_MAX_NORMAL     (6)                                     /* used to generate a valid random value */
88214 +#define GET_RANDOM_CLOCK_RATIO         (Random(ECTRL_SYS_CLOCK_MAX_NORMAL) << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88215 +#define ECTRL_SYS_CLOCK_RATIO_PLL_TEST (6 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88216 +#define ECTRL_SYS_CLOCK_RATIO_TEST     (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88217 +#define ECTRL_SYS_CLOCK_RATIO_MASK     (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
88218 +
88219 +#endif /* __ELAN4_PCI_H */
88220 Index: linux-2.6.5-7.191/include/elan4/registers.h
88221 ===================================================================
88222 --- linux-2.6.5-7.191.orig/include/elan4/registers.h    2004-02-23 16:02:56.000000000 -0500
88223 +++ linux-2.6.5-7.191/include/elan4/registers.h 2005-07-28 14:52:52.967660648 -0400
88224 @@ -0,0 +1,1587 @@
88225 +/*
88226 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88227 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
88228 + *
88229 + *    For licensing information please see the supplied COPYING file
88230 + *
88231 + */
88232 +
88233 +#ifndef _ELAN4_REGISTERS_H
88234 +#define _ELAN4_REGISTERS_H
88235 +
88236 +#ident "$Id: registers.h,v 1.117.2.3 2005/03/03 16:29:57 david Exp $"
88237 +/*      $Source: /cvs/master/quadrics/elan4hdr/registers.h,v $*/
88238 +
88239 +/*
88240 + * Header file for internal slave mapping of the ELAN4 registers
88241 + */
88242 +
88243 +#define E4_CACHELINE_SIZE      (64)
88244 +#define E4_STACK_ALIGN         (64)
88245 +
88246 +#ifndef _ASM
88247 +
88248 +#include <elan4/types.h>
88249 +#include <elan4/dma.h>
88250 +#include <elan4/userregs.h>
88251 +
88252 +typedef volatile struct _E4_CacheSets
88253 +{
88254 +   E4_uint64   Set0[1024];     /* 8k bytes per set */
88255 +   E4_uint64   Set1[1024];     /* 8k bytes per set */
88256 +   E4_uint64   Set2[1024];     /* 8k bytes per set */
88257 +   E4_uint64   Set3[1024];     /* 8k bytes per set */
88258 +} E4_CacheSets;
88259 +
88260 +typedef union e4_cache_tag
88261 +{
88262 +   struct {
88263 +       E4_uint32 pad0;                 /* Undefined value when read */
88264 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
88265 +       E4_uint32 :10;                                          /* 0-9   - reserved */
88266 +       E4_uint32 LineError:1;                                  /* 10    - line error */
88267 +       E4_uint32 Modified:1;                                   /* 11    - modified */
88268 +       E4_uint32 FillPending:1;                                        /* 12    - fill pending */
88269 +       E4_uint32 AddrTag30to13:18;                             /* 30-13 - tag */
88270 +       E4_uint32 :1;                                           /* 31    -  */
88271 +#else
88272 +       E4_uint32 :1;                                           /* 31    -  */
88273 +       E4_uint32 AddrTag30to13:18;                             /* 30-13 - tag */
88274 +       E4_uint32 FillPending:1;                                        /* 12    - fill pending */
88275 +       E4_uint32 Modified:1;                                   /* 11    - modified */
88276 +       E4_uint32 LineError:1;                                  /* 10    - line error */
88277 +       E4_uint32 :10;                                          /* 0-9   - reserved */
88278 +#endif
88279 +   } s;
88280 +   E4_uint64   Value;
88281 +} E4_CacheTag;
88282 +
88283 +typedef volatile struct _E4_CacheTags
88284 +{
88285 +   E4_CacheTag Tags[4][128];   /* 8k bytes per set, 64 byte cache line */
88286 +} E4_CacheTags;
88287 +
88288 +#define E4_NumCacheSets                4
88289 +#define E4_NumCacheLines       128
88290 +#define E4_CacheLineSize       64
88291 +#define E4_CacheSize           (E4_NumCacheSets * E4_NumCacheLines * E4_CacheLineSize)
88292 +#define E4_CacheSetSize        (E4_NumCacheLines * E4_CacheLineSize)
88293 +
88294 +/*
88295 + * Run Queue pointers 
88296 + *
88297 + * [62:35]     FrontPointer[30:3]
88298 + * [33:32]     Size Value
88299 + * [30:3]      BackPointer[30:3]
88300 + */
88301 +#define E4_QueuePtrMask                (0x7ffffff8ULL)
88302 +#define E4_QueueSizeMask       3
88303 +#define E4_QueueEntrySize       sizeof (E4_uint64)
88304 +
88305 +#define E4_Queue8KBytes                0
88306 +#define E4_Queue64KBytes       1
88307 +#define E4_Queue512KBytes      2
88308 +#define E4_Queue4MBytes                3
88309 +
88310 +#define E4_QueueFrontValue(val,size)   ((val) | (size))
88311 +#define E4_QueueValue(queue,size)      (((E4_uint64) E4_QueueFrontValue(queue,size)) << 32 | ((E4_uint64) (queue)))
88312 +
88313 +#define E4_QueueFrontPointer(val)      /* extract queue front pointer from register */\
88314 +       (((val) >> 32) & E4_QueuePtrMask)
88315 +#define E4_QueueBackPointer(val)       /* extract queue back pointer from register */ \
88316 +       ((val) & E4_QueuePtrMask)
88317 +#define E4_QueueSizeValue(val)         /* extract queue size value from register */ \
88318 +       (((val) >> 32) & E4_QueueSizeMask)
88319 +#define E4_QueueSize(value)            /* queue size in bytes from size value */ \
88320 +       (1 << (((value)*3) + 13))
88321 +#define E4_QueueOffsetMask(fptr)\
88322 +        ((8192 << (((fptr) & E4_QueueSizeMask) << 3)) - 1)
88323 +#define E4_QueueOffset(fptr)\
88324 +        ((fptr) & E4_QueueOffsetMask(fptr))
88325 +#define E4_QueueFrontPointerInc(fptr)   \
88326 +        ( ((fptr) & ~E4_QueueOffsetMask(fptr)) | ((E4_QueueOffset(fptr) + 8) & E4_QueueOffsetMask(fptr)) )
88327 +
88328 +typedef union _E4_QueuePtr
88329 +{
88330 +   E4_uint64   Value;
88331 +   struct {
88332 +       E4_uint32 Back;
88333 +       E4_uint32 Front;
88334 +   } s;
88335 +} E4_QueuePtr;
88336 +
88337 +/*
88338 + * DMA processor status register.
88339 + *
88340 + * [48]                FirstSendTrans          Set for the first packet of a dma.
88341 + * [47:46]     TimeSliceCount          Time left to timeslice.
88342 + * [45]                DmaLastPacket           Set for the last packet of a dma.
88343 + * [44]                CurrPrefetchDma         Dma descriptor the prefetcher is valid for.
88344 + * [43:39]     PrefetcherState         Dma prefetcher's state machines value.
88345 + * [38:33]     PacketAssemblyState     Packet assembler's state machines value.
88346 + * [32:31]     PrefetcherWakeupFnt     Dma prefetcher's wakeup function.
88347 + * [30:28]     PacketAssWakeupFnt      Packet assembler's wakeup function.
88348 + * [27]                AckBufferValid          Packet ack is valid.
88349 + * [26]                PrefetchedDataProblem   Had either a data read fault or data error. Valid if AckBufferValid.
88350 + * [25]                PrefetcherHalting       Prefetch data about to stop for halt. Valid if AckBufferValid.
88351 + * [24]                PacketTimeout           Packet timeout. Sent an EopError. Valid if AckBufferValid set.
88352 + * [23:22]     PacketAckValue          Packet ack type. Valid if AckBufferValid set.
88353 + * [21:20]     FaultUnitNo             Set if the dma prefetcher has faulted.
88354 + * [19:17]     TrapType                Packet assembler's trap type.
88355 + * [16]                PrefetcherFault         Set if the dma prefetcher has faulted for this DMA unit.
88356 + * [15]                Remote                  The Dma had been issued remotly
88357 + * [14]                Priority                Running at high priority.
88358 + * [13:0]      Context                 procs current context.
88359 + */
88360 +
88361 +#define DPROC_FirstSendTrans(s)                ((unsigned)((s) >> 48) & 1)
88362 +#define DPROC_TimeSliceCount(s)                ((unsigned)((s) >> 46) & 3)     /* fixed: original had unbalanced parens */
88363 +#define DPROC_DmaLastPacket(s)         ((unsigned)((s) >> 45) & 1)
88364 +#define DPROC_CurrPrefetchDma(s)       ((unsigned)((s) >> 44) & 1)
88365 +#define DPROC_PrefetcerState(s)                ((unsigned)((s) >> 39) & 0x1f)
88366 +#define DPROC_PacketAssemblerState(s)  ((unsigned)((s) >> 33) & 0x1f)
88367 +#define DPROC_PrefetcherWakeupFn(s)    ((unsigned)((s) >> 31) & 3)
88368 +#define DPROC_PacketAssemblerWakeupFn(s)((unsigned)((s) >> 28) & 3)
88369 +#define DPROC_AckBufferValid(s)                ((unsigned)((s) >> 27) & 1)
88370 +#define DPROC_PrefetcherDataProblem(s) ((unsigned)((s) >> 26) & 1)
88371 +#define DPROC_PrefetcherHalting(s)     ((unsigned)((s) >> 25) & 1)
88372 +#define DPROC_PacketTimeout(s)         ((unsigned)((s) >> 24) & 1)
88373 +#define DPROC_PacketAckValue(s)                ((unsigned)((s) >> 22) & 3)
88374 +#define DPROC_FaultUnitNo(s)           ((unsigned)((s) >> 20) & 3)
88375 +#define DPROC_TrapType(s)              ((unsigned)((s) >> 17) & 7)
88376 +#define DPROC_PrefetcherFault(s)       ((unsigned)((s) >> 16) & 1)
88377 +#define DPROC_Remote(s)                        ((unsigned)((s) >> 15) & 1)
88378 +#define DPROC_Priority(s)              ((unsigned)((s) >> 14) & 1)
88379 +#define DPROC_Context(s)               ((unsigned)(s) & 0x3fff)
88380 +
88381 +/*
88382 + * Command processor status register.
88383 + *
88384 + * [26:21]     CPState         procs current state.
88385 + * [20]                WakeupFnt       procs wakeup function.
88386 + * [19:16]     TrapValue       procs trap value.
88387 + * [15]                Remote          Issued remotely.
88388 + * [14]                Priority        Running at high priority.
88389 + * [13:0]      Context         procs current context.
88390 + */
88391 +
88392 +#define CPROC_TrapType(s)              ((unsigned)((s) >> 16) & 0xf)
88393 +#define CPROC_Remote(s)                        ((unsigned)((s) >> 15) & 0x1)
88394 +#define CPROC_Priority(s)              ((unsigned)((s) >> 14) & 0x1)
88395 +#define CPROC_Context(s)               ((unsigned)(s) & 0x3fff)
88396 +
88397 +/*
88398 + * Event processor status register.
88399 + *
88400 + * [34:30]     CPState         event procs current state.
88401 + * [29:28]     WakeupFnt       event procs wakeup function.
88402 + * [27:20]     EventCopySize   This is the number of DWords to still be copied on a copy dword event.
88403 + * [19]                EProcPort1Fault CUN_EventProc1 has taken a translation fault.
88404 + * [18]                EProcPort0Fault CUN_EventProc0 has taken a translation fault.
88405 + * [17:16]     TrapValue       event proc's trap value.
88406 + * [15]                Remote          Issued remotely.
88407 + * [14]                Priority        Running at high priority.
88408 + * [13:0]      Context         procs current context.
88409 + */
88410 +
88411 +#define EPROC_CPState(s)               ((unsigned)((s) >> 30) & 0x1f)
88412 +#define EPROC_WakeupFunction(s)                ((unsigned)((s) >> 28) & 3)
88413 +#define EPROC_CopySize(s)              ((unsigned)((s) >> 20) & 0xFF)
88414 +#define EPROC_Port1Fault(s)            ((unsigned)((s) >> 19) & 1)
88415 +#define EPROC_Port0Fault(s)            ((unsigned)((s) >> 18) & 1)
88416 +#define EPROC_TrapType(s)              ((unsigned)((s) >> 16) & 3)
88417 +#define EPROC_Remote(s)                        ((unsigned)((s) >> 15) & 1)
88418 +#define EPROC_Priority(s)              ((unsigned)((s) >> 14) & 1)
88419 +#define EPROC_Context(s)               ((unsigned)(s) & 0x3fff)
88420 +
88421 +/*
88422 + * Thread processor status register.
88423 + *
88424 + * [39:24]     MemPortBusy             16 bits of port busy flags for all FFU memory ports.
88425 + * [23:21]     Reads as zero
88426 + * [20:18]     TQState                 State vector for thread queuing proc.
88427 + * [17]                HighRunQueueFull        High priority run queue is full
88428 + * [16]                LowRunQueueFull         Low priority run queue is full
88429 + * [15]                ReadyHigh               More runable threads at high priority
88430 + * [14]                ReadyLow                More runable threads at low priority
88431 + * [13:0]      Context                 procs current context.
88432 + */
88433 +#define TPROC_HighRunQueueFull(s)      ((unsigned)((s) >> 17) & 1)
88434 +#define TPROC_LowRunQueueFull(s)       ((unsigned)((s) >> 16) & 1)
88435 +#define TPROC_ReadyHigh(s)             ((unsigned)((s) >> 15) & 1)
88436 +#define TPROC_ReadyLow(s)              ((unsigned)((s) >> 14) & 1)
88437 +#define TPROC_Context(s)               ((unsigned)((s) & 0x3fff))
88438 +
88439 +/*
88440 + * Input processor status register
88441 + *
88442 + * [55]                Last Trans (~EOP)
88443 + * [54]                First Trans (~EOP)
88444 + * [53]                Channel (~EOP) 
88445 + * [52]                Bad Length (~EOP)
88446 + * [51:50]     Trans CRC Status (~EOP)
88447 + * [49:48]     EOP type
88448 + * [47]                EOP trap
88449 + * [46]                Trapping priority
88450 + * [45]                Trapping Channel
88451 + * [44:43]     Bad ack sent
88452 + * [42:41]     Good ack sent
88453 + * [40]                Queueing Packet (~EOP)
88454 + * [39:36]     Channel trapped bits
88455 + * [35:32]     IProc Trap Value
88456 + * [31:16]     Network Context (~EOP)
88457 + * [15:0]      Transaction Type (~EOP)
88458 + */
88459 +#define IPROC_LastTrans(s)             ((unsigned)((s) >> 55) & 0x1)
88460 +#define IPROC_FirstTrans(s)            ((unsigned)((s) >> 54) & 0x1)
88461 +#define IPROC_Channel(s)               ((unsigned)((s) >> 53) & 0x1)
88462 +#define IPROC_BadLength(s)             ((unsigned)((s) >> 52) & 0x1)
88463 +#define IPROC_TransCRCStatus(s)                ((unsigned)((s) >> 50) & 0x3)
88464 +#define IPROC_EOPType(s)               ((unsigned)((s) >> 48) & 0x3)
88465 +#define IPROC_EOPTrap(s)               ((unsigned)((s) >> 47) & 0x1)
88466 +#define IPROC_InputterPri(s)           ((unsigned)((s) >> 46) & 0x1)
88467 +#define IPROC_InputterChan(s)          ((unsigned)((s) >> 45) & 0x1)
88468 +#define IPROC_BadAckSent(s)            ((unsigned)((s) >> 43) & 0x3)
88469 +#define IPROC_GoodAckSent(s)           ((unsigned)((s) >> 41) & 0x3)
88470 +#define IPROC_QueueingPacket(s)                ((unsigned)((s) >> 40) & 0x1)
88471 +#define IPROC_ChannelTrapped(s)                ((unsigned)((s) >> 36) & 0xF)
88472 +#define IPROC_TrapValue(s)             ((unsigned)((s) >> 32) & 0xF)
88473 +#define IPROC_NetworkContext(s)                ((unsigned)((s) >> 16) & 0xFFFF)
88474 +#define IPROC_TransactionType(s)       ((unsigned)(s) & 0xFFFF)
88475 +
88476 +/* values for IPROC_TransCRCStatus */
88477 +#define CRC_STATUS_GOOD    (0)
88478 +#define CRC_STATUS_DISCARD (1)
88479 +#define CRC_STATUS_ERROR   (2)
88480 +#define CRC_STATUS_BAD     (3)
88481 +
88482 +/* values for IPROC_EOPType */
88483 +#define EOP_GOOD          (1)
88484 +#define EOP_BADACK        (2)
88485 +#define EOP_ERROR_RESET           (3)
88486 +
88487 +/*
88488 + * Interrupt register bits
88489 + *
88490 + * There are up to four sources of interrupt for the MSI port.
88491 + * The Elan will request 4 ports but may only get either 2 or 1 port. The Interrupts are assigned
88492 + * as shown below:
88493 + * No Of MSI ints      Low Priority                                                    High Priority
88494 + *     4               Event Ints      OtherInts               Inputter Ints           Hard Error ints.
88495 + *                i.e.                 Dproc, Tproc, Sten.     HighPri and LowPri      Link errs, ECC errs,
88496 + *
88497 + *     2               Event Ints      All other interrupts.
88498 + *     1               All together.
88499 + * 
88500 + * It is not safe to change the number of sources of interrupt while there may be outstanding,
88501 + * unserviced interrupts pending.
88502 + * There are two forms of encoding. This has been provided in case an MSI implementation assumes either
88503 + * a high value to have a high priority or a low value to have a high priority. This is controlled
88504 + * by a bit in the Elan Pci Control register.
88505 + */
88506 +#define INT_LinkPortKeyFail            (1<<18)
88507 +#define INT_PciMemErr                  (1<<17)
88508 +#define INT_SDRamInt                   (1<<16)
88509 +#define INT_LinkError                  (1<<15)
88510 +#define INT_IProcCh1HighPri            (1<<14)
88511 +#define INT_IProcCh0HighPri            (1<<13)
88512 +#define INT_IProcCh1LowPri             (1<<12)
88513 +#define INT_IProcCh0LowPri             (1<<11)
88514 +#define INT_DiscardingHighPri          (1<<10)
88515 +#define INT_DiscardingLowPri           (1<<9)
88516 +#define INT_CProcHalted                        (1<<8)
88517 +#define INT_TProcHalted                        (1<<7)
88518 +#define INT_DProcHalted                        (1<<6)
88519 +#define INT_EProc                      (1<<5)
88520 +#define INT_TProc                      (1<<4)
88521 +#define INT_CProc                      (1<<3)
88522 +#define INT_Dma1Proc                   (1<<2)
88523 +#define INT_Dma0Proc                   (1<<1)
88524 +#define INT_MainInterrupt              (1<<0)
88525 +
88526 +#define INT_Units              (INT_EProc | INT_TProc | INT_CProc | INT_Dma1Proc | INT_Dma0Proc)
88527 +#define INT_Inputters          (INT_IProcCh1HighPri | INT_IProcCh0HighPri | INT_IProcCh1LowPri | INT_IProcCh0LowPri)
88528 +#define INT_Discarding         (INT_DiscardingHighPri | INT_DiscardingLowPri)
88529 +#define INT_Halted             (INT_CProcHalted | INT_TProcHalted | INT_DProcHalted)
88530 +#define INT_ErrorInterrupts    (INT_PciMemErr | INT_SDRamInt | INT_LinkError)
88531 +
88532 +#define INT_MSI0               INT_MainInterrupt
88533 +#define INT_MSI1               (INT_Units | INT_Discarding | INT_Halted)
88534 +#define INT_MSI2               (INT_Inputters)
88535 +#define INT_MSI3               (INT_ErrorInterrupts)
88536 +
88537 +#define E4_INTERRUPT_REG_SHIFT 32
88538 +#define E4_INTERRUPT_MASK_MASK (0xffffffffULL)
88539 +
88540 +/*
88541 + * Trap type values - see trapvalues.v
88542 + */
88543 +
88544 +#define CommandProcInserterError               0x1
88545 +#define CommandProcPermissionTrap              0x2
88546 +#define CommandProcSendTransInvalid            0x3
88547 +#define CommandProcSendTransExpected           0x4
88548 +#define CommandProcDmaQueueOverflow            0x5
88549 +#define CommandProcInterruptQueueOverflow      0x6
88550 +#define CommandProcMemoryFault                 0x7
88551 +#define CommandProcRouteFetchFault             0x8
88552 +#define CommandProcFailCountZero               0x9
88553 +#define CommandProcAddressAlignment            0xa
88554 +#define CommandProcWaitTrap                    0xb
88555 +#define CommandProcMultipleGuards              0xc
88556 +#define CommandProcOpenOnGuardedChan           0xd
88557 +#define CommandProcThreadQueueOverflow         0xe
88558 +#define CommandProcBadData                      0xf
88559 +
88560 +#define DmaProcNoFault                         0x0
88561 +#define DmaProcRouteFetchFault                 0x1
88562 +#define DmaProcFailCountError                  0x2
88563 +#define DmaProcPacketAckError                  0x3
88564 +#define DmaProcRunQueueReadFault               0x4
88565 +#define DmaProcQueueOverflow                   0x5
88566 +
88567 +#define EventProcNoFault                       0x0
88568 +#define EventProcAddressAlignment              0x1
88569 +#define EventProcMemoryFault                   0x2
88570 +#define EventProcCountWrapError                        0x3
88571 +
88572 +#define InputNoFault                           0x0
88573 +#define InputAddressAlignment                  0x1
88574 +#define InputMemoryFault                       0x2
88575 +#define InputInvalidTransType                  0x3
88576 +#define InputDmaQueueOverflow                  0x4
88577 +#define InputEventEngineTrapped                        0x5
88578 +#define InputCrcErrorAfterPAckOk               0x6
88579 +#define InputEopErrorOnWaitForEop              0x7
88580 +#define InputEopErrorTrap                      0x8
88581 +#define InputDiscardAfterAckOk                 0x9
88582
88583 +typedef struct _E4_Sched_Status
88584 +{
88585 +    E4_uint32 Status;
88586 +    E4_uint32 Restart;
88587 +} E4_Sched_Status;
88588
88589 +typedef struct _E4_Input_Ptrs
88590 +{
88591 +    E4_uint32 ContextFilterTable;
88592 +    E4_uint32 TrapBasePtr;
88593 +} E4_Input_Ptrs;
88594 +
88595 +#define SCH_StopLowPriQueues           (1 << 0)
88596 +#define SCH_DProcHalt                  (1 << 1)
88597 +#define SCH_TProcHalt                  (1 << 2)
88598 +#define SCH_CProcHalt                  (1 << 3)
88599 +
88600 +#define SCH_CProcTimeout600ns          (1 << 4)
88601 +#define SCH_CProcTimeout1p4us          (2 << 4)
88602 +#define SCH_CProcTimeout3p0us          (3 << 4)
88603 +#define SCH_CProcTimeout6p2us          (4 << 4)
88604 +#define SCH_CProcTimeout12p6us         (5 << 4)
88605 +#define SCH_CProcTimeout25p4us         (6 << 4)
88606 +#define SCH_CProcTimeout51p0us         (7 << 4)
88607 +#define SCH_DiscardLowPriInput         (1 << 7)
88608 +#define SCH_DiscardHighPriInput                (1 << 8)
88609 +
88610 +#define SCH_DProcTimeslice64us         (0 << 9)
88611 +#define SCH_DProcTimeslice128us                (1 << 9)
88612 +#define SCH_DProcTimeslice256us                (2 << 9)
88613 +#define SCH_DProcTimeslice512us                (3 << 9)
88614 +
88615 +#define SCH_Halt                       (SCH_StopLowPriQueues | SCH_DProcHalt | SCH_TProcHalt | SCH_CProcHalt)
88616 +#define SCH_Discard                    (SCH_DiscardLowPriInput | SCH_DiscardHighPriInput)
88617 +
88618 +#define SCH_RestartCProc               (1 << 0)
88619 +#define SCH_RestartTProc               (1 << 1)
88620 +#define SCH_RestartEProc               (1 << 2)
88621 +#define SCH_RestartDma0Proc            (1 << 3)
88622 +#define SCH_RestartDma1Proc            (1 << 4)
88623 +#define SCH_RestartDmaPrefetchProc     (1 << 5)
88624 +#define SCH_RestartCh0LowPriInput      (1 << 6)
88625 +#define SCH_RestartCh1LowPriInput      (1 << 7)
88626 +#define SCH_RestartCh0HighPriInput     (1 << 8)
88627 +#define SCH_RestartCh1HighPriInput     (1 << 9)
88628 +#define SCH_ClearLinkErrorInt          (1 << 10)
88629 +#define SCH_ContextFilterFlush         (1 << 11)
88630 +
88631 +/*
88632 + * Link state bits.
88633 + */
88634 +#define LS_LinkNotReady                (1 << 0) /* Link is in reset or recovering from an error */
88635 +#define LS_Locked              (1 << 1) /* Linkinput PLL is locked */
88636 +#define LS_LockError           (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
88637 +#define LS_DeskewError         (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
88638 +#define LS_PhaseError          (1 << 4) /* Linkinput Phase alignment error. */
88639 +#define LS_DataError           (1 << 5) /* Received value was neither good data or a token. */
88640 +#define LS_FifoOvFlow0         (1 << 6) /* Channel 0 input fifo overflowed. */
88641 +#define LS_FifoOvFlow1         (1 << 7) /* Channel 1 input fifo overflowed. */
88642 +#define LS_Mod45Changed                (1 << 8) /* Mod45 bit has changed. Error setr to force reset. */
88643 +#define LS_PAckNotSeenError    (1 << 9) /* PAck value not returned for this packet. */
88644 +
88645 +/*
88646 + * Link State Constant defines, used for writing to LinkSetValue
88647 + */
88648 +
88649 +#define LRS_DataDel0           0x0
88650 +#define LRS_DataDel1           0x1
88651 +#define LRS_DataDel2           0x2
88652 +#define LRS_DataDel3           0x3
88653 +#define LRS_DataDel4           0x4
88654 +#define LRS_DataDel5           0x5
88655 +#define LRS_DataDel6           0x6
88656 +#define LRS_DataDel7           0x7
88657 +#define LRS_DataDel8           0x8
88658 +#define LRS_LinkInValue                0x9
88659 +#define LRS_PllDelValue                0xA
88660 +#define LRS_ClockEven          0xB
88661 +#define LRS_ErrorVal8to0       0xC
88662 +#define LRS_ErrorVal17to9      0xD
88663 +#define LRS_ErrorVal26to18     0xE
88664 +#define LRS_ErrorVal35to27     0xF
88665 +#define LRS_NumLinkDels         0x10
88666 +
88667 +#define LRS_Pllfast             0x40
88668 +
88669 +typedef struct _E4_CommandControl
88670 +{
88671 +    volatile E4_uint32 CommandQueueDescsBase;
88672 +    volatile E4_uint32 CommandRequeuePtr;
88673 +} E4_CommandControl;
88674 +
88675 +#define E4_CommandRequeueBusy          0x80000000      /* Test against read value of CommandRequeuePtr */
88676 +#define E4_CommandRequeueHighPri       0x1             /* Will requeue onto the high pri queue */
88677 +#define E4_QueueDescPtrMask            0x7fffffe0
88678 +
88679 +typedef struct _E4_CommandQueueDesc
88680 +{
88681 +    E4_uint64 CQ_QueuePtrs;
88682 +    E4_uint64 CQ_HoldingValue;         /* 32 bit value for 32 bit accesses or OutOfOrderMask*/
88683 +    E4_uint64 CQ_AckBuffers;           /* Space for 32 4 bit ack buffer values. */
88684 +    E4_uint64 CQ_Control;
88685 +} E4_CommandQueueDesc;
88686 +
88687 +/*
88688 + * Rev A - CQ_QueuePtrs
88689 + * [63]                Unused          Should be set to zero.
88690 + * [62:51]     Unused          (reads as top of InsertPtr)
88691 + * [50:35]     CompletedPtr    Completed pointer. This is aligned to a byte address.
88692 + * [34]                Trapped         Will be set if the command has trapped.
88693 + * [33:32]     Size            Size of queue.
88694 + * [31]                Used            Will be set if the descriptor has been changed and written back by the elan.
88695 + * [30:3]      InsertPtr       Insert pointer. This is aligned to a byte address.
88696 + * [2]         TimedOut        Will be set if the queue timedout executing a command.
88697 + * [1]         Priority        When set the queue runs at high priority.
88698 + * [0]         Error           If this becomes set, all new data written to the queue is discarded.
88699 + *
88700 + * Rev B - CQ_QueuePtrs
88701 + * [63]                TimedOut        Will be set if the queue timedout executing a command.
88702 + * [62]                Priority        When set the queue runs at high priority.
88703 + * [61]                QueueType       1=will accept unordered 64 bit PCI writes. 0=will accept ordered 32 or 64 bit PCI writes.
88704 + * [60:51]     Unused          (reads as top of InsertPtr)
88705 + * [50:35]     CompletedPtr    Completed pointer. This is aligned to a byte address.
88706 + * [34]                Trapped         Will be set if the command has trapped.
88707 + * [33:32]     Size            Size of queue.
88708 + * [31]                Used            Will be set if the descriptor has been changed and written back by the elan.
88709 + * [30:3]      InsertPtr       Insert pointer. This is aligned to a byte address.
88710 + * [2]         OrderControl    Holds bit 8 of last PCI accesses. Used by a reordering queue.
88711 + * [1:0]       ErrorType       This field has the current error status of the queue.
88712 + */
88713 +
88714 +/* Common between revA and RevB */
88715 +#define CQ_PtrMask             (0x7ffffff8)                    /* 31 bit sdram address */
88716 +#define CQ_PtrOffsetMask       (0x7fff8)
88717 +#define CQ_PtrBaseMask         (0x7ff80000)
88718 +
88719 +#define CQ_InsertPtrShift      (3 - 3)                         /* InsertPtr is 64 bit aligned */
88720 +#define CQ_SizeShift           (32)
88721 +#  define CQ_Size1K            0
88722 +#  define CQ_Size8K            1
88723 +#  define CQ_Size64K           2
88724 +#  define CQ_Size512K          3
88725 +#  define CQ_SizeMask          3
88726 +
88727 +#define CQ_CompletedPtrShift   (35 - 3)                        /* CompletedPtr is 64 but aligned */
88728 +
88729 +#define CQ_Used                        (1ull << 31)
88730 +#define CQ_Trapped             (1ull << 34)
88731 +
88732 +#define CQ_QueuePtrsValue(Size,Inserter,Completer) \
88733 +       (((E4_uint64) (Size) << CQ_SizeShift) | \
88734 +        ((E4_uint64) (Inserter) << CQ_InsertPtrShift) | \
88735 +        ((E4_uint64) (Completer) << CQ_CompletedPtrShift))
88736 +
88737 +#define CQ_InsertPtr(QueuePtrs) \
88738 +       (((E4_uint64) QueuePtrs) & CQ_PtrMask)
88739 +
88740 +#define CQ_CompletedPtr(QueuePtrs) \
88741 +       (((E4_uint32)((QueuePtrs) >> CQ_CompletedPtrShift) & CQ_PtrOffsetMask) | \
88742 +        (CQ_InsertPtr(QueuePtrs) & CQ_PtrBaseMask))
88743 +
88744 +#define CQ_Size(SizeVal)               (1024 * (1 << ((SizeVal)*3)))
88745 +
88746 +/* Rev A specific */
88747 +#define CQ_RevA_Error                  (1 << 0)
88748 +#define CQ_RevA_Priority               (1 << 1)
88749 +#define CQ_RevA_TimedOut               (1 << 2)
88750 +
88751 +/* Rev B specific */
88752 +#define CQ_RevB_ErrorType(QueuePtr)    ((QueuePtr) & (3 << 0))
88753 +#  define CQ_RevB_NoError              (0ull << 0)
88754 +#  define CQ_RevB_Overflowed           (1ull << 0)
88755 +#  define CQ_RevB_InvalidWriteSize     (2ull << 0)
88756 +#  define CQ_RevB_InvalidWriteOrder    (3ull << 0)
88757 +#define CQ_RevB_OrderControl           (1ull << 2)
88758 +
88759 +#define CQ_RevB_QueueType(QueuePtr)    ((QueuePtr) & (1ull << 61))
88760 +#  define CQ_RevB_ReorderingQueue      (1ull << 61)
88761 +#  define CQ_RevB_32bitWriteQueue      (0ull << 61)
88762 +
88763 +#define CQ_RevB_Priority               (1ull << 62)
88764 +#define CQ_RevB_TimedOut               (1ull << 63)    /* was (1ull << 62), colliding with CQ_RevB_Priority; Rev B CQ_QueuePtrs layout places TimedOut at bit 63 */
88765 +
88766 +/* 
88767 + * CQ_AckBuffers - Packet Ack Values
88768 + */
88769 +#define PackOk                 (0x0)
88770 +#define PackTestFail           (0x1)
88771 +#define PackDiscard            (0x2)
88772 +#define PackError              (0x7)
88773 +#define PackTimeout            (0x8)
88774 +#define PackWaiting            (0xF)
88775 +#define PackValue(val,chan)    (((val) >> ((chan) * 4)) & 0xf)
88776 +
88777 +/*
88778 + * CQ_Control
88779 + * [63:35]     ExtractPtr
88780 + * [34]                Unused
88781 + * [33:32]     ChannelNotCompleted
88782 + * [31:24]     Permissions
88783 + * [23:16]     RestartCount            Decremented after each restart. Will trap when zero
88784 + * [15:14]     Unused                  Should be set to zero
88785 + * [13:0]      Context
88786 + */
88787 +#define CQ_Context(Control)            ((E4_uint32) ((Control) >>  0) & 0x3fff)
88788 +#define CQ_RestartCount(Control)       ((E4_uint32) ((Control) >> 16) & 0x7f)
88789 +#define CQ_ChannelNotCompleted(Control)        ((E4_uint32) ((Control) >> 32) & 3)
88790 +#define CQ_ExtractPtr(Control)         ((E4_uint32) ((Control) >> 32) & 0xFFFFFFF8)
88791 +
88792 +#define CQ_RestartCountShift           16
88793 +
88794 +#define CQ_SetEventEnableBit   (1 << 24)
88795 +#define CQ_WaitEventEnableBit  (1 << 25)
88796 +#define CQ_ModifyEnableBit     (1 << 26)
88797 +#define CQ_WriteEnableBit      (1 << 27)
88798 +#define CQ_ThreadStartEnableBit        (1 << 28)
88799 +#define CQ_DmaStartEnableBit   (1 << 29)
88800 +#define CQ_STENEnableBit       (1 << 30)
88801 +#define CQ_InterruptEnableBit  (1 << 31)
88802 +#define CQ_EnableAllBits        (0xFF000000)
88803 +#define CQ_PermissionMask      (0xFF000000)
88804 +
88805 +#define CQ_ControlValue(Cntx, RestartCount, Permissions) \
88806 +       (((Cntx) & 0x3fff) | (((RestartCount) & 0xff) << 16) | ((Permissions) & CQ_PermissionMask))
88807 +
88808 +/*
88809 + * This file describes the slave address map of Elan4.
88810 + *
88811 + * Elan4 has two PCI 64 bit base address registers. One is setup for elan
88812 + * local memory and the other is for the command port, elan registers and ebus.
88813 + *
88814 + * This file describes the command port, elan registers and ebus BAR. This is a
88815 + * 26 bit base address register and is split up as follows:
88816 + * 1 The ebus requires 21 bits of address. 26'h3e00000 to 26'h3ffffff
88817 + * 2 The control registers require 16 bits of address. 26'h3df0000 to 26'h3dfffff
88818 + * 3 The command port has the rest. This give just under 8k command ports or about 123 per
88819 + *   processor of a 64 node SMP.
88820 + */
88821 +
88822 +/* BAR1 contains the command queues followed by the registers and the Ebus - and is 26 bits */
88823 +/* each command queue has an 8K page associated with it */
88824 +#define CQ_CommandMappingSize          (1 << 13)
88825 +#define CQ_NumCommandDescs             ((1 << (26 - 13)))
88826 +#define CQ_CommandDescsAlignment       ((1 << (26 - 13)) * sizeof (E4_CommandQueueDesc))
88827 +
88828 +/* control reg bits i.e. E4_DataBusMap.SysControlReg */
88829 +#define CONT_EN_ALL_SETS               (1ULL << 0) /* enable cache */
88830 +#define CONT_MMU_ENABLE                        (1ULL << 1) /* bit 0 enables mmu */
88831 +#define CONT_CACHE_HASH_TABLE          (1ULL << 2) /* cache up hash table entries */
88832 +#define CONT_CACHE_CHAINS              (1ULL << 3) /* cache up chain entries */
88833 +#define CONT_CACHE_ROOT_CNTX           (1ULL << 4) /* cache root context table for routes and filters. */
88834 +#define CONT_CACHE_STEN_ROUTES         (1ULL << 5) /* cache up sten packet routes */
88835 +#define CONT_CACHE_DMA_ROUTES          (1ULL << 6) /* cache up dma packet routes */
88836 +
88837 +#define CONT_CACHE_NONE                0ULL
88838 +#define CONT_CACHE_ALL         (CONT_CACHE_HASH_TABLE | CONT_CACHE_CHAINS | CONT_CACHE_ROOT_CNTX | \
88839 +                                CONT_CACHE_STEN_ROUTES | CONT_CACHE_DMA_ROUTES)
88840 +
88841 +/* This controls the format size and position of the MMU hash tables. */
88842 +#define CONT_INHIBIT_MAX_CHAIN_ITEMS   (1ULL << 7)     /* Prevents the MaxChainItems value of 1024 from forcing a translation miss */
88843 +#define CONT_TABLE0_MASK_SIZE_SHIFT    8               /* Defines the size of hash table 0 */
88844 +#define CONT_TABLE0_PAGE_SIZE_SHIFT    13              /* Set the page size for hash table 0 */
88845 +#define CONT_TABLE1_MASK_SIZE_SHIFT    16              /* Defines the size of hash table 1 */
88846 +#define CONT_TABLE1_PAGE_SIZE_SHIFT    21              /* Set the page size for hash table 1 */
88847 +#define CONT_TWO_HASH_TABLES           (1ULL << 24)    /* Sets the MMU to use two hash tables. If not set only 0 used. */
88848 +#define CONT_2K_NOT_1K_DMA_PACKETS     (1ULL << 25)    /* Used to select the default DMA packet size. */
88849 +#define CONT_ALIGN_ALL_DMA_PACKETS     (1ULL << 26)    /* Will force all dma packets to be aligned to a page.*/
88850 +#define CONT_DIRECT_MAP_PCI_WRITES     (1ULL << 27)    /* Will force pci writes to write and flush the dcache.*/
88851 +#define CONT_TLB_FLUSH                 (1ULL << 28)    /* Invalidates the TLB and indicates when flushed */
88852 +#define CONT_CLEAR_WALK_WROTE_TABLES   (1ULL << 29)    /* Used to guarantee that the elan is using new PTE values. */
88853 +#define CONT_ROUTE_FLUSH               (1ULL << 30)    /* Invalidates all route cache entries. */
88854 +#define CONT_CLEAR_LINKPORT_INT                (1ULL << 31)    /* Clears the Linkport key fail interrupt. Reads as 0. */
88855 +#define CONT_CLEAR_SDRAM_ERROR         (1ULL << 32)    /* Clears an EEC error interrupt. Reads as 0. */
88856 +
88857 +/*
88858 + * These are extra control bits used for testing the DLLs of the SDRAM interface. Most of the Sdram
88859 + * control bits are defined in xsdram.h
88860 + */
88861 +#define SDRAM_FIXED_DLL_DELAY_SHIFT    47
88862 +#define SDRAM_FIXED_DLL_DELAY_BITS     5
88863 +#define SDRAM_FIXED_DLL_DELAY_MASK     ((1ULL << SDRAM_FIXED_DLL_DELAY_BITS) - 1ULL)
88864 +#define SDRAM_FIXED_DLL_DELAY(Value)   ((SDRAM_FIXED_DLL_DELAY_MASK & (Value)) << SDRAM_FIXED_DLL_DELAY_SHIFT)
88865 +#define SDRAM_FIXED_DELAY_ENABLE       (1ULL << 52)
88866 +#define SDRAM_GET_DLL_DELAY(Value)     (((Value) >> SDRAM_FIXED_DLL_DELAY_SHIFT) & SDRAM_FIXED_DLL_DELAY_MASK)
88867 +
88868 +#define SDRAM_166_DLL_CORRECTION_FACTOR        3       /* This is to allow for SSO and ringing on the DQ lines */
88869 +#define SDRAM_150_DLL_CORRECTION_FACTOR        2       /* This is to allow for SSO and ringing on the DQ lines */
88870 +
88871 +#define PAGE_SIZE_4K   0x0
88872 +#define PAGE_SIZE_8K   0x1
88873 +#define PAGE_SIZE_64K  0x2
88874 +#define PAGE_SIZE_512K 0x3
88875 +#define PAGE_SIZE_2M   0x4
88876 +#define PAGE_SIZE_4M   0x5
88877 +#define PAGE_SIZE_64M  0x6
88878 +#define PAGE_SIZE_512M 0x7
88879 +
88880 +#define PAGE_SIZE_MASK 0x7
88881 +#define PAGE_MASK_MASK 0x1f
88882 +
88883 +/* control reg bits i.e. E4_DataBusMap.LinkControlReg */
88884 +#define LCONT_REVA_GREEN_LED           (1 << 0)
88885 +#define LCONT_REVA_YELLOW_LED          (1 << 1)
88886 +#define LCONT_REVA_RED_LED             (1 << 2)
88887 +#define LCONT_REVA_ENABLE_LED_DRIVE    (1 << 3) /* Enable manual setting of the Leds to the bits set above. */
88888 +
88889 +#define LCONT_REVB_DISABLE_TLB_PREFETCH        (1 << 0)
88890 +#define LCONT_REVB_DISABLE_CRC_ERROR_CHECKING  (1 << 1)
88891 +
88892 +
88893 +#define LCONT_EN_SYS_WRITES            (1 << 4) /* Enable linkport writes to sys registers. i.e. all of E4_DataBusMap. */
88894 +#define LCONT_EN_SYS_READS             (1 << 5) /* Enable linkport reads from sys registers. i.e. all of E4_DataBusMap. */
88895 +#define LCONT_EN_USER_WRITES           (1 << 6) /* Enable linkport writes to user registers. i.e. all of E4_User_Regs. */
88896 +#define LCONT_EN_USER_READS            (1 << 7) /* Enable linkport reads from user registers. i.e. all of E4_User_Regs. */
88897 +
88898 +#define LCONT_TEST_VALUE_MASK          0x3ff    /* Value used for test writes and link boundary scan. */
88899 +#define LCONT_TEST_VALUE_SHIFT         8
88900 +#define LCONT_TEST_VALUE(Value)                ((LCONT_LINK_STATE_MASK & (Value)) << LCONT_TEST_VALUE_SHIFT)
88901 +
88902 +/*
88903 + * State read from LINK_STATE when TEST_VALUE is set to the following values.
88904 + * TEST_VALUE    LINK_STATE read       TEST_VALUE        LINK_STATE read
88905 + *    000     -   Data delay count 0      008       -  Data delay count 8
88906 + *    001     -   Data delay count 1      009       -  Link in value
88907 + *    002     -   Data delay count 2      00a       -  PLL delay
88908 + *    003     -   Data delay count 3      00b       -  Clock Delay
88909 + *    004     -   Data delay count 4      00c       ?  ErrorVal8to0
88910 + *    005     -   Data delay count 5      00d       ?  ErrorVal17to9
88911 + *    006     -   Data delay count 6      00e       ?  ErrorVal26to18
88912 + *    007     -   Data delay count 7      00f       ?  ErrorVal35to27
88913 + */
88914 +
88915 +#define LCONT_TEST_CONTROL_MASK                0x3     /* Selects and controls the action of the LINK_STATE value. */
88916 +#define LCONT_TEST_CONTROL_SHIFT       18
88917 +
88918 +#define LCONT_READ_ERRORS              0       /* {Mod45RequestChanged, FifoOverflowError, DataError, PhaseError,
88919 +                                                *      DeskewError, LockError, Locked, LinkNotReady} */
88920 +#define LCONT_READ_STATE               1       /* Read valus addressed by TEST_CONTROL value */
88921 +#define LCONT_FIX_LINK_DELAYS          2       /* Sets delays to TEST_CONTROL value */
88922 +#define LCONT_BOUNDARY_SCAN            3       /* Puts link into boundary scan. Outputs TEST_CONTROL value to link,
88923 +                                                * reads LINK_STATE from link. */ 
88924 +
88925 +#define LCONT_LINK_STATE_MASK          0x3ff   /* Read only */
88926 +#define LCONT_LINK_STATE_SHIFT         20      /* Read only */
88927 +#define LCONT_LINK_STATE(ControlRegValue)      (LCONT_LINK_STATE_MASK & ((ControlRegValue) >> LCONT_LINK_STATE_SHIFT))
88928 +
88929 +/* control reg bits i.e. E4_DataBusMap.LinkContSettings */
88930 +#define LCONT_MOD45_DISABLE            (1 << 0) /* is set the link will try to run in TNB mode. */
88931 +#define LCONT_CONFIG_PHASE_MASK                0x7     /* This set the delay through the phase alignment buffer. */
88932 +#define LCONT_CONFIG_PHASE_SHIFT       1
88933 +
88934 +#define LCONT_PLL_REF_VAL_BITS_MASK    0x7f    /* This is the divide value on the LinkIn clock to form the comms PLL */
88935 +#define LCONT_PLL_REF_VAL_BITS_SHIFT   4       /* reference clock. Div value is (n - 2). e.g. to Divide by 7 set to 5. */
88936 +
88937 +#define LCONT_FORCE_COMMSCLK_LOCAL     (1 << 11) /* This must be set at one end of a back to back Elan configuration. */
88938 +#define LCONT_LVDS_VOLTAGE_BITS_MASK   0x3     /* This is used to set the voltage swing on the LVDS link output pads. */
88939 +#define LCONT_LVDS_VOLTAGE_BITS_SHIFT  12      /* reference clock. Div value is (n - 2). e.g. to Divide by 7 set to 5. */
88940 +
88941 +#define LCONT_VOD_170                  0       /* Approximate differential voltage swing in mV of link outputs into */
88942 +#define LCONT_VOD_360                  1       /* a 100 ohm diferential load. */
88943 +#define LCONT_VOD_460                  2
88944 +#define LCONT_VOD_550                  3
88945 +
88946 +#define LCONT_LVDS_TERMINATION_MASK    0x3     /* This set the resistor values of the internal single ended termation */
88947 +#define LCONT_LVDS_TERMINATION_SHIFT   14      /* resistors of the link input and comms input clcok. */
88948 +
88949 +#define LCONT_TERM_55_OHM              0       /* Resistor values for internal termination of LVDS pads. */
88950 +#define LCONT_TERM_50_OHM              1
88951 +#define LCONT_TERM_AUTO_OHM            2       /* Should normally be set to auto. */
88952 +#define LCONT_TERM_45_OHM              3
88953 +
88954 +#define LCONT_LVDS_EN_TERM_UPDATE      (1 << 47) /* This should be asserted and deasserted if LCONT_LVDS_TERMINATION is changed. */
88955 +
88956 +/* Macros used to access and construct MMU hash table and chain entries. */
88957 +/*
88958 + * Each hash entry is made up of a 64 byte block. Each entry hash two tags where each
88959 + * tag has 4 PTE's. PTE's 0 to 2 use the bottom 48 bits of a 64 bit word and PTE 3
88960 + * uses the top 16 bits of 3 64 bit words.
88961 + *
88962 + * These macros can be used to build a single PTE. PTE3 needs to be built into a 48 bit
88963 + * object before they can be used.
88964 + */
88965 +#define PTE_ENTRY_MASK         0x0000ffffffffffffULL
88966 +#define PTE_TYPE_MASK          0x000000000000000fULL   
88967 +#define PTE_PERM_MASK          0x00000000000000f0ULL
88968 +#define PTE_PERM_TYPE_MASK     0x00000000000000ffULL
88969 +#define PTE_REF_MASK           0x0000000000000100ULL
88970 +#define PTE_PPN_MASK           0x00007ffffffffe00ULL
88971 +#define PTE_MOD_MASK           0x0000800000000000ULL
88972 +#define PTE_TOPADDR_MASK       0x0000600000000000ULL
88973 +
88974 +#define PTE_MOD_SHIFT          47
88975 +#define PTE_PPN_SHIFT          9
88976 +#define PTE_REF_SHIFT          8
88977 +#define PTE_PERM_SHIFT         4
88978 +#define PTE_TYPE_SHIFT         0
88979 +
88980 +#define PTE_PADDR_SHIFT                (12 - 9)                /* Physical addresses are shifted down 3 this to go into the PTE */
88981 +
88982 +
88983 +/* Values required for tag 3 */
88984 +#define PTE_REF_3                      0x0100000000000000ULL
88985 +#define PTE_MOD_3                      0x8000000000000000ULL
88986 +#define PTE_ENTRY_MASK_3               0xffff000000000000ULL
88987 +#define PTE_PERM_TYPE_MASK_3           0x00ff000000000000ULL
88988 +#define PTE_ENTRY_3_FOR_0(NewPte)      ((NewPte << (48)) & PTE_ENTRY_MASK_3)
88989 +#define PTE_ENTRY_3_FOR_1(NewPte)      ((NewPte << (32)) & PTE_ENTRY_MASK_3)
88990 +#define PTE_ENTRY_3_FOR_2(NewPte)      ((NewPte << (16)) & PTE_ENTRY_MASK_3)
88991 +
88992 +/* Values required for the tags */
88993 +#define TAG_CONTEXT_MASK               0x0000000000003fffULL
88994 +#define TAG_ADDRESS_MASK               0xfffffffff8000000ULL
88995 +#define TAG_CHAINPTR_18TO6_MASK                0x0000000007ffc000ULL
88996 +#define TAG_CHAINPTR_LOW_SHIFT         (14 - 6)
88997 +#define TAG_CHAINPTR_30TO19_MASK       0x0000000003ffc000ULL
88998 +#define TAG_CHAINPTR_HIGH_SHIFT                (19 - 14)
88999 +#define TAG_COPY_BIT                   0x0000000004000000ULL
89000 +
89001 +/*
89002 + * This takes number loaded into the control register and returns the page size as a power of two.
89003 + */
89004 +
89005 +#define E4_PAGE_SIZE_TABLE             E4_uint32 const PageSizeTable[] = {12, 13, 16, 19, 21, 22, 26, 29}
89006 +#define E4_PAGE_SIZE_TABLE_SIZE                (sizeof(PageSizeTable)/sizeof(PageSizeTable[0]))
89007 +
89008 +/*
89009 + * This macro generates a hash block index.
89010 + *
89011 + * Cntx                 This is the 14 bit context. It should not be larger than 14 bits.
89012 + * VAddr        This is the 64 bit virtual address. It does not require any masking and can be a byte address.
89013 + * PageSize     This is the value loaded into the control register for this hash table.
89014 + * HashTableMask This should be set to mask out upper bits past the end of the hash table.
89015 + */
89016 +#define E4MMU_SHIFT_ADDR(VAddr, Shift) \
89017 +    ((((E4_uint32)(VAddr)) >> (Shift)) | (((E4_uint32)((VAddr) >> 32)) << (32 - (Shift))))
89018 +
89019 +#define E4MMU_CONTEXT_SCRAMBLE(Cntx) \
89020 +             ((((Cntx) << 8) | ((Cntx) >> 6)) ^ (((Cntx) << 15) | ((Cntx) << 1)))
89021 +
89022 +#define E4MMU_HASH_INDEX(Cntx, VAddr, PageShift, HashTableMask)                \
89023 +           ((E4MMU_SHIFT_ADDR(VAddr, (PageShift) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(Cntx)) & (HashTableMask))
89024 +
89025 +#define E4MMU_TAG(vaddr,ctx)   (((vaddr) & TAG_ADDRESS_MASK) | ((ctx) & TAG_CONTEXT_MASK))
89026 +
89027 +#define E4MMU_TAG2VADDR(tag,hashidx,PageShift,HashTableMask)   \
89028 +               (((tag) & TAG_ADDRESS_MASK) | ((((hashidx) ^ E4MMU_CONTEXT_SCRAMBLE((tag) & TAG_CONTEXT_MASK)) & (HashTableMask)) << ((PageShift + 2))))
89029 +
89030 +/*
89031 + * Detailed bit descriptions for the tags and PTE's are better done with the macros
89032 + * defined above.
89033 + */
89034 +typedef struct _E4_HashTableEntry
89035 +{
89036 +   E4_uint64   Tag[2];
89037 +   E4_uint64   TagPTE[2][3];
89038 +} E4_HashTableEntry;
89039 +
89040 +#define E4MMU_TAG_OFFSET(tag)          ((tag) << 3)
89041 +#define E4MMU_PTE_LOW_OFFSET(tag,pte)  ((((tag)*3 + (pte) + 2) << 3))
89042 +#define E4MMU_PTE_HIGH_OFFSET(tag,pte) ((((tag)*3 + (pte) + 2) << 3) + 4)
89043 +#define E4MMU_PTE3_WORD0_OFFSET(tag)   ((((tag)*3 + 2) << 3) + 6)
89044 +#define E4MMU_PTE3_WORD1_OFFSET(tag)   ((((tag)*3 + 3) << 3) + 6)
89045 +#define E4MMU_PTE3_WORD2_OFFSET(tag)   ((((tag)*3 + 4) << 3) + 6)
89046 +
89047 +
89048 +/*
89049 + * Hash0AddrBits is the size of the hash table in bytes as a power of 2.
89050 + * e.g. 11 would give 32 hash entries where each entry is 64 bytes.
89051 + */
89052 +#define SETUP_HASH_TABLES(Hash0PageSize, Hash0AddrBits, Hash1PageSize, Hash1AddrBits)  \
89053 +                         (((Hash0PageSize) << CONT_TABLE0_PAGE_SIZE_SHIFT) |   \
89054 +                          ((Hash0AddrBits) << CONT_TABLE0_MASK_SIZE_SHIFT) |   \
89055 +                          ((Hash1PageSize) << CONT_TABLE1_PAGE_SIZE_SHIFT) |   \
89056 +                          ((Hash1AddrBits) << CONT_TABLE1_MASK_SIZE_SHIFT))
89057 +
89058 +/* ECC status register */
89059 +#define ECC_Addr(s)                    ((s) & 0x7ffffff8ULL)
89060 +#define ECC_Syndrome(s)                        (((s) >> 32) & 0xffffULL)
89061 +#define ECC_RisingDQSSyndrome(s)       (((s) >> 32) & 0xffULL)
89062 +#define ECC_FallingDQSSyndrome(s)      (((s) >> 40) & 0xffULL)
89063 +#define ECC_UncorrectableErr(s)        (((s) >> 48) & 1ULL)
89064 +#define ECC_MultUncorrectErrs(s)       (((s) >> 49) & 1ULL)
89065 +#define ECC_CorrectableErr(s)          (((s) >> 50) & 1ULL)
89066 +#define ECC_MultCorrectErrs(s)         (((s) >> 51) & 1ULL)
89067 +
89068 +/* Permission type saved in a PTE. This is a four bit field */
89069 +#define PERM_Disabled          0x0
89070 +#define PERM_Unused            0x1
89071 +#define PERM_LocDataRead       0x2
89072 +#define PERM_LocDataWrite      0x3
89073 +#define PERM_LocRead           0x4
89074 +#define PERM_LocExecute                0x5
89075 +#define PERM_ReadOnly          0x6
89076 +#define PERM_LocWrite          0x7
89077 +#define PERM_LocEventOnly      0x8
89078 +#define PERM_LocEventWrite     0x9
89079 +#define PERM_RemoteEvent       0xa
89080 +#define PERM_RemoteAll         0xb
89081 +#define PERM_RemoteReadOnly    0xc
89082 +#define PERM_RemoteWriteLocRead        0xd
89083 +#define PERM_DataReadWrite     0xe
89084 +#define PERM_NoFault           0xf
89085 +
89086 +#define PERM_Mask              0xf
89087 +
89088 +/* Permission type hints to device driver */
89089 +#define PERM_Preload           0x10
89090 +
89091 +#define PTE_SetPerm(Perm)      (((Perm) & PERM_Mask) << 4)
89092 +
89093 +/* Control info saved in the lookup field of the TLB */
89094 +#define PTE_PciNotLocal                (1ULL << 0)             /* Directs the access to the PCI interface */
89095 +#define PTE_BigEndian          (1ULL << 1)             /* Valid for PCI entries only */
89096 +#define PTE_RelaxedOrder       (1ULL << 2)             /* Valid for PCI entries only */
89097 +#define PTE_DontSnoop          (1ULL << 3)             /* Valid for PCI entries only */
89098 +
89099 +#define PTE_UseFixedSet                (1ULL << 1)             /* Value for non PCI entries only */
89100 +#define PTE_CommandQueue       (1ULL << 2)             /* Value for non PCI entries only */
89101 +#define PTE_SetFixedSetNo(Set) ((((Set) & 3) << 2) | PTE_UseFixedSet)
89102 +
89103 +#define PTE_TypeBitsMask       (0xfULL)
89104 +#define PTE_PermissionTypeMask (0xfULL << 4)
89105 +#define PTE_Referenced         (1ULL << 8)
89106 +#define PTE_PhysicalPageNoMask (0x7ffffffffe00ULL)
89107 +#define PTE_Modified           (1ULL << 47)
89108 +
89109 +#define PTE_PhysicalAddrShiftIntoPTE   (12 - 9)
89110 +
89111 +/* define page table entry bit fields */
89112 +#define TLB_PageSizeBits       (3 << 0)
89113 +#define TLB_ACCBits            (7 << 2)
89114 +#define TLB_LocalBit           (1 << 5)
89115 +#define TLB_PCI64BitTargetBit  (1 << 6)
89116 +#define TLB_PCIBigEndianBit    (1 << 7)
89117 +
89118 +#define TLB_ModifiedBit                (1 << 55)
89119 +#define TLB_ReferencedBit      (1 << 63)
89120 +
89121 +/* Used to read values from the tlb. */
89122 +#define TLB_TlbReadCntBitsSh   56
89123 +#define TLB_UseSelAddrSh       (1ULL << 60)
89124 +#define TLB_WriteTlbLine       (1ULL << 61)
89125 +
89126 +#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \
89127 +                             ((E4_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh))
89128 +
89129 +#define TLB_NUM_ENTRIES                16
89130 +/*
89131 + * The following macros are used with the test access port (TlbLineValue) for the TLBs.
89132 + */
89133 +#define TLV_DoPciAccess                        (1ULL << 0)
89134 +#define TLV_CommandAccess              (1ULL << 1)
89135 +#define TLV_DoCacheAccess              (1ULL << 2)
89136 +#define TLV_notStartTLBWalk            (1ULL << 3)
89137 +#define TLV_UseFixedSet                        (1ULL << 4)
89138 +#define TLV_BigEndian                  (1ULL << 4)
89139 +#define TLV_RelaxedOrder               (1ULL << 5)
89140 +#define TLV_DontSnoop                  (1ULL << 6)
89141 +#define TLV_FixedSetNo_MASK            (3ULL << 5)
89142 +#define TLV_PciTypeBits_MASK           (7ULL << 4)
89143 +#define TLV_LookupBits_MASK            (0x7fULL)
89144 +#define TLV_MissErr                    (1ULL << 7)
89145 +#define TLV_TypeBits                   (0xffULL)
89146 +
89147 +#define TLV_PhysicalAddr_MASK          (0x3fffffffff000ULL)
89148 +
89149 +#define TLV_TlbTesting                 (1ULL << 51)
89150 +#define TLV_SelectUnitsTlbRead         (1ULL << 52)
89151 +#define TLV_SelectTProcTlbRead         (1ULL << 53)
89152 +
89153 +#define TLV_TlbLineSelect_MASK         (0xf)
89154 +#define TLV_UnitsTlbLineSelect_SHIFT   (54)
89155 +#define TLV_TProcTlbLineSelect_SHIFT   (59)
89156 +#define TLV_EnableUnitsTlbRead         (1ULL << 58)
89157 +#define TLV_EnableTProcTlbRead         (1ULL << 63)
89158 +
89159 +/*
89160 + * Use this macro to enable direct testing of the Units TLB.
89161 + * When Line is in the range 0 to 15 a TLB line is selected for reading or writing.
89162 + * When Line is set to -1 the tlb will be activated to perform a match.
89163 + */
89164 +#define TLV_UnitsTlbLineSel(Line) (((Line) == -1) ? 0ULL : \
89165 +    (TLV_EnableUnitsTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_UnitsTlbLineSelect_SHIFT)))
89166 +#define TLV_TProcTlbLineSel(Line) (((Line) == -1) ? 0ULL : \
89167 +    (TLV_EnableTProcTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_TProcTlbLineSelect_SHIFT)))
89168
89169 +/* 
89170 + * Thread_Trap_State
89171 + *  see f_RegFileControl.v TProcStatus
89172 + */
89173 +#define TS_HaltThread                (1 << 0)
89174 +#define TS_TrapForTooManyInstructions (1 << 1)
89175 +#define TS_InstAccessException       (1 << 2)
89176 +#define TS_Unimplemented             (1 << 3)
89177 +#define TS_DataAccessException       (1 << 4)
89178 +#define TS_DataAlignmentError        (1 << 5)
89179 +#define TS_TrapForUsingBadData       (1 << 6)
89180 +#define TS_TrapTypeMask                      (0x7f)
89181 +#define TS_DataPortNo(ts)            (((ts) >> 7) & 7)
89182 +#define TS_TrappedFlag               (1 << 10)
89183 +#define TS_MemLock                   (1 << 11)
89184 +#define TS_XCCshift                  12
89185 +#define TS_XCCmask                   0xff
89186 +#define TS_ICC(ts)                   (((ts) >> 12) & 15)
89187 +#define TS_XCC(ts)                   (((ts) >> 16) & 15)
89188 +#define TS_InstValid_F               (1 << 20)
89189 +#define TS_InstValid_R               (1 << 21)
89190 +#define TS_InstValid_E               (1 << 22)
89191 +#define TS_InstValid_W               (1 << 23)
89192 +#define TS_HighPriority                      (1 << 24)
89193 +#define TS_RemoteThread                      (1 << 25)
89194 +#define TS_TProcTranslationInProgress (1 << 26)
89195 +#define TS_MemLock_E                 (1 << 27)
89196 +
89197 +/* Thread run queue entries */
89198 +typedef struct E4_ThreadRegs
89199 +{
89200 +    E4_uint64 Registers[7];
89201 +} E4_ThreadRegs;
89202 +
89203 +typedef struct E4_TProcQueueEntry
89204 +{
89205 +    E4_ThreadRegs      Regs;                   /* XXXX: jon check this */
89206 +    E4_uint64          Context;                /* XXXX: jon check this */
89207 +} E4_TProcQueueEntry;
89208 +
89209 +typedef struct E4_DProcQueueEntry
89210 +{
89211 +    E4_DMA             Desc;
89212 +    E4_uint64          Pad;
89213 +} E4_DProcQueueEntry;
89214 +
89215 +/*
89216 + * Packet acknowledge values.
89217 + */
89218 +#define E4_PAckOk      0
89219 +#define E4_PAckTestFail        1
89220 +#define E4_PAckDiscard 2
89221 +#define E4_PAckError   3
89222 +
89223 +/*
89224 + * return values from breaktest instruction.
89225 + */
89226 +#define ICC_CARRY_BIT           (0x1ULL << 0)  /* Breaktest: Load pending         */
89227 +#define ICC_ZERO_BIT            (0x1ULL << 1)  /* Breaktest: Time to break        */
89228 +#define ICC_SIGNED_BIT          (0x1ULL << 2)  /* Breaktest: Another thread ready */
89229 +#define ICC_TPROC_RDY_LOW_PRI   (0x1ULL << 3)
89230 +#define ICC_TPROC_RDY_HIGH_PRI  (0x1ULL << 4)
89231 +#define ICC_RUNNING_HIGH_PRI    (0x1ULL << 5)
89232 +#define ICC_RUNNING_AS_REMOTE   (0x1ULL << 6)
89233 +#define ICC_TIME_TO_BREAK       (0x1ULL << 7)
89234 +#define ICC_RS1LOAD_PENDING     (0x1ULL << 8)
89235 +#define ICC_TPROC_HALT          (0x1ULL << 9)
89236 +
89237 +/*
89238 + * Main Interrupt cookies
89239 + * [63:14]     user cookie
89240 + * [13:0]      context
89241 + */
89242 +#define E4_MAIN_INT_SHIFT              14
89243 +#define E4_MAIN_INT_COOKIE(cookie)     ((cookie) >> E4_MAIN_INT_SHIFT)
89244 +#define E4_MAIN_INT_CTX(cookie)                ((cookie) & 0x3FFF)
89245 +
89246 +typedef E4_uint64 E4_MainIntEntry;
89247 +
89248 +#define E4_MainIntEntrySize    sizeof (E4_MainIntEntry)
89249 +
89250 +/*
89251 + * The internal databus is 64 bits wide.
89252 + * All writes to the internal registers MUST be made with 64 bit write operations.
89253 + * These can be made up of pairs 32 bit writes on the PCI bus. The writes will be
89254 + * treated as nops if they are performed with two separate 32 bit writes.
89255 + */
89256 +typedef volatile struct _E4_DataBusMap
89257 +{
89258 +   E4_uint64           InputTrans[4][16];                                                                      /* 0x000 */
89259 +
89260 +   E4_uint64           Dma0TransAddr;                                                                          /* 0x200 */
89261 +   E4_DMA              Dma0Desc;       /* Current Dma0 registers */                                            /* 0x208 */
89262 +
89263 +   E4_uint64           Dma1TransAddr;                                                                          /* 0x240 */
89264 +   E4_DMA              Dma1Desc;       /* Current Dma1 registers */                                            /* 0x248 */
89265 +  
89266 +   E4_uint64           Dma0LastPacketSize;                                                                     /* 0x280 */
89267 +   E4_uint64           Dma0ThisPacketSize;                                                                     /* 0x288 */
89268 +   E4_uint64           Dma0DescSizeInProg;                                                                     /* 0x290 */
89269 +   E4_uint64           Dma0BytesToPrefetch;                                                                    /* 0x298 */
89270 +   E4_uint64           Dma0PrefetchAddr;                                                                       /* 0x2a0 */
89271 +   E4_uint64           EventCountAndType;                                                                      /* 0x2a8 */
89272 +   E4_uint64           EventParameters[2];                                                                     /* 0x2b0 */
89273 +  
89274 +   E4_uint64           Dma1LastPacketSize;                                                                     /* 0x2c0 */
89275 +   E4_uint64           Dma1ThisPacketSize;                                                                     /* 0x2c8 */
89276 +   E4_uint64           Dma1DescSizeInProg;                                                                     /* 0x2d0 */
89277 +   E4_uint64           Dma1BytesToPrefetch;                                                                    /* 0x2d8 */
89278 +   E4_uint64           Dma1PrefetchAddr;                                                                       /* 0x2e0 */
89279 +   E4_Input_Ptrs       InputTrapAndFilter;                                                                     /* 0x2e8 */
89280 +   E4_uint64           EventAddress;                                                                           /* 0x2f0 */
89281 +   E4_QueuePtr         MainIntQueuePtrs;                                                                       /* 0x2f8 */
89282 +   
89283 +   E4_uint64           Event_Copy[16];                                                                         /* 0x300 */
89284 +
89285 +   E4_uint64           CommandCopy[7];                                                                         /* 0x380 */
89286 +   E4_uint64           CommandHold;                                                                            /* 0x3b8 */
89287 +
89288 +   E4_uint64           InputQueueDesc[4];                                                                      /* 0x3c0 */
89289 +
89290 +   /* Run queue Pointers */
89291 +   E4_uint64           DProcLowPriPtrs;                                                                        /* 0x3e0 */
89292 +   E4_uint64           DProcHighPriPtrs;                                                                       /* 0x3e8 */
89293 +   E4_uint64           TProcLowPriPtrs;                                                                        /* 0x3f0 */
89294 +   E4_uint64           TProcHighPriPtrs;                                                                       /* 0x3f8 */
89295 +
89296 +   E4_uint64           CProcStatus;                                                                            /* 0x400 */
89297 +   E4_uint64           TProcStatus;                                                                            /* 0x408 */
89298 +   E4_uint64           IProcStatus;                                                                            /* 0x410 */
89299 +   E4_uint64           EProcStatus;                                                                            /* 0x418 */
89300 +   E4_uint64           DProc0Status;                                                                           /* 0x420 */
89301 +   E4_uint64           DProc1Status;                                                                           /* 0x428 */
89302 +   E4_Sched_Status     SchedStatus;                                                                            /* 0x430 */
89303 +
89304 +   E4_uint64           LoadIProcCntxFilter;    /* Will load one of 4 cntx filter regs. Write only */           /* 0x438 */
89305 +
89306 +   E4_CommandControl   CommandControl;                                                                         /* 0x440 */
89307 +   E4_uint64           CommandCacheTestPort;                                                                   /* 0x448 */
89308 +   E4_uint64           CommandLowPriRunPtrs;                                                                   /* 0x450 */
89309 +   E4_uint64           CommandHighPriRunPtrs;                                                                  /* 0x458 */
89310 +   E4_uint64           CommandSchedDataPort[4];                                                                /* 0x460 */
89311 +
89312 +   E4_uint64           DmaRouteBuffer[2][2];   /* Write only. Should not be written to. */                     /* 0x480 */
89313 +   E4_uint64           StenRouteBuffer[2];     /* Write only. Should not be written to. */                     /* 0x4a0 */
89314 +   E4_uint64           pad4[0x098 - 0x096];                                                                    /* 0x4b0 */
89315 +
89316 +   E4_uint64           DmaAlignmentPort[8];    /* Write only. Should only be written to clear the prev reg. */ /* 0x4c0 */
89317 +
89318 +   E4_uint64           MmuBlockEntry[8];       /* Used for hash table and chain fetches */                     /* 0x500 */
89319 +   E4_uint64           WriteUnitsTlbLine[3];                                                                   /* 0x540 */
89320 +   E4_uint64           pad5;                                                                                   /* 0x558 */
89321 +   E4_uint64           WriteTProcTlbLine[3];                                                                   /* 0x560 */
89322 +   E4_uint64           pad6;                                                                                   /* 0x578 */
89323 +
89324 +   E4_uint64           MmuTableBasePtrs;       /* Both tables packed into a single 64 bit value */             /* 0x580 */
89325 +   E4_uint64           MmuFaultAndRootCntxPtr; /* Both packed into a single 64 bit value */                    /* 0x588 */
89326 +   E4_uint64           UnitsVAddr;                                                                             /* 0x590 */
89327 +   E4_uint64           TProcVAddr;                                                                             /* 0x598 */
89328 +   E4_uint64           UnitsCntx;                                                                              /* 0x5a0 */
89329 +   E4_uint64           TProcCntx;              /* Read only. Writes access VProcCacheWritePort */              /* 0x5a8 */
89330 +   E4_uint64           FaultAddrReg;                                                                           /* 0x5b0 */
89331 +   E4_uint64           FaultTypeAndContextReg;                                                                 /* 0x5b8 */
89332 +
89333 +   E4_uint32           SysControlReg;                                                                          /* 0x5c0 */
89334 +   E4_uint32           CacheTagValue;                                                                          /* 0x5c4 */
89335 +   E4_uint64           TlbLineValue;                                                                           /* 0x5c8 */
89336 +   E4_uint64           SDRamConfigReg;                                                                         /* 0x5d0 */
89337 +   E4_uint32           InterruptMask;                                                                          /* 0x5d8 */
89338 +   E4_uint32           InterruptReg;                                                                           /* 0x5dc */
89339 +   E4_uint64           SDRamECCStatus;                                                                         /* 0x5e0 */
89340 +   E4_uint32           LinkControlReg;                                                                         /* 0x5e8 */
89341 +   E4_uint32           LinkContSettings;                                                                       /* 0x5ec */
89342 +   E4_uint64           LinkPortKey;                                                                            /* 0x5f0 */
89343 +   E4_uint64           LinkPortLock;                                                                           /* 0x5f8 */
89344 +
89345 +   E4_uint64           SDRamWriteBuffer[4][8];                                                                 /* 0x600 */
89346 +   E4_uint64           SDRamReadBuffer[4][8];                                                                  /* 0x700 */
89347 +
89348 +   E4_uint64           TProcRegs[64];                                                                          /* 0x800 */
89349 +   E4_uint64           TProcStartUp[8];        /* Not to be used except by the elan itself */                  /* 0xa00 */
89350 +
89351 +   E4_uint64           LoadPending;                                                                            /* 0xa40 */
89352 +   E4_uint64           StortPending;                                                                           /* 0xa48 */
89353 +   E4_uint64           DirtyBits;                                                                              /* 0xa50 */
89354 +   E4_uint64           BadBits;                                                                                /* 0xa58 */
89355 +
89356 +   E4_uint64           ICachePort_Cntl_Addr;                                                                   /* 0xa60 */
89357 +   E4_uint64           Thread_Trap_State;                                                                      /* 0xa68 */
89358 +
89359 +/* Instruction buffer (4 * 32 bit words) */
89360 +   E4_uint64           nPC_W;                                                                                  /* 0xa70 */
89361 +   E4_uint64           PC_W;                                                                                   /* 0xa78 */
89362 +
89363 +   E4_uint64           ICacheFillData[8];                                                                      /* 0xa80 */
89364 +   E4_uint64           ICachePort[8];                                                                          /* 0xac0 */
89365 +
89366 +   E4_uint64           PciDataBufs[4][8];                                                                      /* 0xb00 */
89367 +
89368 +   E4_uint64           CommandQueueBuffer[128];                                                                /* 0xc00 */
89369 +} E4_DataBusMap;
89370 +
89371 +/*
89372 + * These macros are used to setup the thread processor's ICache.
89373 + */
89374 +#define E4_ICacheTagAddrShift          6
89375 +#define E4_AccessICacheRams            1
89376 +#define E4_InvalidTagValue             0xffffffffffffffffULL
89377 +#define E4_ICacheSizeInBytes           (1024*16)
89378 +#define E4_ICacheLineSizeInBytes       (64)
89379 +#define E4_ICacheLines                 (E4_ICacheSizeInBytes/E4_ICacheLineSizeInBytes)
89380 +#define E4_ICachePortSize              ( (sizeof((E4_DataBusMap *) 0)->ICachePort) /   \
89381 +                                         (sizeof((E4_DataBusMap *) 0)->ICachePort[0]))
89382 +
89383 +#define E4_ICacheFixupInsn             0xc0b02f95ull           /* st1 [%r0 +  0xf95] */
89384 +#define E4_ICacheFixupAddr             0xf95ull
89385 +#define E4_ICacheFixupOffset           0xfc0
89386 +
89387 +/*
89388 + * Event interrupt
89389 + */
89390 +typedef volatile union _E4_EventInt
89391 +{
89392 +   E4_uint64    ForceAlign;
89393 +   struct {
89394 +       E4_uint32 IntCookie;
89395 +       E4_uint32 EventContext; /* Bits 16 to 28 */
89396 +    } s;
89397 +} E4_EventInt;
89398 +
89399 +/*
89400 + * The following are used to interpret a fault status register.
89401 + */
89402 +
89403 +/*
89404 + * FSR[14:0] - AccessType
89405 + *
89406 + * T = Type bit
89407 + * S = size bit. Size is in units of 64 bits or 8 bytes.
89408 + * E = Byte end pointer. Used to define the last written byte of the last 64 bits written.
89409 + * D = Data type bit. Used for endian conversion in the PCI interface.
89410 + * C = Used by the cache to decide if this access should allocate a cache line.
89411 + * d = Set if dma read or write data. This is used to guarantee order at the PCI interface.
89412 + * A = Access type used to check permissions by the MMU in a virtual access.
89413 + * P = Part Write. If set some byte enables may be used. Effects the action of a cache miss.
89414 + */
89415 +
89416 +/* FSR[7:0] */
89417 +/* bit 7 => virtual write */
89418 +#define AT_VirtualWriteAccBit          (1 << 7)                /* AAADDdC1EEESSSS = Virtual Write */
89419 +#define AT_VirtualWriteSizeMask                0xf                     /* size of write access (0 => 128 bytes) */
89420 +#define AT_VirtualWriteEndPtrShift     4                       /* end byte pointer for part write block */
89421 +#define AT_VirtualWriteEndPtrMask      0x7
89422 +
89423 +/* else bit 6 => virtual read */
89424 +#define AT_VirtualReadAccBit           (1 << 6)                /* AAADDdC01SSSSSS = Virtual Read */
89425 +#define AT_VirtualReadSizeMask         0x3f                    /* size of read access (0 => 512 bytes) */
89426 +
89427 +/* else => special access */
89428 +#define AT_SelBitsMask                 0xf                     /* Bits to select the type of access from */
89429 +#define AT_SelBitsShift                        0x4
89430 +#define AT_SpecialRd                   (0x0 << 4)              /* AAADDdC0000TTTT = Special read Access */
89431 +#define AT_SpecialWr                   (0x1 << 4)              /* AAADDdC0001TTTT = Special write Access */
89432 +#define AT_PhysicalRd                  (0x2 << 4)              /* AAADDdC00100SSS = Physical Read */
89433 +#define AT_PhysicalWr                  (0x3 << 4)              /* AAADDdC0011PSSS = Physical write */
89434 +
89435 +#define AT_OtherSizeMask               0xf                     /* Size bits used by all other accesses. 0=128 bytes */
89436 +#define AT_SpecialBitsMask             0xf                     /* Bits used to define the special access types */
89437 +#define AT_CacheSizeBitsMask           0x7                     /* Size bits used for local accesses. 0=64 */
89438 +#define AT_CachePhysPartWriteBit       0x8                     /* This bit is set if the access is a part write to the cache */
89439 +
89440 +/* Special memory access operations */
89441 +#define AT_RegAccess                   0x0
89442 +#define AT_GetCntxFilter               0xe                     /* Only used by special reads */
89443 +#define AT_RouteFetch                  0xf                     /* Only used by special reads */
89444 +
89445 +/* FSR[9:8] */
89446 +#define AT_NonAlloc                    (1 << 8)                /* 1=Do not fill cache with this data */
89447 +#define AT_DmaData                     (1 << 9)                /* This is a DMA read access. Required to guarantee dma read order. */
89448 +
89449 +/* FSR[11:10] - Data Type - defines data type for endian conversion in PCI interface*/
89450 +#define AT_BlkDataTyMask               0x3
89451 +#define AT_BlkDataTyShift              10
89452 +
89453 +#define AT_BlkDataType(FSR)            (((FSR) >> AT_BlkDataTyShift) & AT_BlkDataTyMask)
89454 +#define AT_TypeByte                    0x0
89455 +#define AT_TypeHWord                   0x1
89456 +#define AT_TypeWord                    0x2
89457 +#define AT_TypeDWord                   0x3
89458 +
89459 +/* FSR[14:12] - Access Permissions */
89460 +#define AT_PermBitsMask                        0x7
89461 +#define AT_PermBitsShift               12
89462 +
89463 +#define AT_Perm(FSR)                   (((FSR) >> AT_PermBitsShift) & AT_PermBitsMask)
89464 +#define AT_PermLocalDataRead           0x0
89465 +#define AT_PermLocalDataWrite          0x1
89466 +#define AT_PermRemoteRead              0x2
89467 +#define AT_PermRemoteWrite             0x3
89468 +#define AT_PermExecute                 0x4
89469 +#define AT_PermLocalEvent              0x5
89470 +#define AT_PermRemoteEvent             0x7
89471 +
89472 +/* FSR[22:15] - reason for fault */
89473 +
89474 +#define FSR_WalkForThread              (1 << 15) /* The thread processor caused the fault */
89475 +#define FSR_Walking                    (1 << 16) /* The fault was caused during a hash table access */
89476 +#define FSR_NoTranslationsFound                (1 << 17) /* The hash table did not contain a matching tag */
89477 +#define FSR_WalkingProtectionFault     (1 << 18) /* A protection fault was detected while walking */
89478 +#define FSR_HashTable1                 (1 << 19) /* Was accessing hash table 1 not 0 */
89479 +#define FSR_RouteVProcErr              (1 << 20) /* This is an invalid vproc for a route fetch */
89480 +#define FSR_FaultForBadData            (1 << 21) /* Bad data (double bit ECC error) while performing a walk access */
89481 +#define FSR_FaultForMaxChainCount      (1 << 22) /* The Elan4 has walked a chain of 1024 items. */
89482 +
89483 +typedef volatile struct _E4_FaultSave
89484 +{
89485 +    E4_uint64 FSRAndFaultContext;                 /* Bits 0-31 : FaultContext. Bits 32-63 : FaultStatus Register */
89486 +    E4_uint64 FaultAddress;
89487 +} E4_FaultSave;
89488 +
89489 +#define FaultSaveContext(FSRAndFaultContext)   ((E4_uint32) ((FSRAndFaultContext) & 0xFFFFFFFF))
89490 +#define FaultSaveFSR(FSRAndFaultContext)       ((E4_uint32) ((FSRAndFaultContext) >> 32))
89491 +
89492 +typedef union E4_TrTypeCntx
89493 +{
89494 +   E4_uint32 TypeContext;
89495 +   struct
89496 +   {
89497 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
89498 +      E4_uint32 Type:16;               /* Transaction type field */
89499 +      E4_uint32 Context:13;            /* Transaction context */
89500 +      E4_uint32 TypeCntxInvalid:1;     /* Bit  29 */
89501 +      E4_uint32 StatusRegValid:1;      /* Bit  30 */
89502 +      E4_uint32 LastTrappedTrans:1;    /* Bit  31 */
89503 +#else
89504 +      E4_uint32 LastTrappedTrans:1;    /* Bit  31 */
89505 +      E4_uint32 StatusRegValid:1;      /* Bit  30 */
89506 +      E4_uint32 TypeCntxInvalid:1;     /* Bit  29 */
89507 +      E4_uint32 Context:13;            /* Transaction context */
89508 +      E4_uint32 Type:16;               /* Transaction type field */
89509 +#endif
89510 +   } s;
89511 +} E4_TrTypeCntx;
89512 +
89513 +#define MAX_TRAPPED_TRANS      28
89514 +#define TRANS_DATA_DWORDS      16
89515 +#define TRANS_DATA_BYTES       128
89516 +#define NO_OF_INPUT_CHANNELS   4
89517 +
89518 +#define CH0_LOW_PRI_CHAN       0
89519 +#define CH1_LOW_PRI_CHAN       1
89520 +#define CH0_HIGH_PRI_CHAN      2
89521 +#define CH1_HIGH_PRI_CHAN      3
89522 +
89523 +/* Words have been swapped for big endian access when fetched with dword access from elan.*/
89524 +typedef struct _E4_IprocTrapHeader
89525 +{
89526 +   E4_uint64   TrAddr;
89527 +   E4_uint64   IProcStatusCntxAndTrType;
89528 +} E4_IprocTrapHeader;
89529 +
89530 +typedef struct _E4_IprocTrapData
89531 +{
89532 +   E4_uint64 Data[TRANS_DATA_DWORDS];
89533 +} E4_IprocTrapData;
89534 +
89535 +/*
89536 + * This struct defines the trap state for the inputers. It requires a contiguous 16K byte block of local memory.
89537 + * The channel bits have been grouped to the low end of the address to force all Identify cookies to use the
89538 + * same cache line.
89539 + */
89540 +typedef struct _E4_IprocTrapState
89541 +{
89542 +   E4_IprocTrapData   TrData[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS];
89543 +   E4_IprocTrapHeader TrHeader[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS];
89544 +   E4_uint64         pad[8*NO_OF_INPUT_CHANNELS];
89545 +} E4_IprocTrapState;
89546 +
89547 +/*
89548 + * 64 kbytes of elan local memory. Must be aligned on a 64k boundary
89549 + */
89550 +#define E4_LowPriQueueSize     0x400
89551 +#define E4_HighPriQueueSize    0x100
89552 +
89553 +typedef struct _E4_FaultSaveArea
89554 +{
89555 +   E4_FaultSave                TProcData[8];
89556 +   E4_FaultSave                TProcInst;
89557 +   E4_FaultSave                Dummy[7];
89558 +   E4_FaultSave                SchedProc;
89559 +   E4_FaultSave                DProc;
89560 +   E4_FaultSave                EventProc;
89561 +   E4_FaultSave                IProc;
89562 +   E4_FaultSave                DProcData[4];
89563 +   E4_FaultSave                QReadData[8];
89564 +} E4_FaultSaveArea;
89565 +
89566 +/* Macros to manipulate event queue pointers */
89567 +/*     generate index in EventIntQueue */
89568 +#define E4_EVENT_INTQ_INDEX(fptr)      (((fptr) & 0x1fff) >> 3)
89569 +/*     generate next fptr */
89570 +#define E4_EVENT_INTQ_NEXT(fptr)       ((((fptr) + 8) & ~0x4000) | 0x2000)
89571 +
89572 +typedef struct _E4_CommandPort
89573 +{
89574 +   volatile E4_uint64 Command[1024];   /* a whole 8k page */
89575 +} E4_CommandPort;
89576 +
89577 +/*
89578 + * This is the allocation of unit numbers within the ELAN. It is used to extract the fault address
89579 + * and fault type after a unit has trapped on a memory fetch. Only units that can generate traps
89580 + * have been included.
89581 + */
89582 +#define CUN_TProcData0         0x00
89583 +#define CUN_TProcData1         0x01
89584 +#define CUN_TProcData2         0x02
89585 +#define CUN_TProcData3         0x03
89586 +#define CUN_TProcData4         0x04
89587 +#define CUN_TProcData5         0x05
89588 +#define CUN_TProcData6         0x06
89589 +#define CUN_TProcData7         0x07
89590 +#define CUN_TProcInst          0x08
89591 +
89592 +/* memory current unit numbers
89593 + * TProc data bus */
89594 +#define CUN_DProcPA0           0x10
89595 +#define CUN_DProcPA1           0x11
89596 +#define CUN_DProcPrefetch      0x12
89597 +#define CUN_CommandProc                0x13
89598 +#define CUN_DProcData0         0x14    /* Dma prefetch reads. */
89599 +#define CUN_DProcData1         0x15    /* Dma prefetch reads. */
89600 +#define CUN_DProcData2         0x16    /* Dma prefetch reads. */
89601 +#define CUN_DProcData3         0x17    /* Dma prefetch reads. */
89602 +
89603 +#define CUN_IProcLowPri                0x18
89604 +#define CUN_IProcHighPri       0x19
89605 +#define CUN_Spare0             0x1A
89606 +#define CUN_Spare1             0x1B
89607 +#define CUN_Spare2             0x1C
89608 +#define CUN_ThreadQueue                0x1D
89609 +#define CUN_EventProc0         0x1e
89610 +#define CUN_EventProc1         0x1f
89611 +
89612 +#define CUN_Entries            0x20
89613 +
89614 +typedef struct E4_Registers
89615 +{
89616 +   E4_CacheTags                Tags;                           /* 4k bytes  c000 -> cfff */
89617 +   E4_DataBusMap       Regs;                           /* 4k bytes  d000 -> dfff */
89618 +   E4_User_Regs                uRegs;                          /* 8k bytes  e000 -> ffff */
89619 +} E4_Registers;
89620 +
89621 +#define I2cCntl_I2cPortWrite           (0 << 0)
89622 +#define I2cCntl_I2cPortRead            (1 << 0)
89623 +#define I2cCntl_I2cPortGenStopBit      (1 << 1)
89624 +#define I2cCntl_I2cPortGenRestartBit   (1 << 2)
89625 +#define I2cCntl_I2cPortAccFailed       (1 << 3)
89626 +#define I2cCntl_I2cStopped             (1 << 4)
89627 +#define I2cCntl_I2cWakeupFailed                (1 << 5)
89628 +#define I2cCntl_I2cFastMode            (1 << 6)
89629 +#define I2cCntl_I2cPortBusy            (1 << 7)
89630 +
89631 +#define I2cCntl_LedI2cRegBase_Mask     0x7f
89632 +#define I2cCntl_I2cUpdatingLedReg      (1 << 7)
89633 +
89634 +#define I2cCntl_InvertLedValues                (1 << 0)                /* read/write */
89635 +#define I2cCntl_LedRegWriteFailed      (1 << 1)                /* read only */
89636 +#define I2cCntl_EEPromLoadFailed       (1 << 2)                /* read only */
89637 +#define I2cCntl_InhibitI2CRom          (1 << 3)                /* read only */
89638 +#define I2cCntl_BadRomCrc              (1 << 4)                /* read only */
89639 +#define I2cCntl_MapInI2cConfigData     (1 << 5)                /* read/write */
89640 +#define I2cCntl_SampleNewLedValues     (1 << 6)                /* read/write */
89641 +#define I2cCntl_ClearLinkError         (1 << 7)                /* write only */
89642 +
89643 +typedef struct E4_I2C
89644 +{
89645 +   volatile E4_uint8    I2cWrData;
89646 +   volatile E4_uint8    I2cRdData;
89647 +   volatile E4_uint8    I2cPortControl;
89648 +   volatile E4_uint8   I2cLedBase;
89649 +   volatile E4_uint8    I2cStatus;
89650 +   volatile E4_uint8    I2cLedsValue;
89651 +   volatile E4_uint16  I2cPad;
89652
89653 +   E4_uint8            pad[256 - sizeof(E4_uint64)];
89654 +
89655 +   E4_uint8            UnchangedElan4ConfigRegs[256];
89656 +   E4_uint8            I2cRomConfigShadowValues[256];
89657 +   E4_uint8            ChangedElan4ConfigRegs[256];
89658 +} E4_I2C;
89659 +
89660 +typedef struct _E4_ContextControlBlock 
89661 +{
89662 +    E4_uint32 Filter;                  /* Use a Network context to index for this value */
89663 +    E4_uint32 VirtualProcessTable;     /* Use a local context to index for this value */
89664 +} E4_ContextControlBlock;
89665 +
89666 +/*
89667 + * Filter
89668 + *   [13:0]    Context
89669 + *   [14]      DiscardAll
89670 + *   [15]      AckAll
89671 + *   [16]      HighPri
89672 + *   [17]      CountStats
89673 + *   [31:18]   Unused
89674 + */
89675 +#define E4_FILTER_STATS                (1 << 17)
89676 +#define E4_FILTER_HIGH_PRI     (1 << 16)
89677 +#define E4_FILTER_ACKOK_ALL    (1 << 15)
89678 +#define E4_FILTER_DISCARD_ALL  (1 << 14)
89679 +#define E4_FILTER_CONTEXT_MASK (0x3FFF)
89680 +
89681 +/*
89682 + * VirtualProcessTable
89683 + *   [8:0]     Unused  
89684 + *   [12:9]    Size       num vp entries = 512 << Size
89685 + *   [30:13]   Pointer
89686 + *   [31]      Valid
89687 + */
89688 +#define E4_VPT_MIN_ENTRIES      512
89689 +#define E4_VPT_VALID           ((unsigned)1 << 31)
89690 +#define E4_VPT_PTR_SHIFT       0
89691 +#define E4_VPT_SIZE_SHIFT      9
89692 +#define E4_VPT_SIZE_MASK        0xf
89693 +#define E4_VPT_NUM_VP(vpt_val)  (E4_VPT_MIN_ENTRIES << (((vpt_val) >> E4_VPT_SIZE_SHIFT) & E4_VPT_SIZE_MASK))
89694 +#define E4_VPT_VALUE(ptr,size) (((ptr) << E4_VPT_PTR_SHIFT) | ((size) << E4_VPT_SIZE_SHIFT))
89695 +
89696 +
89697 +/* Virtual Process Table */
89698 +typedef struct _E4_VirtualProcessEntry
89699 +{
89700 +    E4_uint64  Values[2];
89701 +} E4_VirtualProcessEntry;
89702 +
89703 +/*
89704 + * Entries have the following format - rtX is a packed route 
89705 + *
89706 + * |rt11|rt10|rt9 |rt8 |rt7 |rt6 |rt5 |rt4 |rt3 |rt2 |rt2 |rt0 |PAAADD       RRRRRR|
89707 + * |output context     |rt23|rt22|rt21|rt20|rt19|rt18|rt17|rt16|rt15|rt14|rt13|rt12|
89708 + */
89709 +
89710 +#define ROUTE_CTXT_SHIFT       48
89711 +#define ROUTE_CTXT_MASK                (~((1ull << ROUTE_CTXT_SHIFT)-1))
89712 +#define ROUTE_CTXT_VALUE(ctx)  (((E4_uint64) ctx) << ROUTE_CTXT_SHIFT)
89713 +
89714 +#define ROUTE_PACKED_OFFSET    16
89715 +#define ROUTE_NUM_PACKED       24
89716 +
89717 +/* defines for first flit of a route */
89718 +#define FIRST_TIMEOUT(Val)     ((Val) << 14)                   /* [15:14]  */
89719 +#define FIRST_SYSTEM_PACKET     (1 << 13)                       /* [13]     */
89720 +#define FIRST_FLOOD_PACKET      (1 << 12)                       /* [12]     */
89721 +#define FIRST_HIGH_PRI         (1 << 11)                       /* [11]    */
89722 +#define FIRST_AGE(Val)         ((Val) << 7)                    /* [10:7] */
89723 +#define FIRST_OPTIONS_MASK     (0xFF80)
89724 +
89725 +/* [6:0] unpacked 1st route value */
89726 +#define FIRST_INVALID          (0)
89727 +#define FIRST_ROUTE(Val)       (0x08 | (Val))
89728 +#define FIRST_ADAPTIVE         (0x30)
89729 +#define FIRST_BCAST_TREE       (0x20)
89730 +#define FIRST_MYLINK           (0x10)
89731 +#define FIRST_BCAST(Top, Bot)  (0x40 | ((Top) << 3) | (Bot))
89732 +
89733 +/* defines for 3 bit packed entries for subsequent flits */
89734 +#define PACKED_INVALID         (0)
89735 +#define PACKED_ROUTE(Val)      (8 | (Val))
89736 +#define PACKED_ADAPTIVE                (3)
89737 +#define PACKED_BCAST_TREE      (2)
89738 +#define PACKED_MYLINK          (1)
89739 +#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
89740 +#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
89741 +
89742 +#endif /* _ASM */
89743 +/* The MMU root context pointer has a mask to bounds check 
89744 + * it - this is computed as follows.
89745 + */
89746 +#define E4_CONTEXT_MASK(num)   (((num) >= 0x2000) ? 0x00 :     \
89747 +                                ((num) >= 0x1000) ? 0x80 :     \
89748 +                                ((num) >= 0x0800) ? 0xc0 :     \
89749 +                                ((num) >= 0x0400) ? 0xe0 :     \
89750 +                                ((num) >= 0x0200) ? 0xf0 :     \
89751 +                                ((num) >= 0x0100) ? 0xf8 :     \
89752 +                                ((num) >= 0x0080) ? 0xfc :     \
89753 +                                ((num) >= 0x0040) ? 0xfe : 0xff)
89754 +/*
89755 + * This generates the size field for a virtual process table.
89756 + * Size defined as 2^n no of 8K pages.
89757 + * Single cycle route fetches are possible if the minimum vproc table size is 8k.
89758 + */
89759 +#define E4_GEN_VPT_SIZE(Size)  (((Size) & E4_VPT_SIZE_MASK) << E4_VPT_SIZE_SHIFT)
89760 +
89761 +#define COMMAND_RUN_QUEUE_BITS         (13 + 2) /* 8K entries of 4 bytes. This is fixed in hardware. */
89762 +#define COMMAND_DESCS_SPACE_BITS       (13 + 5) /* 8K entries of 32 bytes. This is fixed in hardware. */
89763 +#define COMMAND_INSERTER_CACHE_ENTRIES 16
89764 +
89765 +#define COM_TEST_PORT_ADDR_MASK                0xfULL
89766 +#define COM_TEST_PORT_ADDR_SH          0
89767 +
89768 +/*
89769 + * The flush register is accessed through the CommandControl register.
89770 + * The address is naturally alligned. It also positions the command descriptors in memory.
89771 + * When no command queues need flushing it should be or with COM_FLUSH_INVALID. This sets
89772 + * it to the top command queue descriptor. This cannot be accessed from the PCI.
89773 + */
89774 +#define COM_ENABLE_DEQUEUE             (1 << 4)
89775 +#define COM_FLUSH_DESCRIPTOR_MASK      0x7fffffe0ULL
89776 +#define COM_FLUSH_INVALID              0x0003ffe0ULL
89777 +
89778 +
89779 +/*
89780 + * Elan4 BAR1 is split up as follows :
89781 + *
89782 + * RevA
89783 + *     0x3f00000 EBUS other
89784 + *     0x3e00000 EBUS ROM
89785 + *     0x3dfc000 registers
89786 + *     0x0000000 command ports
89787 + *
89788 + * RevB
89789 + *     0x3ffc000 registers
89790 + *     0x3ff8000 padding
89791 + *     0x3ff6000 i2c registers
89792 + *     0x0000000 command ports
89793 + */
89794 +#define ELAN4_BAR1_SIZE                        (1 << 26)       /* 64M */
89795 +#define ELAN4_REG_SIZE                 (1 << 14)       /* 16K */
89796 +
89797 +#define ELAN4_REVA_EBUS_SIZE           (1 << 21)       /* 2M */
89798 +#define ELAN4_REVA_EBUS_OFFSET         (ELAN4_BAR1_SIZE - ELAN4_REVA_EBUS_SIZE)
89799 +#define ELAN4_REVA_REG_OFFSET          (ELAN4_REVA_EBUS_OFFSET - ELAN4_REG_SIZE)
89800 +#define ELAN4_REVA_NUM_COMMAND_QUEUES  (ELAN4_REVA_REG_OFFSET >> 13)
89801 +
89802 +#define ELAN4_REVA_EBUS_ROM_SIZE       (1 << 20)       /* 1M */
89803 +#define ELAN4_REVA_EBUS_ROM_OFFSET     0
89804 +
89805 +#define ELAN4_REVB_I2C_PADDING         (1 << 14)       /* 16K */
89806 +#define ELAN4_REVB_I2C_SIZE            (1 << 13)       /* 8k */
89807 +#define ELAN4_REVB_REG_OFFSET          (ELAN4_BAR1_SIZE - ELAN4_REG_SIZE)
89808 +#define ELAN4_REVB_I2C_OFFSET          (ELAN4_REVB_REG_OFFSET - ELAN4_REVB_I2C_PADDING - ELAN4_REVB_I2C_SIZE)
89809 +#define ELAN4_REVB_NUM_COMMAND_QUEUES  (ELAN4_REVB_I2C_OFFSET >> 13)
89810 +
89811 +#endif /* notdef _ELAN4_REGISTERS_H */
89812 Index: linux-2.6.5-7.191/include/elan4/sdram.h
89813 ===================================================================
89814 --- linux-2.6.5-7.191.orig/include/elan4/sdram.h        2004-02-23 16:02:56.000000000 -0500
89815 +++ linux-2.6.5-7.191/include/elan4/sdram.h     2005-07-28 14:52:52.967660648 -0400
89816 @@ -0,0 +1,41 @@
89817 +/*
89818 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
89819 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89820 + *
89821 + *    For licensing information please see the supplied COPYING file
89822 + *
89823 + */
89824 +
89825 +#ifndef __ELAN4_SDRAM_H
89826 +#define __ELAN4_SDRAM_H
89827 +
89828 +#ident "$Id: sdram.h,v 1.8 2003/09/24 13:55:55 david Exp $"
89829 +/*      $Source: /cvs/master/quadrics/elan4hdr/sdram.h,v $*/
89830 +
89831 +/* Include header file generated by sdram configuration program */
89832 +#include <elan4/xsdram.h> 
89833 +
89834 +/* SDRAM bank shift definitions */
89835 +#define SDRAM_0_CS_SHIFT       25
89836 +#define SDRAM_1_CS_SHIFT       27
89837 +#define SDRAM_2_CS_SHIFT       28
89838 +#define SDRAM_3_CS_SHIFT       29
89839 +
89840 +#define SDRAM_BANK_SHIFT(cfg) \
89841 +       (((cfg >> SDRAM_RamSize_SH) & 3) == 0 ? SDRAM_0_CS_SHIFT : \
89842 +        ((cfg >> SDRAM_RamSize_SH) & 3) == 1 ? SDRAM_1_CS_SHIFT : \
89843 +        ((cfg >> SDRAM_RamSize_SH) & 3) == 2 ? SDRAM_2_CS_SHIFT : SDRAM_3_CS_SHIFT)
89844 +
89845 +#define SDRAM_BANK_SIZE(cfg)           (1ULL << SDRAM_BANK_SHIFT(cfg))
89846 +#define SDRAM_BANK_OFFSET(cfg,bank)    ((unsigned long long)(bank) << SDRAM_BANK_SHIFT(cfg))
89847 +#define SDRAM_NUM_BANKS(cfg)           (4)
89848 +#define SDRAM_MAX_BANKS                        4
89849 +
89850 +/* When the elan access sdram it passes eaddr[12] as sdramaddr[12] when
89851 + * running with a 4k page size, however PCI accesses pass paddr[12], so
89852 + * we must ensure that sdram pages are allocated such that eaddr[12] is the
89853 + * same as paddr[12] - the easiest way is to allocate sdram in 8k chunks and
89854 + * ensure that maddr[12] == eaddr[12] == pgoff[0] */
89855 +#define SDRAM_MIN_PAGE_SIZE            (8192)
89856 +
89857 +#endif /* __ELAN4_SDRAM_H */
89858 Index: linux-2.6.5-7.191/include/elan4/stats.h
89859 ===================================================================
89860 --- linux-2.6.5-7.191.orig/include/elan4/stats.h        2004-02-23 16:02:56.000000000 -0500
89861 +++ linux-2.6.5-7.191/include/elan4/stats.h     2005-07-28 14:52:52.967660648 -0400
89862 @@ -0,0 +1,83 @@
89863 +/*
89864 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
89865 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89866 + * 
89867 + *    For licensing information please see the supplied COPYING file
89868 + *
89869 + */
89870 +
89871 +#ident "@(#)$Id: stats.h,v 1.10.12.1 2004/10/06 11:09:12 david Exp $"
89872 +/*      $Source: /cvs/master/quadrics/elan4mod/stats.h,v $*/
89873 +
89874 +#ifndef __ELAN4_STATS_H
89875 +#define __ELAN4_STATS_H
89876 +
89877 +#define ELAN4_DEV_STATS_BUCKETS                8
89878 +
89879 +
89880 +typedef struct elan4_dev_stats
89881 +{
89882 +    unsigned long      s_interrupts;
89883 +    
89884 +    unsigned long       s_mainints[ELAN4_DEV_STATS_BUCKETS];
89885 +    unsigned long      s_mainint_punts;
89886 +    unsigned long      s_mainint_rescheds;
89887 +
89888 +    unsigned long       s_haltints;
89889 +
89890 +    unsigned long      s_cproc_traps;
89891 +    unsigned long      s_dproc_traps;
89892 +    unsigned long      s_eproc_traps;
89893 +    unsigned long      s_iproc_traps;
89894 +    unsigned long      s_tproc_traps;
89895 +
89896 +    unsigned long       s_cproc_trap_types[0x10];
89897 +    unsigned long       s_dproc_trap_types[6];
89898 +    unsigned long       s_eproc_trap_types[4];
89899 +    unsigned long       s_iproc_trap_types[0xa];
89900 +    unsigned long       s_tproc_trap_types[7];
89901 +
89902 +    unsigned long       s_correctable_errors;
89903 +    unsigned long       s_multiple_errors;
89904 +    
89905 +    unsigned long       s_link_errors;
89906 +    unsigned long       s_lock_errors;
89907 +    unsigned long       s_deskew_errors;
89908 +    unsigned long       s_phase_errors;
89909 +    unsigned long      s_data_errors;
89910 +    unsigned long      s_fifo_overflow0;
89911 +    unsigned long      s_fifo_overflow1;
89912 +    unsigned long       s_mod45changed;
89913 +    unsigned long       s_pack_not_seen;
89914 +    unsigned long       s_linkport_keyfail;
89915 +
89916 +    unsigned long      s_eop_reset;
89917 +    unsigned long       s_bad_length;
89918 +    unsigned long       s_crc_bad;
89919 +    unsigned long       s_crc_error;
89920 +
89921 +    unsigned long      s_cproc_timeout;
89922 +    unsigned long      s_dproc_timeout;
89923 +
89924 +    unsigned long      s_sdram_bytes_free;
89925 +} ELAN4_DEV_STATS;
89926 +
89927 +#define MainIntBuckets         ((int[ELAN4_DEV_STATS_BUCKETS-1]) {1, 2, 3, 4, 8, 16, 32})
89928 +
89929 +#define BumpDevStat(dev,stat)  ((dev)->dev_stats.stat++)
89930 +#define BucketDevStat(dev,stat,n,bucket)       ((n) <= (bucket)[0] ? (dev)->dev_stats.stat[0]++ : \
89931 +                                                (n) <= (bucket)[1] ? (dev)->dev_stats.stat[1]++ : \
89932 +                                                (n) <= (bucket)[2] ? (dev)->dev_stats.stat[2]++ : \
89933 +                                                (n) <= (bucket)[3] ? (dev)->dev_stats.stat[3]++ : \
89934 +                                                (n) <= (bucket)[4] ? (dev)->dev_stats.stat[4]++ : \
89935 +                                                (n) <= (bucket)[5] ? (dev)->dev_stats.stat[5]++ : \
89936 +                                                (n) <= (bucket)[6] ? (dev)->dev_stats.stat[6]++ : \
89937 +                                                                     (dev)->dev_stats.stat[7]++)
89938 +
89939 +
89940 +/*
89941 + * Local variables:
89942 + * c-file-style: "stroustrup"
89943 + * End:
89944 + */
89945 +#endif /*__ELAN4_STATS_H */
89946 Index: linux-2.6.5-7.191/include/elan4/tprintf.h
89947 ===================================================================
89948 --- linux-2.6.5-7.191.orig/include/elan4/tprintf.h      2004-02-23 16:02:56.000000000 -0500
89949 +++ linux-2.6.5-7.191/include/elan4/tprintf.h   2005-07-28 14:52:52.968660496 -0400
89950 @@ -0,0 +1,24 @@
89951 +/*
89952 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
89953 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89954 + *
89955 + *    For licensing information please see the supplied COPYING file
89956 + *
89957 + */
89958 +
89959 +#ifndef __ELAN4_TPRINTF_H
89960 +#define __ELAN4_TPRINTF_H
89961 +
89962 +#ident "$Id: tprintf.h,v 1.6 2003/09/04 12:39:17 david Exp $"
89963 +/*      $Source: /cvs/master/quadrics/elan4hdr/tprintf.h,v $*/
89964 +
89965 +
89966 +#ifdef _ASM
89967 +#define TPRINTF0(string)           add %r0, __LINE__, %r0
89968 +#define TPRINTF1(string,reg)       add reg, __LINE__, %r0
89969 +#else
89970 +#define TPRINTF0(string)           asm volatile ("add %%r0, %0, %%r0" : : "i" (__LINE__))
89971 +#define TPRINTF1(string, value)            asm volatile ("add %0,   %1, %%r0" : : "r" (value), "i" (__LINE__))
89972 +#endif /* _ASM */
89973 +
89974 +#endif /* __ELAN4_TPRINTF_H */
89975 Index: linux-2.6.5-7.191/include/elan4/trap.h
89976 ===================================================================
89977 --- linux-2.6.5-7.191.orig/include/elan4/trap.h 2004-02-23 16:02:56.000000000 -0500
89978 +++ linux-2.6.5-7.191/include/elan4/trap.h      2005-07-28 14:52:52.968660496 -0400
89979 @@ -0,0 +1,95 @@
89980 +/*
89981 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
89982 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89983 + * 
89984 + *    For licensing information please see the supplied COPYING file
89985 + *
89986 + */
89987 +
89988 +#ident "@(#)$Id: trap.h,v 1.10 2003/10/07 12:11:10 david Exp $"
89989 +/*      $Source: /cvs/master/quadrics/elan4mod/trap.h,v $*/
89990 +
89991 +#ifndef __ELAN4_TRAP_H
89992 +#define __ELAN4_TRAP_H
89993 +
89994 +/*
89995 + * If the EProc Faults whilst performing an action (e.g. Read/Write on the data src or dest Addr)
89996 + *  the Eproc increments the Addr(s) by a block size (64 bytes):
89997 + *  1: Fault on Read: 
89998 + *                     Src EventAddr = Read Addr + block
89999 + *  2: Fault on Write:
90000 + *                     Src EventAddr = Read Addr + block
90001 + *                     Dst EventAddr = Read Addr + block
90002 + *                     Size          = Size - block ndwords
90003 + *  We must rewind the addr correctly to completely the transfer successfully
90004 + */
90005 +#define EVENT_COPY_NDWORDS     0x8
90006 +#define EVENT_COPY_BLOCK_SIZE  0x40
90007 +
90008 +typedef struct elan4_eproc_trap
90009 +{
90010 +    E4_uint64          tr_status;
90011 +    E4_FaultSave       tr_faultarea;
90012 +    E4_Event           tr_event;
90013 +    E4_Addr            tr_eventaddr;
90014 +} ELAN4_EPROC_TRAP;
90015 +
90016 +typedef struct elan4_cproc_trap
90017 +{
90018 +    E4_uint64          tr_status;                                      /* cproc status register */
90019 +    E4_uint64          tr_command;                                     /* cproc command */
90020 +    E4_CommandQueueDesc tr_qdesc;                                      /* copy of command queue descriptor */
90021 +    E4_FaultSave       tr_faultarea;                                   /* fault area for mmu traps */
90022 +    ELAN4_EPROC_TRAP   tr_eventtrap;                                   /* associated event trap (waitevent) */
90023 +} ELAN4_CPROC_TRAP;
90024 +
90025 +typedef struct elan4_dproc_trap
90026 +{
90027 +    E4_DMA             tr_desc;
90028 +    E4_FaultSave       tr_packAssemFault;
90029 +    E4_FaultSave       tr_prefetchFault;
90030 +    E4_uint64          tr_status;
90031 +} ELAN4_DPROC_TRAP;
90032 +
90033 +typedef struct elan4_tproc_trap
90034 +{
90035 +    E4_uint64          tr_regs[64];
90036 +    E4_FaultSave       tr_dataFault;
90037 +    E4_FaultSave       tr_instFault;
90038 +    E4_uint64          tr_status;
90039 +    E4_uint64          tr_state;
90040 +    E4_Addr            tr_pc;
90041 +    E4_Addr            tr_npc;
90042 +    E4_uint64          tr_dirty;
90043 +    E4_uint64          tr_bad;
90044 +} ELAN4_TPROC_TRAP;
90045 +
90046 +typedef struct elan4_iproc_trap
90047 +{
90048 +    E4_uint32            tr_numTransactions;
90049 +    E4_uint32            tr_flags;
90050 +    E4_uint32            tr_trappedTrans;
90051 +    E4_uint32            tr_waitForEopTrans;
90052 +    E4_uint32            tr_identifyTrans;
90053 +    E4_uint32            tr_pad;
90054 +
90055 +    E4_FaultSave          tr_faultarea;
90056 +    E4_IprocTrapHeader    tr_transactions[MAX_TRAPPED_TRANS];
90057 +    E4_IprocTrapData      tr_dataBuffers[MAX_TRAPPED_TRANS];
90058 +} ELAN4_IPROC_TRAP;
90059 +
90060 +#define TR_FLAG_ACK_SENT       (1 << 0)
90061 +#define TR_FLAG_EOP_ERROR      (1 << 1)
90062 +#define TR_FLAG_BAD_TRANS      (1 << 2)
90063 +#define TR_FLAG_DMA_PACKET     (1 << 3)
90064 +#define TR_FLAG_EOP_BAD                (1 << 4)
90065 +#define TR_FLAG_TOOMANY_TRANS  (1 << 5)
90066 +
90067 +#define TR_TRANS_INVALID       (0xffffffff)
90068 +
90069 +/*
90070 + * Local variables:
90071 + * c-file-style: "stroustrup"
90072 + * End:
90073 + */
90074 +#endif /* __ELAN4_TRAP_H */
90075 Index: linux-2.6.5-7.191/include/elan4/trtype.h
90076 ===================================================================
90077 --- linux-2.6.5-7.191.orig/include/elan4/trtype.h       2004-02-23 16:02:56.000000000 -0500
90078 +++ linux-2.6.5-7.191/include/elan4/trtype.h    2005-07-28 14:52:52.968660496 -0400
90079 @@ -0,0 +1,112 @@
90080 +/*
90081 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90082 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90083 + *
90084 + *    For licensing information please see the supplied COPYING file
90085 + *
90086 + */
90087 +
90088 +#ifndef _ELAN4_TRTYPE_H
90089 +#define _ELAN4_TRTYPE_H
90090 +
90091 +#ident "$Id: trtype.h,v 1.20 2004/02/06 10:38:21 mike Exp $"
90092 +/*      $Source: /cvs/master/quadrics/elan4hdr/trtype.h,v $*/
90093 +
90094 +/*<15:11> Size field is used to give the number of additional 64 bit data values.
90095 +         A value from 0 to 16 inclusive is valid. */
90096 +
90097 +#include <elan4/types.h>
90098 +
90099 +#define TR_SIZE_SHIFT          (11)
90100 +#define TR_SIZE_MASK           (0x1f << TR_SIZE_SHIFT)
90101 +#define SET_TR_SIZE(Size)      (((Size) << TR_SIZE_SHIFT) & TR_SIZE_MASK)
90102 +
90103 +/* <10:9> Last Transaction and AckNow bits, marks the last transaction and
90104 +          enables a PACK_OK to be sent. */
90105 +#define TR_LAST_AND_SEND_ACK   (3 << 9)
90106 +
90107 +
90108 +/* <8>  Only valid on the last transaction. Delays execution until an EOP_GOOD is received.
90109 + *      Any other EOP type will abort execution of this transaction. */
90110 +#define TR_WAIT_FOR_EOP                (1 << 8)
90111 +
90112 +/*
90113 + * Data type. This is used by transactions of variable data type. It controls any endian
90114 + * converion required if the destiantion host processor has a big endian memory format.
90115 + */
90116 +/*     WriteBlock      <8:7>   Data type
90117 +                       <6:0>   Part write size */
90118 +#define TR_DATATYPE_SHIFT      (6)
90119 +#define TR_DATATYPE_MASK       ((1 << 2) - 1)
90120 +
90121 +#define TR_DATATYPE_BYTE       E4_DATATYPE_BYTE        
90122 +#define TR_DATATYPE_SHORT      E4_DATATYPE_SHORT
90123 +#define TR_DATATYPE_WORD       E4_DATATYPE_WORD        
90124 +#define TR_DATATYPE_DWORD      E4_DATATYPE_DWORD
90125 +
90126 +/* <5:0> Transaction Type
90127 + *       For Writeblock <5:3> 000 => Write, 0001 => Read
90128 + *                      <2:0> End Byte Addr */
90129 +#define TR_OPCODE_MASK         0x3F
90130 +#define TR_BLOCK_OPCODE_MASK   0x38
90131 +
90132 +#define TR_WRITEBLOCK          0x0
90133 +#define TR_ENDBYTE_MASK                0x7
90134 +#define TR_WRITE(Size, EndByte, DataType)                                              \
90135 +                       (0x0 | SET_TR_SIZE(Size) | ((EndByte) & TR_ENDBYTE_MASK) |      \
90136 +                        (((DataType) & TR_DATATYPE_MASK) << TR_DATATYPE_SHIFT))
90137 +
90138 +#define TR_NOP_TRANS           (0x10 | SET_TR_SIZE(0))
90139 +#define TR_SETEVENT            0x10
90140 +#define TR_SETEVENT_NOIDENT    (TR_SETEVENT | SET_TR_SIZE(0) | TR_LAST_AND_SEND_ACK)
90141 +#define TR_SETEVENT_IDENTIFY   (TR_SETEVENT | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
90142 +#define TR_REMOTEDMA           (0x11 | SET_TR_SIZE(7) | TR_LAST_AND_SEND_ACK)
90143 +#define TR_SENDDISCARD         (0x12 | SET_TR_SIZE(0))
90144 +
90145 +/*
90146 + * Conditional transactions that might return PAckTestFail.
90147 + * All will allow further exection of the packet if ([Address] operator DataValue) is true.
90148 + * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
90149 + * These should be used where a definite TRUE/FALSE answer is required.
90150 + */
90151 +#define TR_GTE                 (0x14 | SET_TR_SIZE(1))
90152 +#define TR_LT                  (0x15 | SET_TR_SIZE(1))
90153 +#define TR_EQ                  (0x16 | SET_TR_SIZE(1))
90154 +#define TR_NEQ                 (0x17 | SET_TR_SIZE(1))
90155 +
90156 +/*
90157 + * Conditional transactions that might return PAckDiscard.
90158 + * All will allow further exection of the packet if ([Address] operator DataValue) is true.
90159 + * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
90160 + * These should be used where eventually a TRUE answer is expected but the node might not be ready yet.
90161 + * These can be mixed with the normal conditionals to allow a single packet to test for readyness and
90162 + * a TRUE/FALSE answer.
90163 + */
90164 +#define TR_GTE_DISCARD         (0x34 | SET_TR_SIZE(1))
90165 +#define TR_LT_DISCARD          (0x35 | SET_TR_SIZE(1))
90166 +#define TR_EQ_DISCARD          (0x36 | SET_TR_SIZE(1))
90167 +#define TR_NEQ_DISCARD         (0x37 | SET_TR_SIZE(1))
90168 +
90169 +#define TR_TRACEROUTE_TRANS    0x18
90170 +#define TR_TRACEROUTE(Size)    (TR_TRACEROUTE_TRANS | (TR_DATATYPE_WORD << TR_DATATYPE_SHIFT) |SET_TR_SIZE(Size))
90171 +#define TR_IDENTIFY            (0x19 | SET_TR_SIZE(0))
90172 +
90173 +#define TR_ADDWORD             (0x1c | SET_TR_SIZE(2) | TR_LAST_AND_SEND_ACK)
90174 +#define TR_INPUT_Q_COMMIT      (0x1d | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
90175 +#define TR_TESTANDWRITE        (0x1e | SET_TR_SIZE(3) | TR_LAST_AND_SEND_ACK)
90176 +#define TR_INPUT_Q_GETINDEX    (0x1f | SET_TR_SIZE(0))
90177 +
90178 +
90179 +
90180 +/* TraceRoute formate */
90181 +#define TR_TRACEROUTE0_CHANID(val)             ((val) & 1)                     /* 0     Chan Id */
90182 +#define TR_TRACEROUTE0_LINKID(val)             (((val) >> 1) & 7)              /* 1:3   Link Id */
90183 +#define TR_TRACEROUTE0_REVID(val)              (((val) >> 4) & 7)              /* 4:6   Revision Id */
90184 +#define TR_TRACEROUTE0_BCAST_PIN(val)          (((val) >> 7) & 1)              /* 7     Bcast Top Pin */
90185 +#define TR_TRACEROUTE0_LNR(val)                        (((val) >> 8) & 0xFF)           /* 8:15  Global Link Not Ready */
90186 +
90187 +#define TR_TRACEROUTE1_ROUTES_SELECTED(val)    ((val & 0xFF))                  /* 0:7   Routes Selected */
90188 +#define TR_TRACEROUTE1_BCAST_TOP(val)          (((val) >> 8) & 7)              /* 8:10  Broadcast Top */
90189 +#define TR_TRACEROUTE1_BCAST_BOTTOM(val)       (((val) >> 12) & 7)             /* 12:14 Broadcast Bottom */
90190 +
90191 +#endif /* _ELAN4_TRANSACTIONTYPE_H */
90192 Index: linux-2.6.5-7.191/include/elan4/types.h
90193 ===================================================================
90194 --- linux-2.6.5-7.191.orig/include/elan4/types.h        2004-02-23 16:02:56.000000000 -0500
90195 +++ linux-2.6.5-7.191/include/elan4/types.h     2005-07-28 14:52:52.969660344 -0400
90196 @@ -0,0 +1,69 @@
90197 +/*
90198 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90199 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90200 + *
90201 + *    For licensing information please see the supplied COPYING file
90202 + *
90203 + */
90204 +
90205 +#ifndef __ELAN4_TYPES_H
90206 +#define __ELAN4_TYPES_H
90207 +
90208 +#ident "@(#)$Id: types.h,v 1.9 2003/09/04 12:39:17 david Exp $"
90209 +/*      $Source: /cvs/master/quadrics/elan4hdr/types.h,v $*/
90210 +
90211 +#include <qsnet/config.h>
90212 +/*
90213 + * "flip" values for correctly indexing into
90214 + * block data which was copied from the Elan
90215 + * using 64 bit accesses.
90216 + */
90217 +#if defined(__LITTLE_ENDIAN__)
90218 +#  define ByteEndianFlip  0
90219 +#  define ShortEndianFlip 0
90220 +#  define WordEndianFlip  0
90221 +#else
90222 +#  define ByteEndianFlip  7
90223 +#  define ShortEndianFlip 3
90224 +#  define WordEndianFlip  1
90225 +#endif
90226 +
90227 +
90228 +#ifndef _ASM
90229 +
90230 +typedef signed int        E4_int;
90231 +typedef unsigned int              E4_uint;
90232 +
90233 +typedef signed char       E4_int8;
90234 +typedef unsigned char     E4_uint8;
90235 +
90236 +typedef signed short      E4_int16;
90237 +typedef unsigned short            E4_uint16;
90238 +
90239 +typedef signed int        E4_int32;
90240 +typedef unsigned int              E4_uint32;
90241 +
90242 +#ifdef _LP64
90243 +typedef signed long        E4_int64;
90244 +typedef unsigned long      E4_uint64;
90245 +#else
90246 +typedef signed long long   E4_int64;
90247 +typedef unsigned long long E4_uint64;
90248 +#endif
90249 +
90250 +/* 64-bit Elan4 */
90251 +typedef E4_uint64         E4_Addr;
90252 +typedef E4_uint32         E4_LocPhysAddr;      /* Really 31 bits */
90253 +
90254 +#define OneK   (1024)
90255 +#define EightK (8*OneK)
90256 +
90257 +#define E4_DATATYPE_BYTE       0
90258 +#define E4_DATATYPE_SHORT      1
90259 +#define E4_DATATYPE_WORD       2
90260 +#define E4_DATATYPE_DWORD      3
90261 +
90262 +#endif /* _ASM */
90263 +
90264 +#endif /* __ELAN4_TYPES_H */
90265 +
90266 Index: linux-2.6.5-7.191/include/elan4/user.h
90267 ===================================================================
90268 --- linux-2.6.5-7.191.orig/include/elan4/user.h 2004-02-23 16:02:56.000000000 -0500
90269 +++ linux-2.6.5-7.191/include/elan4/user.h      2005-07-28 14:52:52.970660192 -0400
90270 @@ -0,0 +1,344 @@
90271 +/*
90272 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
90273 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90274 + * 
90275 + *    For licensing information please see the supplied COPYING file
90276 + *
90277 + */
90278 +
90279 +#ident "@(#)$Id: user.h,v 1.37.2.2 2004/11/18 17:54:17 duncant Exp $"
90280 +/*      $Source: /cvs/master/quadrics/elan4mod/user.h,v $*/
90281 +
90282 +#ifndef __ELAN4_USER_H
90283 +#define __ELAN4_USER_H
90284 +
90285 +#include <elan/capability.h>
90286 +#include <elan4/usertrap.h>
90287 +#include <elan4/intcookie.h>
90288 +
90289 +typedef struct trap_queue
90290 +{
90291 +    unsigned   q_back;                 /* Next free space */
90292 +    unsigned   q_front;                /* First object to remove */
90293 +    unsigned   q_size;                 /* Size of queue */
90294 +    unsigned   q_count;                /* Current number of entries */
90295 +    unsigned   q_slop;                 /* FULL <=> (count+slop) == size */
90296 +} RING_QUEUE;
90297 +
90298 +#define RING_QUEUE_INIT(q,num,slop)    ((q).q_size = (num), (q).q_slop = (slop), (q).q_front = (q).q_back = 0, (q).q_count = 0)
90299 +#define RING_QUEUE_FULL(q)             ((q).q_count >= ((q).q_size - (q).q_slop))
90300 +#define RING_QUEUE_REALLY_FULL(q)      ((q).q_count == (q).q_size)
90301 +#define RING_QUEUE_EMPTY(q)            ((q).q_count == 0)
90302 +#define RING_QUEUE_NEXT(q,indx)                ((indx) = (((indx)+1) % (q).q_size))
90303 +#define RING_QUEUE_PREV(q,indx)                ((indx) = (((indx)+(q).q_size-1) % (q).q_size))
90304 +#define RING_QUEUE_ADD(q)              (RING_QUEUE_NEXT(q ,(q).q_back),  (++(q).q_count) >= ((q).q_size - (q).q_slop))
90305 +#define RING_QUEUE_REMOVE(q)           (RING_QUEUE_NEXT(q, (q).q_front), (--(q).q_count) == 0)
90306 +#define RING_QUEUE_ADD_FRONT(q)                (RING_QUEUE_PREV(q, (q).q_front), (++(q).q_count) >= ((q).q_size - (q).q_slop))
90307 +#define RING_QUEUE_ENTRY(qArea,indx)   (&(qArea)[(indx)])
90308 +#define RING_QUEUE_FRONT(q,qArea)      RING_QUEUE_ENTRY(qArea, (q).q_front)
90309 +#define RING_QUEUE_BACK(q,qArea)       RING_QUEUE_ENTRY(qArea, (q).q_back)
90310 +#define RING_QUEUE_ITERATE(q,idx)      for (idx = (q).q_front; idx != (q).q_back; idx = (((idx) + 1) % (q).q_size))
90311 +
90312 +typedef struct user_rgn
90313 +{
90314 +    struct user_rgn *rgn_mnext;                                        /* Doubly linked list of regions */
90315 +    struct user_rgn *rgn_mprev;                                        /*   sorted on main address */ 
90316 +    virtaddr_t       rgn_mbase;                                        /* main address of base of region */
90317 +
90318 +    struct user_rgn *rgn_enext;                                        /* Doubly linked list of regions */
90319 +    struct user_rgn *rgn_eprev;                                        /*   sorted on elan address */
90320 +    E4_Addr         rgn_ebase;                                 /* elan address of base of region */
90321 +
90322 +    unsigned long    rgn_len;                                  /* length of region */
90323 +    unsigned        rgn_perm;                                  /* elan access permission */
90324 +} USER_RGN;
90325 +
90326 +typedef struct user_vpseg
90327 +{ 
90328 +    struct list_head   vps_link;
90329 +
90330 +    unsigned short     vps_process;                            /* virtual process number */
90331 +    unsigned short     vps_entries;                            /*   and # virtual processes */
90332 +
90333 +    unsigned           vps_type;
90334 +    union
90335 +    {
90336 +       struct {
90337 +           ELAN_CAPABILITY        *cap;
90338 +           E4_VirtualProcessEntry *routes;
90339 +       } p2p;
90340 +#define vps_p2p_cap    vps_u.p2p.cap
90341 +#define vps_p2p_routes  vps_u.p2p.routes
90342 +
90343 +       struct {
90344 +           unsigned short lowvp;
90345 +           unsigned short highvp;
90346 +       } bcast;
90347 +#define vps_bcast_lowvp                vps_u.bcast.lowvp
90348 +#define vps_bcast_highvp       vps_u.bcast.highvp
90349 +    } vps_u;
90350 +} USER_VPSEG;
90351 +
90352 +/* values for vps_type */
90353 +#define USER_VPSEG_P2P         0
90354 +#define USER_VPSEG_BCAST       1
90355 +
90356 +typedef struct user_cq
90357 +{
90358 +    struct list_head ucq_link;
90359 +
90360 +    ELAN4_CQ       *ucq_cq;                                    /* the real command queue */
90361 +
90362 +    unsigned char    ucq_state;                                        /* command queue state */
90363 +    unsigned char    ucq_errored;                              /* command queue has errored */
90364 +    unsigned char    ucq_flags;                                        /* flags */
90365 +    ELAN4_CPROC_TRAP ucq_trap;                                 /* trap state */
90366 +
90367 +    atomic_t        ucq_ref;                                   /* # references to this cq (mmaps) */
90368 +} USER_CQ;
90369 +
90370 +/* values for ucq_state */
90371 +#define UCQ_RUNNING                     0                      /* command queue is running */
90372 +#define UCQ_TRAPPED                     1                      /* command queue has trapped */
90373 +#define UCQ_NEEDS_RESTART                2                     /* command queue has trapped, and needs restarting */
90374 +#define UCQ_STOPPED                     3                      /* command queue has trapped, and delivered to user */
90375 +
90376 +/* values for ucq_flags */
90377 +#define UCQ_SYSTEM             (1 << 0)
90378 +#define UCQ_REORDER            (1 << 1)
90379 +
90380 +extern int num_fault_save;
90381 +extern int min_fault_pages;
90382 +extern int max_fault_pages;
90383 +
90384 +typedef struct fault_save
90385 +{
90386 +    struct fault_save           *next;
90387 +    E4_Addr                      addr;
90388 +    E4_uint32                    count;
90389 +} FAULT_SAVE;
90390 +
90391 +typedef struct user_iproc_trap
90392 +{
90393 +    unsigned char     ut_state;
90394 +    ELAN4_IPROC_TRAP  ut_trap;
90395 +} USER_IPROC_TRAP;
90396 +
90397 +/* values for ut_state */
90398 +#define UTS_IPROC_RUNNING                      0
90399 +#define UTS_IPROC_TRAPPED                      1
90400 +#define UTS_IPROC_RESOLVING                    2
90401 +#define UTS_IPROC_EXECUTE_PACKET               3
90402 +#define UTS_IPROC_EXECUTING                    4
90403 +#define UTS_IPROC_NETWORK_ERROR                        5
90404 +#define UTS_IPROC_STOPPED                      6
90405 +
90406 +typedef struct user_ctxt_entry
90407 +{
90408 +    struct list_head    cent_link;                                     /* entry chained on context */
90409 +    ELAN_CAPABILITY    *cent_cap;                                      /* capability we attached with */
90410 +} USER_CTXT_ENTRY;
90411 +
90412 +typedef struct user_ctxt
90413 +{
90414 +    ELAN4_CTXT         uctx_ctxt;                              /* is also an elan context */
90415 +
90416 +    spinlock_t        uctx_spinlock;                           /* spinlock for items used with interrupt handler */
90417 +    kcondvar_t        uctx_wait;                               /* place to sleep (traphandler/swapout/swapin/neterr fixup) */
90418 +
90419 +    unsigned          uctx_status;                             /* status                               (uctx_spinlock) */
90420 +
90421 +    pid_t             uctx_trap_pid;                           /* pid to deliver signals to on trap */
90422 +    int                       uctx_trap_signo;                         /* signal number to deliver */
90423 +    unsigned          uctx_trap_state;                         /* state of trap handling code */
90424 +    unsigned          uctx_trap_count;                         /* count of "thread" in user_trap_handler() */
90425 +
90426 +    unsigned          uctx_int_count;                          /* # interrupts since last zeroed */
90427 +    unsigned long      uctx_int_start;                         /* tick when int_count last zeroed */
90428 +    unsigned long      uctx_int_delay;                         /* # ticks to delay next wakeup */
90429 +    struct timer_list  uctx_int_timer;                         /* and timer to use to delay signal */
90430 +
90431 +    struct timer_list  uctx_neterr_timer;                      /* network error timer */
90432 +
90433 +    struct list_head   uctx_vpseg_list;                                /* list of vp segments we've got */
90434 +    kmutex_t           uctx_vpseg_lock;                                /*   and lock to protect it. */
90435 +    ELAN4_ROUTE_TABLE *uctx_routetable;                                /* our virtual process table */
90436 +    ELAN_POSITION      uctx_position;                          /* position in network */
90437 +
90438 +    struct list_head   uctx_cent_list;                                 /* list of attached network contexts */
90439 +
90440 +    USER_CQ          *uctx_ddcq;                               /* command queue for re-issuing traps */
90441 +    E4_uint64         uctx_ddcq_insertcnt;                     /* # dwords inserted into command queue */
90442 +    E4_uint64          uctx_ddcq_completed;                    /* last "completed" write was here */
90443 +    int                       uctx_ddcq_intr;                          /* count of outstanding ddcq interrupts */
90444 +
90445 +    ELAN4_HALTOP       uctx_haltop;                            /* halt operation for flushing */
90446 +    ELAN4_DMA_FLUSHOP  uctx_dma_flushop;                       /* flush operation for flushing dma runqueue */
90447 +
90448 +    INTCOOKIE_TABLE   *uctx_intcookie_table;                   /* table of interrupt cookies (shared with other uctxs for this task) */
90449 +
90450 +    kmutex_t          uctx_cqlock;                             /* lock for create/destroy cqs */
90451 +    struct list_head   uctx_cqlist;                            /* list of command queues               (uctx_cqlock,uctx_spinlock) */
90452 +
90453 +    ELAN4_DPROC_TRAP  *uctx_dprocTraps;                                /* queue of dproc traps to resolve/reissue */
90454 +    RING_QUEUE        uctx_dprocTrapQ;
90455 +
90456 +    ELAN4_TPROC_TRAP  *uctx_tprocTraps;                                /* queue of tproc traps to resolve/reissue */
90457 +    RING_QUEUE         uctx_tprocTrapQ;
90458 +
90459 +    ELAN4_EPROC_TRAP  *uctx_eprocTraps;                                /* queue of eproc traps to resolve */
90460 +    RING_QUEUE        uctx_eprocTrapQ;
90461 +
90462 +    USER_IPROC_TRAP    uctx_iprocTrap[2];                      /* input trap state, 1 per virtual channel */
90463 +
90464 +    E4_DMA           *uctx_dmas;                               /* queue of dmas to restart */
90465 +    RING_QUEUE         uctx_dmaQ;
90466 +    
90467 +    E4_ThreadRegs     *uctx_threads;                           /* queue of threads to restart */
90468 +    RING_QUEUE         uctx_threadQ;
90469 +
90470 +    ELAN4_NETERR_MSG  *uctx_msgs;                              /* queue of neterr messages */
90471 +    RING_QUEUE        uctx_msgQ;
90472 +    kmutex_t          uctx_rgnmutex;                           /* lock for create/destroy regions */
90473 +    spinlock_t        uctx_rgnlock;                            /* spinlock to protect linked lists */
90474 +    USER_RGN         *uctx_mrgns;                              /* Doubly linked list of memory regions (uctx_rgnlock) */
90475 +    USER_RGN         *uctx_mtail;                              /* Last memory region on list           (uctx_rgnlock) */
90476 +    USER_RGN         *uctx_mrgnlast;                           /* Last region 'hit'                    (uctx_rgnlock) */
90477 +
90478 +    USER_RGN         *uctx_ergns;                              /* Doubly linked list of memory regions (uctx_rgnlock) */
90479 +    USER_RGN         *uctx_etail;                              /* Last memory region on list           (uctx_rgnlock) */
90480 +    USER_RGN         *uctx_ergnlast;                           /* Last region 'hit'                    (uctx_rgnlock) */
90481 +
90482 +    ELAN4_USER_PAGE   *uctx_upage;                             /* kernel page shared with user */
90483 +    sdramaddr_t               uctx_trampoline;                         /* sdram page for tproc trampoline */
90484 +
90485 +    E4_Addr           uctx_upage_addr;                         /*   elan addr page mapped into */
90486 +    E4_Addr           uctx_trestart_addr;                      /* address of thread restart code */
90487 +    FAULT_SAVE         *uctx_faults;
90488 +    FAULT_SAVE         *uctx_fault_list;
90489 +    int                 uctx_num_fault_save;
90490 +    spinlock_t          uctx_fault_lock;
90491 +} USER_CTXT;
90492 +
90493 +/* bit values for uctx_status */
90494 +#define UCTX_EXITING                           (1 << 0)                /* context is exiting. */
90495 +#define UCTX_USER_FILTERING                    (1 << 1)                /* user requested context filter */
90496 +#define UCTX_USER_STOPPED                      (1 << 2)                /* user requested stop */
90497 +
90498 +#define UCTX_SWAPPING                          (1 << 3)                /* context is swapping out */
90499 +#define UCTX_SWAPPED                           (1 << 4)                /* context is swapped out */
90500 +
90501 +#define UCTX_STOPPING                          (1 << 5)                /* stopping elan from running this context */
90502 +#define UCTX_STOPPED                           (1 << 6)                /* elan no longer running this context */
90503 +
90504 +#define UCTX_EPROC_QUEUE_FULL                  (1 << 7)                /* reasons for stopping running */
90505 +#define UCTX_DPROC_QUEUE_FULL                  (1 << 8)
90506 +#define UCTX_TPROC_QUEUE_FULL                  (1 << 9)
90507 +#define UCTX_IPROC_CH0_TRAPPED                 (1 << 10)
90508 +#define UCTX_IPROC_CH1_TRAPPED                 (1 << 11)
90509 +
90510 +#define UCTX_NETERR_TIMER                      (1 << 12)
90511 +#define UCTX_NETERR_FIXUP                      (1 << 13)
90512 +
90513 +#define UCTX_EPROC_QUEUE_OVERFLOW              (1 << 14)
90514 +#define UCTX_DPROC_QUEUE_OVERFLOW              (1 << 15)
90515 +#define UCTX_TPROC_QUEUE_OVERFLOW              (1 << 16)
90516 +
90517 +#define UCTX_EPROC_QUEUE_ERROR                 (1 << 17)
90518 +#define UCTX_DPROC_QUEUE_ERROR                 (1 << 18)
90519 +#define UCTX_TPROC_QUEUE_ERROR                 (1 << 19)
90520 +
90521 +#define UCTX_STOPPED_REASONS                   (UCTX_EPROC_QUEUE_FULL | UCTX_DPROC_QUEUE_FULL | UCTX_TPROC_QUEUE_FULL)
90522 +#define UCTX_SWAPPED_REASONS                   (UCTX_EXITING | UCTX_USER_STOPPED | UCTX_NETERR_FIXUP)
90523 +#define UCTX_NACKING_REASONS                   (UCTX_USER_FILTERING | UCTX_IPROC_CH0_TRAPPED | UCTX_IPROC_CH1_TRAPPED)
90524 +
90525 +#define UCTX_OVERFLOW_REASONS                  (UCTX_EPROC_QUEUE_OVERFLOW | UCTX_DPROC_QUEUE_OVERFLOW | UCTX_TPROC_QUEUE_OVERFLOW)
90526 +#define UCTX_ERROR_REASONS                     (UCTX_EPROC_QUEUE_ERROR | UCTX_DPROC_QUEUE_ERROR | UCTX_TPROC_QUEUE_ERROR)
90527 +
90528 +#define UCTX_RUNNABLE(uctx)                    (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS)) == 0)
90529 +#define UCTX_NACKING(uctx)                     (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS | UCTX_NACKING_REASONS)) != 0)
90530 +
90531 +/* values for uctx_trap_state */
90532 +#define UCTX_TRAP_IDLE                         0
90533 +#define UCTX_TRAP_SLEEPING                     1
90534 +#define UCTX_TRAP_SIGNALLED                    2
90535 +#define UCTX_TRAP_ACTIVE                       3
90536 +
90537 +extern int        user_p2p_route_options;
90538 +extern int        user_bcast_route_options;
90539 +extern int       user_dproc_retry_count;
90540 +extern int       user_cproc_retry_count;
90541 +
90542 +extern USER_CTXT *user_alloc (ELAN4_DEV *dev);
90543 +extern void       user_free (USER_CTXT *uctx);
90544 +extern void       user_swapout (USER_CTXT *uctx, unsigned reason);
90545 +extern void       user_swapin (USER_CTXT *uctx, unsigned reason);
90546 +extern int        user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap);
90547 +extern void       user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap);
90548 +extern void       user_block_inputter (USER_CTXT *uctx, unsigned blocked);
90549 +extern int        user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, 
90550 +                                         unsigned ntproc_traps, unsigned nthreads, unsigned ndmas);
90551 +
90552 +extern int        user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap);
90553 +extern int        user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp);
90554 +extern int        user_removevp (USER_CTXT *uctx, unsigned process);
90555 +
90556 +extern int        user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route);
90557 +extern int        user_reset_route (USER_CTXT *uctx, unsigned process);
90558 +extern int        user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route);
90559 +extern int        user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error);
90560 +extern int       user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg);
90561 +extern int        user_neterr_sten (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop);
90562 +extern int        user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop);
90563 +
90564 +extern int        user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr);
90565 +extern int        user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx);
90566 +extern int        user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma);
90567 +extern int        user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs);
90568 +extern int        user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans,
90569 +                                         E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap);
90570 +
90571 +extern int        user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks);
90572 +extern USER_CQ   *user_findcq (USER_CTXT *uctx, unsigned num);
90573 +extern USER_CQ   *user_alloccq (USER_CTXT *uctx, unsigned size, unsigned perm, unsigned flags);
90574 +extern void       user_freecq (USER_CTXT *uctx, USER_CQ *cq);
90575 +extern void       user_dropcq (USER_CTXT *uctx, USER_CQ *cq);
90576 +
90577 +/* user_osdep.c */
90578 +extern int        user_load_range (USER_CTXT *uctx, E4_Addr addr, unsigned long nbytes, E4_uint32 fsr);
90579 +extern void       user_update_main (USER_CTXT *uctx, struct mm_struct *mm, unsigned long start, unsigned long len);
90580 +extern void       user_unload_main (USER_CTXT *uctx, unsigned long start, unsigned long len);
90581 +
90582 +
90583 +/* regions.c */
90584 +extern USER_RGN  *user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail);
90585 +extern USER_RGN  *user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail);
90586 +extern USER_RGN  *user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr);
90587 +extern USER_RGN  *user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr);
90588 +extern int        user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm);
90589 +extern void       user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len);
90590 +extern int        user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access);
90591 +extern virtaddr_t user_elan2main (USER_CTXT *uctx, E4_Addr addr);
90592 +extern E4_Addr    user_main2elan (USER_CTXT *uctx, virtaddr_t addr);
90593 +extern void       user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len);
90594 +extern void       user_freergns (USER_CTXT *uctx);
90595 +
90596 +/* user_ddcq.c */
90597 +extern int        user_ddcq_check (USER_CTXT *uctx, unsigned num);
90598 +extern int        user_ddcq_flush (USER_CTXT *uctx);
90599 +extern void       user_ddcq_intr (USER_CTXT *uctx);
90600 +extern void       user_ddcq_write_dword (USER_CTXT *uctx, E4_Addr addr, E4_uint64 value);
90601 +extern void       user_ddcq_interrupt (USER_CTXT *uctx, E4_uint64 cookie);
90602 +extern void       user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma);
90603 +extern void       user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs);
90604 +extern void       user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr);
90605 +extern void       user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count);
90606 +extern void       user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1);
90607 +
90608 +
90609 +/*
90610 + * Local variables:
90611 + * c-file-style: "stroustrup"
90612 + * End:
90613 + */
90614 +#endif /* __ELAN4_USER_H */
90615 Index: linux-2.6.5-7.191/include/elan4/userregs.h
90616 ===================================================================
90617 --- linux-2.6.5-7.191.orig/include/elan4/userregs.h     2004-02-23 16:02:56.000000000 -0500
90618 +++ linux-2.6.5-7.191/include/elan4/userregs.h  2005-07-28 14:52:52.971660040 -0400
90619 @@ -0,0 +1,383 @@
90620 +/*
90621 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90622 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90623 + *
90624 + *    For licensing information please see the supplied COPYING file
90625 + *
90626 + */
90627 +
90628 +#ifndef __ELAN4_USERREGS_H
90629 +#define __ELAN4_USERREGS_H
90630 +
90631 +#ident "$Id: userregs.h,v 1.14.2.1 2004/10/07 10:57:40 addy Exp $"
90632 +/*      $Source: /cvs/master/quadrics/elan4hdr/userregs.h,v $*/
90633 +
90634 +#ifdef __cplusplus
90635 +extern "C" {
90636 +#endif
90637 +
90638 +/*
90639 + * Statistic control reg values
90640 + * Each 4-bit nibble of the control word specifies what statistic
90641 + * is to be recorded in each of the 8 statistic counters
90642 + */
90643 +#define COUNT_REG0_SHIFT   32ull
90644 +#define COUNT_REG1_SHIFT   36ull
90645 +#define COUNT_REG2_SHIFT   40ull
90646 +#define COUNT_REG3_SHIFT   44ull
90647 +#define COUNT_REG4_SHIFT   48ull
90648 +#define COUNT_REG5_SHIFT   52ull
90649 +#define COUNT_REG6_SHIFT   56ull
90650 +#define COUNT_REG7_SHIFT   60ull
90651 +
90652 +
90653 +/* Count reg 0 */
90654 +#define STC_INPUT_NON_WRITE_BLOCKS     (0x0ull << COUNT_REG0_SHIFT)
90655 +#define STP_DMA_EOP_WAIT_ACK           (0x1ull << COUNT_REG0_SHIFT)
90656 +#define STP_TPROC_RUNNING              (0x2ull << COUNT_REG0_SHIFT)
90657 +#define STC_STEN_PKTS_OPEN              (0x3ull << COUNT_REG0_SHIFT)
90658 +#define STP_CPROC_HOLDS_FFU_DP         (0x4ull << COUNT_REG0_SHIFT)
90659 +#define STC_TLB_TABLE_WALKS             (0x5ull << COUNT_REG0_SHIFT)
90660 +#define STC_CACHE_HITS                  (0x6ull << COUNT_REG0_SHIFT)
90661 +#define STC_PCI_SLAVE_READS             (0x7ull << COUNT_REG0_SHIFT)
90662 +#define STP_PCI_WAITING_FOR_GNT         (0x8ull << COUNT_REG0_SHIFT)
90663 +#define STP_SYS_CLOCK_RATE0            (0xfull << COUNT_REG0_SHIFT)
90664 +
90665 +#define STATS_REG0_NAMES {                     \
90666 +          "STC_INPUT_NON_WRITE_BLOCKS",        \
90667 +          "STP_DMA_EOP_WAIT_ACK",              \
90668 +          "STP_TPROC_RUNNING",                 \
90669 +          "STC_STEN_PKTS_OPEN",                \
90670 +          "STP_CPROC_HOLDS_FFU_DP",            \
90671 +          "STC_TLB_TABLE_WALKS",               \
90672 +          "STC_CACHE_HITS",                    \
90673 +          "STC_PCI_SLAVE_READS",               \
90674 +          "STP_PCI_WAITING_FOR_GNT",           \
90675 +          "STP_SYS_CLOCK_RATE0"                \
90676 +}
90677 +
90678 +/* Count reg 1 */
90679 +#define STC_INPUT_WRITE_BLOCKS         (0x0ull << COUNT_REG1_SHIFT)
90680 +#define STP_DMA_DATA_TRANSMITTING      (0x1ull << COUNT_REG1_SHIFT)
90681 +#define STC_CPROC_VALUES_EXE           (0x2ull << COUNT_REG1_SHIFT)
90682 +#define STC_STEN_TRANS_SENT            (0x3ull << COUNT_REG1_SHIFT)
90683 +#define STP_TPROC_DQ_HOLDS_FFU_DP      (0x4ull << COUNT_REG1_SHIFT)
90684 +#define STC_TPROC_TLB_HITS             (0x5ull << COUNT_REG1_SHIFT)
90685 +#define STC_CACHE_ALLOC_MISSES         (0x6ull << COUNT_REG1_SHIFT)
90686 +#define STP_PCI_MASTER_READ_WAITING    (0x7ull << COUNT_REG1_SHIFT)
90687 +#define STP_PCI_WAITING_FOR_DEVSEL      (0x8ull << COUNT_REG1_SHIFT)
90688 +#define STP_SYS_CLOCK_RATE1            (0xfull << COUNT_REG1_SHIFT)
90689 +
90690 +#define STATS_REG1_NAMES {                    \
90691 +          "STC_INPUT_WRITE_BLOCKS",            \
90692 +          "STP_DMA_DATA_TRANSMITTING",         \
90693 +          "STC_CPROC_VALUES_EXE",              \
90694 +          "STC_STEN_TRANS_SENT",               \
90695 +          "STP_TPROC_DQ_HOLDS_FFU_DP",         \
90696 +          "STC_TPROC_TLB_HITS",                \
90697 +          "STC_CACHE_ALLOC_MISSES",            \
90698 +          "STP_PCI_MASTER_READ_WAITING",       \
90699 +          "STP_PCI_WAITING_FOR_DEVSEL",        \
90700 +          "STP_SYS_CLOCK_RATE1"                \
90701 +}
90702 +
90703 +/* Count reg 2 */
90704 +#define STC_INPUT_PKTS                 (0x0ull << COUNT_REG2_SHIFT)
90705 +#define STP_DMA_WAITING_MEM            (0x1ull << COUNT_REG2_SHIFT)
90706 +#define STC_CPROC_TRANSFERS             (0x2ull << COUNT_REG2_SHIFT)
90707 +#define STP_STEN_WAIT_NETWORK_BUSY     (0x3ull << COUNT_REG2_SHIFT)
90708 +#define STP_IPROC_HOLDS_FFU_DP         (0x4ull << COUNT_REG2_SHIFT)
90709 +#define STC_UNITS_TLB_HITS             (0x5ull << COUNT_REG2_SHIFT)
90710 +#define STC_CACHE_NON_ALLOC_MISSES      (0x6ull << COUNT_REG2_SHIFT)
90711 +#define STP_PCI_MASTER_WRITE_WAITING   (0x7ull << COUNT_REG2_SHIFT)
90712 +#define STC_PCI_OUT_OF_ORDER_SPLIT_COMP (0x8ull << COUNT_REG2_SHIFT)
90713 +#define STP_SYS_CLOCK_RATE2            (0xfull << COUNT_REG2_SHIFT)
90714 +
90715 +#define STATS_REG2_NAMES {                    \
90716 +          "STC_INPUT_PKTS",                    \
90717 +          "STP_DMA_WAITING_MEM",               \
90718 +          "STC_CPROC_TRANSFERS",               \
90719 +          "STP_STEN_WAIT_NETWORK_BUSY",        \
90720 +          "STP_IPROC_HOLDS_FFU_DP",            \
90721 +          "STC_UNITS_TLB_HITS",                \
90722 +          "STC_CACHE_NON_ALLOC_MISSES",        \
90723 +          "STP_PCI_MASTER_WRITE_WAITING",      \
90724 +          "STC_PCI_OUT_OF_ORDER_SPLIT_COMP",   \
90725 +          "STP_SYS_CLOCK_RATE2"                \
90726 +}
90727 +
90728 +/* Count reg 3 */
90729 +#define STC_INPUT_PKTS_REJECTED         (0x0ull << COUNT_REG3_SHIFT)
90730 +#define STP_DMA_WAIT_NETWORK_BUSY       (0x1ull << COUNT_REG3_SHIFT)
90731 +#define STC_CPROC_PREFETCH_SDRAM        (0x2ull << COUNT_REG3_SHIFT)
90732 +#define STP_STEN_BLOCKED_ACKS_OR_VC     (0x3ull << COUNT_REG3_SHIFT)
90733 +#define STP_EPROC_HOLDS_FFU_DP          (0x4ull << COUNT_REG3_SHIFT)
90734 +#define STP_TPROC_BLOCKED_MEMSYS        (0x5ull << COUNT_REG3_SHIFT)
90735 +#define STC_CACHE_WRITE_BACKS           (0x6ull << COUNT_REG3_SHIFT)
90736 +#define STP_PCI_SLAVE_READ_WAITING      (0x7ull << COUNT_REG3_SHIFT)
90737 +#define STP_PCI_IDLE_CYCLES            (0x8ull << COUNT_REG3_SHIFT)
90738 +#define STP_SYS_CLOCK_RATE3            (0xfull << COUNT_REG3_SHIFT)
90739 +
90740 +#define STATS_REG3_NAMES {                    \
90741 +          "STC_INPUT_PKTS_REJECTED",           \
90742 +          "STP_DMA_WAIT_NETWORK_BUSY",         \
90743 +          "STC_CPROC_PREFETCH_SDRAM",          \
90744 +          "STP_STEN_BLOCKED_ACKS_OR_VC",       \
90745 +          "STP_EPROC_HOLDS_FFU_DP",            \
90746 +          "STP_TPROC_BLOCKED_MEMSYS",          \
90747 +          "STC_CACHE_WRITE_BACKS",             \
90748 +          "STP_PCI_SLAVE_READ_WAITING",        \
90749 +          "STP_PCI_IDLE_CYCLES",               \
90750 +          "STP_SYS_CLOCK_RATE3"                \
90751 +}
90752 +
90753 +/* Count reg 4 */
90754 +#define STP_INPUT_DATA_TRANSMITTING    (0x0ull << COUNT_REG4_SHIFT)
90755 +#define STC_DMA_PKTS_ACCEPTED          (0x1ull << COUNT_REG4_SHIFT)
90756 +#define STC_CPROC_FLUSH_REQ_SDRAM      (0x2ull << COUNT_REG4_SHIFT)
90757 +#define STP_STEN_EOP_WAIT_ACK          (0x3ull << COUNT_REG4_SHIFT)
90758 +#define STP_DMA_HOLDS_FFU_DP           (0x4ull << COUNT_REG4_SHIFT)
90759 +#define STP_UNIT_BLOCKED_MEMSYS        (0x5ull << COUNT_REG4_SHIFT)
90760 +#define STC_PCI_MASTER_READS           (0x6ull << COUNT_REG4_SHIFT)
90761 +#define STP_PCI_SLAVE_WRITE_WAITING    (0x7ull << COUNT_REG4_SHIFT)
90762 +#define STC_INPUT_PACKETS_DISCARDED    (0x8ull << COUNT_REG4_SHIFT)
90763 +#define STP_SYS_CLOCK_RATE4            (0xfull << COUNT_REG4_SHIFT)
90764 +
90765 +#define STATS_REG4_NAMES {                    \
90766 +          "STP_INPUT_DATA_TRANSMITTING",       \
90767 +          "STC_DMA_PKTS_ACCEPTED",             \
90768 +          "STC_CPROC_FLUSH_REQ_SDRAM",         \
90769 +          "STP_STEN_EOP_WAIT_ACK",             \
90770 +          "STP_DMA_HOLDS_FFU_DP",              \
90771 +          "STP_UNIT_BLOCKED_MEMSYS",           \
90772 +          "STC_PCI_MASTER_READS",              \
90773 +          "STP_PCI_SLAVE_WRITE_WAITING",       \
90774 +          "STC_INPUT_PACKETS_DISCARDED",       \
90775 +          "STP_SYS_CLOCK_RATE4"                \
90776 +}
90777 +
90778 +/* Count reg 5 */
90779 +#define STP_INPUT_WAITING_NETWORK_DATA  (0x0ull << COUNT_REG5_SHIFT)
90780 +#define STC_DMA_PKTS_REJECTED           (0x1ull << COUNT_REG5_SHIFT)
90781 +#define STC_CPROC_INSERT_CACHE_MISSES   (0x2ull << COUNT_REG5_SHIFT)
90782 +#define STP_STEN_TRANSMITTING_DATA      (0x3ull << COUNT_REG5_SHIFT)
90783 +#define FFU_BLOCKED_DIFF_FFU_PROC       (0x4ull << COUNT_REG5_SHIFT)
90784 +#define STP_TABLE_WALKS_BLOCKED_MEMSYS  (0x5ull << COUNT_REG5_SHIFT)
90785 +#define STC_PCI_MASTER_WRITES           (0x6ull << COUNT_REG5_SHIFT)
90786 +#define STP_PCI_MASTER_HOLDS_BUS        (0x7ull << COUNT_REG5_SHIFT)
90787 +#define STC_PCI_NO_SPLIT_COMPS         (0x8ull << COUNT_REG5_SHIFT)
90788 +#define STP_SYS_CLOCK_RATE5            (0xfull << COUNT_REG5_SHIFT)
90789 +
90790 +#define STATS_REG5_NAMES {                    \
90791 +          "STP_INPUT_WAITING_NETWORK_DATA",    \
90792 +          "STC_DMA_PKTS_REJECTED",             \
90793 +          "STC_CPROC_INSERT_CACHE_MISSES",     \
90794 +          "STP_STEN_TRANSMITTING_DATA",        \
90795 +          "FFU_BLOCKED_DIFF_FFU_PROC",         \
90796 +          "STP_TABLE_WALKS_BLOCKED_MEMSYS",    \
90797 +          "STC_PCI_MASTER_WRITES",             \
90798 +          "STP_PCI_MASTER_HOLDS_BUS",          \
90799 +          "STC_PCI_NO_SPLIT_COMPS",            \
90800 +          "STP_SYS_CLOCK_RATE5"                \
90801 +}
90802 +
90803 +/* Count reg 6 */
90804 +#define STP_INPUT_BLOCKED_WAITING_TRANS (0x0ull << COUNT_REG6_SHIFT)
90805 +#define STP_TPROC_INST_STALL           (0x1ull << COUNT_REG6_SHIFT)
90806 +#define STP_CPROC_WAITING_DESCHED      (0x2ull << COUNT_REG6_SHIFT)
90807 +#define STP_STEN_PKT_OPEN_WAITING_DATA (0x3ull << COUNT_REG6_SHIFT)
90808 +#define STP_TLB_HASH_TABLE_ACCESSES    (0x4ull << COUNT_REG6_SHIFT)
90809 +#define STP_PCI_SLAVE_BLOCKED_MEMSYS   (0x5ull << COUNT_REG6_SHIFT)
90810 +#define STP_PCI_TRANSFERRING_DATA       (0x6ull << COUNT_REG6_SHIFT)
90811 +#define STP_PCI_MASTER_WAITING_BUS      (0x7ull << COUNT_REG6_SHIFT)
90812 +#define STP_PCI_READ_LATENCY           (0x8ull << COUNT_REG6_SHIFT)
90813 +#define STP_SYS_CLOCK_RATE6            (0xfull << COUNT_REG6_SHIFT)
90814 +
90815 +#define STATS_REG6_NAMES {                    \
90816 +          "STP_INPUT_BLOCKED_WAITING_TRANS",   \
90817 +          "STP_TPROC_INST_STALL",              \
90818 +          "STP_CPROC_WAITING_DESCHED",         \
90819 +          "STP_STEN_PKT_OPEN_WAITING_DATA",    \
90820 +          "STP_TLB_HASH_TABLE_ACCESSES",       \
90821 +          "STP_PCI_SLAVE_BLOCKED_MEMSYS",      \
90822 +          "STP_PCI_TRANSFERRING_DATA",         \
90823 +          "STP_PCI_MASTER_WAITING_BUS",        \
90824 +          "STP_PCI_READ_LATENCY",              \
90825 +          "STP_SYS_CLOCK_RATE6"                \
90826 +}
90827 +
90828 +/* Count reg 7 */
90829 +#define STC_INPUT_CTX_FILTER_FILL       (0x0ull << COUNT_REG7_SHIFT)   
90830 +#define STP_TPROC_LOAD_STORE_STALL      (0x1ull << COUNT_REG7_SHIFT)
90831 +#define STC_CPROC_TIMEOUTS              (0x2ull << COUNT_REG7_SHIFT)
90832 +#define STP_STEN_BLOCKED_NETWORK        (0x3ull << COUNT_REG7_SHIFT)
90833 +#define STP_TLB_CHAIN_ACCESSES          (0x4ull << COUNT_REG7_SHIFT)
90834 +#define STP_CPROC_SCHED_BLOCKED_MEMSYS  (0x5ull << COUNT_REG7_SHIFT)
90835 +#define STC_PCI_SLAVE_WRITES            (0x6ull << COUNT_REG7_SHIFT)
90836 +#define STC_PCI_DISCONNECTS_RETRIES     (0x7ull << COUNT_REG7_SHIFT)
90837 +#define STC_RING_OSCILLATOR            (0x8ull << COUNT_REG7_SHIFT)
90838 +#define STP_SYS_CLOCK_RATE7            (0xfull << COUNT_REG7_SHIFT)
90839 +
90840 +#define STATS_REG7_NAMES {                    \
90841 +          "STC_INPUT_CTX_FILTER_FILL",         \
90842 +          "STP_TPROC_LOAD_STORE_STALL",        \
90843 +          "STC_CPROC_TIMEOUTS",                \
90844 +          "STP_STEN_BLOCKED_NETWORK",          \
90845 +          "STP_TLB_CHAIN_ACCESSES",            \
90846 +          "STP_CPROC_SCHED_BLOCKED_MEMSYS",    \
90847 +          "STC_PCI_SLAVE_WRITES",              \
90848 +          "STC_PCI_DISCONNECTS_RETRIES",       \
90849 +          "STC_RING_OSCILLATOR",               \
90850 +          "STP_SYS_CLOCK_RATE7"                \
90851 +}
90852 +
90853 +#define STATS_REG_NAMES { \
90854 +    STATS_REG0_NAMES, \
90855 +    STATS_REG1_NAMES, \
90856 +    STATS_REG2_NAMES, \
90857 +    STATS_REG3_NAMES, \
90858 +    STATS_REG4_NAMES, \
90859 +    STATS_REG5_NAMES, \
90860 +    STATS_REG6_NAMES, \
90861 +    STATS_REG7_NAMES, \
90862 +}
90863 +
90864 +
90865 +#define INPUT_PERF_STATS        (STC_INPUT_NON_WRITE_BLOCKS | STC_INPUT_WRITE_BLOCKS |              \
90866 +                                STC_INPUT_PKTS | STC_INPUT_PKTS_REJECTED |                         \
90867 +                                 STC_INPUT_CTX_FILTER_FILL | STP_INPUT_DATA_TRANSMITTING |           \
90868 +                                STP_INPUT_WAITING_NETWORK_DATA | STP_INPUT_BLOCKED_WAITING_TRANS | STC_INPUT_PACKETS_DISCARDED) 
90869 +
90870 +#define DMA_PERF_STATS          (STC_DMA_PKTS_ACCEPTED | STC_DMA_PKTS_REJECTED |                    \
90871 +                                 STP_DMA_EOP_WAIT_ACK | STP_DMA_DATA_TRANSMITTING |                 \
90872 +                                STP_DMA_WAITING_MEM | STP_DMA_WAIT_NETWORK_BUSY)                 
90873 +
90874 +
90875 +#define TPROC_PERF_STATS        (STP_TPROC_RUNNING | STP_TPROC_INST_STALL |                         \
90876 +                                 STP_TPROC_LOAD_STORE_STALL)
90877 +
90878 +#define CPROC_PERF_STATS        (STC_CPROC_VALUES_EXE | STC_CPROC_TRANSFERS |                       \
90879 +                                STC_CPROC_PREFETCH_SDRAM | STC_CPROC_FLUSH_REQ_SDRAM |             \
90880 +                                STC_CPROC_INSERT_CACHE_MISSES | STP_CPROC_WAITING_DESCHED |        \
90881 +                                STC_CPROC_TIMEOUTS)
90882 +
90883 +#define STEN_PERF_STATS         (STC_STEN_PKTS_OPEN | STC_STEN_TRANS_SENT |                         \
90884 +                                STP_STEN_WAIT_NETWORK_BUSY | STP_STEN_BLOCKED_ACKS_OR_VC |         \
90885 +                                STP_STEN_EOP_WAIT_ACK | STP_STEN_TRANSMITTING_DATA |               \
90886 +                                STP_STEN_PKT_OPEN_WAITING_DATA | STP_STEN_BLOCKED_NETWORK)
90887 +
90888 +#define FFU_PREF_STATS          (STP_CPROC_HOLDS_FFU_DP | STP_TPROC_DQ_HOLDS_FFU_DP |               \
90889 +                                STP_IPROC_HOLDS_FFU_DP | STP_EPROC_HOLDS_FFU_DP |                  \
90890 +                                STP_DMA_HOLDS_FFU_DP | FFU_BLOCKED_DIFF_FFU_PROC)
90891 +
90892 +#define TABLE_WALK_PERF_STATS   (STC_TPROC_TLB_HITS | STC_UNITS_TLB_HITS |                          \
90893 +                                STP_TLB_HASH_TABLE_ACCESSES | STP_TLB_CHAIN_ACCESSES |             \
90894 +                                STC_TLB_TABLE_WALKS)
90895 +
90896 +#define ADDRESS_ARB_PERF_STATS  (STP_UNIT_BLOCKED_MEMSYS | STP_TPROC_BLOCKED_MEMSYS |               \
90897 +                                STP_TABLE_WALKS_BLOCKED_MEMSYS | STP_CPROC_SCHED_BLOCKED_MEMSYS |  \
90898 +                                STP_PCI_SLAVE_BLOCKED_MEMSYS)
90899 +
90900 +#define CACHE_PERF_STATS        (STC_CACHE_HITS | STC_CACHE_ALLOC_MISSES |                          \
90901 +                                STC_CACHE_NON_ALLOC_MISSES | STC_CACHE_WRITE_BACKS)
90902 +
90903 +
90904 +#define PCI_PERF_STATS          (STC_PCI_SLAVE_READS | STP_PCI_MASTER_READ_WAITING |                \
90905 +                                 STP_PCI_MASTER_WRITE_WAITING | STP_PCI_SLAVE_READ_WAITING |        \
90906 +                                 STP_PCI_SLAVE_WRITE_WAITING | STC_PCI_MASTER_WRITES |              \
90907 +                                 STP_PCI_TRANSFERRING_DATA | STC_PCI_SLAVE_WRITES)
90908 +
90909 +#define PCIBUS_PERF_STATS       (STP_PCI_WAITING_FOR_GNT | STP_PCI_WAITING_FOR_DEVSEL |                    \
90910 +                                STC_PCI_OUT_OF_ORDER_SPLIT_COMP | STP_PCI_IDLE_CYCLES |            \
90911 +                                STC_PCI_MASTER_READS | STP_PCI_MASTER_HOLDS_BUS |                  \
90912 +                                STP_PCI_MASTER_WAITING_BUS | STC_PCI_DISCONNECTS_RETRIES)
90913 +
90914 +                                
90915 +    extern const char *elan_stats_names[8][10];
90916 +
90917 +#define ELAN_STATS_NAME(COUNT, CONTROL) (elan_stats_names[(COUNT)][(CONTROL) & 7])
90918 +
90919 +    typedef volatile union e4_StatsControl
90920 +    {
90921 +       E4_uint64 StatsControl;
90922 +       struct
90923 +       {
90924 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
90925 +           E4_uint32 StatCont0:4;
90926 +           E4_uint32 StatCont1:4;
90927 +           E4_uint32 StatCont2:4;
90928 +           E4_uint32 StatCont3:4;
90929 +           E4_uint32 StatCont4:4;
90930 +           E4_uint32 StatCont5:4;
90931 +           E4_uint32 StatCont6:4;
90932 +           E4_uint32 StatCont7:4;
90933 +#else
90934 +           E4_uint32 StatCont7:4;
90935 +           E4_uint32 StatCont6:4;
90936 +           E4_uint32 StatCont5:4;
90937 +
90938 +           E4_uint32 StatCont4:4;
90939 +           E4_uint32 StatCont3:4;
90940 +           E4_uint32 StatCont2:4;
90941 +           E4_uint32 StatCont1:4;
90942 +           E4_uint32 StatCont0:4;
90943 +#endif
90944 +           E4_uint32 pad;
90945 +       } s;
90946 +    } E4_StatsControl;
90947 +
90948 +typedef volatile union e4_StatsCount
90949 +{
90950 +   E4_uint64    ClockStat; 
90951 +   struct
90952 +   {
90953 +       E4_uint32 ClockLSW;     /* read only */
90954 +       E4_uint32 StatsCount;
90955 +   } s;
90956 +} E4_StatsCount;
90957 +
90958 +typedef volatile union e4_clock
90959 +{
90960 +   E4_uint64 NanoSecClock;
90961 +   struct
90962 +   {
90963 +      E4_uint32 ClockLSW;
90964 +      E4_uint32 ClockMSW;
90965 +   } s;
90966 +} E4_Clock;
90967 +#define E4_TIME( X ) ((X).NanoSecClock)
90968 +
90969 +#define ELAN4_COMMS_CLOCK_FREQUENCY    660             /* In Mhz. This is half the bit rate. */
90970 +#define ELAN4_CLOCK_ADD_VALUE          200             /* For 200ns increment rate */
90971 +#define ELAN4_CLOCK_COMMS_DIV_VALUE    (((ELAN4_COMMS_CLOCK_FREQUENCY * ELAN4_CLOCK_ADD_VALUE) / (1000 * 4)) - 1)
90972 +#define ELAN4_CLOCK_TICK_RATE          ((ELAN4_CLOCK_ADD_VALUE << 8) + ELAN4_CLOCK_COMMS_DIV_VALUE)
90973 +
90974 +typedef volatile union e4_clocktickrate
90975 +{
90976 +   E4_uint64 NanoSecClock;
90977 +   struct
90978 +   {
90979 +      E4_uint32 pad1;
90980 +      E4_uint32 TickRates;
90981 +   } s;
90982 +} E4_ClockTickRate;
90983 +
90984 +/*
90985 + * This is made into an 8k byte object.
90986 + */
90987 +typedef volatile struct _E4_User_Regs
90988 +{
90989 +   E4_StatsCount       StatCounts[8];
90990 +   E4_StatsCount       InstCount;
90991 +   E4_Clock            Clock;
90992 +   E4_StatsControl     StatCont;
90993 +   E4_ClockTickRate    ClockTickRate;
90994 +   E4_uint8            pad1[EightK - ((sizeof(E4_StatsCount)*9)+sizeof(E4_StatsControl)+
90995 +                                       sizeof(E4_Clock)+sizeof(E4_ClockTickRate))];
90996 +} E4_User_Regs;
90997 +
90998 +#ifdef __cplusplus
90999 +}
91000 +#endif
91001 +
91002 +#endif /* __ELAN4_USERREGS_H */
91003 Index: linux-2.6.5-7.191/include/elan4/usertrap.h
91004 ===================================================================
91005 --- linux-2.6.5-7.191.orig/include/elan4/usertrap.h     2004-02-23 16:02:56.000000000 -0500
91006 +++ linux-2.6.5-7.191/include/elan4/usertrap.h  2005-07-28 14:52:52.971660040 -0400
91007 @@ -0,0 +1,114 @@
91008 +/*
91009 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
91010 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
91011 + * 
91012 + *    For licensing information please see the supplied COPYING file
91013 + *
91014 + */
91015 +
91016 +#ident "@(#)$Id: usertrap.h,v 1.17 2004/05/05 09:08:35 david Exp $"
91017 +/*      $Source: /cvs/master/quadrics/elan4mod/usertrap.h,v $*/
91018 +
91019 +#ifndef __ELAN4_USERTRAP_H
91020 +#define __ELAN4_USERTRAP_H
91021 +
91022 +#ifndef _ASM
91023 +typedef struct elan4_user_page
91024 +{
91025 +    E4_uint64          upage_ddcq_completed;
91026 +} ELAN4_USER_PAGE;
91027 +
91028 +typedef struct elan4_user_trap
91029 +{
91030 +    int                                ut_type;
91031 +    unsigned                   ut_proc;
91032 +    unsigned                   ut_args[4];
91033 +
91034 +    union {
91035 +       ELAN4_EPROC_TRAP        eproc;
91036 +       ELAN4_CPROC_TRAP        cproc;
91037 +       ELAN4_DPROC_TRAP        dproc;
91038 +       ELAN4_IPROC_TRAP        iproc;
91039 +       ELAN4_TPROC_TRAP        tproc;
91040 +       ELAN4_NETERR_MSG        msg;
91041 +    }                  ut_trap;
91042 +} ELAN4_USER_TRAP;
91043 +
91044 +#endif /* _ASM */
91045 +
91046 +
91047 +/* value for ut_type */
91048 +#define UTS_FINISHED           0                               /* all pending traps have been handled */
91049 +#define UTS_RESCHEDULE         1                               /* must return to user mode and re-enter */
91050 +#define UTS_UNIMP_INSTR                2                               /* unimplemented thread instruction */
91051 +#define UTS_EXECUTE_PACKET     3                               /* iproc trap needs packet executing */
91052 +#define UTS_NETWORK_ERROR_TRAP 4                               /* network error on this trap */
91053 +#define UTS_NETWORK_ERROR_MSG  5                               /* network error message  */
91054 +#define UTS_NETWORK_ERROR_TIMER        6                               /* network error timer expired */
91055 +
91056 +#define UTS_EFAULT             -1                              /* failed to copyout trap */
91057 +#define UTS_INVALID_ADDR       -2                              /* all -ve codes mean trap could not be resolved. */
91058 +#define UTS_INVALID_VPROC      -3
91059 +#define UTS_INVALID_COMMAND    -4
91060 +#define UTS_BAD_TRAP           -5
91061 +#define UTS_ALIGNMENT_ERROR    -6
91062 +#define UTS_QUEUE_OVERFLOW     -7
91063 +#define UTS_QUEUE_ERROR                -8
91064 +#define UTS_INVALID_TRANS      -9
91065 +#define UTS_PERMISSION_DENIED  -10
91066 +#define UTS_CPROC_ERROR                -11
91067 +#define UTS_INVALID_COOKIE     -12
91068 +#define UTS_NETERR_ERROR       -13
91069 +
91070 +/* "special" values for registering handlers */
91071 +#define UTS_ALL_TRAPS          -9999
91072 +
91073 +/* value for ut_proc */
91074 +#define UTS_NOPROC             0
91075 +#define UTS_EPROC              1
91076 +#define UTS_CPROC              2
91077 +#define UTS_DPROC              3
91078 +#define UTS_TPROC              4
91079 +#define UTS_IPROC              5
91080 +#define UTS_NETERR_MSG         6
91081 +
91082 +/* unimplemented trap numbers for thread processor */
91083 +#define ELAN4_T_TRAP_INSTR(t)  (0x80202000 | ((t) & 0xFF))
91084 +
91085 +#define ELAN4_T_SYSCALL_TRAP   1
91086 +#  define ELAN4_T_OPEN         0
91087 +#  define ELAN4_T_WRITE                1
91088 +#  define ELAN4_T_READ         2
91089 +#  define ELAN4_T_IOCTL                3
91090 +#  define ELAN4_T_LSEEK                4
91091 +#  define ELAN4_T_POLL         5
91092 +#  define ELAN4_T_CLOSE                6
91093 +#  define ELAN4_T_KILL         7
91094 +#  define ELAN4_T_MMAP         8
91095 +#  define ELAN4_T_MUNMAP       9
91096 +#  define ELAN4_T_ABORT                100
91097 +#  define ELAN4_T_DEBUG                101
91098 +#  define ELAN4_T_REGDUMP      102
91099 +
91100 +#define ELAN4_T_REGDUMP_TRAP   2
91101 +
91102 +#define ELAN4_T_LIBELAN_TRAP   3
91103 +#  define ELAN4_T_TPORT_NEWBUF 0
91104 +#  define ELAN4_T_TPORT_GC     1
91105 +#  define ELAN4_T_TPORT_DEBUG  2
91106 +
91107 +#define ELAN4_T_ALLOC_TRAP     4
91108 +#  define ELAN4_T_ALLOC_ELAN   0
91109 +#  define ELAN4_T_ALLOC_MAIN   1
91110 +#  define ELAN4_T_FREE_ELAN    2
91111 +#  define ELAN4_T_FREE_MAIN    3
91112 +
91113 +/* reserved main interrupt cookies */
91114 +#define ELAN4_INT_COOKIE_DDCQ  0
91115 +
91116 +/*
91117 + * Local variables:
91118 + * c-file-style: "stroustrup"
91119 + * End:
91120 + */
91121 +#endif /* __ELAN4_USERTRAP_H */
91122 Index: linux-2.6.5-7.191/include/elan4/xsdram.h
91123 ===================================================================
91124 --- linux-2.6.5-7.191.orig/include/elan4/xsdram.h       2004-02-23 16:02:56.000000000 -0500
91125 +++ linux-2.6.5-7.191/include/elan4/xsdram.h    2005-07-28 14:52:52.971660040 -0400
91126 @@ -0,0 +1,59 @@
91127 +/*
91128 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
91129 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
91130 + *
91131 + *    For licensing information please see the supplied COPYING file
91132 + *
91133 + */
91134 +
91135 +#ifndef __ELAN4_XSDRAM_H
91136 +#define __ELAN4_XSDRAM_H
91137 +
91138 +#ident "@(#)$Id: xsdram.h,v 1.13 2004/03/05 12:32:04 jon Exp $ $Name: QSNETMODULES-4-31_20050321 $"
91139 +/*      $Source: /cvs/master/quadrics/elan4hdr/xsdram.h,v $*/
91140 +
91141 +/* SAMSUNG K4H281638D-TCB3 */
91142 +
91143 +#define SDRAM_tRCF_1_SH         0
91144 +#define SDRAM_tRP_1_SH          4
91145 +#define SDRAM_tRCD_SH           8
91146 +#define SDRAM_tRRD_SH           12
91147 +#define SDRAM_tEndWr_SH         16
91148 +#define SDRAM_tEndRd_SH         20
91149 +#define SDRAM_Burst_SH          24
91150 +#define SDRAM_CL_SH             28
91151 +#define SDRAM_DsblBypass       (1ULL << 31)
91152 +#define SDRAM_RefreshRate_SH    32
91153 +#define SDRAM_RamSize_SH        34
91154 +#define SDRAM_ReadLtncy_1_SH    36
91155 +#define SDRAM_RdOffset_SH       40
91156 +#define SDRAM_FlightDelay_SH    42
91157 +
91158 +#define SDRAM_ENABLE_ECC       (1ULL << 44) // Enables error detecting on the ECC.
91159 +#define SDRAM_SDRAM_TESTING    (1ULL << 45) // Switches to test mode for checking ECC data bits
91160 +#define SDRAM_SETUP            (1ULL << 46) // Writes SDram control reg when set. Also starts
91161 +
91162 +#define SDRAM_CS_MODE0          0ULL         // 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output)
91163 +#define SDRAM_CS_MODE1          1ULL         // 64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output)
91164 +#define SDRAM_CS_MODE2          2ULL         // 2Gbit (16-bit output) or 1Gbit (8-bit output)
91165 +#define SDRAM_CS_MODE3          3ULL         // 4Gbit (16-bit output) or 2Gbit (8-bit output)
91166 +
91167 +#if defined(LINUX) && !defined(CONFIG_MPSAS)
91168 +#define SDRAM_STARTUP_VALUE   ((0xbULL << SDRAM_tRCF_1_SH)      | (0x2ULL << SDRAM_tRP_1_SH)       | \
91169 +                               (0x3ULL << SDRAM_tRCD_SH)        | (0x2ULL << SDRAM_tRRD_SH)        | \
91170 +                               (0xaULL << SDRAM_tEndWr_SH)      | (0x6ULL << SDRAM_tEndRd_SH)      | \
91171 +                               (0x8ULL << SDRAM_Burst_SH)       | (0x6ULL << SDRAM_CL_SH)          | \
91172 +                               (0x2ULL << SDRAM_RefreshRate_SH) | (0x3ULL << SDRAM_RamSize_SH)     | \
91173 +                               (0x1ULL << SDRAM_RdOffset_SH)    | (0x1ULL << SDRAM_FlightDelay_SH) | \
91174 +                               (0x4ULL << SDRAM_ReadLtncy_1_SH))
91175 +#else
91176 +#define SDRAM_STARTUP_VALUE   ((0xbULL << SDRAM_tRCF_1_SH)      | (0x2ULL << SDRAM_tRP_1_SH)       | \
91177 +                               (0x3ULL << SDRAM_tRCD_SH)        | (0x2ULL << SDRAM_tRRD_SH)        | \
91178 +                               (0xaULL << SDRAM_tEndWr_SH)      | (0x6ULL << SDRAM_tEndRd_SH)      | \
91179 +                               (0x8ULL << SDRAM_Burst_SH)       | (0x6ULL << SDRAM_CL_SH)          | \
91180 +                               (0x0ULL << SDRAM_RefreshRate_SH) | (0x0ULL << SDRAM_RamSize_SH)     | \
91181 +                               (0x1ULL << SDRAM_RdOffset_SH)    | (0x1ULL << SDRAM_FlightDelay_SH) | \
91182 +                               (0x4ULL << SDRAM_ReadLtncy_1_SH) | SDRAM_ENABLE_ECC | SDRAM_SETUP)
91183 +#endif
91184 +
91185 +#endif /* __ELAN4_XSDRAM_H */
91186 Index: linux-2.6.5-7.191/include/jtag/jtagio.h
91187 ===================================================================
91188 --- linux-2.6.5-7.191.orig/include/jtag/jtagio.h        2004-02-23 16:02:56.000000000 -0500
91189 +++ linux-2.6.5-7.191/include/jtag/jtagio.h     2005-07-28 14:52:52.972659888 -0400
91190 @@ -0,0 +1,106 @@
91191 +/*
91192 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
91193 + *
91194 + *    For licensing information please see the supplied COPYING file
91195 + *
91196 + */
91197 +
91198 +#ident "$Id: jtagio.h,v 1.7.8.1 2005/01/27 15:21:47 lee Exp $"
91199 +/*             $Source: /cvs/master/quadrics/jtagmod/jtagio.h,v $*/
91200 +
91201 +
91202 +#ifndef __SYS_JTAGMOD_H
91203 +#define __SYS_JTAGMOD_H
91204 +
91205 +#ifdef __cplusplus
91206 +extern "C" {
91207 +#endif
91208 +
91209 +#define JTAG_MAX_CHIPS         8
91210 +#define JTAG_MAX_INSTR_LEN     8
91211 +#define JTAG_MAX_BITS           (JTAG_MAX_CHIPS * JTAG_MAX_INSTR_LEN)
91212 +#define JTAG_MAX_DATA_LEN      1024
91213 +
91214 +#define JTAG_BYPASS            0xFF
91215 +
91216 +#define I2C_ADDR_LEN           7                               /* 7 bits of address */
91217 +#define I2C_DATA_LEN           8                               /* 8 bits of data */
91218 +#define I2C_MAX_DATA_LEN       9                               /* and up to 9 bytes worth */
91219 +
91220 +#define BITS_PER_BYTE          8
91221 +#define JTAG_NBYTES(nbits)     (((nbits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
91222 +#define JTAG_BIT(v, num)       (((v)[(num) / BITS_PER_BYTE] >> ((num) % BITS_PER_BYTE)) & 1)
91223 +#define JTAG_SET_BIT(v, num)   ((v)[(num) / BITS_PER_BYTE] |= (1 << ((num) % BITS_PER_BYTE)))
91224 +#define JTAG_CLR_BIT(v, num)   ((v)[(num) / BITS_PER_BYTE] &= ~(1 << ((num) % BITS_PER_BYTE)))
91225 +
91226 +#define RING_CLOCK_CARD                (0x3D)
91227 +#define RING_CLOCK_SHIFT       (0x3E)
91228 +#define RING_JTAG_LOOPBACK     (0x3F)
91229 +#define RING_MAX               (0x40)
91230 +
91231 +#define RING_QUAD_BIT          (0x40)
91232 +#define RING_I2C_BIT           (0x80)
91233 +
91234 +#define VALID_JTAG_RING(ring)  ((ring) < 0x20 || (ring) == RING_JTAG_LOOPBACK)
91235 +#define VALID_I2C_RING(ring)   ((ring) < 0x20 || (ring) == RING_CLOCK_CARD)
91236 +
91237 +
91238 +typedef struct jtag_value
91239 +{
91240 +    u_char     bytes[JTAG_NBYTES(JTAG_MAX_DATA_LEN)];
91241 +} JTAG_VALUE;
91242 +
91243 +/* arguments to JTAG_SHIFT_IR/JTAG_SHIFT_DR */
91244 +typedef struct jtag_reset_args
91245 +{
91246 +    u_int      ring;
91247 +} JTAG_RESET_ARGS;
91248 +
91249 +typedef struct jtag_shift_args
91250 +{
91251 +    u_int      ring;
91252 +    u_int      nbits;
91253 +    u_char     *value;
91254 +} JTAG_SHIFT_ARGS;
91255 +
91256 +typedef struct i2c_args
91257 +{
91258 +    u_int      ring;
91259 +    u_int      device;
91260 +    u_int      reg;
91261 +    u_int      count;
91262 +    u_int      ok;
91263 +    u_char     data[I2C_MAX_DATA_LEN];
91264 +} I2C_ARGS;
91265 +
91266 +/* values for 'ok' - the return value from i2c_xx functions */
91267 +#define I2C_OP_SUCCESS         0
91268 +#define I2C_OP_ERROR           1
91269 +#define I2C_OP_NOT_IDLE                2
91270 +#define I2C_OP_NO_DEVICE       3
91271 +#define I2C_OP_WRITE_TO_BIG    4
91272 +#define I2C_OP_BAD_RESOURCE    5
91273 +
91274 +typedef struct i2c_clock_shift_args
91275 +{
91276 +    u_int      t;
91277 +    u_int      n;
91278 +    u_int      m;
91279 +} I2C_CLOCK_SHIFT_ARGS;
91280 +
91281 +#define JTAG_RESET             _IOWR('j', '0', JTAG_RESET_ARGS)
91282 +#define JTAG_SHIFT_IR          _IOWR('j', '1', JTAG_SHIFT_ARGS)
91283 +#define JTAG_SHIFT_DR          _IOWR('j', '2', JTAG_SHIFT_ARGS)
91284 +
91285 +#define I2C_CLOCK_SHIFT                _IOWR('j', '4', I2C_CLOCK_SHIFT_ARGS)
91286 +#define I2C_WRITE              _IOWR('j', '5', I2C_ARGS)
91287 +#define I2C_READ               _IOWR('j', '6', I2C_ARGS)
91288 +#define I2C_WRITEREG           _IOWR('j', '7', I2C_ARGS)
91289 +#define I2C_READREG            _IOWR('j', '8', I2C_ARGS)
91290 +
91291 +
91292 +#ifdef __cplusplus
91293 +}
91294 +#endif
91295 +
91296 +#endif /* __SYS_JTAGMOD_H */
91297 Index: linux-2.6.5-7.191/include/linux/init_task.h
91298 ===================================================================
91299 --- linux-2.6.5-7.191.orig/include/linux/init_task.h    2005-06-28 12:24:09.000000000 -0400
91300 +++ linux-2.6.5-7.191/include/linux/init_task.h 2005-07-28 14:52:52.972659888 -0400
91301 @@ -3,6 +3,7 @@
91302  
91303  #include <linux/file.h>
91304  #include <linux/pagg.h>
91305 +#include <linux/ptrack.h>
91306  
91307  #define INIT_FILES \
91308  {                                                      \
91309 @@ -116,6 +117,7 @@
91310         .map_base       = __TASK_UNMAPPED_BASE,                         \
91311         .io_wait        = NULL,                                         \
91312         INIT_TASK_PAGG(tsk)                                             \
91313 +       INIT_TASK_PTRACK(tsk)                                           \
91314  }
91315  
91316  
91317 Index: linux-2.6.5-7.191/include/linux/ioproc.h
91318 ===================================================================
91319 --- linux-2.6.5-7.191.orig/include/linux/ioproc.h       2004-02-23 16:02:56.000000000 -0500
91320 +++ linux-2.6.5-7.191/include/linux/ioproc.h    2005-07-28 14:52:52.973659736 -0400
91321 @@ -0,0 +1,271 @@
91322 +/* -*- linux-c -*-
91323 + *
91324 + *    Copyright (C) 2002-2004 Quadrics Ltd.
91325 + *
91326 + *    This program is free software; you can redistribute it and/or modify
91327 + *    it under the terms of the GNU General Public License as published by
91328 + *    the Free Software Foundation; either version 2 of the License, or
91329 + *    (at your option) any later version.
91330 + *
91331 + *    This program is distributed in the hope that it will be useful,
91332 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
91333 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
91334 + *    GNU General Public License for more details.
91335 + *
91336 + *    You should have received a copy of the GNU General Public License
91337 + *    along with this program; if not, write to the Free Software
91338 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
91339 + *
91340 + *
91341 + */
91342 +
91343 +/*
91344 + * Callbacks for IO processor page table updates.
91345 + */
91346 +
91347 +#ifndef __LINUX_IOPROC_H__
91348 +#define __LINUX_IOPROC_H__
91349 +
91350 +#include <linux/sched.h>
91351 +#include <linux/mm.h>
91352 +
91353 +typedef struct ioproc_ops {
91354 +       struct ioproc_ops *next;
91355 +       void *arg;
91356 +
91357 +       void (*release)(void *arg, struct mm_struct *mm);
91358 +       void (*sync_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
91359 +       void (*invalidate_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
91360 +       void (*update_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
91361 +
91362 +       void (*change_protection)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot);
91363 +
91364 +       void (*sync_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
91365 +       void (*invalidate_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
91366 +       void (*update_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
91367 +
91368 +} ioproc_ops_t;
91369 +
91370 +/* IOPROC Registration
91371 + * 
91372 + * Called by the IOPROC device driver to register its interest in page table
91373 + * changes for the process associated with the supplied mm_struct
91374 + *
91375 + * The caller should first allocate and fill out an ioproc_ops structure with 
91376 + * the function pointers initialised to the device driver specific code for
91377 + * each callback. If the device driver doesn't have code for a particular 
91378 + * callback then it should set the function pointer to be NULL.
91379 + * The ioproc_ops arg parameter will be passed unchanged as the first argument
91380 + * to each callback function invocation.
91381 + *
91382 + * The ioproc registration is not inherited across fork() and should be called
91383 + * once for each process that the IOPROC device driver is interested in.
91384 + *
91385 + * Must be called holding the mm->page_table_lock
91386 + */
91387 +extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip);
91388 +
91389 +
91390 +/* IOPROC De-registration
91391 + * 
91392 + * Called by the IOPROC device driver when it is no longer interested in page 
91393 + * table changes for the process associated with the supplied mm_struct
91394 + *
91395 + * Normally this is not needed to be called as the ioproc_release() code will
91396 + * automatically unlink the ioproc_ops struct from the mm_struct as the
91397 + * process exits
91398 + *
91399 + * Must be called holding the mm->page_table_lock
91400 + */
91401 +extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip);
91402 +
91403 +#ifdef CONFIG_IOPROC
91404 +
91405 +/* IOPROC Release
91406 + *
91407 + * Called during exit_mmap() as all vmas are torn down and unmapped.
91408 + *
91409 + * Also unlinks the ioproc_ops structure from the mm list as it goes.
91410 + *
91411 + * No need for locks as the mm can no longer be accessed at this point
91412 + *
91413 + */
91414 +static inline void 
91415 +ioproc_release(struct mm_struct *mm)
91416 +{
91417 +       struct ioproc_ops *cp;
91418 +
91419 +       while ((cp = mm->ioproc_ops) != NULL) {
91420 +               mm->ioproc_ops = cp->next;
91421 +        
91422 +               if (cp->release)
91423 +                       cp->release(cp->arg, mm);
91424 +       }
91425 +}
91426 +
91427 +/* IOPROC SYNC RANGE
91428 + *
91429 + * Called when a memory map is synchronised with its disk image i.e. when the 
91430 + * msync() syscall is invoked. Any future read or write to the associated 
91431 + * pages by the IOPROC should cause the page to be marked as referenced or 
91432 + * modified.
91433 + *
91434 + * Called holding the mm->page_table_lock
91435 + */
91436 +static inline void 
91437 +ioproc_sync_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
91438 +{
91439 +       struct ioproc_ops *cp;
91440 +
91441 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
91442 +               if (cp->sync_range)
91443 +                       cp->sync_range(cp->arg, vma, start, end);
91444 +}
91445 +
91446 +/* IOPROC INVALIDATE RANGE
91447 + *
91448 + * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the
91449 + * user or paged out by the kernel. 
91450 + *
91451 + * After this call the IOPROC must not access the physical memory again unless
91452 + * a new translation is loaded.
91453 + *
91454 + * Called holding the mm->page_table_lock
91455 + */
91456 +static inline void 
91457 +ioproc_invalidate_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
91458 +{
91459 +       struct ioproc_ops *cp;
91460 +       
91461 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
91462 +               if (cp->invalidate_range)
91463 +                       cp->invalidate_range(cp->arg, vma, start, end);
91464 +}
91465 +
91466 +/* IOPROC UPDATE RANGE
91467 + *
91468 + * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk 
91469 + * up, when breaking COW or faulting in an anonymous page of memory.
91470 + *
91471 + * These give the IOPROC device driver the opportunity to load translations 
91472 + * speculatively, which can improve performance by avoiding device translation
91473 + * faults.
91474 + *
91475 + * Called holding the mm->page_table_lock
91476 + */
91477 +static inline void 
91478 +ioproc_update_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
91479 +{
91480 +       struct ioproc_ops *cp;
91481 +
91482 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
91483 +               if (cp->update_range)
91484 +                       cp->update_range(cp->arg, vma, start, end);
91485 +}
91486 +
91487 +
91488 +/* IOPROC CHANGE PROTECTION
91489 + *
91490 + * Called when the protection on a region of memory is changed i.e. when the 
91491 + * mprotect() syscall is invoked.
91492 + *
91493 + * The IOPROC must not be able to write to a read-only page, so if the 
91494 + * permissions are downgraded then it must honour them. If they are upgraded 
91495 + * it can treat this in the same way as the ioproc_update_[range|sync]() calls
91496 + *
91497 + * Called holding the mm->page_table_lock
91498 + */
91499 +static inline void 
91500 +ioproc_change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
91501 +{
91502 +       struct ioproc_ops *cp;
91503 +
91504 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
91505 +               if (cp->change_protection)
91506 +                       cp->change_protection(cp->arg, vma, start, end, newprot);
91507 +}
91508 +
91509 +/* IOPROC SYNC PAGE
91510 + *
91511 + * Called when a memory map is synchronised with its disk image i.e. when the 
91512 + * msync() syscall is invoked. Any future read or write to the associated page
91513 + * by the IOPROC should cause the page to be marked as referenced or modified.
91514 + *
91515 + * Not currently called as msync() calls ioproc_sync_range() instead
91516 + *
91517 + * Called holding the mm->page_table_lock
91518 + */
91519 +static inline void 
91520 +ioproc_sync_page(struct vm_area_struct *vma, unsigned long addr)
91521 +{
91522 +       struct ioproc_ops *cp;
91523 +
91524 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
91525 +               if (cp->sync_page)
91526 +                       cp->sync_page(cp->arg, vma, addr);
91527 +}
91528 +
91529 +/* IOPROC INVALIDATE PAGE
91530 + *
91531 + * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the
91532 + * user or paged out by the kernel. 
91533 + *
91534 + * After this call the IOPROC must not access the physical memory again unless
91535 + * a new translation is loaded.
91536 + *
91537 + * Called holding the mm->page_table_lock
91538 + */
91539 +static inline void 
91540 +ioproc_invalidate_page(struct vm_area_struct *vma, unsigned long addr)
91541 +{
91542 +       struct ioproc_ops *cp;
91543 +
91544 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
91545 +               if (cp->invalidate_page)
91546 +                       cp->invalidate_page(cp->arg, vma, addr);
91547 +}
91548 +
91549 +/* IOPROC UPDATE PAGE
91550 + *
91551 + * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk 
91552 + * up, when breaking COW or faulting in an anonymous page of memory.
91553 + *
91554 + * These give the IOPROC device the opportunity to load translations 
91555 + * speculatively, which can improve performance by avoiding device translation
91556 + * faults.
91557 + *
91558 + * Called holding the mm->page_table_lock
91559 + */
91560 +static inline void 
91561 +ioproc_update_page(struct vm_area_struct *vma, unsigned long addr)
91562 +{
91563 +       struct ioproc_ops *cp;
91564 +
91565 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
91566 +               if (cp->update_page)
91567 +                       cp->update_page(cp->arg, vma, addr);
91568 +}
91569 +
91570 +#else
91571 +
91572 +/* ! CONFIG_IOPROC so make all hooks empty */
91573 +
91574 +#define ioproc_release(mm)                     do { } while (0)
91575 +
91576 +#define ioproc_sync_range(vma,start,end)               do { } while (0)
91577 +
91578 +#define ioproc_invalidate_range(vma, start,end)        do { } while (0)
91579 +
91580 +#define ioproc_update_range(vma, start, end)   do { } while (0)
91581 +
91582 +#define ioproc_change_protection(vma, start, end, prot)        do { } while (0)
91583 +
91584 +#define ioproc_sync_page(vma, addr)            do { } while (0)
91585 +
91586 +#define ioproc_invalidate_page(vma, addr)      do { } while (0)
91587 +
91588 +#define ioproc_update_page(vma, addr)          do { } while (0)
91589 +
91590 +#endif /* CONFIG_IOPROC */
91591 +
91592 +#endif /* __LINUX_IOPROC_H__ */
91593 Index: linux-2.6.5-7.191/include/linux/ptrack.h
91594 ===================================================================
91595 --- linux-2.6.5-7.191.orig/include/linux/ptrack.h       2004-02-23 16:02:56.000000000 -0500
91596 +++ linux-2.6.5-7.191/include/linux/ptrack.h    2005-07-28 14:52:52.973659736 -0400
91597 @@ -0,0 +1,65 @@
91598 +/*
91599 + *    Copyright (C) 2000  Regents of the University of California
91600 + *
91601 + *    This program is free software; you can redistribute it and/or modify
91602 + *    it under the terms of the GNU General Public License as published by
91603 + *    the Free Software Foundation; either version 2 of the License, or
91604 + *    (at your option) any later version.
91605 + *
91606 + *    This program is distributed in the hope that it will be useful,
91607 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
91608 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
91609 + *    GNU General Public License for more details.
91610 + *
91611 + *    You should have received a copy of the GNU General Public License
91612 + *    along with this program; if not, write to the Free Software
91613 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
91614 + *
91615 + * Derived from exit_actn.c by
91616 + *    Copyright (C) 2003 Quadrics Ltd.
91617 + *
91618 + */
91619 +#ifndef __LINUX_PTRACK_H
91620 +#define __LINUX_PTRACK_H
91621 +
91622 +/* 
91623 + * Process tracking - this allows a module to keep track of processes
91624 + * in order that it can manage all tasks derived from a single process.
91625 + */
91626 +
91627 +#define PTRACK_PHASE_CLONE     1
91628 +#define PTRACK_PHASE_CLONE_FAIL        2
91629 +#define PTRACK_PHASE_EXEC      3
91630 +#define PTRACK_PHASE_EXIT              4
91631 +
91632 +#define PTRACK_FINISHED                0
91633 +#define PTRACK_INNHERIT                1
91634 +#define PTRACK_DENIED          2
91635 +
91636 +#ifdef CONFIG_PTRACK
91637 +
91638 +typedef int (*ptrack_callback_t)(void *arg, int phase, struct task_struct *child);
91639 +
91640 +struct ptrack_desc {
91641 +       struct list_head        link;
91642 +       ptrack_callback_t       callback;
91643 +       void                   *arg;
91644 +};
91645 +
91646 +extern int     ptrack_register (ptrack_callback_t callback, void *arg);
91647 +extern void    ptrack_deregister (ptrack_callback_t callback, void *arg);
91648 +extern int     ptrack_registered (ptrack_callback_t callback, void *arg);
91649 +
91650 +extern int     ptrack_call_callbacks (int phase, struct task_struct *child);
91651 +
91652 +#define INIT_TASK_PTRACK(tsk) \
91653 +       .ptrack_list = LIST_HEAD_INIT(tsk.ptrack_list),
91654 +
91655 +#else
91656 +#define ptrack_call_callbacks(phase, child) (0)
91657 +
91658 +#define INIT_TASK_PTRACK(tsk)
91659 +
91660 +#endif
91661 +
91662 +#endif /* __LINUX_PTRACK_H */
91663 Index: linux-2.6.5-7.191/include/linux/sched.h
91664 ===================================================================
91665 --- linux-2.6.5-7.191.orig/include/linux/sched.h        2005-06-28 12:24:22.000000000 -0400
91666 +++ linux-2.6.5-7.191/include/linux/sched.h     2005-07-28 14:52:52.974659584 -0400
91667 @@ -188,6 +188,9 @@
91668  extern int max_timeslice, min_timeslice;
91669  
91670  struct namespace;
91671 +#ifdef CONFIG_IOPROC
91672 +struct ioproc_ops;
91673 +#endif
91674  
91675  /* Maximum number of active map areas.. This is a random (large) number */
91676  #define DEFAULT_MAX_MAP_COUNT  65536
91677 @@ -241,6 +244,15 @@
91678         struct kioctx           default_kioctx;
91679  
91680         unsigned long hiwater_rss, hiwater_vm;
91681 +
91682 +#ifdef CONFIG_IOPROC
91683 +       /* hooks for io devices with advanced RDMA capabilities */
91684 +       struct ioproc_ops       *ioproc_ops;
91685 +#endif
91686 +#ifdef CONFIG_PTRACK
91687 +/* process tracking callback */
91688 +       struct list_head ptrack_list;
91689 +#endif
91690  };
91691  
91692  extern int mmlist_nr;
91693 @@ -601,6 +613,10 @@
91694         struct rw_semaphore pagg_sem;
91695  #endif
91696  
91697 +#ifdef CONFIG_PTRACK
91698 +/* process tracking callback */
91699 +       struct list_head ptrack_list;
91700 +#endif
91701  };
91702  
91703  static inline pid_t process_group(struct task_struct *tsk)
91704 Index: linux-2.6.5-7.191/include/qsnet/autoconf.h
91705 ===================================================================
91706 --- linux-2.6.5-7.191.orig/include/qsnet/autoconf.h     2004-02-23 16:02:56.000000000 -0500
91707 +++ linux-2.6.5-7.191/include/qsnet/autoconf.h  2005-07-28 14:52:52.975659432 -0400
91708 @@ -0,0 +1,38 @@
91709 +/*
91710 + *    Copyright (c) 2004 by Quadrics Ltd.
91711 + *
91712 + *    For licensing information please see the supplied COPYING file
91713 + *
91714 + * NOTE: This file has been automatically generated:
91715 + *       node   : milano
91716 + *       kernel : /src/linux/2.6/linux-2.6.5
91717 + *       date   : Wed May 11 12:17:34 EDT 2005
91718 + *
91719 + */
91720 +
91721 +#include <linux/version.h>
91722 +#undef NO_RMAP
91723 +#undef AC
91724 +#undef NO_O1_SCHED
91725 +#undef NO_NPTL
91726 +#undef NO_ABI
91727 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
91728 +#define        PROCESS_ACCT
91729 +#endif
91730 +#undef RSS_ATOMIC
91731 +#define        NO_COPROC
91732 +#undef NO_IOPROC
91733 +#undef NO_PTRACK
91734 +#define        NO_PANIC_NOTIFIER
91735 +#undef NO_SHM_CLEANUP
91736 +#undef NO_PDE
91737 +
91738 +
91739 +#define        CONFIG_EIP
91740 +#define        CONFIG_ELAN
91741 +#define        CONFIG_ELAN3
91742 +#define        CONFIG_ELAN4
91743 +#define        CONFIG_EP
91744 +#define        CONFIG_JTAG
91745 +#define        CONFIG_QSNET
91746 +#define        CONFIG_RMS
91747 Index: linux-2.6.5-7.191/include/qsnet/condvar.h
91748 ===================================================================
91749 --- linux-2.6.5-7.191.orig/include/qsnet/condvar.h      2004-02-23 16:02:56.000000000 -0500
91750 +++ linux-2.6.5-7.191/include/qsnet/condvar.h   2005-07-28 14:52:52.975659432 -0400
91751 @@ -0,0 +1,140 @@
91752 +/*
91753 + *    Copyright (C) 2000  Regents of the University of California
91754 + *
91755 + *    This program is free software; you can redistribute it and/or modify
91756 + *    it under the terms of the GNU General Public License as published by
91757 + *    the Free Software Foundation; either version 2 of the License, or
91758 + *    (at your option) any later version.
91759 + *
91760 + *    This program is distributed in the hope that it will be useful,
91761 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
91762 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
91763 + *    GNU General Public License for more details.
91764 + *
91765 + *    You should have received a copy of the GNU General Public License
91766 + *    along with this program; if not, write to the Free Software
91767 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
91768 + *
91769 + */
91770 +
91771 +#if    !defined(_LINUX_CONDVAR_H)
91772 +#define        _LINUX_CONDVAR_H
91773 +
91774 +#if    defined(__KERNEL__)
91775 +
91776 +#include <linux/list.h>
91777 +#include <qsnet/debug.h>
91778 +
91779 +#define CV_RET_SIGPENDING      0
91780 +#define CV_RET_TIMEOUT         (-1)
91781 +#define CV_RET_NORMAL          1
91782 +
91783 +struct kcondvar_task {
91784 +       struct task_struct      *task;          /* need to wrap task in this */
91785 +       struct list_head        list;           /*   to thread as a list */
91786 +       int                     blocked;
91787 +};
91788 +
91789 +typedef struct {
91790 +       struct list_head        task_list;      /* list of kcondvar_task's */
91791 +} kcondvar_t;
91792 +
91793 +#define kcondvar_wait(c,l,fl)                  debug_kcondvar_wait(c, l, fl, 0,  TASK_UNINTERRUPTIBLE)
91794 +#define kcondvar_waitsig(c,l,fl)               debug_kcondvar_wait(c, l, fl, 0,  TASK_INTERRUPTIBLE)
91795 +#define kcondvar_timedwait(c,l,fl,to)          debug_kcondvar_wait(c, l, fl, to, TASK_UNINTERRUPTIBLE)
91796 +#define kcondvar_timedwaitsig(c,l,fl,to)       debug_kcondvar_wait(c, l, fl, to, TASK_INTERRUPTIBLE)
91797 +#define kcondvar_wakeupone(c,l)                        kcondvar_wakeup(c, l, 0)
91798 +#define kcondvar_wakeupall(c,l)                        kcondvar_wakeup(c, l, 1)
91799
91800 +extern __inline__ void
91801 +kcondvar_init(kcondvar_t *c)
91802 +{
91803 +       INIT_LIST_HEAD(&c->task_list);
91804 +}
91805 +
91806 +extern __inline__ void
91807 +kcondvar_destroy(kcondvar_t *c)
91808 +{
91809 +       ASSERT(list_empty(&c->task_list));
91810 +}
91811 +
91812 +/*
91813 + * We thread a struct kcondvar_task, allocated on the stack, onto the kcondvar_t's
91814 + * task_list, and take it off again when we wake up.
91815 + */
91816 +extern __inline__ int
91817 +debug_kcondvar_wait(kcondvar_t *c, spinlock_t *l, unsigned long *fl, long tmo, int state)
91818 +{
91819 +       struct kcondvar_task cvt;
91820 +       int ret = CV_RET_NORMAL;
91821 +
91822 +       ASSERT(!in_interrupt());                /* we can block */
91823 +       ASSERT(SPINLOCK_HELD(l));               /* enter holding lock */
91824 +
91825 +       cvt.task = current;
91826 +       cvt.blocked = 1;
91827 +       list_add(&cvt.list, &c->task_list);
91828 +       do {
91829 +              /* Note: we avoid using TASK_UNINTERRUPTIBLE here because avenrun()
91830 +               * (linux/kernel/timer.c:calc_load())
91831 +               * computation treats it like TASK_RUNNABLE hence creates false high
91832 +               * load averages when we create kernel threads.
91833 +               * The cvt.blocked flag distinguishes a signal wakeup from a kcondvar_wakeup.
91834 +               *
91835 +               * However, if we do take a signal we could end up busily spinning here, if
91836 +               * we ignore it (state == TASK_UNINTERRUPTIBLE) so once we see a signal
91837 +               * pending we do sleep TASK_UNINTERRUPTIBLE to stop a busy spin.
91838 +               * I have now blocked all signals for kernel threads to prevent this
91839 +               * happening but other users of kcondvar_wait may still hit this spin.
91840 +               */
91841 +               set_current_state (signal_pending(current) ? state : TASK_INTERRUPTIBLE);
91842 +
91843 +               if (fl)
91844 +                   spin_unlock_irqrestore(l, *fl);
91845 +               else
91846 +                   spin_unlock(l);
91847 +               if (tmo) {
91848 +                       if (tmo <= jiffies || !schedule_timeout(tmo - jiffies))
91849 +                               ret = CV_RET_TIMEOUT;
91850 +               } else
91851 +                       schedule();
91852 +               if (fl)
91853 +                   spin_lock_irqsave (l, *fl);
91854 +               else
91855 +                   spin_lock(l);
91856 +               
91857 +               /* signal_pending - Only exit the loop if the user was waiting TASK_INTERRUPTIBLE */
91858 +               if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
91859 +                       ret = CV_RET_SIGPENDING;
91860 +
91861 +       } while (cvt.blocked && ret == CV_RET_NORMAL);
91862 +       list_del(&cvt.list);
91863 +
91864 +       /* Reset task state in case we didn't sleep above */
91865 +       set_current_state (TASK_RUNNING);
91866 +
91867 +       return ret;                             /* return holding lock */
91868 +}
91869 +
91870 +extern __inline__ void
91871 +kcondvar_wakeup(kcondvar_t *c, spinlock_t *l, int wakeall)
91872 +{
91873 +       struct list_head *lp;
91874 +       struct kcondvar_task *cvtp;
91875 +
91876 +       ASSERT(SPINLOCK_HELD(l));                       /* already holding lock */
91877 +       for (lp = c->task_list.next; lp != &c->task_list; lp = lp->next) {
91878 +               cvtp = list_entry(lp, struct kcondvar_task, list);
91879 +               if (cvtp->blocked) {
91880 +                       cvtp->blocked = 0;
91881 +                       /* wake_up_process added to kernel/ksyms.c */
91882 +                       wake_up_process(cvtp->task); 
91883 +                       if (!wakeall)
91884 +                               break;
91885 +               }
91886 +       }
91887 +}                                              /* return still holding lock */
91888 +
91889 +
91890 +#endif /* __KERNEL__ */
91891 +#endif /* _LINUX_CONDVAR_H */
91892 Index: linux-2.6.5-7.191/include/qsnet/config.h
91893 ===================================================================
91894 --- linux-2.6.5-7.191.orig/include/qsnet/config.h       2004-02-23 16:02:56.000000000 -0500
91895 +++ linux-2.6.5-7.191/include/qsnet/config.h    2005-07-28 14:52:52.976659280 -0400
91896 @@ -0,0 +1,195 @@
91897 +/*
91898 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
91899 + *
91900 + *    For licensing information please see the supplied COPYING file
91901 + *
91902 + */
91903 +
91904 +#ifndef _QSNET_CONFIG_H
91905 +#define _QSNET_CONFIG_H
91906 +
91907 +#ident "$Id: config.h,v 1.23 2003/07/24 21:31:19 robin Exp $"
91908 +/*      $Source: /cvs/master/quadrics/qsnet/config.h,v $*/
91909 +
91910 +
91911 +/*
91912 + * QSNET standard defines :
91913 + *
91914 + *   Target operating system defines
91915 + *             SOLARIS
91916 + *             TRU64UNIX/DIGITAL_UNIX
91917 + *             LINUX
91918 + *
91919 + *   Target processor defines
91920 + *             SPARC
91921 + *             ALPHA
91922 + *             I386
91923 + *             IA64
91924 + *             X86_64
91925 + *
91926 + *   Byte order defines
91927 + *             __LITTLE_ENDIAN__
91928 + *             __BIG_ENDIAN__
91929 + *
91930 + *   Data size defines
91931 + *             _LP64                   - LP64 - long/pointer is 64 bits
91932 + *             _ILP32                  - LP32 - long/pointer is 32 bits
91933 + *
91934 + *   Elan defines for main processor
91935 + *             __MAIN_LITTLE_ENDIAN__  - main byte order (for thread code)
91936 + *             __MAIN_BIG_ENDIAN__
91937 + *             _MAIN_LP64              - main long size (for thread code)
91938 + *             _MAIN_ILP32
91939 + *
91940 + *   Compiling for kernel (defined in makefile)
91941 + *             _KERNEL
91942 + *
91943 + */
91944 +
91945 +#if defined(__LP64__) && !defined(_LP64)
91946 +#  define _LP64
91947 +#endif
91948 +
91949 +#if defined(__arch64__) && !defined(_LP64) && !defined(_ILP32)
91950 +#  define _LP64
91951 +#endif
91952 +
91953 +#if defined(__alpha__) && !defined(_LP64) && !defined(_ILP32)
91954 +#  define _LP64
91955 +#endif
91956 +
91957 +#if !defined(__arch64__) && !defined(_ILP32) && !defined(_LP64)
91958 +#  define _ILP32
91959 +#endif
91960 +
91961 +#if defined(__ELAN__) || defined(__ELAN3__)
91962 +
91963 +#define __LITTLE_ENDIAN__
91964 +
91965 +#if defined(__host_solaris) && defined(__host_sparc)
91966 +#define SOLARIS
91967 +#define SPARC
91968 +#define SOLARIS_SPARC
91969 +#define _MAIN_ILP32
91970 +#define __MAIN_BIG_ENDIAN__
91971 +
91972 +#elif defined(__host_osf)
91973 +#define TRU64UNIX
91974 +#define DIGITAL_UNIX
91975 +#define ALPHA
91976 +#define _MAIN_LP64
91977 +#define __MAIN_LITTLE_ENDIAN__
91978 +
91979 +#elif defined(__host_linux) && defined(__host_alpha)
91980 +#define LINUX
91981 +#define ALPHA
91982 +#define LINUX_ALPHA
91983 +#define _MAIN_LP64
91984 +#define __MAIN_LITTLE_ENDIAN__
91985 +
91986 +#elif defined(__host_linux) && defined(__host_sparc)
91987 +#define LINUX
91988 +#define SPARC
91989 +#define LINUX_SPARC
91990 +#define __MAIN_BIG_ENDIAN__
91991 +#ifdef __KERNEL__
91992 +#  define _MAIN_LP64
91993 +#else
91994 +#  define _MAIN_ILP32
91995 +#endif
91996 +
91997 +#elif defined(__host_linux) && defined(__host_i386)
91998 +#define LINUX
91999 +#define I386
92000 +#define LINUX_I386
92001 +#define _MAIN_ILP32
92002 +#define __MAIN_LITTLE_ENDIAN__
92003 +
92004 +#elif defined(__host_linux) && defined(__host_ia64)
92005 +#define LINUX
92006 +#define IA64
92007 +#define LINUX_IA64
92008 +#define _MAIN_LP64
92009 +#define __MAIN_LITTLE_ENDIAN__
92010 +
92011 +#elif defined(__host_linux) && defined(__host_x86_64)
92012 +#define LINUX
92013 +#define X86_64
92014 +#define LINUX_X86_64
92015 +#define _MAIN_LP64
92016 +#define __MAIN_LITTLE_ENDIAN__
92017 +
92018 +#else
92019 +#error Cannot determine operating system/processor architecture.
92020 +#endif
92021 +
92022 +#else /* !defined(__ELAN3__) */
92023 +
92024 +#if (defined(sun) || defined(__sun)) && defined(sparc) && !defined(__sparcv9)  /* Sun Solaris 5.6 */
92025 +#define SOLARIS
92026 +#define SPARC
92027 +#define SOLARIS_SPARC
92028 +#ifndef __BIG_ENDIAN__
92029 +#define __BIG_ENDIAN__
92030 +#endif 
92031 +
92032 +#elif (defined(sun) || defined(__sun)) && defined(sparc) && defined(__sparcv9) /* Sun Solaris 5.7 */
92033 +#define SOLARIS
92034 +#define SPARC
92035 +#define SOLARIS_SPARC
92036 +#define __BIG_ENDIAN__
92037 +
92038 +#elif defined(__osf__) && defined(__alpha)                                     /* Digital Unix */
92039 +#define TRU64UNIX
92040 +#define DIGITAL_UNIX
92041 +#define ALPHA
92042 +#define __LITTLE_ENDIAN__
92043 +
92044 +#elif (defined(linux) || defined(__linux__)) && defined(__alpha)               /* Linux Alpha */
92045 +
92046 +#define LINUX
92047 +#define ALPHA
92048 +#define LINUX_ALPHA
92049 +#define __LITTLE_ENDIAN__
92050 +
92051 +#elif (defined(linux) || defined(__linux__)) && defined(__sparc)               /* Linux Sparc */
92052 +
92053 +#define LINUX
92054 +#define SPARC
92055 +#define LINUX_SPARC
92056 +#define __BIG_ENDIAN__
92057 +
92058 +#elif (defined(linux) || defined(__linux__)) && defined(__i386)                        /* Linux i386 */
92059 +
92060 +#define LINUX
92061 +#define I386
92062 +#define LINUX_I386
92063 +#define __LITTLE_ENDIAN__
92064 +
92065 +#elif (defined(linux) || defined(__linux__)) && defined(__ia64)                        /* Linux ia64 */
92066 +
92067 +#define LINUX
92068 +#define IA64
92069 +#define LINUX_IA64
92070 +#define __LITTLE_ENDIAN__
92071 +
92072 +#elif (defined(linux) || defined(__linux__)) && defined(__x86_64)                      /* Linux x86_64 */
92073 +
92074 +#define LINUX
92075 +#define X86_64
92076 +#define LINUX_X86_64
92077 +#define __LITTLE_ENDIAN__
92078 +
92079 +#elif defined(__QNXNTO__)
92080 +#define QNX
92081 +#define I386
92082 +#define __LITTLE_ENDIAN__
92083 +#else
92084 +#error Cannot determine operating system/processor architecture.
92085 +#endif
92086 +
92087 +#endif
92088 +
92089 +#include <qsnet/workarounds.h>
92090 +
92091 +#endif /* _QSNET_CONFIG_H */
92092 Index: linux-2.6.5-7.191/include/qsnet/crwlock.h
92093 ===================================================================
92094 --- linux-2.6.5-7.191.orig/include/qsnet/crwlock.h      2004-02-23 16:02:56.000000000 -0500
92095 +++ linux-2.6.5-7.191/include/qsnet/crwlock.h   2005-07-28 14:52:52.976659280 -0400
92096 @@ -0,0 +1,207 @@
92097 +/* 
92098 + *    Copyright (C) 2000  Regents of the University of California
92099 + *
92100 + *    This program is free software; you can redistribute it and/or modify
92101 + *    it under the terms of the GNU General Public License as published by
92102 + *    the Free Software Foundation; either version 2 of the License, or
92103 + *    (at your option) any later version.
92104 + *
92105 + *    This program is distributed in the hope that it will be useful,
92106 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
92107 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
92108 + *    GNU General Public License for more details.
92109 + *
92110 + *    You should have received a copy of the GNU General Public License
92111 + *    along with this program; if not, write to the Free Software
92112 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
92113 + *
92114 + */
92115 +
92116 +/*
92117 + *    Complex - Reader/Writer locks
92118 + *    Ref: "UNIX Systems for Modern Architectures", by Curt Schimmel, 
92119 + *    sec 11.6.3. 
92120 + *
92121 + *    This implementation is based on semaphores and may not be called from 
92122 + *    interrupt handlers.
92123 + *
92124 + */
92125 +
92126 +#if    !defined(_LINUX_RWLOCK_H)
92127 +#define        _LINUX_RWLOCK_H
92128 +
92129 +#if    defined(__KERNEL__)
92130 +
92131 +typedef enum { RD, WRT, ANY } crwlock_type_t;
92132 +
92133 +#define crwlock_write_held(l) debug_crwlock_held(l, WRT, __BASE_FILE__,__LINE__)
92134 +#define crwlock_read_held(l) debug_crwlock_held(l, RD, __BASE_FILE__, __LINE__)
92135 +#define crwlock_held(l)      debug_crwlock_held(l, ANY, __BASE_FILE__, __LINE__)
92136 +
92137 +#define crwlock_read(l)             debug_crwlock_read(l, __BASE_FILE__, __LINE__)
92138 +#define crwlock_write(l)     debug_crwlock_write(l, __BASE_FILE__, __LINE__)
92139 +#define crwlock_done(l)      debug_crwlock_done(l, __BASE_FILE__, __LINE__)
92140 +
92141 +#if     defined(DEBUG_RWLOCK) && defined(__alpha__) && !defined(DEBUG_SPINLOCK)
92142 +#define DEBUG_SPINLOCK
92143 +#endif
92144 +
92145 +#include <linux/spinlock.h>
92146 +#include <asm/semaphore.h>
92147 +#include <qsnet/debug.h>
92148 +#include <qsnet/mutex.h>
92149 +#include <linux/version.h>
92150 +
92151 +#if    !defined(DEBUG_SPINLOCK)
92152 +#define debug_spin_lock(lock, file, line)       spin_lock(lock)
92153 +#endif
92154 +
92155 +typedef struct {
92156 +        spinlock_t             m_lock;         /* protects cnt fields below */
92157 +        int                     m_rdcnt;        /* # of rdrs in crit section */
92158 +        int                     m_wrcnt;        /* # of wrtrs in crit section */
92159 +        int                     m_rdwcnt;       /* # of waiting readers */
92160 +        int                     m_wrwcnt;       /* # of waiting writers */
92161 +        struct semaphore        m_rdwait;       /* sema where readers wait */
92162 +        struct semaphore        m_wrwait;       /* sema where writers wait */
92163 +        pid_t                  m_wrholder;     /* task holding write lock */
92164 +} crwlock_t;
92165
92166 +extern __inline__ void 
92167 +crwlock_init(crwlock_t *l)
92168 +{
92169 +       l->m_lock = SPIN_LOCK_UNLOCKED;
92170 +#if    LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
92171 +       l->m_rdwait = MUTEX_LOCKED;
92172 +       l->m_wrwait = MUTEX_LOCKED;
92173 +#else
92174 +       sema_init(&l->m_rdwait,0);
92175 +       sema_init(&l->m_wrwait,0);
92176 +#endif
92177 +       l->m_rdcnt = l->m_wrcnt = l->m_rdwcnt = l->m_wrwcnt = 0;
92178 +       l->m_wrholder = PID_NONE;
92179 +}
92180 +
92181 +extern __inline__ void 
92182 +crwlock_destroy(crwlock_t *l)
92183 +{
92184 +       ASSERT(l->m_rdcnt == 0 && l->m_wrcnt == 0);
92185 +}
92186 +
92187 +/*
92188 + * If a writer has the lock presently or there are writers waiting,
92189 + * then we have to wait.
92190 + */
92191 +extern __inline__ void 
92192 +debug_crwlock_read(crwlock_t *l, char *file, int line)
92193 +{
92194 +       ASSERT(!in_interrupt());
92195 +       spin_lock(&l->m_lock);
92196 +       if (l->m_wrcnt || l->m_wrwcnt) {
92197 +               l->m_rdwcnt++;
92198 +               spin_unlock(&l->m_lock);
92199 +               down(&l->m_rdwait); /* P */
92200 +       } else {
92201 +               l->m_rdcnt++;
92202 +               spin_unlock(&l->m_lock);
92203 +       }
92204 +}
92205 +
92206 +/*
92207 + * If we're the last reader, and a writer is waiting,
92208 + * then let the writer go now.
92209 + */
92210 +/* private */
92211 +extern __inline__ void 
92212 +debug_crwlock_read_done(crwlock_t *l, char *file, int line)
92213 +{
92214 +       spin_lock(&l->m_lock);
92215 +       l->m_rdcnt--;
92216 +       if (l->m_wrwcnt && l->m_rdcnt == 0) {
92217 +               l->m_wrcnt = 1;
92218 +               l->m_wrwcnt--;
92219 +               spin_unlock(&l->m_lock);
92220 +               up(&l->m_wrwait); /* V */       
92221 +               return;
92222 +       }
92223 +       spin_unlock(&l->m_lock);
92224 +}
92225 +
92226 +extern __inline__ void 
92227 +debug_crwlock_write(crwlock_t *l, char *file, int line)
92228 +{
92229 +       ASSERT(!in_interrupt());
92230 +       spin_lock(&l->m_lock);
92231 +       if (l->m_wrcnt || l->m_rdcnt) {         /* block if lock is in use */
92232 +               l->m_wrwcnt++;
92233 +               spin_unlock(&l->m_lock);
92234 +               down(&l->m_wrwait); /* P */
92235 +       } else {                                /* lock is not in use */
92236 +               l->m_wrcnt = 1;
92237 +               spin_unlock(&l->m_lock);
92238 +       }
92239 +       l->m_wrholder = current->pid;
92240 +}
92241 +
92242 +/* private */
92243 +extern __inline__ void
92244 +debug_crwlock_write_done(crwlock_t *l, char *file, int line)
92245 +{
92246 +       int rdrs;
92247 +
92248 +       spin_lock(&l->m_lock);
92249 +       l->m_wrholder = PID_NONE;
92250 +       if (l->m_rdwcnt) {                      /* let any readers go first */
92251 +               l->m_wrcnt = 0;
92252 +               rdrs = l->m_rdwcnt;
92253 +               l->m_rdcnt = rdrs;
92254 +               l->m_rdwcnt = 0;
92255 +               spin_unlock(&l->m_lock);
92256 +               while (rdrs--)
92257 +                       up(&l->m_rdwait); /* V */
92258 +       } else if (l->m_wrwcnt) {               /* or let any writer go */
92259 +               l->m_wrwcnt--;
92260 +               spin_unlock(&l->m_lock);
92261 +               up(&l->m_wrwait); /* V */
92262 +       } else {                                /* nobody waiting, unlock */
92263 +               l->m_wrcnt = 0;
92264 +               spin_unlock(&l->m_lock);
92265 +       }
92266 +}
92267 +
92268 +extern __inline__ void
92269 +debug_crwlock_done(crwlock_t *l, char *file, int line)
92270 +{
92271 +       if (l->m_wrholder == current->pid)
92272 +               debug_crwlock_write_done(l, file, line);
92273 +       else
92274 +               debug_crwlock_read_done(l, file, line);
92275 +}
92276 +
92277 +/*
92278 + * Return nonzero if lock is held
92279 + */
92280 +extern __inline__ int  
92281 +debug_crwlock_held(crwlock_t *l, crwlock_type_t t, char *file, int line)
92282 +{
92283 +       int res;
92284 +
92285 +       spin_lock(&l->m_lock);
92286 +       switch(t) {
92287 +               case RD:
92288 +                       res = l->m_rdcnt;
92289 +                       break;
92290 +               case WRT:
92291 +                       res = l->m_wrcnt;
92292 +                       break;
92293 +               case ANY:
92294 +                       res = l->m_wrcnt + l->m_rdcnt;
92295 +                       break;
92296 +       }
92297 +       spin_unlock(&l->m_lock);
92298 +
92299 +       return res;
92300 +}
92301 +
92302 +#endif /* __KERNEL__ */
92303 +#endif /* _LINUX_RWLOCK_H */
92304 Index: linux-2.6.5-7.191/include/qsnet/ctrl_linux.h
92305 ===================================================================
92306 --- linux-2.6.5-7.191.orig/include/qsnet/ctrl_linux.h   2004-02-23 16:02:56.000000000 -0500
92307 +++ linux-2.6.5-7.191/include/qsnet/ctrl_linux.h        2005-07-28 14:52:52.977659128 -0400
92308 @@ -0,0 +1,37 @@
92309 +/*
92310 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92311 + *
92312 + *    For licensing information please see the supplied COPYING file
92313 + *
92314 + */
92315 +
92316 +#ifndef __QSNET_CTRL_LINUX_H
92317 +#define __QSNET_CTRL_LINUX_H
92318 +
92319 +#ident "$Id: ctrl_linux.h,v 1.3 2003/03/26 09:32:03 mike Exp $"
92320 +/*      $Source: /cvs/master/quadrics/qsnet/ctrl_linux.h,v $*/
92321 +
92322 +#define QSNETIO_USER_BASE              0x40
92323 +
92324 +#define QSNETIO_DEBUG_DUMP             _IO   ('e', QSNETIO_USER_BASE + 0)
92325 +
92326 +typedef struct qsnetio_debug_buffer_struct
92327 +{
92328 +       caddr_t addr; 
92329 +       size_t  len;
92330 +} QSNETIO_DEBUG_BUFFER_STRUCT;
92331 +#define QSNETIO_DEBUG_BUFFER           _IOWR ('e', QSNETIO_USER_BASE + 1, QSNETIO_DEBUG_BUFFER_STRUCT)
92332 +
92333 +typedef struct qsnetio_debug_kmem_struct
92334 +{
92335 +       void *handle;
92336 +} QSNETIO_DEBUG_KMEM_STRUCT;
92337 +#define QSNETIO_DEBUG_KMEM             _IOWR   ('e', QSNETIO_USER_BASE + 2, QSNETIO_DEBUG_KMEM_STRUCT)
92338 +
92339 +#endif /* __QSNET_CTRL_LINUX_H */
92340 +
92341 +/*
92342 + * Local variables:
92343 + * c-file-style: "linux"
92344 + * End:
92345 + */
92346 Index: linux-2.6.5-7.191/include/qsnet/debug.h
92347 ===================================================================
92348 --- linux-2.6.5-7.191.orig/include/qsnet/debug.h        2004-02-23 16:02:56.000000000 -0500
92349 +++ linux-2.6.5-7.191/include/qsnet/debug.h     2005-07-28 14:52:52.977659128 -0400
92350 @@ -0,0 +1,68 @@
92351 +/*
92352 + *    Copyright (C) 2000  Regents of the University of California
92353 + *
92354 + *    This program is free software; you can redistribute it and/or modify
92355 + *    it under the terms of the GNU General Public License as published by
92356 + *    the Free Software Foundation; either version 2 of the License, or
92357 + *    (at your option) any later version.
92358 + *
92359 + *    This program is distributed in the hope that it will be useful,
92360 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
92361 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
92362 + *    GNU General Public License for more details.
92363 + *
92364 + *    You should have received a copy of the GNU General Public License
92365 + *    along with this program; if not, write to the Free Software
92366 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
92367 + *
92368 + */
92369 +#ifndef _QSNET_DEBUG_H
92370 +#define _QSNET_DEBUG_H
92371 +
92372 +#if defined(DIGITAL_UNIX) 
92373 +#include <kern/assert.h>
92374 +#elif defined(LINUX)
92375 +extern int qsnet_assfail (char *ex, const char *func, char *file, int line);
92376 +
92377 +#define ASSERT(EX)     do { \
92378 +       if (!(EX) && qsnet_assfail (#EX, __FUNCTION__, __BASE_FILE__, __LINE__)) { \
92379 +               BUG(); \
92380 +       } \
92381 +} while (0)
92382 +#endif /* DIGITAL_UNIX */
92383 +
92384 +/* debug.c */
92385 +extern void qsnet_debug_init(void);
92386 +extern void qsnet_debug_fini(void);
92387 +extern void qsnet_debug_disable(int);
92388 +extern void qsnet_debug_alloc(void);
92389 +
92390 +#define QSNET_DEBUG_BUFFER  ((unsigned int)(0x01))
92391 +#define QSNET_DEBUG_CONSOLE ((unsigned int)(0x02))
92392 +#define QSNET_DEBUG_BUF_CON ( QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE )
92393 +
92394 +#ifdef __GNUC__
92395 +extern void qsnet_debugf      (unsigned int mode, char *fmt, ...)
92396 +       __attribute__ ((format (printf,2,3)));
92397 +extern void kqsnet_debugf      (char *fmt, ...)
92398 +       __attribute__ ((format (printf,1,2)));
92399 +#else
92400 +extern void qsnet_debugf      (unsigned int mode, char *fmt, ...);
92401 +extern void kqsnet_debugf     (char *fmt, ...);
92402 +#endif
92403 +extern void qsnet_vdebugf     (unsigned int mode, char * prefix, char *fmt,  va_list ap);
92404 +extern int  qsnet_debug_buffer(caddr_t ubuffer, int len);
92405 +extern int  qsnet_debug_dump  (void);
92406 +extern int  qsnet_debug_kmem  (void *handle);
92407 +
92408 +extern void qsnet_debug_buffer_on(void);
92409 +extern void qsnet_debug_buffer_clear(void);
92410 +extern void qsnet_debug_buffer_mark(char *str);
92411 +
92412 +#endif /* _QSNET_DEBUG_H */
92413 +
92414 +/*
92415 + * Local variables:
92416 + * c-file-style: "linux"
92417 + * End:
92418 + */
92419 Index: linux-2.6.5-7.191/include/qsnet/fence.h
92420 ===================================================================
92421 --- linux-2.6.5-7.191.orig/include/qsnet/fence.h        2004-02-23 16:02:56.000000000 -0500
92422 +++ linux-2.6.5-7.191/include/qsnet/fence.h     2005-07-28 14:52:52.977659128 -0400
92423 @@ -0,0 +1,178 @@
92424 +/*
92425 + *    Copyright (c) 2003 by Quadrics Supercomputers World Ltd.
92426 + *
92427 + *    For licensing information please see the supplied COPYING file
92428 + *
92429 + */
92430 +
92431 +/* $Id: fence.h,v 1.21.6.4 2004/11/23 14:34:45 addy Exp $ */
92432 +/*             $Source: /cvs/master/quadrics/qsnet/fence.h,v $*/
92433 +
92434 +#ifndef _CONFIG_FENCE_H
92435 +#define _CONFIG_FENCE_H
92436 +
92437 +#ident "$Id: fence.h,v 1.21.6.4 2004/11/23 14:34:45 addy Exp $"
92438 +
92439 +#ifdef __cplusplus
92440 +extern "C" {
92441 +#endif
92442 +
92443 +#if defined(__ELAN__) || defined(__ELAN3__)
92444 +
92445 +/* no memory barriers required on elan3/elan4 */
92446 +
92447 +#elif defined QSNET_MEMBARS_ASSERT
92448 +
92449 +#include <assert.h>
92450 +#define MEMBAR_MEMISSUE()       assert(0);
92451 +#define MEMBAR_SYNC()           assert(0);
92452 +#define MEMBAR_STORELOAD()      assert(0);
92453 +#define MEMBAR_LOADSTORE()      assert(0);
92454 +#define MEMBAR_STORESTORE()     assert(0);
92455 +#define MEMBAR_LOADLOAD()       assert(0);
92456 +#define MEMBAR_VISIBLE()        assert(0);
92457 +#define MEMBAR_DRAIN()          assert(0);
92458 +    
92459 +#elif defined(__alpha)
92460 +
92461 +/* Memory barrier instructions */
92462 +#if defined(__DECC) || defined(__DECXX)
92463 +long   asm( const char *,...);
92464 +#pragma intrinsic( asm )
92465 +#define MEMBAR_MEMISSUE()      asm("mb")
92466 +#define MEMBAR_SYNC()          asm("mb")
92467 +#define MEMBAR_STORELOAD()     asm("wmb")
92468 +#define MEMBAR_LOADSTORE()     asm("mb")
92469 +#define MEMBAR_STORESTORE()    asm("wmb")
92470 +#define MEMBAR_LOADLOAD()      asm("mb")
92471 +#define MEMBAR_VISIBLE()       asm("")
92472 +#define MEMBAR_DRAIN()                 asm("wmb")
92473 +
92474 +#else
92475 +/* Assume gcc */
92476 +#define MEMBAR_MEMISSUE()      asm volatile ("mb"::)
92477 +#define MEMBAR_SYNC()          asm volatile ("mb"::)
92478 +#define MEMBAR_STORELOAD()     asm volatile ("wmb"::)
92479 +#define MEMBAR_LOADSTORE()     asm volatile ("mb"::)
92480 +#define MEMBAR_STORESTORE()    asm volatile ("wmb"::)
92481 +#define MEMBAR_LOADLOAD()      asm volatile ("mb"::)
92482 +#define MEMBAR_VISIBLE()       asm volatile (""   ::: "memory")
92483 +#define MEMBAR_DRAIN()         asm volatile ("wmb"::: "memory")
92484 +
92485 +#endif /* __DECC */
92486 +
92487 +#elif defined(__sparc)
92488 +
92489 +/* UltraSPARC with WRITE MERGING enabled */
92490 +#define MEMBAR_MEMISSUE()      asm volatile ("membar #MemIssue");
92491 +#define MEMBAR_SYNC()          asm volatile ("membar #Sync");
92492 +#define MEMBAR_STORELOAD()     asm volatile ("membar #StoreLoad");
92493 +#define MEMBAR_LOADSTORE()     asm volatile ("membar #LoadStore");
92494 +#define MEMBAR_STORESTORE()    asm volatile ("membar #StoreStore");
92495 +#define MEMBAR_LOADLOAD()      asm volatile ("membar #LoadLoad");
92496 +#define MEMBAR_VISIBLE()       asm volatile (""::: "memory")
92497 +#define MEMBAR_DRAIN()         asm volatile (""::: "memory")
92498 +
92499 +#elif defined(__linux__)
92500 +
92501 +#if defined(__INTEL_COMPILER)
92502 +
92503 +/* NB: Intel compiler version 8.0 now also defines __GNUC__ unless you set the -no-gcc cmdline option
92504 + * I've moved the check for __INTEL_COMPILER to be first to get around this
92505 + */
92506 +#ifdef __ECC
92507 +
92508 +#include <ia64intrin.h>
92509 +
92510 +#define MEMBAR_MEMISSUE()       __mf()
92511 +#define MEMBAR_SYNC()           __mf()
92512 +#define MEMBAR_STORELOAD()      __mf()
92513 +#define MEMBAR_LOADSTORE()      __mf()
92514 +#define MEMBAR_STORESTORE()     __mf()
92515 +#define MEMBAR_LOADLOAD()       __mf()
92516 +#define MEMBAR_VISIBLE()       __mf()
92517 +#define MEMBAR_DRAIN()         __mf()
92518 +
92519 +#else
92520 +
92521 +#warning Membars not implemented with this compiler.
92522 +#define MEMBAR_MEMISSUE()       ;
92523 +#define MEMBAR_SYNC()           ;
92524 +#define MEMBAR_STORELOAD()      ;
92525 +#define MEMBAR_LOADSTORE()      ;
92526 +#define MEMBAR_STORESTORE()     ;
92527 +#define MEMBAR_LOADLOAD()       ;
92528 +#define MEMBAR_VISIBLE()        ;
92529 +#define MEMBAR_DRAIN()          ;
92530 +
92531 +#endif /* __ECC */
92532 +
92533 +#elif defined(__GNUC__)
92534 +
92535 +#ifndef __ia64
92536 +
92537 +/* These are needed by <asm/system.h> on AMD64 */
92538 +#include <asm/types.h>
92539 +#include <asm/bitops.h>
92540 +
92541 +#ifndef __cplusplus
92542 +/* this header file has a parameter called "new" - great huh */
92543 +#include <asm/system.h>
92544 +#endif
92545 +
92546 +#else
92547 +#  define mb()        __asm__ __volatile__ ("mf" ::: "memory")
92548 +#  define rmb()       mb()
92549 +#  define wmb()       mb()
92550 +#endif /* !__ia64 */
92551 +
92552 +#if defined(__x86_64) || defined(__i386)
92553 +/* For some reason the AMD64 definition (glibc-devel 2.3.X) of this 
92554 + * is not useful (compiler only directive) so we overload it here
92555 + */
92556 +/* I don't trust the IA32 header files either as with mtrr enabled
92557 + * we really need a membar and not a compiler directive
92558 + * NB: sfence is only available with X86_FEATURE_XMM CPUs
92559 + */
92560 +#undef wmb
92561 +#define wmb()    asm volatile("sfence":::"memory");
92562 +#endif /* __x86_64 */
92563 +
92564 +#define MEMBAR_MEMISSUE()      mb()
92565 +#define MEMBAR_SYNC()          mb()
92566 +#define MEMBAR_STORELOAD()     wmb()
92567 +#define MEMBAR_LOADSTORE()     mb()
92568 +#define MEMBAR_STORESTORE()    wmb()
92569 +#define MEMBAR_LOADLOAD()      mb()
92570 +
92571 +#ifdef __ia64
92572 +#define MEMBAR_VISIBLE()       asm volatile ("mf.a;;mf;;"::: "memory")
92573 +#define MEMBAR_DRAIN()         asm volatile ("mf;"::: "memory")
92574 +#else
92575 +#define MEMBAR_VISIBLE()       asm volatile (""::: "memory")
92576 +#define MEMBAR_DRAIN()         wmb()
92577 +#endif
92578 +
92579 +#else /* elif __GNUC__ */
92580 +
92581 +#error Membars not implemented for this architecture/compiler.
92582 +
92583 +#endif /* __INTEL_COMPILER */
92584 +
92585 +#else /* elif __linux__ */
92586 +
92587 +#error Membars not implemented for this architecture/compiler.
92588 +
92589 +#endif
92590 +
92591 +#ifdef __cplusplus
92592 +}
92593 +#endif
92594 +
92595 +#endif /* _CONFIG_FENCE_H */
92596 +
92597 +/*
92598 + * Local variables:
92599 + * c-file-style: "stroustrup"
92600 + * End:
92601 + */
92602 Index: linux-2.6.5-7.191/include/qsnet/kernel.h
92603 ===================================================================
92604 --- linux-2.6.5-7.191.orig/include/qsnet/kernel.h       2004-02-23 16:02:56.000000000 -0500
92605 +++ linux-2.6.5-7.191/include/qsnet/kernel.h    2005-07-28 14:52:52.978658976 -0400
92606 @@ -0,0 +1,38 @@
92607 +/*
92608 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92609 + *
92610 + *    For licensing information please see the supplied COPYING file
92611 + *
92612 + */
92613 +
92614 +#ifndef __QSNET_KERNEL_H
92615 +#define __QSNET_KERNEL_H
92616 +
92617 +#ident "$Id: kernel.h,v 1.8 2003/03/14 10:18:22 mike Exp $"
92618 +/*      $Source: /cvs/master/quadrics/qsnet/kernel.h,v $*/
92619 +
92620 +#include <qsnet/config.h>
92621 +#include <qsnet/types.h>
92622 +
92623 +#if defined(SOLARIS)
92624 +#include <qsnet/kernel_solaris.h>
92625 +#endif
92626 +
92627 +#if defined(DIGITAL_UNIX)
92628 +#include <qsnet/kernel_dunix.h>
92629 +#endif
92630 +
92631 +#if defined(LINUX)
92632 +#include <qsnet/kernel_linux.h>
92633 +#endif
92634 +
92635 +#include <qsnet/debug.h>
92636 +
92637 +#endif /* __QSNET_KERNEL_H */
92638 +
92639 +
92640 +
92641 +
92642 +
92643 +
92644 +
92645 Index: linux-2.6.5-7.191/include/qsnet/kernel_linux.h
92646 ===================================================================
92647 --- linux-2.6.5-7.191.orig/include/qsnet/kernel_linux.h 2004-02-23 16:02:56.000000000 -0500
92648 +++ linux-2.6.5-7.191/include/qsnet/kernel_linux.h      2005-07-28 14:52:52.978658976 -0400
92649 @@ -0,0 +1,352 @@
92650 +/*
92651 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92652 + *
92653 + *    For licensing information please see the supplied COPYING file
92654 + *
92655 + */
92656 +
92657 +#ifndef __QSNET_KERNEL_LINUX_H
92658 +#define __QSNET_KERNEL_LINUX_H
92659 +
92660 +#ident "$Id: kernel_linux.h,v 1.62.6.6 2005/03/07 16:43:32 david Exp $"
92661 +/*      $Source: /cvs/master/quadrics/qsnet/kernel_linux.h,v $*/
92662 +
92663 +#if defined(MODVERSIONS)
92664 +#include <linux/modversions.h>
92665 +#endif
92666 +
92667 +#include <linux/autoconf.h>
92668 +#include <linux/module.h>
92669 +
92670 +
92671 +/* ASSERT(spin_is_locked(l)) would always fail on UP kernels */
92672 +#if defined(CONFIG_SMP)
92673 +#define SPINLOCK_HELD(l)       spin_is_locked(l)
92674 +#else
92675 +#define SPINLOCK_HELD(l)       (1) 
92676 +#endif
92677 +
92678 +#include <asm/io.h>
92679 +#include <asm/uaccess.h>
92680 +
92681 +#include <linux/types.h>
92682 +#include <linux/time.h>
92683 +
92684 +#include <linux/delay.h>
92685 +#include <linux/smp_lock.h>
92686 +#include <linux/spinlock.h>
92687 +#include <linux/module.h>
92688 +
92689 +#include <linux/highmem.h>
92690 +
92691 +#include <qsnet/mutex.h>
92692 +#include <qsnet/condvar.h>
92693 +#include <qsnet/crwlock.h>
92694 +
92695 +#if defined(LINUX_ALPHA)
92696 +#  include <asm/core_tsunami.h>        /* for TSUNAMI_MEM */
92697 +#endif
92698 +
92699 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
92700 +#      undef   MOD_INC_USE_COUNT
92701 +#      undef   MOD_DEC_USE_COUNT
92702 +#      define  MOD_INC_USE_COUNT
92703 +#      define  MOD_DEC_USE_COUNT
92704 +#endif
92705 +
92706 +#define MIN(a,b)       ((a) > (b) ? (b) : (a))
92707 +#define MAX(a,b)       ((a) > (b) ? (a) : (b))
92708 +
92709 +/* stray types */
92710 +typedef u64               u_longlong_t;
92711 +typedef unsigned long     uintptr_t;
92712 +typedef int               bool_t;
92713 +
92714 +typedef unsigned long     virtaddr_t;                          /* virtual address */
92715 +typedef unsigned long      ioaddr_t;                           /* io address */
92716 +typedef unsigned long      sdramaddr_t;                                /* elan sdram offset */
92717 +
92718 +/* 386 kernel can be compiled with PAE enabled to use a 44 bit physical address */
92719 +#if defined(CONFIG_X86_PAE)
92720 +typedef unsigned long long physaddr_t;
92721 +#else
92722 +typedef unsigned long     physaddr_t;
92723 +#endif
92724 +
92725 +/* ticks since reboot, and tick freq */
92726 +#define lbolt          jiffies 
92727 +#define hz             HZ
92728 +
92729 +/* System page size and friends */
92730 +#define PAGESIZE       PAGE_SIZE
92731 +#define PAGESHIFT      PAGE_SHIFT
92732 +#define PAGEOFFSET     (PAGE_SIZE - 1)
92733 +#define PAGEMASK       PAGE_MASK
92734 +
92735 +#define PAGE_ALIGNED(a)        (((a) & PAGE_MASK) == a)
92736 +
92737 +/* convert between bytes and pages */
92738 +#define btop(b)         ((unsigned long)(b) >> PAGE_SHIFT)     /* rnd down */ 
92739 +#define btopr(b)        btop(PAGE_ALIGN((unsigned long) b))    /* rnd up */
92740 +#define ptob(p)                ((unsigned long)(p) << PAGE_SHIFT)
92741 +
92742 +/* round up sz to the nearest multiple of blk */
92743 +#define roundup(sz,blk) ((blk) * ((sz) / (blk) + ((sz) % (blk) ? 1 : 0)))      
92744 +
92745 +/* send a signal to a process */
92746 +#define psignal(pr,sig)        send_sig(sig,pr,0)
92747 +
92748 +/* microsecond delay */
92749 +#define DELAY(us)      udelay(us)
92750 +
92751 +/* macro macros */
92752 +#define MACRO_BEGIN     do {
92753 +#define MACRO_END       } while (0)
92754 +
92755 +/* D-Unix compatible errno values */
92756 +#define ESUCCESS        0
92757 +#define EFAIL           255
92758 +
92759 +/* ASSERT(NO_LOCKS_HELD) will be a no-op */
92760 +#define NO_LOCKS_HELD  1
92761 +
92762 +/* misc */
92763 +typedef int            label_t;
92764 +#define on_fault(ljp)  ((ljp) == NULL)
92765 +#define _NOTE(X)
92766 +#define no_fault()     ((void) 0)
92767 +#define panicstr       0
92768 +
92769 +/* return from system call is -EXXX on linux */
92770 +#define set_errno(e)   (-(e))
92771 +
92772 +/* 
92773 + * BSD-style byte ops 
92774 + */
92775 +
92776 +#define bcmp(src1,src2,len)            memcmp(src1,src2,len)
92777 +#define bzero(dst,len)                 memset(dst,0,len)
92778 +#define bcopy(src,dst,len)             memcpy(dst,src,len)
92779 +
92780 +#define preemptable_start              do { long must_yield_at = lbolt + (hz/10);
92781 +#define preemptable_end                        } while (0)
92782 +#define preemptable_check()            do {\
92783 +                                            if ((lbolt - must_yield_at) > 0)\
92784 +                                           {\
92785 +                                               preemptable_yield() ; \
92786 +                                               must_yield_at = lbolt + (hz/10);\
92787 +                                           }\
92788 +                                       } while (0)
92789 +
92790 +#define preemptable_yield()            schedule()
92791 +
92792 +#define CURPROC()                       current
92793 +#define CURTHREAD()                     current
92794 +#define SUSER()                                suser()
92795 +
92796 +/* 64 bit IO operations on 32 bit intel cpus using MMX */
92797 +#if defined(LINUX_I386)
92798 +extern u64         qsnet_readq (volatile u64 *ptr);
92799 +extern void        qsnet_writeq (u64 value, volatile u64 *ptr);
92800 +
92801 +#define readq(ptr)             qsnet_readq((void *) ptr)
92802 +#define writeq(val,ptr)                qsnet_writeq(val, (void *)ptr)
92803 +#endif
92804 +
92805 +/*
92806 + * Memory barriers
92807 + */
92808 +#ifndef mmiob
92809 +#  define mmiob()                      mb()
92810 +#endif
92811 +
92812 +/* 
92813 + * Exit handlers
92814 + */
92815 +#define HANDLER_REGISTER(func,arg,flags)   xa_handler_register(func,arg,flags)
92816 +#define HANDLER_UNREGISTER(func,arg,flags) xa_handler_unregister(func,arg,flags)
92817 +
92818 +/* 
92819 + * KMEM_GETPAGES and KMEM_ALLOC both call kmem_alloc, which 
92820 + * translates the call to kmalloc if < PAGE_SIZE, or vmalloc 
92821 + * if >= PAGE_SIZE.  vmalloc will always return a page-aligned 
92822 + * region rounded up to the nearest page, while kmalloc will 
92823 + * return bits and pieces of a page.
92824 + */
92825 +
92826 +#ifdef KMEM_DEBUG
92827 +extern void          *qsnet_kmem_alloc_debug(int len, int sleep, int zerofill, char *file, int line);
92828 +extern void           qsnet_kmem_free_debug(void *ptr, int len, char *file, int line);
92829 +#define KMEM_ALLOC(ptr,type,len,sleep) \
92830 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,0,__FILE__,__LINE__); }
92831 +#define KMEM_ZALLOC(ptr,type,len,sleep) \
92832 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,1,__FILE__,__LINE__); }
92833 +
92834 +#define KMEM_FREE(ptr,len)               qsnet_kmem_free_debug((void *)ptr,len,__FILE__,__LINE__)
92835 +
92836 +#else
92837 +
92838 +extern void          *qsnet_kmem_alloc(int len, int sleep, int zerofill);
92839 +extern void           qsnet_kmem_free(void *ptr, int len);
92840 +
92841 +#define KMEM_ALLOC(ptr,type,len,sleep) \
92842 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,0); }
92843 +#define KMEM_ZALLOC(ptr,type,len,sleep) \
92844 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,1); }
92845 +
92846 +#define KMEM_FREE(ptr,len)               qsnet_kmem_free((void *)ptr,len)
92847 +
92848 +#endif
92849 +extern void       qsnet_kmem_display(void *handle);
92850 +extern physaddr_t kmem_to_phys(void *ptr);
92851 +
92852 +#define KMEM_ASSERT(sleep)              ASSERT(!(in_interrupt() && sleep))
92853 +
92854 +
92855 +#define KMEM_GETPAGES(ptr,type,pgs,sleep) KMEM_ZALLOC(ptr,type,ptob(pgs),sleep)
92856 +#define KMEM_FREEPAGES(ptr,pgs)          KMEM_FREE(ptr,ptob(pgs));
92857 +
92858 +/*
92859 + * Copying from user space -> kernel space (perms checked)
92860 + */
92861 +#define copyin(up,kp,size)             copy_from_user(kp,up,size)
92862 +#define copyin_noerr(up,kp,size)       copy_from_user(kp,up,size)
92863 +
92864 +/* get_user() gets xfer width right */
92865 +#define fulinux(ret, up)               (get_user(ret, (up)) == 0 ? ret : -1)
92866 +#define fulinuxp(ret, up)              (get_user(ret, (up)) == 0 ? ret : NULL)
92867 +
92868 +extern __inline__ int fubyte    (u8  *up) { u8  ret;   return fulinux(ret, up);}
92869 +extern __inline__ int fusword   (u16 *up) { u16 ret;   return fulinux(ret, up);}
92870 +extern __inline__ int fuword    (u32 *up) { u32 ret;   return fulinux(ret, up);}
92871 +#if BITS_PER_LONG > 32
92872 +extern __inline__ u64 fulonglong(u64 *up) { u64 ret;   return fulinux(ret, up);}
92873 +#else
92874 +extern __inline__ u64 fulonglong(u64 *up) { return ((u64) fuword((u32 *)up) | (((u64) fuword(((u32 *)up)+1))<<32)); }
92875 +#endif
92876 +extern __inline__ void *fuptr (void **up) { void *ret; return fulinuxp(ret,up);}
92877 +
92878 +#define fubyte_noerr(up)               fubyte(up)
92879 +#define fusword_noerr(up)              fusword(up)
92880 +#define fuword_noerr(up)               fuword(up)
92881 +#define fulonglong_noerr(up)           fulonglong(up)
92882 +#define fuptr_noerr(up)                        fuptr(up)
92883 +
92884 +extern __inline__ int copyinstr(char *up, char *kp, int max, int *size)
92885 +{ 
92886 +       for (*size = 1; *size <= max; (*size)++) {
92887 +               if (get_user(*kp, up++) != 0)
92888 +                       return EFAULT;  /* bad user space addr */
92889 +               if (*kp++ == '\0')
92890 +                       return 0;       /* success */
92891 +       }
92892 +       *size = max;
92893 +       return ENAMETOOLONG;            /* runaway string */
92894 +}
92895
92896 +/*
92897 + * Copying from kernel space -> user space (perms checked)
92898 + */
92899 +
92900 +#define copyout(kp,up,size)            copy_to_user(up,kp,size)
92901 +#define copyout_noerr(kp,up,size)      copy_to_user(up,kp,size)
92902 +
92903 +/* put_user() gets xfer width right */
92904 +#define sulinux(val, up)               (put_user(val, (up)) == 0 ? 0 : -1)
92905 +
92906 +extern __inline__ int subyte    (u8  *up, u8  val) { return sulinux(val, up); }
92907 +extern __inline__ int susword   (u16 *up, u16 val) { return sulinux(val, up); }
92908 +extern __inline__ int suword    (u32 *up, u32 val) { return sulinux(val, up); }
92909 +#if BITS_PER_LONG > 32
92910 +extern __inline__ int sulonglong(u64 *up, u64 val) { return sulinux(val, up); }
92911 +#else
92912 +extern __inline__ int sulonglong(u64 *up, u64 val) { return (suword((u32 *) up, (u32) val) == 0 ? 
92913 +                                                            suword(((u32 *) up)+1, (u32) (val >> 32)) : -1); }
92914 +#endif
92915 +extern __inline__ int suptr   (void **up,void *val){ return sulinux(val, up); }
92916 +
92917 +#define subyte_noerr(up,val)           subyte(up,val)  
92918 +#define susword_noerr(up,val)          susword(up,val) 
92919 +#define suword_noerr(up,val)           suword(up,val)  
92920 +#define sulonglong_noerr(up,val)       sulonglong(up,val)      
92921 +#define suptr_noerr(up,val)            suptr(up,val)   
92922 +
92923 +/*
92924 + * /proc/qsnet interface
92925 + */
92926 +extern inline int
92927 +str_append(char *buf, char *add, int size)
92928 +{
92929 +#define TRUNC_MSG       "[Output truncated]\n"
92930 +       int full = 0;
92931 +       int max = size - strlen(TRUNC_MSG) - strlen(add) - 1;
92932 +
92933 +       if (strlen(buf) > max) {
92934 +               strcat(buf, TRUNC_MSG);
92935 +               full = 1;
92936 +       } else
92937 +               strcat(buf, add);
92938 +       return full;
92939 +}
92940 +
92941 +/* Spinlocks */
92942 +#define spin_lock_destroy(l)           ((void) 0)
92943 +
92944 +/* Complex - Reader/Writer locks - we added <linux/crwlock.h> */
92945 +typedef crwlock_t                      krwlock_t;
92946 +#define krwlock_init(l)                        crwlock_init(l)
92947 +#define krwlock_destroy(l)             crwlock_destroy(l)
92948 +#define krwlock_write(l)               crwlock_write(l)
92949 +#define krwlock_read(l)                        crwlock_read(l)
92950 +#define krwlock_done(l)                        crwlock_done(l)
92951 +#define krwlock_is_locked(l)           crwlock_held(l)
92952 +#define krwlock_is_write_locked(l)     crwlock_write_held(l)
92953 +#define krwlock_is_read_locked(l)      crwlock_read_held(l)
92954 +
92955 +/*
92956 + * Timeouts - Solaris style.
92957 + */
92958 +typedef struct timer_list timer_fn_t;
92959 +
92960 +extern inline void
92961 +schedule_timer_fn(timer_fn_t *timer, void (*fun)(void *), void *arg, long hz_delay)
92962 +{
92963 +       init_timer(timer);
92964 +
92965 +       timer->function = (void (*)(unsigned long)) fun;
92966 +       timer->data     = (unsigned long) arg;
92967 +       timer->expires  = jiffies + hz_delay;
92968 +
92969 +       add_timer(timer);
92970 +}
92971 +
92972 +/* returns 1 if timer_fn was cancelled */
92973 +extern inline int
92974 +cancel_timer_fn(timer_fn_t *timer)
92975 +{
92976 +    return (del_timer_sync(timer));
92977 +}
92978 +
92979 +extern inline int
92980 +timer_fn_queued(timer_fn_t *timer)
92981 +{
92982 +    return (timer_pending (timer));
92983 +}
92984 +/*
92985 + * Hold/release CPU's.
92986 + */
92987 +
92988 +extern void    cpu_hold_all(void);
92989 +extern void    cpu_release_all(void);
92990 +#define CAPTURE_CPUS()         cpu_hold_all()
92991 +#define RELEASE_CPUS()         cpu_release_all()
92992 +
92993 +#define IASSERT ASSERT
92994 +
92995 +#endif /* __QSNET_KERNEL_LINUX_H */
92996 +
92997 +/*
92998 + * Local variables:
92999 + * c-file-style: "linux"
93000 + * End:
93001 + */
93002 Index: linux-2.6.5-7.191/include/qsnet/kpte.h
93003 ===================================================================
93004 --- linux-2.6.5-7.191.orig/include/qsnet/kpte.h 2004-02-23 16:02:56.000000000 -0500
93005 +++ linux-2.6.5-7.191/include/qsnet/kpte.h      2005-07-28 14:52:52.979658824 -0400
93006 @@ -0,0 +1,109 @@
93007 +/*
93008 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93009 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
93010 + *
93011 + *    For licensing information please see the supplied COPYING file
93012 + *
93013 + */
93014 +
93015 +#ifndef __QSNET_KPTE_H
93016 +#define __QSNET_KPTE_H
93017 +
93018 +#ident "@(#)$Id: kpte.h,v 1.1.2.2 2005/03/02 09:51:49 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
93019 +/*      $Source: /cvs/master/quadrics/qsnet/kpte.h,v $*/
93020 +
93021 +#include <qsnet/autoconf.h>
93022 +
93023 +#ifdef NO_RMAP
93024 +#      define pte_offset_kernel pte_offset
93025 +#      define pte_offset_map    pte_offset
93026 +#       define pte_unmap(A)      do { ; } while (0)
93027 +#endif
93028 +
93029 +/* 
93030 + * Pte stuff
93031 + */
93032 +static __inline__ struct mm_struct *
93033 +get_kern_mm(void)
93034 +{
93035 +        return &init_mm;
93036 +}
93037 +
93038 +static __inline__ pte_t *
93039 +find_pte_map(struct mm_struct *mm, unsigned long vaddr)
93040 +{
93041 +        pgd_t *pgd;
93042 +        pmd_t *pmd;
93043 +       pte_t *ptep;
93044 +
93045 +/* XXXX - handle huge TLB code */
93046 +       pgd = pgd_offset(mm, vaddr);
93047 +       if (pgd_none(*pgd) || pgd_bad(*pgd))
93048 +               goto out;
93049 +       
93050 +       pmd = pmd_offset(pgd, vaddr);
93051 +       if (pmd_none(*pmd) || pmd_bad (*pmd))
93052 +               goto out;
93053 +
93054 +       ptep = pte_offset_map (pmd, vaddr);
93055 +       if (! ptep)
93056 +               goto out;
93057 +       
93058 +       if (pte_present (*ptep))
93059 +               return ptep;
93060 +
93061 +       pte_unmap (ptep);
93062 +out:
93063 +       return NULL;
93064 +}
93065 +
93066 +static __inline__ pte_t *
93067 +find_pte_kernel(unsigned long vaddr)
93068 +{
93069 +        pgd_t *pgd;
93070 +        pmd_t *pmd;
93071 +       pte_t *pte;
93072 +
93073 +       pgd = pgd_offset_k(vaddr);
93074 +       if (pgd && !pgd_none(*pgd)) {
93075 +           pmd = pmd_offset(pgd, vaddr);
93076 +           if (pmd && pmd_present(*pmd)) {
93077 +               pte = pte_offset_kernel(pmd, vaddr);
93078 +               if (pte && pte_present(*pte))
93079 +                   return (pte);
93080 +           }
93081 +       }
93082 +       return (NULL);
93083 +}
93084 +
93085 +static __inline__ physaddr_t
93086 +pte_phys(pte_t pte)
93087 +{
93088 +#if defined(LINUX_ALPHA)
93089 +       /* RedHat 7.1 2.4.3-12 
93090 +        * They have now enabled Monster windows on Tsunami
93091 +        * and so can use the Main's phys pte value 
93092 +        */
93093 +       return (pte_val(pte) >> (32-PAGE_SHIFT));
93094 +#elif defined(LINUX_I386) || defined(LINUX_X86_64)
93095 +#if defined(_PAGE_NX)
93096 +       return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1) & ~_PAGE_NX);
93097 +#else
93098 +       return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1));
93099 +#endif
93100 +#elif defined(LINUX_SPARC)
93101 +       return (pte_val(pte) & _PAGE_PADDR);
93102 +#elif defined(LINUX_IA64)
93103 +       return (pte_val(pte) & _PFN_MASK);
93104 +#else
93105 +#error Unknown architecture
93106 +#endif
93107 +}
93108 +
93109 +#endif /* __QSNET_KPTE_H */
93110 +
93111 +/*
93112 + * Local variables:
93113 + * c-file-style: "stroustrup"
93114 + * End:
93115 + */
93116 Index: linux-2.6.5-7.191/include/qsnet/kthread.h
93117 ===================================================================
93118 --- linux-2.6.5-7.191.orig/include/qsnet/kthread.h      2004-02-23 16:02:56.000000000 -0500
93119 +++ linux-2.6.5-7.191/include/qsnet/kthread.h   2005-07-28 14:52:52.979658824 -0400
93120 @@ -0,0 +1,71 @@
93121 +/*
93122 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93123 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
93124 + *
93125 + *    For licensing information please see the supplied COPYING file
93126 + *
93127 + */
93128 +
93129 +#ifndef __QSNET_KTHREAD_H
93130 +#define __QSNET_KTHREAD_H
93131 +
93132 +#ident "@(#)$Id: kthread.h,v 1.1 2004/10/28 11:50:29 david Exp $ $Name: QSNETMODULES-4-31_20050321 $"
93133 +/*      $Source: /cvs/master/quadrics/qsnet/kthread.h,v $*/
93134 +
93135 +#include <qsnet/autoconf.h>
93136 +
93137 +/* 
93138 + * kernel threads 
93139 + */
93140 +extern __inline__ void
93141 +kernel_thread_init(char *comm)
93142 +{
93143 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
93144 +#ifndef NO_NPTL
93145 +#      define sigmask_lock                     sighand->siglock
93146 +#endif
93147 +       lock_kernel();
93148 +       daemonize();
93149 +        reparent_to_init();
93150 +
93151 +        /* avoid getting signals */
93152 +        spin_lock_irq(&current->sigmask_lock);
93153 +        flush_signals(current);
93154 +        sigfillset(&current->blocked);
93155 +       
93156 +#ifdef NO_NPTL
93157 +        recalc_sigpending(current);
93158 +#else
93159 +        recalc_sigpending();
93160 +#endif
93161 +
93162 +        spin_unlock_irq(&current->sigmask_lock);
93163 +
93164 +       /* set our name for identification purposes */
93165 +       strncpy(current->comm, comm, sizeof(current->comm));
93166 +
93167 +       unlock_kernel();
93168 +#else
93169 +       daemonize(comm);
93170 +#endif
93171 +}
93172 +
93173 +extern __inline__ void *
93174 +kernel_thread_wrap(caddr_t stk, int stksize, void (*proc)(void *), void *arg)
93175 +{
93176 +        ASSERT(stk == NULL && stksize == 0);
93177 +        kernel_thread((int (*)(void *))proc, arg, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
93178 +        return (void *)1; /* non-null value */
93179 +}
93180 +
93181 +#define kernel_thread_create(proc,arg)  kernel_thread_wrap(NULL,0,(void (*)(void *))proc,arg)
93182 +#define kernel_thread_exit()           ((void) 0)
93183 +#define kernel_thread_become_highpri() ((void) 0)
93184 +
93185 +#endif /* __QSNET_KTHREAD_H */
93186 +
93187 +/*
93188 + * Local variables:
93189 + * c-file-style: "linux"
93190 + * End:
93191 + */
93192 Index: linux-2.6.5-7.191/include/qsnet/list.h
93193 ===================================================================
93194 --- linux-2.6.5-7.191.orig/include/qsnet/list.h 2004-02-23 16:02:56.000000000 -0500
93195 +++ linux-2.6.5-7.191/include/qsnet/list.h      2005-07-28 14:52:52.979658824 -0400
93196 @@ -0,0 +1,80 @@
93197 +/*
93198 + *    Copyright (c) 2003 by Quadrics Limited.
93199 + * 
93200 + *    For licensing information please see the supplied COPYING file
93201 + *
93202 + */
93203 +
93204 +#ident "@(#)$Id: list.h,v 1.5 2003/10/27 13:55:33 david Exp $"
93205 +/*      $Source: /cvs/master/quadrics/qsnet/list.h,v $*/
93206 +
93207 +#ifndef __QSNET_LIST_H
93208 +#define __QSNET_LIST_H
93209 +
93210 +/* Implementation of doubly linked lists - compatible with linux */
93211 +struct list_head 
93212 +{
93213 +    struct list_head *next;
93214 +    struct list_head *prev;
93215 +};
93216 +
93217 +#if !defined(LINUX)
93218 +#if ! defined( offsetof ) 
93219 +#define offsetof(T,F) ((int )&(((T *)0)->F))
93220 +#endif
93221 +
93222 +#define LIST_HEAD_INIT(name) { &(name), &(name) }
93223 +
93224 +#define LIST_HEAD(name) \
93225 +       struct list_head name = LIST_HEAD_INIT(name)
93226 +#endif
93227 +
93228 +#define list_entry(ptr, type, off) \
93229 +       ((type *) ((unsigned long)(ptr) - offsetof (type,off)))
93230 +
93231 +#define INIT_LIST_HEAD(list) \
93232 +MACRO_BEGIN \
93233 +       (list)->next = (list)->prev = (list); \
93234 +MACRO_END
93235 +
93236 +#define list_add(new, list) \
93237 +MACRO_BEGIN \
93238 +       (list)->next->prev = (new); \
93239 +       (new)->next = (list)->next; \
93240 +       (new)->prev = (list); \
93241 +       (list)->next = (new); \
93242 +MACRO_END
93243 +
93244 +#define list_add_tail(new, list) \
93245 +MACRO_BEGIN \
93246 +       (list)->prev->next = new; \
93247 +       (new)->prev = (list)->prev; \
93248 +       (new)->next = (list); \
93249 +       (list)->prev = (new); \
93250 +MACRO_END
93251 +
93252 +#define list_del(entry) \
93253 +MACRO_BEGIN \
93254 +       (entry)->prev->next = (entry)->next; \
93255 +       (entry)->next->prev = (entry)->prev; \
93256 +MACRO_END
93257 +
93258 +#define list_del_init(entry) \
93259 +MACRO_BEGIN \
93260 +       (entry)->prev->next = (entry)->next; \
93261 +       (entry)->next->prev = (entry)->prev; \
93262 +       (entry)->next = (entry)->prev = (entry); \
93263 +MACRO_END
93264 +
93265 +#define list_empty(list) \
93266 +       ((list)->next == (list))
93267 +
93268 +#define list_for_each(pos,list) \
93269 +       for (pos = (list)->next; pos != (list); \
93270 +            pos = (pos)->next)
93271 +
93272 +#define list_for_each_safe(pos,n,list) \
93273 +       for (pos = (list)->next, n = (pos)->next; pos != (list); \
93274 +            pos = n, n = (pos)->next)
93275 +
93276 +#endif /* __QSNET_LIST_H */
93277 Index: linux-2.6.5-7.191/include/qsnet/mutex.h
93278 ===================================================================
93279 --- linux-2.6.5-7.191.orig/include/qsnet/mutex.h        2004-02-23 16:02:56.000000000 -0500
93280 +++ linux-2.6.5-7.191/include/qsnet/mutex.h     2005-07-28 14:52:52.980658672 -0400
93281 @@ -0,0 +1,91 @@
93282 +/*
93283 + *    Copyright (C) 2000  Regents of the University of California
93284 + *
93285 + *    This program is free software; you can redistribute it and/or modify
93286 + *    it under the terms of the GNU General Public License as published by
93287 + *    the Free Software Foundation; either version 2 of the License, or
93288 + *    (at your option) any later version.
93289 + *
93290 + *    This program is distributed in the hope that it will be useful,
93291 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
93292 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
93293 + *    GNU General Public License for more details.
93294 + *
93295 + *    You should have received a copy of the GNU General Public License
93296 + *    along with this program; if not, write to the Free Software
93297 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
93298 + *
93299 + */
93300 +
93301 +#if    !defined(_LINUX_MUTEX_H)
93302 +#define        _LINUX_MUTEX_H
93303 +#if    defined(__KERNEL__)
93304 +
93305 +#include <asm/smp.h>
93306 +#include <linux/spinlock.h>
93307 +#include <asm/semaphore.h>
93308 +#include <qsnet/debug.h>
93309 +#include <linux/interrupt.h>
93310 +#include <linux/version.h>
93311 +
93312 +#define PID_NONE       0
93313 +
93314 +typedef struct
93315 +{
93316 +    struct semaphore sem;
93317 +    pid_t           holder;
93318 +} kmutex_t;
93319 +
93320 +extern __inline__ void
93321 +kmutex_init (kmutex_t *l)
93322 +{
93323 +#if    LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
93324 +    l->sem = MUTEX;
93325 +#else
93326 +    init_MUTEX(&l->sem);
93327 +#endif
93328 +    l->holder = PID_NONE;
93329 +}
93330 +
93331 +extern __inline__ void
93332 +kmutex_destroy (kmutex_t *l) 
93333 +{
93334 +    ASSERT (l->holder == PID_NONE); 
93335 +}
93336 +
93337 +extern __inline__ void
93338 +kmutex_lock (kmutex_t *l) 
93339 +{ 
93340 +    ASSERT(l->holder != current->pid);
93341 +    down (&l->sem);
93342 +    l->holder = current->pid; 
93343 +}
93344 +
93345 +extern __inline__ void
93346 +kmutex_unlock (kmutex_t *l) 
93347 +{
93348 +    ASSERT(l->holder == current->pid);
93349 +
93350 +    l->holder = PID_NONE;
93351 +    up (&l->sem);
93352 +}
93353 +
93354 +extern __inline__ int
93355 +kmutex_trylock (kmutex_t *l) 
93356 +{
93357 +    if (down_trylock (&l->sem) == 0) 
93358 +    {
93359 +       l->holder = current->pid;
93360 +       return (1);
93361 +    }
93362 +    return (0);
93363 +}
93364 +
93365 +extern __inline__ int
93366 +kmutex_is_locked (kmutex_t *l) 
93367 +{
93368 +    return (l->holder == current->pid);
93369 +}
93370 +
93371 +#endif /* __KERNEL__ */
93372 +#endif /* _LINUX_MUTEX_H */
93373 Index: linux-2.6.5-7.191/include/qsnet/procfs_linux.h
93374 ===================================================================
93375 --- linux-2.6.5-7.191.orig/include/qsnet/procfs_linux.h 2004-02-23 16:02:56.000000000 -0500
93376 +++ linux-2.6.5-7.191/include/qsnet/procfs_linux.h      2005-07-28 14:52:52.980658672 -0400
93377 @@ -0,0 +1,234 @@
93378 +/*
93379 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93380 + *
93381 + *    For licensing information please see the supplied COPYING file
93382 + *
93383 + */
93384 +
93385 +#ifndef __PROCFS_LINUX_H
93386 +#define __PROCFS_LINUX_H
93387 +
93388 +#ident "$Id: procfs_linux.h,v 1.6.2.6 2004/12/06 17:36:24 robin Exp $"
93389 +/*      $Source: /cvs/master/quadrics/qsnet/procfs_linux.h,v $ */
93390 +
93391 +#if defined(__KERNEL__)
93392 +
93393 +#include <qsnet/kernel_linux.h>
93394 +#include <qsnet/autoconf.h>
93395 +#include <linux/proc_fs.h>
93396 +
93397 +extern gid_t qsnet_procfs_gid;
93398 +
93399 +/* borrowed from fs/proc/proc_misc - helper for proc_read_int */
93400 +static inline int 
93401 +qsnet_proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len)
93402 +{
93403 +       if (len <= off+count) *eof = 1;
93404 +       *start = page + off;
93405 +       len -= off;
93406 +       if (len>count) len = count;
93407 +       if (len<0) len = 0;
93408 +       return len;
93409 +}
93410 +
93411 +static inline int
93412 +qsnet_proc_write_int(struct file *file, const char *buf, unsigned long count, void *data)
93413 +{
93414 +       char tmpbuf[16];
93415 +       int  res = count;
93416 +       
93417 +       if (count > sizeof(tmpbuf) - 1)
93418 +               return (-EINVAL);
93419 +       
93420 +       MOD_INC_USE_COUNT;
93421 +       if (copy_from_user(tmpbuf, buf, count))
93422 +               res = -EFAULT;
93423 +       else
93424 +       {
93425 +               tmpbuf[count] = '\0'; 
93426 +               *(int *)data = simple_strtoul(tmpbuf, NULL, 0);
93427 +       }
93428 +       MOD_DEC_USE_COUNT;
93429 +       
93430 +       return (res);
93431 +}
93432 +
93433 +static inline int
93434 +qsnet_proc_read_int(char *page, char **start, off_t off, int count, int *eof, void *data)
93435 +{
93436 +       int len, res;
93437 +       
93438 +       MOD_INC_USE_COUNT;
93439 +       
93440 +       len = sprintf(page, "%d\n", *(int *)data);
93441 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
93442 +       
93443 +       MOD_DEC_USE_COUNT;
93444 +       return (res);
93445 +}
93446 +
93447 +static inline struct proc_dir_entry *
93448 +qsnet_proc_register_int(struct proc_dir_entry *dir, char *path, int *var, int read_only)
93449 +{
93450 +       struct proc_dir_entry *p;
93451 +       
93452 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
93453 +       if (p) {
93454 +               if (! read_only) 
93455 +                       p->write_proc = qsnet_proc_write_int;
93456 +               p->read_proc  = qsnet_proc_read_int;
93457 +               p->data       = var;
93458 +               p->owner      = THIS_MODULE;
93459 +               p->gid        = qsnet_procfs_gid;
93460 +       }
93461 +       return p;
93462 +}
93463 +
93464 +static inline int
93465 +qsnet_proc_write_hex(struct file *file, const char *buf, unsigned long count, void *data)
93466 +{
93467 +       char tmpbuf[16];
93468 +       int  res = count;
93469 +       
93470 +       if (count > sizeof(tmpbuf) - 1)
93471 +               return (-EINVAL);
93472 +       
93473 +       MOD_INC_USE_COUNT;
93474 +       if (copy_from_user(tmpbuf, buf, count))
93475 +               res = -EFAULT;
93476 +       else
93477 +       {
93478 +               tmpbuf[count] = '\0'; 
93479 +               *(int *)data = simple_strtoul(tmpbuf, NULL, 0);
93480 +       }
93481 +       MOD_DEC_USE_COUNT;
93482 +       
93483 +       return (res);
93484 +}
93485 +
93486 +static inline int
93487 +qsnet_proc_read_hex(char *page, char **start, off_t off, int count, int *eof, void *data)
93488 +{
93489 +       int len, res;
93490 +       
93491 +       MOD_INC_USE_COUNT;
93492 +       
93493 +       len = sprintf(page, "0x%x\n", *(int *)data);
93494 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
93495 +       
93496 +       MOD_DEC_USE_COUNT;
93497 +       return (res);
93498 +}
93499 +
93500 +static inline struct proc_dir_entry *
93501 +qsnet_proc_register_hex(struct proc_dir_entry *dir, char *path, int *var, int read_only)
93502 +{
93503 +       struct proc_dir_entry *p;
93504 +       
93505 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
93506 +       if (p) {
93507 +               if (! read_only) 
93508 +                       p->write_proc = qsnet_proc_write_hex;
93509 +               p->read_proc  = qsnet_proc_read_hex;
93510 +               p->data       = var;
93511 +               p->owner      = THIS_MODULE;
93512 +               p->gid        = qsnet_procfs_gid;
93513 +       }
93514 +       return p;
93515 +}
93516 +
93517 +#define QSNET_PROC_STR_LEN_MAX ((int)256)
93518 +
93519 +static inline int
93520 +qsnet_proc_write_str(struct file *file, const char *buf, unsigned long count, void *data)
93521 +{
93522 +       int  res = count;
93523 +       
93524 +       if (count > (QSNET_PROC_STR_LEN_MAX - 1))
93525 +               return (-EINVAL);
93526 +       
93527 +       MOD_INC_USE_COUNT;
93528 +       if (copy_from_user((char *)data, buf, count))
93529 +               res = -EFAULT;
93530 +       else
93531 +       {
93532 +               ((char *)data)[count] = '\0'; 
93533 +               /* remove linefeed */
93534 +               if ( (count) && (((char *)data)[count -1] == '\n'))
93535 +                       ((char *)data)[count -1] = '\0';
93536 +       }
93537 +       MOD_DEC_USE_COUNT;
93538 +       
93539 +       return (res);
93540 +}
93541 +
93542 +static inline int
93543 +qsnet_proc_read_str(char *page, char **start, off_t off, int count, int *eof, void *data)
93544 +{
93545 +       int len, res;
93546 +       
93547 +       if ( strlen(data) > (count - 1))
93548 +               return (-EINVAL);       
93549 +
93550 +       MOD_INC_USE_COUNT;
93551 +       
93552 +       /* can't output too much */
93553 +       if ( strlen(data) > (count - 1))
93554 +       {
93555 +               MOD_DEC_USE_COUNT;
93556 +               return (-EINVAL);       
93557 +       }
93558 +
93559 +
93560 +       len = sprintf(page, "%s\n", (char *)data);
93561 +       if (len > count)
93562 +       {
93563 +               MOD_DEC_USE_COUNT;
93564 +               return (-EINVAL);       
93565 +       }
93566 +
93567 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
93568 +       
93569 +       MOD_DEC_USE_COUNT;
93570 +       return (res);
93571 +}
93572 +
93573 +static inline struct proc_dir_entry *
93574 +qsnet_proc_register_str(struct proc_dir_entry *dir, char *path, char *var, int read_only)
93575 +{
93576 +       struct proc_dir_entry *p;
93577 +       
93578 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
93579 +       if (p) {
93580 +               if (! read_only) 
93581 +                       p->write_proc = qsnet_proc_write_str;
93582 +               p->read_proc  = qsnet_proc_read_str;
93583 +               p->data       = var;
93584 +               p->owner      = THIS_MODULE;
93585 +               p->gid        = qsnet_procfs_gid;
93586 +       }
93587 +       return p;
93588 +}
93589 +
93590 +extern struct proc_dir_entry *qsnet_procfs_root; 
93591 +extern struct proc_dir_entry *qsnet_procfs_config;
93592 +
93593 +#ifdef NO_PDE
93594 +static inline struct proc_dir_entry *PDE(const struct inode *inode)
93595 +{
93596 +    return inode->u.generic_ip;
93597 +}
93598 +#endif
93599 +#endif /* __KERNEL__ */
93600 +
93601 +#define QSNET_PROCFS_IOCTL      "/proc/qsnet/ioctl"
93602 +#define QSNET_PROCFS_KMEM_DEBUG "/proc/qsnet/kmem_debug"
93603 +#define QSNET_PROCFS_VERSION    "/proc/qsnet/version"
93604 +
93605 +#endif /* __PROCFS_LINUX_H */
93606 +
93607 +/*
93608 + * Local variables:
93609 + * c-file-style: "linux"
93610 + * End:
93611 + */
93612 Index: linux-2.6.5-7.191/include/qsnet/pthread.h
93613 ===================================================================
93614 --- linux-2.6.5-7.191.orig/include/qsnet/pthread.h      2004-02-23 16:02:56.000000000 -0500
93615 +++ linux-2.6.5-7.191/include/qsnet/pthread.h   2005-07-28 14:52:52.980658672 -0400
93616 @@ -0,0 +1,59 @@
93617 +/*
93618 + *    Copyright (c) 2003 by Quadrics Supercomputers World Ltd.
93619 + *
93620 + *    For licensing information please see the supplied COPYING file
93621 + *
93622 + */
93623 +
93624 +/* $Id: pthread.h,v 1.5 2004/06/07 10:47:06 addy Exp $ */
93625 +/*             $Source: /cvs/master/quadrics/qsnet/pthread.h,v $*/
93626 +
93627 +#ifndef _CONFIG_PTHREAD_H
93628 +#define _CONFIG_PTHREAD_H
93629 +
93630 +#ifdef __cplusplus
93631 +extern "C" {
93632 +#endif
93633 +
93634 +#if defined(__ELAN__)
93635 +
93636 +/* No pthread support on Elan co-processor */
93637 +
93638 +#define MUTEX                   unsigned long long
93639 +#define MUTEX_INIT(X)          ;
93640 +#define        MUTEX_LOCK(X)           ;
93641 +#define        MUTEX_UNLOCK(X)         ;
93642 +
93643 +#else
93644 +#if defined(DIGITAL_UNIX)
93645 +#include <tis.h>
93646 +#define MUTEX                  pthread_mutex_t
93647 +#define MUTEX_INIT(X)          tis_mutex_init(X)
93648 +#define        MUTEX_LOCK(X)           tis_mutex_lock(X)
93649 +#define        MUTEX_UNLOCK(X)         tis_mutex_unlock(X)
93650 +#define        MUTEX_TRYLOCK(X)        (tis_mutex_trylock(X) == 0)
93651 +
93652 +#else /* Linux... */
93653 +
93654 +/* Use standard pthread calls */
93655 +#include <pthread.h>
93656 +#define MUTEX                  pthread_mutex_t
93657 +#define MUTEX_INIT(X)          pthread_mutex_init(X, NULL)
93658 +#define        MUTEX_LOCK(X)           pthread_mutex_lock(X)
93659 +#define        MUTEX_UNLOCK(X)         pthread_mutex_unlock(X)
93660 +#define        MUTEX_TRYLOCK(X)        (pthread_mutex_trylock(X) == 0)
93661 +
93662 +#endif /* DIGITAL_UNIX */
93663 +#endif /* __ELAN__ */
93664 +
93665 +#ifdef __cplusplus
93666 +}
93667 +#endif
93668 +
93669 +#endif /* _CONFIG_PTHREAD_H */
93670 +
93671 +/*
93672 + * Local variables:
93673 + * c-file-style: "stroustrup"
93674 + * End:
93675 + */
93676 Index: linux-2.6.5-7.191/include/qsnet/statsformat.h
93677 ===================================================================
93678 --- linux-2.6.5-7.191.orig/include/qsnet/statsformat.h  2004-02-23 16:02:56.000000000 -0500
93679 +++ linux-2.6.5-7.191/include/qsnet/statsformat.h       2005-07-28 14:52:52.981658520 -0400
93680 @@ -0,0 +1,25 @@
93681 +#ifndef _QSNET_STATSFORMAT_H
93682 +#define _QSNET_STATSFORMAT_H
93683 +
93684 +#ident "$Id: statsformat.h,v 1.2 2003/05/22 19:37:14 addy Exp $"
93685 +/*      $Source: /cvs/master/quadrics/qsnet/statsformat.h,v $*/
93686 +
93687 +#include <qsnet/config.h>
93688 +
93689 +/*
93690 + * format of an Elan stats record
93691 + *
93692 + * type    char(8), type of statistic, e.g. FPAGE, ELAN3, TPORT
93693 + * time    uint64, 10 digits, time in millisecs since counters initialised
93694 + * device  uint, 2 digits, Elan device id
93695 + * name    char(32), name of the statistic
93696 + * value   uint64, current value of statistic
93697 + */
93698 +    
93699 +#ifdef _ILP32
93700 +#define ELAN_STATSFORMAT "%-8s %10llu %2d %-32s %llu\n"
93701 +#else
93702 +#define ELAN_STATSFORMAT "%-8s %10lu %2d %-32s %lu\n"
93703 +#endif
93704 +
93705 +#endif
93706 Index: linux-2.6.5-7.191/include/qsnet/types.h
93707 ===================================================================
93708 --- linux-2.6.5-7.191.orig/include/qsnet/types.h        2004-02-23 16:02:56.000000000 -0500
93709 +++ linux-2.6.5-7.191/include/qsnet/types.h     2005-07-28 14:52:52.981658520 -0400
93710 @@ -0,0 +1,90 @@
93711 +/*
93712 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93713 + *
93714 + *    For licensing information please see the supplied COPYING file
93715 + *
93716 + */
93717 +
93718 +#ifndef __QSNET_TYPES_H
93719 +#define __QSNET_TYPES_H
93720 +
93721 +#ident "$Id: types.h,v 1.16 2003/08/01 16:21:38 addy Exp $"
93722 +/*      $Source: /cvs/master/quadrics/qsnet/types.h,v $*/
93723 +
93724 +/*
93725 + * Include typedefs for ISO/IEC 9899:1990 standard types
93726 + *
93727 + *
93728 + *    The following integer typedefs are used:
93729 + *
93730 + *     int8_t, int16_t, int32_t, int64_t, intptr_t
93731 + *     uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t
93732 + *     uchar_t, ushort_t, uint_t, ulong_t
93733 + *
93734 + *    <sys/types.h> also defines the following:
93735 + *     u_char, u_short, u_int, u_long, caddr_t
93736 + */
93737 +
93738 +#include <qsnet/config.h>
93739 +
93740 +#if defined(SOLARIS) && defined(__KERNEL__)
93741 +#  include <sys/inttypes.h>
93742 +#endif
93743 +
93744 +#if defined(SOLARIS) && !defined(__KERNEL__)
93745 +#  include <inttypes.h>
93746 +#  include <sys/types.h>
93747 +#endif
93748 +
93749 +#if defined(DIGITAL_UNIX) && defined(__KERNEL__)
93750 +#  include <sys/bitypes.h>
93751 +#endif
93752 +
93753 +#if defined(DIGITAL_UNIX) && !defined(__KERNEL__)
93754 +#  include <inttypes.h>
93755 +#  include <sys/types.h>
93756 +#endif
93757 +
93758 +#if defined(LINUX) && defined(__KERNEL__)
93759 +#  include <linux/types.h>
93760 +#endif
93761 +
93762 +#if defined(LINUX) && !defined(__KERNEL__)
93763 +#  include <stdint.h>
93764 +#  include <inttypes.h>
93765 +#  include <sys/types.h>
93766 +
93767 +typedef unsigned char  uchar_t;
93768 +typedef unsigned short ushort_t;
93769 +typedef unsigned int   uint_t;
93770 +typedef unsigned long  ulong_t;
93771 +#endif
93772 +
93773 +#if defined(QNX)
93774 +#  include <inttypes.h>
93775 +#  include <sys/types.h>
93776 +#endif
93777 +
93778 +/* Define a type that will represent a Main CPU pointer
93779 + * on both the Main and the Elan
93780 + */
93781 +#ifdef __ELAN__
93782 +
93783 +#if defined(_MAIN_LP64)
93784 +#define QSNET_MAIN_PTR uint64_t
93785 +#else
93786 +#define QSNET_MAIN_PTR uint32_t
93787 +#endif
93788 +
93789 +#else
93790 +
93791 +#ifdef _LP64
93792 +#define QSNET_MAIN_PTR uint64_t
93793 +#else
93794 +#define QSNET_MAIN_PTR uint32_t
93795 +#endif
93796 +
93797 +#endif
93798 +
93799 +
93800 +#endif /* __QSNET_TYPES_H */
93801 Index: linux-2.6.5-7.191/include/qsnet/workarounds.h
93802 ===================================================================
93803 --- linux-2.6.5-7.191.orig/include/qsnet/workarounds.h  2004-02-23 16:02:56.000000000 -0500
93804 +++ linux-2.6.5-7.191/include/qsnet/workarounds.h       2005-07-28 14:52:52.981658520 -0400
93805 @@ -0,0 +1,24 @@
93806 +/*
93807 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93808 + *
93809 + *    For licensing information please see the supplied COPYING file
93810 + *
93811 + */
93812 +
93813 +#ifndef _QSNET_WORKAROUNDS_H
93814 +#define _QSNET_WORKAROUNDS_H
93815 +
93816 +#ident "$Id: workarounds.h,v 1.11 2002/08/09 11:15:55 addy Exp $"
93817 +/*      $Source: /cvs/master/quadrics/qsnet/workarounds.h,v $ */
93818 +
93819 +/* Elan workarounds */
93820 +#undef  ELAN_REVA_SUPPORTED    /* rev a elans no longer supported. */
93821 +#undef  ELITE_REVA_SUPPORTED   /* removed since RMS disables broadcast on rev A elites. */
93822 +#define ELAN_REVB_BUG_1
93823 +/* WORKAROUND for GNAT hw-elan3/3263 */
93824 +#define ELAN_REVB_BUG_2
93825 +
93826 +/* WORKAROUND for GNATs ic-elan3/3637 & ic-elan3/3550 */
93827 +#define ELAN_REVB_BUG_3
93828 +
93829 +#endif /* _QSNET_WORKAROUNDS_H */
93830 Index: linux-2.6.5-7.191/include/rms/rmscall.h
93831 ===================================================================
93832 --- linux-2.6.5-7.191.orig/include/rms/rmscall.h        2004-02-23 16:02:56.000000000 -0500
93833 +++ linux-2.6.5-7.191/include/rms/rmscall.h     2005-07-28 14:52:52.982658368 -0400
93834 @@ -0,0 +1,144 @@
93835 +/*
93836 + * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93837 + *
93838 + * For licensing information please see the supplied COPYING file
93839 + *
93840 + * rmscall.h:  user interface to rms kernel module
93841 + *
93842 + * $Id: rmscall.h,v 1.25 2004/05/14 08:55:57 duncan Exp $
93843 + * $Source: /cvs/master/quadrics/rmsmod/rmscall.h,v $
93844 + *
93845 + */
93846 +
93847 +#ifndef RMSCALL_H_INCLUDED
93848 +#define RMSCALL_H_INCLUDED 1
93849 +
93850 +#ident "$Id: rmscall.h,v 1.25 2004/05/14 08:55:57 duncan Exp $"
93851 +
93852 +#ifdef __cplusplus
93853 +extern "C" {
93854 +#endif
93855 +
93856 +/*
93857 + * flags for rms_fork_register
93858 + *
93859 + * RMS_IOF is not in a public header file 
93860 + */
93861 +#define RMS_IOF 1              /* inherit on fork */
93862 +
93863 +#ifndef __KERNEL__
93864 +#include <sys/types.h>
93865 +#endif
93866 +
93867 +#include <qsnet/types.h>
93868 +#include <elan/capability.h>
93869 +
93870 +#define MAXCOREPATHLEN 32
93871 +
93872 +#if defined(SOLARIS)
93873 +typedef long long rmstime_t;
93874 +#else  /* DIGITAL_UNIX */
93875 +typedef long rmstime_t;
93876 +#endif
93877 +
93878 +typedef enum {
93879 +    
93880 +    PRG_RUNNING  = 0x01,       /* program is running                  */
93881 +    PRG_ZOMBIE   = 0x02,       /* last process on a node has exited   */
93882 +    PRG_NODE     = 0x04,       /* stats are complete for this node    */
93883 +    PRG_KILLED   = 0x08,       /* program was killed                  */
93884 +    PRG_SUSPEND  = 0x10                /* program is suspended                */
93885 +
93886 +} PRGSTATUS_FLAGS;
93887 +
93888 +/*
93889 + * program time statistics extended in version 5 of the kernel module
93890 + */
93891 +typedef struct {
93892 +    rmstime_t etime;           /* elapsed cpu time (milli-secs)       */
93893 +    rmstime_t atime;           /* allocated cpu time (cpu milli-secs) */
93894 +    rmstime_t utime;           /* user cpu time (cpu milli-secs)      */
93895 +    rmstime_t stime;           /* system cpu time (cpu milli-secs)    */
93896 +    int ncpus;                 /* number of cpus allocated            */
93897 +    int flags;                 /* program status flags                */
93898 +    int mem;                   /* max memory size in MBytes           */
93899 +    int pageflts;              /* number of page faults               */
93900 +    rmstime_t memint;          /* memory integral                     */
93901 +} prgstats_old_t;
93902 +
93903 +typedef struct {
93904 +    uint64_t etime;            /* elapsed cpu time (milli-secs)       */
93905 +    uint64_t atime;            /* allocated cpu time (cpu milli-secs) */
93906 +    uint64_t utime;            /* user cpu time (cpu milli-secs)      */
93907 +    uint64_t stime;            /* system cpu time (cpu milli-secs)    */
93908 +    uint64_t pageflts;         /* number of page faults               */
93909 +    uint64_t memint;           /* memory integral                     */
93910 +    uint64_t ebytes;           /* data transferred by the Elan(s)     */
93911 +    uint64_t exfers;           /* number of Elan data transfers       */
93912 +    uint64_t spare64[4];       /* expansion space                     */
93913 +    int ncpus;                 /* number of cpus allocated            */
93914 +    int flags;                 /* program status flags                */
93915 +    int mem;                   /* max memory size in MBytes           */
93916 +    int spare32[5];             /* expansion space                     */
93917 +} prgstats_t;
93918 +
93919 +int  rmsmod_init(void);
93920 +void rmsmod_fini(void);
93921 +
93922 +int rms_setcorepath(caddr_t path);
93923 +int rms_getcorepath(pid_t pid, caddr_t path, int maxlen);
93924 +int rms_prgcreate(int id, uid_t uid, int cpus);
93925 +int rms_prgdestroy(int id);
93926 +int rms_prgids(int maxids, int *prgids, int *nprgs);
93927 +int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs);
93928 +int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap);
93929 +
93930 +int rms_prgsuspend(int id);
93931 +int rms_prgresume(int id);
93932 +int rms_prgsignal(int id, int signo);
93933 +
93934 +int rms_getprgid(pid_t pid, int *id);
93935 +int rms_ncaps(int *ncaps);
93936 +int rms_getcap(int index, ELAN_CAPABILITY *cap);
93937 +int rms_mycap(int *index);
93938 +int rms_setcap(int index, int ctx);
93939 +int rms_prefcap(int nprocess, int *index);
93940 +
93941 +int   rms_prggetstats(int id, prgstats_t *stats);
93942 +void  rms_accumulatestats(prgstats_t *total, prgstats_t *stats);
93943 +char *rms_statsreport(prgstats_t *stats, char *buf);
93944 +
93945 +int rms_elaninitdone(int vp);
93946 +int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids);
93947 +int rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers);
93948 +
93949 +int rms_setpset(int psid);
93950 +int rms_getpset(int id, int *psid);
93951 +int rms_modversion(void);
93952 +
93953 +#ifdef __cplusplus
93954 +}
93955 +#endif
93956 +
93957 +
93958 +#if defined(__KERNEL__)
93959 +
93960 +int rms_init(void);
93961 +int rms_fini(void);
93962 +int rms_reconfigure(void);
93963 +
93964 +extern int rms_debug;
93965 +
93966 +#if 1
93967 +#define DBG(x) do if (rms_debug) x ; while (0)
93968 +#else
93969 +#define DBG(x)
93970 +#endif
93971 +
93972 +#endif
93973 +
93974 +#endif /* RMSCALL_H_INCLUDED */
93975 +
93976 +
93977 +
93978 +
93979 Index: linux-2.6.5-7.191/include/rms/rmsio.h
93980 ===================================================================
93981 --- linux-2.6.5-7.191.orig/include/rms/rmsio.h  2004-02-23 16:02:56.000000000 -0500
93982 +++ linux-2.6.5-7.191/include/rms/rmsio.h       2005-07-28 14:52:52.982658368 -0400
93983 @@ -0,0 +1,185 @@
93984 +/*
93985 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93986 + *
93987 + *    For licensing information please see the supplied COPYING file
93988 + *
93989 + */
93990 +
93991 +#ident "@(#)$Id: rmsio.h,v 1.6 2004/05/14 08:55:57 duncan Exp $"
93992 +/*      $Source: /cvs/master/quadrics/rmsmod/rmsio.h,v $*/
93993 +
93994 +
93995 +#ifndef __RMSMOD_RMSIO_H
93996 +#define __RMSMOD_RMSIO_H
93997 +
93998 +/* arg is corepath string */
93999 +#define RMSIO_SETCOREPATH      _IOW ('r', 1, char)
94000 +
94001 +typedef struct rmsio_getcorepath_struct
94002 +{
94003 +    pid_t              pid;
94004 +    char               *corepath;
94005 +    int                        maxlen;
94006 +} RMSIO_GETCOREPATH_STRUCT;
94007 +#define RMSIO_GETCOREPATH      _IOW ('r', 2, RMSIO_GETCOREPATH_STRUCT)
94008 +
94009 +typedef struct rmsio_prgcreate_struct
94010 +{
94011 +    int                        id;
94012 +    uid_t              uid;
94013 +    int                        cpus;
94014 +} RMSIO_PRGCREATE_STRUCT;
94015 +#define RMSIO_PRGCREATE                _IOW ('r', 3, RMSIO_PRGCREATE_STRUCT)
94016 +
94017 +typedef struct rmsio_prginfo_struct
94018 +{
94019 +    int                        id;
94020 +    int                        maxpids;
94021 +    pid_t              *pids;
94022 +    int                        *nprocs;
94023 +} RMSIO_PRGINFO_STRUCT;
94024 +#define RMSIO_PRGINFO          _IOW ('r', 4, RMSIO_PRGINFO_STRUCT)
94025 +
94026 +typedef struct rmsio_prgsignal_struct
94027 +{
94028 +    int                        id;
94029 +    int                        signo;
94030 +} RMSIO_PRGSIGNAL_STRUCT;
94031 +#define RMSIO_PRGSIGNAL                _IOW ('r', 5, RMSIO_PRGSIGNAL_STRUCT)
94032 +
94033 +typedef struct rmsio_prgaddcap_struct
94034 +{
94035 +    int                        id;
94036 +    int                        index;
94037 +    ELAN_CAPABILITY    *cap;
94038 +} RMSIO_PRGADDCAP_STRUCT;
94039 +#define RMSIO_PRGADDCAP                _IOW ('r', 6, RMSIO_PRGADDCAP_STRUCT)
94040 +typedef struct rmsio_setcap_struct
94041 +{
94042 +    int                        index;
94043 +    int                        ctx;
94044 +} RMSIO_SETCAP_STRUCT;
94045 +#define RMSIO_SETCAP           _IOW ('r', 7, RMSIO_SETCAP_STRUCT)
94046 +
94047 +typedef struct rmsio_getcap_struct
94048 +{
94049 +    int                        index;
94050 +    ELAN_CAPABILITY     *cap;
94051 +} RMSIO_GETCAP_STRUCT;
94052 +#define RMSIO_GETCAP           _IOW ('r', 8, RMSIO_GETCAP_STRUCT)
94053 +
94054 +typedef struct rmsio_getcap_struct32
94055 +{
94056 +    int                        index;
94057 +    unsigned int        capptr;
94058 +} RMSIO_GETCAP_STRUCT32;
94059 +#define RMSIO_GETCAP32         _IOW ('r', 8, RMSIO_GETCAP_STRUCT32)
94060 +
94061 +/* arg is pointer to ncaps */
94062 +#define RMSIO_NCAPS            _IOW ('r', 9, int)
94063 +
94064 +typedef struct rmsio_prggetstats_struct
94065 +{
94066 +    int                        id;
94067 +    prgstats_old_t     *stats;
94068 +} RMSIO_PRGGETSTATS_STRUCT;
94069 +#define RMSIO_PRGGETSTATS      _IOW ('r', 10, RMSIO_PRGGETSTATS_STRUCT)
94070 +
94071 +/* arg is program id */
94072 +#define RMSIO_PRGSUSPEND       _IOW ('r', 11, int)
94073 +#define RMSIO_PRGRESUME                _IOW ('r', 12, int)
94074 +#define RMSIO_PRGDESTROY       _IOW ('r', 13, int)
94075 +
94076 +typedef struct rmsio_getprgid_struct
94077 +{
94078 +    pid_t              pid;
94079 +    int                        *id;
94080 +} RMSIO_GETPRGID_STRUCT;
94081 +#define RMSIO_GETPRGID         _IOW ('r', 14, RMSIO_GETPRGID_STRUCT)
94082 +
94083 +typedef struct rmsio_getprgid_struct32
94084 +{
94085 +    pid_t              pid;
94086 +    unsigned int       idptr;
94087 +} RMSIO_GETPRGID_STRUCT32;
94088 +#define RMSIO_GETPRGID32       _IOW ('r', 14, RMSIO_GETPRGID_STRUCT32)
94089 +
94090 +/* arg is pointer to index */
94091 +#define RMSIO_GETMYCAP         _IOW ('r', 15, int)
94092 +
94093 +typedef struct rmsio_prgids_struct
94094 +{
94095 +    int                        maxids;
94096 +    int                        *prgids;
94097 +    int                        *nprgs;
94098 +} RMSIO_PRGIDS_STRUCT;
94099 +#define RMSIO_PRGIDS           _IOW ('r', 16, RMSIO_PRGIDS_STRUCT)
94100 +
94101 +/* arg is pointer to vp */
94102 +#define RMSIO_ELANINITDONE     _IOW ('r', 17, int)
94103 +
94104 +typedef struct rmsio_prgelanpids_struct
94105 +{
94106 +    int    id;
94107 +    int    maxpids;
94108 +    int   *vps;
94109 +    int   *pids;
94110 +    int   *npids;
94111 +} RMSIO_PRGELANPIDS_STRUCT;
94112 +#define RMSIO_PRGELANPIDS      _IOW ('r', 18, RMSIO_PRGELANPIDS_STRUCT)
94113 +
94114 +typedef struct rmsio_setpset_struct
94115 +{
94116 +    int    id;
94117 +    int    psid;
94118 +} RMSIO_SETPSET_STRUCT;
94119 +#define RMSIO_SETPSET          _IOW ('r', 19, RMSIO_SETPSET_STRUCT)
94120 +
94121 +typedef struct rmsio_getpset_struct
94122 +{
94123 +    int    id;
94124 +    int   *psid;
94125 +} RMSIO_GETPSET_STRUCT;
94126 +#define RMSIO_GETPSET          _IOW ('r', 20, RMSIO_GETPSET_STRUCT)
94127 +
94128 +/*
94129 + * have to pass a pointer to the stats, the switch
94130 + * statement goes wrong in the module of the size
94131 + * is too large
94132 + */
94133 +typedef struct {
94134 +    uint64_t ebytes;
94135 +    uint64_t exfers;
94136 +} elanstats_t;
94137 +
94138 +typedef struct rmsio_setelanstats_struct
94139 +{
94140 +    int    id;
94141 +    elanstats_t *estats;
94142 +} RMSIO_SETELANSTATS_STRUCT;
94143 +#define RMSIO_SETELANSTATS      _IOW ('r', 21, RMSIO_SETELANSTATS_STRUCT)
94144 +
94145 +typedef struct rmsio_prggetstats2_struct
94146 +{
94147 +    int                        id;
94148 +    prgstats_t         *stats;
94149 +} RMSIO_PRGGETSTATS2_STRUCT;
94150 +#define RMSIO_PRGGETSTATS2     _IOW ('r', 22, RMSIO_PRGGETSTATS2_STRUCT)
94151 +
94152 +typedef struct rmsio_modversion_struct
94153 +{
94154 +    int *version;
94155 +} RMSIO_MODVERSION_STRUCT;
94156 +#define RMSIO_MODVERSION       _IOW ('r', 23, RMSIO_MODVERSION_STRUCT)
94157 +
94158 +
94159 +#endif /* __RMSMOD_RMSIO_H */
94160 +
94161 +
94162 +
94163 +
94164 +
94165 +
94166 +
94167 +
94168 +
94169 Index: linux-2.6.5-7.191/ipc/shm.c
94170 ===================================================================
94171 --- linux-2.6.5-7.191.orig/ipc/shm.c    2005-06-28 12:24:24.000000000 -0400
94172 +++ linux-2.6.5-7.191/ipc/shm.c 2005-07-28 14:52:52.983658216 -0400
94173 @@ -27,6 +27,7 @@
94174  #include <linux/shmem_fs.h>
94175  #include <linux/security.h>
94176  #include <linux/audit.h>
94177 +#include <linux/module.h>
94178  #include <linux/trigevent_hooks.h>
94179  #include <asm/uaccess.h>
94180  
94181 @@ -877,6 +878,44 @@
94182         return audit_result(retval);
94183  }
94184  
94185 +/*
94186 + * Mark all segments created by this process for destruction
94187 + */
94188 +int shm_cleanup (void)
94189 +{
94190 +       int i;
94191 +
94192 +       down(&shm_ids.sem);
94193 +
94194 +       for (i = 0; i <= shm_ids.max_id; i++) {
94195 +               struct shmid_kernel *shp;
94196 +
94197 +               shp = shm_lock(i);
94198 +               if (shp != NULL) {
94199 +                       /* mark this segment for destruction if we created it */
94200 +                       if (current->pid == shp->shm_cprid)
94201 +                       {
94202 +                               /* copy of IPC_RMID code */
94203 +                               if (shp->shm_nattch) {
94204 +                                       shp->shm_flags |= SHM_DEST;
94205 +                                       /* do not find it any more */
94206 +                                       shp->shm_perm.key = IPC_PRIVATE;
94207 +                               } else {
94208 +                                       shm_destroy(shp);
94209 +                                       continue;
94210 +                               }
94211 +                       }
94212 +
94213 +                       shm_unlock(shp);
94214 +               }
94215 +       }
94216 +
94217 +       up(&shm_ids.sem);
94218 +
94219 +       return 0;
94220 +}
94221 +EXPORT_SYMBOL_GPL(shm_cleanup);
94222 +
94223  #ifdef CONFIG_PROC_FS
94224  static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
94225  {
94226 Index: linux-2.6.5-7.191/kernel/exit.c
94227 ===================================================================
94228 --- linux-2.6.5-7.191.orig/kernel/exit.c        2005-06-28 12:24:24.000000000 -0400
94229 +++ linux-2.6.5-7.191/kernel/exit.c     2005-07-28 14:52:52.984658064 -0400
94230 @@ -40,6 +40,8 @@
94231  /* tng related changes */
94232  int (*tng_exitfunc)(int) = NULL;
94233  
94234 +#include <linux/ptrack.h>
94235 +
94236  extern void sem_exit (void);
94237  extern struct task_struct *child_reaper;
94238  void (*do_eop_acct) (int, struct task_struct *);
94239 @@ -838,6 +840,8 @@
94240                 audit_exit(tsk, code);
94241         audit_free(tsk->audit);
94242  #endif
94243 +       /* Notify any ptrack callbacks of the process exit */
94244 +       ptrack_call_callbacks (PTRACK_PHASE_EXIT, NULL);
94245         __exit_mm(tsk);
94246  
94247         if (unlikely(tng_exitfunc))
94248 Index: linux-2.6.5-7.191/kernel/fork.c
94249 ===================================================================
94250 --- linux-2.6.5-7.191.orig/kernel/fork.c        2005-06-28 12:24:10.000000000 -0400
94251 +++ linux-2.6.5-7.191/kernel/fork.c     2005-07-28 14:52:52.985657912 -0400
94252 @@ -14,6 +14,7 @@
94253  #include <linux/config.h>
94254  #include <linux/slab.h>
94255  #include <linux/init.h>
94256 +#include <linux/ptrack.h>
94257  #include <linux/unistd.h>
94258  #include <linux/smp_lock.h>
94259  #include <linux/module.h>
94260 @@ -432,6 +433,9 @@
94261         mm->page_table_lock = SPIN_LOCK_UNLOCKED;
94262         mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
94263         mm->ioctx_list = NULL;
94264 +#ifdef CONFIG_IOPROC
94265 +       mm->ioproc_ops = NULL;
94266 +#endif
94267         mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
94268         mm->free_area_cache = TASK_UNMAPPED_BASE;
94269  
94270 @@ -1267,6 +1271,11 @@
94271                       audit_fork(current, p);
94272  #endif
94273  
94274 +               if (ptrack_call_callbacks(PTRACK_PHASE_CLONE, p)) {
94275 +                       sigaddset(&p->pending.signal, SIGKILL);
94276 +                       set_tsk_thread_flag(p, TIF_SIGPENDING);
94277 +               }
94278 +
94279                 /* Trace the event  */
94280                 TRIG_EVENT(fork_hook, clone_flags, p, pid);
94281                 if (!(clone_flags & CLONE_STOPPED)) {
94282 Index: linux-2.6.5-7.191/kernel/Kconfig
94283 ===================================================================
94284 --- linux-2.6.5-7.191.orig/kernel/Kconfig       2004-02-23 16:02:56.000000000 -0500
94285 +++ linux-2.6.5-7.191/kernel/Kconfig    2005-07-28 14:52:52.985657912 -0400
94286 @@ -0,0 +1,14 @@
94287 +#
94288 +# Kernel subsystem specific config
94289 +# 
94290 +
94291 +# Support for Process Tracking callbacks
94292 +#
94293 +config PTRACK
94294 +       bool "Enable PTRACK process tracking hooks"
94295 +       default y
94296 +       help
94297 +       This option enables hooks to be called when processes are
94298 +       created and destroyed in order for a resource management 
94299 +       system to know which processes are a member of a "job" and 
94300 +       to be able to clean up when the job is terminated.
94301 Index: linux-2.6.5-7.191/kernel/Makefile
94302 ===================================================================
94303 --- linux-2.6.5-7.191.orig/kernel/Makefile      2005-06-28 12:24:10.000000000 -0400
94304 +++ linux-2.6.5-7.191/kernel/Makefile   2005-07-28 14:52:52.985657912 -0400
94305 @@ -25,6 +25,7 @@
94306  obj-$(CONFIG_EVLOG) += evlbuf.o evlapi.o evlposix.o
94307  obj-$(CONFIG_HOOK) += hook.o
94308  obj-$(CONFIG_TRIGEVENT_HOOKS) += trigevent_hooks.o
94309 +obj-$(CONFIG_PTRACK) += ptrack.o
94310  obj-$(CONFIG_LTT) += ltt/
94311  obj-$(CONFIG_KPROBES) += kprobes.o
94312  obj-$(CONFIG_CPUSETS) += cpuset.o
94313 Index: linux-2.6.5-7.191/kernel/ptrack.c
94314 ===================================================================
94315 --- linux-2.6.5-7.191.orig/kernel/ptrack.c      2004-02-23 16:02:56.000000000 -0500
94316 +++ linux-2.6.5-7.191/kernel/ptrack.c   2005-07-28 14:52:52.986657760 -0400
94317 @@ -0,0 +1,145 @@
94318 +/*
94319 + *    Copyright (C) 2000  Regents of the University of California
94320 + *
94321 + *    This program is free software; you can redistribute it and/or modify
94322 + *    it under the terms of the GNU General Public License as published by
94323 + *    the Free Software Foundation; either version 2 of the License, or
94324 + *    (at your option) any later version.
94325 + *
94326 + *    This program is distributed in the hope that it will be useful,
94327 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
94328 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
94329 + *    GNU General Public License for more details.
94330 + *
94331 + *    You should have received a copy of the GNU General Public License
94332 + *    along with this program; if not, write to the Free Software
94333 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
94334 + *
94335 + * Derived from exit_actn.c by
94336 + *    Copyright (C) 2003 Quadrics Ltd.
94337 + */
94338 +
94339 +
94340 +#include <linux/module.h>
94341 +#include <linux/spinlock.h>
94342 +#include <linux/sched.h>
94343 +#include <linux/ptrack.h>
94344 +#include <linux/slab.h>
94345 +#include <linux/list.h>
94346 +
94347 +#include <asm/errno.h>
94348 +
94349 +int
94350 +ptrack_register (ptrack_callback_t callback, void *arg)
94351 +{
94352 +       struct ptrack_desc *desc = kmalloc (sizeof (struct ptrack_desc), GFP_KERNEL);
94353 +       
94354 +       if (desc == NULL)
94355 +               return -ENOMEM;
94356 +
94357 +       desc->callback = callback;
94358 +       desc->arg      = arg;
94359 +       
94360 +       list_add_tail (&desc->link, &current->ptrack_list);
94361 +       
94362 +       return 0;
94363 +}
94364 +
94365 +void
94366 +ptrack_deregister (ptrack_callback_t callback, void *arg)
94367 +{      
94368 +       struct list_head *el, *nel;
94369 +       
94370 +       list_for_each_safe (el, nel, &current->ptrack_list) {
94371 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
94372 +               
94373 +               if (desc->callback == callback && desc->arg == arg) {
94374 +                       list_del (&desc->link);
94375 +                       kfree (desc);
94376 +               }
94377 +       }
94378 +}
94379 +
94380 +int
94381 +ptrack_registered (ptrack_callback_t callback, void *arg)
94382 +{
94383 +       struct list_head *el;
94384 +       
94385 +       list_for_each (el, &current->ptrack_list) {
94386 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
94387 +               
94388 +               if (desc->callback == callback && desc->arg == arg)
94389 +                       return 1;
94390 +       }
94391 +       return 0;
94392 +}      
94393 +        
94394 +int
94395 +ptrack_call_callbacks (int phase, struct task_struct *child)
94396 +{
94397 +       struct list_head *el, *nel;
94398 +       struct ptrack_desc *new;
94399 +       int res;
94400 +
94401 +       if (phase == PTRACK_PHASE_CLONE)
94402 +               INIT_LIST_HEAD (&child->ptrack_list);
94403 +
94404 +       list_for_each_safe (el, nel, &current->ptrack_list) {
94405 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
94406 +               
94407 +              res = desc->callback (desc->arg, phase, child);
94408 +               
94409 +               switch (phase)
94410 +               {
94411 +               case PTRACK_PHASE_EXIT:
94412 +                       list_del (&desc->link);
94413 +                       kfree (desc);
94414 +                       break;
94415 +                       
94416 +               case PTRACK_PHASE_CLONE:
94417 +                      switch (res)
94418 +                      {
94419 +                      case PTRACK_FINISHED:
94420 +                              break;
94421 +
94422 +                      case PTRACK_INNHERIT:
94423 +                              if ((new = kmalloc (sizeof (struct ptrack_desc), GFP_ATOMIC)) == NULL)
94424 +                              {
94425 +                                      /* allocation failed - notify that this process is not going
94426 +                                       * to be started by signalling clone failure.
94427 +                                       */
94428 +                                      desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
94429 +                                      
94430 +                                      goto failed;
94431 +                              }
94432 +
94433 +                               new->callback = desc->callback;
94434 +                               new->arg      = desc->arg;
94435 +                               
94436 +                               list_add_tail (&new->link, &child->ptrack_list);
94437 +                              break;
94438 +
94439 +                      case PTRACK_DENIED:
94440 +                              goto failed;
94441 +                       }
94442 +                      break;
94443 +               }
94444 +       }
94445 +
94446 +       return 0;
94447 +
94448 + failed:
94449 +       while (! list_empty (&child->ptrack_list))
94450 +       {
94451 +              struct ptrack_desc *desc = list_entry (child->ptrack_list.next, struct ptrack_desc, link);
94452 +              
94453 +              desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
94454 +
94455 +              list_del (&desc->link);
94456 +              kfree (desc);
94457 +       }
94458 +       return 1;
94459 +}
94460 +EXPORT_SYMBOL(ptrack_register);
94461 +EXPORT_SYMBOL(ptrack_deregister);
94462 +EXPORT_SYMBOL(ptrack_registered);
94463 Index: linux-2.6.5-7.191/kernel/signal.c
94464 ===================================================================
94465 --- linux-2.6.5-7.191.orig/kernel/signal.c      2005-06-28 12:24:23.000000000 -0400
94466 +++ linux-2.6.5-7.191/kernel/signal.c   2005-07-28 14:52:52.987657608 -0400
94467 @@ -2282,6 +2282,7 @@
94468         read_unlock(&tasklist_lock);
94469         return audit_lresult(error);
94470  }
94471 +EXPORT_SYMBOL_GPL(sys_kill);
94472  
94473  asmlinkage long
94474  sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
94475 Index: linux-2.6.5-7.191/mm/fremap.c
94476 ===================================================================
94477 --- linux-2.6.5-7.191.orig/mm/fremap.c  2005-06-28 12:23:58.000000000 -0400
94478 +++ linux-2.6.5-7.191/mm/fremap.c       2005-07-28 14:52:52.988657456 -0400
94479 @@ -14,6 +14,7 @@
94480  #include <linux/swapops.h>
94481  #include <linux/objrmap.h>
94482  #include <linux/module.h>
94483 +#include <linux/ioproc.h>
94484  
94485  #include <asm/mmu_context.h>
94486  #include <asm/cacheflush.h>
94487 @@ -29,6 +30,7 @@
94488         if (pte_present(pte)) {
94489                 unsigned long pfn = pte_pfn(pte);
94490  
94491 +               ioproc_invalidate_page(vma, addr);
94492                 flush_cache_page(vma, addr);
94493                 pte = ptep_clear_flush(vma, addr, ptep);
94494                 if (pfn_valid(pfn)) {
94495 @@ -80,6 +82,7 @@
94496         pte_val = *pte;
94497         pte_unmap(pte);
94498         update_mmu_cache(vma, addr, pte_val);
94499 +       ioproc_update_page(vma, addr);
94500  
94501         err = 0;
94502  err_unlock:
94503 @@ -118,6 +121,7 @@
94504         pte_val = *pte;
94505         pte_unmap(pte);
94506         update_mmu_cache(vma, addr, pte_val);
94507 +       ioproc_update_page(vma, addr);
94508         spin_unlock(&mm->page_table_lock);
94509         return 0;
94510  
94511 Index: linux-2.6.5-7.191/mm/ioproc.c
94512 ===================================================================
94513 --- linux-2.6.5-7.191.orig/mm/ioproc.c  2004-02-23 16:02:56.000000000 -0500
94514 +++ linux-2.6.5-7.191/mm/ioproc.c       2005-07-28 14:52:52.988657456 -0400
94515 @@ -0,0 +1,58 @@
94516 +/* -*- linux-c -*-
94517 + *
94518 + *    Copyright (C) 2002-2004 Quadrics Ltd.
94519 + *
94520 + *    This program is free software; you can redistribute it and/or modify
94521 + *    it under the terms of the GNU General Public License as published by
94522 + *    the Free Software Foundation; either version 2 of the License, or
94523 + *    (at your option) any later version.
94524 + *
94525 + *    This program is distributed in the hope that it will be useful,
94526 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
94527 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
94528 + *    GNU General Public License for more details.
94529 + *
94530 + *    You should have received a copy of the GNU General Public License
94531 + *    along with this program; if not, write to the Free Software
94532 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
94533 + *
94534 + *
94535 + */
94536 +
94537 +/*
94538 + * Registration for IO processor page table updates.
94539 + */
94540 +
94541 +#include <linux/kernel.h>
94542 +#include <linux/module.h>
94543 +
94544 +#include <linux/mm.h>
94545 +#include <linux/ioproc.h>
94546 +
94547 +int
94548 +ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip)
94549 +{
94550 +       ip->next = mm->ioproc_ops;
94551 +       mm->ioproc_ops = ip;
94552 +
94553 +       return 0;
94554 +}
94555 +
94556 +EXPORT_SYMBOL_GPL(ioproc_register_ops);
94557 +
94558 +int
94559 +ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip)
94560 +{
94561 +       struct ioproc_ops **tmp;
94562 +
94563 +       for (tmp = &mm->ioproc_ops; *tmp && *tmp != ip; tmp= &(*tmp)->next)
94564 +               ;
94565 +       if (*tmp) {
94566 +               *tmp = ip->next;
94567 +               return 0;
94568 +       }
94569 +
94570 +       return -EINVAL;
94571 +}
94572 +
94573 +EXPORT_SYMBOL_GPL(ioproc_unregister_ops);
94574 Index: linux-2.6.5-7.191/mm/Kconfig
94575 ===================================================================
94576 --- linux-2.6.5-7.191.orig/mm/Kconfig   2004-02-23 16:02:56.000000000 -0500
94577 +++ linux-2.6.5-7.191/mm/Kconfig        2005-07-28 14:52:52.989657304 -0400
94578 @@ -0,0 +1,15 @@
94579 +#
94580 +# VM subsystem specific config
94581 +# 
94582 +
94583 +# Support for IO processors which have advanced RDMA capabilities
94584 +#
94585 +config IOPROC
94586 +       bool "Enable IOPROC VM hooks"
94587 +       depends on MMU
94588 +       default y
94589 +       help
94590 +       This option enables hooks in the VM subsystem so that IO devices which
94591 +       incorporate advanced RDMA capabilities can be kept in sync with CPU 
94592 +       page table changes.
94593 +       See Documentation/vm/ioproc.txt for more details.
94594 Index: linux-2.6.5-7.191/mm/Makefile
94595 ===================================================================
94596 --- linux-2.6.5-7.191.orig/mm/Makefile  2005-06-28 12:23:58.000000000 -0400
94597 +++ linux-2.6.5-7.191/mm/Makefile       2005-07-28 14:52:52.989657304 -0400
94598 @@ -15,4 +15,5 @@
94599  obj-$(CONFIG_SWAP)     += page_io.o swap_state.o swapfile.o
94600  obj-$(CONFIG_PROC_MM)  += proc_mm.o
94601  obj-$(CONFIG_NUMA)     += policy.o
94602 +obj-$(CONFIG_IOPROC)    += ioproc.o
94603  
94604 Index: linux-2.6.5-7.191/mm/memory.c
94605 ===================================================================
94606 --- linux-2.6.5-7.191.orig/mm/memory.c  2005-06-28 12:24:14.000000000 -0400
94607 +++ linux-2.6.5-7.191/mm/memory.c       2005-07-28 14:55:16.271875096 -0400
94608 @@ -43,6 +43,7 @@
94609  #include <linux/swap.h>
94610  #include <linux/highmem.h>
94611  #include <linux/pagemap.h>
94612 +#include <linux/ioproc.h>
94613  #include <linux/objrmap.h>
94614  #include <linux/module.h>
94615  #include <linux/acct.h>
94616 @@ -630,6 +631,7 @@
94617  
94618         lru_add_drain();
94619         spin_lock(&mm->page_table_lock);
94620 +       ioproc_invalidate_range(vma, address, end);
94621         tlb = tlb_gather_mmu(mm, 0);
94622         unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
94623         tlb_finish_mmu(tlb, address, end);
94624 @@ -936,6 +938,7 @@
94625                 BUG();
94626  
94627         spin_lock(&mm->page_table_lock);
94628 +       ioproc_invalidate_range(vma, beg, end);
94629         do {
94630                 pmd_t *pmd = pmd_alloc(mm, dir, address);
94631                 error = -ENOMEM;
94632 @@ -950,6 +953,7 @@
94633         /*
94634          * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
94635          */
94636 +       ioproc_update_range(vma, beg, end);
94637         flush_tlb_range(vma, beg, end);
94638         spin_unlock(&mm->page_table_lock);
94639         return error;
94640 @@ -1020,6 +1024,7 @@
94641                 BUG();
94642  
94643         spin_lock(&mm->page_table_lock);
94644 +       ioproc_invalidate_range(vma, beg, end);
94645         do {
94646                 pmd_t *pmd = pmd_alloc(mm, dir, from);
94647                 error = -ENOMEM;
94648 @@ -1034,6 +1039,7 @@
94649         /*
94650          * Why flush? remap_pte_range has a BUG_ON for !pte_none()
94651          */
94652 +       ioproc_update_range(vma, beg, end);
94653         flush_tlb_range(vma, beg, end);
94654         spin_unlock(&mm->page_table_lock);
94655         return error;
94656 @@ -1122,6 +1128,7 @@
94657                         update_mmu_cache(vma, address, entry);
94658                         lazy_mmu_prot_update(entry);
94659                         pte_unmap(page_table);
94660 +                       ioproc_update_page(vma, address);
94661                         spin_unlock(&mm->page_table_lock);
94662                         return VM_FAULT_MINOR;
94663                 }
94664 @@ -1157,6 +1164,7 @@
94665                 }
94666  
94667                 page_remove_rmap(old_page);
94668 +               ioproc_invalidate_page(vma, address);
94669                 break_cow(vma, new_page, address, page_table);
94670                 page_add_rmap(new_page, vma, address, 1);
94671                 lru_cache_add_active(new_page);
94672 @@ -1165,6 +1173,7 @@
94673                 new_page = old_page;
94674         }
94675         pte_unmap(page_table);
94676 +       ioproc_update_page(vma, address);
94677         page_cache_release(new_page);
94678         page_cache_release(old_page);
94679         spin_unlock(&mm->page_table_lock);
94680 @@ -1472,6 +1481,7 @@
94681         update_mmu_cache(vma, address, pte);
94682         lazy_mmu_prot_update(pte);
94683         pte_unmap(page_table);
94684 +       ioproc_update_page(vma, address);
94685         spin_unlock(&mm->page_table_lock);
94686  out:
94687         return ret;
94688 @@ -1534,6 +1544,7 @@
94689         /* No need to invalidate - it was non-present before */
94690         update_mmu_cache(vma, addr, entry);
94691         lazy_mmu_prot_update(entry);
94692 +       ioproc_update_page(vma, addr);
94693         spin_unlock(&mm->page_table_lock);
94694         ret = VM_FAULT_MINOR;
94695  
94696 @@ -1674,6 +1685,7 @@
94697         /* no need to invalidate: a not-present page shouldn't be cached */
94698         update_mmu_cache(vma, address, entry);
94699         lazy_mmu_prot_update(entry);
94700 +       ioproc_update_page(vma, address);
94701         spin_unlock(&mm->page_table_lock);
94702   out:
94703         return ret;
94704 @@ -1774,6 +1786,7 @@
94705         spin_unlock(&mm->page_table_lock);
94706         return VM_FAULT_MINOR;
94707  }
94708 +EXPORT_SYMBOL(make_pages_present);
94709  
94710  
94711  /* Can be overwritten by the architecture */
94712 Index: linux-2.6.5-7.191/mm/mmap.c
94713 ===================================================================
94714 --- linux-2.6.5-7.191.orig/mm/mmap.c    2005-06-28 12:24:15.000000000 -0400
94715 +++ linux-2.6.5-7.191/mm/mmap.c 2005-07-28 14:52:52.992656848 -0400
94716 @@ -25,6 +25,7 @@
94717  #include <linux/init.h>
94718  #include <linux/file.h>
94719  #include <linux/fs.h>
94720 +#include <linux/ioproc.h>
94721  #include <linux/personality.h>
94722  #include <linux/security.h>
94723  #include <linux/hugetlb.h>
94724 @@ -1378,6 +1379,7 @@
94725         unsigned long nr_accounted = 0;
94726  
94727         lru_add_drain();
94728 +       ioproc_invalidate_range(vma, start, end);
94729         tlb = tlb_gather_mmu(mm, 0);
94730         unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
94731         vm_unacct_memory(nr_accounted);
94732 @@ -1697,6 +1699,7 @@
94733  
94734         spin_lock(&mm->page_table_lock);
94735  
94736 +       ioproc_release(mm);
94737         tlb = tlb_gather_mmu(mm, 1);
94738         flush_cache_mm(mm);
94739         /* Use ~0UL here to ensure all VMAs in the mm are unmapped */
94740 Index: linux-2.6.5-7.191/mm/mprotect.c
94741 ===================================================================
94742 --- linux-2.6.5-7.191.orig/mm/mprotect.c        2005-06-28 12:24:14.000000000 -0400
94743 +++ linux-2.6.5-7.191/mm/mprotect.c     2005-07-28 14:52:52.992656848 -0400
94744 @@ -10,6 +10,7 @@
94745  
94746  #include <linux/mm.h>
94747  #include <linux/hugetlb.h>
94748 +#include <linux/ioproc.h>
94749  #include <linux/slab.h>
94750  #include <linux/shm.h>
94751  #include <linux/mman.h>
94752 @@ -101,6 +102,7 @@
94753         if (start >= end)
94754                 BUG();
94755         spin_lock(&current->mm->page_table_lock);
94756 +       ioproc_change_protection(vma, start, end, newprot);
94757         do {
94758                 change_pmd_range(dir, start, end - start, newprot);
94759                 start = (start + PGDIR_SIZE) & PGDIR_MASK;
94760 Index: linux-2.6.5-7.191/mm/mremap.c
94761 ===================================================================
94762 --- linux-2.6.5-7.191.orig/mm/mremap.c  2005-06-28 12:24:09.000000000 -0400
94763 +++ linux-2.6.5-7.191/mm/mremap.c       2005-07-28 14:52:52.993656696 -0400
94764 @@ -9,6 +9,7 @@
94765  
94766  #include <linux/mm.h>
94767  #include <linux/hugetlb.h>
94768 +#include <linux/ioproc.h>
94769  #include <linux/slab.h>
94770  #include <linux/shm.h>
94771  #include <linux/mman.h>
94772 @@ -144,6 +145,8 @@
94773  {
94774         unsigned long offset = len;
94775  
94776 +       ioproc_invalidate_range(vma, old_addr, old_addr + len);
94777 +       ioproc_invalidate_range(vma, new_addr, new_addr + len);
94778         flush_cache_range(vma, old_addr, old_addr + len);
94779  
94780         /*
94781 Index: linux-2.6.5-7.191/mm/msync.c
94782 ===================================================================
94783 --- linux-2.6.5-7.191.orig/mm/msync.c   2005-06-28 12:23:58.000000000 -0400
94784 +++ linux-2.6.5-7.191/mm/msync.c        2005-07-28 14:52:52.993656696 -0400
94785 @@ -12,6 +12,7 @@
94786  #include <linux/mm.h>
94787  #include <linux/mman.h>
94788  #include <linux/hugetlb.h>
94789 +#include <linux/ioproc.h>
94790  
94791  #include <asm/pgtable.h>
94792  #include <asm/pgalloc.h>
94793 @@ -116,6 +117,7 @@
94794  
94795         if (address >= end)
94796                 BUG();
94797 +       ioproc_sync_range(vma, address, end);
94798         do {
94799                 error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
94800                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
94801 Index: linux-2.6.5-7.191/mm/objrmap.c
94802 ===================================================================
94803 --- linux-2.6.5-7.191.orig/mm/objrmap.c 2005-06-28 12:24:10.000000000 -0400
94804 +++ linux-2.6.5-7.191/mm/objrmap.c      2005-07-28 14:52:52.994656544 -0400
94805 @@ -29,6 +29,7 @@
94806  #include <linux/swapops.h>
94807  #include <linux/objrmap.h>
94808  #include <linux/init.h>
94809 +#include <linux/ioproc.h>
94810  #include <asm/tlbflush.h>
94811  
94812  kmem_cache_t * anon_vma_cachep;
94813 @@ -393,6 +394,8 @@
94814  {
94815         pte_t pteval;
94816  
94817 +       ioproc_invalidate_page(vma, address);
94818 +
94819         flush_cache_page(vma, address);
94820         pteval = ptep_clear_flush(vma, address, pte);
94821