Whamcloud - gitweb
land b1_5 onto HEAD
[fs/lustre-release.git] / lustre / kernel_patches / patches / qsnet-rhel4-2.6.patch
1 diff -urN clean/arch/i386/defconfig linux-2.6.9/arch/i386/defconfig
2 --- clean/arch/i386/defconfig   2004-10-18 17:54:38.000000000 -0400
3 +++ linux-2.6.9/arch/i386/defconfig     2005-10-10 17:47:17.000000000 -0400
4 @@ -119,6 +119,8 @@
5  CONFIG_IRQBALANCE=y
6  CONFIG_HAVE_DEC_LOCK=y
7  # CONFIG_REGPARM is not set
8 +CONFIG_IOPROC=y
9 +CONFIG_PTRACK=y
10  
11  #
12  # Power management options (ACPI, APM)
13 diff -urN clean/arch/i386/Kconfig linux-2.6.9/arch/i386/Kconfig
14 --- clean/arch/i386/Kconfig     2005-05-13 13:39:03.000000000 -0400
15 +++ linux-2.6.9/arch/i386/Kconfig       2005-10-10 17:47:17.000000000 -0400
16 @@ -946,6 +946,9 @@
17           support.  As of this writing the exact hardware interface is
18           strongly in flux, so no good recommendation can be made.
19  
20 +source "mm/Kconfig"
21 +source "kernel/Kconfig"
22 +       
23  endmenu
24  
25  
26 diff -urN clean/arch/ia64/defconfig linux-2.6.9/arch/ia64/defconfig
27 --- clean/arch/ia64/defconfig   2004-10-18 17:53:12.000000000 -0400
28 +++ linux-2.6.9/arch/ia64/defconfig     2005-10-10 17:47:17.000000000 -0400
29 @@ -83,6 +83,8 @@
30  CONFIG_COMPAT=y
31  CONFIG_PERFMON=y
32  CONFIG_IA64_PALINFO=y
33 +CONFIG_IOPROC=y
34 +CONFIG_PTRACK=y
35  
36  #
37  # Firmware Drivers
38 diff -urN clean/arch/ia64/Kconfig linux-2.6.9/arch/ia64/Kconfig
39 --- clean/arch/ia64/Kconfig     2005-05-13 13:39:00.000000000 -0400
40 +++ linux-2.6.9/arch/ia64/Kconfig       2005-10-10 17:47:17.000000000 -0400
41 @@ -299,6 +299,9 @@
42           To use this option, you have to ensure that the "/proc file system
43           support" (CONFIG_PROC_FS) is enabled, too.
44  
45 +source "mm/Kconfig"
46 +source "kernel/Kconfig"
47 +
48  source "drivers/firmware/Kconfig"
49  
50  source "fs/Kconfig.binfmt"
51 diff -urN clean/arch/x86_64/defconfig linux-2.6.9/arch/x86_64/defconfig
52 --- clean/arch/x86_64/defconfig 2004-10-18 17:54:39.000000000 -0400
53 +++ linux-2.6.9/arch/x86_64/defconfig   2005-10-10 17:47:17.000000000 -0400
54 @@ -87,6 +87,8 @@
55  CONFIG_GART_IOMMU=y
56  CONFIG_SWIOTLB=y
57  CONFIG_X86_MCE=y
58 +CONFIG_IOPROC=y
59 +CONFIG_PTRACK=y
60  
61  #
62  # Power management options
63 diff -urN clean/arch/x86_64/Kconfig linux-2.6.9/arch/x86_64/Kconfig
64 --- clean/arch/x86_64/Kconfig   2005-05-13 13:39:03.000000000 -0400
65 +++ linux-2.6.9/arch/x86_64/Kconfig     2005-10-10 17:47:17.000000000 -0400
66 @@ -327,6 +327,9 @@
67            machine check error logs. See
68            ftp://ftp.x86-64.org/pub/linux/tools/mcelog
69  
70 +source "mm/Kconfig"
71 +source "kernel/Kconfig"
72 +
73  endmenu
74  
75  
76 diff -urN clean/Documentation/vm/ioproc.txt linux-2.6.9/Documentation/vm/ioproc.txt
77 --- clean/Documentation/vm/ioproc.txt   1969-12-31 19:00:00.000000000 -0500
78 +++ linux-2.6.9/Documentation/vm/ioproc.txt     2005-10-10 17:47:17.000000000 -0400
79 @@ -0,0 +1,468 @@
80 +Linux IOPROC patch overview
81 +===========================
82 +
83 +The network interface for an HPC network differs significantly from
84 +network interfaces for traditional IP networks. HPC networks tend to
85 +be used directly from user processes and perform large RDMA transfers
86 +between theses processes address space. They also have a requirement
87 +for low latency communication, and typically achieve this by OS bypass
88 +techniques.  This then requires a different model to traditional
89 +interconnects, in that a process may need to expose a large amount of
90 +it's address space to the network RDMA.
91 +
92 +Locking down of memory has been a common mechanism for performing
93 +this, together with a pin-down cache implemented in user
94 +libraries. The disadvantage of this method is that large portions of
95 +the physical memory can be locked down for a single process, even if
96 +it's working set changes over the different phases of it's
97 +execution. This leads to inefficient memory utilisation - akin to the
98 +disadvantage of swapping compared to paging.
99 +
100 +This model also has problems where memory is being dynamically
101 +allocated and freed, since the pin down cache is unaware that memory
102 +may have been released by a call to munmap() and so it will still be
103 +locking down the now unused pages.
104 +
105 +Some modern HPC network interfaces implement their own MMU and are
106 +able to handle a translation fault during a network access. The
107 +Quadrics (http://www.quadrics.com) devices (Elan3 and Elan4) have done
108 +this for some time and we expect others to follow the same route in
109 +the relatively near future. These NICs are able to operate in an
110 +environment where paging occurs and do not require memory to be locked
111 +down. The advantage of this is that the user process can expose large
112 +portions of it's address space without having to worry about physical
113 +memory constraints.
114 +
115 +However should the operating system decide to swap a page to disk,
116 +then the NIC must be made aware that it should no longer read/write
117 +from this memory, but should generate a translation fault instead.
118 +
119 +The ioproc patch has been developed to provide a mechanism whereby the
120 +device driver for a NIC can be aware of when a user process's address
121 +translations change, either by paging or by explicitly mapping or
122 +unmapping memory.
123 +
124 +The patch involves inserting callbacks where translations are being
125 +invalidated to notify the NIC that the memory behind those
126 +translations is no longer visible to the application (and so should
127 +not be visible to the NIC). This callback is then responsible for
128 +ensuring that the NIC will not access the physical memory that was
129 +being mapped.
130 +
131 +An ioproc invalidate callback in the kswapd code could be utilised to
132 +prevent memory from being paged out if the NIC is unable to support
133 +network page faulting.
134 +
135 +For NICs which support network page faulting, there is no requirement
136 +for a user level pin down cache, since they are able to page-in their
137 +translations on the first communication using a buffer. However this
138 +is likely to be inefficient, resulting in slow first use of the
139 +buffer. If the communication buffers were continually allocated and
140 +freed using mmap based malloc() calls then this would lead to all
141 +communications being slower than desirable.
142 +
143 +To optimise these warm-up cases the ioproc patch adds calls to
144 +ioproc_update wherever the kernel is creating translations for a user
145 +process. These then allows the device driver to preload translations
146 +so that they are already present for the first network communication
147 +from a buffer.
148 +
149 +Linux 2.6 IOPROC implementation details
150 +=======================================
151 +
152 +The Linux IOPROC patch adds hooks to the Linux VM code whenever page
153 +table entries are being created and/or invalidated. IOPROC device
154 +drivers can register their interest in being informed of such changes
155 +by registering an ioproc_ops structure which is defined as follows;
156 +
157 +extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip);
158 +extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip);
159 +
160 +typedef struct ioproc_ops {
161 +       struct ioproc_ops *next;
162 +       void *arg;
163 +
164 +       void (*release)(void *arg, struct mm_struct *mm);
165 +       void (*sync_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
166 +       void (*invalidate_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
167 +       void (*update_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
168 +
169 +       void (*change_protection)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot);
170 +
171 +       void (*sync_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
172 +       void (*invalidate_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
173 +       void (*update_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
174 +
175 +} ioproc_ops_t;
176 +
177 +ioproc_register_ops
178 +===================
179 +This function should be called by the IOPROC device driver to register
180 +its interest in PTE changes for the process associated with the passed
181 +in mm_struct.
182 +
183 +The ioproc registration is not inherited across fork() and should be
184 +called once for each process that IOPROC is interested in.
185 +
186 +This function must be called whilst holding the mm->page_table_lock.
187 +
188 +ioproc_unregister_ops
189 +=====================
190 +This function should be called by the IOPROC device driver when it no
191 +longer requires informing of PTE changes in the process associated
192 +with the supplied mm_struct.
193 +
194 +This function is not normally needed to be called as the ioproc_ops
195 +struct is unlinked from the associated mm_struct during the
196 +ioproc_release() call.
197 +
198 +This function must be called whilst holding the mm->page_table_lock.
199 +
200 +ioproc_ops struct
201 +=================
202 +A linked list ioproc_ops structures is hung off the user process
203 +mm_struct (linux/sched.h). At each hook point in the patched kernel
204 +the ioproc patch will call the associated ioproc_ops callback function
205 +pointer in turn for each registered structure.
206 +
207 +The intention of the callbacks is to allow the IOPROC device driver to
208 +inspect the new or modified PTE entry via the Linux kernel
209 +(e.g. find_pte_map()). These callbacks should not modify the Linux
210 +kernel VM state or PTE entries.
211 +
212 +The ioproc_ops callback function pointers are defined as follows;
213 +
214 +ioproc_release
215 +==============
216 +The release hook is called when a program exits and all its vma areas
217 +are torn down and unmapped. i.e. during exit_mmap(). Before each
218 +release hook is called the ioproc_ops structure is unlinked from the
219 +mm_struct.
220 +
221 +No locks are required as the process has the only reference to the mm
222 +at this point.
223 +
224 +ioproc_sync_[range|page]
225 +========================
226 +The sync hooks are called when a memory map is synchronised with its
227 +disk image i.e. when the msync() syscall is invoked. Any future read
228 +or write by the IOPROC device to the associated pages should cause the
229 +page to be marked as referenced or modified.
230 +
231 +Called holding the mm->page_table_lock
232 +
233 +ioproc_invalidate_[range|page]
234 +==============================
235 +The invalidate hooks are called whenever a valid PTE is unloaded
236 +e.g. when a page is unmapped by the user or paged out by the
237 +kernel. After this call the IOPROC must not access the physical memory
238 +again unless a new translation is loaded.
239 +
240 +Called holding the mm->page_table_lock
241 +
242 +ioproc_update_[range|page]
243 +==========================
244 +The update hooks are called whenever a valid PTE is loaded
245 +e.g. mmaping memory, moving the brk up, when breaking COW or faulting
246 +in an anonymous page of memory. These give the IOPROC device the
247 +opportunity to load translations speculatively, which can improve
248 +performance by avoiding device translation faults.
249 +
250 +Called holding the mm->page_table_lock
251 +
252 +ioproc_change_protection
253 +========================
254 +This hook is called when the protection on a region of memory is
255 +changed i.e. when the mprotect() syscall is invoked.
256 +
257 +The IOPROC must not be able to write to a read-only page, so if the
258 +permissions are downgraded then it must honour them. If they are
259 +upgraded it can treat this in the same way as the
260 +ioproc_update_[range|page]() calls
261 +
262 +Called holding the mm->page_table_lock
263 +
264 +
265 +Linux 2.6 IOPROC patch details
266 +==============================
267 +
268 +Here are the specific details of each ioproc hook added to the Linux
269 +2.6 VM system and the reasons for doing so;
270 +
271 +++++ FILE
272 +       mm/fremap.c
273 +
274 +==== FUNCTION
275 +       zap_pte
276 +
277 +CALLED FROM
278 +       install_page
279 +       install_file_pte
280 +
281 +PTE MODIFICATION
282 +       ptep_clear_flush
283 +
284 +ADDED HOOKS
285 +       ioproc_invalidate_page
286 +
287 +==== FUNCTION
288 +       install_page
289 +
290 +CALLED FROM
291 +       filemap_populate, shmem_populate
292 +
293 +PTE MODIFICATION
294 +       set_pte
295 +
296 +ADDED HOOKS
297 +       ioproc_update_page
298 +
299 +==== FUNCTION
300 +       install_file_pte
301 +
302 +CALLED FROM
303 +       filemap_populate, shmem_populate
304 +
305 +PTE MODIFICATION
306 +       set_pte
307 +
308 +ADDED HOOKS
309 +       ioproc_update_page
310 +
311 +
312 +++++ FILE
313 +       mm/memory.c
314 +
315 +==== FUNCTION
316 +       zap_page_range
317 +
318 +CALLED FROM
319 +       read_zero_pagealigned, madvise_dontneed, unmap_mapping_range,
320 +       unmap_mapping_range_list, do_mmap_pgoff
321 +
322 +PTE MODIFICATION
323 +       set_pte (unmap_vmas)
324 +
325 +ADDED HOOKS
326 +       ioproc_invalidate_range
327 +
328 +
329 +==== FUNCTION
330 +       zeromap_page_range
331 +
332 +CALLED FROM
333 +       read_zero_pagealigned, mmap_zero
334 +
335 +PTE MODIFICATION
336 +       set_pte (zeromap_pte_range)
337 +
338 +ADDED HOOKS
339 +       ioproc_invalidate_range
340 +       ioproc_update_range
341 +
342 +
343 +==== FUNCTION
344 +       remap_page_range
345 +
346 +CALLED FROM
347 +       many device drivers
348 +
349 +PTE MODIFICATION
350 +       set_pte (remap_pte_range)
351 +
352 +ADDED HOOKS
353 +       ioproc_invalidate_range
354 +       ioproc_update_range
355 +
356 +
357 +==== FUNCTION
358 +       break_cow
359 +
360 +CALLED FROM
361 +       do_wp_page
362 +
363 +PTE MODIFICATION
364 +       ptep_establish
365 +
366 +ADDED HOOKS
367 +       ioproc_invalidate_page
368 +       ioproc_update_page
369 +
370 +
371 +==== FUNCTION
372 +       do_wp_page
373 +
374 +CALLED FROM
375 +       do_swap_page, handle_pte_fault
376 +
377 +PTE MODIFICATION
378 +       ptep_set_access_flags
379 +
380 +ADDED HOOKS
381 +       ioproc_update_page
382 +
383 +
384 +==== FUNCTION
385 +       do_swap_page
386 +
387 +CALLED FROM
388 +       handle_pte_fault
389 +
390 +PTE MODIFICATION
391 +       set_pte
392 +
393 +ADDED HOOKS
394 +       ioproc_update_page
395 +
396 +
397 +==== FUNCTION
398 +       do_anonymous_page
399 +
400 +CALLED FROM
401 +       do_no_page
402 +
403 +PTE MODIFICATION
404 +       set_pte
405 +
406 +ADDED HOOKS
407 +       ioproc_update_page
408 +
409 +
410 +==== FUNCTION
411 +       do_no_page
412 +
413 +CALLED FROM
414 +       do_file_page, handle_pte_fault
415 +
416 +PTE MODIFICATION
417 +       set_pte
418 +
419 +ADDED HOOKS
420 +       ioproc_update_page
421 +
422 +
423 +++++ FILE
424 +       mm/mmap.c
425 +
426 +==== FUNCTION
427 +       unmap_region
428 +
429 +CALLED FROM
430 +       do_munmap
431 +
432 +PTE MODIFICATION
433 +       set_pte (unmap_vmas)
434 +
435 +ADDED HOOKS
436 +       ioproc_invalidate_range
437 +
438 +
439 +==== FUNCTION
440 +       exit_mmap
441 +
442 +CALLED FROM
443 +       mmput
444 +
445 +PTE MODIFICATION
446 +       set_pte (unmap_vmas)
447 +
448 +ADDED HOOKS
449 +       ioproc_release
450 +
451 +
452 +++++ FILE
453 +       mm/mprotect.c
454 +
455 +==== FUNCTION
456 +       change_protection
457 +
458 +CALLED FROM
459 +       mprotect_fixup
460 +
461 +PTE MODIFICATION
462 +       set_pte (change_pte_range)
463 +
464 +ADDED HOOKS
465 +       ioproc_change_protection
466 +
467 +
468 +++++ FILE
469 +       mm/mremap.c
470 +
471 +==== FUNCTION
472 +       move_page_tables
473 +
474 +CALLED FROM
475 +       move_vma
476 +
477 +PTE MODIFICATION
478 +       ptep_clear_flush (move_one_page)
479 +
480 +ADDED HOOKS
481 +       ioproc_invalidate_range
482 +       ioproc_invalidate_range
483 +
484 +
485 +++++ FILE
486 +       mm/rmap.c
487 +
488 +==== FUNCTION
489 +       try_to_unmap_one
490 +
491 +CALLED FROM
492 +       try_to_unmap_anon, try_to_unmap_file
493 +
494 +PTE MODIFICATION
495 +       ptep_clear_flush
496 +
497 +ADDED HOOKS
498 +       ioproc_invalidate_page
499 +
500 +
501 +==== FUNCTION
502 +       try_to_unmap_cluster
503 +
504 +CALLED FROM
505 +       try_to_unmap_file
506 +
507 +PTE MODIFICATION
508 +       ptep_clear_flush
509 +
510 +ADDED HOOKS
511 +       ioproc_invalidate_page
512 +
513 +
514 +
515 +++++ FILE 
516 +       mm/msync.c
517 +
518 +==== FUNCTION
519 +       filemap_sync
520 +
521 +CALLED FROM
522 +       msync_interval
523 +
524 +PTE MODIFICATION
525 +       ptep_clear_flush_dirty (filemap_sync_pte)
526 +
527 +ADDED HOOKS
528 +       ioproc_sync_range
529 +
530 +
531 +++++ FILE
532 +       mm/hugetlb.c
533 +
534 +==== FUNCTION
535 +       zap_hugepage_range
536 +
537 +CALLED FROM
538 +       hugetlb_vmtruncate_list
539 +
540 +PTE MODIFICATION
541 +       ptep_get_and_clear (unmap_hugepage_range)
542 +
543 +ADDED HOOK
544 +       ioproc_invalidate_range
545 +
546 +
547 +-- Last update DavidAddison - 17 Aug 2004
548 diff -urN clean/drivers/net/qsnet/eip/eip_linux.c linux-2.6.9/drivers/net/qsnet/eip/eip_linux.c
549 --- clean/drivers/net/qsnet/eip/eip_linux.c     1969-12-31 19:00:00.000000000 -0500
550 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_linux.c       2005-09-07 10:34:58.000000000 -0400
551 @@ -0,0 +1,1575 @@
552 +/*
553 + *    Copyright (c) 2003 by Quadrics Ltd.
554 + * 
555 + *    For licensing information please see the supplied COPYING file
556 + *
557 + */
558 +
559 +#ident "@(#)$Id: eip_linux.c,v 1.96.2.3 2005/09/07 14:34:58 mike Exp $"
560 +
561 +#include <qsnet/kernel.h>
562 +#include <qsnet/debug.h>
563 +
564 +#include <qsnet/module.h>
565 +#include <linux/init.h>
566 +#include <linux/list.h>
567 +#include <linux/netdevice.h>
568 +#include <linux/etherdevice.h>
569 +#include <linux/skbuff.h>
570 +#include <linux/kernel.h>
571 +#include <linux/proc_fs.h>
572 +#include <linux/time.h>
573 +#include <linux/version.h>
574 +
575 +#include <asm/uaccess.h>
576 +#include <asm/unaligned.h>
577 +
578 +#undef ASSERT
579 +#include <net/sock.h>
580 +#include <net/ip.h>
581 +
582 +
583 +
584 +#include <elan/epcomms.h>
585 +#include <elan/epsvc.h>
586 +
587 +#include "eip_linux.h"
588 +#include "eip_stats.h"
589 +
590 +#ifdef UNUSED
591 +static void eip_skb_display(struct sk_buff *);
592 +#endif
593 +static void eip_iph_display(struct iphdr *);
594 +#ifdef UNUSED
595 +static void eip_eiph_display(EIP_HEADER *);
596 +static void eip_packet_display(unsigned char *);
597 +#endif
598 +static void eip_tmd_display(EIP_TMD *);
599 +static void eip_tmd_head_display(EIP_TMD_HEAD *);
600 +static void eip_rmd_display(EIP_RMD *);
601 +static void eip_rmd_head_display(EIP_RMD_HEAD *);
602 +
603 +static void eip_rmd_reclaim(EIP_RMD *);
604 +
605 +static inline EP_NMH *eip_dma_reserve(int, int);
606 +static inline void __eip_tmd_load(EIP_TMD *, EP_RAILMASK *);
607 +static inline void __eip_tmd_unload(EIP_TMD *);
608 +static inline unsigned long eip_buff_alloc(int, int);
609 +static inline void eip_buff_free(unsigned long, int);
610 +static struct iphdr *eip_ipfrag_get(char *);
611 +static inline void eip_rmd_free(EIP_RMD *);
612 +static inline void eip_skb_load(EIP_RMD *);
613 +static inline void eip_skb_unload(EIP_RMD *);
614 +static inline void eip_rmd_requeue(EIP_RMD *);
615 +static EIP_RMD *eip_rmd_alloc(int, int);
616 +static int eip_rmd_alloc_replace(EIP_RMD *, int, int);
617 +static int eip_rmd_alloc_queue(int, int, int, int);
618 +static int eip_rmds_alloc(void);
619 +static void eip_rxhandler(EP_RXD *);
620 +static void eip_rx_tasklet(unsigned long);
621 +static inline void eip_tmd_init(EIP_TMD *, unsigned long, EIP_TMD_HEAD *, unsigned long, int);
622 +static inline EIP_TMD *eip_tmd_get(int);
623 +static inline void eip_tmd_put(EIP_TMD *);
624 +static inline void eip_tmd_load(EIP_TMD *);
625 +static inline void eip_tmd_unload(EIP_TMD *);
626 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD *, EIP_TMD_HEAD *, int);
627 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD *, int);
628 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD *, int);
629 +static int eip_tmds_alloc(void);
630 +int eip_hard_start_xmit(struct sk_buff *, struct net_device *);
631 +static inline int eip_do_xmit(EIP_TMD *, EP_NMD *i, EP_PAYLOAD *);
632 +static void eip_txhandler(EP_TXD *, void *, EP_STATUS);
633 +static void eip_tx_tasklet(unsigned long);
634 +void eip_stop_queue(void);
635 +void eip_start_queue(void);
636 +static int eip_open(struct net_device *);
637 +static int eip_close(struct net_device *);
638 +static struct net_device_stats *eip_get_stats(struct net_device *);
639 +static int eip_change_mtu(struct net_device *, int);
640 +
641 +static int eip_rx_dropping = 0;
642 +static int eip_rx_tasklet_locked = 1;
643 +
644 +/* Global */
645 +struct timer_list eip_rx_tasklet_timer;
646 +       
647 +EIP_RX *eip_rx = NULL;
648 +EIP_TX *eip_tx = NULL;
649 +int  eip_checksum_state=CHECKSUM_NONE;
650 +
651 +int tmd_max = EIP_TMD_MAX_NR;
652 +int rmd_max = EIP_RMD_MAX_NR;
653 +int rx_envelope_nr = EIP_RX_ENVELOPE_NR;
654 +int rx_granularity = EIP_RX_GRANULARITY;
655 +int tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
656 +EP_RAILMASK tx_railmask = EP_RAILMASK_ALL;
657 +int eipdebug = 0;
658 +
659 +#ifdef UNUSED
660 +static void eip_skb_display(struct sk_buff *skb)
661 +{
662 +       if (skb) {
663 +               __EIP_DBG_PRINTF("SKB [%p] : len %d truesize %d  proto %x pkt type %x cloned %d users %d summed %d\n", 
664 +                       skb, skb->len, skb->truesize, skb->protocol, skb->pkt_type, skb->cloned, atomic_read(&skb->users), skb->ip_summed);
665 +               __EIP_DBG_PRINTF("SKB [%p] : skb_shinfo dataref %d nr_frags %d frag_list[%p] (device %p)\n", skb,
666 +                        atomic_read(&skb_shinfo(skb)->dataref), skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->frag_list, skb->dev);
667 +               __EIP_DBG_PRINTF("SKB [%p] : head[%p] data[%p] tail [%p] end [%p] data_len [%d]\n", skb, skb->head, skb->data, 
668 +                               skb->tail, skb->end, skb->data_len);
669 +               __EIP_DBG_PRINTF("SKB [%p] : Transport Layer h.(th, uh, icmph, raw)[%p]\n", skb, skb->h.th);
670 +               __EIP_DBG_PRINTF("SKB [%p] : Network Layer      nh.(iph, arph, raw)[%p]\n", skb, skb->nh.iph);
671 +               __EIP_DBG_PRINTF("SKB [%p] : Link Layer         mac.(ethernet, raw)[%p]\n", skb, skb->mac.ethernet);
672 +               return;
673 +       }
674 +       EIP_ERR_PRINTF("SKB IS NULL - NO SKB TO DISPLAY\n");
675 +}
676 +#endif
677 +static void eip_iph_display(struct iphdr *iph)
678 +{
679 +       if (iph) {
680 +               __EIP_DBG_PRINTF("IPH [%p] : version %d header len %d TOS 0x%x Total len %d\n", 
681 +                       iph, iph->version, iph->ihl, htons(iph->tos), htons(iph->tot_len));
682 +               __EIP_DBG_PRINTF("IPH [%p] : id %d frag flags 0x%x offset %d\n",
683 +                               iph, htons(iph->id), (iph->frag_off & htons(IP_CE | IP_DF | IP_MF)) >> 4, 
684 +                               (htons(iph->frag_off) << 3) & IP_OFFSET);
685 +               __EIP_DBG_PRINTF("IPH [%p] : TTL %d proto %d header checksum 0x%x\n", iph, iph->ttl, iph->protocol, iph->check);
686 +               __EIP_DBG_PRINTF("IPH [%p] : IP src %u.%u.%u.%u dest %u.%u.%u.%u\n", iph, 
687 +                                ((unsigned char *)&(iph->saddr))[0],((unsigned char *)&(iph->saddr))[1], ((unsigned char *)&(iph->saddr))[2],((unsigned char *)&(iph->saddr))[3],
688 +                                ((unsigned char *)&(iph->daddr))[0],((unsigned char *)&(iph->daddr))[1], ((unsigned char *)&(iph->daddr))[2],((unsigned char *)&(iph->daddr))[3]);
689 +               return;
690 +       }
691 +       EIP_ERR_PRINTF("IPH IS NULL - NO IPH TO DISPLAY\n");
692 +}
693 +#ifdef UNUSED
694 +static void eip_eiph_display(EIP_HEADER * eiph)
695 +{
696 +       if (eiph) {
697 +               __EIP_DBG_PRINTF("EIPH [%p] : dhost %04x.%04x.%04x sap %x\n", eiph, eiph->h_dhost.ip_bcast, eiph->h_dhost.ip_inst, 
698 +                               eiph->h_dhost.ip_addr, eiph->h_sap);
699 +               __EIP_DBG_PRINTF("EIPH [%p] : shost %04x.%04x.%04x \n", eiph, eiph->h_shost.ip_bcast, eiph->h_shost.ip_inst,
700 +                                eiph->h_shost.ip_addr);
701 +               return;
702 +       }
703 +       EIP_ERR_PRINTF("EIPH IS NULL - NO EIPH TO DISPLAY\n");
704 +}
705 +static void eip_packet_display(unsigned char *data)
706 +{
707 +       eip_eiph_display((EIP_HEADER *) data);
708 +       eip_iph_display((struct iphdr *) (data + EIP_HEADER_PAD + ETH_HLEN));
709 +}
710 +#endif
711 +static void eip_tmd_display(EIP_TMD * tmd)
712 +{
713 +       if (tmd) {
714 +               __EIP_DBG_PRINTF("\t\tTMD [%p] : next[%p] skb[%p] DVMA[%d]\n", tmd, tmd->chain.next, tmd->skb, tmd->dvma_idx);
715 +               if (tmd->dma_base)
716 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] *data 0x%lx\n", tmd, tmd->head, *((unsigned long *) tmd->dma_base));
717 +               else
718 +                       __EIP_DBG_PRINTF("TMD [%p] : head[%p] NO DATA !!!\n", tmd, tmd->head);
719 +               __EIP_DBG_PRINTF("TMD [%p] : DMA(%lx,%d,%d) ebase[%x]\n",tmd,  tmd->dma_base, tmd->dma_len, tmd->nmd.nmd_len,
720 +                                tmd->nmd.nmd_addr);
721 +               return;
722 +       }
723 +       EIP_ERR_PRINTF("TMD IS NULL - NO TMD TO DISPLAY\n");
724 +       
725 +}
726 +static void eip_ipf_display(EIP_IPFRAG * ipf)
727 +{
728 +       if (ipf) {
729 +               __EIP_DBG_PRINTF("IPF[%p] : datagram len %d dma correction %d uts %lx frag_nr %d\n", ipf, ipf->datagram_len,
730 +                               ipf->dma_correction, ipf->timestamp.tv_usec, ipf->frag_nr);
731 +               eip_tmd_display((EIP_TMD *) ipf);
732 +               return;
733 +       }
734 +       EIP_ERR_PRINTF("IPF IS NULL - NO IPF TO DISPLAY\n");
735 +}
736 +
737 +static void eip_tmd_head_display(EIP_TMD_HEAD * head)
738 +{
739 +       if (head) {
740 +               __EIP_DBG_PRINTF("TMD HEAD [%p] : handle[%p] tmds[%p] %3.3d/%3.3d/%3.3d\n", head, head->handle, head->tmd, 
741 +                       EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats),
742 +                       eip_tx->tmd_max_nr);
743 +               return;
744 +       }
745 +       EIP_ERR_PRINTF("TMD HEAD IS NULL - NO TMD HEAD TO DISPLAY\n");
746 +}
747 +static void eip_rmd_display(EIP_RMD * rmd)
748 +{
749 +       if (rmd) {
750 +               __EIP_DBG_PRINTF("RMD [%p] : next[%p] rxd[%p] DVMA[%d]\n", rmd, rmd->chain.next, rmd->rxd, rmd->dvma_idx);
751 +               __EIP_DBG_PRINTF("RMD [%p] : head[%p]\n", rmd, rmd->head); 
752 +               __EIP_DBG_PRINTF("RMD [%p] : ebase[%x]\n", rmd,  rmd->nmd.nmd_addr); 
753 +               return;
754 +       }
755 +       EIP_ERR_PRINTF("RMD IS NULL - NO RMD TO DISPLAY\n");
756 +}
757 +static void eip_rmd_head_display(EIP_RMD_HEAD * head)
758 +{
759 +       if (head) {
760 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : rcvr[%p] handle[%p] busy list[%p]\n", head, head->rcvr, head->handle, head->busy_list);
761 +               __EIP_DBG_PRINTF("RMD HEAD [%p] : %3.3d/%3.3d/%3.3d\n", head, 
762 +                               EIP_STAT_QUEUED_GET(&head->stats), EIP_STAT_ALLOC_GET(&head->stats), eip_rx->rmd_max_nr);
763 +               return;
764 +       }
765 +       EIP_ERR_PRINTF("RMD HEAD IS NULL - NO RMD HEAD TO DISPLAY\n");
766 +}
767 +
768 +/* END  - DISPLAY FUNCTIONS */
769 +static inline EP_NMH *eip_dma_reserve(int pages_nr, int perm)
770 +{
771 +       EP_NMH *handle = ep_dvma_reserve(eip_tx->ep_system, pages_nr, perm);
772 +       
773 +       if (handle)
774 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HANDLE [%p] %d pages of elan address space reserved\n", 
775 +                               handle, pages_nr);
776 +       else
777 +               EIP_ERR_PRINTF("cannot reserve %d page(s) of elan address space\n", pages_nr);
778 +
779 +       return handle;
780 +}
781 +
782 +static inline void __eip_tmd_load(EIP_TMD * tmd, EP_RAILMASK *rmask)
783 +{
784 +       EIP_ASSERT(tmd->nmd.nmd_len > 0);
785 +       
786 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) tmd->dma_base, tmd->nmd.nmd_len, tmd->head->handle,
787 +                       tmd->dvma_idx, rmask, &tmd->nmd);
788 +}
789 +
790 +static inline void __eip_tmd_unload(EIP_TMD * tmd)
791 +{
792 +       EIP_ASSERT(tmd->nmd.nmd_addr && tmd->head->handle);
793 +       
794 +       ep_dvma_unload(eip_tx->ep_system, tmd->head->handle, &tmd->nmd);
795 +       tmd->nmd.nmd_addr = 0;
796 +}
797 +static inline unsigned long eip_buff_alloc(int buff_len, int gfp)
798 +{
799 +       unsigned long buff_base = (buff_len < PAGE_SIZE) ? 
800 +                               (unsigned long) kmalloc(buff_len, gfp) :
801 +                               __get_dma_pages(gfp, get_order(buff_len));
802 +       
803 +       if (likely(buff_base))
804 +               return buff_base;
805 +
806 +       EIP_ERR_PRINTF("cannot allocate %db of memory\n", buff_len);
807 +       return 0;
808 +}
809 +static inline void eip_buff_free(unsigned long buff_base, int buff_len)
810 +{
811 +       (buff_len < PAGE_SIZE) ?  kfree((void *) buff_base) :
812 +               free_pages(buff_base, get_order(buff_len));
813 +}
814 +static struct iphdr *eip_ipfrag_get(char *data)
815 +{
816 +       struct ethhdr *eh = (struct ethhdr *) (data);
817 +       struct iphdr *iph;
818 +
819 +       if (eh->h_proto == htons(ETH_P_IP)) {
820 +               iph = (struct iphdr *) ((char *) eh + ETH_HLEN);
821 +
822 +               /* EIP_DBG(eip_iph_display(iph)); */
823 +
824 +               if ((iph->frag_off & htons(IP_MF | IP_OFFSET)))
825 +                       return iph;
826 +       }
827 +       return NULL;
828 +}
829 +
830 +static inline void eip_rmd_free(EIP_RMD * rmd)
831 +{
832 +       EIP_ASSERT2(rmd->nmd.nmd_addr == 0, eip_rmd_display, rmd);
833 +       
834 +       if ( rmd->skb != NULL) 
835 +               kfree_skb (rmd->skb);
836 +       
837 +       kfree(rmd);
838 +
839 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "RMD [%p] : FREED\n", rmd);
840 +}
841 +static inline void eip_skb_load(EIP_RMD * rmd)
842 +{
843 +       EP_RAILMASK rmask = rmd->rxd ? ep_rxd_railmask (rmd->rxd) : 0;
844 +
845 +       EIP_ASSERT(skb_tailroom(rmd->skb) > 0);
846 +
847 +       ep_dvma_load(eip_tx->ep_system, NULL, (caddr_t) rmd->skb->data, skb_tailroom(rmd->skb), rmd->head->handle,
848 +                    rmd->dvma_idx, &rmask, &rmd->nmd);
849 +       
850 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : LOADED\n", rmd);
851 +}
852 +static inline void eip_skb_unload(EIP_RMD * rmd)
853 +{
854 +       EIP_ASSERT(rmd->nmd.nmd_addr && rmd->head->handle);
855 +       
856 +       ep_dvma_unload(eip_tx->ep_system, rmd->head->handle, &rmd->nmd);
857 +       rmd->nmd.nmd_addr = 0;
858 +       
859 +       EIP_DBG_PRINTF(EIP_DBG_RMD_EP_DVMA, "RMD [%p] : UNLOADED\n", rmd);
860 +}
861 +static inline void eip_rmd_requeue(EIP_RMD * rmd)
862 +{
863 +       EIP_ASSERT(rmd->rxd);
864 +
865 +       rmd->chain.next    = NULL;
866 +
867 +       ep_requeue_receive(rmd->rxd, eip_rxhandler, rmd, &rmd->nmd, EP_NO_ALLOC|EP_NO_SLEEP );
868 +
869 +       atomic_inc(&rmd->head->stats);
870 +       
871 +       EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : REQUEUED\n", rmd);
872 +}
873 +static EIP_RMD * eip_rmd_alloc(int svc, int gfp)
874 +{
875 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
876 +       EIP_RMD *rmd;
877 +       struct sk_buff *skb;
878 +
879 +       if (!(skb = alloc_skb((buff_len -  EIP_EXTRA), gfp)))
880 +               return NULL;
881 +       
882 +       skb_reserve(skb, 2);
883 +
884 +       if (!(rmd = (EIP_RMD *) kmalloc(buff_len, gfp))) {
885 +               kfree_skb(skb);
886 +               return NULL;
887 +       }
888 +
889 +       rmd->skb = skb;
890 +
891 +       rmd->chain.next = NULL;
892 +       rmd->rxd = NULL;
893 +       rmd->head = &eip_rx->head[svc];
894 +
895 +       return rmd;
896 +}
897 +
898 +static int eip_rmd_alloc_replace(EIP_RMD *rmd, int svc, int gfp) 
899 +{
900 +       struct sk_buff *skb,*old;
901 +       int buff_len = EIP_SVC_SMALLEST_LEN << svc;
902 +
903 +       if (!(skb = alloc_skb(buff_len, gfp)))
904 +               return 1;
905 +       
906 +       skb_reserve(skb, 2);
907 +
908 +       eip_skb_unload(rmd);
909 +
910 +       old      = rmd->skb;
911 +       rmd->skb = skb;
912 +
913 +       eip_skb_load(rmd);
914 +
915 +       eip_rmd_requeue(rmd);
916 +
917 +       kfree_skb(old);
918 +
919 +       return 0;
920 +}
921 +
922 +static int eip_rmd_alloc_queue(int svc, int dvma_idx, int gfp, int attr)
923 +{
924 +       EIP_RMD * rmd = eip_rmd_alloc(svc, gfp);
925 +
926 +       if (!rmd)
927 +               return 1;
928 +
929 +       EIP_STAT_ALLOC_ADD(&rmd->head->stats, 1);
930 +
931 +       rmd->dvma_idx = dvma_idx;
932 +       eip_skb_load(rmd);
933 +
934 +       EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "RMD [%p] : ALLOCATED for SVC 0x%x\n", rmd, svc);
935 +
936 +       if (ep_queue_receive(rmd->head->rcvr, eip_rxhandler, (void *) rmd, &rmd->nmd, attr) == ESUCCESS) {
937 +               atomic_inc(&rmd->head->stats);
938 +               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "RMD [%p] : QUEUED on SVC 0x%x\n", rmd, svc);
939 +               return 0;
940 +       }
941 +       
942 +       EIP_ERR_PRINTF("RMD [%p] : couldn't be QUEUED on SVC 0x%x\n", rmd, svc);
943 +
944 +       EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
945 +
946 +       eip_skb_unload(rmd);
947 +       eip_rmd_free(rmd);
948 +
949 +       return 1;
950 +}
951 +
952 +static int eip_rmds_alloc(void)
953 +{
954 +       int idx, svc;
955 +
956 +       eip_rx->irq_list = NULL;
957 +       eip_rx->irq_list_nr = 0;
958 +
959 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
960 +               eip_rx->head[svc].rcvr = ep_alloc_rcvr(eip_tx->ep_system, EIP_SVC_EP(svc), rx_envelope_nr);
961 +               if (!eip_rx->head[svc].rcvr) {
962 +                       EIP_ERR_PRINTF("Cannot install receiver for SVC 0x%x - maybe cable is disconnected\n", svc);
963 +                       return -EAGAIN;
964 +               }
965 +
966 +               eip_rx->head[svc].handle =
967 +                   eip_dma_reserve(EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)) * eip_rx->rmd_max_nr,
968 +                                   EP_PERM_WRITE);
969 +               if (!eip_rx->head[svc].handle)
970 +                       return -ENOMEM;
971 +               
972 +               EIP_DBG(EIP_DBG_RMD_HEAD, eip_rmd_head_display, &eip_rx->head[svc]);
973 +
974 +               for (idx = 0; idx < EIP_RMD_NR; idx++) {
975 +                       if (eip_rmd_alloc_queue(svc, idx * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
976 +                                               GFP_KERNEL, EP_NO_SLEEP))
977 +                               return -ENOMEM;
978 +               }
979 +       }
980 +       return 0;
981 +}
982 +static void eip_rmds_free(void)
983 +{
984 +       unsigned long flags;
985 +       EIP_RMD *rmd;
986 +       int svc; 
987 +       
988 +       spin_lock_irqsave(&eip_rx->lock, flags);
989 +       rmd = eip_rx->irq_list;
990 +       eip_rx->irq_list = NULL;
991 +       eip_rx->irq_list_nr = 0;
992 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
993 +
994 +       eip_rmd_reclaim(rmd);
995 +       
996 +       for (svc = 0; svc < EIP_SVC_NR ; svc++) {
997 +               
998 +               while ((rmd = eip_rx->head[svc].busy_list)) {
999 +                       eip_rx->head[svc].busy_list = NULL;
1000 +                       eip_rmd_reclaim(rmd);
1001 +                       if (eip_rx->head[svc].busy_list) {
1002 +                               EIP_DBG_PRINTF(EIP_DBG_RMD_QUEUE, "Still RMD [%p] on BUSY list SVC 0x%d - Scheduling\n", rmd, svc);     
1003 +                               schedule();
1004 +                       }
1005 +               }
1006 +
1007 +               EIP_ASSERT(EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats) == EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats));
1008 +               
1009 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "HEAD[%p] : FREEING RCVR [%p]\n", &eip_rx->head[svc],
1010 +                               eip_rx->head[svc].rcvr);
1011 +               
1012 +               ep_free_rcvr(eip_rx->head[svc].rcvr);
1013 +
1014 +               EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "HEAD[%p] : RELEASING DVMA [%p]\n", &eip_rx->head[svc], 
1015 +                               eip_rx->head[svc].handle);
1016 +
1017 +               ep_dvma_release(eip_tx->ep_system, eip_rx->head[svc].handle);
1018 +       }
1019 +
1020 +}
1021 +static int eip_rx_queues_low (void) {
1022 +       int svc;
1023 +       for (svc = 0; svc < EIP_SVC_NR; svc++) 
1024 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats)  < EIP_RMD_ALLOC_THRESH) 
1025 +                       return (1);
1026 +       return (0);
1027 +}
1028 +static void eip_rxhandler(EP_RXD * rxd)
1029 +{
1030 +       EIP_RMD *rmd            = (EIP_RMD *) ep_rxd_arg(rxd);
1031 +       EP_STATUS ret           = ep_rxd_status(rxd);
1032 +       EP_PAYLOAD * payload    = ep_rxd_payload(rxd);
1033 +       unsigned long data      = (unsigned long) rmd->skb->data; 
1034 +       int frag_nr             = 0;
1035 +       int len;
1036 +
1037 +       struct sk_buff *skb;
1038 +       static char count = 0;
1039 +
1040 +       atomic_dec(&rmd->head->stats);
1041 +       rmd->rxd = rxd;
1042 +
1043 +       if (likely(ret == EP_SUCCESS)) {
1044 +
1045 +               rmd->head->dma++;
1046 +
1047 +               if ( eip_rx_dropping) {
1048 +                   eip_rmd_requeue(rmd);
1049 +                   return;
1050 +               }
1051 +
1052 +               len = (payload) ? payload->Data[frag_nr++] : ep_rxd_len(rxd);
1053 +
1054 +               EIP_DBG(EIP_DBG_RMD, eip_rmd_display, rmd);
1055 +
1056 +again:
1057 +               if ( (skb = skb_clone(rmd->skb, GFP_ATOMIC)) ) {
1058 +                       unsigned int off = (data - (unsigned long) rmd->skb->data);
1059 +
1060 +                       /* have to set the length before calling
1061 +                        * skb pull as it will not allow you to
1062 +                        * pull past the end */
1063 +
1064 +                       skb_put (skb, off + len);
1065 +                       skb_pull (skb, off);
1066 +
1067 +                       skb->protocol = eth_type_trans(skb, eip_rx->net_device);
1068 +                       skb->ip_summed = eip_checksum_state;
1069 +                       skb->dev = eip_rx->net_device;
1070 +
1071 +                       /* Fabien/David/Mike this is a hack/fix to allow aggregation of packets to work.
1072 +                        * The problem is ip_frag looks at the truesize to see if it is caching too much space.
1073 +                        * As we are reusing a large skb (cloned) for a number of small fragments, they appear to take up a lot of space,
1074 +                        * so ip_frag dropped them after 4 frags (not good). So we lie and set the truesize to just bigger than the data. 
1075 +                        */
1076 +                       if (payload) 
1077 +                               skb->truesize = SKB_DATA_ALIGN(skb->len + EIP_HEADER_PAD) +sizeof(struct sk_buff);
1078 +
1079 +               }
1080 +               if ( (skb) && 
1081 +                    (netif_rx(skb) != NET_RX_DROP)){
1082 +
1083 +                       eip_rx->bytes += len;
1084 +                       
1085 +                       if (payload && payload->Data[frag_nr] ) {
1086 +                               data += EIP_IP_ALIGN(len);
1087 +                               len   = payload->Data[frag_nr++];
1088 +                               goto again;
1089 +                       }
1090 +                       eip_rx->packets += ++frag_nr;
1091 +               } else if ( (eip_rx->dropped++ % 20) == 0)
1092 +                               __EIP_DBG_PRINTK("Packet dropped by the TCP/IP stack - increase /proc/sys/net/core/netdev_max_backlog\n");
1093 +       } else if (ret == EP_SHUTDOWN ) {
1094 +               EIP_DBG2(EIP_DBG_RMD, eip_rmd_display, rmd, "ABORTING\n");
1095 +                ep_complete_receive(rxd);
1096 +                eip_skb_unload(rmd);
1097 +               EIP_STAT_ALLOC_SUB(&rmd->head->stats, 1);
1098 +                eip_rmd_free(rmd);
1099 +               return;
1100 +       } else {
1101 +               EP_ENVELOPE *env = ep_rxd_envelope(rxd);
1102 +               EP_NMD *nmd ;
1103 +               
1104 +               EIP_ERR_PRINTF("RMD[%p] : RECEIVE ret = %d\n", rmd, ret);
1105 +
1106 +               for (len = 0 ; len < env->nFrags ; len++) {
1107 +                       nmd = &env->Frags[len];
1108 +                       EIP_ERR_PRINTF("RMD[%p] : ep_frag #%d nmd_addr [%x] nmd_len %d\n", rmd, len, 
1109 +                                       (unsigned int) nmd->nmd_addr, nmd->nmd_len);
1110 +               }
1111 +               eip_rx->errors++;
1112 +               EIP_ASSERT2(atomic_read(&skb_shinfo(rmd->skb)->dataref) == 1, eip_rmd_display, rmd);
1113 +       }
1114 +
1115 +       /* data is used to store the irq flags */
1116 +       spin_lock_irqsave(&eip_rx->lock, data);
1117 +       rmd->chain.next = eip_rx->irq_list;
1118 +       eip_rx->irq_list = rmd;
1119 +       eip_rx->irq_list_nr++;
1120 +       spin_unlock_irqrestore(&eip_rx->lock, data);
1121 +
1122 +       if (((count++ % eip_rx->sysctl_granularity) == 0) /* and either we have passed up a number of them */
1123 +           || eip_rx_queues_low())                       /* or we are low                                 */
1124 +               tasklet_schedule(&eip_rx->tasklet);
1125 +       else
1126 +       {
1127 +               if ( !timer_pending (&eip_rx_tasklet_timer)  )    /* the timer not already set  */
1128 +                       mod_timer (&eip_rx_tasklet_timer, lbolt);
1129 +       }
1130 +}
1131 +
1132 +/* if the skb data is still referenced elsewhere, move the rmd to the head's busy list; otherwise requeue it */
1133 +static void eip_rmd_reclaim(EIP_RMD *rmd) 
1134 +{
1135 +       EIP_RMD *rmd_next = rmd;
1136 +       int dataref;
1137 +
1138 +       while (rmd_next) {
1139 +               rmd = rmd_next;
1140 +               rmd_next = rmd_next->chain.next;
1141 +
1142 +               dataref = atomic_read(&skb_shinfo(rmd->skb)->dataref);
1143 +               EIP_ASSERT(dataref > 0);
1144 +               
1145 +               if (dataref == 1) {
1146 +                       eip_rmd_requeue(rmd);
1147 +               } else {
1148 +                       rmd->chain.next = rmd->head->busy_list;
1149 +                       rmd->head->busy_list = rmd;
1150 +               }
1151 +       }
1152 +}
1153 +static void eip_rx_tasklet(unsigned long arg)
1154 +{
1155 +       EIP_RMD *rmd, *rmd_next;
1156 +       unsigned long flags;
1157 +       short svc, queued;
1158 +       int   needs_reschedule;
1159 +
1160 +       if (eip_rx_tasklet_locked) /* we dont want the tasklet to do anything when we are finishing */
1161 +           return;
1162 +
1163 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
1164 +               rmd = eip_rx->head[svc].busy_list;
1165 +               eip_rx->head[svc].busy_list = NULL;
1166 +               eip_rmd_reclaim(rmd);
1167 +       }
1168 +
1169 +       spin_lock_irqsave(&eip_rx->lock, flags);
1170 +       rmd = eip_rx->irq_list;
1171 +       eip_rx->irq_list = NULL;
1172 +       eip_rx->irq_list_nr = 0;
1173 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
1174 +       
1175 +       eip_rmd_reclaim(rmd);
1176 +
1177 +       needs_reschedule = 0;
1178 +
1179 +       for (svc = 0; svc < EIP_SVC_NR; svc++) {
1180 +               /* the plan is : allocate some more if possible or steal some dvma space from those on the EIP_BUSY_LIST */
1181 +               queued = EIP_STAT_QUEUED_GET(&eip_rx->head[svc].stats);
1182 +
1183 +               EIP_ASSERT(queued >= 0 && queued <= EIP_RMD_MAX_NR);    
1184 +               
1185 +               if (queued < EIP_RMD_ALLOC_THRESH) {
1186 +                       short allocated = EIP_STAT_ALLOC_GET(&eip_rx->head[svc].stats);
1187 +                       short how_many; 
1188 +
1189 +                       EIP_ASSERT(allocated >= 0 && allocated <= EIP_RMD_MAX_NR);
1190 +                       
1191 +                       if (likely(allocated < eip_rx->rmd_max_nr)) {
1192 +
1193 +                               how_many = (((allocated / EIP_RMD_ALLOC_STEP) + 1) * EIP_RMD_ALLOC_STEP);
1194 +                               if (how_many > eip_rx->rmd_max_nr)
1195 +                                       how_many = eip_rx->rmd_max_nr;
1196 +
1197 +                               for (; allocated < how_many &&  
1198 +                                                       (eip_rmd_alloc_queue(svc, allocated * EIP_DVMA_PAGES((EIP_SVC_SMALLEST_LEN << svc)), 
1199 +                                                                             GFP_ATOMIC, EP_NO_ALLOC|EP_NO_SLEEP) == 0) ; allocated++);
1200 +                               if ( allocated != how_many ) {
1201 +                                       eip_rx->reschedule++;
1202 +                                       needs_reschedule = 1;
1203 +                               }
1204 +                       } else {
1205 +                               /* steal how_many rmds and put them on the aside list */
1206 +                               how_many = EIP_RMD_ALLOC_THRESH - queued;
1207 +
1208 +                               EIP_ASSERT(how_many >= 0 && how_many <= EIP_RMD_ALLOC_THRESH);
1209 +                               
1210 +                               rmd_next = eip_rx->head[svc].busy_list;
1211 +                               eip_rx->head[svc].busy_list = NULL;
1212 +
1213 +                               while (how_many-- && rmd_next) {
1214 +                                       rmd = rmd_next;
1215 +                                       rmd_next = rmd_next->chain.next;
1216 +
1217 +                                       if (eip_rmd_alloc_replace(rmd, svc, GFP_ATOMIC)) {
1218 +                                               rmd_next = rmd;
1219 +                                               break;
1220 +                                       }
1221 +                               }
1222 +                               eip_rx->head[svc].busy_list = rmd_next;
1223 +                               if ( how_many )
1224 +                                       needs_reschedule = 1;
1225 +                       }
1226 +               }
1227 +       }
1228 +       
1229 +       if (needs_reschedule) 
1230 +       {
1231 +               if ( !timer_pending (&eip_rx_tasklet_timer)) 
1232 +                       mod_timer (&eip_rx_tasklet_timer, lbolt);
1233 +       }
1234 +}
1235 +static void eip_rx_tasklet_resched(unsigned long arg)
1236 +{
1237 +       tasklet_schedule(&eip_rx->tasklet);     
1238 +}
1239 +
1240 +static inline void eip_tmd_init(EIP_TMD * tmd, unsigned long buff_base, EIP_TMD_HEAD * head, unsigned long buff_len,
1241 +                               int dvma_idx)
1242 +{
1243 +       tmd->dvma_idx = dvma_idx;
1244 +       tmd->dma_base = buff_base;
1245 +       tmd->dma_len = -1;
1246 +       tmd->skb = NULL;
1247 +       tmd->head = head;
1248 +       tmd->chain.next = NULL;
1249 +
1250 +       if (tmd->head != &eip_tx->head[EIP_TMD_STD]) {
1251 +               tmd->nmd.nmd_len = buff_len;
1252 +               eip_tmd_load(tmd);
1253 +       } else  {
1254 +               tmd->nmd.nmd_len  = -1;
1255 +               tmd->nmd.nmd_addr = 0;
1256 +       }
1257 +}
1258 +
1259 +static inline EIP_TMD *eip_tmd_get(int id)
1260 +{
1261 +       unsigned long flags;
1262 +       EIP_TMD *tmd = NULL;
1263 +       spin_lock_irqsave(&eip_tx->lock, flags);
1264 +       while ((tmd = eip_tx->head[id].tmd) == NULL) {
1265 +               spin_unlock_irqrestore(&eip_tx->lock, flags);
1266 +               if (ep_enable_txcallbacks(eip_tx->xmtr) == 0) {
1267 +
1268 +                       spin_lock_irqsave (&eip_tx->lock, flags);
1269 +                       if (eip_tx->head[id].tmd == NULL) {
1270 +                               __EIP_DBG_PRINTF("Cannot get a TMD on head %d ... stopping queue\n", id);
1271 +                               
1272 +                               eip_stop_queue ();
1273 +                               
1274 +                               spin_unlock_irqrestore (&eip_tx->lock, flags);
1275 +
1276 +                               return NULL;
1277 +                       }
1278 +                       spin_unlock_irqrestore (&eip_tx->lock, flags);
1279 +               }
1280 +
1281 +               ep_disable_txcallbacks(eip_tx->xmtr);
1282 +               spin_lock_irqsave(&eip_tx->lock, flags);
1283 +       }
1284 +       eip_tx->head[id].tmd = tmd->chain.next;
1285 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
1286 +       atomic_dec(&tmd->head->stats);
1287 +       return tmd;
1288 +}
1289 +
1290 +static inline void eip_tmd_put(EIP_TMD * tmd)
1291 +{
1292 +       unsigned long flags;
1293 +
1294 +       tmd->skb = NULL;
1295 +
1296 +       spin_lock_irqsave(&eip_tx->lock, flags);
1297 +       tmd->chain.next = tmd->head->tmd;
1298 +       tmd->head->tmd = tmd;
1299 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
1300 +       atomic_inc(&tmd->head->stats);
1301 +
1302 +       eip_start_queue();
1303 +
1304 +       EIP_DBG_PRINTF(EIP_DBG_TMD_QUEUE, "TMD [%p] : REQUEUED\n", tmd);
1305 +}
1306 +static inline void eip_tmd_load(EIP_TMD * tmd)
1307 +{
1308 +       EP_RAILMASK rmask = tx_railmask;
1309 +
1310 +       __eip_tmd_load(tmd, &rmask);
1311 +       
1312 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : LOADED\n", tmd);
1313 +}
1314 +static inline void eip_tmd_unload(EIP_TMD * tmd)
1315 +{
1316 +       __eip_tmd_unload(tmd);
1317 +       
1318 +       EIP_DBG_PRINTF(EIP_DBG_EP_DVMA, "TMD [%p] : UNLOADED\n", tmd);
1319 +}
1320 +static inline void eip_tmd_free(EIP_TMD * tmd)
1321 +{
1322 +       eip_buff_free(tmd->dma_base, tmd->nmd.nmd_len);
1323 +       
1324 +       EIP_DBG_PRINTF(EIP_DBG_MEMFREE, "TMD [%p] : FREED\n", tmd);
1325 +       
1326 +       EIP_STAT_ALLOC_SUB(&tmd->head->stats, 1);
1327 +}
1328 +
1329 +/* tmd on a separate block */
1330 +static inline EIP_TMD *eip_tmd_alloc_queue(EIP_TMD * tmd, EIP_TMD_HEAD * head, int dvma_idx)
1331 +{
1332 +       eip_tmd_init(tmd, 0, head, -1, dvma_idx);
1333 +
1334 +       eip_tmd_put(tmd);
1335 +
1336 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1337 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1338 +       return tmd;
1339 +}
1340 +/* tmd on the buffer */
1341 +static inline EIP_TMD *eip_tmd_alloc_queue_copybreak(EIP_TMD_HEAD * head, int dvma_idx)
1342 +{
1343 +       EIP_TMD *tmd;
1344 +       unsigned long buff_base;
1345 +
1346 +       if (!(buff_base = eip_buff_alloc(tx_copybreak_max + sizeof(EIP_TMD), GFP_KERNEL)))
1347 +               return NULL;
1348 +
1349 +       tmd = (EIP_TMD *) (buff_base + tx_copybreak_max);
1350 +       eip_tmd_init(tmd, buff_base, head, tx_copybreak_max, dvma_idx);
1351 +
1352 +       eip_tmd_put(tmd);
1353 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1354 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1355 +       return tmd;
1356 +}
1357 +
1358 +/* ipf are on the buffer */
1359 +static inline EIP_TMD *eip_tmd_alloc_queue_aggreg(EIP_TMD_HEAD * head, int dvma_idx)
1360 +{
1361 +       EIP_TMD *tmd;
1362 +       unsigned long buff_base;
1363 +
1364 +       if (!(buff_base = eip_buff_alloc(EIP_SVC_BIGGEST_LEN, GFP_KERNEL)))
1365 +               return NULL;
1366 +
1367 +       tmd = (EIP_TMD *) (buff_base + EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG));
1368 +       eip_tmd_init(tmd, buff_base, head, EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG), dvma_idx);
1369 +
1370 +       eip_tmd_put(tmd);
1371 +       EIP_STAT_ALLOC_ADD(&tmd->head->stats, 1);
1372 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1373 +       return tmd;
1374 +}
1375 +
1376 +static int eip_tmds_alloc()
1377 +{
1378 +       int i;
1379 +       int page_nr;
1380 +       EIP_TMD *tmd;
1381 +
1382 +       page_nr = EIP_DVMA_PAGES(tx_copybreak_max);
1383 +
1384 +       eip_tx->head[EIP_TMD_COPYBREAK].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1385 +       
1386 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_COPYBREAK]);
1387 +
1388 +       for (i = 0; i < EIP_TMD_NR; i++) {
1389 +               if (!eip_tmd_alloc_queue_copybreak(&eip_tx->head[EIP_TMD_COPYBREAK], i * page_nr))
1390 +                       return -ENOMEM;
1391 +       }
1392 +
1393 +       eip_tx->head[EIP_TMD_STD].handle =
1394 +           eip_dma_reserve(EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN) * eip_tx->tmd_max_nr, EP_PERM_READ);
1395 +       
1396 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_STD]);
1397 +
1398 +       tmd = kmalloc(sizeof(EIP_TMD) * EIP_TMD_NR, GFP_KERNEL);
1399 +       if (!tmd) {
1400 +               EIP_ERR_PRINTF("Cannot ALLOCATE %d of tmds\n", (int) sizeof(EIP_TMD) * EIP_TMD_NR);
1401 +               return -ENOMEM;
1402 +       }
1403 +       
1404 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1405 +       
1406 +       for (i = 0; i < EIP_TMD_NR; i++, tmd++) {
1407 +               if (!eip_tmd_alloc_queue(tmd, &eip_tx->head[EIP_TMD_STD], i * page_nr))
1408 +                       return -ENOMEM;
1409 +       }
1410 +
1411 +       page_nr = EIP_DVMA_PAGES(EIP_SVC_BIGGEST_LEN);
1412 +
1413 +       eip_tx->head[EIP_TMD_AGGREG].handle = eip_dma_reserve(page_nr * eip_tx->tmd_max_nr, EP_PERM_READ);
1414 +       EIP_DBG(EIP_DBG_TMD_HEAD, eip_tmd_head_display, &eip_tx->head[EIP_TMD_AGGREG]);
1415 +
1416 +       for (i = 0; i < EIP_TMD_NR; i++) {
1417 +               if (!eip_tmd_alloc_queue_aggreg(&eip_tx->head[EIP_TMD_AGGREG], i * page_nr))
1418 +                       return -ENOMEM;
1419 +       }
1420 +       return 0;
1421 +}
1422 +
1423 +static void eip_tmds_free(void) 
1424 +{
1425 +       EIP_TMD *tmd;
1426 +       EIP_TMD *tmd_next;
1427 +       int i;
1428 +       
1429 +       ep_poll_transmits(eip_tx->xmtr);
1430 +
1431 +       for (i = 0 ; i < 3 ; i++) {
1432 +again:
1433 +               if (EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats) < EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats)) {
1434 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "Polling XMTR [%p]\n", eip_tx->xmtr);       
1435 +                       ep_poll_transmits(eip_tx->xmtr);
1436 +                       goto again;
1437 +               }
1438 +       }
1439 +       /* everything should be queued */
1440 +        if ((tmd = eip_tx->head[EIP_TMD_COPYBREAK].tmd)) {
1441 +            do {
1442 +                       tmd_next = tmd->chain.next;
1443 +                        eip_tmd_unload(tmd);
1444 +                       
1445 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1446 +                       
1447 +                        eip_tmd_free(tmd);
1448 +            } while (tmd_next && (tmd = tmd_next));
1449 +        }
1450 +       
1451 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_COPYBREAK] release DVMA [%p]\n",
1452 +                       eip_tx->head[EIP_TMD_COPYBREAK].handle);        
1453 +       
1454 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_COPYBREAK].handle);
1455 +       
1456 +       /* these ones have been allocated as a block */
1457 +       if ((tmd = eip_tx->head[EIP_TMD_STD].tmd)) {
1458 +               do {
1459 +                       if (tmd->dvma_idx == 0 ) {
1460 +                               kfree(tmd);
1461 +                               /* eip_tmd_free(tmd); */
1462 +                               EIP_STAT_ALLOC_SUB(&tmd->head->stats, EIP_TMD_NR);
1463 +                               tmd_next = NULL;
1464 +                               EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_STD] BLOCK FREED\n", tmd); 
1465 +                       } else 
1466 +                               tmd_next = tmd->chain.next;
1467 +               } while (tmd_next && (tmd = tmd_next));
1468 +       }
1469 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "HEAD[EIP_TMD_STD] release DVMA [%p]\n", 
1470 +                       eip_tx->head[EIP_TMD_STD].handle);      
1471 +       
1472 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_STD].handle);
1473 +       
1474 +       if ((tmd = eip_tx->head[EIP_TMD_AGGREG].tmd)) {
1475 +               do {
1476 +                       tmd_next = tmd->chain.next;
1477 +
1478 +                       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1479 +                       
1480 +                       eip_tmd_unload(tmd);
1481 +                       eip_tmd_free(tmd);
1482 +               } while (tmd_next && (tmd = tmd_next));
1483 +       }
1484 +       EIP_DBG_PRINTF(EIP_DBG_TMD_EP_DVMA, "TMD HEAD[%p] : [EIP_TMD_AGGREG] release DVMA\n", 
1485 +                       eip_tx->head[EIP_TMD_AGGREG].handle);   
1486 +       
1487 +        ep_dvma_release(eip_tx->ep_system, eip_tx->head[EIP_TMD_AGGREG].handle);
1488 +
1489 +       ep_free_xmtr(eip_tx->xmtr);
1490 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "XMTR[%p] : FREED\n", eip_tx->xmtr);
1491 +}
1492 +
1493 +static inline void eip_ipf_skb_add(EIP_IPFRAG * ipf, struct sk_buff *skb)
1494 +{
1495 +       int align = EIP_IP_ALIGN(skb->len);
1496 +       
1497 +       
1498 +       if (ipf->dma_len == -1) {       /* first fragment added to this ipf: initialise per-datagram state */
1499 +               do_gettimeofday(&ipf->timestamp);
1500 +               /* FIXME: move this initialisation into the tmd release code */
1501 +               ipf->frag_nr            = 0;
1502 +               ipf->dma_len            = 0;
1503 +               ipf->datagram_len       = -1;
1504 +               ipf->dma_correction     = 0;
1505 +       }
1506 +       
1507 +       memcpy((void *) (ipf->dma_base + ipf->dma_len), skb->data, skb->len);
1508 +       
1509 +       if (ipf->datagram_len == -1) {
1510 +               struct iphdr * iph = skb->nh.iph;
1511 +               int offset = ntohs(iph->frag_off);
1512 +
1513 +               /* last fragment? (offset & ~IP_OFFSET) holds the IP fragment flags */
1514 +               if (((offset & ~IP_OFFSET) & IP_MF) == 0) {
1515 +                       offset &= IP_OFFSET;
1516 +                       offset <<= 3;    
1517 +                       ipf->datagram_len = offset + htons(iph->tot_len) - sizeof(struct iphdr);
1518 +               }
1519 +       }
1520 +
1521 +       skb->next                       = ipf->skb;
1522 +       ipf->skb                        = skb;
1523 +       ipf->payload.Data[ipf->frag_nr] = skb->len;
1524 +       ipf->dma_len                   += align;
1525 +       ipf->dma_correction            += align - skb->len  + ETH_HLEN + sizeof(struct iphdr);
1526 +       /* FIXME: this count goes wrong if the IP header has options */
1527 +
1528 +       ipf->frag_nr++;
1529 +
1530 +       EIP_DBG2(EIP_DBG_TMD, eip_ipf_display, ipf, "ADDED skb[%p] len %db ALIGNED(%db)\n", skb, skb->len, EIP_IP_ALIGN(skb->len));
1531 +}
1532 +
1533 +#define eip_ipf_hasroom(ipf, skb) ((ipf->dma_len + EIP_IP_ALIGN(skb->len) < eip_tx->sysctl_ipfrag_copybreak))
1534 +int eip_hard_start_xmit(struct sk_buff *skb, struct net_device *devnet) 
1535 +{
1536 +
1537 +       EIP_TMD *tmd;
1538 +       EP_NMD nmd;
1539 +       struct iphdr *iph;
1540 +       int j;
1541 +
1542 +       if (skb->destructor){
1543 +               atomic_inc(&eip_tx->destructor);
1544 +               tasklet_schedule(&eip_tx->tasklet);
1545 +       } 
1546 +
1547 +       if (!(iph = eip_ipfrag_get(skb->data)) || (eip_tx->sysctl_aggregation == 0)) { /* not ip fragment */
1548 +no_aggreg:
1549 +               j = (skb->len < eip_tx->sysctl_copybreak) ? EIP_TMD_COPYBREAK : EIP_TMD_STD; /* j = head id */
1550 +               
1551 +               if (!(tmd = eip_tmd_get(j))) {
1552 +                       if (skb->destructor)
1553 +                               atomic_dec(&eip_tx->destructor);
1554 +                       return 1;
1555 +               }
1556 +               
1557 +               tmd->dma_len    = skb->len;
1558 +               tmd->skb        = skb;
1559 +               tmd->skb->next  = NULL;
1560 +               tmd->chain.next = NULL;
1561 +               
1562 +               if (j == EIP_TMD_COPYBREAK) {
1563 +                       memcpy((void *) tmd->dma_base, skb->data, skb->len);
1564 +                       
1565 +                       ep_nmd_subset(&nmd, &tmd->nmd, 0, skb->len);
1566 +#ifdef EIP_MORE_STATS
1567 +                       eip_tx->sent_copybreak++;
1568 +#endif
1569 +                       return eip_do_xmit(tmd, &nmd, NULL);
1570 +               }
1571 +               tmd->dma_base           = (unsigned long) skb->data;
1572 +               tmd->nmd.nmd_len        = skb->len;
1573 +               eip_tmd_load(tmd);
1574 +
1575 +#ifdef EIP_MORE_STATS
1576 +               eip_tx->sent_std++;
1577 +#endif
1578 +               return eip_do_xmit(tmd, &tmd->nmd, NULL);
1579 +       } else if ( skb->len > EIP_SVC_BIGGEST_LEN/2 ) { 
1580 +               /* don't aggregate when we have a full mtu of data */
1581 +               /* or more than 32k ; in this case it is cheaper   */
1582 +               /* to just map the buffer and send it              */
1583 +               goto no_aggreg;
1584 +       } else {
1585 +               EIP_IPFRAG *ipf = NULL;
1586 +               unsigned long flags;
1587 +               struct list_head *l;
1588 +               struct iphdr *iph2;
1589 +               int i;
1590 +               __u16 id = iph->id;
1591 +               __u32 saddr = iph->saddr;
1592 +               __u32 daddr = iph->daddr;
1593 +               __u8 protocol = iph->protocol;
1594 +
1595 +                       EIP_DBG(EIP_DBG_IPH, eip_iph_display, iph);
1596 +
1597 +               j = 0;
1598 +
1599 +               /* here we can't have full mtu size aggregated packet */
1600 +               EIP_ASSERT_RET(skb->len < eip_tx->sysctl_ipfrag_copybreak, 0);
1601 +
1602 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1603 +               list_for_each(l, &eip_tx->ipfrag) {
1604 +                       ipf = list_entry(l, EIP_IPFRAG, list);
1605 +                       iph2 = eip_ipfrag_get((char *) ipf->dma_base);
1606 +                       
1607 +                        EIP_ASSERT(iph2);
1608 +                       
1609 +                       if ((iph2->id == id) && 
1610 +                                       (get_unaligned(&iph2->saddr) == saddr) && 
1611 +                                       (get_unaligned(&iph2->daddr) == daddr) && 
1612 +                                       (iph2->protocol == protocol)) {
1613 +                               /* || timeout */
1614 +                               if (eip_ipf_hasroom(ipf, skb)) {
1615 +                                       
1616 +                                       eip_ipf_skb_add(ipf, skb);
1617 +                                       
1618 +                                       if ((ipf->datagram_len != -1) && 
1619 +                                                       (ipf->dma_len == (ipf->datagram_len + ipf->dma_correction) || 
1620 +                                                        ipf->frag_nr == (128 / sizeof(uint32_t)))) {
1621 +send_aggreg:
1622 +                                               ipf->payload.Data[ipf->frag_nr] = 0;
1623 +                                               list_del(&ipf->list);
1624 +                                               eip_tx->ipfrag_count--;
1625 +                                               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1626 +                                       
1627 +                                               ep_nmd_subset(&nmd, &ipf->nmd, 0, ipf->dma_len);
1628 +                                               
1629 +#ifdef EIP_MORE_STATS
1630 +                                               eip_tx->sent_aggreg++;
1631 +#endif
1632 +                                               if ((i = eip_do_xmit((EIP_TMD *) ipf, &nmd, &ipf->payload)) != EP_SUCCESS)
1633 +                                                       return i;
1634 +                                               if (j)
1635 +                                                       goto new;
1636 +                                               return 0;
1637 +                                       }
1638 +                                       
1639 +                                       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1640 +                                       tasklet_schedule(&eip_tx->tasklet);
1641 +                                       return 0;
1642 +                               } else {
1643 +                                       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF[%p] : FULL %db full - sending it\n", ipf, ipf->dma_len);
1644 +                                       j = 1;
1645 +                                       goto send_aggreg;
1646 +                               }
1647 +                       }
1648 +               }
1649 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1650 +new:
1651 +               if (!(ipf = (EIP_IPFRAG *) eip_tmd_get(EIP_TMD_AGGREG)))
1652 +                       goto no_aggreg;
1653 +
1654 +               eip_ipf_skb_add(ipf, skb);
1655 +               
1656 +               spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1657 +               list_add_tail(&ipf->list, &eip_tx->ipfrag);
1658 +               eip_tx->ipfrag_count++;
1659 +               spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1660 +               tasklet_schedule(&eip_tx->tasklet);
1661 +       }
1662 +       return 0;
1663 +}
1664 +static int eip_do_xmit(EIP_TMD * tmd, EP_NMD *nmd, EP_PAYLOAD *payload)
1665 +{
1666 +       EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
1667 +       int         attr = EP_SET_DATA((EP_NO_SLEEP | EP_NO_INTERRUPT | EP_NO_FAILOVER), EP_TYPE_SVC_INDICATOR, EP_SVC_EIP);
1668 +       unsigned long flags;
1669 +       int svc, rnum;
1670 +
1671 +       SIZE_TO_SVC(nmd->nmd_len, svc);
1672 +
1673 +       EIP_DBG(EIP_DBG_TMD, eip_tmd_display, tmd);
1674 +       /* EIP_DBG(eip_eiph_display(eiph)); */
1675 +       
1676 +       if (unlikely (eiph->h_dhost.ip_bcast))
1677 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_availrails(eip_tx->xmtr));
1678 +       else
1679 +               rnum = ep_pickRail (EP_NMD_RAILMASK (nmd) & tx_railmask & ep_xmtr_noderails(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr)));
1680 +
1681 +       if (rnum >= 0)
1682 +               attr = EP_SET_PREFRAIL(attr, rnum);
1683 +
1684 +       /* add to inuse list  */
1685 +       spin_lock_irqsave (&eip_tx->lock, flags);
1686 +       list_add_tail (&tmd->chain.link, &eip_tx->inuse);
1687 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1688 +
1689 +       /* ENOMEM EINVAL ECONNREFUSED ESUCCESS */
1690 +       svc = (unlikely(eiph->h_dhost.ip_bcast)) ? 
1691 +               ep_multicast_message(eip_tx->xmtr, -1, -1, NULL, EIP_SVC_EP(svc), attr | EP_NOT_MYSELF, eip_txhandler, tmd, payload, nmd, 1) :
1692 +
1693 +               ep_transmit_message(eip_tx->xmtr, ntohs(eiph->h_dhost.ip_addr), EIP_SVC_EP(svc),  attr, eip_txhandler, tmd, payload, nmd, 1);
1694 +               
1695 +       if (likely(svc == EP_SUCCESS))
1696 +               return 0;
1697 +       else if (svc == ENOMEM) {
1698 +               EIP_ERR_PRINTF("%s", "Memory allocation error ...\n");
1699 +               eip_tx->errors++;
1700 +       }
1701 +       else
1702 +       {
1703 +               /* EP_EINVAL occurs when the svc has a bad value or the iovec has too many frag; */
1704 +               /* we don't use the latter option here                                        */
1705 +               __EIP_DBG_PRINTF("TMD [%p] : DROPPED skb[%p] status = %d from ep_?_message\n", tmd, tmd->skb, svc);
1706 +
1707 +               eip_tx->dropped++;
1708 +       }
1709 +
1710 +       eip_txhandler(NULL, tmd, -99);
1711 +
1712 +       /* Quadrics GNAT sw-elan/4397 - since we will "never" be able to send this packet to the */
1713 +       /* destination node, we drop it and feign success - this has the same behaviour as an    */
1714 +       /* ethernet where it sticks the packet on the wire, but no-one receives it.              */
1715 +       return 0;
1716 +}
1717 +
1718 +static void eip_txhandler(EP_TXD * txd, void *arg, EP_STATUS status)
1719 +{
1720 +       EIP_TMD *tmd = (EIP_TMD *) arg;
1721 +       struct sk_buff *skb_next;
1722 +       unsigned long flags;
1723 +       int svc = 0;
1724 +       
1725 +       if (likely(status == EP_SUCCESS)) {
1726 +               SIZE_TO_SVC(tmd->dma_len, svc);
1727 +               eip_tx->dma[svc]++;
1728 +               eip_tx->bytes += tmd->dma_len;
1729 +               
1730 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1731 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1732 +                       eip_tx->packets += ipf->frag_nr;
1733 +               } else
1734 +                       eip_tx->packets++;
1735 +       } else {
1736 +               if (tmd->head == &eip_tx->head[EIP_TMD_AGGREG]) {
1737 +                       EIP_IPFRAG *ipf = (EIP_IPFRAG *) tmd;
1738 +                       eip_tx->dropped += ipf->frag_nr;
1739 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler aggreg packet dropped status = %d\n", status);
1740 +               } else  {
1741 +                       eip_tx->dropped++;
1742 +                       EIP_DBG_PRINTF(EIP_DBG_TMD, "txhandler packet dropped status = %d\n", status);
1743 +               }
1744 +       }
1745 +
1746 +       if (tmd->head == &eip_tx->head[EIP_TMD_STD]) {
1747 +               eip_tmd_unload(tmd);
1748 +               tmd->dma_base = 0;
1749 +               tmd->nmd.nmd_len = -1;
1750 +       }
1751 +               
1752 +       tmd->dma_len = -1;
1753 +       
1754 +       svc = 0;
1755 +       while (tmd->skb) {
1756 +               svc++;
1757 +               
1758 +               if (tmd->skb->destructor)
1759 +                       atomic_dec(&eip_tx->destructor);
1760 +
1761 +               skb_next = tmd->skb->next;
1762 +               dev_kfree_skb_any(tmd->skb);
1763 +               tmd->skb = skb_next;
1764 +       }
1765 +       EIP_DBG_PRINTF(EIP_DBG_TMD, "IPF/TMD [%p] : %d skb RELEASE/FREED\n", tmd, svc);
1766 +
1767 +       /* remove from inuse list  */
1768 +       spin_lock_irqsave (&eip_tx->lock, flags);
1769 +       list_del (&tmd->chain.link);
1770 +       spin_unlock_irqrestore (&eip_tx->lock, flags);
1771 +
1772 +       eip_tmd_put(tmd);
1773 +}
1774 +
1775 +static void eip_tx_tasklet(unsigned long arg)
1776 +{
1777 +       struct timeval now;
1778 +       unsigned long flags;
1779 +       EIP_IPFRAG *ipf, *ipfq = NULL;
1780 +       EP_NMD nmd;
1781 +       struct list_head *list;
1782 +       struct list_head *tmp;
1783 +       char resched = 0;
1784 +       char poll = 1;
1785 +       
1786 +       do_gettimeofday(&now);
1787 +       
1788 +       spin_lock_irqsave(&eip_tx->ipfraglock, flags);
1789 +       if (eip_tx->ipfrag_count) {
1790 +               list_for_each_safe(list, tmp, &eip_tx->ipfrag) {
1791 +                       ipf = list_entry(list, EIP_IPFRAG, list);
1792 +                       /* delta = (((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - ipf->timestamp.tv_usec; */
1793 +                       if (((((now.tv_sec - ipf->timestamp.tv_sec) * 1000000UL) + now.tv_usec) - 
1794 +                                       ipf->timestamp.tv_usec) >= (1000UL * eip_tx->sysctl_ipfrag_to)) {
1795 +                               list_del(&ipf->list);
1796 +                               eip_tx->ipfrag_count--;
1797 +                               ipf->chain.next = (EIP_TMD *) ipfq;
1798 +                               ipfq = ipf;
1799 +                       }
1800 +               }
1801 +       }
1802 +       if (eip_tx->ipfrag_count)
1803 +               resched = 1;
1804 +       spin_unlock_irqrestore(&eip_tx->ipfraglock, flags);
1805 +
1806 +       while (ipfq) {
1807 +               poll = 0;
1808 +
1809 +               ep_nmd_subset(&nmd, &ipfq->nmd, 0, ipfq->dma_len);
1810 +               
1811 +               ipfq->payload.Data[ipfq->frag_nr] = 0;
1812 +               
1813 +#ifdef EIP_MORE_STATS
1814 +               eip_tx->sent_aggreg++;
1815 +#endif
1816 +               ipf = (EIP_IPFRAG *) ipfq->chain.next;
1817 +               eip_do_xmit((EIP_TMD *) ipfq, &nmd, &ipfq->payload);
1818 +               ipfq = ipf;
1819 +       }
1820 +       
1821 +       if (poll)
1822 +               ep_poll_transmits(eip_tx->xmtr);
1823 +
1824 +       if (atomic_read(&eip_tx->destructor) || resched )
1825 +               tasklet_schedule(&eip_tx->tasklet);
1826 +}
1827 +void eip_start_queue()
1828 +{
1829 +       if (netif_queue_stopped(eip_tx->net_device)) {
1830 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Waking up %s queue\n", eip_tx->net_device->name);
1831 +               netif_wake_queue(eip_tx->net_device);
1832 +       }
1833 +}
1834 +void eip_stop_queue()
1835 +{
1836 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Stopping %s queue\n", eip_tx->net_device->name);
1837 +       netif_stop_queue(eip_tx->net_device);
1838 +}
1839 +
1840 +static int eip_open(struct net_device *devnet)
1841 +{
1842 +       if (devnet->flags & IFF_PROMISC)
1843 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s entering in promiscuous mode\n", devnet->name);
1844 +
1845 +       netif_start_queue(devnet);
1846 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x up\n",
1847 +                       devnet->name, (devnet->dev_addr[0]) & 0xff,
1848 +                       (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1849 +                       (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1850 +       return 0;
1851 +}
1852 +
1853 +static int eip_close(struct net_device *devnet)
1854 +{
1855 +       if (devnet->flags & IFF_PROMISC)
1856 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "%s leaving promiscuous mode\n", devnet->name);
1857 +
1858 +       netif_stop_queue(devnet);
1859 +
1860 +       eip_rx_tasklet(0);
1861 +
1862 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x down\n", 
1863 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
1864 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
1865 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
1866 +       return 0;
1867 +}
1868 +
1869 +static struct net_device_stats *eip_get_stats(struct net_device *devnet)
1870 +{
1871 +       static struct net_device_stats stats;
1872 +
1873 +       stats.rx_packets = eip_rx->packets;
1874 +       stats.rx_bytes = eip_rx->bytes;
1875 +       stats.rx_errors = eip_rx->errors;
1876 +       stats.rx_dropped = eip_rx->dropped;
1877 +
1878 +       stats.tx_packets = eip_tx->packets;
1879 +       stats.tx_bytes = eip_tx->bytes;
1880 +       stats.tx_errors = eip_tx->errors;
1881 +       stats.tx_dropped = eip_tx->dropped;
1882 +       return &stats;
1883 +}
1884 +
1885 +static int eip_change_mtu(struct net_device *devnet, int mtu)
1886 +{
1887 +       if (mtu <= EIP_MTU_MAX) {
1888 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "MTU size changed from %d to %d\n", devnet->mtu, mtu);
1889 +               devnet->mtu = mtu;
1890 +       }
1891 +       return 0;
1892 +}
1893 +
1894 +#ifdef MODULE
1895 +int eip_init(void)
1896 +{
1897 +       struct net_device *devnet;
1898 +       int errno = 0;
1899 +
1900 +       eip_rx_dropping = 0; 
1901 +       eip_rx_tasklet_locked = 1;
1902 +
1903 +       /* timer up but not started */
1904 +       init_timer (&eip_rx_tasklet_timer);
1905 +       eip_rx_tasklet_timer.function = eip_rx_tasklet_resched;
1906 +       eip_rx_tasklet_timer.data     = (unsigned long) 0;
1907 +       eip_rx_tasklet_timer.expires  = lbolt + hz;
1908 +
1909 +       devnet = alloc_etherdev(sizeof(EIP_RX) + sizeof(EIP_TX));
1910 +       if (!devnet) {
1911 +               EIP_ERR_PRINTF("Unable to ALLOCATE etherdev structure\n");
1912 +               return -ENOMEM;
1913 +       }
1914 +       strcpy (devnet->name, "eip0");
1915 +
1916 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "Enabling aggregation code\n");
1917 +       devnet->change_mtu = eip_change_mtu;
1918 +       devnet->mtu = EIP_MTU_MAX;
1919 +       devnet->open = eip_open;
1920 +       devnet->stop = eip_close;
1921 +       devnet->hard_start_xmit = eip_hard_start_xmit;
1922 +       devnet->get_stats = eip_get_stats;
1923 +
1924 +        /* devnet->features |= (NETIF_F_DYNALLOC); */
1925 +        /* devnet->features = (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA); */
1926 +        /* devnet->features |= (NETIF_F_SG|NETIF_F_FRAGLIST|NETIF_F_HIGHDMA|NETIF_F_HW_CSUM); */
1927 +
1928 +       eip_rx = (EIP_RX *) devnet->priv;
1929 +       eip_tx = (EIP_TX *) (eip_rx + 1);
1930 +
1931 +       /* instance 0 */
1932 +       eip_tx->ep_system = ep_system();
1933 +       if (eip_tx->ep_system == NULL) {
1934 +               EIP_ERR_PRINTF("kernel comms for iface %s does not exist\n", devnet->name);
1935 +               errno = -ENXIO;
1936 +               goto out;
1937 +       }
1938 +       if (ep_waitfor_nodeid(eip_tx->ep_system) == ELAN_INVALID_NODE) {
1939 +               EIP_ERR_PRINTF("network position not found\n");
1940 +               errno = -EAGAIN;
1941 +               goto out;
1942 +       }
1943 +       eip_tx->xmtr = ep_alloc_xmtr(eip_tx->ep_system);
1944 +       if (!eip_tx->xmtr) {
1945 +               EIP_ERR_PRINTF("Cannot create allocated transmitter - maybe cable is disconnected\n");
1946 +               errno = -EAGAIN;
1947 +               goto out;
1948 +       }
1949 +       /* assign MAC address */
1950 +       *((int *) &devnet->dev_addr[4]) = htons(ep_nodeid(eip_tx->ep_system));
1951 +       eip_rx->net_device = devnet;
1952 +       eip_tx->net_device = devnet;
1953 +
1954 +       atomic_set(&eip_tx->destructor, 0);
1955 +
1956 +       if ((tmd_max >= EIP_TMD_MIN_NR) && (tmd_max <= EIP_TMD_MAX_NR)) {
1957 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tmd_max_nr to %d\n", tmd_max);
1958 +               eip_tx->tmd_max_nr = tmd_max;
1959 +       } else {
1960 +               EIP_ERR_PRINTF("parameter error : %d <= tmd_max(%d) <= %d using default %d\n", 
1961 +                               EIP_TMD_MIN_NR, tmd_max, EIP_TMD_MAX_NR, EIP_TMD_MAX_NR);
1962 +               eip_tx->tmd_max_nr = EIP_TMD_MAX_NR;
1963 +       }
1964 +
1965 +       if ((rmd_max >= EIP_RMD_MIN_NR) && (rmd_max <= EIP_RMD_MAX_NR)) {
1966 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting rmd_max_nr to %d\n", rmd_max);
1967 +               eip_rx->rmd_max_nr = rmd_max;
1968 +       } else {
1969 +               EIP_ERR_PRINTF("parameter error : %d <= rmd_max(%d) <= %d using default %d\n", EIP_RMD_MIN_NR,
1970 +                          rmd_max, EIP_RMD_MAX_NR, EIP_RMD_MAX_NR);
1971 +               eip_rx->rmd_max_nr = EIP_RMD_MAX_NR;
1972 +       }
1973 +
1974 +       if ((rx_envelope_nr > 0) && (rx_envelope_nr <= 1024)) { /* > 1024 don't be silly */
1975 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting rx_envelope_nr to %d\n", rx_envelope_nr);
1976 +       } else {
1977 +               EIP_ERR_PRINTF("parameter error : 0 < rx_envelope_nr(%d) <= 1024 using default %d\n",
1978 +                          rx_envelope_nr, EIP_RX_ENVELOPE_NR);
1979 +               rx_envelope_nr = EIP_RX_ENVELOPE_NR;
1980 +       }
1981 +
1982 +       if (tx_copybreak_max <= EIP_TX_COPYBREAK_MAX) {
1983 +               EIP_DBG_PRINTF(EIP_DBG_GEN, "Setting tx_copybreak_max to %d\n", tx_copybreak_max);
1984 +       } else {
1985 +               EIP_ERR_PRINTF("parameter error : tx_copybreak_max > %d using default %d\n",
1986 +                          EIP_TX_COPYBREAK_MAX, EIP_TX_COPYBREAK_MAX);
1987 +               tx_copybreak_max = EIP_TX_COPYBREAK_MAX;
1988 +       }
1989 +#ifdef EIP_MORE_STATS
1990 +       eip_tx->sent_copybreak = 0;
1991 +       eip_tx->sent_std = 0;
1992 +       eip_tx->sent_aggreg = 0;
1993 +#endif
1994 +
1995 +       eip_tx->ipfrag_count = 0;
1996 +       eip_aggregation_set(1);
1997 +       eip_rx_granularity_set(rx_granularity);
1998 +       eip_tx_copybreak_set(EIP_TX_COPYBREAK);
1999 +       eip_ipfrag_to_set(EIP_IPFRAG_TO);
2000 +       eip_ipfrag_copybreak_set(EIP_IPFRAG_COPYBREAK);
2001 +
2002 +       spin_lock_init(&eip_tx->lock);
2003 +       spin_lock_init(&eip_tx->ipfraglock);
2004 +       spin_lock_init(&eip_rx->lock);
2005 +       tasklet_init(&eip_rx->tasklet, eip_rx_tasklet, 0);
2006 +       tasklet_init(&eip_tx->tasklet, eip_tx_tasklet, 0);
2007 +       INIT_LIST_HEAD(&eip_tx->ipfrag);
2008 +       INIT_LIST_HEAD(&eip_tx->inuse);
2009 +
2010 +       /* if we fail here cannot do much yet; waiting for rcvr remove code in ep. */
2011 +       errno = eip_tmds_alloc();
2012 +       if (errno)
2013 +               goto out;
2014 +
2015 +       errno = eip_rmds_alloc();
2016 +       if (errno)
2017 +               goto out;
2018 +
2019 +       errno = eip_stats_init();
2020 +       if (errno)
2021 +               goto out;
2022 +
2023 +       if (ep_svc_indicator_set(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
2024 +               EIP_ERR_PRINTF("Cannot set the service indicator\n");
2025 +               errno = -EINVAL;
2026 +               goto out;
2027 +       }
2028 +
2029 +       eip_rx_tasklet_locked = 0;
2030 +       tasklet_schedule(&eip_rx->tasklet);
2031 +
2032 +       SET_MODULE_OWNER(eip_tx->net_device);
2033 +
2034 +       if (register_netdev(devnet)) {
2035 +               printk("eip: failed to register netdev\n"); errno = -ENODEV;
2036 +               goto out;
2037 +       }
2038 +
2039 +       EIP_DBG_PRINTK(EIP_DBG_GEN, "iface %s MAC %02x:%02x:%02x:%02x:%02x:%02x ready\n", 
2040 +               devnet->name, (devnet->dev_addr[0]) & 0xff,
2041 +               (devnet->dev_addr[1]) & 0xff, (devnet->dev_addr[2]) & 0xff, (devnet->dev_addr[3]) & 0xff,
2042 +               (devnet->dev_addr[4]) & 0xff, (devnet->dev_addr[5]) & 0xff);
2043 +
2044 +       return 0;
2045 +      out:
2046 +       unregister_netdev(devnet);
2047 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 25)
2048 +       kfree(devnet);
2049 +#else
2050 +       free_netdev(devnet);
2051 +#endif
2052 +
2053 +       return errno;
2054 +}
2055 +void eip_exit(void)
2056 +{
2057 +       int i;
2058 +
2059 +       eip_rx_dropping = 1;                /* means that new messages wont be sent to tcp stack */
2060 +       eip_rx_tasklet_locked = 1;
2061 +
2062 +       netif_stop_queue(eip_tx->net_device);
2063 +
2064 +       if (ep_svc_indicator_clear(eip_tx->ep_system, EP_SVC_EIP) != EP_SUCCESS) {
2065 +               EIP_ERR_PRINTF("Cannot unset the service indicator\n");
2066 +       }
2067 +
2068 +       schedule_timeout(10);
2069 +       
2070 +       del_timer_sync (&eip_rx_tasklet_timer);
2071 +
2072 +       tasklet_disable(&eip_rx->tasklet);
2073 +       tasklet_disable(&eip_tx->tasklet);
2074 +
2075 +       tasklet_kill(&eip_tx->tasklet);
2076 +       tasklet_kill(&eip_rx->tasklet);
2077 +
2078 +        eip_rmds_free();
2079 +        eip_tmds_free();
2080 +
2081 +       /* that things freed */
2082 +       for (i = 0 ; i < EIP_SVC_NR ; i++) {
2083 +               if ( EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats) != 0 )
2084 +                       EIP_ERR_PRINTF("%d RMDs not FREED on SVC[%d]\n", EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats), i);
2085 +       }
2086 +       for (i = 0 ; i < 3 ; i++) {
2087 +               if ( EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats) != 0 )
2088 +                       EIP_ERR_PRINTF("%d TMDs not freed on TX HEAD[%d]\n", EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats), i);
2089 +               
2090 +       }
2091 +       unregister_netdev(eip_tx->net_device);
2092 +       kfree(eip_tx->net_device);
2093 +       
2094 +       eip_stats_cleanup();
2095 +}
2096 +
2097 +module_init(eip_init);
2098 +module_exit(eip_exit);
2099 +
2100 +module_param(eipdebug, uint, 0);
2101 +MODULE_PARM_DESC(eipdebug, "Set debug flags");
2102 +
2103 +module_param(rx_envelope_nr, uint, 0);
2104 +MODULE_PARM_DESC(rx_envelope_nr, "Number of allocated envelopes on the rx side");
2105 +
2106 +module_param(tx_copybreak_max, uint, 0);
2107 +MODULE_PARM_DESC(tx_copybreak_max, "Maximum size of the tx copybreak limit (default 512)");
2108 +
2109 +module_param(tmd_max, uint, 0);
2110 +module_param(rmd_max, uint, 0);
2111 +MODULE_PARM_DESC(tmd_max, "Maximum number of transmit buffers (default 64)");
2112 +MODULE_PARM_DESC(rmd_max, "Maximum number of receive buffers (default 64)");
2113 +
2114 +module_param(tx_railmask, ushort, 0);
2115 +MODULE_PARM_DESC(tx_railmask, "Mask of which rails transmits can be queued on");
2116 +
2117 +MODULE_AUTHOR("Quadrics Ltd.");
2118 +MODULE_DESCRIPTION("Elan IP driver");
2119 +MODULE_LICENSE("GPL");
2120 +#endif /* MODULE */
2121 +
2122 +/*
2123 + * Local variables:
2124 + * c-file-style: "linux"
2125 + * End:
2126 + */
2127 diff -urN clean/drivers/net/qsnet/eip/eip_linux.h linux-2.6.9/drivers/net/qsnet/eip/eip_linux.h
2128 --- clean/drivers/net/qsnet/eip/eip_linux.h     1969-12-31 19:00:00.000000000 -0500
2129 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_linux.h       2004-10-01 06:49:29.000000000 -0400
2130 @@ -0,0 +1,399 @@
2131 +/*
2132 + *    Copyright (c) 2003 by Quadrics Ltd.
2133 + * 
2134 + *    For licensing information please see the supplied COPYING file
2135 + *
2136 + */
2137 +
2138 +#ident "$Id: eip_linux.h,v 1.47 2004/10/01 10:49:29 mike Exp $"
2139 +
2140 +#ifndef __EIP_LINUX_H
2141 +#define __EIP_LINUX_H
2142 +
2143 +#define EIP_WATERMARK                  (0xfab1e)
2144 +
2145 +#define EIP_PAGES(s)                   (((s - 1) >> PAGE_SHIFT) + 1)
2146 +#define EIP_DVMA_PAGES(s)              ((s < PAGE_SIZE) ? EIP_PAGES(s) + 1 : EIP_PAGES(s))
2147 +
2148 +#define EIP_SVC_SMALLEST_LEN           (1 << 9)        /* 512 */
2149 +#define EIP_SVC_BIGGEST_LEN            (1 << 16)       /* 64k */
2150 +
2151 +#define EIP_SVC_SMALLEST               (0)
2152 +#define EIP_SVC_BIGGEST                        (7)
2153 +
2154 +#define EIP_SVC_NR                     (8)
2155 +#define EIP_SVC_EP(s)                  (s + EP_MSG_SVC_EIP512)
2156 +
2157 +#define EIP_STAT_ALLOC_SHIFT           (8)
2158 +#define EIP_STAT_ALLOC_GET(atomicp)    ((int) atomic_read(atomicp) >> EIP_STAT_ALLOC_SHIFT)
2159 +#define EIP_STAT_ALLOC_ADD(atomicp, v) (atomic_add((v << EIP_STAT_ALLOC_SHIFT), atomicp))
2160 +#define EIP_STAT_ALLOC_SUB(atomicp, v) (atomic_sub((v << EIP_STAT_ALLOC_SHIFT), atomicp))
2161 +
2162 +#define EIP_STAT_QUEUED_MASK           (0xff)
2163 +#define EIP_STAT_QUEUED_GET(atomicp)   ((int) atomic_read(atomicp) & EIP_STAT_QUEUED_MASK)
2164 +
2165 +#define EIP_RMD_NR                     (8)
2166 +#define EIP_RMD_MIN_NR                 (8)
2167 +#define EIP_RMD_MAX_NR                 (64)    /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */
2168 +
2169 +#define EIP_RMD_ALLOC_STEP             (8)
2170 +#define EIP_RMD_ALLOC_THRESH           (16)
2171 +
2172 +#define EIP_RMD_ALLOC                  (1)
2173 +#define EIP_RMD_REPLACE                        (0)
2174 +
2175 +#define EIP_TMD_NR                     (64)
2176 +#define EIP_TMD_MIN_NR                 (16)
2177 +#define EIP_TMD_MAX_NR                 (64)    /* should be < than (1 << EIP_STAT_ALLOC_SHIFT) */
2178 +
2179 +#define EIP_TMD_TYPE_NR                        (3)
2180 +#define EIP_TMD_COPYBREAK              (0x0)
2181 +#define EIP_TMD_STD                    (0x1)
2182 +#define EIP_TMD_AGGREG                 (0x2)
2183 +
2184 +#define EIP_TX_COPYBREAK               (512)
2185 +#define EIP_TX_COPYBREAK_MAX           (1024)
2186 +
2187 +#define EIP_IPFRAG_TO                  (50)    /* time out before a frag is sent in msec */
2188 +#define EIP_IPFRAG_COPYBREAK           (EIP_SVC_BIGGEST_LEN - sizeof(EIP_IPFRAG) - EIP_HEADER_PAD)
2189 +
2190 +#define EIP_RX_ENVELOPE_NR             ((EIP_RMD_MAX_NR*EIP_SVC_NR)/2)
2191 +#define EIP_RX_GRANULARITY             (1)
2192 +
2193 +#define EIP_IP_ALIGN(X)                        (((X) + (15)) & ~(15))
2194 +#define EIP_EXTRA                      roundup (sizeof(EIP_RMD), 256)
2195 +#define EIP_RCV_DMA_LEN(s)                     (s - EIP_EXTRA - EIP_HEADER_PAD)
2196 +#define EIP_MTU_MAX                    (EIP_RCV_DMA_LEN(EIP_SVC_BIGGEST_LEN) - (ETH_HLEN))
2197 +
2198 +#define SIZE_TO_SVC(s, svc)                                                                    \
2199 +       do {                                                                                    \
2200 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 9)))  {svc = 0;break;}   \
2201 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 10))) {svc = 1;break;}   \
2202 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 11))) {svc = 2;break;}   \
2203 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 12))) {svc = 3;break;}   \
2204 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 13))) {svc = 4;break;}   \
2205 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 14))) {svc = 5;break;}   \
2206 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 15))) {svc = 6;break;}   \
2207 +                                       if (s <= EIP_RCV_DMA_LEN((1 << 16))) {svc = 7;break;}   \
2208 +                                       svc = -666;                                             \
2209 +                                       EIP_ASSERT(1 == 0);                                     \
2210 +       } while (0)
2211 +
2212 +extern int eipdebug;
2213 +#define EIP_ASSERT_ON 
2214 +/* #define NO_DEBUG */
2215 +
2216 +
2217 +/* ######################## */
2218 +#ifdef NO_DEBUG
2219 +#define __EIP_DBG_PRINTF(fmt, args...)
2220 +#define EIP_DBG_PRINTF(flag, fmt, args...)
2221 +#else
2222 +
2223 +#define EIP_DBG_RMD            0x1
2224 +#define EIP_DBG_TMD            0x2
2225 +#define EIP_DBG_RMD_HEAD       0x4
2226 +#define EIP_DBG_TMD_HEAD       0x8
2227 +#define EIP_DBG_EIPH           0x10
2228 +#define EIP_DBG_IPH            0x20
2229 +#define EIP_DBG_RMD_EP_DVMA    0x40
2230 +#define EIP_DBG_TMD_EP_DVMA    0x80
2231 +#define EIP_DBG_EP_DVMA                (EIP_DBG_RMD_EP_DVMA|EIP_DBG_TMD_EP_DVMA)
2232 +#define EIP_DBG_MEMALLOC       0x100
2233 +#define EIP_DBG_MEMFREE                0x200
2234 +#define EIP_DBG_RMD_QUEUE      0x400
2235 +#define EIP_DBG_TMD_QUEUE      0x800
2236 +#define EIP_DBG_GEN            0x1000
2237 +#define EIP_DBG_DEBUG          0x2000
2238 +       
2239 +#define __EIP_DBG_PRINTF(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUFFER, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
2240 +#define EIP_DBG_PRINTF(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTF(fmt, ## args):(void)0)
2241 +
2242 +#define __EIP_DBG_PRINTK(fmt, args...) (qsnet_debugf (QSNET_DEBUG_BUF_CON, " CPU #%d %s: " fmt, smp_processor_id(), __func__, ## args))
2243 +#define EIP_DBG_PRINTK(flag, fmt, args...) (unlikely(eipdebug & flag) ? __EIP_DBG_PRINTK(fmt, ## args):(void)0)
2244 +           
2245 +#define EIP_ERR_PRINTF(fmt, args...)   __EIP_DBG_PRINTK("!!! ERROR !!! - " fmt, ## args)
2246 +
2247 +       
2248 +#define EIP_DBG2(flag, fn, fn_arg, fmt, args...)                                                               \
2249 +    if (unlikely(eipdebug & flag)) {                                                                           \
2250 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
2251 +            (void)(fn)(fn_arg);                                                                                \
2252 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s: " fmt, smp_processor_id(), __func__, ##args);       \
2253 +    }
2254 +
2255 +
2256 +#define EIP_DBG(flag, fn, args...)                                                             \
2257 +    if (unlikely(eipdebug & flag)) {                                                           \
2258 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "+CPU #%d %s\n", smp_processor_id(), __func__);   \
2259 +            (void)(fn)(args);                                                                  \
2260 +           qsnet_debugf (QSNET_DEBUG_BUFFER, "-CPU #%d %s :\n", smp_processor_id(), __func__); \
2261 +    }
2262 +#endif /* NO_DEBUG */
2263 +
2264 +
2265 +#ifdef EIP_ASSERT_ON
2266 +
2267 +#define __EIP_ASSERT_PRINT(exp)                                \
2268 +               eipdebug = 0xffff;                              \
2269 +               EIP_ERR_PRINTF("ASSERT : %s, %s::%d\n",         \
2270 +                      #exp, __BASE_FILE__, __LINE__);          
2271 +
2272 +#define EIP_ASSERT(exp)                                                        \
2273 +               if (!(exp)) {                                           \
2274 +                       __EIP_ASSERT_PRINT(exp);                        \
2275 +                       netif_stop_queue(eip_tx->net_device);           \
2276 +               }
2277 +
2278 +#define EIP_ASSERT2(exp, f, arg)                                       \
2279 +       do {                                                            \
2280 +               if (!(exp)) {                                           \
2281 +                       __EIP_ASSERT_PRINT(exp);                        \
2282 +                       f(arg);                                         \
2283 +               }                                                       \
2284 +       } while (0)
2285 +
2286 +#define EIP_ASSERT_BUG(exp)                                            \
2287 +       do {                                                            \
2288 +               if (!(exp)) {                                           \
2289 +                       __EIP_ASSERT_PRINT(exp);                        \
2290 +                       BUG();                                          \
2291 +               }                                                       \
2292 +       } while (0)
2293 +
2294 +#define EIP_ASSERT_GOTO(exp, label, f, arg)                            \
2295 +       do {                                                            \
2296 +               if (!(exp)) {                                           \
2297 +                       __EIP_ASSERT_PRINT(exp);                        \
2298 +                       f(arg);                                         \
2299 +                       goto label;                                     \
2300 +               }                                                       \
2301 +       } while (0)
2302 +
2303 +#define EIP_ASSERT_RET(exp, ret)                                       \
2304 +       do {                                                            \
2305 +               if (!(exp)) {                                           \
2306 +                       __EIP_ASSERT_PRINT(exp);                        \
2307 +                       return ret;                                     \
2308 +               }                                                       \
2309 +       } while (0)
2310 +
2311 +#define EIP_ASSERT_RETURN(exp, f, arg)                                 \
2312 +       do {                                                            \
2313 +               if (!(exp)) {                                           \
2314 +                       __EIP_ASSERT_PRINT(exp);                        \
2315 +                       f(arg);                                         \
2316 +                       return;                                         \
2317 +               }                                                       \
2318 +       } while (0)
2319 +
2320 +#define EIP_ASSERT_RETNULL(exp, f, arg)                                        \
2321 +       do {                                                            \
2322 +               if (!(exp)) {                                           \
2323 +                       __EIP_ASSERT_PRINT(exp);                        \
2324 +                       f(arg);                                         \
2325 +                       return NULL;                                    \
2326 +               }                                                       \
2327 +       } while (0)
2328 +
2329 +#else
2330 +
2331 +#define EIP_ASSERT(exp)                do {} while(0)
2332 +#define EIP_ASSERT_OUT(exp)            do {} while(0)
2333 +#define EIP_ASSERT_RETURN(exp)                 do {} while(0)
2334 +#define EIP_ASSERT_RETNULL(exp)                do {} while(0)
2335 +#define EIP_ASSERT_BUG(exp)            do {} while(0)
2336 +
2337 +#endif /* EIP_ASSERT */
2338 +
2339 +
2340 +
2341 +typedef struct {
2342 +       u_short ip_bcast;
2343 +       u_short ip_inst;
2344 +       u_short ip_addr;
2345 +} EIP_ADDRESS;
2346 +
2347 +typedef struct {
2348 +       EIP_ADDRESS h_dhost;
2349 +       EIP_ADDRESS h_shost;
2350 +       u_short h_sap;
2351 +} EIP_HEADER;
2352 +#define EIP_HEADER_PAD                 (2)
2353 +
2354 +typedef struct eip_proc_fs {
2355 +       const char *name;
2356 +       struct proc_dir_entry **parent;
2357 +       read_proc_t *read;
2358 +       write_proc_t *write;
2359 +       unsigned char allocated;
2360 +       struct proc_dir_entry *entry;
2361 +} EIP_PROC_FS;
2362 +
2363 +#define EIP_PROC_ROOT_DIR              "eip"
2364 +
2365 +#define EIP_PROC_DEBUG_DIR             "debug"
2366 +#define EIP_PROC_DEBUG_RX_FLUSH                "rx_flush"
2367 +#define EIP_PROC_DEBUG_TX_FLUSH                "tx_flush"
2368 +
2369 +#define EIP_PROC_AGGREG_DIR            "aggregation"
2370 +#define EIP_PROC_AGGREG_ONOFF          "enable"
2371 +#define EIP_PROC_AGGREG_TO             "timeout"
2372 +#define EIP_PROC_AGGREG_COPYBREAK      "copybreak"
2373 +
2374 +#define EIP_PROC_TX_COPYBREAK          "tx_copybreak"
2375 +#define EIP_PROC_STATS                 "stats"
2376 +#define EIP_PROC_RX_GRAN               "rx_granularity"
2377 +#define EIP_PROC_TX_RAILMASK           "tx_railmask"
2378 +#define EIP_PROC_TMD_INUSE             "tmd_inuse"
2379 +#define EIP_PROC_EIPDEBUG              "eipdebug"
2380 +#define EIP_PROC_CHECKSUM               "checksum"
2381 +
2382 +/* RX */
2383 +/* dma_len is used to keep the len of a received packet */
2384 +/* nmd.nmd_len is the max dma that can be received      */
2385 +/*                                                      */
2386 +struct eip_rmd {
2387 +       struct sk_buff *skb;
2388 +
2389 +       EP_NMD nmd;
2390 +       u16 dvma_idx;
2391 +
2392 +       EP_RXD *rxd;
2393 +       struct eip_rmd_head *head;
2394 +       union {
2395 +               struct list_head link;                          /* when on "busy" list */
2396 +               struct eip_rmd  *next;                          /* all other lists */
2397 +       } chain;
2398 +};
2399 +typedef struct eip_rmd EIP_RMD;
2400 +struct eip_rmd_head {
2401 +       EP_NMH *handle;
2402 +
2403 +       EP_RCVR *rcvr;
2404 +       EIP_RMD *busy_list;
2405 +
2406 +       /* stats */
2407 +       atomic_t stats;
2408 +       unsigned long dma;
2409 +};
2410 +
2411 +typedef struct eip_rmd_head EIP_RMD_HEAD;
2412 +typedef struct eip_rx {
2413 +       struct eip_rmd_head head[EIP_SVC_NR];
2414 +
2415 +       EIP_RMD *irq_list;
2416 +       short    irq_list_nr;   
2417 +
2418 +       /* stats */
2419 +       unsigned long packets;
2420 +       unsigned long bytes;
2421 +       unsigned long errors;
2422 +       unsigned long dropped;
2423 +       unsigned long reschedule;
2424 +
2425 +       spinlock_t lock;
2426 +       struct tasklet_struct tasklet;
2427 +       unsigned char rmd_max_nr;
2428 +       unsigned char sysctl_granularity;
2429 +       struct net_device *net_device;
2430 +} EIP_RX;
2431 +
2432 +/* TX */
2433 +/* dma_len_max is the maximum len for a given DMA                      */
2434 +/* where mnd.nmd_len is the len of the packet to send ~> than skb->len */
2435 +typedef struct eip_ipfrag_handle {
2436 +       /* common with tmd */
2437 +       unsigned long dma_base;
2438 +       int dma_len;
2439 +       EP_NMD nmd;
2440 +       u16 dvma_idx;
2441 +
2442 +       struct sk_buff *skb;
2443 +       struct eip_tmd_head *head;
2444 +       union {
2445 +               struct list_head link;                          /* when on "busy" list */
2446 +               struct eip_tmd  *next;                          /* all other lists */
2447 +       } chain;
2448 +
2449 +       /* private */
2450 +       struct list_head list;
2451 +       struct timeval timestamp;
2452 +       unsigned int frag_nr;
2453 +       int datagram_len; /* Ip data */
2454 +       int dma_correction;
2455 +       EP_PAYLOAD payload;
2456 +} EIP_IPFRAG;
2457 +
2458 +struct eip_tmd {
2459 +       unsigned long dma_base;
2460 +       int dma_len;
2461 +       EP_NMD nmd;
2462 +       u16 dvma_idx;
2463 +
2464 +       struct sk_buff *skb;
2465 +       struct eip_tmd_head *head;
2466 +       union {
2467 +               struct list_head link;                          /* when on "busy" list */
2468 +               struct eip_tmd  *next;                          /* all other lists */
2469 +       } chain;
2470 +};
2471 +
2472 +struct eip_tmd_head {
2473 +       EP_NMH *handle;
2474 +
2475 +       struct eip_tmd *tmd;
2476 +       atomic_t stats;
2477 +};
2478 +
2479 +typedef struct eip_tmd EIP_TMD;
2480 +typedef struct eip_tmd_head EIP_TMD_HEAD;
2481 +
2482 +/* #define EIP_MORE_STATS */
2483 +
2484 +typedef struct eip_tx {
2485 +       struct net_device *net_device;
2486 +       EP_XMTR *xmtr;
2487 +       EP_SYS *ep_system;
2488 +
2489 +       struct eip_tmd_head head[EIP_TMD_TYPE_NR];
2490 +       struct list_head inuse;
2491 +       atomic_t destructor;
2492 +
2493 +       /* stats */
2494 +       unsigned long packets;
2495 +       unsigned long bytes;
2496 +       unsigned long errors;
2497 +       unsigned long dropped;
2498 +       unsigned long dma[EIP_SVC_NR];
2499 +       
2500 +#ifdef EIP_MORE_STATS
2501 +       unsigned long sent_copybreak;
2502 +       unsigned long sent_std;
2503 +       unsigned long sent_aggreg;
2504 +#endif
2505 +
2506 +       unsigned char tmd_max_nr;
2507 +
2508 +       unsigned short sysctl_copybreak;
2509 +       unsigned short sysctl_ipfrag_to;
2510 +       unsigned short sysctl_ipfrag_copybreak;
2511 +       unsigned short sysctl_aggregation;
2512 +
2513 +       unsigned short ipfrag_count;
2514 +       struct list_head ipfrag;
2515 +       spinlock_t ipfraglock;
2516 +
2517 +       spinlock_t lock;
2518 +       struct tasklet_struct tasklet;
2519 +} EIP_TX;
2520 +
2521 +/* =============================================== */
2522 +    /* unsigned long   multicast; */
2523 +#endif                         /* __EIP_LINUX_H */
2524 +
2525 +/*
2526 + * Local variables:
2527 + * c-file-style: "linux"
2528 + * End:
2529 + */
2530 diff -urN clean/drivers/net/qsnet/eip/eip_stats.c linux-2.6.9/drivers/net/qsnet/eip/eip_stats.c
2531 --- clean/drivers/net/qsnet/eip/eip_stats.c     1969-12-31 19:00:00.000000000 -0500
2532 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_stats.c       2005-09-07 10:34:58.000000000 -0400
2533 @@ -0,0 +1,374 @@
2534 +/*
2535 + *    Copyright (c) 2003 by Quadrics Ltd.
2536 + * 
2537 + *    For licensing information please see the supplied COPYING file
2538 + *
2539 + */
2540 +
2541 +/*
2542 + * $Id: eip_stats.c,v 1.36.2.2 2005/09/07 14:34:58 mike Exp $
2543 + * $Source: /cvs/master/quadrics/eipmod/eip_stats.c,v $
2544 + */
2545 +
2546 +#include <qsnet/kernel.h>
2547 +#include <qsnet/module.h>
2548 +#include <elan/epcomms.h>
2549 +
2550 +#include <linux/netdevice.h>
2551 +
2552 +#include <linux/kernel.h>
2553 +#include <linux/proc_fs.h>
2554 +
2555 +#include <asm/atomic.h>
2556 +
2557 +#include <qsnet/procfs_linux.h>
2558 +
2559 +#include "eip_linux.h"
2560 +#include "eip_stats.h"
2561 +
2562 +extern EIP_RX *eip_rx;
2563 +extern EIP_TX *eip_tx;
2564 +extern int tx_copybreak_max;
2565 +extern EP_RAILMASK tx_railmask;
2566 +extern int  eip_checksum_state;
2567 +extern void eip_stop_queue(void);
2568 +extern void eip_start_queue(void);
2569 +
2570 +static int eip_stats_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2571 +{
2572 +       int i, outlen = 0;
2573 +
2574 +       *buf = '\0';
2575 +       strcat(buf, "\n");
2576 +       strcat(buf, "--------------------------------------------+------------+-----------------+\n");
2577 +       strcat(buf, "    SKB/DMA    |               | Rx         | Tx         |  TMD TYPE       |\n");
2578 +       strcat(buf, "--------------------------------------------+------------+-----------------+\n");
2579 +
2580 +       i = 0;
2581 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #1[%3.3d/%3.3d/%3.3d] |\n",
2582 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2583 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2584 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2585 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2586 +                eip_tx->tmd_max_nr);
2587 +
2588 +       i++;
2589 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #2[%3.3d/%3.3d/%3.3d] |\n",
2590 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2591 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2592 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2593 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2594 +               eip_tx->tmd_max_nr);
2595 +
2596 +       i++;
2597 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld | #3[%3.3d/%3.3d/%3.3d] |\n",
2598 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2599 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2600 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i],
2601 +               EIP_STAT_QUEUED_GET(&eip_tx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_tx->head[i].stats),
2602 +               eip_tx->tmd_max_nr);
2603 +
2604 +       i++;
2605 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld +-----------------+\n",
2606 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2607 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2608 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2609 +
2610 +       i++;
2611 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2612 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2613 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2614 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2615 +
2616 +       i++;
2617 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2618 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2619 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2620 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2621 +
2622 +       i++;
2623 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2624 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2625 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2626 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2627 +
2628 +       i++;
2629 +       sprintf(buf + strlen(buf), " [%5d/%5d] | [%3.3d/%3.3d/%3.3d] | %10ld | %10ld |\n",
2630 +               EIP_SVC_SMALLEST_LEN << i, (int) EIP_RCV_DMA_LEN((EIP_SVC_SMALLEST_LEN << i)),
2631 +               EIP_STAT_QUEUED_GET(&eip_rx->head[i].stats), EIP_STAT_ALLOC_GET(&eip_rx->head[i].stats),
2632 +               eip_rx->rmd_max_nr, eip_rx->head[i].dma, eip_tx->dma[i]);
2633 +
2634 +       strcat(buf, "--------------------------------------------+------------+\n");
2635 +       sprintf(buf + strlen(buf), " RMD IRQ %4.4d                    %10lu | %10lu |\n",
2636 +               eip_rx->irq_list_nr, 
2637 +               eip_rx->packets, eip_tx->packets);
2638 +       strcat(buf, "--------------------------------------------+------------+\n");
2639 +
2640 +#ifdef EIP_MORE_STATS
2641 +       strcat(buf, "\n");
2642 +       sprintf(buf + strlen(buf), " Copybreak %10ld Std %10ld Aggreg %10ld\n",
2643 +                       eip_tx->sent_copybreak, eip_tx->sent_std, eip_tx->sent_aggreg);
2644 +#endif
2645 +
2646 +
2647 +       strcat(buf, "\n");
2648 +       sprintf(buf + strlen(buf), "Rx bytes: %lu (%lu Mb) errors: %lu dropped: %lu reschedule: %lu\n",
2649 +               eip_rx->bytes, eip_rx->bytes / (1024 * 1024), eip_rx->errors, eip_rx->dropped, eip_rx->reschedule);
2650 +       sprintf(buf + strlen(buf), "Tx bytes: %lu (%lu Mb) errors: %lu dropped: %lu\n",
2651 +               eip_tx->bytes, eip_tx->bytes / (1024 * 1024), eip_tx->errors, eip_tx->dropped);
2652 +       strcat(buf, "\n");
2653 +
2654 +       outlen = strlen(buf);
2655 +       ASSERT(outlen < PAGE_SIZE);
2656 +       *eof = 1;
2657 +       return outlen;
2658 +}
2659 +
2660 +void eip_stats_dump(void)
2661 +{
2662 +    int eof;
2663 +
2664 +    char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
2665 +
2666 +    if (buf == NULL)
2667 +    {
2668 +       printk("no memory to produce eip_stats\n");
2669 +       return;
2670 +    }
2671 +
2672 +    eip_stats_read(buf, NULL, 0, 0, &eof, NULL);
2673 +
2674 +    printk("%s", buf);
2675 +
2676 +    kfree(buf);
2677 +}
2678 +
2679 +static int eip_stats_write(struct file *file, const char *buf, unsigned long count, void *data)
2680 +{
2681 +       int i;
2682 +       unsigned long flags;
2683 +
2684 +       spin_lock_irqsave(&eip_rx->lock, flags);
2685 +       eip_rx->packets = 0;
2686 +       eip_rx->bytes = 0;
2687 +       eip_rx->errors = 0;
2688 +       eip_rx->dropped = 0;
2689 +       eip_rx->reschedule = 0;
2690 +       for (i = 0; i < EIP_SVC_NR; eip_rx->head[i].dma = 0, i++);
2691 +       spin_unlock_irqrestore(&eip_rx->lock, flags);
2692 +
2693 +       spin_lock_irqsave(&eip_tx->lock, flags);
2694 +       eip_tx->packets = 0;
2695 +       eip_tx->bytes = 0;
2696 +       eip_tx->errors = 0;
2697 +       eip_tx->dropped = 0;
2698 +#ifdef EIP_MORE_STATS
2699 +       eip_tx->sent_copybreak = 0;
2700 +       eip_tx->sent_std = 0;
2701 +       eip_tx->sent_aggreg = 0;
2702 +#endif
2703 +       for (i = 0; i < EIP_SVC_NR; eip_tx->dma[i] = 0, i++);
2704 +       spin_unlock_irqrestore(&eip_tx->lock, flags);
2705 +
2706 +       return count;
2707 +}
2708 +
2709 +#define                eip_stats_var_write(name)                                                                       \
2710 +static int eip_stats_##name##_write(struct file *file, const char *buf, unsigned long count, void *data)       \
2711 +{                                                                                                              \
2712 +       char * b = (char *) buf;                                                                                \
2713 +       *(b + count - 1) = '\0';                                                                                \
2714 +       eip_##name##_set((int) simple_strtoul(b, NULL, 10));                                                    \
2715 +       return count;                                                                                           \
2716 +}
2717 +
2718 +#define        eip_stats_var_read(name, var)                                                                   \
2719 +static int eip_stats_##name##_read(char *buf, char **start, off_t off, int count, int *eof, void *data)                \
2720 +{                                                                                                              \
2721 +       sprintf(buf, "%d\n", var);                                                                              \
2722 +       *eof = 1;                                                                                               \
2723 +       return strlen(buf);                                                                                     \
2724 +}
2725 +
2726 +
2727 +#define                eip_stats_var_set(name, min, max, default, var)                                                                 \
2728 +void eip_##name##_set(int i)                                                                                                   \
2729 +{                                                                                                                              \
2730 +       if ( (i >= min) && (i <= max)) {                                                                                        \
2731 +               EIP_DBG_PRINTK(EIP_DBG_GEN, "Setting " #name " to %d\n", i);                                                    \
2732 +               var =(unsigned short) i;                                                                                        \
2733 +       }                                                                                                                       \
2734 +       else {                                                                                                                  \
2735 +               EIP_ERR_PRINTF("parameter error : %d <= " #name "(%d) <= %d using default %d\n", min, i, (int) max, (int) default);     \
2736 +       }                                                                                                                       \
2737 +}
2738 +
2739 +eip_stats_var_set(tx_copybreak, 0, tx_copybreak_max, EIP_TX_COPYBREAK, eip_tx->sysctl_copybreak);
2740 +eip_stats_var_set(rx_granularity, 1, EIP_RMD_MIN_NR, EIP_RX_GRANULARITY, eip_rx->sysctl_granularity);
2741 +eip_stats_var_set(tx_railmask, 0, EP_RAILMASK_ALL, EP_RAILMASK_ALL, tx_railmask);
2742 +eip_stats_var_set(ipfrag_to, 0, (1 << 16), EIP_IPFRAG_TO, eip_tx->sysctl_ipfrag_to);
2743 +eip_stats_var_set(aggregation, 0, 1, 1, eip_tx->sysctl_aggregation);
2744 +eip_stats_var_set(ipfrag_copybreak, 0, EIP_IPFRAG_COPYBREAK, EIP_IPFRAG_COPYBREAK, eip_tx->sysctl_ipfrag_copybreak);
2745 +/* eip_stats_var_set(eipdebug, 0, , 0, eipdebug); */
2746 +
2747 +eip_stats_var_read(aggregation, eip_tx->sysctl_aggregation);
2748 +eip_stats_var_read(ipfrag_count, eip_tx->ipfrag_count);
2749 +eip_stats_var_read(ipfrag_to, eip_tx->sysctl_ipfrag_to);
2750 +eip_stats_var_read(ipfrag_copybreak, eip_tx->sysctl_ipfrag_copybreak);
2751 +eip_stats_var_read(tx_copybreak, eip_tx->sysctl_copybreak);
2752 +eip_stats_var_read(rx_granularity, eip_rx->sysctl_granularity);
2753 +eip_stats_var_read(tx_railmask, tx_railmask);
2754 +
2755 +eip_stats_var_write(aggregation);
2756 +eip_stats_var_write(ipfrag_to);
2757 +eip_stats_var_write(ipfrag_copybreak);
2758 +eip_stats_var_write(tx_copybreak);
2759 +eip_stats_var_write(rx_granularity);
2760 +eip_stats_var_write(tx_railmask);
2761 +
2762 +
2763 +static int eip_checksum_write(struct file *file, const char *buf, unsigned long count, void *data)
2764 +{
2765 +       char * b = (char *) buf;
2766 +       int    value;
2767 +
2768 +       *(b + count - 1) = '\0';
2769 +
2770 +       value = (int) simple_strtoul(b, NULL, 10);
2771 +       if  ((value >= CHECKSUM_NONE) && (value <= CHECKSUM_UNNECESSARY)) 
2772 +               eip_checksum_state = value;
2773 +       else 
2774 +               EIP_ERR_PRINTF("%d <= checksum(%d) <= %d using old value %d\n", CHECKSUM_NONE, value, CHECKSUM_UNNECESSARY, eip_checksum_state);
2775 +
2776 +       return count;
2777 +}
2778 +
2779 +static int eip_checksum_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2780 +{
2781 +       switch ( eip_checksum_state ) 
2782 +       {
2783 +       case 0  : sprintf(buf, "0 CHECKSUM_NONE\n");                      break;
2784 +       case 1  : sprintf(buf, "1 CHECKSUM_HW\n");                        break;
2785 +       case 2  : sprintf(buf, "2 CHECKSUM_UNNECESSARY\n");               break;
2786 +       default : sprintf(buf, "%d INVALID VALUE\n", eip_checksum_state); break;
2787 +       }
2788 +       *eof = 1;
2789 +       return strlen(buf);
2790 +}
2791 +
2792 +static int eip_stats_eipdebug_read(char *buf, char **start, off_t off, int count, int *eof, void *data)
2793 +{
2794 +       *buf = '\0';
2795 +       sprintf(buf + strlen(buf), "0x%x\n", eipdebug);
2796 +       *eof = 1;
2797 +       return strlen(buf);
2798 +}
2799 +static int eip_stats_eipdebug_write(struct file *file, const char *buf, unsigned long count, void *data)
2800 +{
2801 +       char * p = (char *) buf;
2802 +       *(p + count - 1) = '\0';
2803 +       eipdebug = simple_strtoul(p, NULL, 0);
2804 +       __EIP_DBG_PRINTK("Setting eipdebug to 0x%x\n", eipdebug);
2805 +       return count;
2806 +}
2807 +
2808 +static int eip_stats_tmd_inuse_read(char *page, char **start, off_t off, int count, int *eof, void *data)
2809 +{
2810 +       struct list_head *lp;
2811 +       unsigned long flags;
2812 +       unsigned int len = 0;
2813 +
2814 +       spin_lock_irqsave(&eip_tx->lock, flags);
2815 +       list_for_each (lp, &eip_tx->inuse) {
2816 +               EIP_TMD *tmd = list_entry (lp, EIP_TMD, chain.link);
2817 +               EIP_HEADER *eiph = (EIP_HEADER *) tmd->dma_base;
2818 +               
2819 +                len += sprintf(page+len, "tmd=%p id=%d len=%d\n",
2820 +                              tmd, eiph ? ntohs(eiph->h_dhost.ip_addr) : -1,
2821 +                              tmd->dma_len);
2822 +
2823 +                if (len + 40 >= count)
2824 +                        break;
2825 +        }
2826 +        spin_unlock_irqrestore(&eip_tx->lock, flags);
2827 +
2828 +       return qsnet_proc_calc_metrics (page, start, off, count, eof, len);
2829 +}
2830 +
2831 +static int eip_stats_debug_rx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2832 +{
2833 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing rx ...\n");
2834 +       tasklet_schedule(&eip_rx->tasklet);
2835 +       return count;
2836 +}
2837 +static int eip_stats_debug_tx_flush(struct file *file, const char *buf, unsigned long count, void *data)
2838 +{
2839 +       EIP_DBG_PRINTF(EIP_DBG_GEN, "Flushing tx ... %d tmds reclaimed\n", ep_enable_txcallbacks(eip_tx->xmtr));
2840 +       ep_disable_txcallbacks(eip_tx->xmtr);
2841 +       tasklet_schedule(&eip_tx->tasklet);
2842 +       return count;
2843 +}
2844 +
2845 +#define EIP_PROC_PARENT_NR     (3)
2846 +/* NOTE : the parents should be declared b4 the children */
2847 +static EIP_PROC_FS eip_procs[] = {
2848 +       /* {name, parent, read fn, write fn, allocated, entry}, */
2849 +       {EIP_PROC_ROOT_DIR, &qsnet_procfs_root, NULL, NULL, 0, NULL},
2850 +       {EIP_PROC_DEBUG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},
2851 +       {EIP_PROC_AGGREG_DIR, &eip_procs[0].entry, NULL, NULL, 0, NULL},        /* end of parents */
2852 +       {EIP_PROC_STATS, &eip_procs[0].entry, eip_stats_read, eip_stats_write, 0, NULL},
2853 +       {EIP_PROC_TX_COPYBREAK, &eip_procs[0].entry, eip_stats_tx_copybreak_read, eip_stats_tx_copybreak_write, 0, NULL},
2854 +       {EIP_PROC_RX_GRAN, &eip_procs[0].entry, eip_stats_rx_granularity_read, eip_stats_rx_granularity_write, 0, NULL},
2855 +       {EIP_PROC_TX_RAILMASK, &eip_procs[0].entry, eip_stats_tx_railmask_read, eip_stats_tx_railmask_write, 0, NULL},
2856 +       {EIP_PROC_TMD_INUSE, &eip_procs[0].entry, eip_stats_tmd_inuse_read, NULL, 0, NULL},
2857 +       {EIP_PROC_EIPDEBUG, &eip_procs[0].entry, eip_stats_eipdebug_read, eip_stats_eipdebug_write, 0, NULL},
2858 +       {EIP_PROC_CHECKSUM, &eip_procs[0].entry, eip_checksum_read, eip_checksum_write, 0, NULL},
2859 +       {EIP_PROC_DEBUG_RX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_rx_flush, 0, NULL},
2860 +       {EIP_PROC_DEBUG_TX_FLUSH, &eip_procs[1].entry, NULL, eip_stats_debug_tx_flush, 0, NULL},
2861 +       {"ipfrag_count", &eip_procs[2].entry, eip_stats_ipfrag_count_read, NULL, 0, NULL},
2862 +       {EIP_PROC_AGGREG_TO, &eip_procs[2].entry, eip_stats_ipfrag_to_read, eip_stats_ipfrag_to_write, 0, NULL},
2863 +       {EIP_PROC_AGGREG_ONOFF, &eip_procs[2].entry, eip_stats_aggregation_read, eip_stats_aggregation_write, 0, NULL},
2864 +       {EIP_PROC_AGGREG_COPYBREAK, &eip_procs[2].entry, eip_stats_ipfrag_copybreak_read, eip_stats_ipfrag_copybreak_write, 0, NULL},
2865 +       {NULL, NULL, NULL, NULL, 1, NULL},
2866 +};
2867 +
2868 +int eip_stats_init(void)
2869 +{
2870 +       int p;
2871 +
2872 +       for (p = 0; !eip_procs[p].allocated; p++) {
2873 +               if (p < EIP_PROC_PARENT_NR)
2874 +                       eip_procs[p].entry = proc_mkdir(eip_procs[p].name, *eip_procs[p].parent);
2875 +               else
2876 +                       eip_procs[p].entry = create_proc_entry(eip_procs[p].name, 0, *eip_procs[p].parent);
2877 +
2878 +               if (!eip_procs[p].entry) {
2879 +                       EIP_ERR_PRINTF("%s\n", "Cannot allocate proc entry");
2880 +                       eip_stats_cleanup();
2881 +                       return -ENOMEM;
2882 +               }
2883 +
2884 +               eip_procs[p].entry->owner = THIS_MODULE;
2885 +               eip_procs[p].entry->write_proc = eip_procs[p].write;
2886 +               eip_procs[p].entry->read_proc = eip_procs[p].read;
2887 +               eip_procs[p].allocated = 1;
2888 +       }
2889 +       eip_procs[p].allocated = 0;
2890 +       return 0;
2891 +}
2892 +
2893 +void eip_stats_cleanup(void)
2894 +{
2895 +       int p;
2896 +       for (p = (sizeof (eip_procs)/sizeof (eip_procs[0]))-1; p >= 0; p--)
2897 +               if (eip_procs[p].allocated && eip_procs[p].name) {
2898 +                       EIP_DBG_PRINTF(EIP_DBG_GEN, "Removing %s from proc\n", eip_procs[p].name);
2899 +                       remove_proc_entry(eip_procs[p].name, *eip_procs[p].parent);
2900 +               }
2901 +}
2902 +
2903 +/*
2904 + * Local variables:
2905 + * c-file-style: "linux"
2906 + * End:
2907 + */
2908 diff -urN clean/drivers/net/qsnet/eip/eip_stats.h linux-2.6.9/drivers/net/qsnet/eip/eip_stats.h
2909 --- clean/drivers/net/qsnet/eip/eip_stats.h     1969-12-31 19:00:00.000000000 -0500
2910 +++ linux-2.6.9/drivers/net/qsnet/eip/eip_stats.h       2004-05-10 10:47:47.000000000 -0400
2911 @@ -0,0 +1,22 @@
2912 +/*
2913 + *    Copyright (c) 2003 by Quadrics Ltd.
2914 + * 
2915 + *    For licensing information please see the supplied COPYING file
2916 + *
2917 + */
2918 +
2919 +#ident "$Id: eip_stats.h,v 1.14 2004/05/10 14:47:47 daniel Exp $"
2920 +
2921 +#ifndef __EIP_STATS_H
2922 +#define        __EIP_STATS_H
2923 +
2924 +int eip_stats_init(void);
2925 +void eip_stats_cleanup(void);
2926 +void eip_rx_granularity_set(int);
2927 +void eip_tx_copybreak_set(int);
2928 +void eip_ipfrag_to_set(int);
2929 +void eip_aggregation_set(int);
2930 +void eip_ipfrag_copybreak_set(int);
2931 +void eip_stats_dump(void);
2932 +
2933 +#endif                         /* __EIP_STATS_H */
2934 diff -urN clean/drivers/net/qsnet/eip/Makefile linux-2.6.9/drivers/net/qsnet/eip/Makefile
2935 --- clean/drivers/net/qsnet/eip/Makefile        1969-12-31 19:00:00.000000000 -0500
2936 +++ linux-2.6.9/drivers/net/qsnet/eip/Makefile  2005-10-10 17:47:30.000000000 -0400
2937 @@ -0,0 +1,15 @@
2938 +#
2939 +# Makefile for Quadrics QsNet
2940 +#
2941 +# Copyright (c) 2002-2004 Quadrics Ltd
2942 +#
2943 +# File: drivers/net/qsnet/eip/Makefile
2944 +#
2945 +
2946 +
2947 +#
2948 +
2949 +obj-$(CONFIG_EIP)      += eip.o
2950 +eip-objs       := eip_linux.o eip_stats.o
2951 +
2952 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
2953 diff -urN clean/drivers/net/qsnet/eip/Makefile.conf linux-2.6.9/drivers/net/qsnet/eip/Makefile.conf
2954 --- clean/drivers/net/qsnet/eip/Makefile.conf   1969-12-31 19:00:00.000000000 -0500
2955 +++ linux-2.6.9/drivers/net/qsnet/eip/Makefile.conf     2005-09-07 10:39:48.000000000 -0400
2956 @@ -0,0 +1,10 @@
2957 +# Flags for generating QsNet Linux Kernel Makefiles
2958 +MODNAME                =       eip.o
2959 +MODULENAME     =       eip
2960 +KOBJFILES      =       eip_linux.o eip_stats.o
2961 +EXPORT_KOBJS   =       
2962 +CONFIG_NAME    =       CONFIG_EIP
2963 +SGALFC         =       
2964 +# EXTRALINES START
2965 +
2966 +# EXTRALINES END
2967 diff -urN clean/drivers/net/qsnet/eip/quadrics_version.h linux-2.6.9/drivers/net/qsnet/eip/quadrics_version.h
2968 --- clean/drivers/net/qsnet/eip/quadrics_version.h      1969-12-31 19:00:00.000000000 -0500
2969 +++ linux-2.6.9/drivers/net/qsnet/eip/quadrics_version.h        2005-09-07 10:39:49.000000000 -0400
2970 @@ -0,0 +1 @@
2971 +#define QUADRICS_VERSION "5.11.3qsnet"
2972 diff -urN clean/drivers/net/qsnet/elan/bitmap.c linux-2.6.9/drivers/net/qsnet/elan/bitmap.c
2973 --- clean/drivers/net/qsnet/elan/bitmap.c       1969-12-31 19:00:00.000000000 -0500
2974 +++ linux-2.6.9/drivers/net/qsnet/elan/bitmap.c 2004-01-20 12:32:17.000000000 -0500
2975 @@ -0,0 +1,287 @@
2976 +/*
2977 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
2978 + *
2979 + *    For licensing information please see the supplied COPYING file
2980 + *
2981 + */
2982 +
2983 +#ident "@(#)$Id: bitmap.c,v 1.5 2004/01/20 17:32:17 david Exp $"
2984 +/*      $Source: /cvs/master/quadrics/elanmod/shared/bitmap.c,v $*/
2985 +
2986 +#if defined(__KERNEL__)
2987 +#include <qsnet/kernel.h>
2988 +#endif
2989 +#include <qsnet/config.h>
2990 +#include <elan/bitmap.h>
2991 +
2992 +/*
2993 + * Return the index of the first available bit in the 
2994 + * bitmap , or -1 for failure
2995 + */
2996 +int
2997 +bt_freebit (bitmap_t *bitmap, int nbits)
2998 +{
2999 +    int last = (--nbits) >> BT_ULSHIFT;
3000 +    int maxbit;
3001 +    int        i, j;
3002 +
3003 +    /* look for a word with a bit off */
3004 +    for (i = 0; i <= last; i++)
3005 +       if (bitmap[i] != ~((bitmap_t) 0))
3006 +           break;
3007 +
3008 +    if (i <= last)
3009 +    {
3010 +       /* found an word with a bit off,  now see which bit it is */
3011 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3012 +       for (j = 0; j <= maxbit; j++)
3013 +           if ((bitmap[i] & (((bitmap_t) 1) << j)) == 0)
3014 +               return ((i << BT_ULSHIFT) | j);
3015 +    }
3016 +    return (-1);
3017 +    
3018 +}
3019 +
3020 +/*
3021 + * bt_lowbit:
3022 + *     Return the index of the lowest set bit in the
3023 + *     bitmap, or -1 for failure.
3024 + */
3025 +int
3026 +bt_lowbit (bitmap_t *bitmap, int nbits)
3027 +{
3028 +    int last = (--nbits) >> BT_ULSHIFT;
3029 +    int maxbit;
3030 +    int i, j;
3031 +    
3032 +    /* look for a word with a bit on */
3033 +    for (i = 0; i <= last; i++)
3034 +       if (bitmap[i] != 0)
3035 +           break;
3036 +    if (i <= last)
3037 +    {
3038 +       /* found a word with a bit on, now see which bit it is */
3039 +       maxbit = (i == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3040 +       for (j = 0; j <= maxbit; j++)
3041 +           if (bitmap[i] & (1 << j))
3042 +               return ((i << BT_ULSHIFT) | j);
3043 +    }
3044 +
3045 +    return (-1);
3046 +}
3047 +
3048 +/*
3049 + * Return the index of the next bit after 'last' whose state
3050 + * matches 'isset' in the bitmap, or -1 for failure
3051 + */
3052 +int
3053 +bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset)
3054 +{
3055 +    int first = ((last+1) + BT_NBIPUL-1) >> BT_ULSHIFT;
3056 +    int end   = (--nbits) >> BT_ULSHIFT;
3057 +    int maxbit;
3058 +    int        i, j;
3059 +
3060 +    /* look for bits before the first whole word */
3061 +    if (((last+1) & BT_ULMASK) != 0)
3062 +    {
3063 +       maxbit = ((first-1) == last) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3064 +       for (j = ((last+1) & BT_ULMASK); j <= maxbit; j++)
3065 +           if ((bitmap[first-1] & (1 << j)) == (isset << j))
3066 +               return (((first-1) << BT_ULSHIFT) | j);
3067 +    }
3068 +
3069 +    /* look for a word with a bit off */
3070 +    for (i = first; i <= end; i++)
3071 +       if (bitmap[i] != (isset ? 0 : ~((bitmap_t) 0)))
3072 +           break;
3073 +
3074 +    if (i <= end)
3075 +    {
3076 +       /* found a word with a bit off, now see which bit it is */
3077 +       maxbit = (i == end) ? (nbits & BT_ULMASK) : (BT_NBIPUL-1);
3078 +       for (j = 0; j <= maxbit; j++)
3079 +           if ((bitmap[i] & (1 << j)) == (isset << j))
3080 +               return ((i << BT_ULSHIFT) | j);
3081 +    }
3082 +    return (-1);
3083 +}
3084 +
3085 +void
3086 +bt_copy (bitmap_t *a, bitmap_t *b, int nbits)
3087 +{
3088 +    int i;
3089 +
3090 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3091 +       b[i] = a[i];
3092 +
3093 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3094 +       if (BT_TEST(a, i))
3095 +           BT_SET(b,i);
3096 +       else
3097 +           BT_CLEAR(b,i);
3098 +}
3099 +
3100 +void
3101 +bt_zero (bitmap_t *bitmap, int nbits)
3102 +{
3103 +    int i;
3104 +
3105 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3106 +       bitmap[i] = 0;
3107 +
3108 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3109 +       BT_CLEAR(bitmap,i);
3110 +}
3111 +
3112 +void
3113 +bt_fill (bitmap_t *bitmap, int nbits)
3114 +{
3115 +    int i;
3116 +
3117 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3118 +       bitmap[i] = ~((bitmap_t) 0);
3119 +
3120 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3121 +       BT_SET(bitmap,i);
3122 +}
3123 +
3124 +int
3125 +bt_cmp (bitmap_t *a, bitmap_t *b, int nbits)
3126 +{
3127 +    int i;
3128 +
3129 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3130 +       if (a[i] != b[i])
3131 +           return (1);
3132 +
3133 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3134 +       if (BT_TEST (a, i) != BT_TEST(b, i))
3135 +           return (1);
3136 +    return (0);
3137 +}
3138 +
3139 +void
3140 +bt_intersect (bitmap_t *a, bitmap_t *b, int nbits)
3141 +{
3142 +    int i;
3143 +    
3144 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3145 +       a[i] &= b[i];
3146 +
3147 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3148 +       if (BT_TEST (a, i) && BT_TEST (b, i))
3149 +           BT_SET (a, i);
3150 +       else
3151 +           BT_CLEAR (a, i);
3152 +}
3153 +
3154 +void
3155 +bt_remove (bitmap_t *a, bitmap_t *b, int nbits)
3156 +{
3157 +    int i;
3158 +
3159 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3160 +       a[i] &= ~b[i];
3161 +
3162 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3163 +       if (BT_TEST (b, i))
3164 +           BT_CLEAR (a, i);
3165 +}
3166 +
3167 +void
3168 +bt_add (bitmap_t *a, bitmap_t *b, int nbits)
3169 +{
3170 +    int i;
3171 +
3172 +    for (i = 0; i < (nbits>>BT_ULSHIFT); i++)
3173 +       a[i] |= b[i];
3174 +
3175 +    for (i <<= BT_ULSHIFT; i < nbits; i++)
3176 +       if (BT_TEST(b, i))
3177 +           BT_SET (a, i);
3178 +}
3179 +
3180 +/*
3181 + * bt_spans : partition a spans partition b
3182 + *    == all bits set in 'b' are set in 'a'
3183 + */
3184 +int
3185 +bt_spans (bitmap_t *a, bitmap_t *b, int nbits)
3186 +{
3187 +    int i;
3188 +    
3189 +    for (i = 0; i < nbits; i++)
3190 +       if (BT_TEST (b, i) && !BT_TEST (a, i))
3191 +           return (0);
3192 +    return (1);
3193 +}
3194 +
3195 +/*
3196 + * bt_subset: copy [base,base+nbits-1] from 'a' to 'b'
3197 + */
3198 +void
3199 +bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits)
3200 +{
3201 +    int i;
3202 +
3203 +    for (i = 0; i < nbits; i++)
3204 +    {
3205 +       if (BT_TEST (a, base+i))
3206 +           BT_SET(b,i);
3207 +       else
3208 +           BT_CLEAR (b,i);
3209 +    }
3210 +}
3211 +
3212 +void 
3213 +bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
3214 +{
3215 +    int i;
3216 +    
3217 +    for (i = 0; i < nbits; i++)
3218 +    {
3219 +       if (!BT_TEST (a, i) && BT_TEST (b, i))
3220 +       {
3221 +           BT_SET (c, i);
3222 +        }
3223 +       else
3224 +       {
3225 +           BT_CLEAR (c, i);
3226 +        }
3227 +    }
3228 +}
3229 +
3230 +void 
3231 +bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits)
3232 +{
3233 +    int i;
3234 +    
3235 +    for (i = 0; i < nbits; i++)
3236 +    {
3237 +       if (BT_TEST (a, i) && !BT_TEST (b, i))
3238 +       {
3239 +           BT_SET (c, i);
3240 +        }
3241 +       else
3242 +       {
3243 +           BT_CLEAR (c, i);
3244 +        }
3245 +    }
3246 +}
3247 +
3248 +int
3249 +bt_nbits (bitmap_t *a, int nbits)
3250 +{
3251 +    int i, c;
3252 +    for (i = 0, c = 0; i < nbits; i++)
3253 +       if (BT_TEST (a, i))
3254 +           c++;
3255 +    return (c);
3256 +}
3257 +
3258 +/*
3259 + * Local variables:
3260 + * c-file-style: "stroustrup"
3261 + * End:
3262 + */
3263 diff -urN clean/drivers/net/qsnet/elan/capability.c linux-2.6.9/drivers/net/qsnet/elan/capability.c
3264 --- clean/drivers/net/qsnet/elan/capability.c   1969-12-31 19:00:00.000000000 -0500
3265 +++ linux-2.6.9/drivers/net/qsnet/elan/capability.c     2005-07-21 06:42:36.000000000 -0400
3266 @@ -0,0 +1,796 @@
3267 +/*
3268 + *    Copyright (c) 2003 by Quadrics Ltd.
3269 + * 
3270 + *    For licensing information please see the supplied COPYING file
3271 + *
3272 + */
3273 +
3274 +#ident "@(#)$Id: capability.c,v 1.19.2.2 2005/07/21 10:42:36 addy Exp $"
3275 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/capability.c,v $ */
3276 +
3277 +
3278 +#include <qsnet/kernel.h>
3279 +#include <elan/elanmod.h>
3280 +
3281 +static LIST_HEAD(elan_cap_list); 
3282 +
3283 +typedef struct elan_vp_struct
3284 +{
3285 +       struct list_head list;
3286 +       ELAN_CAPABILITY  vp;
3287 +} ELAN_VP_NODE_STRUCT;
3288 +
3289 +/* There is an array of these structs for each process/context in the CAP 
3290 + * This is then replicated for each rail. The usercopy handle stuff is 
3291 + * only maintained in rail 0 though
3292 + */
3293 +typedef struct elan_attached_struct
3294 +{
3295 +       void               *cb_args;
3296 +       ELAN_DESTROY_CB  cb_func;
3297 +       struct task_struct *handle;             /* usercopy: attached task handle */
3298 +       struct task_struct *owner;              /* usercopy: attached task handle owner */
3299 +} ELAN_ATTACHED_STRUCT;
3300 +
3301 +typedef struct elan_cap_node_struct
3302 +{
3303 +       struct list_head list;
3304 +       ELAN_CAP_STRUCT     node;
3305 +       ELAN_ATTACHED_STRUCT *attached[ELAN_MAX_RAILS];
3306 +       struct list_head vp_list;
3307 +} ELAN_CAP_NODE_STRUCT;
3308 +
3309 +
3310 +ELAN_CAP_NODE_STRUCT *
3311 +find_cap_node(ELAN_CAPABILITY *cap)
3312 +{
3313 +       struct list_head        *tmp;
3314 +       ELAN_CAP_NODE_STRUCT *ptr=NULL;
3315 +
3316 +       list_for_each(tmp, &elan_cap_list) {
3317 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3318 +               /* is it an exact match (key not checked) */
3319 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
3320 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)) {
3321 +                       return ptr;
3322 +               }
3323 +       }
3324 +       return ptr;
3325 +}
3326 +
3327 +ELAN_VP_NODE_STRUCT *
3328 +find_vp_node( ELAN_CAP_NODE_STRUCT *cap_node,ELAN_CAPABILITY *map)
3329 +{
3330 +       struct list_head       * tmp;
3331 +       ELAN_VP_NODE_STRUCT * ptr = NULL;
3332 +
3333 +       list_for_each(tmp, &cap_node->vp_list) {
3334 +               ptr = list_entry(tmp, ELAN_VP_NODE_STRUCT , list);
3335 +               /* is it an exact match (key not checked) */
3336 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->vp,map) 
3337 +                    && ELAN_CAP_GEOM_MATCH(&ptr->vp,map)){
3338 +                       return ptr;
3339 +               }
3340 +       }
3341 +       return ptr;
3342 +}
3343 +
3344 +int 
3345 +elan_validate_cap(ELAN_CAPABILITY *cap)
3346 +{
3347 +       char                      space[127];
3348 +
3349 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap %s\n",elan_capability_string(cap,space));
3350 +
3351 +       /* check versions */
3352 +       if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
3353 +       {
3354 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
3355 +               return (EINVAL);
3356 +       }
3357 +
3358 +       /* check it's not HWTEST */
3359 +       if ( cap->cap_type & ELAN_CAP_TYPE_HWTEST )
3360 +       {
3361 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_HWTEST \n");   
3362 +               return (EINVAL);
3363 +       }
3364 +       
3365 +       /* check its type */
3366 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
3367 +       {
3368 +       case ELAN_CAP_TYPE_KERNEL :     
3369 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_cap: failed type = ELAN_CAP_TYPE_KERNEL \n");   
3370 +               return (EINVAL);
3371 +
3372 +               /* check it has a valid type */
3373 +       case ELAN_CAP_TYPE_BLOCK:
3374 +       case ELAN_CAP_TYPE_CYCLIC:
3375 +               break;
3376 +
3377 +               /* all others are failed as well */
3378 +       default:
3379 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed unknown type = %x \n", (cap->cap_type & ELAN_CAP_TYPE_MASK));       
3380 +               return (EINVAL);
3381 +       }
3382 +       
3383 +       if ((cap->cap_lowcontext == ELAN_CAP_UNINITIALISED) || (cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
3384 +           || (cap->cap_lownode == ELAN_CAP_UNINITIALISED) || (cap->cap_highnode    == ELAN_CAP_UNINITIALISED))
3385 +       {
3386 +               
3387 +               ELAN_DEBUG4 (ELAN_DBG_VP,"elan_validate_cap: ELAN_CAP_UNINITIALISED   LowNode %d   HighNode %d   LowContext %d   highContext %d\n",
3388 +                            cap->cap_lownode , cap->cap_highnode,
3389 +                            cap->cap_lowcontext , cap->cap_highcontext);
3390 +               return (EINVAL);
3391 +       }       
3392 +
3393 +       if (cap->cap_lowcontext > cap->cap_highcontext)
3394 +       {
3395 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
3396 +               return (EINVAL);
3397 +       }
3398 +       
3399 +       if (cap->cap_lownode > cap->cap_highnode)
3400 +       {
3401 +               ELAN_DEBUG2 (ELAN_DBG_VP,"elan_validate_cap: (cap->cap_lownode > cap->cap_highnode) %d %d\n",cap->cap_lownode, cap->cap_highnode);
3402 +               return (EINVAL);
3403 +       }
3404 +
3405 +       if (cap->cap_mycontext != ELAN_CAP_UNINITIALISED) 
3406 +       {
3407 +               ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_cap: failed cap->cap_mycontext is set %d  \n", cap->cap_mycontext);
3408 +               return (EINVAL);
3409 +       }
3410 +
3411 +
3412 +       if ((ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)) > ELAN_MAX_VPS)
3413 +       {
3414 +               ELAN_DEBUG6 (ELAN_DBG_VP,"elan_validate_cap: too many vps  LowNode %d   HighNode %d   LowContext %d   highContext %d,  %d >% d\n",
3415 +                            cap->cap_lownode , cap->cap_highnode,
3416 +                            cap->cap_lowcontext , cap->cap_highcontext,
3417 +                            (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap)),
3418 +                            ELAN_MAX_VPS);
3419 +               
3420 +               return (EINVAL);
3421 +       }
3422 +
3423 +       return (ESUCCESS);
3424 +}
3425 +
3426 +int
3427 +elan_validate_map(ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3428 +{
3429 +       ELAN_CAP_NODE_STRUCT * ptr  = NULL;
3430 +       ELAN_VP_NODE_STRUCT  * vptr = NULL;
3431 +       char space[256];
3432 +
3433 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
3434 +
3435 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map \n");
3436 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map cap = %s \n",elan_capability_string(cap,space));
3437 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_validate_map map = %s \n",elan_capability_string(map,space));
3438 +
3439 +       /* does cap exist    */
3440 +       ptr = find_cap_node(cap);
3441 +       if ( ptr == NULL ) 
3442 +       {
3443 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not found \n");
3444 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3445 +               return EINVAL;
3446 +       }
3447 +       /* is it active */
3448 +       if ( ! ptr->node.active ) 
3449 +       {
3450 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap not active \n");
3451 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3452 +               return EINVAL;
3453 +       }
3454 +
3455 +       /* are they the same */
3456 +       if ( ELAN_CAP_TYPE_MATCH(cap,map) 
3457 +            && ELAN_CAP_GEOM_MATCH(cap,map)) 
3458 +       {
3459 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map: cap == map  passed\n");
3460 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3461 +               return ESUCCESS;
3462 +       }
3463 +
3464 +       /* is map in map list */
3465 +       vptr = find_vp_node(ptr, map);
3466 +       if ( vptr == NULL ) 
3467 +       {
3468 +               ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map not found\n");
3469 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3470 +               return EINVAL;
3471 +       }
3472 +       
3473 +       ELAN_DEBUG0 (ELAN_DBG_VP,"elan_validate_map:  map passed\n");
3474 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3475 +       return ESUCCESS;
3476 +}
3477 +
3478 +int
3479 +elan_create_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
3480 +{
3481 +       char                      space[127];
3482 +       struct list_head        * tmp;
3483 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3484 +       int                       i, rail;
3485 +
3486 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3487 +
3488 +       ELAN_DEBUG1 (ELAN_DBG_VP,"elan_create_cap %s\n",elan_capability_string(cap,space));     
3489 +
3490 +       /* need to check that the cap does not over lap another one 
3491 +          or is an exact match with only the userkey changing */
3492 +       list_for_each(tmp, &elan_cap_list) {
3493 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3494 +
3495 +               /* is it an exact match (key not checked) */
3496 +               if ( ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) 
3497 +                    && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)
3498 +                    && (ptr->node.owner == owner)) {
3499 +                       if ( ptr->node.active ) {
3500 +                       /* don't increment the attached count as it's like a create */
3501 +                               ptr->node.cap.cap_userkey = cap->cap_userkey;
3502 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3503 +                               return ESUCCESS;
3504 +                       }
3505 +                       else
3506 +                       {
3507 +                               ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed %s\n",
3508 +                                           elan_capability_string(&ptr->node.cap,space));
3509 +                               ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed ptr %p owner %p attached %d\n",
3510 +                                           ptr, owner, ptr->node.attached);
3511 +                                            
3512 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3513 +                               return EINVAL;
3514 +                       }
3515 +               }
3516 +               
3517 +               /* does it overlap, even with ones being destroyed */
3518 +               if (elan_cap_overlap(&ptr->node.cap,cap))
3519 +               {
3520 +                       ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed overlap %s\n",
3521 +                                   elan_capability_string(&ptr->node.cap,space));
3522 +                       ELAN_DEBUG (ELAN_DBG_VP,"elan_create_cap failed overlap ptr %p owner %p attached %d active %d\n",
3523 +                                   ptr, owner, ptr->node.attached, ptr->node.active);
3524 +
3525 +                       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3526 +                       return  EACCES;
3527 +               }
3528 +       }
3529 +
3530 +       /* create it */
3531 +       KMEM_ALLOC(ptr, ELAN_CAP_NODE_STRUCT *, sizeof(ELAN_CAP_NODE_STRUCT), 1);
3532 +       if (ptr == NULL)
3533 +       {
3534 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3535 +               return  ENOMEM;
3536 +       }
3537 +
3538 +       /* create per rail space for the attached array */
3539 +       for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3540 +       {
3541 +               ptr->attached[rail]=NULL;
3542 +               /* GNAT 7685: Always need to allocate an attached structure in rail 0 for the usercopy device */
3543 +               if ( ELAN_CAP_IS_RAIL_SET(cap,rail) || rail == 0 ) 
3544 +               {
3545 +                       KMEM_ALLOC(ptr->attached[rail], ELAN_ATTACHED_STRUCT *, sizeof(ELAN_ATTACHED_STRUCT) *  ELAN_CAP_NUM_CONTEXTS(cap), 1);
3546 +                       if (ptr->attached[rail] == NULL) 
3547 +                       {
3548 +                               for(;rail>=0;rail--)
3549 +                                       if ( ptr->attached[rail] )
3550 +                                               KMEM_FREE(ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) *  ELAN_CAP_NUM_CONTEXTS(cap));
3551 +
3552 +                               KMEM_FREE(ptr, sizeof(ELAN_CAP_NODE_STRUCT));
3553 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3554 +                               return  ENOMEM;
3555 +                       }
3556 +                       /* blank the per context attached array */
3557 +                       for(i=0;i<ELAN_CAP_NUM_CONTEXTS(cap);i++)
3558 +                       {
3559 +                               ptr->attached[rail][i].cb_func = NULL;
3560 +                               /* user-to-user copy */
3561 +                               ptr->attached[rail][i].handle  = NULL;
3562 +                               ptr->attached[rail][i].owner   = NULL;
3563 +                       }
3564 +               }
3565 +       }       
3566 +       
3567 +       ptr->node.owner     = owner;
3568 +       ptr->node.cap       = *cap;
3569 +       ptr->node.attached  = 1;    /* creator counts as attached */
3570 +       ptr->node.active    = 1;
3571 +       ptr->vp_list.next   = &(ptr->vp_list);
3572 +       ptr->vp_list.prev   = &(ptr->vp_list);
3573 +
3574 +       list_add_tail(&ptr->list, &elan_cap_list);      
3575 +
3576 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3577 +       return  ESUCCESS;
3578 +}
3579 +
3580 +void
3581 +elan_destroy_cap_test(ELAN_CAP_NODE_STRUCT *cap_ptr)
3582 +{
3583 +       /* called by someone holding the mutex   */
3584 +       struct list_head       * vp_tmp;
3585 +       ELAN_VP_NODE_STRUCT * vp_ptr = NULL;
3586 +       int                      rail;
3587 +
3588 +       ASSERT(cap_ptr->node.attached >= 0);
3589 +
3590 +       /* check to see if it can be deleted now */
3591 +       if ( cap_ptr->node.attached == 0 ) {
3592 +               
3593 +               ELAN_DEBUG1(ELAN_DBG_CAP,"elan_destroy_cap_test: %p attached == 0\n", cap_ptr); 
3594 +               
3595 +               /* delete the vp list */
3596 +               list_for_each(vp_tmp, &(cap_ptr->vp_list)) {
3597 +                       vp_ptr = list_entry(vp_tmp, ELAN_VP_NODE_STRUCT , list);
3598 +                       list_del(&vp_ptr->list);
3599 +                       KMEM_FREE( vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
3600 +               }
3601 +               
3602 +               list_del(&cap_ptr->list);
3603 +
3604 +               /* delete space for the attached array */
3605 +               for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3606 +                       if (cap_ptr->attached[rail]) 
3607 +                               KMEM_FREE(cap_ptr->attached[rail], sizeof(ELAN_ATTACHED_STRUCT) * ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap)));
3608 +                       
3609 +               KMEM_FREE(cap_ptr, sizeof(ELAN_CAP_NODE_STRUCT));               
3610 +       }
3611 +       else
3612 +               ELAN_DEBUG2(ELAN_DBG_CAP,"elan_destroy_cap_test: %p attached = %d\n",
3613 +                           cap_ptr, cap_ptr->node.attached);   
3614 +
3615 +}
3616 +
3617 +int
3618 +elan_destroy_cap(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap)
3619 +{
3620 +       char                      space[127];
3621 +       struct list_head        * el;
3622 +       struct list_head        * nel;
3623 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3624 +       int                       i, rail;
3625 +       int                       found = 0;
3626 +
3627 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3628 +
3629 +       ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_destroy_cap %s\n",elan_capability_string(cap,space));   
3630 +
3631 +       list_for_each_safe (el, nel, &elan_cap_list) {
3632 +               ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
3633 +               
3634 +               /* is it an exact match */
3635 +               if ( (ptr->node.owner == owner )
3636 +                    && (  (cap == NULL) 
3637 +                          || (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) && ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap)))) {
3638 +
3639 +                       if ( ptr->node.active ) {
3640 +
3641 +                               /* mark as inactive and decrement attached count */
3642 +                               ptr->node.active = 0;
3643 +                               ptr->node.attached--;
3644 +                               ptr->node.owner  = 0; /* no one owns it now */
3645 +
3646 +                               ASSERT(ptr->node.attached >= 0);
3647 +                               
3648 +                               /* need to tell any one who was attached that this has been destroy'd */
3649 +                               for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3650 +                                       if (ELAN_CAP_IS_RAIL_SET( &(ptr->node.cap), rail)) {
3651 +                                               for(i=0;i< ELAN_CAP_NUM_CONTEXTS(&(ptr->node.cap));i++)
3652 +                                                       if ( ptr->attached[rail][i].cb_func != NULL) 
3653 +                                                               ptr->attached[rail][i].cb_func(ptr->attached[rail][i].cb_args, cap, NULL);
3654 +                                       }
3655 +                               
3656 +                               /* now try to destroy it */
3657 +                               elan_destroy_cap_test(ptr);
3658 +                               
3659 +                               /* found it */
3660 +                               found = 1;
3661 +                       }
3662 +               }
3663 +       }
3664 +       
3665 +       if ( found )
3666 +       {
3667 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3668 +               return ESUCCESS;
3669 +       }
3670 +
3671 +       /* failed */
3672 +       ELAN_DEBUG1(ELAN_DBG_CAP,"elan_destroy_cap: %p didnt find it \n", cap); 
3673 +
3674 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3675 +       return EINVAL;
3676 +}
3677 +
3678 +int 
3679 +elan_get_caps(uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps)
3680 +{
3681 +       uint                      results = 0;
3682 +       struct list_head        * tmp;
3683 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3684 +       
3685 +
3686 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
3687 +
3688 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_get_caps\n");    
3689 +
3690 +       list_for_each(tmp, &elan_cap_list) {
3691 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3692 +               
3693 +               copyout(&ptr->node, &caps[results], sizeof (ELAN_CAP_STRUCT));
3694 +               
3695 +               results++;
3696 +               
3697 +               if ( results >= array_size )
3698 +               {
3699 +                       copyout(&results, number_of_results, sizeof(uint));     
3700 +                       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3701 +                       return ESUCCESS;
3702 +               }
3703 +       }
3704 +
3705 +       copyout(&results, number_of_results, sizeof(uint));     
3706 +
3707 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3708 +       return ESUCCESS;
3709 +}
3710 +
3711 +int
3712 +elan_create_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3713 +{
3714 +       ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
3715 +       ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
3716 +       
3717 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3718 +
3719 +
3720 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_create_vp\n");
3721 +
3722 +       /* the railmasks must match */
3723 +       if ( cap->cap_railmask != map->cap_railmask)
3724 +       {
3725 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3726 +               return  EINVAL;
3727 +       }
3728 +
3729 +       /* does the cap exist */
3730 +       cap_ptr = find_cap_node(cap);
3731 +       if ((cap_ptr == NULL) || ( cap_ptr->node.owner != owner ) || (! cap_ptr->node.active) )
3732 +       {
3733 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3734 +               return  EINVAL;
3735 +       }
3736 +       
3737 +       /* is there already a mapping */
3738 +       vp_ptr = find_vp_node(cap_ptr,map);
3739 +       if ( vp_ptr != NULL) 
3740 +       {
3741 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3742 +               return  EINVAL;
3743 +       }
3744 +
3745 +       /* create space for mapping */
3746 +       KMEM_ALLOC(vp_ptr, ELAN_VP_NODE_STRUCT *, sizeof(ELAN_VP_NODE_STRUCT), 1);
3747 +       if (vp_ptr == NULL)
3748 +       {
3749 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3750 +               return  ENOMEM;
3751 +       }
3752 +                       
3753 +       /* copy map */
3754 +       vp_ptr->vp = *map;
3755 +       list_add_tail(&vp_ptr->list, &(cap_ptr->vp_list));      
3756 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3757 +       return  ESUCCESS;
3758 +}
3759 +
3760 +int
3761 +elan_destroy_vp(ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
3762 +{
3763 +       ELAN_CAP_NODE_STRUCT * cap_ptr = NULL;
3764 +       ELAN_VP_NODE_STRUCT  * vp_ptr  = NULL;
3765 +       int                       i, rail;
3766 +
3767 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3768 +
3769 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_destroy_vp\n");  
3770 +
3771 +       cap_ptr = find_cap_node(cap);
3772 +       if ((cap_ptr!=NULL) && (cap_ptr->node.owner == owner) && ( cap_ptr->node.active))
3773 +       {               
3774 +               vp_ptr = find_vp_node( cap_ptr, map );
3775 +               if ( vp_ptr != NULL ) 
3776 +               {
3777 +                       list_del(&vp_ptr->list);
3778 +                       KMEM_FREE(vp_ptr, sizeof(ELAN_VP_NODE_STRUCT));
3779 +             
3780 +                       /* need to tell those who are attached that map is no longer in use */
3781 +                       for(rail=0;rail<ELAN_MAX_RAILS;rail++)
3782 +                               if (ELAN_CAP_IS_RAIL_SET(cap, rail))
3783 +                               {
3784 +                                       for(i=0;i< ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap));i++)
3785 +                                               if ( cap_ptr->attached[rail][i].cb_func != NULL) 
3786 +                                                       cap_ptr->attached[rail][i].cb_func( cap_ptr->attached[rail][i].cb_args, cap, map);
3787 +                               }
3788 +
3789 +                       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3790 +                       return  ESUCCESS;
3791 +               }
3792 +       }       
3793 +       
3794 +       /* didnt find it */
3795 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3796 +       return  EINVAL;
3797 +}
3798 +
3799 +int 
3800 +elan_attach_cap(ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB func)
3801 +{
3802 +       char                  space[127];
3803 +       struct list_head     *el;
3804 +
3805 +       ELAN_DEBUG1 (ELAN_DBG_CAP,"elan_attach_cap %s\n",elan_capability_string(cap,space));
3806 +
3807 +       /* currently must provide a call back, as null mean something */
3808 +       if ( func == NULL)
3809 +               return (EINVAL);
3810 +
3811 +       /* mycontext must be set and correct */
3812 +       if ( ! ELAN_CAP_VALID_MYCONTEXT(cap))
3813 +               return (EINVAL);
3814 +
3815 +       /* rail must be one of the rails in railmask */
3816 +       if (((1 << rail) & cap->cap_railmask) == 0)
3817 +               return (EINVAL);
3818 +       
3819 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3820 +
3821 +       list_for_each(el, &elan_cap_list) {
3822 +               ELAN_CAP_NODE_STRUCT *cap_ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
3823 +               
3824 +               /* is it an exact match */
3825 +               if (ELAN_CAP_MATCH(&cap_ptr->node.cap,cap) && cap_ptr->node.active) {
3826 +                       unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
3827 +                       
3828 +                       if ( cap_ptr->attached[rail][attached_index].cb_func != NULL ) /* only one per ctx per rail */
3829 +                       {
3830 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3831 +                               return   EBUSY;
3832 +                       }
3833 +
3834 +                       /* keep track of who attached as we might need to tell them when */
3835 +                       /* cap or maps get destroyed                                     */
3836 +                       cap_ptr->attached[rail][ attached_index ].cb_func = func;
3837 +                       cap_ptr->attached[rail][ attached_index ].cb_args = args;
3838 +                       cap_ptr->node.attached++;
3839 +
3840 +                       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: passed\n");
3841 +                       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3842 +                       return ESUCCESS;
3843 +               }
3844 +       }
3845 +       
3846 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_attach_cap: failed to find \n");
3847 +
3848 +       /* didnt find one */
3849 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3850 +       return EINVAL;
3851 +}
3852 +
3853 +int 
3854 +elan_detach_cap(ELAN_CAPABILITY *cap, unsigned int rail)
3855 +{
3856 +       struct list_head *el, *nel;
3857 +       char              space[256];
3858 +
3859 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3860 +
3861 +       ELAN_DEBUG1(ELAN_DBG_CAP,"elan_detach_cap %s\n",elan_capability_string(cap,space));
3862 +       list_for_each_safe (el, nel, &elan_cap_list) {
3863 +               ELAN_CAP_NODE_STRUCT *ptr = list_entry (el, ELAN_CAP_NODE_STRUCT, list);
3864 +
3865 +               /* is it an exact match (key not checked) */
3866 +               if (ELAN_CAP_TYPE_MATCH(&ptr->node.cap,cap) &&
3867 +                   ELAN_CAP_GEOM_MATCH(&ptr->node.cap,cap) &&
3868 +                   (ptr->node.cap.cap_railmask & cap->cap_railmask) == cap->cap_railmask) {
3869 +               
3870 +                       unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
3871 +
3872 +                       if ( ptr->attached[rail][ attached_index ].cb_func == NULL ) {
3873 +                               ELAN_DEBUG0(ELAN_DBG_CAP,"elanmod_detach_cap already removed \n");
3874 +                               
3875 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3876 +                               return  ESUCCESS;
3877 +                       }
3878 +
3879 +                       ptr->attached[rail][ attached_index ].cb_func = NULL;
3880 +                       ptr->attached[rail][ attached_index ].cb_args = (void *)0;
3881 +
3882 +                       ptr->node.attached--;
3883 +                       
3884 +                       ASSERT(ptr->node.attached >= 0);
3885 +
3886 +                       ELAN_DEBUG1(ELAN_DBG_CAP,"elanmod_detach_cap new attach count %d \n", ptr->node.attached);
3887 +
3888 +                       elan_destroy_cap_test(ptr);
3889 +
3890 +                       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: success\n"); 
3891 +
3892 +                       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3893 +                       return  ESUCCESS;
3894 +               }
3895 +       }
3896 +
3897 +       ELAN_DEBUG0(ELAN_DBG_CAP,"elan_detach_cap: failed to find\n");
3898 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3899 +       return  EINVAL;
3900 +}
3901 +
3902 +int
3903 +elan_cap_dump()
3904 +{
3905 +       struct list_head        * tmp;
3906 +       ELAN_CAP_NODE_STRUCT * ptr = NULL;
3907 +       
3908 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
3909 +       
3910 +       list_for_each(tmp, &elan_cap_list) {
3911 +               ptr = list_entry(tmp, ELAN_CAP_NODE_STRUCT , list);
3912 +
3913 +               ELAN_DEBUG2 (ELAN_DBG_ALL, "cap dump: owner %p type %x\n", ptr->node.owner, ptr->node.cap.cap_type);
3914 +                       
3915 +               ELAN_DEBUG5 (ELAN_DBG_ALL, "cap dump: LowNode %d   HighNode %d   LowContext %d   mycontext %d   highContext %d\n",
3916 +                            ptr->node.cap.cap_lownode , ptr->node.cap.cap_highnode,
3917 +                            ptr->node.cap.cap_lowcontext , ptr->node.cap.cap_mycontext, ptr->node.cap.cap_highcontext);
3918 +
3919 +       }
3920 +
3921 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
3922 +       return  ESUCCESS;
3923 +}
3924 +
3925 +int
3926 +elan_usercopy_attach(ELAN_CAPABILITY *cap, ELAN_CAP_NODE_STRUCT **node_ptr, void *handle, void *owner)
3927 +{
3928 +       struct list_head     *el;
3929 +
3930 +       /* mycontext must be set and correct */
3931 +       if ( ! ELAN_CAP_VALID_MYCONTEXT(cap))
3932 +               return -EINVAL;
3933 +       
3934 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3935 +
3936 +       /* Search all cap node structs looking for an exact match (including key) */
3937 +       list_for_each(el, &elan_cap_list) {
3938 +               ELAN_CAP_NODE_STRUCT *cap_ptr = list_entry(el, ELAN_CAP_NODE_STRUCT , list);
3939 +               
3940 +               /* is it an exact match */
3941 +               if (ELAN_CAP_MATCH(&cap_ptr->node.cap,cap) && cap_ptr->node.active) {
3942 +                       char space[127];
3943 +                       /* Work out which local process index we are */
3944 +                       unsigned int attached_index = cap->cap_mycontext - cap->cap_lowcontext;
3945 +
3946 +                       ELAN_DEBUG(ELAN_DBG_CAP, "usercopy_attach: %s\n",
3947 +                                  elan_capability_string(cap,space));
3948 +
3949 +                       ELAN_DEBUG(ELAN_DBG_CAP, 
3950 +                                  "usercopy_attach: cap_ptr %p handle %p owner %p idx %d\n", 
3951 +                                  cap_ptr, handle, owner, attached_index);
3952 +                       
3953 +                       /* Check we're not being called multiple times for the same local process */
3954 +                       if (cap_ptr->attached[0][attached_index].handle)
3955 +                       {
3956 +                               ELAN_DEBUG(ELAN_DBG_CAP, 
3957 +                                          "usercopy_attach: cap_ptr %p idx %d already attached handle %p owner %p\n",
3958 +                                          cap_ptr, attached_index, 
3959 +                                          cap_ptr->attached[0][attached_index].handle,
3960 +                                          cap_ptr->attached[0][attached_index].owner);
3961 +                                           
3962 +                               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3963 +                               return -EAGAIN;
3964 +                       }
3965 +
3966 +                       /* Reference count node struct */
3967 +                       cap_ptr->node.attached++;
3968 +                       
3969 +                       /* Stash our task handle/owner off the cap node array */
3970 +                       cap_ptr->attached[0][attached_index].handle = handle;
3971 +                       cap_ptr->attached[0][attached_index].owner = owner;
3972 +                       
3973 +                       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3974 +
3975 +                       /* Return node pointer to caller */
3976 +                       *node_ptr = cap_ptr;
3977 +
3978 +                       return ESUCCESS;
3979 +               }
3980 +       }
3981 +
3982 +       /* failed to match a cap */
3983 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
3984 +       return -EINVAL;
3985 +}
3986 +
3987 +int
3988 +elan_usercopy_detach(ELAN_CAP_NODE_STRUCT *cap_ptr, void *owner)
3989 +{
3990 +       int i;
3991 +
3992 +       /* NB: The usercopy code holds a read lock on this rwlock and
3993 +        * hence we will block here if exit_fs() gets called during a
3994 +        * copy to this process
3995 +        */
3996 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
3997 +
3998 +       /* Find this process in the attached task handle/owner array */
3999 +       for(i=0; i< ELAN_CAP_NUM_CONTEXTS((&cap_ptr->node.cap)); i++)
4000 +       {
4001 +               if (cap_ptr->attached[0][i].owner == owner)
4002 +               {
4003 +                       ELAN_DEBUG(ELAN_DBG_CAP,
4004 +                                  "usercopy_detach: cap_ptr %p handle %p owner %p id %d\n",
4005 +                                  cap_ptr, cap_ptr->attached[0][i].handle, owner, i);
4006 +
4007 +                       /* Clear our task handle/owner off the cap node array */
4008 +                       cap_ptr->attached[0][i].handle = NULL;
4009 +                       cap_ptr->attached[0][i].owner  = NULL;
4010 +                       
4011 +                       /* Reference count node struct */
4012 +                       cap_ptr->node.attached--;
4013 +
4014 +                       ASSERT(cap_ptr->node.attached >= 0);
4015 +                       
4016 +                       /* May need to destroy cap if reference count has hit zero */
4017 +                       elan_destroy_cap_test(cap_ptr);
4018 +
4019 +                       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4020 +
4021 +                       return ESUCCESS;
4022 +               }
4023 +       }
4024 +
4025 +       ELAN_DEBUG(ELAN_DBG_CAP, "usercopy_detach: cap_ptr %p[%d] failed owner %p\n", 
4026 +                  cap_ptr, cap_ptr->node.attached, owner);
4027 +
4028 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4029 +
4030 +       return -EINVAL;
4031 +}
4032 +
4033 +/* Returns the associated handle for the supplied ctxId process in the cap node */
4034 +/* Should be called holding a read lock on the elan_rwlock */
4035 +int
4036 +elan_usercopy_handle(ELAN_CAP_NODE_STRUCT *cap_ptr, int ctxId, void **handlep)
4037 +{
4038 +       int res = ESUCCESS;
4039 +       void *handle;
4040 +
4041 +       /* Sanity check argument */
4042 +       if (ctxId < 0 || ctxId >= ELAN_CAP_NUM_CONTEXTS(&(cap_ptr->node.cap)))
4043 +               return -EINVAL;
4044 +       
4045 +//     ELANMOD_RWLOCK_READ(&elan_rwlock);
4046 +
4047 +       /* Get the task handle for the remote process */
4048 +       if ((handle = cap_ptr->attached[0][ctxId].handle) == NULL)
4049 +               res = -EAGAIN;
4050 +       
4051 +//     ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
4052 +
4053 +       *handlep = handle;
4054 +
4055 +       return res;
4056 +}
4057 +
4058 +/*
4059 + * Local variables:
4060 + * c-file-style: "linux"
4061 + * End:
4062 + */
4063 diff -urN clean/drivers/net/qsnet/elan/capability_general.c linux-2.6.9/drivers/net/qsnet/elan/capability_general.c
4064 --- clean/drivers/net/qsnet/elan/capability_general.c   1969-12-31 19:00:00.000000000 -0500
4065 +++ linux-2.6.9/drivers/net/qsnet/elan/capability_general.c     2004-02-25 08:47:59.000000000 -0500
4066 @@ -0,0 +1,446 @@
4067 +/*
4068 + *    Copyright (c) 2003 by Quadrics Ltd.
4069 + * 
4070 + *    For licensing information please see the supplied COPYING file
4071 + *
4072 + */
4073 +
4074 +#ident "@(#)$Id: capability_general.c,v 1.10 2004/02/25 13:47:59 daniel Exp $"
4075 +/*      $Source: /cvs/master/quadrics/elanmod/shared/capability_general.c,v $ */
4076 +
4077 +#if defined(__KERNEL__)
4078 +
4079 +#include <qsnet/kernel.h>
4080 +
4081 +#else
4082 +
4083 +#include <stdlib.h>
4084 +#include <stdio.h>
4085 +#include <sys/param.h>
4086 +
4087 +#endif
4088 +
4089 +#include <elan/elanmod.h>
4090 +
4091 +
4092 +void
4093 +elan_nullcap (ELAN_CAPABILITY *cap)
4094 +{
4095 +       register int i;
4096 +
4097 +       for (i = 0; i < sizeof (cap->cap_userkey)/sizeof(cap->cap_userkey.key_values[0]); i++)
4098 +               cap->cap_userkey.key_values[i] = ELAN_CAP_UNINITIALISED;
4099 +    
4100 +       cap->cap_lowcontext  = ELAN_CAP_UNINITIALISED;
4101 +       cap->cap_highcontext = ELAN_CAP_UNINITIALISED;
4102 +       cap->cap_mycontext   = ELAN_CAP_UNINITIALISED;
4103 +       cap->cap_lownode     = ELAN_CAP_UNINITIALISED;
4104 +       cap->cap_highnode    = ELAN_CAP_UNINITIALISED;
4105 +       cap->cap_railmask    = ELAN_CAP_UNINITIALISED;
4106 +       cap->cap_type        = ELAN_CAP_UNINITIALISED;
4107 +       cap->cap_spare       = 0;
4108 +       cap->cap_version     = ELAN_CAP_VERSION_NUMBER;
4109 +       
4110 +       for (i = 0; i < sizeof (cap->cap_bitmap)/sizeof (cap->cap_bitmap[0]); i++)
4111 +               cap->cap_bitmap[i] = 0;
4112 +}
4113 +
4114 +char *
4115 +elan_capability_string (ELAN_CAPABILITY *cap, char *str)
4116 +{
4117 +       if (cap == NULL) 
4118 +               sprintf (str, "[-.-.-.-] cap = NULL\n");
4119 +       else
4120 +               sprintf (str, "[%x.%x.%x.%x] Version %x Type %x \n"
4121 +                        "Context %x.%x.%x Node %x.%x\n",
4122 +                        cap->cap_userkey.key_values[0], cap->cap_userkey.key_values[1],
4123 +                        cap->cap_userkey.key_values[2], cap->cap_userkey.key_values[3],
4124 +                        cap->cap_version, cap->cap_type, 
4125 +                        cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext,
4126 +                        cap->cap_lownode, cap->cap_highnode);
4127 +       
4128 +       return (str);
4129 +}
4130 +
4131 +ELAN_LOCATION
4132 +elan_vp2location (u_int process, ELAN_CAPABILITY *cap)
4133 +{
4134 +       ELAN_LOCATION location;
4135 +       int i, vp, node, context, nnodes, nctxs;
4136 +
4137 +       vp = 0;
4138 +
4139 +       location.loc_node    = ELAN_INVALID_NODE;
4140 +       location.loc_context = -1;
4141 +       
4142 +       nnodes = cap->cap_highnode - cap->cap_lownode + 1;
4143 +       nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
4144 +       
4145 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
4146 +       {
4147 +       case ELAN_CAP_TYPE_BLOCK:
4148 +               for (node = 0, i = 0; node < nnodes; node++)
4149 +               {
4150 +                       for (context = 0; context < nctxs; context++)
4151 +                       {
4152 +                               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
4153 +                               {
4154 +                                       if (vp == process)
4155 +                                       {
4156 +                                               /* Return relative indices within the capability box */
4157 +                                               location.loc_node    = node;
4158 +                                               location.loc_context = context;
4159 +
4160 +                                               return (location);
4161 +                                       }
4162 +                      
4163 +                                       vp++;
4164 +                               }
4165 +                       }
4166 +               }
4167 +               break;
4168 +       
4169 +       case ELAN_CAP_TYPE_CYCLIC:
4170 +               for (context = 0, i = 0; context < nctxs; context++)
4171 +               {
4172 +                       for (node = 0; node < nnodes; node++)
4173 +                       {
4174 +                               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
4175 +                               {
4176 +                                       if (vp == process)
4177 +                                       {
4178 +                                               location.loc_node    = node;
4179 +                                               location.loc_context = context;
4180 +
4181 +                                               return (location);
4182 +                                       }
4183 +                   
4184 +                                       vp++;
4185 +                               }
4186 +                       }
4187 +               }
4188 +               break;
4189 +       }
4190 +    
4191 +       return( location );
4192 +}
4193 +
4194 +int
4195 +elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap)
4196 +{
4197 +    int  vp, node, context, nnodes, nctxs;
4198 +
4199 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
4200 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
4201 +
4202 +    vp = 0;
4203 +    
4204 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
4205 +    {
4206 +    case ELAN_CAP_TYPE_BLOCK:
4207 +       for (node = 0 ; node < nnodes ; node++)
4208 +       {
4209 +           for (context = 0; context < nctxs; context++)
4210 +           {
4211 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, context + (node * nctxs)))
4212 +               {
4213 +                   if ((location.loc_node == node) && (location.loc_context == context))
4214 +                   {
4215 +                       /* Found it ! */
4216 +                       return( vp );
4217 +                   }
4218 +                   
4219 +                   vp++;
4220 +               }
4221 +           }
4222 +       }
4223 +       break;
4224 +       
4225 +    case ELAN_CAP_TYPE_CYCLIC:
4226 +       for (context = 0; context < nctxs; context++)
4227 +       {
4228 +           for (node = 0; node < nnodes; node++)
4229 +           {
4230 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (context * nnodes)))
4231 +               {
4232 +                   if ((location.loc_node == node) && (location.loc_context == context))
4233 +                   {
4234 +                       /* Found it ! */
4235 +                       return( vp );
4236 +                   }
4237 +                   
4238 +                   vp++;
4239 +               }
4240 +           }
4241 +       }
4242 +       break;
4243 +    }
4244 +    
4245 +    /* Failed to find it */
4246 +    return( -1 );
4247 +}
4248 +
4249 +/* Return the number of processes as described by a capability */
4250 +int
4251 +elan_nvps (ELAN_CAPABILITY *cap)
4252 +{
4253 +       int i, c, nbits = ELAN_CAP_BITMAPSIZE(cap);
4254 +
4255 +       if (cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)
4256 +               return (nbits);
4257 +
4258 +       for (i = 0, c = 0; i < nbits; i++)
4259 +               if (BT_TEST (cap->cap_bitmap, i))
4260 +                       c++;
4261 +
4262 +       return (c);
4263 +}
4264 +
4265 +/* Return the number of local processes on a given node as described by a capability */
4266 +int
4267 +elan_nlocal (int node, ELAN_CAPABILITY *cap)
4268 +{
4269 +       int vp;
4270 +       ELAN_LOCATION loc;
4271 +       int nLocal = 0;
4272 +
4273 +       for (vp = 0; vp < elan_nvps(cap); vp++)
4274 +       {
4275 +               loc = elan_vp2location(vp, cap);
4276 +               if (loc.loc_node == node)
4277 +                       nLocal++;
4278 +       }
4279 +
4280 +       return (nLocal);
4281 +}
4282 +
4283 +/* Return the maximum number of local processes on any node as described by a capability */
4284 +int
4285 +elan_maxlocal (ELAN_CAPABILITY *cap)
4286 +{
4287 +       return(cap->cap_highcontext - cap->cap_lowcontext + 1);
4288 +}
4289 +
4290 +/* Return the vps of the local processes on a given node as described by a capability */
4291 +int
4292 +elan_localvps (int node, ELAN_CAPABILITY *cap, int *vps, int size)
4293 +{
4294 +       int context;
4295 +       ELAN_LOCATION loc;
4296 +       int nLocal = 0;
4297 +    
4298 +       loc.loc_node = node;
4299 +
4300 +       for (context = 0; context < MIN(size, elan_maxlocal(cap)); context++)
4301 +       {
4302 +               loc.loc_context = context;
4303 +       
4304 +               /* Should return -1 if none found */
4305 +               if ( (vps[context] = elan_location2vp( loc, cap )) != -1)
4306 +                       nLocal++;
4307 +       }
4308 +
4309 +       return (nLocal);
4310 +}
4311 +
4312 +/* Return the number of rails that this capability utilises */
4313 +int
4314 +elan_nrails (ELAN_CAPABILITY *cap)
4315 +{
4316 +       int nrails = 0;
4317 +       unsigned int railmask;
4318 +
4319 +       /* Test for a multi-rail capability */
4320 +       if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
4321 +       {
4322 +               /* Grab rail bitmask from capability */
4323 +               railmask = cap->cap_railmask;
4324 +       
4325 +               while (railmask)
4326 +               {
4327 +                       if (railmask & 1)
4328 +                               nrails++;
4329 +           
4330 +                       railmask >>= 1;
4331 +               }
4332 +       }
4333 +       else 
4334 +               /* Default to just one rail */
4335 +               nrails = 1;
4336 +       
4337 +       return (nrails);
4338 +}
4339 +
4340 +/* Fill out an array giving the physical rail numbers utilised by a capability */
4341 +int
4342 +elan_rails (ELAN_CAPABILITY *cap, int *rails)
4343 +{
4344 +       int nrails, rail;
4345 +       unsigned int railmask;
4346 +
4347 +       /* Test for a multi-rail capability */
4348 +       if (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL)
4349 +       {
4350 +               /* Grab rail bitmask from capability */
4351 +               railmask = cap->cap_railmask;
4352 +       
4353 +               nrails = rail = 0;
4354 +               while (railmask)
4355 +               {
4356 +                       if (railmask & 1)
4357 +                               rails[nrails++] = rail;
4358 +           
4359 +                       rail++;
4360 +                       railmask >>= 1;
4361 +               }
4362 +       }
4363 +       else
4364 +       {
4365 +               /* Default to just one rail */
4366 +               rails[0] = 0;
4367 +               nrails = 1;
4368 +       }
4369 +
4370 +       return( nrails );
4371 +}
4372 +
4373 +int 
4374 +elan_cap_overlap(ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2)
4375 +{
4376 +       /* by context */
4377 +       if ( cap1->cap_highcontext < cap2->cap_lowcontext ) return (0);
4378 +       if ( cap1->cap_lowcontext  > cap2->cap_highcontext) return (0);
4379 +       
4380 +       /* by node */
4381 +       if ( cap1->cap_highnode < cap2->cap_lownode ) return (0);
4382 +       if ( cap1->cap_lownode  > cap2->cap_highnode) return (0);
4383 +
4384 +       /* by rail */
4385 +       /* they overlap if they have a rail in common */
4386 +       return (cap1->cap_railmask & cap2->cap_railmask);
4387 +}
4388 +
4389 +#if !defined(__KERNEL__)
4390 +
4391 +/* Fill out an array that hints at the best use of the rails on a
4392 + * per process basis. The library user can then decide whether or not
4393 + * to take this into account (e.g. TPORTs)
4394 + * All processes calling this fn will be returned the same information.
4395 + */
4396 +int
4397 +elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp)
4398 +{
4399 +       int i;
4400 +       int nrails = elan_nrails(cap);
4401 +       int maxlocal = elan_maxlocal(cap);
4402 +
4403 +       /* Test for a multi-rail capability */
4404 +       if (! (cap->cap_type & ELAN_CAP_TYPE_MULTI_RAIL))
4405 +       {
4406 +               /* Default to just one rail */
4407 +               for (i = 0; i < nvp; i++)
4408 +                       pref[i] = 0;
4409 +
4410 +               return( 0 );
4411 +       }
4412 +
4413 +       /*
4414 +        * We allocate rails on a per node basis sharing out the rails
4415 +        * equally amongst the local processes. However, if there is only
4416 +        * one process per node and multiple rails, then we use a different
4417 +        * algorithm where rails are allocated across all the processes in 
4418 +        * a round-robin fashion
4419 +        */
4420 +    
4421 +       if (maxlocal == 1)
4422 +       {
4423 +               /* Allocate rails in a round-robin manner */
4424 +               for (i = 0; i < nvp; i++)
4425 +                       *pref++ = i % nrails;
4426 +       }
4427 +       else
4428 +       {
4429 +               int node;
4430 +               int *vps;
4431 +               int nnodes = cap->cap_highnode - cap->cap_lownode + 1;
4432 +
4433 +               vps = (int *) malloc(sizeof(int)*maxlocal);
4434 +
4435 +               /* Grab the local process info for each node and allocate
4436 +                * rails to those vps on an equal basis
4437 +                */
4438 +               for (node = 0; node < nnodes; node++)
4439 +               {
4440 +                       int nlocal;
4441 +                       int pprail;
4442 +
4443 +                       /* Grab an array of local vps */
4444 +                       nlocal = elan_localvps(node, cap, vps, maxlocal);
4445 +           
4446 +                       /* Calculate the number of processes per rail */
4447 +                       if ((pprail = nlocal/nrails) == 0)
4448 +                               pprail = 1;
4449 +
4450 +                       /* Allocate processes to rails */
4451 +                       for (i = 0; i < nlocal; i++)
4452 +                       {
4453 +                               pref[vps[i]] = (i / pprail) % nrails;
4454 +                       }
4455 +               }
4456 +       
4457 +               free(vps);
4458 +       }
4459 +
4460 +       return( 0 );
4461 +}
4462 +
4463 +void 
4464 +elan_get_random_key(ELAN_USERKEY *key)
4465 +{
4466 +    int i;
4467 +    for (i = 0; i < sizeof(key->key_values) / sizeof(key->key_values[0]); i++)
4468 +       key->key_values[i] = lrand48();
4469 +}
4470 +
4471 +int elan_lowcontext(ELAN_CAPABILITY *cap)
4472 +{
4473 +    return(cap->cap_lowcontext);
4474 +}
4475 +
4476 +int elan_mycontext(ELAN_CAPABILITY *cap)
4477 +{
4478 +    return(cap->cap_mycontext);
4479 +}
4480 +
4481 +int elan_highcontext(ELAN_CAPABILITY *cap)
4482 +{
4483 +    return(cap->cap_highcontext);
4484 +}
4485 +
4486 +int elan_lownode(ELAN_CAPABILITY *cap)
4487 +{
4488 +    return(cap->cap_lownode);
4489 +}
4490 +
4491 +int elan_highnode(ELAN_CAPABILITY *cap)
4492 +{
4493 +    return(cap->cap_highnode);
4494 +}
4495 +
4496 +int elan_captype(ELAN_CAPABILITY *cap)
4497 +{
4498 +    return(cap->cap_type);
4499 +}
4500 +
4501 +int elan_railmask(ELAN_CAPABILITY *cap)
4502 +{
4503 +    return(cap->cap_railmask);
4504 +}
4505 +
4506 +#endif
4507 +
4508 +/*
4509 + * Local variables:
4510 + * c-file-style: "linux"
4511 + * End:
4512 + */
4513 diff -urN clean/drivers/net/qsnet/elan/device.c linux-2.6.9/drivers/net/qsnet/elan/device.c
4514 --- clean/drivers/net/qsnet/elan/device.c       1969-12-31 19:00:00.000000000 -0500
4515 +++ linux-2.6.9/drivers/net/qsnet/elan/device.c 2005-04-13 05:31:47.000000000 -0400
4516 @@ -0,0 +1,147 @@
4517 +/*
4518 + *    Copyright (c) 2003 by Quadrics Ltd.
4519 + * 
4520 + *    For licensing information please see the supplied COPYING file
4521 + *
4522 + */
4523 +
4524 +#ident "@(#)$Id: device.c,v 1.6 2005/04/13 09:31:47 addy Exp $"
4525 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/device.c,v $*/
4526 +
4527 +#include <qsnet/kernel.h>
4528 +#include <elan/elanmod.h>
4529 +
4530 +static LIST_HEAD(elan_dev_list);
4531 +
4532 +ELAN_DEV_STRUCT *
4533 +elan_dev_find (ELAN_DEV_IDX devidx)
4534 +{
4535 +       struct list_head   *tmp;
4536 +       ELAN_DEV_STRUCT *ptr=NULL;
4537 +
4538 +       list_for_each(tmp, &elan_dev_list) {
4539 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4540 +               if (ptr->devidx == devidx) 
4541 +                       return ptr;
4542 +               if (ptr->devidx > devidx)
4543 +                       return ERR_PTR(-ENXIO);
4544 +       }
4545 +       
4546 +       return ERR_PTR(-EINVAL);
4547 +}
4548 +
4549 +ELAN_DEV_STRUCT *
4550 +elan_dev_find_byrail (unsigned short deviceid, unsigned rail)
4551 +{
4552 +       struct list_head   *tmp;
4553 +       ELAN_DEV_STRUCT *ptr=NULL;
4554 +
4555 +       list_for_each(tmp, &elan_dev_list) {
4556 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4557 +
4558 +               ELAN_DEBUG5 (ELAN_DBG_ALL,"elan_dev_find_byrail devidx %d - %04x %04x,  %d %d \n", ptr->devidx, 
4559 +                            ptr->devinfo->dev_device_id, deviceid, ptr->devinfo->dev_rail, rail);
4560 +
4561 +               if (ptr->devinfo->dev_device_id == deviceid && ptr->devinfo->dev_rail == rail)
4562 +                       return ptr;
4563 +       }
4564 +       
4565 +       return NULL;
4566 +}
4567 +
4568 +ELAN_DEV_IDX
4569 +elan_dev_register (ELAN_DEVINFO *devinfo, ELAN_DEV_OPS *ops, void * user_data)
4570 +{
4571 +       ELAN_DEV_STRUCT *ptr;
4572 +       ELAN_DEV_IDX        devidx = 0;
4573 +       struct list_head   *tmp;
4574 +
4575 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
4576 +
4577 +       /* is it already registered */
4578 +       if ((ptr = elan_dev_find_byrail(devinfo->dev_device_id, devinfo->dev_rail)) != NULL) 
4579 +       {
4580 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4581 +               return EINVAL;
4582 +       }
4583 +
4584 +       /* find a free device idx */
4585 +       list_for_each (tmp, &elan_dev_list) {
4586 +               if (list_entry (tmp, ELAN_DEV_STRUCT, node)->devidx != devidx)
4587 +                       break;
4588 +               devidx++;
4589 +       }
4590 +
4591 +       /* create it and add */
4592 +       KMEM_ALLOC(ptr, ELAN_DEV_STRUCT *, sizeof(ELAN_DEV_STRUCT), 1);
4593 +       if (ptr == NULL)
4594 +       {
4595 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4596 +               return ENOMEM;
4597 +       }
4598 +
4599 +       ptr->devidx    = devidx;
4600 +       ptr->ops       = ops;
4601 +       ptr->devinfo   = devinfo;
4602 +       ptr->user_data = user_data;
4603 +
4604 +       /* insert this entry *before* the last entry we've found */
4605 +       list_add_tail(&ptr->node, tmp);
4606 +
4607 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4608 +       return  ESUCCESS;
4609 +}
4610 +
4611 +int
4612 +elan_dev_deregister (ELAN_DEVINFO *devinfo)
4613 +{
4614 +       ELAN_DEV_STRUCT *target;
4615 +
4616 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
4617 +
4618 +       if ((target = elan_dev_find_byrail (devinfo->dev_device_id, devinfo->dev_rail)) == NULL)
4619 +       {
4620 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4621 +               return  EINVAL;
4622 +       }
4623 +
4624 +       list_del(&target->node);
4625 +
4626 +       /* delete target entry */
4627 +       KMEM_FREE(target, sizeof(ELAN_DEV_STRUCT));
4628 +
4629 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4630 +       return  ESUCCESS;
4631 +}
4632 +
4633 +int
4634 +elan_dev_dump ()
4635 +{
4636 +       struct list_head   *tmp;
4637 +       ELAN_DEV_STRUCT *ptr=NULL;
4638 +
4639 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
4640 +
4641 +       list_for_each(tmp, &elan_dev_list) {
4642 +               ptr = list_entry(tmp, ELAN_DEV_STRUCT , node);
4643 +
4644 +               ELAN_DEBUG3 (ELAN_DBG_ALL,"dev dump: index %u rail %u elan%c\n", 
4645 +                            ptr->devidx, ptr->devinfo->dev_rail, '3' + ptr->devinfo->dev_device_id);
4646 +               ELAN_DEBUG5 (ELAN_DBG_ALL,"dev dump: Vid %x   Did %x  Rid %x  DR %d  DVal %x\n",
4647 +                            ptr->devinfo->dev_vendor_id,
4648 +                            ptr->devinfo->dev_device_id,
4649 +                            ptr->devinfo->dev_revision_id,
4650 +                            ptr->devinfo->dev_driver_version,
4651 +                            ptr->devinfo->dev_num_down_links_value);
4652 +
4653 +       }
4654 +
4655 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
4656 +       return ESUCCESS;
4657 +}
4658 +
4659 +/*
4660 + * Local variables:
4661 + * c-file-style: "linux"
4662 + * End:
4663 + */
4664 diff -urN clean/drivers/net/qsnet/elan/devinfo.c linux-2.6.9/drivers/net/qsnet/elan/devinfo.c
4665 --- clean/drivers/net/qsnet/elan/devinfo.c      1969-12-31 19:00:00.000000000 -0500
4666 +++ linux-2.6.9/drivers/net/qsnet/elan/devinfo.c        2005-04-13 05:31:47.000000000 -0400
4667 @@ -0,0 +1,78 @@
4668 +/*
4669 + *    Copyright (c) 2003 by Quadrics Ltd.
4670 + * 
4671 + *    For licensing information please see the supplied COPYING file
4672 + *
4673 + */
4674 +
4675 +#ident "@(#)$Id: devinfo.c,v 1.6 2005/04/13 09:31:47 addy Exp $"
4676 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.c,v $*/
4677 +
4678 +#include <qsnet/kernel.h>
4679 +#include <elan/elanmod.h>
4680 +
4681 +int 
4682 +elan_get_devinfo(ELAN_DEV_IDX devidx, ELAN_DEVINFO *devinfo)
4683 +{
4684 +       ELAN_DEV_STRUCT *target;
4685 +       int                 res;
4686 +
4687 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
4688 +
4689 +       target = elan_dev_find (devidx);
4690 +
4691 +       if (IS_ERR (target))
4692 +               res = PTR_ERR(target);
4693 +       else
4694 +       {
4695 +               copyout(target->devinfo, devinfo, sizeof(ELAN_DEVINFO));
4696 +               res = ESUCCESS;
4697 +       }
4698 +       
4699 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
4700 +       return res;
4701 +}
4702 +
4703 +int 
4704 +elan_get_position(ELAN_DEV_IDX devidx, ELAN_POSITION *position)
4705 +{
4706 +       ELAN_DEV_STRUCT *target;
4707 +       int                 res;
4708 +
4709 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
4710 +
4711 +       target = elan_dev_find(devidx);
4712 +
4713 +       if (IS_ERR (target))
4714 +               res = PTR_ERR(target);
4715 +       else
4716 +               res = target->ops->get_position(target->user_data, position);
4717 +       
4718 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
4719 +       return res;
4720 +}
4721 +
4722 +int 
4723 +elan_set_position(ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes)
4724 +{
4725 +       ELAN_DEV_STRUCT *target;
4726 +       int                 res;
4727 +
4728 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
4729 +
4730 +       target = elan_dev_find(devidx);
4731 +
4732 +       if (IS_ERR (target))
4733 +               res = PTR_ERR (target);
4734 +       else
4735 +               res = target->ops->set_position(target->user_data, nodeId, numNodes);
4736 +       
4737 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
4738 +       return res;
4739 +}
4740 +
4741 +/*
4742 + * Local variables:
4743 + * c-file-style: "linux"
4744 + * End:
4745 + */
4746 diff -urN clean/drivers/net/qsnet/elan/elanmod.c linux-2.6.9/drivers/net/qsnet/elan/elanmod.c
4747 --- clean/drivers/net/qsnet/elan/elanmod.c      1969-12-31 19:00:00.000000000 -0500
4748 +++ linux-2.6.9/drivers/net/qsnet/elan/elanmod.c        2005-04-13 05:31:47.000000000 -0400
4749 @@ -0,0 +1,149 @@
4750 +/*
4751 + *    Copyright (c) 2003 by Quadrics Ltd.
4752 + * 
4753 + *    For licensing information please see the supplied COPYING file
4754 + *
4755 + */
4756 +#ident "@(#)$Id: elanmod.c,v 1.12 2005/04/13 09:31:47 addy Exp $"
4757 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.c,v $*/
4758 +
4759 +#include <qsnet/kernel.h>
4760 +#include <elan/elanmod.h>
4761 +
4762 +ELANMOD_RWLOCK elan_rwlock;
4763 +
4764 +int 
4765 +elan_init()
4766 +{
4767 +       ELANMOD_RWLOCK_INIT(&elan_rwlock);
4768 +       return (ESUCCESS);
4769 +}
4770 +
4771 +int 
4772 +elan_fini()
4773 +{
4774 +       ELANMOD_RWLOCK_DESTROY(&elan_rwlock);
4775 +       return (ESUCCESS);
4776 +}
4777 +
4778 +int 
4779 +elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use)
4780 +{
4781 +       if (cap->cap_version != ELAN_CAP_VERSION_NUMBER)
4782 +       {
4783 +               ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->Version != ELAN_CAP_VERSION) %d %d\n", cap->cap_version, ELAN_CAP_VERSION_NUMBER);
4784 +               return (-EINVAL);
4785 +       }
4786 +       
4787 +       if (cap->cap_lowcontext == ELAN_CAP_UNINITIALISED || cap->cap_highcontext == ELAN_CAP_UNINITIALISED)
4788 +       {
4789 +               ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: LowContext %d    HighContext %d MyContext %d\n",
4790 +                            cap->cap_lowcontext , cap->cap_highcontext, cap->cap_mycontext);
4791 +               return (-EINVAL);
4792 +       }
4793 +       
4794 +       if (cap->cap_lowcontext > cap->cap_highcontext)
4795 +       {
4796 +               ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: (cap->cap_lowcontext > cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext);
4797 +               return (-EINVAL);
4798 +       }
4799 +       
4800 +       
4801 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
4802 +       {
4803 +       case ELAN_CAP_TYPE_BLOCK:
4804 +       case ELAN_CAP_TYPE_CYCLIC:
4805 +               if (position->pos_mode == ELAN_POS_UNKNOWN)
4806 +               {
4807 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: Position Unknown \n");
4808 +                       return (-EAGAIN);
4809 +               }
4810 +               
4811 +               if ( ! ( ELAN_USER_CONTEXT(cap->cap_lowcontext) && ELAN_USER_CONTEXT(cap->cap_highcontext)))
4812 +               {
4813 +                       ELAN_DEBUG4 (ELAN_DBG_VP, "elanmod_classify_cap:  USER_BASE_CONTEXT %d %d %d %d \n" ,  ELAN_USER_BASE_CONTEXT_NUM,cap->cap_lowcontext, cap->cap_highcontext ,ELAN_USER_TOP_CONTEXT_NUM);
4814 +                       return (-EINVAL);
4815 +               }
4816 +               if (cap->cap_lownode == ELAN_CAP_UNINITIALISED)
4817 +                       cap->cap_lownode = position->pos_nodeid;
4818 +               if (cap->cap_highnode == ELAN_CAP_UNINITIALISED)
4819 +                       cap->cap_highnode = position->pos_nodeid;
4820 +               
4821 +               if (cap->cap_lownode < 0 || cap->cap_highnode >= position->pos_nodes || cap->cap_lownode > cap->cap_highnode)
4822 +               {
4823 +                       ELAN_DEBUG3 ( ELAN_DBG_VP,"elanmod_classify_cap: low %d high %d pos %d \n" , cap->cap_lownode  ,cap->cap_highnode, position->pos_nodes);
4824 +                       
4825 +                       return (-EINVAL);
4826 +               }
4827 +               
4828 +               if ((cap->cap_highnode < position->pos_nodeid) || (cap->cap_lownode > position->pos_nodeid))
4829 +               {
4830 +                       ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: node not i range low %d high %d this %d\n",
4831 +                                    cap->cap_lownode, cap->cap_highnode, position->pos_nodeid);
4832 +                       return (-EINVAL);
4833 +               }
4834 +
4835 +               break;
4836 +       default:
4837 +               ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: cant decode type %x \n", cap->cap_type & ELAN_CAP_TYPE_MASK);
4838 +               return (-EINVAL);
4839 +
4840 +       }
4841 +
4842 +       switch (use)
4843 +       {
4844 +       case ELAN_USER_ATTACH:
4845 +       case ELAN_USER_DETACH:
4846 +               if (cap->cap_mycontext == ELAN_CAP_UNINITIALISED)
4847 +               {
4848 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext == ELAN_CAP_UNINITIALISED");
4849 +                       return (-EINVAL);
4850 +               }
4851 +       
4852 +               if ((cap->cap_mycontext != ELAN_CAP_UNINITIALISED) && 
4853 +                   (cap->cap_mycontext < cap->cap_lowcontext || cap->cap_mycontext > cap->cap_highcontext))
4854 +               {
4855 +                       ELAN_DEBUG3 (ELAN_DBG_VP, "elanmod_classify_cap: cap->cap_mycontext out of range %d %d %d \n", cap->cap_lowcontext,cap->cap_mycontext,cap->cap_highcontext);
4856 +                       return (-EINVAL);
4857 +               }   
4858 +               break;
4859 +
4860 +       case ELAN_USER_P2P:
4861 +               break;
4862 +
4863 +       case ELAN_USER_BROADCAST:
4864 +               if (! (cap->cap_type & ELAN_CAP_TYPE_BROADCASTABLE)) {
4865 +                       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: use ELAN_USER_BROADCAST but cap not ELAN_CAP_TYPE_BROADCASTABLE\n");
4866 +                       return (-EINVAL);
4867 +               }
4868 +               break;
4869 +
4870 +       default:
4871 +               ELAN_DEBUG1 (ELAN_DBG_VP, "elanmod_classify_cap: unknown use (%d)\n",use);
4872 +               return (-EINVAL);
4873 +       }
4874 +
4875 +
4876 +
4877 +       /* is any ctxt an rms one ?? */
4878 +       if (ELAN_RMS_CONTEXT(cap->cap_lowcontext) || ELAN_RMS_CONTEXT(cap->cap_highcontext))
4879 +       {
4880 +               /* so both low and high must be */
4881 +               if (!(ELAN_RMS_CONTEXT(cap->cap_lowcontext) && ELAN_RMS_CONTEXT(cap->cap_highcontext))) 
4882 +               {
4883 +                       ELAN_DEBUG2 (ELAN_DBG_VP, "elanmod_classify_cap: not rms ctxt %x %x\n",cap->cap_lowcontext,cap->cap_highcontext );
4884 +                       return (-EINVAL);
4885 +               }
4886 +               ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_RMS\n");
4887 +               return (ELAN_CAP_RMS);
4888 +       }
4889 +
4890 +       ELAN_DEBUG0 (ELAN_DBG_VP, "elanmod_classify_cap: returning ELAN_CAP_OK\n");
4891 +       return (ELAN_CAP_OK);
4892 +}
4893 +
4894 +/*
4895 + * Local variables:
4896 + * c-file-style: "linux"
4897 + * End:
4898 + */
4899 diff -urN clean/drivers/net/qsnet/elan/elanmod_linux.c linux-2.6.9/drivers/net/qsnet/elan/elanmod_linux.c
4900 --- clean/drivers/net/qsnet/elan/elanmod_linux.c        1969-12-31 19:00:00.000000000 -0500
4901 +++ linux-2.6.9/drivers/net/qsnet/elan/elanmod_linux.c  2005-09-07 10:35:03.000000000 -0400
4902 @@ -0,0 +1,544 @@
4903 +/*
4904 + *    Copyright (c) 2003 by Quadrics Ltd.
4905 + * 
4906 + *    For licensing information please see the supplied COPYING file
4907 + *
4908 + */
4909 +
4910 +#ident "@(#)$Id: elanmod_linux.c,v 1.23.2.6 2005/09/07 14:35:03 mike Exp $"
4911 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.c,v $*/
4912 +
4913 +#include <qsnet/kernel.h>
4914 +
4915 +#include <elan/elanmod.h>
4916 +#include <elan/elanmod_linux.h>
4917 +
4918 +#include <qsnet/module.h>
4919 +#include <linux/sysctl.h>
4920 +#include <linux/init.h>
4921 +
4922 +#include <qsnet/procfs_linux.h>
4923 +
4924 +MODULE_AUTHOR("Quadrics Ltd.");
4925 +MODULE_DESCRIPTION("Elan support module");
4926 +
4927 +MODULE_LICENSE("GPL");
4928 +
4929 +/* elanmod.c */
4930 +EXPORT_SYMBOL(elanmod_classify_cap);
4931 +
4932 +/* bitmap.c */
4933 +#include <elan/bitmap.h>
4934 +
4935 +EXPORT_SYMBOL(bt_freebit);
4936 +EXPORT_SYMBOL(bt_lowbit); 
4937 +EXPORT_SYMBOL(bt_nextbit);
4938 +EXPORT_SYMBOL(bt_copy);
4939 +EXPORT_SYMBOL(bt_zero); 
4940 +EXPORT_SYMBOL(bt_fill); 
4941 +EXPORT_SYMBOL(bt_cmp); 
4942 +EXPORT_SYMBOL(bt_intersect);
4943 +EXPORT_SYMBOL(bt_remove); 
4944 +EXPORT_SYMBOL(bt_add); 
4945 +EXPORT_SYMBOL(bt_spans);
4946 +EXPORT_SYMBOL(bt_subset);  
4947 +EXPORT_SYMBOL(bt_up);
4948 +EXPORT_SYMBOL(bt_down);
4949 +EXPORT_SYMBOL(bt_nbits);
4950 +
4951 +/* capability.c */
4952 +EXPORT_SYMBOL(elan_nullcap);
4953 +EXPORT_SYMBOL(elan_detach_cap);
4954 +EXPORT_SYMBOL(elan_attach_cap);
4955 +EXPORT_SYMBOL(elan_validate_map);
4956 +
4957 +/* stats.c */
4958 +EXPORT_SYMBOL(elan_stats_register);
4959 +EXPORT_SYMBOL(elan_stats_deregister);
4960 +
4961 +/* device.c */
4962 +EXPORT_SYMBOL(elan_dev_deregister);
4963 +EXPORT_SYMBOL(elan_dev_register);
4964 +
4965 +/* debug */
4966 +int  elan_debug_mode = QSNET_DEBUG_BUFFER; 
4967 +int  elan_debug_mask;
4968 +
4969 +static struct proc_dir_entry *elan_procfs_root;
4970 +
4971 +extern void elan_procfs_init(void);
4972 +extern void elan_procfs_fini(void);
4973 +
4974 +static int elan_open    (struct inode *ino, struct file *fp);
4975 +static int elan_release (struct inode *ino, struct file *fp);
4976 +static int elan_ioctl   (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
4977 +
4978 +static int elan_user_open    (struct inode *ino, struct file *fp);
4979 +static int elan_user_release (struct inode *ino, struct file *fp);
4980 +static int elan_user_ioctl (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
4981 +
4982 +static struct file_operations elan_fops = 
4983 +{
4984 +       ioctl:   elan_ioctl,
4985 +       open:    elan_open,
4986 +       release: elan_release,
4987 +};
4988 +
4989 +static struct file_operations elan_user_fops = 
4990 +{
4991 +       ioctl:   elan_user_ioctl,
4992 +       open:    elan_user_open,
4993 +       release: elan_user_release,
4994 +};
4995 +
4996 +static int __init elan_start(void)
4997 +{
4998 +       int res;
4999 +
5000 +       elan_procfs_init(); 
5001 +
5002 +       if ((res = elan_init()) != ESUCCESS)
5003 +       {
5004 +               elan_procfs_fini();
5005 +               return (-res);
5006 +       }
5007 +
5008 +       return (0);
5009 +}
5010 +
5011 +static void __exit elan_exit(void)
5012 +{
5013 +       elan_fini();
5014 +       elan_procfs_fini();
5015 +}
5016 +
5017 +
5018 +/* Declare the module init and exit functions */
5019 +void
5020 +elan_procfs_init()
5021 +{
5022 +       struct proc_dir_entry  *p;
5023 +       
5024 +       elan_procfs_root = proc_mkdir("elan",   qsnet_procfs_root);
5025 +
5026 +       if (elan_procfs_root == NULL)
5027 +               return;
5028 +       
5029 +       qsnet_proc_register_hex(elan_procfs_root, "debug_mask", &elan_debug_mask, 0);
5030 +       qsnet_proc_register_hex(elan_procfs_root, "debug_mode", &elan_debug_mode, 0);
5031 +
5032 +       if ((p = create_proc_entry ("ioctl", 0, elan_procfs_root)) != NULL)
5033 +       {
5034 +               p->proc_fops = &elan_fops;
5035 +               p->data      = 0;
5036 +               p->owner     = THIS_MODULE;
5037 +       }   
5038 +
5039 +       /* user entry point */
5040 +       if ((p = create_proc_entry ("user", 0, elan_procfs_root)) != NULL)
5041 +       {
5042 +               p->proc_fops = &elan_user_fops;
5043 +               p->data      = 0;
5044 +               p->owner     = THIS_MODULE;
5045 +       }   
5046 +}
5047 +
5048 +void
5049 +elan_procfs_fini()
5050 +{
5051 +       if (elan_procfs_root == NULL)
5052 +               return;
5053 +
5054 +       remove_proc_entry ("debug_mask", elan_procfs_root);
5055 +       remove_proc_entry ("debug_mode", elan_procfs_root);
5056 +       
5057 +       remove_proc_entry ("ioctl",   elan_procfs_root); 
5058 +
5059 +       /* remove user entry point */
5060 +       remove_proc_entry ("user",   elan_procfs_root); 
5061 +       
5062 +       remove_proc_entry ("elan",   qsnet_procfs_root);
5063 +}
5064 +
5065 +module_init(elan_start);
5066 +module_exit(elan_exit);
5067 +
5068 +static int
5069 +elan_open (struct inode *inode, struct file *fp)
5070 +{
5071 +       MOD_INC_USE_COUNT;
5072 +       fp->private_data = NULL;
5073 +       return (0);
5074 +}
5075 +
5076 +static int
5077 +elan_release (struct inode *inode, struct file *fp)
5078 +{
5079 +       /* mark all caps owned by fp to be destroyed */
5080 +       elan_destroy_cap(fp,NULL);
5081 +
5082 +       MOD_DEC_USE_COUNT;
5083 +       return (0);
5084 +}
5085 +
5086 +static int 
5087 +elan_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
5088 +{
5089 +       int rep = 0;
5090 +
5091 +       switch (cmd) 
5092 +       {
5093 +       case ELANCTRL_STATS_GET_NEXT :
5094 +       {
5095 +               ELANCTRL_STATS_GET_NEXT_STRUCT args;
5096 +
5097 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_NEXT_STRUCT)))
5098 +                       return (-EFAULT); 
5099 +
5100 +               /* uses copyin/copyout */
5101 +               if (elan_stats_get_next_index(args.statidx, args.next_statidx) != 0 ) 
5102 +                       return (-EINVAL);       
5103 +
5104 +               break;
5105 +       }
5106 +       case ELANCTRL_STATS_FIND_INDEX :
5107 +       {
5108 +               ELANCTRL_STATS_FIND_INDEX_STRUCT args;
5109 +               char block_name[ELAN_STATS_NAME_MAX_LEN+1];
5110 +               int res;
5111 +
5112 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_FIND_INDEX_STRUCT)))
5113 +                       return (-EFAULT); 
5114 +
5115 +               res = strncpy_from_user (block_name, args.block_name, sizeof (block_name));
5116 +
5117 +               if (res == 0 || res  == sizeof (block_name))
5118 +                       return -ERANGE;
5119 +               if (res < 0)
5120 +                       return res;
5121 +
5122 +               /* uses copyin/copyout */
5123 +               if (elan_stats_find_index(block_name, args.statidx, args.num_entries) != 0 ) 
5124 +                       return (-EINVAL);       
5125 +
5126 +               break;
5127 +       }
5128 +       case ELANCTRL_STATS_GET_BLOCK_INFO :
5129 +       {
5130 +               ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT args;
5131 +               
5132 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT)))
5133 +                       return (-EFAULT);
5134 +
5135 +               /* uses copyin/copyout */
5136 +               if (elan_stats_get_block_info(args.statidx, args.block_name, args.num_entries) != 0 ) 
5137 +                       return (-EINVAL);
5138 +               break;          
5139 +       }
5140 +       case ELANCTRL_STATS_GET_INDEX_NAME :
5141 +       {
5142 +               ELANCTRL_STATS_GET_INDEX_NAME_STRUCT args;
5143 +               
5144 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_INDEX_NAME_STRUCT)))
5145 +                       return (-EFAULT);
5146 +
5147 +               /* uses copyin/copyout */
5148 +               if (elan_stats_get_index_name(args.statidx, args.index, args.name) != 0 )
5149 +                       return (-EINVAL);
5150 +               break;
5151 +       }
5152 +       case ELANCTRL_STATS_CLEAR_BLOCK :
5153 +       {
5154 +               ELANCTRL_STATS_CLEAR_BLOCK_STRUCT args;
5155 +               
5156 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_CLEAR_BLOCK_STRUCT)))
5157 +                       return (-EFAULT);
5158 +
5159 +               /* statidx is not a pointer */
5160 +               if (elan_stats_clear_block(args.statidx) != 0 )
5161 +                       return (-EINVAL);
5162 +               break;
5163 +       }
5164 +       case ELANCTRL_STATS_GET_BLOCK :
5165 +       {
5166 +               ELANCTRL_STATS_GET_BLOCK_STRUCT args;
5167 +               
5168 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_STATS_GET_BLOCK_STRUCT)))
5169 +                       return (-EFAULT);
5170 +
5171 +               /* uses copyin/copyout */
5172 +               if (elan_stats_get_block(args.statidx, args.entries, args.values) != 0 )
5173 +                       return (-EINVAL);
5174 +               break;
5175 +       }
5176 +       case ELANCTRL_GET_DEVINFO :
5177 +       {
5178 +               ELANCTRL_GET_DEVINFO_STRUCT args;
5179 +               
5180 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_DEVINFO_STRUCT)))
5181 +                       return (-EFAULT);
5182 +
5183 +               /* uses copyin/copyout */
5184 +               if (elan_get_devinfo(args.devidx, args.devinfo) != 0 )
5185 +                       return (-EINVAL);
5186 +               break;          
5187 +       }
5188 +       case ELANCTRL_GET_POSITION :
5189 +       {
5190 +               ELANCTRL_GET_POSITION_STRUCT args;
5191 +               
5192 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_POSITION_STRUCT)))
5193 +                       return (-EFAULT); 
5194 +
5195 +               /* uses copyin/copyout */
5196 +               if (elan_get_position(args.devidx, args.position) != 0 )
5197 +                       return (-EINVAL);
5198 +               break;          
5199 +       }
5200 +       case ELANCTRL_SET_POSITION :
5201 +       {
5202 +               ELANCTRL_SET_POSITION_STRUCT args;
5203 +               
5204 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_SET_POSITION_STRUCT)))
5205 +                       return (-EFAULT);
5206 +
5207 +               /* uses copyin/copyout */
5208 +               if (elan_set_position(args.devidx, args.nodeId, args.numNodes) != 0 )
5209 +                       return (-EINVAL);       
5210 +               break;          
5211 +       }
5212 +       case ELANCTRL_CREATE_CAP  :
5213 +       {
5214 +               ELANCTRL_CREATE_CAP_STRUCT *args;
5215 +
5216 +               /* get space for args */
5217 +               KMEM_ALLOC(args, ELANCTRL_CREATE_CAP_STRUCT *, sizeof(ELANCTRL_CREATE_CAP_STRUCT), 1);
5218 +               if (args == NULL)
5219 +                       return(-ENOMEM);        
5220 +
5221 +               /* copy them */
5222 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_CAP_STRUCT)))
5223 +                       rep = (-EFAULT); /* fall through so args is freed below (a direct return here leaked args) */
5224 +               else 
5225 +               {
5226 +                       if (((rep = elan_validate_cap(&args->cap)) != 0) || ((rep = elan_create_cap(fp,&args->cap)) != 0)) 
5227 +                               rep = (-rep);
5228 +               }
5229 +
5230 +               /* free the space */
5231 +               KMEM_FREE(args, sizeof(ELANCTRL_CREATE_CAP_STRUCT));
5232 +
5233 +               break;          
5234 +       }
5235 +       case ELANCTRL_DESTROY_CAP  :
5236 +       {
5237 +               ELANCTRL_DESTROY_CAP_STRUCT *args;
5238 +
5239 +               /* get space for args */
5240 +               KMEM_ALLOC(args, ELANCTRL_DESTROY_CAP_STRUCT *, sizeof(ELANCTRL_DESTROY_CAP_STRUCT), 1);
5241 +               if (args == NULL)
5242 +                       return(-ENOMEM);        
5243 +
5244 +               /* copy them */
5245 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_CAP_STRUCT)))
5246 +                       rep = (-EFAULT);
5247 +               else 
5248 +               {
5249 +                       if (elan_destroy_cap(fp, &args->cap) != 0 )
5250 +                               rep = (-EINVAL);
5251 +               }
5252 +
5253 +               /* free the space */
5254 +               KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_CAP_STRUCT));
5255 +
5256 +               break;          
5257 +       }
5258 +       case ELANCTRL_CREATE_VP  :
5259 +       {
5260 +               ELANCTRL_CREATE_VP_STRUCT *args;
5261 +
5262 +               /* get space for args */
5263 +               KMEM_ALLOC(args, ELANCTRL_CREATE_VP_STRUCT *, sizeof(ELANCTRL_CREATE_VP_STRUCT), 1);
5264 +               if (args == NULL)
5265 +                       return(-ENOMEM);        
5266 +
5267 +               /* copy them */
5268 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_CREATE_VP_STRUCT)))
5269 +                       rep = (-EFAULT); /* fall through so args is freed below (a direct return here leaked args) */
5270 +               else
5271 +               {
5272 +                       if ((elan_validate_cap( &args->map) != 0) || (elan_create_vp(fp, &args->cap, &args->map) != 0 )) /* NOTE(review): validating args->map with elan_validate_cap - should this be &args->cap (or elan_validate_map)? confirm */
5273 +                               rep = (-EINVAL);        
5274 +               }
5275 +
5276 +               KMEM_FREE(args, sizeof(ELANCTRL_CREATE_VP_STRUCT ));
5277 +
5278 +               break;          
5279 +       }
5280 +       case ELANCTRL_DESTROY_VP  :
5281 +       {
5282 +               ELANCTRL_DESTROY_VP_STRUCT *args;
5283 +
5284 +               /* get space for args */
5285 +               KMEM_ALLOC(args, ELANCTRL_DESTROY_VP_STRUCT *, sizeof(ELANCTRL_DESTROY_VP_STRUCT), 1);
5286 +               if (args == NULL)
5287 +                       return(-ENOMEM);        
5288 +               
5289 +               /* copy them */
5290 +               if (copy_from_user (args, (void *) arg, sizeof (ELANCTRL_DESTROY_VP_STRUCT)))
5291 +                       rep = (-EFAULT);
5292 +               else 
5293 +               {
5294 +                       if (elan_destroy_vp(fp, &args->cap, &args->map) != 0 )
5295 +                               rep = (-EINVAL);        
5296 +               }
5297 +
5298 +               KMEM_FREE(args, sizeof(ELANCTRL_DESTROY_VP_STRUCT ));
5299 +
5300 +               break;          
5301 +       }
5302 +
5303 +       case ELANCTRL_GET_CAPS  :
5304 +       {
5305 +               ELANCTRL_GET_CAPS_STRUCT args;
5306 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_GET_CAPS_STRUCT)))
5307 +                       return (-EFAULT);
5308 +
5309 +               /* uses copyin/copyout */
5310 +               if (elan_get_caps(args.number_of_results, args.array_size, args.caps) != 0 )
5311 +                       return (-EINVAL);
5312 +               break;          
5313 +       }
5314 +       case ELANCTRL_DEBUG_DUMP :
5315 +       {
5316 +               elan_cap_dump();
5317 +               elan_dev_dump();
5318 +
5319 +               break;
5320 +       }
5321 +       case ELANCTRL_DEBUG_BUFFER :
5322 +       {
5323 +               ELANCTRL_DEBUG_BUFFER_STRUCT args;
5324 +
5325 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
5326 +                       return (-EFAULT);
5327 +
5328 +               /* uses copyin/copyout */
5329 +               if ((args.size = qsnet_debug_buffer (args.buffer, args.size)) != -1 &&
5330 +                   copy_to_user ((void *) arg, &args, sizeof (ELANCTRL_DEBUG_BUFFER_STRUCT)))
5331 +                       return (-EFAULT);
5332 +               break;
5333 +       }
5334 +       default:
5335 +               return (-EINVAL);
5336 +               break;
5337 +       }
5338 +
5339 +       return (rep);
5340 +}
5341 +
5342 +
5343 +static int
5344 +elan_user_open (struct inode *inode, struct file *fp)
5345 +{
5346 +       MOD_INC_USE_COUNT;
5347 +       fp->private_data = NULL;
5348 +       return (0);
5349 +}
5350 +
5351 +static int
5352 +elan_user_release (struct inode *inode, struct file *fp)
5353 +{
5354 +       struct elan_cap_node_struct *cap_ptr = (struct elan_cap_node_struct *)fp->private_data;
5355 +
5356 +       if (cap_ptr) {
5357 +               /* Remove this process from usercopy system */
5358 +               /* GNAT 7498: New to pass in a common owner pointer */
5359 +               if (elan_usercopy_detach (cap_ptr, fp) == 0)
5360 +                       fp->private_data = NULL;
5361 +       }
5362 +       
5363 +       MOD_DEC_USE_COUNT;
5364 +       return (0);
5365 +}
5366 +
5367 +static int 
5368 +elan_user_ioctl (struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
5369 +{
5370 +       int rep = 0;
5371 +#if !defined(NO_MMPUT) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
5372 +       struct elan_cap_node_struct *cap_ptr = (struct elan_cap_node_struct *)fp->private_data;
5373 +#endif
5374 +
5375 +       switch (cmd) 
5376 +       {
5377 +#if !defined(NO_MMPUT) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9)
5378 +       case ELANCTRL_USERCOPY_ATTACH:
5379 +       {
5380 +               ELANCTRL_USERCOPY_ATTACH_STRUCT args;
5381 +               
5382 +               /* Are we already attached ? */
5383 +               if (cap_ptr != NULL)
5384 +                       return -EAGAIN;
5385 +               
5386 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_USERCOPY_ATTACH_STRUCT)))
5387 +                       return -EFAULT; 
5388 +       
5389 +               /* Lookup the associated cap node and can check we are allowed to
5390 +                * access it using the supplied capability. If allowed, then associate 
5391 +                * our task with that cap node
5392 +                * We also reference count it as we then hang it off the file pointer
5393 +                */
5394 +               /* GNAT 7498: New to pass in a common owner pointer */
5395 +               if ((rep = elan_usercopy_attach(&args.cap, &cap_ptr, current, fp)) < 0)
5396 +                       return -EAGAIN;
5397 +               
5398 +               /* Hang cap node off file pointer for future usercopy ioctls */
5399 +               fp->private_data = (void *) cap_ptr;
5400 +
5401 +               break;  
5402 +       }
5403 +       case ELANCTRL_USERCOPY_DETACH:
5404 +       {
5405 +               /* Detach process */
5406 +               if (cap_ptr) {
5407 +                       /* Remove this process from usercopy system */
5408 +                       /* GNAT 7498: New to pass in a common owner pointer */
5409 +                       if ((rep = elan_usercopy_detach (cap_ptr, fp)) == 0)
5410 +                               fp->private_data = NULL;
5411 +               }
5412 +               else
5413 +                       rep = -EINVAL;
5414 +               
5415 +               break;  
5416 +       }
5417 +       case ELANCTRL_USERCOPY:
5418 +       {
5419 +               ELANCTRL_USERCOPY_STRUCT args;
5420 +               
5421 +               /* Check that we have previously successfully attached */
5422 +               if (cap_ptr == NULL)
5423 +                       return -EAGAIN;
5424 +               
5425 +               if (copy_from_user (&args, (void *) arg, sizeof (ELANCTRL_USERCOPY_STRUCT)))
5426 +                       return (-EFAULT); 
5427 +               
5428 +               /* Perform user-to-user copy */
5429 +               rep = elan_usercopy(args.remote, args.local, args.len, args.write, args.ctxId, cap_ptr);
5430 +
5431 +               break;
5432 +       }
5433 +#endif /* !defined(NO_MMPUT) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9) */
5434 +       default:
5435 +               return (-EINVAL);
5436 +               break;
5437 +       }
5438 +
5439 +       return (rep);
5440 +}
5441 +
5442 +/*
5443 + * Local variables:
5444 + * c-file-style: "linux"
5445 + * End:
5446 + */
5447 diff -urN clean/drivers/net/qsnet/elan/Makefile linux-2.6.9/drivers/net/qsnet/elan/Makefile
5448 --- clean/drivers/net/qsnet/elan/Makefile       1969-12-31 19:00:00.000000000 -0500
5449 +++ linux-2.6.9/drivers/net/qsnet/elan/Makefile 2005-10-10 17:47:30.000000000 -0400
5450 @@ -0,0 +1,15 @@
5451 +#
5452 +# Makefile for Quadrics QsNet
5453 +#
5454 +# Copyright (c) 2002-2004 Quadrics Ltd
5455 +#
5456 +# File: drivers/net/qsnet/elan/Makefile
5457 +#
5458 +
5459 +
5460 +#
5461 +
5462 +obj-$(CONFIG_QSNET)    += elan.o
5463 +elan-objs      := elanmod.o device.o stats.o devinfo.o capability.o usercopy.o elanmod_linux.o capability_general.o bitmap.o
5464 +
5465 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
5466 diff -urN clean/drivers/net/qsnet/elan/Makefile.conf linux-2.6.9/drivers/net/qsnet/elan/Makefile.conf
5467 --- clean/drivers/net/qsnet/elan/Makefile.conf  1969-12-31 19:00:00.000000000 -0500
5468 +++ linux-2.6.9/drivers/net/qsnet/elan/Makefile.conf    2005-09-07 10:39:36.000000000 -0400
5469 @@ -0,0 +1,10 @@
5470 +# Flags for generating QsNet Linux Kernel Makefiles
5471 +MODNAME                =       elan.o
5472 +MODULENAME     =       elan
5473 +KOBJFILES      =       elanmod.o device.o stats.o devinfo.o capability.o usercopy.o elanmod_linux.o capability_general.o bitmap.o
5474 +EXPORT_KOBJS   =       elanmod_linux.o 
5475 +CONFIG_NAME    =       CONFIG_QSNET
5476 +SGALFC         =       
5477 +# EXTRALINES START
5478 +
5479 +# EXTRALINES END
5480 diff -urN clean/drivers/net/qsnet/elan/quadrics_version.h linux-2.6.9/drivers/net/qsnet/elan/quadrics_version.h
5481 --- clean/drivers/net/qsnet/elan/quadrics_version.h     1969-12-31 19:00:00.000000000 -0500
5482 +++ linux-2.6.9/drivers/net/qsnet/elan/quadrics_version.h       2005-09-07 10:39:49.000000000 -0400
5483 @@ -0,0 +1 @@
5484 +#define QUADRICS_VERSION "5.11.3qsnet"
5485 diff -urN clean/drivers/net/qsnet/elan/stats.c linux-2.6.9/drivers/net/qsnet/elan/stats.c
5486 --- clean/drivers/net/qsnet/elan/stats.c        1969-12-31 19:00:00.000000000 -0500
5487 +++ linux-2.6.9/drivers/net/qsnet/elan/stats.c  2005-04-13 05:31:47.000000000 -0400
5488 @@ -0,0 +1,277 @@
5489 +/*
5490 + *    Copyright (c) 2003 by Quadrics Ltd.
5491 + * 
5492 + *    For licensing information please see the supplied COPYING file
5493 + *
5494 + */
5495 +
5496 +#ident "@(#)$Id: stats.c,v 1.7 2005/04/13 09:31:47 addy Exp $"
5497 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/stats.c,v $*/
5498 +
5499 +#include <qsnet/kernel.h>
5500 +#include <elan/elanmod.h>
5501 +
5502 +static LIST_HEAD(elan_stats_list);
5503 +static ELAN_STATS_IDX elan_next_statidx=0;
5504 +
5505 +ELAN_STATS_STRUCT *
5506 +elan_stats_find(ELAN_STATS_IDX statidx)
5507 +{
5508 +       struct list_head     *tmp;
5509 +       ELAN_STATS_STRUCT *ptr=NULL;
5510 +
5511 +       list_for_each(tmp, &elan_stats_list) {
5512 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
5513 +               if ( ptr->statidx == statidx ) 
5514 +                       return ptr;
5515 +       }
5516 +
5517 +       ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find failed %d\n", statidx);    
5518 +       return NULL;
5519 +}
5520 +
5521 +ELAN_STATS_STRUCT *
5522 +elan_stats_find_by_name(caddr_t block_name)
5523 +{
5524 +       struct list_head     *tmp;
5525 +       ELAN_STATS_STRUCT *ptr=NULL;
5526 +
5527 +       list_for_each(tmp, &elan_stats_list)    {
5528 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
5529 +               if (!strcmp(ptr->block_name, block_name)) 
5530 +               {
5531 +                       ELAN_DEBUG3 (ELAN_DBG_CTRL, "elan_stats_find_by_name found %s (%d,%d)\n", block_name, ptr->statidx, ptr->num_entries);  
5532 +                       return ptr;
5533 +               }
5534 +       }
5535 +
5536 +       ELAN_DEBUG1 (ELAN_DBG_CTRL, "elan_stats_find_by_name failed %s\n", block_name);
5537 +       return NULL;
5538 +}
5539 +
5540 +ELAN_STATS_STRUCT *
5541 +elan_stats_find_next(ELAN_STATS_IDX statidx)
5542 +{
5543 +       struct list_head     *tmp;
5544 +       ELAN_STATS_STRUCT *ptr=NULL;
5545 +
5546 +       list_for_each(tmp, &elan_stats_list) {
5547 +               ptr = list_entry(tmp, ELAN_STATS_STRUCT , node);
5548 +         
5549 +               if ( ptr->statidx > statidx ) 
5550 +                       return ptr;       
5551 +       }       
5552 +
5553 +       return NULL;
5554 +}
5555 +
5556 +int 
5557 +elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_block)
5558 +{
5559 +       ELAN_STATS_STRUCT *target;
5560 +       ELAN_STATS_IDX        next = 0;
5561 +
5562 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
5563 +
5564 +       if ((target = elan_stats_find_next(statidx)) != NULL)
5565 +               next = target->statidx;
5566 +
5567 +       copyout(&next, next_block, sizeof(ELAN_STATS_IDX) );
5568 +
5569 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5570 +       return 0;
5571 +}
5572 +
5573 +int 
5574 +elan_stats_find_index  (caddr_t  block_name, ELAN_STATS_IDX *statidx,  uint *num_entries)
5575 +
5576 +{
5577 +       ELAN_STATS_STRUCT *target;
5578 +       ELAN_STATS_IDX        index   = 0;
5579 +       uint                  entries = 0;
5580 +
5581 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
5582 +
5583 +       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_find_index %s \n", block_name);
5584 +
5585 +       if ((target = elan_stats_find_by_name(block_name)) != NULL)
5586 +       {
5587 +               index   = target->statidx;
5588 +               entries = target->num_entries;
5589 +       }
5590 +
5591 +       ELAN_DEBUG3(ELAN_DBG_CTRL, "elan_stats_find_index found %d %d (target=%p)\n", index, entries, target);
5592 +
5593 +       copyout(&index,   statidx,     sizeof(ELAN_STATS_IDX));
5594 +       copyout(&entries, num_entries, sizeof(uint));
5595 +
5596 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5597 +       return  ESUCCESS;
5598 +}
5599 +
5600 +int 
5601 +elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t  block_name, uint *num_entries)
5602 +{
5603 +       ELAN_STATS_STRUCT *target;
5604 +       int                   res=EINVAL;
5605 +
5606 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
5607 +
5608 +       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_block_info statidx %d\n",statidx);
5609 +
5610 +       if ((target = elan_stats_find(statidx)) != NULL)
5611 +       {
5612 +               ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_block_info name %s entries %d\n",block_name, *num_entries);
5613 +               
5614 +               copyout( target->block_name,  block_name,  ELAN_STATS_NAME_MAX_LEN);
5615 +               copyout(&target->num_entries, num_entries, sizeof(uint));
5616 +
5617 +               res = ESUCCESS;
5618 +       }
5619 +
5620 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5621 +       return res;
5622 +}
5623 +
5624 +int 
5625 +elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name)
5626 +{
5627 +       ELAN_STATS_STRUCT *target;
5628 +       int                   res=EINVAL;
5629 +
5630 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
5631 +
5632 +       ELAN_DEBUG2(ELAN_DBG_CTRL, "elan_stats_get_index_name statidx %d index %d\n",statidx, index);
5633 +
5634 +       if ((target = elan_stats_find(statidx)) != NULL)
5635 +       {
5636 +               if ( target->ops->elan_stats_get_name== NULL) 
5637 +               {
5638 +                       ELAN_DEBUG0(ELAN_DBG_CTRL, "elan_stats_get_index_name no callback\n");  
5639 +                       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5640 +                       return  res;
5641 +               }
5642 +
5643 +               if ((res = target->ops->elan_stats_get_name(target->arg, index, name)) == 0)
5644 +                       ELAN_DEBUG1(ELAN_DBG_CTRL, "elan_stats_get_index_name name %s\n",name); 
5645 +
5646 +       }
5647 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5648 +       return  res;
5649 +}
5650 +
5651 +int 
5652 +elan_stats_get_block (ELAN_STATS_IDX statidx, uint entries, ulong *values)
5653 +{
5654 +       ELAN_STATS_STRUCT *target;
5655 +       int                   res=EINVAL;
5656 +
5657 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
5658 +
5659 +       
5660 +       if ((target = elan_stats_find(statidx)) != NULL)
5661 +       {
5662 +               if ( target->ops->elan_stats_get_block == NULL) 
5663 +               {
5664 +                       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5665 +                       return  res;
5666 +               }
5667 +
5668 +               res = target->ops->elan_stats_get_block(target->arg, entries, values);
5669 +       }
5670 +
5671 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5672 +       return  res;
5673 +}
5674 +
5675 +int 
5676 +elan_stats_clear_block (ELAN_STATS_IDX statidx)
5677 +{
5678 +       ELAN_STATS_STRUCT *target;
5679 +       int                   res=EINVAL;
5680 +
5681 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
5682 +
5683 +       if ((target = elan_stats_find(statidx)) != NULL)
5684 +       {
5685 +               if ( target->ops->elan_stats_clear_block == NULL) 
5686 +               {
5687 +                       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
5688 +                       return  res;
5689 +               }
5690 +       
5691 +               res = target->ops->elan_stats_clear_block(target->arg);
5692 +       }
5693 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
5694 +       return  res;
5695 +}
5696 +
5697 +void
5698 +elan_stats_next_statidx(void)
5699 +{
5700 +       /* XXXXX need to put a not-in-use check here in case we loop MRH */
5701 +       /* though it's a biggish loop :)                              */
5702 +       elan_next_statidx++;
5703 +       if (!elan_next_statidx)
5704 +               elan_next_statidx++;
5705 +}
5706 +
5707 +int 
5708 +elan_stats_register (ELAN_STATS_IDX    *statidx, 
5709 +                       char              *block_name, 
5710 +                       uint               num_entries,
5711 +                       ELAN_STATS_OPS *ops,
5712 +                       void              *arg)
5713 +{
5714 +       ELAN_STATS_STRUCT *target;
5715 +
5716 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
5717 +
5718 +       /* create it and add */
5719 +       KMEM_ALLOC(target, ELAN_STATS_STRUCT *, sizeof(ELAN_STATS_STRUCT), 1);
5720 +       if (target == NULL)
5721 +       {
5722 +               ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
5723 +               return  ENOMEM;
5724 +       }
5725 +
5726 +       elan_stats_next_statidx();
5727 +
5728 +       *statidx = elan_next_statidx;
5729 +
5730 +       target->statidx     = elan_next_statidx;
5731 +       target->num_entries = num_entries;
5732 +       target->ops         = ops;
5733 +       target->arg         = arg;
5734 +       strcpy(target->block_name, block_name);
5735 +       
5736 +       list_add_tail(&target->node, &elan_stats_list);
5737 +
5738 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
5739 +       return  0;
5740 +}
5741 +
5742 +int
5743 +elan_stats_deregister (ELAN_STATS_IDX statidx)
5744 +{
5745 +       ELAN_STATS_STRUCT *target;
5746 +
5747 +       ELANMOD_RWLOCK_WRITE(&elan_rwlock);
5748 +       if ((target = elan_stats_find(statidx)) != NULL)
5749 +       {
5750 +
5751 +               list_del(&target->node);
5752 +               
5753 +               /* delete target entry */
5754 +               KMEM_FREE(target, sizeof(ELAN_STATS_STRUCT));
5755 +       }
5756 +       ELANMOD_RWLOCK_WRITE_UNLOCK(&elan_rwlock);
5757 +
5758 +       return  target == NULL ? EINVAL : 0;
5759 +}
5760 +
5761 +/*
5762 + * Local variables:
5763 + * c-file-style: "linux"
5764 + * End:
5765 + */
5766 diff -urN clean/drivers/net/qsnet/elan/usercopy.c linux-2.6.9/drivers/net/qsnet/elan/usercopy.c
5767 --- clean/drivers/net/qsnet/elan/usercopy.c     1969-12-31 19:00:00.000000000 -0500
5768 +++ linux-2.6.9/drivers/net/qsnet/elan/usercopy.c       2005-09-06 05:06:58.000000000 -0400
5769 @@ -0,0 +1,198 @@
5770 +/*
5771 + *    Copyright (c) 2005 by Quadrics Ltd.
5772 + * 
5773 + *    For licensing information please see the supplied COPYING file
5774 + *
5775 + */
5776 +
5777 +#ident "@(#)$Id: usercopy.c,v 1.10.2.6 2005/09/06 09:06:58 addy Exp $"
5778 +/*$Source: /cvs/master/quadrics/elanmod/modsrc/usercopy.c,v $*/
5779 +
5780 +#include <qsnet/kernel.h>
5781 +#include <qsnet/autoconf.h>
5782 +
5783 +#include <elan/elanmod.h>
5784 +#include <elan/elanmod_linux.h>
5785 +
5786 +#include <linux/sched.h>
5787 +#include <linux/mm.h>
5788 +#include <linux/highmem.h>
5789 +#include <linux/pagemap.h>
5790 +
5791 +/*
5792 + * Access another process' address space copying directly to/from user space (current)
5793 + *
5794 + * Remote is the non-local process memory address, which we access using get_user_pages() and kmap()
5795 + * For the local memory (i.e. owned by current task) we use the standard copy_[to|from]_user interfaces
5796 + *
5797 + * Code based on linux/kernel/ptrace.c
5798 + */
5799 +
5800 +#if defined(NO_MMPUT) || LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 9)
5801 +static size_t 
5802 +rw_process_vm (struct task_struct *tsk, unsigned long remote, void *local, size_t len, int write)
5803 +{
5804 +#warning "NO EXPORTED MMPUT - usercopy not possible"
5805 +
5806 +       /* Without an exported mmput() function we cannot make this
5807 +        * safe as the remote process may be torn down during the copy
5808 +        * I experimented with taking a write lock on the remote mmap_sem
5809 +        * but this seemed to lead to deadlocks when pagefaulting
5810 +        */
5811 +       /* GNAT 7768: We have also found that some older versions of the get_task_mm() code
5812 +        * in linux/sched.h call mmgrab() which is not exported in any 2.6.X kernel
5813 +        */
5814 +       return 0;
5815 +}
5816 +
5817 +#else
5818 +static size_t 
5819 +rw_process_vm (struct task_struct *tsk, unsigned long remote, void *local, size_t len, int write)
5820 +{
5821 +        struct mm_struct *mm;
5822 +        struct vm_area_struct *vma;
5823 +        struct page *page;
5824 +        void *old_buf = local;
5825 +
5826 +       if (write)
5827 +               ELAN_DEBUG5(ELAN_DBG_USERCOPY, "%p remote write from %p to %lx len %ld tsk %p\n",
5828 +                           current, local, remote, (long)len, tsk);
5829 +       else
5830 +               ELAN_DEBUG5(ELAN_DBG_USERCOPY, "%p remote read from %lx to %p len %ld tsk %p\n",
5831 +                           current, remote, local, (long)len, tsk);
5832 +
5833 +       /* This locks the task, grabs a reference to the mm and then unlocks the task */
5834 +       mm = get_task_mm(tsk);
5835 +
5836 +       if (!mm)
5837 +       {
5838 +               /* GNAT 7777: Must drop lock before returning */
5839 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5840 +               return 0;
5841 +       }
5842 +
5843 +       /* Do not try and copy from ourselves! */
5844 +       if (mm == current->mm)
5845 +       {
5846 +               /* GNAT 7777: Must now drop the elanmod lock as otherwise we can create a deadlock
5847 +                * during the mmput() due to it calling exit_mmap() for the remote process
5848 +                */
5849 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5850 +               mmput(mm);
5851 +               return 0;
5852 +       }
5853 +       
5854 +        down_read(&mm->mmap_sem);
5855 +
5856 +        /* ignore errors, just check how much was successfully transferred */
5857 +        while (len) {
5858 +                size_t bytes, ret, offset;
5859 +                void *maddr;
5860 +
5861 +                ret = get_user_pages(tsk, mm, remote, 1, write, 1, &page, &vma);
5862 +                if (ret <= 0)
5863 +                        break;
5864 +
5865 +                bytes = len;
5866 +                offset = remote & (PAGE_SIZE-1);
5867 +                if (bytes > PAGE_SIZE-offset)
5868 +                        bytes = PAGE_SIZE-offset;
5869 +
5870 +                maddr = kmap(page);
5871 +                if (write) {
5872 +                        if (copy_from_user(/* remote to */maddr + offset, /* user from */local, bytes))        {
5873 +                               kunmap(page);
5874 +                               page_cache_release(page);       
5875 +                               break;
5876 +                       }
5877 +                       set_page_dirty_lock(page);
5878 +                } else {
5879 +                        if (copy_to_user(/* user to */local, /* remote from */maddr + offset, bytes)) {
5880 +                               kunmap(page);
5881 +                               page_cache_release(page);
5882 +                               break;
5883 +                       }
5884 +                }
5885 +                kunmap(page);
5886 +               page_cache_release(page);
5887 +                len -= bytes;
5888 +                local += bytes;
5889 +                remote += bytes;
5890 +        }
5891 +
5892 +        up_read(&mm->mmap_sem);
5893 +
5894 +       /* GNAT 7777: Must now drop the elanmod lock as otherwise we can create a deadlock
5895 +        * during the mmput() due to it calling exit_mmap() in the remote process
5896 +        */
5897 +       ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5898 +       mmput(mm);
5899 +
5900 +       /* Return num bytes copied */
5901 +        return local - old_buf;
5902 +}
5903 +#endif /* !defined(NO_MMPUT) || LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 9) */
5904 +
5905 +int
5906 +elan_usercopy (void *remote, void *local, size_t len, int write, int ctxId, struct elan_cap_node_struct *cap_ptr)
5907 +{
5908 +       int ret = 0;
5909 +       size_t bytes;
5910 +       
5911 +       struct task_struct *tsk;
5912 +
5913 +       /* Grab a read lock on elanmod lock 
5914 +        *
5915 +        * This prevents any process from exiting whilst the copy is in progress
5916 +        * as it will need to take a write lock on the elanmod lock in order to do so
5917 +        * As exit_fs() is called before the task is destroyed this should prevent
5918 +        * the remote tsk from being torn down during the copy
5919 +        *
5920 +        * It would be much easier if we could just use get_task_struct()/put_task_struct()
5921 +        * but __put_task_struct() is not exported by the 2.6.X kernels - sigh.
5922 +        */
5923 +       ELANMOD_RWLOCK_READ(&elan_rwlock);
5924 +
5925 +       /* Get the task handle from the cap node for the supplied ctxId */
5926 +       if ((ret = elan_usercopy_handle(cap_ptr, ctxId, (void **)&tsk)) < 0)
5927 +       {
5928 +               ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5929 +               return ret;
5930 +       }
5931 +       
5932 +       ELAN_DEBUG6(ELAN_DBG_USERCOPY,
5933 +                   "elan_usercopy: remote %p local %p len %ld write %d ctxId %d tsk %p\n",
5934 +                   remote, local, (long) len, write, ctxId, tsk);
5935 +       
5936 +       ASSERT(tsk);
5937 +
5938 +       /* The BKL - why ??? (arch/[i386|ia64]/kernel/ptrace.c seems to hold it) */
5939 +//     lock_kernel();
5940 +
5941 +       bytes = rw_process_vm(tsk, (unsigned long)remote, local, len, write);
5942 +       
5943 +       if (bytes != len)
5944 +       {
5945 +               ELAN_DEBUG2(ELAN_DBG_USERCOPY, "elan_usercopy: Failed to read %ld bytes (%ld copied)\n",
5946 +                           (long)len, (long)bytes);
5947 +               ret = -EPERM;
5948 +       }
5949 +
5950 +       /* The BKL - why ??? (arch/[i386|ia64]/kernel/ptrace.c seems to hold it) */
5951 +//     unlock_kernel();
5952 +
5953 +       /* GNAT 7777: rw_process_vm() now drops the elanmod lock 
5954 +        *
5955 +        * ELANMOD_RWLOCK_READ_UNLOCK(&elan_rwlock);
5956 +        */
5957 +
5958 +       return ret;
5959 +}
5960 +
5961 +
5962 +
5963 +/*
5964 + * Local variables:
5965 + * c-file-style: "linux"
5966 + * End:
5967 + */
5968 diff -urN clean/drivers/net/qsnet/elan3/context.c linux-2.6.9/drivers/net/qsnet/elan3/context.c
5969 --- clean/drivers/net/qsnet/elan3/context.c     1969-12-31 19:00:00.000000000 -0500
5970 +++ linux-2.6.9/drivers/net/qsnet/elan3/context.c       2005-07-20 07:35:36.000000000 -0400
5971 @@ -0,0 +1,2101 @@
5972 +/*
5973 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
5974 + * 
5975 + *    For licensing information please see the supplied COPYING file
5976 + *
5977 + */
5978 +
5979 +#ident "@(#)$Id: context.c,v 1.117.2.1 2005/07/20 11:35:36 mike Exp $"
5980 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/context.c,v $ */
5981 +
5982 +#include <qsnet/kernel.h>
5983 +#include <qsnet/autoconf.h>
5984 +#include <elan/elanmod.h>
5985 +#include <elan3/elanregs.h>
5986 +#include <elan3/elandev.h>
5987 +#include <elan3/elanvp.h>
5988 +#include <elan3/elan3mmu.h>
5989 +#include <elan3/elanctxt.h>
5990 +#include <elan3/elan3mmu.h>
5991 +#include <elan3/elandebug.h>
5992 +#include <elan3/urom_addrs.h>
5993 +#include <elan3/thread.h>
5994 +#include <elan3/vmseg.h>
5995 +#include <elan3/elan3ops.h>
5996 +#include <elan3/elansyscall.h>
5997 +/*
5998 + * Global variables configurable from /etc/system file
5999 + *     (OR /etc/sysconfigtab on Digital UNIX)
6000 + */
6001 +int ntrapped_threads   = 64;
6002 +int ntrapped_dmas      = 64;
6003 +int ntrapped_events    = E3_NonSysCntxQueueSize + 128;
6004 +int ntrapped_commands  = 64;
6005 +int noverflow_commands = 1024;
6006 +int nswapped_threads   = 64;
6007 +int nswapped_dmas      = 64;
6008 +
6009 +#define NUM_HALTOPS    8
6010 +
6011 +void *SwapListsLockInfo;
6012 +void *CmdLockInfo;
6013 +
6014 +static void HaltSwapContext (ELAN3_DEV *dev, void *arg);
6015 +
6016 +static char *OthersStateStrings[]  = {"others_running", "others_halting", "others_swapping", 
6017 +                                     "others_halting_more", "others_swapping_more", "others_swapped"};
6018 +
6019 +ELAN3_CTXT *
6020 +elan3_alloc (ELAN3_DEV *dev, int  kernel)
6021 +{
6022 +    ELAN3_CTXT    *ctxt;
6023 +    int           i;
6024 +    unsigned long flags;
6025 +
6026 +    PRINTF1 (DBG_DEVICE, DBG_FN, "elan3_alloc: %s\n", kernel ? "kernel" : "user");
6027 +
6028 +    KMEM_ZALLOC (ctxt, ELAN3_CTXT *, sizeof (ELAN3_CTXT), TRUE);
6029 +    
6030 +    if (ctxt == NULL)
6031 +       return (NULL);
6032 +
6033 +    elan_nullcap (&ctxt->Capability);
6034 +
6035 +    ctxt->Device      = dev;
6036 +    ctxt->OthersState = CTXT_OTHERS_SWAPPED;
6037 +    ctxt->RefCnt      = 1;
6038 +    ctxt->Position    = dev->Position;
6039 +
6040 +    if (kernel)
6041 +       ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_KERNEL;
6042 +    else
6043 +       ctxt->Status = CTXT_DETACHED | CTXT_SWAPPED_OUT | CTXT_NO_LWPS;
6044 +
6045 +    ctxt->Elan3mmu = elan3mmu_alloc (ctxt);
6046 +
6047 +    kcondvar_init (&ctxt->Wait);
6048 +    kcondvar_init (&ctxt->CommandPortWait);
6049 +    kcondvar_init (&ctxt->LwpWait);
6050 +    kcondvar_init (&ctxt->HaltWait);
6051 +
6052 +    spin_lock_init (&ctxt->InputFaultLock);
6053 +
6054 +    kmutex_init (&ctxt->SwapListsLock);
6055 +    kmutex_init (&ctxt->CmdPortLock);
6056 +    kmutex_init (&ctxt->NetworkErrorLock);
6057 +    kmutex_init (&ctxt->CmdLock);
6058 +
6059 +    krwlock_init (&ctxt->VpLock);
6060 +
6061 +    KMEM_GETPAGES (ctxt->FlagPage, ELAN3_FLAGSTATS *, 1, TRUE);
6062 +    if (!ctxt->FlagPage)
6063 +       goto error;
6064 +    bzero ((char *) ctxt->FlagPage, PAGESIZE);
6065 +
6066 +    KMEM_ZALLOC (ctxt->CommandTraps, COMMAND_TRAP *,    sizeof (COMMAND_TRAP)    * ntrapped_commands, TRUE);
6067 +    if (!ctxt->CommandTraps)
6068 +       goto error;
6069 +
6070 +    KMEM_ZALLOC (ctxt->ThreadTraps,  THREAD_TRAP *,     sizeof (THREAD_TRAP)     * ntrapped_threads,  TRUE);
6071 +    if (!ctxt->ThreadTraps)
6072 +       goto error;
6073 +
6074 +    KMEM_ZALLOC (ctxt->DmaTraps,     DMA_TRAP *,        sizeof (DMA_TRAP)        * ntrapped_dmas,     TRUE);
6075 +    if (!ctxt->DmaTraps)
6076 +       goto error;
6077 +
6078 +    KMEM_ZALLOC (ctxt->EventCookies, EVENT_COOKIE *,    sizeof (EVENT_COOKIE)    * ntrapped_events,   TRUE);
6079 +    if (!ctxt->EventCookies)
6080 +       goto error;
6081 +
6082 +    KMEM_ZALLOC (ctxt->Commands,     CProcTrapBuf_BE *, sizeof (CProcTrapBuf_BE) * noverflow_commands,TRUE);
6083 +    if (!ctxt->Commands)
6084 +       goto error;
6085 +
6086 +    KMEM_ZALLOC (ctxt->SwapThreads,  E3_Addr *,         sizeof (E3_Addr)         * nswapped_threads,  TRUE);
6087 +    if (!ctxt->SwapThreads)
6088 +       goto error;
6089 +
6090 +    KMEM_ZALLOC (ctxt->SwapDmas,     E3_DMA_BE *,       sizeof (E3_DMA_BE)       * nswapped_dmas,     TRUE);
6091 +    if (!ctxt->SwapDmas)
6092 +       goto error;
6093 +
6094 +    /*
6095 +     * "slop" is defined as follows :
6096 +     *     number of entries REQUIRED to be left spare to consume all other traps
6097 +     *     up until the time that the context can be swapped out.
6098 +     *  
6099 +     * CommandTrapQ : 1 command issued by main + 1 issued by the thread processor per elan
6100 +     * ThreadTrapQ  : 2 from command + 2 input
6101 +     * DmaTrapQ     : 2 from command + 2 input
6102 +     * EventTrapQ   : 2 from command + 1 thread + 1 dma + 2 input + E3_NonSysCntxQueueSize
6103 +     */
6104 +    spin_lock_irqsave (&dev->IntrLock, flags);
6105 +    ELAN3_QUEUE_INIT (ctxt->CommandTrapQ, ntrapped_commands,  2);
6106 +    ELAN3_QUEUE_INIT (ctxt->ThreadTrapQ,  ntrapped_threads,   4);
6107 +    ELAN3_QUEUE_INIT (ctxt->DmaTrapQ,     ntrapped_dmas,      4);
6108 +    ELAN3_QUEUE_INIT (ctxt->EventCookieQ, ntrapped_events,    MIN(E3_NonSysCntxQueueSize + 6, ntrapped_events - 6));
6109 +    ELAN3_QUEUE_INIT (ctxt->CommandQ,     noverflow_commands, 0);
6110 +    ELAN3_QUEUE_INIT (ctxt->SwapThreadQ,  nswapped_threads,   0);
6111 +    ELAN3_QUEUE_INIT (ctxt->SwapDmaQ,     nswapped_dmas,      0);
6112 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6113 +
6114 +#if defined(DIGITAL_UNIX)
6115 +    /* Allocate the segelan for the command port */
6116 +    if (! kernel && elan3_segelan3_create (ctxt) == NULL)
6117 +    {
6118 +       elan3_detach(ctxt);
6119 +       elan3_free (ctxt);
6120 +       return ((ELAN3_CTXT *) NULL);
6121 +    }
6122 +#endif
6123 +
6124 +    /*
6125 +     * Initialise the Input Fault list 
6126 +     */
6127 +    spin_lock (&ctxt->InputFaultLock);
6128 +    for (i = 0; i < NUM_INPUT_FAULT_SAVE; i++)
6129 +       ctxt->InputFaults[i].Next = (i == (NUM_INPUT_FAULT_SAVE-1)) ? NULL : &ctxt->InputFaults[i+1];
6130 +    ctxt->InputFaultList = &ctxt->InputFaults[0];
6131 +    spin_unlock (&ctxt->InputFaultLock);
6132 +
6133 +    ReserveHaltOperations (dev, NUM_HALTOPS, TRUE);
6134 +    
6135 +    if ((ctxt->RouteTable = AllocateRouteTable (ctxt->Device, ELAN3_MAX_VPS)) == NULL)
6136 +    {
6137 +       PRINTF0 (DBG_DEVICE, DBG_FN, "elan3_alloc: cannot map route table\n");
6138 +       elan3_detach(ctxt);
6139 +       elan3_free (ctxt);
6140 +       return ((ELAN3_CTXT *) NULL);
6141 +    }  
6142 +
6143 +    return (ctxt);
6144 +
6145 +
6146 + error:
6147 +
6148 +    elan3_detach(ctxt);
6149 +    elan3_free (ctxt);
6150 +    if (ctxt->FlagPage)
6151 +       KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1);
6152 +    if (ctxt->CommandTraps)
6153 +       KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP)    * ntrapped_commands);
6154 +    if (ctxt->ThreadTraps)
6155 +       KMEM_FREE ((void *) ctxt->ThreadTraps,  sizeof (THREAD_TRAP)     * ntrapped_threads);
6156 +    if (ctxt->DmaTraps)
6157 +       KMEM_FREE ((void *) ctxt->DmaTraps,     sizeof (DMA_TRAP)        * ntrapped_dmas);
6158 +    if (ctxt->EventCookies)
6159 +       KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE)    * ntrapped_events);
6160 +    if (ctxt->Commands)
6161 +       KMEM_FREE ((void *) ctxt->Commands,     sizeof (CProcTrapBuf_BE) * noverflow_commands);
6162 +    if (ctxt->SwapThreads)
6163 +       KMEM_FREE ((void *) ctxt->SwapThreads,  sizeof (E3_Addr)         * nswapped_threads);
6164 +    if (ctxt->SwapDmas)
6165 +       KMEM_FREE ((void *) ctxt->SwapDmas,     sizeof (E3_DMA_BE)       * nswapped_dmas);
6166 +
6167 +    kcondvar_destroy (&ctxt->Wait);
6168 +    kcondvar_destroy (&ctxt->CommandPortWait);
6169 +    kcondvar_destroy (&ctxt->LwpWait);
6170 +    kcondvar_destroy (&ctxt->HaltWait);
6171 +
6172 +    kmutex_destroy (&ctxt->SwapListsLock);
6173 +    kmutex_destroy (&ctxt->CmdLock);
6174 +    kmutex_destroy (&ctxt->NetworkErrorLock);
6175 +    spin_lock_destroy  (&ctxt->InputFaultLock);
6176 +
6177 +    krwlock_destroy (&ctxt->VpLock);
6178 +
6179 +    KMEM_FREE (ctxt, sizeof (ELAN3_CTXT));
6180 +
6181 +    return (NULL);
6182 +}
6183 +
6184 +void
6185 +elan3_free (ELAN3_CTXT *ctxt)
6186 +{
6187 +    ELAN3_DEV     *dev = ctxt->Device;
6188 +    NETERR_FIXUP *nef;
6189 +    
6190 +    PRINTF1 (ctxt, DBG_FN, "elan3_free: %p \n", ctxt);
6191 +   
6192 +    elan3_removevp (ctxt, ELAN3_INVALID_PROCESS);                      /* Remove any virtual process mappings */
6193 +
6194 +#if defined(DIGITAL_UNIX)
6195 +    WaitForContext (ctxt);                                     /* wait for all references to this context to go away */
6196 +#endif
6197 +
6198 +    if (ctxt->RouteTable)
6199 +       FreeRouteTable (dev, ctxt->RouteTable);
6200 +    ctxt->RouteTable = NULL;
6201 +
6202 +    elan3mmu_free (ctxt->Elan3mmu);                            /* free of our Elan3mmu  */
6203 +
6204 +    if (ctxt->Private)                                         /* Call back to "user" to free off  */
6205 +       ELAN3_OP_FREE_PRIVATE (ctxt);                           /* private data */
6206 +
6207 +#if defined(DIGITAL_UNIX)
6208 +    if (! CTXT_IS_KERNEL(ctxt))
6209 +       elan3_segelan3_destroy (ctxt);                          /* Unmap the command port from the users address space. */
6210 +#endif
6211 +   
6212 +    ReleaseHaltOperations (dev, NUM_HALTOPS);
6213 +
6214 +    if (ctxt->Input0Resolver)
6215 +       CancelNetworkErrorResolver (ctxt->Input0Resolver);
6216 +
6217 +    if (ctxt->Input1Resolver)
6218 +       CancelNetworkErrorResolver (ctxt->Input1Resolver);
6219 +
6220 +    while ((nef = ctxt->NetworkErrorFixups) != NULL)
6221 +    {
6222 +       ctxt->NetworkErrorFixups = nef->Next;
6223 +
6224 +       CompleteNetworkErrorFixup (ctxt, nef, ESRCH);
6225 +    }
6226 +
6227 +    KMEM_FREEPAGES ((void *) ctxt->FlagPage, 1);
6228 +
6229 +    KMEM_FREE ((void *) ctxt->CommandTraps, sizeof (COMMAND_TRAP)    * ntrapped_commands);
6230 +    KMEM_FREE ((void *) ctxt->ThreadTraps,  sizeof (THREAD_TRAP)     * ntrapped_threads);
6231 +    KMEM_FREE ((void *) ctxt->DmaTraps,     sizeof (DMA_TRAP)        * ntrapped_dmas);
6232 +    KMEM_FREE ((void *) ctxt->EventCookies, sizeof (EVENT_COOKIE)    * ntrapped_events);
6233 +    KMEM_FREE ((void *) ctxt->Commands,     sizeof (CProcTrapBuf_BE) * noverflow_commands);
6234 +    KMEM_FREE ((void *) ctxt->SwapThreads,  sizeof (E3_Addr)         * nswapped_threads);
6235 +    KMEM_FREE ((void *) ctxt->SwapDmas,     sizeof (E3_DMA_BE)       * nswapped_dmas);
6236 +
6237 +    kcondvar_destroy (&ctxt->Wait);
6238 +    kcondvar_destroy (&ctxt->CommandPortWait);
6239 +    kcondvar_destroy (&ctxt->LwpWait);
6240 +    kcondvar_destroy (&ctxt->HaltWait);
6241 +
6242 +    kmutex_destroy (&ctxt->SwapListsLock);
6243 +    kmutex_destroy (&ctxt->CmdLock);
6244 +    kmutex_destroy (&ctxt->NetworkErrorLock);
6245 +    spin_lock_destroy  (&ctxt->InputFaultLock);
6246 +
6247 +    krwlock_destroy (&ctxt->VpLock);
6248 +
6249 +    KMEM_FREE (ctxt, sizeof (ELAN3_CTXT));
6250 +}
6251 +
6252 +int 
6253 +elan3_doattach(ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
6254 +{
6255 +    unsigned long pgnum = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) / PAGE_SIZE;
6256 +    unsigned long pgoff = ((cap->cap_mycontext & MAX_ROOT_CONTEXT_MASK) * sizeof (E3_CommandPort)) & (PAGE_SIZE-1);
6257 +    ELAN3_DEV     *dev   = ctxt->Device;
6258 +    int           res   = ESUCCESS;
6259 +    unsigned long flags;
6260 +
6261 +    /* Map in the command port for this context */
6262 +    if (MapDeviceRegister (dev, ELAN3_BAR_COMMAND_PORT, &ctxt->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ctxt->CommandPageHandle) != ESUCCESS)
6263 +    {
6264 +       PRINTF0 (ctxt, DBG_FN, "elan3_doattach: MapDeviceRegister failed");
6265 +       return (EINVAL);
6266 +    }
6267 +
6268 +    ctxt->CommandPort = ctxt->CommandPage + pgoff;
6269 +
6270 +    spin_lock_irqsave (&dev->IntrLock, flags);
6271 +
6272 +    res = 0;
6273 +    if (ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) != NULL)
6274 +       res = EBUSY;
6275 +    else
6276 +    {
6277 +       if ((res = elan3mmu_attach (ctxt->Device, cap->cap_mycontext, ctxt->Elan3mmu, 
6278 +                                   ctxt->RouteTable->Table, ctxt->RouteTable->Size-1)) == 0)
6279 +       {
6280 +           ELAN3_DEV_CTX_TABLE(dev,cap->cap_mycontext) = ctxt;
6281 +           ctxt->Capability                            = *cap;
6282 +       }
6283 +    }
6284 +
6285 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6286 +
6287 +    if (res == ESUCCESS)
6288 +       elan3_swapin (ctxt, CTXT_DETACHED);
6289 +    else 
6290 +    {
6291 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
6292 +       ctxt->CommandPage = (ioaddr_t) 0; 
6293 +       ctxt->CommandPort = (ioaddr_t) 0;
6294 +    }
6295 +
6296 +    return (res);
6297 +}
6298 +
6299 +void
6300 +elan3_destroy_callback( void * args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
6301 +{
6302 +    if (map == NULL) 
6303 +    {
6304 +       /* the cap is being destroyed */
6305 +       PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the cap is being destroyed \n");
6306 +    }
6307 +    else
6308 +    {
6309 +       /* the map is being destroyed */
6310 +       PRINTF0 (NULL, DBG_VP, "elan3_destroy_callback: the map is being destroyed \n");
6311 +    }
6312 +}
6313 +
6314 +int
6315 +elan3_attach (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
6316 +{
6317 +    ELAN3_DEV *dev = ctxt->Device;
6318 +    int type;
6319 +    int res;
6320 +
6321 +    switch (type = elan3_validate_cap (dev, cap, ELAN_USER_ATTACH))
6322 +    {
6323 +    case ELAN_CAP_OK:
6324 +       /* nothing */
6325 +       break;
6326 +
6327 +    case ELAN_CAP_RMS:
6328 +       if ((res = elan_attach_cap(cap, dev->Devinfo.dev_rail, ctxt, elan3_destroy_callback)) != 0)
6329 +           return res;
6330 +       break;
6331 +
6332 +    default:
6333 +       return (EINVAL);
6334 +    }
6335 +
6336 +    if (((res = elan3_doattach(ctxt,cap)) != ESUCCESS) && (type == ELAN_CAP_RMS))
6337 +       elan_detach_cap(cap, dev->Devinfo.dev_rail);
6338 +
6339 +    return res;
6340 +}
6341 +
6342 +void
6343 +elan3_detach ( ELAN3_CTXT *ctxt )
6344 +{
6345 +    ELAN3_DEV   *dev                 = ctxt->Device;
6346 +    int need_to_call_elanmod_detach = 0;
6347 +    unsigned long flags;
6348 +
6349 +    PRINTF1 (ctxt, DBG_FN, "elan3_detach: %p \n", ctxt );
6350 +    
6351 +    if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
6352 +    {
6353 +       PRINTF0 (ctxt, DBG_FN, "elan3_detach: context not attached \n");
6354 +       return ;
6355 +    }
6356 +
6357 +    /* must you be in the ctx_table ?? */
6358 +    
6359 +    switch (ctxt->Capability.cap_type & ELAN_CAP_TYPE_MASK)
6360 +    {
6361 +    case ELAN_CAP_TYPE_BLOCK:
6362 +    case ELAN_CAP_TYPE_CYCLIC:
6363 +    {
6364 +       if (ELAN3_SYSTEM_CONTEXT (ctxt->Capability.cap_mycontext))
6365 +           return ;
6366 +
6367 +       if (! (ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST))
6368 +           need_to_call_elanmod_detach = 1;
6369 +
6370 +       break;
6371 +    }  
6372 +    default:
6373 +       return ;
6374 +    }
6375 +
6376 +    elan3_swapout (ctxt, CTXT_DETACHED);
6377 +
6378 +    spin_lock_irqsave (&dev->IntrLock, flags);
6379 +
6380 +    elan3mmu_detach (dev, ctxt->Capability.cap_mycontext);
6381 +    ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL;
6382 +
6383 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6384 +
6385 +    if (ctxt->CommandPage)
6386 +    {
6387 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
6388 +       ctxt->CommandPage = (ioaddr_t) 0;
6389 +    }
6390 +    
6391 +    if (need_to_call_elanmod_detach) 
6392 +       elan_detach_cap(&ctxt->Capability, dev->Devinfo.dev_rail);
6393 +
6394 +    elan_nullcap (&ctxt->Capability);
6395 +
6396 +}
6397 +
6398 +void
6399 +elan3_dodetach ( ELAN3_CTXT *ctxt )
6400 +{
6401 +    ELAN3_DEV     *dev = ctxt->Device;
6402 +    unsigned long flags;
6403 +
6404 +    PRINTF1 (ctxt, DBG_FN, "elan3_dodetach: %p \n", ctxt );
6405 +    
6406 +    if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
6407 +    {
6408 +       PRINTF0 (ctxt, DBG_FN, "elan3_dodetach: context not attached \n");
6409 +       return ;
6410 +    }
6411 +
6412 +    elan3_swapout (ctxt, CTXT_DETACHED);
6413 +
6414 +    spin_lock_irqsave (&dev->IntrLock, flags);
6415 +
6416 +    elan3mmu_detach (dev, ctxt->Capability.cap_mycontext);
6417 +    ELAN3_DEV_CTX_TABLE(dev,ctxt->Capability.cap_mycontext) = NULL;
6418 +
6419 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6420 +
6421 +    if (ctxt->CommandPage)
6422 +    {
6423 +       UnmapDeviceRegister (dev, &ctxt->CommandPageHandle);
6424 +       ctxt->CommandPage = (ioaddr_t) 0;
6425 +    }
6426 +    
6427 +    elan_nullcap (&ctxt->Capability);
6428 +}
6429 +
6430 +void
6431 +elan3_swapin (ELAN3_CTXT *ctxt, int reason)
6432 +{
6433 +    ELAN3_DEV *dev = ctxt->Device;
6434 +    unsigned long flags;
6435 +
6436 +    spin_lock_irqsave (&dev->IntrLock, flags);
6437 +
6438 +    ASSERT (ctxt->Status & CTXT_SWAPPED_REASONS);
6439 +
6440 +    PRINTF3 (ctxt, DBG_SWAP, "elan3_swapin: status %x State %s reason %x\n", 
6441 +            ctxt->Status, OthersStateStrings[ctxt->OthersState], reason);
6442 +
6443 +    while (ctxt->Status & CTXT_SWAPPING_OUT)                   /* In transition */
6444 +       kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);
6445 +
6446 +    if (reason == CTXT_NO_LWPS && ctxt->LwpCount++ != 0)       /* Added another LWP */
6447 +    {
6448 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
6449 +       return;
6450 +    }
6451 +
6452 +    if ((ctxt->Status & ~reason) & CTXT_SWAPPED_REASONS)
6453 +       ctxt->Status &= ~reason;
6454 +    else
6455 +    {
6456 +       ASSERT (ctxt->Status & CTXT_SWAPPED_OUT);
6457 +       ASSERT (ctxt->OthersState == CTXT_OTHERS_SWAPPED);
6458 +       
6459 +       /*
6460 +        * Will not be swapped out anymore, so ask the "user" to perform 
6461 +        * any swapping in it needs before letting the context run again.
6462 +        */
6463 +       
6464 +       ctxt->Status &= ~(CTXT_SWAPPED_OUT | CTXT_QUEUES_EMPTY | reason);
6465 +       ctxt->OthersState = CTXT_OTHERS_RUNNING;
6466 +
6467 +       if (ctxt->Input0Trap.State == CTXT_STATE_OK && ctxt->Input1Trap.State == CTXT_STATE_OK)
6468 +           SetInputterStateForContext (ctxt, 0, NULL);
6469 +       
6470 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
6471 +    }
6472 +
6473 +    PRINTF2 (ctxt, DBG_SWAP, "elan3_swapin: all done - status %x state %s\n",
6474 +            ctxt->Status, OthersStateStrings[ctxt->OthersState]);
6475 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6476 +}
6477 +
6478 +
6479 +void
6480 +elan3_swapout (ELAN3_CTXT *ctxt, int reason)
6481 +{
6482 +    ELAN3_DEV     *dev = ctxt->Device;
6483 +    int           cansleep;
6484 +    unsigned long flags;
6485 +
6486 +    spin_lock_irqsave (&dev->IntrLock, flags);
6487 +
6488 +    PRINTF3 (ctxt, DBG_SWAP, "elan3_swapout: status %x state %s reason %x\n", 
6489 +            ctxt->Status, OthersStateStrings[ctxt->OthersState], reason);
6490 +
6491 +    if (reason == CTXT_NO_LWPS)
6492 +    {
6493 +       if (--ctxt->LwpCount != 0)                              /* Still other LWPs running */
6494 +       {
6495 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
6496 +           return;
6497 +       }
6498 +
6499 +       kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);            /* Wakeup anyone waiting on LwpCount */
6500 +    }
6501 +    
6502 +    ctxt->Status |= reason;
6503 +    
6504 +    while (ctxt->Status & CTXT_SWAPPING_OUT)                   /* wait for someone else to finish swapping */
6505 +       kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);         /* out */
6506 +
6507 +    if (ctxt->Status & CTXT_SWAPPED_OUT)
6508 +    {
6509 +       if (reason == CTXT_NO_LWPS)                             /* Wakeup other thread waiting on LWP exit */
6510 +           kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);
6511 +       
6512 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
6513 +       return;
6514 +    }
6515 +    
6516 +    /*
6517 +     * mark the context as swapping out.
6518 +     */
6519 +    ctxt->Status |= CTXT_SWAPPING_OUT;
6520 +    
6521 +    if (reason != CTXT_FIXUP_NETERR)
6522 +    {
6523 +       /*
6524 +        * Stop all of the lwps.
6525 +        */
6526 +       while (ctxt->LwpCount)
6527 +       {
6528 +           kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);           /* Wake up any lwps */
6529 +           kcondvar_wait (&ctxt->LwpWait, &dev->IntrLock, &flags);             /* then wait for them to enter elan3_swapout */
6530 +       }
6531 +    }
6532 +    
6533 +    StartSwapoutContext (ctxt, 0, NULL);
6534 +    for (;;)
6535 +    {
6536 +       PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: HandleExceptions\n");
6537 +
6538 +       cansleep = (HandleExceptions(ctxt, &flags) == ESUCCESS);
6539 +
6540 +       PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: OthersState=%d cansleep=%d\n", ctxt->OthersState, cansleep);
6541 +
6542 +       if (ctxt->OthersState == CTXT_OTHERS_SWAPPED)
6543 +           break;
6544 +
6545 +       if (cansleep)
6546 +           kcondvar_wait (&ctxt->Wait, &dev->IntrLock, &flags);
6547 +    }
6548 +    PRINTF0 (ctxt, DBG_SWAP, "elan3_swapout: swapped out\n");
6549 +    
6550 +    ASSERT (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ));
6551 +    ASSERT (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ));
6552 +
6553 +    ctxt->Status |=  CTXT_SWAPPED_OUT;
6554 +    ctxt->Status &= ~CTXT_SWAPPING_OUT;
6555 +
6556 +    kcondvar_wakeupall (&ctxt->LwpWait, &dev->IntrLock);
6557 +
6558 +    PRINTF2 (ctxt, DBG_SWAP, "elan3_swapout: all done - status %x state %s\n",
6559 +            ctxt->Status, OthersStateStrings[ctxt->OthersState]);
6560 +
6561 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6562 +}
6563 +
6564 +int
6565 +elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages)
6566 +{
6567 +    E3_Addr     elanAddr = FaultSave->s.FaultAddress;
6568 +    int                writeable;
6569 +    int                res;
6570 +
6571 +    PRINTF3 (ctxt, DBG_FAULT, "elan3_pagefault: elanAddr %08x FSR %08x : %s\n", elanAddr, FaultSave->s.FSR.Status,
6572 +            FaultSave->s.FSR.s.ProtFault ? "protection fault" : "pte invalid");
6573 +    
6574 +    /* Look at the FSR to determine the fault type etc */
6575 +    
6576 +    if (FaultSave->s.FSR.Status == 0)                          /* this is a target abort/parity error, so look */
6577 +    {                                                          /* at the PCI config space registers to determine  */
6578 +       ElanBusError (ctxt->Device);
6579 +       return (EFAULT);                                        
6580 +    }
6581 +    
6582 +    if (FaultSave->s.FSR.s.AlignmentErr)                       /* Alignment errors are always fatal. */
6583 +    {
6584 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Alignment error\n");
6585 +       return (EFAULT);
6586 +    }
6587 +
6588 +    if (FaultSave->s.FSR.s.WalkBadData)                                /* Memory ECC error during a walk */
6589 +    {
6590 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: Memory ECC error during walk\n");
6591 +       return (EFAULT);
6592 +    }
6593 +
6594 +    if (!FaultSave->s.FSR.s.ProtFault &&                       /* DMA memory type changed */
6595 +       !FaultSave->s.FSR.s.Walking)
6596 +    {
6597 +       PRINTF0 (ctxt, DBG_FAULT, "elan3_pagefault: DMA memory type changed\n");
6598 +       return (EFAULT);
6599 +    }
6600 +
6601 +    ASSERT (FaultSave->s.FSR.s.ProtFault ?                     /* protection errors, should always have a valid pte */
6602 +           (!FaultSave->s.FSR.s.Walking || !(FaultSave->s.FSR.s.Level==3) ||  FaultSave->s.FSR.s.FaultPte == ELAN3_ET_PTE) : 
6603 +           FaultSave->s.FSR.s.FaultPte == ELAN3_ET_INVALID);   /* otherwise it must be an invalid pte */
6604 +
6605 +    /*
6606 +     * Determine whether to fault for a 'write' from the access permissions we need, and not
6607 +     * from the access type (WrAcc).
6608 +     */
6609 +    writeable = (FaultSave->s.FSR.s.AccTypePerm & (1 << FSR_WritePermBit));
6610 +
6611 +    /* Check that we have the right permissions for this access type. */
6612 +    if ((res = elan3mmu_checkperm (ctxt->Elan3mmu, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.AccTypePerm)) != 0)
6613 +    {
6614 +       PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: %s\n", (res == ENOMEM) ? "no protection mapping" : "protection error");
6615 +       
6616 +       return (res);
6617 +    }
6618 +
6619 +    res = LoadElanTranslation (ctxt, (elanAddr&PAGEMASK), npages*PAGESIZE, FaultSave->s.FSR.s.ProtFault, writeable);
6620 +
6621 +    if (res == ESUCCESS)
6622 +    {
6623 +       BumpStat (ctxt->Device, PageFaults);
6624 +       BumpUserStat (ctxt, PageFaults);
6625 +    }
6626 +
6627 +    PRINTF1 (ctxt, DBG_FAULT, "elan3_pagefault: -> %d\n", res);
6628 +
6629 +    return (res);
6630 +}
6631 +
6632 +void
6633 +elan3_block_inputter (ELAN3_CTXT *ctxt, int block)
6634 +{
6635 +    ELAN3_DEV *dev = ctxt->Device;
6636 +    unsigned long flags;
6637 +
6638 +    spin_lock_irqsave (&dev->IntrLock, flags);
6639 +    
6640 +    if (block)
6641 +       ctxt->Status |= CTXT_USER_FILTERING;
6642 +    else
6643 +       ctxt->Status &= ~CTXT_USER_FILTERING;
6644 +
6645 +    if (ctxt->Capability.cap_mycontext != ELAN_CAP_UNINITIALISED)
6646 +       SetInputterStateForContext (ctxt, 0, NULL);
6647 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
6648 +}
6649 +
6650 +int
6651 +FixupNetworkErrors (ELAN3_CTXT *ctxt, unsigned long *flags)
6652 +{
6653 +    ELAN3_DEV   *dev = ctxt->Device;
6654 +    NETERR_FIXUP *nef;
6655 +
6656 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6657 +    
6658 +    if (ctxt->NetworkErrorFixups == NULL)
6659 +       return (ESUCCESS);
6660 +
6661 +    spin_unlock_irqrestore (&dev->IntrLock, *flags);
6662 +    
6663 +    kmutex_lock (&ctxt->NetworkErrorLock);                     /* single thread while fixing up errors */
6664 +    elan3_swapout (ctxt, CTXT_FIXUP_NETERR);
6665 +
6666 +    spin_lock_irqsave (&dev->IntrLock, *flags);
6667 +    while ((nef = ctxt->NetworkErrorFixups) != NULL)
6668 +    {
6669 +       ctxt->NetworkErrorFixups = nef->Next;
6670 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6671 +
6672 +       if (ELAN3_OP_FIXUP_NETWORK_ERROR (ctxt, nef) == OP_FAILED)
6673 +           CompleteNetworkErrorFixup (ctxt, nef, EINVAL);
6674 +
6675 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6676 +    }
6677 +    spin_unlock_irqrestore (&dev->IntrLock, *flags);
6678 +
6679 +    elan3_swapin (ctxt, CTXT_FIXUP_NETERR);
6680 +
6681 +    kmutex_unlock (&ctxt->NetworkErrorLock);
6682 +    spin_lock_irqsave (&dev->IntrLock, *flags);
6683 +    return (EAGAIN);
6684 +}
6685 +
6686 +int
6687 +CompleteNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER *rvp)
6688 +{
6689 +    int state;
6690 +
6691 +    switch (rvp->Status)
6692 +    {
6693 +    case ESUCCESS:
6694 +       /*
6695 +        * the item still existed at the source - if it's a wait for EOP transaction
6696 +        * then the source will retry - otherwise the remote event will have been
6697 +        * cleared and we should execute it
6698 +        */
6699 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESUCCESS zero WaitForEopTransaction %p\n", trap->WaitForEopTransaction);
6700 +
6701 +       state = trap->WaitForEopTransaction ? CTXT_STATE_OK : CTXT_STATE_NEEDS_RESTART;
6702 +
6703 +       break;
6704 +
6705 +    case ESRCH:        
6706 +       /*
6707 +        * the item was not found at the source - we should always execute the transaction
6708 +        * since it will never be resent
6709 +        */
6710 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: ESRCH execute WaitForEopTransaction %p\n", trap->WaitForEopTransaction);
6711 +       state = CTXT_STATE_NEEDS_RESTART;
6712 +       break;
6713 +
6714 +    default:                                                   /* other errors */
6715 +       PRINTF1 (ctxt, DBG_NETERR, "CompleteNetworkErrorResolver: %d\n", rvp->Status);
6716 +       if (ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, &rvp) == OP_HANDLED)
6717 +           state = CTXT_STATE_NEEDS_RESTART;
6718 +       else
6719 +           state = CTXT_STATE_OK;
6720 +       break;
6721 +    }
6722 +
6723 +    FreeNetworkErrorResolver (rvp);
6724 +
6725 +    return (state);
6726 +}
6727 +
6728 +int
6729 +HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags)
6730 +{
6731 +    ELAN3_DEV        *dev    = ctxt->Device;
6732 +    THREAD_TRAP      tproc;
6733 +    DMA_TRAP         dproc;
6734 +    NETERR_RESOLVER *rvp;
6735 +    int                     state;
6736 +
6737 +    if (ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR)
6738 +    {
6739 +       ctxt->Status &= ~CTXT_COMMAND_OVERFLOW_ERROR;
6740 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6741 +       ElanException (ctxt, EXCEPTION_COMMAND_OVERFLOW, COMMAND_PROC, NULL);
6742 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6743 +       return (EAGAIN);
6744 +    }
6745 +    
6746 +    if (! ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ))
6747 +    {
6748 +       /* XXXX: unmap translations to the command port */
6749 +
6750 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6751 +       ResolveCProcTrap (ctxt);
6752 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6753 +       return (EAGAIN);
6754 +    }
6755 +    
6756 +    if (ctxt->Input0Trap.State == CTXT_STATE_TRAPPED)
6757 +    {
6758 +       ctxt->Input0Trap.State = CTXT_STATE_RESOLVING;
6759 +
6760 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6761 +       ResolveIProcTrap (ctxt, &ctxt->Input0Trap, &ctxt->Input0Resolver);
6762 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6763 +       return (EAGAIN);
6764 +    }
6765 +
6766 +    if (ctxt->Input1Trap.State == CTXT_STATE_TRAPPED)
6767 +    {
6768 +       ctxt->Input1Trap.State = CTXT_STATE_RESOLVING;
6769 +
6770 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6771 +       ResolveIProcTrap (ctxt, &ctxt->Input1Trap, &ctxt->Input1Resolver);
6772 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6773 +       return (EAGAIN);
6774 +    }
6775 +
6776 +    if ((rvp = ctxt->Input0Resolver) != NULL && rvp->Completed)
6777 +    {
6778 +       ASSERT (ctxt->Input0Trap.State == CTXT_STATE_NETWORK_ERROR);
6779 +
6780 +       ctxt->Input0Resolver = NULL;
6781 +       
6782 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6783 +       state = CompleteNetworkErrorResolver (ctxt, &ctxt->Input0Trap, rvp);
6784 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6785 +       ctxt->Input0Trap.State = state;
6786 +       return (EAGAIN);
6787 +    }
6788 +
6789 +    if ((rvp = ctxt->Input1Resolver) != NULL && rvp->Completed)
6790 +    {
6791 +       ASSERT (ctxt->Input1Trap.State == CTXT_STATE_NETWORK_ERROR);
6792 +
6793 +       ctxt->Input1Resolver = NULL;
6794 +       
6795 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6796 +       state = CompleteNetworkErrorResolver (ctxt,&ctxt->Input1Trap, rvp);
6797 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6798 +       ctxt->Input1Trap.State = state;
6799 +       return (EAGAIN);
6800 +    }
6801 +
6802 +    if (NextTProcTrap (ctxt, &tproc))
6803 +    {
6804 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6805 +       ResolveTProcTrap (ctxt, &tproc);
6806 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6807 +       return (EAGAIN);
6808 +    }
6809 +    ctxt->Status &= ~CTXT_THREAD_QUEUE_FULL;
6810 +
6811 +    if (NextDProcTrap (ctxt, &dproc))
6812 +    {
6813 +       spin_unlock_irqrestore (&dev->IntrLock, *flags);
6814 +       ResolveDProcTrap (ctxt, &dproc);
6815 +       spin_lock_irqsave (&dev->IntrLock, *flags);
6816 +       return (EAGAIN);
6817 +    }
6818 +    ctxt->Status &= ~CTXT_DMA_QUEUE_FULL;
6819 +
6820 +    /* Handle all event interrupts. */
6821 +    if (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ))
6822 +    {
6823 +       while (! ELAN3_QUEUE_EMPTY (ctxt->EventCookieQ))
6824 +       {
6825 +           E3_uint32 cookie = *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies);
6826 +
6827 +           ELAN3_QUEUE_REMOVE (ctxt->EventCookieQ);
6828 +
6829 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6830 +           if (ELAN3_OP_EVENT (ctxt, cookie, OP_LWP) != OP_DEFER)
6831 +               spin_lock_irqsave (&dev->IntrLock, *flags);
6832 +           else
6833 +           {
6834 +               spin_lock_irqsave (&dev->IntrLock, *flags);     /* place the cookie back on the queue. */
6835 +                                                               /* note we place it on the front to ensure  */
6836 +               ELAN3_QUEUE_ADD_FRONT (ctxt->EventCookieQ);     /* event ordering. */
6837 +               *ELAN3_QUEUE_FRONT (ctxt->EventCookieQ, ctxt->EventCookies) = cookie;
6838 +           }
6839 +       }
6840 +       return (EAGAIN);
6841 +    }
6842 +    ctxt->Status &= ~CTXT_EVENT_QUEUE_FULL;
6843 +
6844 +    if (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ))
6845 +    {
6846 +       while (! ELAN3_QUEUE_EMPTY (ctxt->SwapDmaQ))
6847 +       {
6848 +           E3_DMA_BE DmaDesc = *ELAN3_QUEUE_FRONT (ctxt->SwapDmaQ, ctxt->SwapDmas);
6849 +
6850 +           ELAN3_QUEUE_REMOVE (ctxt->SwapDmaQ);
6851 +
6852 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6853 +           RestartDmaDesc (ctxt, &DmaDesc);
6854 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6855 +       }
6856 +       return (EAGAIN);
6857 +    }
6858 +    
6859 +    if (! ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ))
6860 +    {
6861 +       while (! ELAN3_QUEUE_EMPTY (ctxt->SwapThreadQ))
6862 +       {
6863 +           E3_Addr StackPointer = *ELAN3_QUEUE_FRONT (ctxt->SwapThreadQ, ctxt->SwapThreads);
6864 +
6865 +           ELAN3_QUEUE_REMOVE (ctxt->SwapThreadQ);
6866 +
6867 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6868 +           ReissueStackPointer (ctxt, StackPointer);
6869 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6870 +       }
6871 +       return (EAGAIN);
6872 +    }
6873 +    
6874 +    switch (ctxt->OthersState)
6875 +    {
6876 +    case CTXT_OTHERS_SWAPPING:
6877 +       if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6878 +           ctxt->OthersState = CTXT_OTHERS_RUNNING;
6879 +       else
6880 +           ctxt->OthersState = CTXT_OTHERS_SWAPPED;
6881 +
6882 +       PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping -> %s\n", OthersStateStrings[ctxt->OthersState]);
6883 +           
6884 +       break;
6885 +
6886 +    case CTXT_OTHERS_SWAPPING_MORE:
6887 +       ctxt->OthersState = CTXT_OTHERS_HALTING_MORE;
6888 +       QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt);
6889 +
6890 +       PRINTF1 (ctxt, DBG_LWP, "HandleExceptions: OthersState : swapping_more -> %s\n", OthersStateStrings[ctxt->OthersState]);
6891 +       break;
6892 +    }
6893 +    return (ESUCCESS);
6894 +}
6895 +
6896 +int
6897 +RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags)
6898 +{
6899 +    ELAN3_DEV *dev = ctxt->Device;
6900 +    int       res;
6901 +
6902 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
6903 +
6904 +    PRINTF1 (ctxt, DBG_LWP, "RestartContext: status %x\n", ctxt->Status);
6905 +
6906 +    if (! (ctxt->Status & CTXT_OTHERS_REASONS))
6907 +    {
6908 +       if (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ) || ! ELAN3_QUEUE_EMPTY(ctxt->CommandQ))
6909 +       {
6910 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6911 +           RestartCProcTrap (ctxt);
6912 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6913 +           return (EAGAIN);
6914 +       }
6915 +
6916 +       if (ctxt->Input0Trap.State == CTXT_STATE_NEEDS_RESTART)
6917 +       {
6918 +           ctxt->Input0Trap.State = CTXT_STATE_EXECUTING;
6919 +
6920 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6921 +           res = RestartIProcTrap (ctxt, &ctxt->Input0Trap);
6922 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6923 +           
6924 +           if (res == ESUCCESS)
6925 +               ctxt->Input0Trap.State = CTXT_STATE_OK;
6926 +           else
6927 +               ctxt->Input0Trap.State = CTXT_STATE_NEEDS_RESTART;
6928 +           return (EAGAIN);
6929 +       }
6930 +
6931 +       if (ctxt->Input1Trap.State == CTXT_STATE_NEEDS_RESTART)
6932 +       {
6933 +           ctxt->Input1Trap.State = CTXT_STATE_EXECUTING;
6934 +
6935 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6936 +           res = RestartIProcTrap (ctxt, &ctxt->Input1Trap);
6937 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6938 +
6939 +           if (res == ESUCCESS)
6940 +               ctxt->Input1Trap.State = CTXT_STATE_OK;
6941 +           else
6942 +               ctxt->Input1Trap.State = CTXT_STATE_NEEDS_RESTART;
6943 +           return (EAGAIN);
6944 +       }
6945 +
6946 +       if (SetEventsNeedRestart (ctxt))
6947 +       {
6948 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6949 +           RestartSetEvents (ctxt);
6950 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6951 +           return (EAGAIN);
6952 +       }
6953 +
6954 +       SetInputterStateForContext (ctxt, 0, NULL);
6955 +
6956 +       if (TProcNeedsRestart (ctxt))
6957 +       {
6958 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6959 +
6960 +           LoadCommandPortTranslation (ctxt);
6961 +           RestartTProcItems (ctxt);
6962 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6963 +           return (EAGAIN);
6964 +       }
6965 +
6966 +       if (DProcNeedsRestart (ctxt))
6967 +       {
6968 +           spin_unlock_irqrestore (&dev->IntrLock, *flags);
6969 +           RestartDProcItems (ctxt);
6970 +           spin_lock_irqsave (&dev->IntrLock, *flags);
6971 +           return (EAGAIN);
6972 +       }
6973 +
6974 +       if (ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ))
6975 +       {
6976 +           PRINTF1 (ctxt, DBG_LWP, "RestartContext: setting Command Flag at %p to 0\n", &ctxt->FlagPage->CommandFlag);
6977 +
6978 +           ctxt->FlagPage->CommandFlag = 0;
6979 +
6980 +           if (ctxt->Status & CTXT_WAITING_COMMAND)
6981 +           {
6982 +               PRINTF0 (ctxt, DBG_LWP, "RestartContext: waking up threads waiting for commandport\n");
6983 +               
6984 +               ctxt->Status &= ~CTXT_WAITING_COMMAND;
6985 +               
6986 +               kcondvar_wakeupall (&ctxt->CommandPortWait, &dev->IntrLock);
6987 +           }
6988 +       }
6989 +    }
6990 +
6991 +    return (ESUCCESS);
6992 +}
6993 +
6994 +static void
6995 +HaltSwapContext (ELAN3_DEV *dev, void *arg)
6996 +{
6997 +    ELAN3_CTXT        *ctxt    = (ELAN3_CTXT *) arg;
6998 +    int                      SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
6999 +    E3_ThreadQueue_BE thread;
7000 +    E3_DMA_BE         dma;
7001 +    sdramaddr_t       FPtr, BPtr;
7002 +    sdramaddr_t              Base, Top;
7003 +    u_int           *runCount;
7004 +    unsigned long     flags;
7005 +
7006 +    spin_lock_irqsave (&dev->IntrLock, flags);
7007 +
7008 +    ASSERT (ctxt->OthersState == CTXT_OTHERS_HALTING || ctxt->OthersState == CTXT_OTHERS_HALTING_MORE);
7009 +
7010 +    PRINTF2 (ctxt, DBG_SWAP, "HaltSwapContext: status %x state %s\n", ctxt->Status, OthersStateStrings[ctxt->OthersState]);
7011 +
7012 +    if (! (ctxt->Status & CTXT_OTHERS_REASONS))
7013 +    {
7014 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
7015 +       {
7016 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
7017 +
7018 +           if (--(*runCount) == 0)
7019 +               SetSchedStatusRegister (dev, 0, NULL);
7020 +       }
7021 +       ctxt->OthersState = CTXT_OTHERS_RUNNING;
7022 +       
7023 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: no more reason to swap -> others_running\n");
7024 +
7025 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
7026 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
7027 +       return;
7028 +    }
7029 +
7030 +    /*
7031 +     * Capture all other processors since we're not being responsive to 
7032 +     * the command processor interrupt.
7033 +     */
7034 +    CAPTURE_CPUS();
7035 +
7036 +    if (SysCntx)
7037 +    {
7038 +       FPtr = read_reg32 (dev, TProc_SysCntx_FPtr);
7039 +       BPtr = read_reg32 (dev, TProc_SysCntx_BPtr);
7040 +       Base = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0]);
7041 +       Top  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[E3_SysCntxQueueSize-1]);
7042 +    }
7043 +    else
7044 +    {
7045 +       FPtr  = read_reg32 (dev, TProc_NonSysCntx_FPtr);
7046 +       BPtr  = read_reg32 (dev, TProc_NonSysCntx_BPtr);
7047 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0]);
7048 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[E3_NonSysCntxQueueSize-1]);
7049 +    }
7050 +
7051 +    while (FPtr != BPtr)
7052 +    {
7053 +       elan3_sdram_copyq_from_sdram (dev, FPtr, (void *) &thread, sizeof (E3_ThreadQueue_BE));
7054 +       
7055 +       if (thread.s.Context == ctxt->Capability.cap_mycontext)
7056 +       {
7057 +           if (ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
7058 +               break;
7059 +           
7060 +           *ELAN3_QUEUE_BACK(ctxt->SwapThreadQ, ctxt->SwapThreads) = thread.s.Thread;
7061 +           ELAN3_QUEUE_ADD (ctxt->SwapThreadQ);
7062 +           
7063 +           /*
7064 +            * Remove this entry from the queue by replacing it with 
7065 +            * the "magic" thread value.
7066 +            *
7067 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
7068 +            * to mark the appropriate run queue as empty.
7069 +            */
7070 +           thread.s.Context = SysCntx ? SYS_CONTEXT_BIT : 0;
7071 +           thread.s.Thread  = VanishingStackPointer;
7072 +
7073 +           elan3_sdram_copyq_to_sdram (dev, (void *) &thread, FPtr, sizeof (E3_ThreadQueue_BE));
7074 +       }
7075 +       
7076 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_ThreadQueue);
7077 +    }
7078 +
7079 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
7080 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
7081 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
7082 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
7083 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
7084 +
7085 +    if (SysCntx)
7086 +    {
7087 +       FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
7088 +       BPtr  = read_reg32 (dev, DProc_SysCntx_BPtr);
7089 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
7090 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
7091 +    }
7092 +    else
7093 +    {
7094 +       FPtr  = read_reg32 (dev, DProc_NonSysCntx_FPtr);
7095 +       BPtr  = read_reg32 (dev, DProc_NonSysCntx_BPtr);
7096 +       Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
7097 +       Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
7098 +    }
7099 +
7100 +    while (FPtr != BPtr)
7101 +    {
7102 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
7103 +           
7104 +       if (dma.s.dma_u.s.Context == ctxt->Capability.cap_mycontext)
7105 +       {
7106 +           if (ELAN3_QUEUE_FULL (ctxt->SwapDmaQ))
7107 +               break;
7108 +           
7109 +           *ELAN3_QUEUE_BACK (ctxt->SwapDmaQ, ctxt->SwapDmas) = dma;
7110 +           ELAN3_QUEUE_ADD (ctxt->SwapDmaQ);
7111 +
7112 +           /*
7113 +            * Remove the DMA from the queue by replacing it with one with
7114 +            * zero size and no events.
7115 +            *
7116 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
7117 +            * to mark the appropriate run queue as empty.
7118 +            */
7119 +           dma.s.dma_type            = ((SysCntx ? SYS_CONTEXT_BIT : 0) << 16);
7120 +           dma.s.dma_size            = 0;
7121 +           dma.s.dma_source          = (E3_Addr) 0;
7122 +           dma.s.dma_dest            = (E3_Addr) 0;
7123 +           dma.s.dma_destCookieVProc = (E3_Addr) 0;
7124 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
7125 +           dma.s.dma_srcCookieVProc  = (E3_Addr) 0;
7126 +
7127 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
7128 +       }
7129 +
7130 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
7131 +    }
7132 +
7133 +    /*
7134 +     * Release the other processors now before signalling the LWP.
7135 +     */
7136 +    RELEASE_CPUS();
7137 +
7138 +    if (! ELAN3_QUEUE_FULL (ctxt->SwapDmaQ) && !ELAN3_QUEUE_FULL (ctxt->SwapThreadQ))
7139 +    {
7140 +       /*
7141 +        * We've completely emptied the elan queues of items in this
7142 +        * context, so we now mark it as fully swapped out.
7143 +        */
7144 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING_MORE)
7145 +       {
7146 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
7147 +           
7148 +           if (--(*runCount) == 0)
7149 +               SetSchedStatusRegister (dev, 0, NULL);
7150 +           
7151 +       }
7152 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues emptied -> others_swapping\n");
7153 +
7154 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING;
7155 +       kcondvar_wakeupall (&ctxt->Wait, &dev->IntrLock);
7156 +    }
7157 +    else
7158 +    {
7159 +       if (ctxt->OthersState == CTXT_OTHERS_HALTING)
7160 +       {
7161 +           runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
7162 +           
7163 +           if ((*runCount)++ == 0)
7164 +               SetSchedStatusRegister (dev, 0, NULL);
7165 +       }
7166 +       PRINTF0 (ctxt, DBG_SWAP, "HaltSwapContext: queues not emptied -> others_swapping_more\n");
7167 +
7168 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
7169 +       kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
7170 +    }
7171 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7172 +}
7173 +
7174 +void
7175 +UnloadCommandPageMapping (ELAN3_CTXT *ctxt)
7176 +{
7177 +    /*
7178 +     * Unload the Elan translations,  and flag the main processor to stall after 
7179 +     * issueing its next command.
7180 +     */
7181 +    if (ctxt->CommandPageMapping != NULL && (ctxt->Status & CTXT_COMMAND_MAPPED_ELAN))
7182 +    {
7183 +       ELAN3MMU_RGN *rgn = elan3mmu_rgnat_main (ctxt->Elan3mmu, ctxt->CommandPageMapping);
7184 +       
7185 +       if (rgn != NULL)
7186 +       {
7187 +           E3_Addr eaddr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase);
7188 +           
7189 +           PRINTF1 (ctxt, DBG_INTR, "UnloadCommandPageMapping: unmapping command port at addr %08x\n", eaddr);
7190 +           
7191 +           elan3mmu_unload (ctxt->Elan3mmu, eaddr, PAGESIZE, PTE_UNLOAD);
7192 +       }
7193 +       
7194 +       ctxt->Status &= ~CTXT_COMMAND_MAPPED_ELAN;
7195 +    }
7196 +}
7197 +
7198 +void
7199 +StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
7200 +{
7201 +    ELAN3_DEV   *dev     = ctxt->Device;
7202 +    int                SysCntx = (ctxt->Capability.cap_mycontext & SYS_CONTEXT_BIT);
7203 +    u_int      *runCount;
7204 +
7205 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
7206 +
7207 +    PRINTF2 (ctxt, DBG_SWAP, "StartSwapoutContext: Status %x OthersState %s\n",
7208 +            ctxt->Status, OthersStateStrings [ctxt->OthersState]);
7209 +    /*
7210 +     * Disable the inputters,  we should already have a reason for it.
7211 +     */
7212 +    SetInputterStateForContext (ctxt, Pend, Maskp);
7213 +
7214 +    UnloadCommandPageMapping (ctxt);
7215 +
7216 +    /* 
7217 +     * Flag main processor to stall after issuing next command
7218 +     */
7219 +    PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag);
7220 +
7221 +    ctxt->FlagPage->CommandFlag = 1;
7222 +
7223 +    PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState);
7224 +
7225 +    /*
7226 +     * And queue a haltop to stop the queues and clear it out.
7227 +     */
7228 +    switch (ctxt->OthersState)
7229 +    {
7230 +    case CTXT_OTHERS_RUNNING:
7231 +       PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_halting\n");
7232 +
7233 +       ctxt->OthersState = CTXT_OTHERS_HALTING;
7234 +
7235 +       QueueHaltOperation (dev, Pend, Maskp, INT_DProcHalted | INT_TProcHalted, HaltSwapContext, ctxt);
7236 +       break;
7237 +       
7238 +    case CTXT_OTHERS_SWAPPING:
7239 +       PRINTF0 (ctxt, DBG_SWAP, "StartSwapoutContext: -> others_swapping_more\n");
7240 +       ctxt->OthersState = CTXT_OTHERS_SWAPPING_MORE;
7241 +
7242 +       runCount = SysCntx ? &dev->HaltAllCount : &dev->HaltNonContext0Count;
7243 +           
7244 +       if ((*runCount)++ == 0)
7245 +           SetSchedStatusRegister (dev, Pend, Maskp);
7246 +       break;
7247 +    default:
7248 +       PRINTF1 (ctxt, DBG_SWAP, "StartSwapoutContext: OthersState=%d\n", ctxt->OthersState);
7249 +       break;
7250 +    }
7251 +}
7252 +
7253 +#if defined(DIGITAL_UNIX)
7254 +/* temporary tweaks to priority bump */
7255 +int lwp_do_prio = 1;
7256 +int lwp_do_nxm = 1;
7257 +int lwp_prio = BASEPRI_USER-1;
7258 +#elif defined(LINUX)
7259 +/* This is the default nice level for the helper LWP */
7260 +int LwpNice = -1;
7261 +#endif
7262 +
7263 +int
7264 +elan3_lwp (ELAN3_CTXT *ctxt)
7265 +{
7266 +    ELAN3_DEV     *dev = ctxt->Device;
7267 +    int                  res;
7268 +    unsigned long flags;
7269 +
7270 +    PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: started, context 0x%x\n", ctxt->Capability.cap_mycontext);
7271 +
7272 +#if defined(DIGITAL_UNIX)
7273 +    {
7274 +        thread_t mythread = current_thread();
7275 +        if (lwp_do_prio && (lwp_do_nxm || !IS_NXM_TASK(mythread->task)))
7276 +        {
7277 +            mythread->priority = mythread->sched_pri = lwp_prio;
7278 +            mythread->max_priority = BASEPRI_HIGHEST;
7279 +            (void) thread_priority(mythread, lwp_prio, 0, 1);
7280 +        }
7281 +    }
7282 +#elif defined(LINUX)
7283 +    {
7284 +       /* Do the priority trick for the helper LWP so that it
7285 +        * runs in preference to the user threads which may be
7286 +        * burning CPU waiting for a trap to be fixed up
7287 +        */
7288 +#ifdef NO_O1_SCHED
7289 +       if (LwpNice >= -20 && LwpNice < 20)
7290 +           current->nice = LwpNice;
7291 +#else
7292 +       set_user_nice(current, LwpNice);
7293 +#endif
7294 +    }
7295 +#endif
7296 +
7297 +    elan3_swapin (ctxt, CTXT_NO_LWPS);
7298 +
7299 +    spin_lock_irqsave (&dev->IntrLock, flags);
7300 +
7301 +    /* If we're swapped out, and not detached (or exiting) then wait until we're swapped back in */
7302 +    /* since otherwise we could "spin" forever continually calling elan3_lwp() */
7303 +    if ((ctxt->Status & CTXT_SWAPPED_REASONS) && ! (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING)))
7304 +       kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags);
7305 +
7306 +    for (;;)
7307 +    {
7308 +#if defined(DIGITAL_UNIX)
7309 +        if (thread_should_halt(current_thread()) || 
7310 +            CURSIG_CHECK(task_to_proc(current_thread()->task), u.np_uthread))
7311 +       {
7312 +           PRINTF1 (ctxt, DBG_LWP, "elan3_lwp: exiting on %s\n", 
7313 +                    thread_should_halt(current_thread()) ? "halt" : "signal");
7314 +            break;
7315 +       }
7316 +#endif
7317 +
7318 +       if (ctxt->Status & CTXT_SWAPPED_REASONS)
7319 +       {
7320 +           PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting on swapped reasons\n");
7321 +           break;
7322 +       }
7323 +
7324 +       if (! (ctxt->inhibit))
7325 +       {
7326 +           if (FixupNetworkErrors (ctxt, &flags) == ESUCCESS &&
7327 +               HandleExceptions (ctxt, &flags) == ESUCCESS &&
7328 +               RestartContext (ctxt, &flags) == ESUCCESS)
7329 +               {
7330 +                   if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0)
7331 +                   {
7332 +                       PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n");
7333 +                       break;
7334 +                   }
7335 +               }
7336 +       }
7337 +       else
7338 +       {
7339 +           printk("elan3_lwp :: skipping as inhibited\n");
7340 +           if (kcondvar_waitsig (&ctxt->Wait, &dev->IntrLock, &flags) == 0)
7341 +           {
7342 +               PRINTF0 (ctxt, DBG_LWP, "elan3_lwp: exiting by kcondvar_wait_sig()\n");
7343 +               break;
7344 +           }
7345 +       }
7346 +
7347 +    }
7348 +
7349 +    /* Return EINVAL to elan3_syscall_lwp() when we want it to exit */
7350 +    res = (ctxt->Status & (CTXT_DETACHED|CTXT_EXITING)) ? EINVAL : 0;
7351 +
7352 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7353 +    
7354 +    elan3_swapout (ctxt, CTXT_NO_LWPS);
7355 +
7356 +    spin_lock_irqsave (&dev->IntrLock, flags);
7357 +    FixupNetworkErrors (ctxt, &flags);
7358 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7359 +
7360 +    return (res);
7361 +}
7362 +
7363 +void
7364 +SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp)
7365 +{
7366 +    ELAN3_DEV  *dev          = NULL;
7367 +    int        new_disabled = 0;
7368 +    int               ctxnum;
7369 +
7370 +    ASSERT (ctxt != NULL);
7371 +    dev  = ctxt->Device;
7372 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
7373 +
7374 +    new_disabled = (ctxt->Input0Trap.State != CTXT_STATE_OK ||
7375 +                   ctxt->Input1Trap.State != CTXT_STATE_OK ||
7376 +                   (ctxt->Status & CTXT_INPUTTER_REASONS) != 0);
7377 +    
7378 +
7379 +    ctxnum   = ctxt->Capability.cap_mycontext;
7380 +
7381 +#ifndef __lock_lint  
7382 +    PRINTF2 (ctxt , DBG_IPROC, "SetInputterState: ctxnum %x %s attached\n", ctxnum, ctxt->Disabled ? "disabled " : "");
7383 +#endif /* __lock_lint */
7384 +        
7385 +    if (ctxt->Disabled != new_disabled)
7386 +    {
7387 +       PRINTF2 (ctxt, DBG_IPROC, "SetInputterState: ctxnum %x change %s\n", ctxnum, new_disabled ? "enabled to disabled" : "disabled to enabled");
7388 +       
7389 +       ctxt->Disabled = new_disabled;
7390 +
7391 +       /* synchronize the context filter for this context */
7392 +       elan3mmu_set_context_filter (dev, ctxnum, new_disabled, Pend, Maskp);
7393 +    }
7394 +}
7395 +
7396 +int
7397 +CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags)
7398 +{
7399 +    ELAN3_DEV *dev    = ctxt->Device;
7400 +    int       delay  = 1;
7401 +    int i, SeenComQueueEmpty;
7402 +
7403 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
7404 +    ASSERT (cflags != DmaComQueueNotEmpty || dev->HaltDmaDequeueCount != 0);
7405 +
7406 +    /*
7407 +     * Flush the command processor queues and poll the queue to see if it empties.
7408 +     */
7409 +    if (dev->FlushCommandCount++ == 0)
7410 +       SetSchedStatusRegister (dev, 0, NULL);
7411 +
7412 +    /* 
7413 +     * Ensure previous writes have been flushed through the write buffers
7414 +     */
7415 +    wmb(); mmiob();
7416 +
7417 +    /*
7418 +     * If the command processor traps,  or it's taking too long to observe
7419 +     * the queue as empty,  then we need to force the interrupt handler to 
7420 +     * run for us.  So queue a halt operation for the dma processor.
7421 +     */
7422 +    SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
7423 +    for (i = 20; i > 0 || (how & ISSUE_COMMAND_CANT_WAIT); i--)
7424 +    {
7425 +       if (SeenComQueueEmpty || (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
7426 +           break;
7427 +       
7428 +       mb();
7429 +       DELAY (delay);
7430 +
7431 +       if ((delay <<= 1) == 0) delay = 1;
7432 +
7433 +       SeenComQueueEmpty = !(read_reg32 (dev, ComQueueStatus) & cflags);
7434 +    }
7435 +
7436 +    if (--dev->FlushCommandCount == 0)
7437 +       SetSchedStatusRegister (dev, 0, NULL);
7438 +
7439 +    /*
7440 +     * If we've seen the command queue that we're interested in with nothing in it
7441 +     * and the command processor has not trapped then the commands we've
7442 +     * issued have been successfully processed.
7443 +     */
7444 +    if (SeenComQueueEmpty && ! (read_reg32 (dev, Exts.InterruptReg) & (INT_CProc | INT_ComQueue)))
7445 +    {
7446 +       PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: observed dma queue empty and command proc not trapped\n");
7447 +
7448 +       if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0)
7449 +           SetSchedStatusRegister (dev, 0, NULL);
7450 +
7451 +       return (ISSUE_COMMAND_OK);
7452 +    }
7453 +
7454 +    if ((how & ISSUE_COMMAND_CANT_WAIT) != 0)
7455 +       return (ISSUE_COMMAND_WAIT);
7456 +    
7457 +    /*
7458 +     * Halt the dma processor and wait for it to halt,  if the command we've issued has
7459 +     * trapped then the interrupt handler will have moved it to the context structure.
7460 +     */
7461 +    PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for dproc to halt\n");
7462 +    QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, WakeupLwp, ctxt);
7463 +    while (! ctxt->Halted)
7464 +    {
7465 +       PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: waiting for Halted - %d\n", ctxt->Halted);
7466 +
7467 +       kcondvar_wait (&ctxt->HaltWait, &dev->IntrLock, flags);
7468 +
7469 +       PRINTF1 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: woken for Halted - %d\n", ctxt->Halted);
7470 +    }
7471 +    ctxt->Halted = 0;
7472 +    
7473 +    PRINTF0 (ctxt, DBG_CMD, "CheckCommandQueueFlushed: dproc halted, checking for trap\n");
7474 +    
7475 +    if (cflags == DmaComQueueNotEmpty && --dev->HaltDmaDequeueCount == 0)
7476 +       SetSchedStatusRegister (dev, 0, NULL);
7477 +
7478 +    return (ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ) ? ISSUE_COMMAND_OK : ISSUE_COMMAND_TRAPPED);
7479 +}
7480 +
7481 +int
7482 +WaitForCommandPort (ELAN3_CTXT *ctxt)
7483 +{
7484 +    ELAN3_DEV     *dev = ctxt->Device;
7485 +    int                  res;
7486 +    unsigned long flags;
7487 +
7488 +    spin_lock_irqsave (&dev->IntrLock, flags);
7489 +
7490 +    if (ctxt->Status & CTXT_DETACHED)
7491 +       res = EINVAL;
7492 +    else 
7493 +    {
7494 +       if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
7495 +       {
7496 +           ctxt->Status |= CTXT_WAITING_COMMAND;
7497 +           if (CTXT_IS_KERNEL(ctxt))
7498 +               kcondvar_wait (&ctxt->CommandPortWait, &dev->IntrLock, &flags);
7499 +           else 
7500 +               kcondvar_waitsig (&ctxt->CommandPortWait, &dev->IntrLock, &flags);
7501 +       }
7502 +       
7503 +       res = (!ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS)) ? EAGAIN : 0;
7504 +    }
7505 +       
7506 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7507 +
7508 +    return (res);
7509 +}
7510 +
7511 +static char *
7512 +CommandName (int offset)
7513 +{
7514 +    switch (offset)
7515 +    {
7516 +    case offsetof (E3_CommandPort, PutDma):    return ("PutDma");
7517 +    case offsetof (E3_CommandPort, GetDma):    return ("GetDma");
7518 +    case offsetof (E3_CommandPort, RunThread): return ("RunThread");
7519 +    case offsetof (E3_CommandPort, WaitEvent0):        return ("WaitEvent0");
7520 +    case offsetof (E3_CommandPort, WaitEvent1):        return ("WaitEvent1");
7521 +    case offsetof (E3_CommandPort, SetEvent):  return ("SetEvent");
7522 +    default:                                   return ("Bad Command");
7523 +    }
7524 +}
7525 +
7526 +int
7527 +IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int cflags)
7528 +{
7529 +    ELAN3_DEV     *dev = ctxt->Device;
7530 +    int                  res;
7531 +    unsigned long flags;
7532 +
7533 +    spin_lock_irqsave (&dev->IntrLock, flags);
7534 +
7535 +    if ((! (cflags & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS))
7536 +    {
7537 +       /*
7538 +        * Cannot issue commands for non-cproc traps if command port is trapped, 
7539 +        * nor if the dma/thread trap queues are full, or we're swapping out
7540 +        */
7541 +       PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_RETRY\n",
7542 +                CommandName (cmdoff), value);
7543 +
7544 +       res = ISSUE_COMMAND_RETRY;
7545 +    }
7546 +    else
7547 +    {
7548 +       PRINTF2 (ctxt, DBG_CMD, "IssueCommand: %s %08x -> ISSUE_COMMAND_OK\n",
7549 +                CommandName (cmdoff), value);
7550 +
7551 +       mb();                                                   /* ensure writes to main memory completed */
7552 +       writel (value, (void *)(ctxt->CommandPort + cmdoff));           /* issue command */
7553 +       mmiob();                                                /* and flush through IO writes */
7554 +
7555 +       res = ISSUE_COMMAND_OK;
7556 +    }
7557 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7558 +    
7559 +    return (res);
7560 +}
7561 +
7562 +int
7563 +IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int how)
7564 +{
7565 +    ELAN3_DEV     *dev    = ctxt->Device;
7566 +    int                  res;
7567 +    unsigned long flags;
7568 +
7569 +    /*
7570 +     * Since we may be issuing a command that could trap, and we're interested in
7571 +     * the outcome, the command port trap resolving code must be locked out.
7572 +     */
7573 +    kmutex_lock (&ctxt->CmdLock);
7574 +    spin_lock_irqsave (&dev->IntrLock, flags);
7575 +
7576 +    if ((! (how & ISSUE_COMMAND_FOR_CPROC) && !ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ)) || (ctxt->Status & CTXT_OTHERS_REASONS))
7577 +    {
7578 +       PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_RETRY\n", value, item);
7579 +
7580 +       /*
7581 +        * Cannot issue commands for non-cproc traps if command port is trapped, 
7582 +        * nor if the dma/thread trap queues are full, or we're swapping out
7583 +        */
7584 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
7585 +       kmutex_unlock (&ctxt->CmdLock);
7586 +       return (ISSUE_COMMAND_RETRY);
7587 +    }
7588 +    
7589 +    ASSERT (item == NULL || ctxt->CommandPortItem == NULL);
7590 +
7591 +    /*
7592 +     * Stop the DMA processor from removing entries from the 
7593 +     * command port, and force the command processor to do this.
7594 +     * This means that if a trap occurs then it will be the command
7595 +     * processor that traps.
7596 +     */
7597 +    if (dev->HaltDmaDequeueCount++ == 0)
7598 +       SetSchedStatusRegister (dev, 0, NULL);
7599 +
7600 +    PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p]\n", value, item);
7601 +
7602 +    /*
7603 +     * Always issue the DMA to the 'write' command,  since we've asserted HaltDmaDequeue
7604 +     * the command processor will read the descriptor and transfer it to the run queue. 
7605 +     * The command processor looks at the dma_direction field to determine whether it is
7606 +     * a read or a write and whether to alter the dma_source of the descriptor on the run 
7607 +     * queue
7608 +     */
7609 +    mb();                                                      /* ensure writes to main memory ccompleted */
7610 +    writel (value, (void *) (ctxt->CommandPort + offsetof (E3_CommandPort, PutDma)));
7611 +    mmiob();                                                   /* and flush through IO writes */
7612 +    
7613 +    res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
7614 +
7615 +    if (res == ISSUE_COMMAND_TRAPPED)
7616 +    {
7617 +       PRINTF2 (ctxt, DBG_CMD, "IssueDmaCommand: PutDma %08x [%p] -> ISSUE_COMMAND_TRAPPED\n", value, item);
7618 +       /*
7619 +        * Remember the item we're issuing so that if the command port traps the item will not
7620 +        * get freed off until the descriptor has been read after the command trap has been fixed
7621 +        * up.
7622 +        */
7623 +       if (item != NULL)
7624 +           ctxt->CommandPortItem = item;
7625 +    }
7626 +
7627 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7628 +    kmutex_unlock (&ctxt->CmdLock);
7629 +
7630 +    return (res);
7631 +}
7632 +
7633 +int
7634 +WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int how)
7635 +{
7636 +    ELAN3_DEV     *dev = ctxt->Device;
7637 +    int           res;
7638 +    unsigned long flags;
7639 +
7640 +    spin_lock_irqsave (&dev->IntrLock, flags);
7641 +
7642 +    res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, how, &flags);
7643 +
7644 +    if (res == ISSUE_COMMAND_TRAPPED && item != NULL)
7645 +       ctxt->CommandPortItem = item;
7646 +
7647 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
7648 +    
7649 +    return (res);
7650 +}
7651 +
7652 +void
7653 +FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, E3_FaultSave_BE *FaultSaveArea, int flags)
7654 +{
7655 +    ASSERT (! CTXT_IS_KERNEL (ctxt));
7656 +
7657 +    /*
7658 +     * This code re-issues the part of the set event that trapped.
7659 +     */
7660 +    switch (TrapType)
7661 +    {
7662 +    case MI_ChainedEventError:
7663 +       ElanException (ctxt, EXCEPTION_CHAINED_EVENT, proc, trap, FaultSaveArea->s.EventAddress);
7664 +       break;
7665 +       
7666 +
7667 +    case MI_SetEventReadWait:
7668 +       /*
7669 +        * Fault occurred on the read for the event location. Just re-issue
7670 +        * setevent using EventAddress in E3_FaultSave
7671 +        */
7672 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_SetEventReadWait: re-issuing setevent %08x\n", 
7673 +                FaultSaveArea->s.EventAddress);
7674 +       
7675 +       ReissueEvent (ctxt, (E3_Addr) FaultSaveArea->s.EventAddress, flags);
7676 +       break;
7677 +
7678 +    case MI_DoSetEvent:
7679 +    {
7680 +       /*
7681 +        * Fault occurred because the block write of a block copy event trapped.
7682 +        * Must grab the event type, source and dest then simulate the block copy and then
7683 +        * perform the set. Once the block copy is started the event location cannot be read
7684 +        * again.
7685 +        */
7686 +       E3_Event *EventPtr  = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7687 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7688 +       
7689 +       /*
7690 +        * Check that the event has the block copy bit
7691 +        * set in it,  since we couldn't trap here if it
7692 +        * didn't
7693 +        */
7694 +       if ((EventType & EV_TYPE_BCOPY) != EV_TYPE_BCOPY)
7695 +       {
7696 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: Unexpected type=%x\n", EventType);
7697 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7698 +           break;
7699 +       }
7700 +       
7701 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_DoSetEvent: RunEventType %x\n", EventType);
7702 +
7703 +       if (RunEventType (ctxt, FaultSaveArea, EventType))
7704 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7705 +
7706 +       break;
7707 +    }
7708 +    
7709 +    case MI_ThreadUpdateNonSysCntxBack:
7710 +    case MI_ThreadUpdateSysCntxBack:
7711 +    {
7712 +       /*
7713 +        * Fault occurred because the block write of a block copy event trapped.
7714 +        * Must grab the event type, source and dest then simulate the block copy and then
7715 +        * run the thread. Once the block copy is started the event location cannot be read
7716 +        * again.
7717 +        */
7718 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7719 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7720 +
7721 +       /*
7722 +        * Check for the correct EventPtr type
7723 +        */
7724 +       if ((EventType & (EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_THREAD))
7725 +       {
7726 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: Unexpected type=%x for setevent trap. Should be thread\n", EventType);
7727 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7728 +           break;
7729 +       }
7730 +       
7731 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_ThreadUpdateCntx0Back: RunEventType %x\n", EventType);
7732 +       if (RunEventType (ctxt, FaultSaveArea, EventType))
7733 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7734 +       break;
7735 +    }
7736 +    
7737 +    case MI_EventIntUpdateBPtr:
7738 +    {
7739 +       /*
7740 +        * Fault occurred because the block write of a block copy event trapped.
7741 +        * Must grab the event type, source and dest then simulate the block copy and then
7742 +        * run the dma. Once the block copy is started the event location cannot be read
7743 +        * again.
7744 +        */
7745 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7746 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7747 +
7748 +       /*
7749 +        * Check for the correct EventPtr type
7750 +        */
7751 +       if ((EventType & (EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_EVIRQ))
7752 +       {
7753 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: Unexpected type=%x\n", EventType);
7754 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7755 +           break;
7756 +       }
7757 +
7758 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_EventIntUpdateBPtr: RunEventType %x\n", EventType);
7759 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
7760 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7761 +       break;
7762 +    }
7763 +    
7764 +    case MI_RunDmaDesc:
7765 +    {
7766 +       /*
7767 +        * Fault occurred because the block write of a block copy event trapped.
7768 +        * Must grab the event type, source and dest then simulate the block copy and then
7769 +        * run the dma. Once the block copy is started the event location cannot be read
7770 +        * again.
7771 +        */
7772 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7773 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7774 +
7775 +       /*
7776 +        * Check for the correct EventPtr type
7777 +        */
7778 +       if ((EventType & (EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY)) != (EV_TYPE_BCOPY | EV_TYPE_DMA))
7779 +       {
7780 +           PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: Unexpected type=%x\n", EventType);
7781 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7782 +           break;
7783 +       }
7784 +
7785 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_RunDmaDesc: RunEventType %x\n", EventType);
7786 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
7787 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7788 +       break;
7789 +    }
7790 +    
7791 +    case MI_WaitForCntxDmaDescRead:
7792 +    case MI_WaitForNonCntxDmaDescRead:
7793 +       /*
7794 +        * Fault occurred on the read of the dma descriptor. Run dma using the
7795 +        * Fault Address in FaultSave.
7796 +        */
7797 +       PRINTF1 (ctxt, DBG_EVENT, "FixupEventTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", FaultSaveArea->s.FaultAddress);
7798 +       
7799 +       RestartDmaPtr (ctxt, FaultSaveArea->s.FaultAddress);
7800 +       break;
7801 +    
7802 +    case MI_FinishedSetEvent:
7803 +       /*
7804 +        * Fault occurred because the block write of a block copy event trapped.
7805 +        * Simulate the block copy.
7806 +        */
7807 +       if (SimulateBlockCopy (ctxt, FaultSaveArea->s.EventAddress))
7808 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7809 +       break;
7810 +       
7811 +    case MI_BlockCopyEvent:
7812 +    case MI_BlockCopyWaitForReadData:
7813 +    {
7814 +       /*
7815 +        * Fault occurred on the read or write of the data for a block copy
7816 +        * event. Simulate the block copy using EventAddress in E3_FaultSave. Must also sample
7817 +        * the event type and then perform a run.
7818 +        */
7819 +       E3_Event *EventPtr = (E3_Event *) elan3mmu_mainaddr (ctxt->Elan3mmu, FaultSaveArea->s.EventAddress);
7820 +       E3_uint32 EventType = fuword (&EventPtr->ev_Type);
7821 +
7822 +       PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: MI_BlockCopyWaitForReadData: BCopy read fault in BCopy event. Simulating BCopy.\n");
7823 +       
7824 +       if (RunEventType(ctxt, FaultSaveArea, EventType))
7825 +           ElanException (ctxt, EXCEPTION_BAD_EVENT, proc, trap, FaultSaveArea, TrapType);
7826 +       break;
7827 +    }
7828 +    
7829 +    case MI_EventQueueOverflow:
7830 +    case MI_ThreadQueueOverflow:
7831 +    case MI_DmaQueueOverflow:
7832 +       /* XXXX: should handle queue overflow */
7833 +       PRINTF0 (ctxt, DBG_EVENT, "FixupEventTrap: Queue overflow\n");
7834 +
7835 +       ElanException (ctxt, EXCEPTION_QUEUE_OVERFLOW, proc, trap, FaultSaveArea, TrapType);
7836 +       break;
7837 +
7838 +    default:
7839 +       ElanException (ctxt, EXCEPTION_BUS_ERROR, proc, trap, FaultSaveArea, TrapType);
7840 +       break;
7841 +    }
7842 +}
7843 +
7844 +int
7845 +SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress)
7846 +{
7847 +    E3_Addr  SourcePtrElan;
7848 +    E3_Addr  DestPtrElan;
7849 +    unsigned DataType;
7850 +    int      i;
7851 +
7852 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
7853 +    {
7854 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
7855 +
7856 +       ElanException (ctxt, EXCEPTION_FAULTED, EVENT_PROC, NULL, EventAddress);
7857 +       return (TRUE);
7858 +    }
7859 +
7860 +    SourcePtrElan = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Source));
7861 +    DestPtrElan   = ELAN3_OP_LOAD32 (ctxt, EventAddress + offsetof (E3_BlockCopyEvent, ev_Dest));
7862 +    DataType      = DestPtrElan & EV_BCOPY_DTYPE_MASK;
7863 +    DestPtrElan  &= ~EV_BCOPY_DTYPE_MASK;
7864 +
7865 +
7866 +    PRINTF3 (ctxt, DBG_EVENT, "SimulateBlockCopy: Event %08x SourcePtr %08x DestPtr %08x\n",
7867 +            EventAddress, SourcePtrElan, DestPtrElan);
7868 +
7869 +    if (SourcePtrElan & EV_WCOPY)
7870 +       ELAN3_OP_STORE32 (ctxt, DestPtrElan, SourcePtrElan);
7871 +    else
7872 +    {
7873 +       /*
7874 +        * NOTE: since the block copy could be to sdram, we issue the writes backwards,
7875 +        *       except we MUST ensure that the last item in the block is written last.
7876 +        */
7877 +#if defined(__LITTLE_ENDIAN__)
7878 +       /*
7879 +        * For little endian cpu's we don't need to worry about the data type.
7880 +        */
7881 +       for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
7882 +           ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7883 +
7884 +       i = E3_BLK_SIZE - sizeof (E3_uint64);
7885 +       ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7886 +#else
7887 +       switch (DataType)
7888 +       {
7889 +       case EV_TYPE_BCOPY_BYTE:
7890 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8))
7891 +               ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i));
7892 +           
7893 +           i = E3_BLK_SIZE - sizeof (E3_uint8);
7894 +           ELAN3_OP_STORE8 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD8 (ctxt, SourcePtrElan + i));
7895 +           break;
7896 +
7897 +       case EV_TYPE_BCOPY_HWORD: 
7898 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16))
7899 +               ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i));
7900 +           
7901 +           i = E3_BLK_SIZE - sizeof (E3_uint16);
7902 +           ELAN3_OP_STORE16 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD16 (ctxt, SourcePtrElan + i));
7903 +           break;
7904 +           
7905 +       case EV_TYPE_BCOPY_WORD:  
7906 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32))
7907 +               ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i));
7908 +           
7909 +           i = E3_BLK_SIZE - sizeof (E3_uint32);
7910 +           ELAN3_OP_STORE32 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD32 (ctxt, SourcePtrElan + i));
7911 +           break;
7912 +           
7913 +       case EV_TYPE_BCOPY_DWORD: 
7914 +           for (i = E3_BLK_SIZE-(2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
7915 +               ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7916 +           
7917 +           i = E3_BLK_SIZE - sizeof (E3_uint64);
7918 +           ELAN3_OP_STORE64 (ctxt, DestPtrElan + i, ELAN3_OP_LOAD64 (ctxt, SourcePtrElan + i));
7919 +           break;
7920 +       }
7921 +#endif
7922 +    }
7923 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
7924 +
7925 +    return (FALSE);
7926 +}
7927 +
7928 +void
7929 +ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr, int flags)
7930 +{
7931 +    PRINTF1 (ctxt, DBG_CMD, "ReissueEvent : Event=%08x\n", addr);
7932 +
7933 +    if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), addr, flags) == ISSUE_COMMAND_RETRY)
7934 +    {
7935 +       PRINTF1 (ctxt, DBG_CMD, "ReissueEvent: queue event %08x\n", addr);
7936 +
7937 +       kmutex_lock (&ctxt->SwapListsLock);
7938 +       ctxt->ItemCount[LIST_SETEVENT]++;
7939 +       ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_SETEVENT, addr);
7940 +       kmutex_unlock (&ctxt->SwapListsLock);
7941 +    }
7942 +}
7943 +
7944 +int
7945 +SetEventsNeedRestart (ELAN3_CTXT *ctxt)
7946 +{
7947 +    return (ctxt->ItemCount[LIST_SETEVENT] != 0);
7948 +}
7949 +
7950 +void
7951 +RestartSetEvents (ELAN3_CTXT *ctxt)
7952 +{
7953 +    void     *item;
7954 +    E3_uint32 EventPointer;
7955 +
7956 +    kmutex_lock (&ctxt->SwapListsLock);
7957 +    
7958 +    while (ctxt->ItemCount[LIST_SETEVENT])
7959 +    {
7960 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_SETEVENT, &item, &EventPointer))
7961 +           ctxt->ItemCount[LIST_SETEVENT] = 0;
7962 +       else
7963 +       {
7964 +           if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), EventPointer, FALSE) == ISSUE_COMMAND_RETRY)
7965 +           {
7966 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_SETEVENT, item);
7967 +               kmutex_unlock (&ctxt->SwapListsLock);
7968 +               return;
7969 +           }
7970 +           
7971 +           ctxt->ItemCount[LIST_SETEVENT]--;
7972 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
7973 +       }
7974 +    }
7975 +    kmutex_unlock (&ctxt->SwapListsLock);
7976 +}
7977 +
7978 +int
7979 +RunEventType(ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType)
7980 +{
7981 +    int failed = FALSE;
7982 +
7983 +    if ((EventType & EV_TYPE_BCOPY) != 0)
7984 +       failed = SimulateBlockCopy(ctxt, FaultSaveArea->s.EventAddress);
7985 +    
7986 +    if ((EventType & EV_TYPE_MASK) == EV_TYPE_THREAD)
7987 +       ReissueStackPointer (ctxt, EventType & ~(EV_TYPE_MASK_THREAD|EV_TYPE_MASK_BCOPY));
7988 +    else if ((EventType & EV_TYPE_MASK) == EV_TYPE_DMA)
7989 +       RestartDmaPtr (ctxt, EventType & ~(EV_TYPE_MASK_DMA|EV_TYPE_MASK_BCOPY));
7990 +    else if ((EventType & EV_TYPE_EVIRQ) != 0)
7991 +       QueueEventInterrupt (ctxt, EventType & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY));
7992 +    else /* Chained event */
7993 +    {
7994 +       if ((EventType & ~EV_TYPE_BCOPY) != 0) /* not null setevent */
7995 +           ReissueEvent (ctxt, EventType & ~(EV_TYPE_MASK_CHAIN|EV_TYPE_MASK_BCOPY), FALSE);
7996 +    }
7997 +
7998 +    return (failed);
7999 +}
8000 +
8001 +void
8002 +WakeupLwp (ELAN3_DEV *dev, void *arg)
8003 +{
8004 +    ELAN3_CTXT    *ctxt = (ELAN3_CTXT *) arg;
8005 +    unsigned long flags;
8006 +
8007 +    PRINTF1 (ctxt, DBG_INTR, "WakeupLwp: %d\n", SPINLOCK_HELD (&dev->IntrLock));
8008 +
8009 +    spin_lock_irqsave (&dev->IntrLock, flags);
8010 +    ctxt->Halted = 1;
8011 +    kcondvar_wakeupone (&ctxt->HaltWait, &dev->IntrLock);
8012 +
8013 +    PRINTF0 (ctxt, DBG_INTR, "WakeupLwp: woken up context\n");
8014 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
8015 +}
8016 +
8017 +void
8018 +QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie)
8019 +{
8020 +    ELAN3_DEV     *dev = ctxt->Device;
8021 +    unsigned long flags;
8022 +
8023 +    PRINTF1 (ctxt, DBG_EVENT, "QueueEventInterrupt: cookie %08x\n", cookie);
8024 +
8025 +    if (ELAN3_OP_EVENT (ctxt, cookie, OP_INTR) == OP_DEFER)
8026 +    {
8027 +       spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
8028 +
8029 +       if (ELAN3_QUEUE_REALLY_FULL (ctxt->EventCookieQ))
8030 +       {
8031 +           ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8032 +           StartSwapoutContext (ctxt, 0, NULL);
8033 +       }
8034 +       else
8035 +       {
8036 +           *(ELAN3_QUEUE_BACK (ctxt->EventCookieQ, ctxt->EventCookies)) = cookie;
8037 +           
8038 +           ELAN3_QUEUE_ADD (ctxt->EventCookieQ);
8039 +           kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
8040 +           if (ELAN3_QUEUE_FULL (ctxt->EventCookieQ))
8041 +           {
8042 +               ctxt->Status |= CTXT_EVENT_QUEUE_FULL;
8043 +               StartSwapoutContext (ctxt, 0, NULL);
8044 +           }
8045 +       }
8046 +       spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
8047 +    }
8048 +}
8049 +
8050 +int
8051 +ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...)
8052 +{
8053 +    int     res;
8054 +    va_list ap;
8055 +
8056 +    va_start (ap, trap);
8057 +
8058 +    PRINTF2 (ctxt, DBG_FN, "ElanException: proc %d type %d\n", proc, type);
8059 +
8060 +    res = ELAN3_OP_EXCEPTION (ctxt, type, proc, trap, ap);
8061 +
8062 +    va_end (ap);
8063 +    
8064 +    return (res);
8065 +}
8066 +
8067 +
8068 +/*
8069 + * Local variables:
8070 + * c-file-style: "stroustrup"
8071 + * End:
8072 + */
8073 diff -urN clean/drivers/net/qsnet/elan3/context_linux.c linux-2.6.9/drivers/net/qsnet/elan3/context_linux.c
8074 --- clean/drivers/net/qsnet/elan3/context_linux.c       1969-12-31 19:00:00.000000000 -0500
8075 +++ linux-2.6.9/drivers/net/qsnet/elan3/context_linux.c 2004-10-28 07:51:00.000000000 -0400
8076 @@ -0,0 +1,229 @@
8077 +/*
8078 + *    Copyright (c) 2003 by Quadrics Limited.
8079 + * 
8080 + *    For licensing information please see the supplied COPYING file
8081 + *
8082 + */
8083 +
8084 +#ident "@(#)$Id: context_linux.c,v 1.32 2004/10/28 11:51:00 david Exp $"
8085 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/context_linux.c,v $*/
8086 +
8087 +#include <qsnet/kernel.h>
8088 +#include <qsnet/kpte.h>
8089 +
8090 +#include <elan3/elanregs.h>
8091 +#include <elan3/elandev.h>
8092 +#include <elan3/elanvp.h>
8093 +#include <elan3/elan3mmu.h>
8094 +#include <elan3/elanctxt.h>
8095 +#include <elan3/elandebug.h>
8096 +#include <elan3/urom_addrs.h>
8097 +#include <elan3/thread.h>
8098 +
8099 +int
8100 +LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr addr, int len, int protFault, int writeable)
8101 +{
8102 +    ELAN3MMU           *elan3mmu = ctxt->Elan3mmu;
8103 +    ELAN3MMU_RGN       *rgn;
8104 +    caddr_t            mainAddr;
8105 +    int                        perm;
8106 +    unsigned int        off;
8107 +    unsigned long       flags;
8108 +
8109 +    ASSERT (PAGE_ALIGNED (addr) && PAGE_ALIGNED (len));
8110 +
8111 +    PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr %08x len %08x%s%s\n", 
8112 +        addr, len, protFault ? " prot fault" : "", writeable ? " writeable" : "");
8113 +
8114 +    /* Ensure there's enough elan mmu tables for us to use */
8115 +    elan3mmu_expand (elan3mmu, addr, len, PTBL_LEVEL_3, 0);
8116 +
8117 +    while (len > 0) 
8118 +    {
8119 +       /*
8120 +        * Retrieve permission region and calculate main address
8121 +        */
8122 +       spin_lock (&elan3mmu->elan3mmu_lock);
8123 +
8124 +       rgn = elan3mmu_rgnat_elan (elan3mmu, addr);
8125 +       if (rgn == NULL) {
8126 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: no permission region at %lx %p\n", 
8127 +               (u_long) addr, rgn);
8128 +           spin_unlock (&elan3mmu->elan3mmu_lock);
8129 +           return (EFAULT);
8130 +       }
8131 +       mainAddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
8132 +
8133 +       ASSERT (PAGE_ALIGNED ((unsigned long)mainAddr));
8134 +
8135 +       spin_unlock (&elan3mmu->elan3mmu_lock);
8136 +
8137 +       /*
8138 +        * If we're trying to load a translation to the elan command port, 
8139 +        * then don't do it now, but mark the context to have it reloaded
8140 +        * just before we restart any threads. We do this because we don't
8141 +        * want to call into the segment driver since we could then block
8142 +        * waiting for the command port to become available.
8143 +        */
8144 +       if (mainAddr == ctxt->CommandPageMapping)
8145 +       {
8146 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: addr=%08x maps command port\n", addr);
8147 +
8148 +           spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
8149 +           UnloadCommandPageMapping (ctxt);
8150 +           spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
8151 +       }
8152 +       else 
8153 +       {
8154 +           struct vm_area_struct *area;
8155 +           struct mm_struct *mm = current->mm;
8156 +           pte_t *ptep_ptr;
8157 +           pte_t  ptep_value;
8158 +
8159 +           down_read (&current->mm->mmap_sem);
8160 +
8161 +           if ((area = find_vma_intersection(mm, (unsigned long)mainAddr, (unsigned long)mainAddr + PAGESIZE)) == NULL)
8162 +           {
8163 +               PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p no vma\n", mainAddr);
8164 +               up_read (&current->mm->mmap_sem);
8165 +               return EFAULT;
8166 +           }
8167 +
8168 +           if (writeable && !(area->vm_flags & VM_WRITE)) 
8169 +           {
8170 +               PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p not writeable\n", mainAddr);
8171 +               up_read (&current->mm->mmap_sem);
8172 +               return EFAULT;
8173 +           }
8174 +           
8175 +           spin_lock (&mm->page_table_lock);
8176 +
8177 +           /* don't dereference the pointer after the unmap */
8178 +           ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);  
8179 +           if (ptep_ptr) {
8180 +               ptep_value = *ptep_ptr;
8181 +               pte_unmap(ptep_ptr);
8182 +           }
8183 +
8184 +           PRINTF (ctxt, DBG_FAULT, "LoadElanTranslation: %p %s %s\n", 
8185 +                   mainAddr, writeable ? "writeable" : "readonly", 
8186 +                   !ptep_ptr ? "invalid" : pte_none(ptep_value) ? "none " : !pte_present(ptep_value) ? "swapped " : 
8187 +                   writeable && !pte_write(ptep_value) ? "COW" : "OK");
8188 +           
8189 +           if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value))) 
8190 +           {  
8191 +               spin_unlock (&mm->page_table_lock);
8192 +
8193 +               get_user_pages (current, current->mm, (unsigned long) mainAddr, PAGE_SIZE, 
8194 +                               (area->vm_flags & VM_WRITE), 0, NULL, NULL);
8195 +
8196 +               spin_lock (&mm->page_table_lock);
8197 +
8198 +               /* don't dereference the pointer after the unmap */
8199 +               ptep_ptr = find_pte_map (mm, (unsigned long)mainAddr);  
8200 +               if (ptep_ptr) {
8201 +                   ptep_value = *ptep_ptr;
8202 +                   pte_unmap(ptep_ptr);
8203 +               }
8204 +
8205 +               if (!ptep_ptr || pte_none(ptep_value) || !pte_present(ptep_value) || (writeable && !pte_write(ptep_value))) 
8206 +               {
8207 +                   spin_unlock (&mm->page_table_lock);
8208 +                   up_read (&current->mm->mmap_sem);
8209 +                   return EFAULT;
8210 +               }
8211 +           } 
8212 +
8213 +           /* don't allow user write access to kernel pages if not kernel */
8214 +           if (!pte_read(ptep_value))
8215 +           {
8216 +               spin_unlock (&mm->page_table_lock);
8217 +               up_read (&current->mm->mmap_sem);
8218 +               return EFAULT;
8219 +           }
8220 +
8221 +           if (writeable)
8222 +               pte_mkdirty(ptep_value);
8223 +           pte_mkyoung (ptep_value);
8224 +
8225 +           /* now load the elan pte */
8226 +           if (writeable)
8227 +               perm  = rgn->rgn_perm;
8228 +           else
8229 +               perm = ELAN3_PERM_READONLY(rgn->rgn_perm & ELAN3_PTE_PERM_MASK) | (rgn->rgn_perm & ~ELAN3_PTE_PERM_MASK);
8230 +
8231 +           for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE)
8232 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, pte_phys(ptep_value) + off, perm, PTE_LOAD | PTE_NO_SLEEP);
8233 +
8234 +           spin_unlock (&mm->page_table_lock);
8235 +           up_read (&current->mm->mmap_sem);
8236 +       }
8237 +
8238 +       len -= PAGESIZE;
8239 +       addr += PAGESIZE;
8240 +    }
8241 +    return (ESUCCESS);
8242 +}
8243 +
8244 +
8245 +/*
8246 + * LoadCommandPortTranslation:
8247 + *    explicitly load an elan translation to the command port.
8248 + *    but only do it if the command port is accessible.
8249 + *
8250 + *    we call this function just after we have restarted
8251 + *    and trapped commands,  since when a command traps
8252 + *    the elan translation to the command port is unloaded.
8253 + */
8254 +void
8255 +LoadCommandPortTranslation (ELAN3_CTXT *ctxt)
8256 +{
8257 +    ELAN3MMU     *elan3mmu = ctxt->Elan3mmu;
8258 +    ELAN3MMU_RGN *rgn;
8259 +    E3_Addr       addr;
8260 +    int                  perm;
8261 +    physaddr_t    phys;
8262 +    unsigned int  off;
8263 +    unsigned long flags;
8264 +
8265 +    PRINTF (ctxt, DBG_FAULT, "LoadCommandPortTranslation: SegAddr=%p Status=%x\n", ctxt->CommandPageMapping, ctxt->Status);
8266 +
8267 +    if (ctxt->CommandPageMapping != NULL  && !(ctxt->Status & CTXT_COMMAND_MAPPED_ELAN))
8268 +    {
8269 +       spin_lock (&elan3mmu->elan3mmu_lock);
8270 +       
8271 +       rgn = elan3mmu_rgnat_main (elan3mmu, ctxt->CommandPageMapping);
8272 +       if (rgn == (ELAN3MMU_RGN *) NULL) 
8273 +       {
8274 +           PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: no permission for command port\n");
8275 +           spin_unlock (&elan3mmu->elan3mmu_lock);
8276 +           return;
8277 +       }
8278 +       
8279 +       addr = rgn->rgn_ebase + (ctxt->CommandPageMapping - rgn->rgn_mbase);
8280 +       perm = rgn->rgn_perm;
8281 +       phys = kmem_to_phys((caddr_t) ctxt->CommandPage);
8282 +
8283 +       spin_lock_irqsave (&ctxt->Device->IntrLock, flags);
8284 +       if (ELAN3_QUEUE_EMPTY(ctxt->CommandTrapQ) && !(ctxt->Status & CTXT_OTHERS_REASONS))
8285 +       {
8286 +           PRINTF(ctxt, DBG_FAULT, "LoadCommandPortTranslation: load xlation addr=%08x phys=%llx perm=%d\n", 
8287 +                  addr, (unsigned long long)phys, perm);
8288 +
8289 +           ctxt->Status |= CTXT_COMMAND_MAPPED_ELAN;
8290 +
8291 +           for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
8292 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, addr + off, phys + off, perm, PTE_LOAD | PTE_NO_SLEEP);
8293 +       }
8294 +       spin_unlock_irqrestore (&ctxt->Device->IntrLock, flags);
8295 +       
8296 +       spin_unlock (&elan3mmu->elan3mmu_lock);
8297 +    }
8298 +}
8299 +
8300 +
8301 +/*
8302 + * Local variables:
8303 + * c-file-style: "stroustrup"
8304 + * End:
8305 + */
8306 diff -urN clean/drivers/net/qsnet/elan3/cproc.c linux-2.6.9/drivers/net/qsnet/elan3/cproc.c
8307 --- clean/drivers/net/qsnet/elan3/cproc.c       1969-12-31 19:00:00.000000000 -0500
8308 +++ linux-2.6.9/drivers/net/qsnet/elan3/cproc.c 2004-02-10 10:05:10.000000000 -0500
8309 @@ -0,0 +1,539 @@
8310 +/*
8311 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
8312 + * 
8313 + *    For licensing information please see the supplied COPYING file
8314 + *
8315 + */
8316 +
8317 +#ident "@(#)$Id: cproc.c,v 1.46 2004/02/10 15:05:10 david Exp $"
8318 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/cproc.c,v $ */
8319 +
8320 +
8321 +#include <qsnet/kernel.h>
8322 +
8323 +#include <elan3/elanregs.h>
8324 +#include <elan3/elandev.h>
8325 +#include <elan3/elanvp.h>
8326 +#include <elan3/elan3mmu.h>
8327 +#include <elan3/elanctxt.h>
8328 +#include <elan3/elandebug.h>
8329 +#include <elan3/urom_addrs.h>
8330 +#include <elan3/vmseg.h>
8331 +
8332 +void
8333 +HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Maskp)
8334 +{
8335 +    E3_FaultSave_BE     FaultSave;
8336 +    CProcTrapBuf_BE    TrapBuf;
8337 +    COMMAND_TRAP       *trap;
8338 +    ELAN3_CTXT        *ctxt;
8339 +    sdramaddr_t         CurrTrap;
8340 +    sdramaddr_t         LastTrapAddr;
8341 +    int                NTrapEntries;
8342 +    int                        NewPend;
8343 +    unsigned long       flags;
8344 +
8345 +    /* 
8346 +     * Temporarily mask out the command processor interrupt, since
8347 +     * we may cause it to be re-asserted when we re-issue the commands
8348 +     * from the overflow queue area.
8349 +     */
8350 +    DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
8351 +
8352 +    NewPend = read_reg32 (dev, Exts.InterruptReg);
8353 +
8354 +    do {
8355 +       if (NewPend & INT_ComQueue)
8356 +       {
8357 +           if ((read_reg32 (dev, ComQueueStatus) & ComQueueError) != 0)
8358 +           {
8359 +               printk ("elan%d: InterruptReg=%x ComQueueStatus=%x\n", dev->Instance,
8360 +                       read_reg32 (dev, Exts.InterruptReg), read_reg32 (dev, ComQueueStatus));
8361 +               panic ("elan: command queue has overflowed !!");
8362 +               /* NOTREACHED */
8363 +           }
8364 +
8365 +           BumpStat (dev, ComQueueHalfFull);
8366 +
8367 +           /*
8368 +            * Capture the other cpus and stop the threads processor then
8369 +            * allow the command processor to eagerly flush the command queue.
8370 +            */
8371 +           dev->FlushCommandCount++; dev->HaltThreadCount++;
8372 +           SetSchedStatusRegister (dev, Pend, Maskp);
8373 +
8374 +           CAPTURE_CPUS();
8375 +
8376 +           while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
8377 +               mb();
8378 +           
8379 +           /*
8380 +            * Let the threads processor run again, and release the cross call.
8381 +            */
8382 +           RELEASE_CPUS();
8383 +
8384 +           dev->FlushCommandCount--; dev->HaltThreadCount--;
8385 +           SetSchedStatusRegister (dev, Pend, Maskp);
8386 +
8387 +           /*
8388 +            * Re-sample the interrupt register to see if the command processor
8389 +            * has trapped while flushing the queue.  Preserve the INT_ComQueue
8390 +            * bit, so we can clear the ComQueueStatus register later.
8391 +            */
8392 +           NewPend = (read_reg32 (dev, Exts.InterruptReg) | INT_ComQueue);
8393 +       }
8394 +       
8395 +       CurrTrap = dev->CommandPortTraps[dev->CurrentCommandPortTrap];
8396 +       
8397 +       if (NewPend & INT_CProc)
8398 +       {
8399 +           BumpStat (dev, CProcTraps);
8400 +
8401 +           /*
8402 +            * Copy the MMU Fault Save area and zero it out for future traps.
8403 +            */
8404 +           elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), &FaultSave, sizeof (E3_FaultSave));
8405 +           elan3_sdram_zeroq_sdram      (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), sizeof (E3_FaultSave));
8406 +
8407 +           /*
8408 +            * First entry in the cproc trap save area is the value of Areg and Breg for the
8409 +            * uWord before the address fault.
8410 +            */
8411 +           TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf.Align64);
8412 +
8413 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.r.Breg >> 16));
8414 +           if (ctxt == NULL)
8415 +           {
8416 +               PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context invalid [%08x.%08x]\n", TrapBuf.r.Areg, TrapBuf.r.Breg);
8417 +               BumpStat (dev, InvalidContext);
8418 +           }
8419 +           else
8420 +           {
8421 +               if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ))
8422 +               {
8423 +                   if ((ctxt->Status & CTXT_COMMAND_OVERFLOW_ERROR) == 0)
8424 +                   {
8425 +                       ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8426 +                       StartSwapoutContext (ctxt, Pend, Maskp);
8427 +                   }
8428 +               }
8429 +               else
8430 +               {
8431 +                   trap = ELAN3_QUEUE_BACK (ctxt->CommandTrapQ, ctxt->CommandTraps);
8432 +                   
8433 +                   trap->FaultSave     = FaultSave;
8434 +                   trap->Status.Status = read_reg32 (dev, Exts.CProcStatus.Status);
8435 +                   trap->TrapBuf       = TrapBuf;
8436 +                   
8437 +                   /*
8438 +                    * The command processor does not stop after it has trapped. It will continue
8439 +                    * to save commands for other contexts into the commands port save area.
8440 +                    * The valid context for the trap is held in FaultSave. As some of this
8441 +                    * trap code uses the context in the status register the local copy must be
8442 +                    * updated with the trap context.
8443 +                    */
8444 +                   trap->Status.s.Context = (TrapBuf.r.Breg >> 16);
8445 +                   
8446 +                   PRINTF4 (ctxt, DBG_INTR, "HandleCProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
8447 +                            trap->Status.s.WakeupFunction, trap->Status.s.Context,
8448 +                            trap->Status.s.SuspendAddr, MiToName(trap->Status.s.TrapType));
8449 +                   PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: Areg=%08x Breg=%08x\n", 
8450 +                            trap->TrapBuf.r.Areg, trap->TrapBuf.r.Breg);
8451 +                   
8452 +                   if (ELAN3_OP_CPROC_TRAP (ctxt, trap) == OP_DEFER)
8453 +                   {
8454 +                       ELAN3_QUEUE_ADD (ctxt->CommandTrapQ);
8455 +                       
8456 +                       PRINTF1 (ctxt, DBG_INTR, "HandleCProcTrap: setting Command Flag at %p to 1\n", &ctxt->FlagPage->CommandFlag);
8457 +                       
8458 +                       ctxt->FlagPage->CommandFlag = 1;
8459 +                       
8460 +                       kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
8461 +                   }
8462 +               }
8463 +
8464 +               UnloadCommandPageMapping (ctxt);
8465 +           }
8466 +       }
8467 +       
8468 +       /*
8469 +        * Now change the CommandPortTrap queue.
8470 +        * Must stop the command processor, wait for it to stop, find the final
8471 +        * entry in the current cproc trap save area, reset the comm port
8472 +        * trap save address to the other queue, clear the command port interrupt and
8473 +        * set it running normally again, and then let it go again. This is not very
8474 +        * time critical but it would be a good idea to prevent a higher priority
8475 +        * interrupt from slowing down the process to prevent the fifos filling.
8476 +        */
8477 +       spin_lock_irqsave (&dev->CProcLock, flags);
8478 +
8479 +       SET_SCHED_STATUS (dev, CProcStop);
8480 +
8481 +       while ((read_reg32 (dev, Exts.SchCntReg) & CProcStopped) == 0)
8482 +       {
8483 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for command processor to stop\n");
8484 +           mb();
8485 +       }
8486 +       
8487 +       /*
8488 +        * Remember how many entries are in the saved command queue,  and 
8489 +        * re-initialise it, before restarting the command processor.
8490 +        */
8491 +       NTrapEntries = (read_reg32 (dev, CProc_TrapSave_Addr) - dev->CommandPortTraps[dev->CurrentCommandPortTrap])/sizeof (E3_uint64);
8492 +       LastTrapAddr = dev->CommandPortTraps[dev->CurrentCommandPortTrap] + NTrapEntries*sizeof (TrapBuf);
8493 +
8494 +       dev->CurrentCommandPortTrap ^= 1;
8495 +       write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
8496 +
8497 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: command trap queue has %d entries\n", NTrapEntries);
8498 +
8499 +       if (NTrapEntries > ELAN3_COMMAND_TRAP_SIZE/sizeof (E3_uint64))
8500 +           panic ("HandleCProcTrap: command trap queue has overflowed\n");
8501 +       
8502 +       if (NewPend & INT_CProc)
8503 +       {
8504 +           /*
8505 +            * Clear the CProc interrupt and set it running normally again. Nothing should
8506 +            * be running now that could issue commands apart from this trap handler.
8507 +            */
8508 +           PULSE_SCHED_STATUS (dev, RestartCProc);
8509 +       }
8510 +       
8511 +       if (NewPend & INT_ComQueue)
8512 +       {
8513 +           /*
8514 +            * Write any value here to clear out the half full and error bits of the command
8515 +            * overflow queues. This will also remove the overflow interrupt.
8516 +            */
8517 +           write_reg32 (dev, ComQueueStatus, 0);
8518 +       }
8519 +       
8520 +       /*
8521 +        * And let the command processor start again
8522 +        */
8523 +       CLEAR_SCHED_STATUS (dev, CProcStop);
8524 +       
8525 +       /*
8526 +        * Now re-issue all the commands that were issued after the command port trapped.
8527 +        * Should halt the dma processor and force commands to be put onto the run queues
8528 +        * to ensure that a remote re-issued command is handled correctly. NOTE it is
8529 +        * not necessary to wait for the dma processor to stop and this will reduce the
8530 +        * performance impact. As CProcHalt is asserted all commands will be flushed
8531 +        * to the queues.
8532 +        */
8533 +       dev->HaltDmaDequeueCount++; dev->FlushCommandCount++;
8534 +       SetSchedStatusRegister (dev, Pend, Maskp);
8535 +       
8536 +       /*
8537 +        * XXXX: should we do a capture/release if the trap overflow
8538 +        *       area has a "large" number of commands in it,  since
8539 +        *       we will just stuff them all back in, together with 
8540 +        *       all those issued by the other cpus/thread processors.
8541 +        */
8542 +       while (CurrTrap != LastTrapAddr)
8543 +       {
8544 +           /* Read the next saved (but not trapped) command */
8545 +           TrapBuf.Align64 = elan3_sdram_readq (dev, CurrTrap); CurrTrap += sizeof (TrapBuf);
8546 +           
8547 +
8548 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, (TrapBuf.s.ContextType >> 16));
8549 +           
8550 +           if (ctxt == NULL)
8551 +           {
8552 +               PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: context %x invalid\n", TrapBuf.s.ContextType >> 16);
8553 +               BumpStat (dev, InvalidContext);
8554 +           }
8555 +           else
8556 +           {
8557 +               if (!ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
8558 +               {
8559 +                   PRINTF3 (ctxt, DBG_INTR, "HandleCProcTrap: save command %x context %x - %08x\n",
8560 +                            (TrapBuf.s.ContextType>>3) & 0x3ff, TrapBuf.s.ContextType >> 17, TrapBuf.s.Addr);
8561 +                   
8562 +                   if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandQ))
8563 +                   {
8564 +                       ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8565 +                       StartSwapoutContext (ctxt, Pend, Maskp);
8566 +                   }
8567 +                   else
8568 +                   {
8569 +                       *ELAN3_QUEUE_BACK(ctxt->CommandQ, ctxt->Commands) = TrapBuf;
8570 +
8571 +                       ELAN3_QUEUE_ADD (ctxt->CommandQ);
8572 +                   }
8573 +                   continue;
8574 +               }
8575 +               
8576 +               /* Reissue the command to the command port for this context */
8577 +               PRINTF2 (ctxt, DBG_INTR, "HandleCProcTrap: re-issue command %x - %08x\n",
8578 +                        (TrapBuf.s.ContextType>>5) & 0xff, TrapBuf.s.Addr);
8579 +               
8580 +               mb();
8581 +               if (ELAN3_OP_CPROC_REISSUE(ctxt, &TrapBuf) != OP_HANDLED)
8582 +                   ((E3_uint32 *) ctxt->CommandPort)[(TrapBuf.s.ContextType>>5) & 0xff] = TrapBuf.s.Addr;
8583 +               mmiob();
8584 +           }
8585 +       }
8586 +       
8587 +       while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
8588 +       {
8589 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "HandleCProcTrap: waiting for queues to empty after reissueing commands\n");
8590 +           mb();
8591 +       }
8592 +       
8593 +       dev->HaltDmaDequeueCount--; dev->FlushCommandCount--;
8594 +       SetSchedStatusRegister (dev, Pend, Maskp);
8595 +       
8596 +       spin_unlock_irqrestore (&dev->CProcLock, flags);
8597 +
8598 +       /*
8599 +        * Re-read the interrupt register and see if we've got another command
8600 +        * port interrupt
8601 +        */
8602 +       NewPend = read_reg32 (dev, Exts.InterruptReg);
8603 +    } while ((NewPend & (INT_CProc | INT_ComQueue)) != 0);
8604 +
8605 +
8606 +    /*
8607 +     * Re-enable the command processor interrupt as we've finished 
8608 +     * polling it.
8609 +     */
8610 +    ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
8611 +}
8612 +
8613 +void
8614 +ResolveCProcTrap (ELAN3_CTXT *ctxt)
8615 +{
8616 +    ELAN3_DEV     *dev = ctxt->Device;
8617 +    COMMAND_TRAP *trap;
8618 +    int                  res;
8619 +    unsigned long flags;
8620 +
8621 +    kmutex_lock (&ctxt->CmdLock);
8622 +    spin_lock_irqsave (&dev->IntrLock, flags);
8623 +
8624 +    while (! ELAN3_QUEUE_BACK_EMPTY (ctxt->CommandTrapQ))
8625 +    {
8626 +       trap = ELAN3_QUEUE_MIDDLE(ctxt->CommandTrapQ, ctxt->CommandTraps);
8627 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
8628 +
8629 +       switch (trap->Status.s.TrapType)
8630 +       {
8631 +       case MI_EventIntUpdateBPtr:
8632 +       case MI_ChainedEventError:
8633 +       case MI_EventQueueOverflow:
8634 +       case MI_ThreadQueueOverflow:
8635 +       case MI_DmaQueueOverflow:
8636 +           PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: %s\n", MiToName (trap->Status.s.TrapType));
8637 +           break;
8638 +           
8639 +       default:
8640 +           /* All other traps are MMU related, we should have a fault address and FSR */
8641 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
8642 +           {
8643 +               PRINTF1 (ctxt, DBG_CPROC, "ResolveCProcTrap: elan3_pagefault failed for address %08x\n", 
8644 +                        trap->FaultSave.s.FaultAddress);
8645 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, COMMAND_PROC, trap, &trap->FaultSave, res);
8646 +               
8647 +               /* Set the trap type to 0 so the command does not get re-issued */
8648 +               trap->Status.s.TrapType = 0;
8649 +           }
8650 +           break;
8651 +       }
8652 +       
8653 +       spin_lock_irqsave (&dev->IntrLock, flags);
8654 +
8655 +       ELAN3_QUEUE_CONSUME (ctxt->CommandTrapQ);
8656 +    }
8657 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
8658 +    kmutex_unlock (&ctxt->CmdLock);
8659 +}
8660 +
8661 +int
8662 +RestartCProcTrap (ELAN3_CTXT *ctxt)
8663 +{
8664 +    ELAN3_DEV     *dev      = ctxt->Device;
8665 +    COMMAND_TRAP  trap;
8666 +    void        *item;
8667 +    int                  res;
8668 +    unsigned long flags;
8669 +
8670 +    spin_lock_irqsave (&dev->IntrLock, flags);
8671 +
8672 +    while (! ELAN3_QUEUE_FRONT_EMPTY (ctxt->CommandTrapQ))
8673 +    {
8674 +       trap = (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps));
8675 +       ELAN3_QUEUE_REMOVE (ctxt->CommandTrapQ);
8676 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
8677 +       
8678 +       BumpUserStat (ctxt, CProcTraps);
8679 +
8680 +       switch (trap.Status.s.TrapType)
8681 +       {
8682 +       case 0:
8683 +           res = ISSUE_COMMAND_OK;
8684 +           break;
8685 +           
8686 +       case MI_WaitForWaitEventDesc:
8687 +           /*
8688 +            * Fault occurred on the read of wait event descriptor for wait event type 0.
8689 +            * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr
8690 +            * is in the Areg save value.
8691 +            */
8692 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 desc read fault %08x\n", 
8693 +                    trap.TrapBuf.r.Areg);
8694 +           
8695 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
8696 +           break;
8697 +
8698 +       case MI_WaitForEventReadTy0:
8699 +           /*
8700 +            * Fault occurred on the read of event location for wait event type 0.
8701 +            * Fault already fixed. Just re-issue the wait command. Wait event descriptor addr
8702 +            * is in the Areg save value.
8703 +            */
8704 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type0 event loc fault %08x\n",
8705 +                    trap.TrapBuf.r.Areg);
8706 +           
8707 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent0), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
8708 +           break;
8709 +           
8710 +       case MI_WaitForEventReadTy1:
8711 +           /*
8712 +            * Fault occurred on the read of the event location for wait event type 1.
8713 +            * Areg has the original ptr and count.
8714 +            * Fault already fixed. Just re-issue the wait command using Areg and context.
8715 +            */
8716 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: WaitEvent type1 event location read fault %08x\n",
8717 +                    trap.TrapBuf.r.Areg);
8718 +           res = IssueCommand (ctxt, offsetof (E3_CommandPort, WaitEvent1), trap.TrapBuf.r.Areg, ISSUE_COMMAND_FOR_CPROC);
8719 +           break;
8720 +           
8721 +       case MI_WaitForCntxDmaDescRead:
8722 +       case MI_WaitForNonCntxDmaDescRead:
8723 +           /*
8724 +            * Fault occurred on the read of the dma descriptor. Run dma using the
8725 +            * Fault Address in FaultSave.
8726 +            */
8727 +           PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: MI_WaitForCntxDmaDescRead: re-issue dma at %08x\n", 
8728 +                    trap.FaultSave.s.FaultAddress);
8729 +           
8730 +           res = IssueDmaCommand (ctxt, trap.FaultSave.s.FaultAddress, NULL, ISSUE_COMMAND_FOR_CPROC);
8731 +           break;
8732 +           
8733 +       default:
8734 +           /*
8735 +            * Assume the fault will be fixed by FixupEventTrap.
8736 +            */
8737 +           FixupEventTrap (ctxt, COMMAND_PROC, &trap, trap.Status.s.TrapType, &trap.FaultSave, ISSUE_COMMAND_FOR_CPROC);
8738 +
8739 +           res = ISSUE_COMMAND_OK;
8740 +           break;
8741 +       }
8742 +
8743 +       switch (res)
8744 +       {
8745 +       case ISSUE_COMMAND_OK:                                  /* command re-issued ok*/
8746 +           break;
8747 +
8748 +       case ISSUE_COMMAND_TRAPPED:                             /* command trapped,  it will have been copied */
8749 +           return (EAGAIN);                                    /* to the back of the trap queue */
8750 +
8751 +       case ISSUE_COMMAND_RETRY:                               /* didn't issue command, so place back at front for */
8752 +           spin_lock_irqsave (&dev->IntrLock, flags);          /* later (after resolving other traps */
8753 +
8754 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->CommandTrapQ))
8755 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8756 +           else
8757 +           {
8758 +               ELAN3_QUEUE_ADD_FRONT(ctxt->CommandTrapQ);
8759 +               (*ELAN3_QUEUE_FRONT (ctxt->CommandTrapQ, ctxt->CommandTraps)) = trap;
8760 +           }
8761 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
8762 +           return (EAGAIN);
8763 +
8764 +       default:
8765 +           return (EINVAL);
8766 +       }
8767 +       spin_lock_irqsave (&dev->IntrLock, flags);
8768 +    }  
8769 +
8770 +    /*
8771 +     * GNAT 5409 - if CommandPortItem was not NULL, but other reasons were set,
8772 +     *             then we'd not free the CommandPortItem even though we'd re-
8773 +     *             issued all trapped and overflowed commands.  Hence only return
8774 +     *             without clearing CommandPortItem if we will be called again as
8775 +     *             either CommandTrapQ or CommandQ is not empty.
8776 +     */
8777 +
8778 +    /* Now run the overflowed commands for this context */
8779 +    if (! ELAN3_QUEUE_EMPTY (ctxt->CommandQ))
8780 +    {
8781 +       if (! ELAN3_QUEUE_EMPTY (ctxt->CommandTrapQ) || (ctxt->Status & CTXT_OTHERS_REASONS))
8782 +       {
8783 +           PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: cannot issue overflowed commands\n");
8784 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
8785 +           return (EAGAIN);
8786 +       }
8787 +
8788 +       /*
8789 +        * Just re-issue the commands,  if one traps then the remainder will 
8790 +        * just get placed in the overflow queue again and the interrupt handler
8791 +        * will copy them back in here.
8792 +        *
8793 +        * Stop the dma processor from taking commands,  since one of the commands
8794 +        * could be a re-issued remote dma, which must be processed by the command
8795 +        * processor.
8796 +        */
8797 +       
8798 +       if (dev->HaltDmaDequeueCount++ == 0)
8799 +           SetSchedStatusRegister (dev, 0, NULL);
8800 +       
8801 +       while (! ELAN3_QUEUE_EMPTY (ctxt->CommandQ))
8802 +       {
8803 +           CProcTrapBuf_BE *TrapBuf = ELAN3_QUEUE_FRONT (ctxt->CommandQ, ctxt->Commands);
8804 +           
8805 +           PRINTF2 (ctxt, DBG_CPROC, "RestartCProcTrap: re-issue command %x - %08x\n",
8806 +                    (TrapBuf->s.ContextType>>5) & 0xff, TrapBuf->s.Addr);
8807 +           mb();                                                       /* ensure writes to main memory completed */
8808 +           ((E3_uint32 *) ctxt->CommandPort)[(TrapBuf->s.ContextType>>5) & 0xff] = TrapBuf->s.Addr;
8809 +           mmiob();                                            /* and flush through IO writes */
8810 +           
8811 +           ELAN3_QUEUE_REMOVE (ctxt->CommandQ);
8812 +       }
8813 +       
8814 +       /* observe the command processor having halted */
8815 +       res = CheckCommandQueueFlushed (ctxt, DmaComQueueNotEmpty, 0, &flags);
8816 +       
8817 +       if (res != ISSUE_COMMAND_OK)
8818 +       {
8819 +           PRINTF0 (ctxt, DBG_CPROC, "RestartCProcTrap: trapped after issueing overflowed commands\n");
8820 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
8821 +           return (EAGAIN);
8822 +       }
8823 +    }
8824 +
8825 +    /* remove the command port item, while holding the lock */
8826 +    item = ctxt->CommandPortItem;
8827 +    ctxt->CommandPortItem = NULL;
8828 +
8829 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
8830 +       
8831 +    if (item != NULL)                                          /* Free of any item that may have been stored */
8832 +    {                                                          /* because of the commandport trap */
8833 +       PRINTF1 (ctxt, DBG_CPROC, "RestartCProcTrap: commandPortItem %p\n", item);
8834 +
8835 +       kmutex_lock (&ctxt->SwapListsLock);
8836 +       ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item);
8837 +       kmutex_unlock (&ctxt->SwapListsLock);
8838 +    }
8839 +
8840 +    return (ESUCCESS);
8841 +}
8842 +
8843 +
8844 +/*
8845 + * Local variables:
8846 + * c-file-style: "stroustrup"
8847 + * End:
8848 + */
8849 diff -urN clean/drivers/net/qsnet/elan3/dproc.c linux-2.6.9/drivers/net/qsnet/elan3/dproc.c
8850 --- clean/drivers/net/qsnet/elan3/dproc.c       1969-12-31 19:00:00.000000000 -0500
8851 +++ linux-2.6.9/drivers/net/qsnet/elan3/dproc.c 2003-09-24 09:57:25.000000000 -0400
8852 @@ -0,0 +1,553 @@
8853 +/*
8854 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
8855 + * 
8856 + *    For licensing information please see the supplied COPYING file
8857 + *
8858 + */
8859 +
8860 +#ident "@(#)$Id: dproc.c,v 1.52 2003/09/24 13:57:25 david Exp $"
8861 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/dproc.c,v $ */
8862 +
8863 +#include <qsnet/kernel.h>
8864 +
8865 +#include <elan3/elanregs.h>
8866 +#include <elan3/elandev.h>
8867 +#include <elan3/elanvp.h>
8868 +#include <elan3/elan3mmu.h>
8869 +#include <elan3/elanctxt.h>
8870 +#include <elan3/elandebug.h>
8871 +#include <elan3/urom_addrs.h>
8872 +#include <elan3/intrinsics.h>
8873 +#include <elan3/dma.h>
8874 +#include <elan3/vmseg.h>
8875 +
8876 +#define DMA_RETRY_FAIL_COUNT   8
8877 +
8878 +static void PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr);
8879 +
8880 +int
8881 +HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits)
8882 +{
8883 +    DMA_TRAP    *trap   = dev->DmaTrap;
8884 +
8885 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
8886 +
8887 +    /* Scoop out the trap information, before restarting the Elan */
8888 +    trap->Status.Status = read_reg32 (dev, Exts.DProcStatus.Status);
8889 +    
8890 +    ASSERT(trap->Status.s.WakeupFunction == WakeupNever);
8891 +
8892 +    /* copy the normal dma access fault type */
8893 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), &trap->FaultSave, sizeof (E3_FaultSave_BE));
8894 +    
8895 +    /* copy all 4 of the dma data fault type */
8896 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), &trap->Data0, 4*sizeof (E3_FaultSave_BE));
8897 +    
8898 +    /* Copy the DMA descriptor */
8899 +    copy_dma_regs (dev, &trap->Desc);
8900 +    
8901 +    /* Copy the packet info */
8902 +    trap->PacketInfo.Value = read_reg32 (dev, Exts.Dmas.DmaRds.DMA_PacketInfo.Value);
8903 +
8904 +    /* update device statistics */
8905 +    BumpStat (dev, DProcTraps);
8906 +    switch (trap->Status.s.TrapType)
8907 +    {
8908 +    case MI_DmaPacketTimedOutOrPacketError:
8909 +       if (trap->PacketInfo.s.PacketTimeout)
8910 +           BumpStat (dev, DmaOutputTimeouts);
8911 +       else if (trap->PacketInfo.s.PacketAckValue == C_ACK_ERROR)
8912 +           BumpStat (dev, DmaPacketAckErrors);
8913 +       break;
8914 +       
8915 +    case MI_DmaFailCountError:
8916 +       BumpStat (dev, DmaRetries);
8917 +       break;
8918 +    }
8919 +
8920 +    /* Must now zero all the FSRs so that a subsequent fault can be seen */
8921 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), sizeof (E3_FaultSave));
8922 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 4*sizeof (E3_FaultSave));
8923 +           
8924 +    *RestartBits |= RestartDProc;
8925 +    return (TRUE);
8926 +}
8927 +
8928 +void
8929 +DeliverDProcTrap (ELAN3_DEV *dev, DMA_TRAP *dmaTrap, E3_uint32 Pend)
8930 +{
8931 +    ELAN3_CTXT     *ctxt;
8932 +    E3_FaultSave_BE *FaultArea;
8933 +    DMA_TRAP       *trap;
8934 +    register int     i;
8935 +
8936 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
8937 +
8938 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, dmaTrap->Status.s.Context);
8939 +
8940 +    if (ctxt == NULL)
8941 +    {
8942 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverDProcTrap: context %x invalid\n", dmaTrap->Status.s.Context);
8943 +       BumpStat (dev, InvalidContext);
8944 +    }
8945 +    else
8946 +    {
8947 +       if (ELAN3_OP_DPROC_TRAP (ctxt, dmaTrap) == OP_DEFER)
8948 +       {
8949 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->DmaTrapQ))
8950 +           {
8951 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
8952 +               StartSwapoutContext (ctxt, Pend, NULL);
8953 +           }
8954 +           else
8955 +           {
8956 +               trap = ELAN3_QUEUE_BACK (ctxt->DmaTrapQ, ctxt->DmaTraps);
8957 +               
8958 +               bcopy (dmaTrap, trap, sizeof (DMA_TRAP));
8959 +               
8960 +               PRINTF5 (ctxt, DBG_INTR, "DeliverDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x PacketInfo=%x TrapType=%s\n",
8961 +                        trap->Status.s.WakeupFunction, trap->Status.s.Context, 
8962 +                        trap->Status.s.SuspendAddr, trap->PacketInfo.Value, MiToName (trap->Status.s.TrapType));
8963 +               PRINTF3 (ctxt, DBG_INTR, "                    FaultAddr=%x EventAddr=%x FSR=%x\n",
8964 +                        trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
8965 +                        trap->FaultSave.s.FSR.Status);
8966 +               for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
8967 +                   PRINTF4 (ctxt, DBG_INTR, "                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
8968 +                            FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
8969 +               
8970 +               PRINTF4 (ctxt, DBG_INTR, "                 type %08x size %08x source %08x dest %08x\n",
8971 +                        trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
8972 +               PRINTF2 (ctxt, DBG_INTR, "                 Dest event %08x cookie/proc %08x\n",
8973 +                        trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
8974 +               PRINTF2 (ctxt, DBG_INTR, "                 Source event %08x cookie/proc %08x\n",
8975 +                        trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
8976 +               ELAN3_QUEUE_ADD (ctxt->DmaTrapQ);
8977 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
8978 +               
8979 +               if (ELAN3_QUEUE_FULL (ctxt->DmaTrapQ))
8980 +               {
8981 +                   PRINTF0 (ctxt, DBG_INTR, "DeliverDProcTrap: dma queue full, must swap out\n");
8982 +                   ctxt->Status |= CTXT_DMA_QUEUE_FULL;
8983 +                   
8984 +                   StartSwapoutContext (ctxt, Pend, NULL);
8985 +               }
8986 +           }
8987 +       }
8988 +    }
8989 +}
8990 +
8991 +int
8992 +NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
8993 +{
8994 +    ELAN3_DEV *dev = ctxt->Device;
8995 +
8996 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
8997 +    
8998 +    if (ELAN3_QUEUE_EMPTY (ctxt->DmaTrapQ))
8999 +       return (0);
9000 +
9001 +    *trap = *ELAN3_QUEUE_FRONT (ctxt->DmaTrapQ, ctxt->DmaTraps);
9002 +    ELAN3_QUEUE_REMOVE (ctxt->DmaTrapQ);
9003 +    
9004 +    return (1);
9005 +}
9006 +
9007 +void
9008 +ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
9009 +{
9010 +    E3_FaultSave_BE *FaultArea;
9011 +    int                     FaultHandled = 0;
9012 +    int                     res;
9013 +    register int     i;
9014 +    
9015 +    PRINTF4 (ctxt, DBG_DPROC, "ResolveDProcTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
9016 +            trap->Status.s.WakeupFunction, trap->Status.s.Context, 
9017 +            trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
9018 +    PRINTF3 (ctxt, DBG_DPROC, "                    FaultAddr=%x EventAddr=%x FSR=%x\n",
9019 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
9020 +            trap->FaultSave.s.FSR.Status);
9021 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
9022 +       PRINTF4 (ctxt, DBG_DPROC, "                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
9023 +                FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
9024 +
9025 +    PRINTF4 (ctxt, DBG_DPROC, "                  type %08x size %08x source %08x dest %08x\n",
9026 +            trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
9027 +    PRINTF2 (ctxt, DBG_DPROC, "                  Dest event %08x cookie/proc %08x\n",
9028 +            trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
9029 +    PRINTF2 (ctxt, DBG_DPROC, "                  Source event %08x cookie/proc %08x\n",
9030 +            trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
9031 +    
9032 +    BumpUserStat (ctxt, DProcTraps);
9033 +
9034 +    switch (trap->Status.s.TrapType)
9035 +    {
9036 +    case MI_DmaPacketTimedOutOrPacketError:
9037 +       /*
9038 +        * Faulted due to packet timeout or a PAckError.
9039 +        * Reset fail count and reissue the same desc.
9040 +        */
9041 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: got a PAckError or the output timed out. Rescheduling dma.\n");
9042 +       if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, DMA_PROC, trap) == OP_IGNORE)
9043 +       {
9044 +           BumpUserStat (ctxt, DmaRetries);
9045 +
9046 +           trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT;
9047 +
9048 +           RestartDmaTrap (ctxt, trap);
9049 +       }
9050 +       return;
9051 +
9052 +    case MI_DmaFailCountError:
9053 +       /*
9054 +        * Faulted due to dma fail count.
9055 +        * Reset fail count and reissue the same desc.
9056 +        */
9057 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: Reset dma fail count to %d\n", DMA_RETRY_FAIL_COUNT);
9058 +       
9059 +       if (ElanException (ctxt, EXCEPTION_DMA_RETRY_FAIL, DMA_PROC, trap) == OP_IGNORE)
9060 +       {
9061 +           BumpUserStat (ctxt, DmaRetries);
9062 +
9063 +           trap->Desc.s.dma_failCount = DMA_RETRY_FAIL_COUNT;
9064 +
9065 +           RestartDmaTrap (ctxt, trap);
9066 +       }
9067 +       return;
9068 +
9069 +    case MI_TimesliceDmaQueueOverflow:
9070 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: dma timeslice queue overflow\n");
9071 +       RestartDmaTrap (ctxt, trap);
9072 +       return;
9073 +       
9074 +    case MI_UnimplementedError:
9075 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: unimplemented dma trap\n");
9076 +       if (ElanException (ctxt, EXCEPTION_UNIMPLEMENTED, DMA_PROC, trap) == OP_IGNORE)
9077 +           RestartDmaTrap (ctxt, trap);
9078 +       return;
9079 +
9080 +    case MI_EventQueueOverflow:
9081 +    case MI_ThreadQueueOverflow:
9082 +    case MI_DmaQueueOverflow:
9083 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n");
9084 +       FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0);
9085 +       return;
9086 +
9087 +    case MI_RemoteDmaCommand:
9088 +    case MI_RunDmaCommand:
9089 +    case MI_DequeueNonSysCntxDma:
9090 +    case MI_DequeueSysCntxDma:
9091 +       /*
9092 +        * The DMA processor has trapped due to outstanding prefetches from the previous 
9093 +        * dma.  The "current" dma has not been consumed, so we just ignore the trap
9094 +        */
9095 +       return;
9096 +
9097 +    case MI_WaitForRemoteDescRead2:
9098 +    case MI_ExecuteDmaDescriptorForRun:
9099 +       /*
9100 +        * The DMA processor has trapped while fetching the dma descriptor, so
9101 +        * zero it out to not confuse the user on an error
9102 +        */
9103 +       bzero (&trap->Desc, sizeof (trap->Desc));
9104 +       break;
9105 +    }
9106 +
9107 +    /*
9108 +     * All other uWords will have updated one of the fault areas,  so fix
9109 +     * any faults found in them.  If there were no faults found then it 
9110 +     * must have been a bus error
9111 +     */
9112 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
9113 +    {
9114 +       if (FaultArea->s.FSR.Status != 0)
9115 +       {
9116 +           FaultHandled++;
9117 +
9118 +           ASSERT ((FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block64 ||
9119 +                   (FaultArea->s.FSR.Status & FSR_SizeMask) == FSR_Block32);
9120 +           
9121 +           ASSERT (FaultArea->s.FaultContext == trap->Status.s.Context);
9122 +           
9123 +           if (((trap->Desc.s.dma_source & PAGEOFFSET) >= (PAGESIZE-E3_BLK_SIZE)) &&
9124 +               ((trap->Desc.s.dma_source & PAGEMASK) != ((trap->Desc.s.dma_source + trap->Desc.s.dma_size-1) & PAGEMASK)))
9125 +           {
9126 +               /* XXXX: dma started within last 64 bytes of the page
9127 +                *       terminate the process if it has pagefaulted */
9128 +               if (FaultArea->s.FaultAddress == (trap->Desc.s.dma_source & ~(E3_BLK_SIZE-1)))
9129 +               {
9130 +                   printk ("elan%d: invalid dma - context=%x source=%x\n", ctxt->Device->Instance, 
9131 +                           ctxt->Capability.cap_mycontext, trap->Desc.s.dma_source);
9132 +                   
9133 +                   if (ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0) != OP_IGNORE)
9134 +                       return;
9135 +               }
9136 +           }
9137 +
9138 +           if (trap->Desc.s.dma_size != 0 && (res = elan3_pagefault (ctxt, FaultArea, 1)) != ESUCCESS)
9139 +           {
9140 +               /* XXXX: Rev B Elans can prefetch data past the end of the dma descriptor */
9141 +               /*       if the fault relates to this, then just ignore it */
9142 +               if (FaultArea->s.FaultAddress < (trap->Desc.s.dma_source+trap->Desc.s.dma_size) ||
9143 +                   FaultArea->s.FaultAddress > (trap->Desc.s.dma_source+trap->Desc.s.dma_size+E3_BLK_SIZE*2))
9144 +               {
9145 +                   PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n",
9146 +                            FaultArea->s.FaultAddress);
9147 +                   
9148 +                   if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, FaultArea, res) != OP_IGNORE)
9149 +                       return;
9150 +               }
9151 +           }
9152 +       }
9153 +    }
9154 +    
9155 +    if (trap->FaultSave.s.FSR.Status != 0)
9156 +    {
9157 +       FaultHandled++;
9158 +
9159 +       ASSERT (trap->FaultSave.s.FaultContext == trap->Status.s.Context);
9160 +
9161 +       if ((trap->FaultSave.s.FSR.Status & FSR_SizeMask) == FSR_RouteFetch)
9162 +       {
9163 +           res = ResolveVirtualProcess (ctxt, trap->FaultSave.s.FaultAddress & 0xffff); /* mask out cookie */
9164 +
9165 +           switch (res)
9166 +           {
9167 +           default:
9168 +               if (ElanException (ctxt, EXCEPTION_INVALID_PROCESS, DMA_PROC, trap, trap->FaultSave.s.FaultAddress, res) != OP_IGNORE)
9169 +                   return;
9170 +               
9171 +           case EAGAIN:
9172 +               /* XXXX; wait on trail blazing code */
9173 +
9174 +           case 0:
9175 +               break;
9176 +           }
9177 +       }
9178 +       else
9179 +       {
9180 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
9181 +           {
9182 +               PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: elan3_pagefault failed for address %x\n",
9183 +                        trap->FaultSave.s.FaultAddress);
9184 +
9185 +               if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, res) != OP_IGNORE)
9186 +                   return;
9187 +           }
9188 +       }
9189 +    }
9190 +
9191 +    if (! FaultHandled)
9192 +    {
9193 +       ElanBusError (ctxt->Device);
9194 +
9195 +       if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, DMA_PROC, trap, &trap->FaultSave, EFAULT) != OP_IGNORE)
9196 +           return;
9197 +    }
9198 +
9199 +    switch (trap->Status.s.TrapType)
9200 +    {
9201 +    case MI_WaitForRemoteDescRead2:
9202 +       /*
9203 +        * Faulted while trying to read the dma descriptor for a read dma.
9204 +        * Fix fault and re-issue using FaultAddress.
9205 +        */
9206 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a remote dma descriptor at %x.\n",
9207 +                trap->FaultSave.s.FaultAddress);
9208 +       
9209 +       RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress);
9210 +       break;
9211 +       
9212 +    case MI_ExecuteDmaDescriptorForRun:
9213 +       /*
9214 +        * Faulted while trying to read the dma descriptor for a write dma.
9215 +        * Fix fault and re-issue using FaultAddress.
9216 +        */
9217 +       PRINTF1 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped reading a write dma descriptor at %x.\n", 
9218 +                trap->FaultSave.s.FaultAddress);
9219 +       
9220 +       RestartDmaPtr (ctxt, trap->FaultSave.s.FaultAddress);
9221 +       break;
9222 +       
9223 +    case MI_WaitForRemoteRoutes1:
9224 +    case MI_WaitForRemoteRoutes2:
9225 +    case MI_SendRemoteDmaDesc:
9226 +    case MI_SendDmaIdentify:
9227 +    case MI_SendRemoteDmaRoutes2:
9228 +    case MI_WaitForDmaRoutes1:
9229 +    case MI_DmaLoop:
9230 +    case MI_ExitDmaLoop:
9231 +    case MI_GetDestEventValue:
9232 +    case MI_SendFinalUnlockTrans:
9233 +    case MI_SendNullSetEvent:
9234 +    case MI_SendFinalSetEvent:
9235 +    case MI_SendDmaEOP:
9236 +       /*
9237 +        * Faulted either fetching routes or fetching dma data.
9238 +        * Fix fault and re-issue using FaultAddress.
9239 +        */
9240 +
9241 +    case MI_SendEOPforRemoteDma:
9242 +    case MI_LookAtRemoteAck:
9243 +    case MI_FailedAckIfCCis0:
9244 +       /*
9245 +        * Possible fault when reading the remote desc into the dma data buffers
9246 +        */
9247 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap:  trapped reading a dma data or fetching a route\n");
9248 +       RestartDmaTrap (ctxt, trap);
9249 +       break;
9250 +       
9251 +    case MI_DequeueSysCntxDma:
9252 +    case MI_DequeueNonSysCntxDma:
9253 +    case MI_RemoteDmaCommand:
9254 +    case MI_RunDmaCommand:
9255 +       /*
9256 +        * It is possible that a dma can get back onto the queue while outstanding dma
9257 +        * have not finished trapping. In this case the trap can be ignored as the dma
9258 +        * state has been saved. It might trap again the next time it comes to the front
9259 +        * of the queue and be fixed then.
9260 +        */
9261 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trap after dma has finished. ignored\n");
9262 +       break;
9263 +       
9264 +    default:
9265 +       PRINTF0 (ctxt, DBG_DPROC, "ResolveDProcTrap: trapped on a write set event.\n");
9266 +       FixupEventTrap (ctxt, DMA_PROC, trap, trap->Status.s.TrapType, &trap->FaultSave, 0);
9267 +       break;
9268 +    }
9269 +}
9270 +
9271 +int
9272 +DProcNeedsRestart (ELAN3_CTXT *ctxt)
9273 +{
9274 +    return (ctxt->ItemCount[LIST_DMA_PTR] != 0 ||
9275 +           ctxt->ItemCount[LIST_DMA_DESC] != 0);
9276 +}
9277 +
9278 +void
9279 +RestartDProcItems (ELAN3_CTXT *ctxt)
9280 +{
9281 +    void      *item;
9282 +    E3_Addr    value;
9283 +    int               res;
9284 +    
9285 +    kmutex_lock (&ctxt->SwapListsLock);
9286 +    while (ctxt->ItemCount[LIST_DMA_PTR])
9287 +    {
9288 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_DMA_PTR, &item, &value))
9289 +           ctxt->ItemCount[LIST_DMA_PTR] = 0;
9290 +       else
9291 +       {
9292 +           PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue write dma at %x\n", value);
9293 +           PrintUserDma (ctxt, value);
9294 +
9295 +           res = IssueDmaCommand (ctxt, value, NULL, 0);
9296 +           
9297 +           if (res == ISSUE_COMMAND_RETRY)
9298 +           {
9299 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_PTR, item);
9300 +               kmutex_unlock (&ctxt->SwapListsLock);
9301 +               return;
9302 +           }
9303 +           
9304 +           ctxt->ItemCount[LIST_DMA_PTR]--;
9305 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
9306 +       }
9307 +    }
9308 +    
9309 +    while (ctxt->ItemCount[LIST_DMA_DESC])
9310 +    {
9311 +       if (! ELAN3_OP_GET_BLOCK_ITEM (ctxt, LIST_DMA_DESC, &item, &value))
9312 +           ctxt->ItemCount[LIST_DMA_DESC] = 0;
9313 +       else
9314 +       {
9315 +           PRINTF1 (ctxt, DBG_DPROC, "RestartDProc: issue dma desc at %x\n", value);
9316 +           PrintUserDma (ctxt, value);
9317 +
9318 +           res = IssueDmaCommand (ctxt, value, item, 0);
9319 +
9320 +           switch (res)
9321 +           {
9322 +           case ISSUE_COMMAND_OK:
9323 +               ctxt->ItemCount[LIST_DMA_DESC]--;
9324 +               ELAN3_OP_FREE_BLOCK_ITEM (ctxt, item);
9325 +               break;
9326 +               
9327 +           case ISSUE_COMMAND_RETRY:
9328 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_DMA_DESC, item);
9329 +               kmutex_unlock (&ctxt->SwapListsLock);
9330 +               return;
9331 +               
9332 +           case ISSUE_COMMAND_TRAPPED:
9333 +               ctxt->ItemCount[LIST_DMA_DESC]--;
9334 +               /* The item will be freed off when the command port trap */
9335 +               /* fixed up and the command successfully re-issued */
9336 +               break;
9337 +           }
9338 +       }
9339 +    }
9340 +
9341 +    kmutex_unlock (&ctxt->SwapListsLock);
9342 +}
9343 +
9344 +void
9345 +RestartDmaDesc(ELAN3_CTXT *ctxt, E3_DMA_BE *desc)
9346 +{
9347 +    kmutex_lock (&ctxt->SwapListsLock);
9348 +    if (desc->s.dma_direction != DMA_WRITE)
9349 +       desc->s.dma_direction = (desc->s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
9350 +
9351 +    ELAN3_OP_PUT_BLOCK_ITEM (ctxt, LIST_DMA_DESC, (E3_uint32 *) desc);
9352 +    ctxt->ItemCount[LIST_DMA_DESC]++;
9353 +
9354 +    kmutex_unlock (&ctxt->SwapListsLock);
9355 +}
9356 +
9357 +void
9358 +RestartDmaTrap(ELAN3_CTXT *ctxt, DMA_TRAP *trap)
9359 +{
9360 +    /* Negative length DMAs are illegal, since they hangup the dma processor,
9361 +     * if they got generated then they will have been spotted by PollForDmahungup,
9362 +     * and delivered to us with a Dequeue  suspend address,
9363 +     *
9364 +     * GNAT sw-elan3/3908: Moved this check into this new function to avoid
9365 +     * it sampling old or invalid register state
9366 +     */
9367 +    if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
9368 +       ElanException (ctxt, EXCEPTION_BAD_DMA, DMA_PROC, trap, NULL, 0);
9369 +    else
9370 +       RestartDmaDesc (ctxt, &trap->Desc);
9371 +}
9372 +
9373 +void
9374 +RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr)
9375 +{
9376 +    kmutex_lock (&ctxt->SwapListsLock);
9377 +    ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_DMA_PTR, ptr);
9378 +    ctxt->ItemCount[LIST_DMA_PTR]++;
9379 +    kmutex_unlock (&ctxt->SwapListsLock);
9380 +}
9381 +
9382 +static void
9383 +PrintUserDma (ELAN3_CTXT *ctxt, E3_Addr addr)
9384 +{
9385 +    E3_DMA *dma;
9386 +
9387 +    /* Dont call a function which takes locks unless we need to */
9388 +    if (!(elan3_debug & DBG_DPROC))
9389 +        return;
9390 +
9391 +    dma = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
9392 +
9393 +    PRINTF4 (ctxt, DBG_DPROC, "DMA: type %08x size %08x source %08x dest %08x\n",
9394 +            fuword ((int *) &dma->dma_type), fuword ((int *) &dma->dma_size), 
9395 +            fuword ((int *) &dma->dma_source), fuword ((int *) &dma->dma_dest));
9396 +    PRINTF4 (ctxt, DBG_DPROC, "DMA: Dest %08x %08x  Local %08x %08x\n",
9397 +            fuword ((int *) &dma->dma_destEvent), fuword ((int *) &dma->dma_destCookieProc), 
9398 +            fuword ((int *) &dma->dma_srcEvent), fuword ((int *) &dma->dma_srcCookieProc));
9399 +}
9400 +
9401 +/*
9402 + * Local variables:
9403 + * c-file-style: "stroustrup"
9404 + * End:
9405 + */
9406 diff -urN clean/drivers/net/qsnet/elan3/elan3mmu_generic.c linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_generic.c
9407 --- clean/drivers/net/qsnet/elan3/elan3mmu_generic.c    1969-12-31 19:00:00.000000000 -0500
9408 +++ linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_generic.c      2004-12-14 05:19:38.000000000 -0500
9409 @@ -0,0 +1,3255 @@
9410 +/*
9411 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
9412 + *
9413 + *    For licensing information please see the supplied COPYING file
9414 + *
9415 + */
9416 +
9417 +#ident "@(#)$Id: elan3mmu_generic.c,v 1.76 2004/12/14 10:19:38 mike Exp $"
9418 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_generic.c,v $*/
9419 +
9420 +#include <qsnet/kernel.h>
9421 +
9422 +#include <elan3/elanregs.h>
9423 +#include <elan3/elandev.h>
9424 +#include <elan3/elanvp.h>
9425 +#include <elan3/elan3mmu.h>
9426 +#include <elan3/elanctxt.h>
9427 +#include <elan3/elandebug.h>
9428 +#include <elan3/urom_addrs.h>
9429 +#include <elan3/thread.h>
9430 +
9431 +#ifdef CONFIG_MPSAS
9432 +#  define zero_all_ptbls
9433 +#endif
9434 +
9435 +/*
9436 + * Debugging
9437 + */
9438 +int    elan3mmu_debug = 0;
9439 +
9440 +#define        N_L3PTBL_MTX    (0x20)
9441 +#define        N_L2PTBL_MTX    (0x40)
9442 +#define        N_L1PTBL_MTX    (0x20)
9443 +
9444 +#define        L3PTBL_MTX_HASH(p) \
9445 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L3PTBL_MTX - 1))
9446 +static spinlock_t l3ptbl_lock[N_L3PTBL_MTX];
9447 +
9448 +#define        L2PTBL_MTX_HASH(p)   \
9449 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L2PTBL_MTX - 1))
9450 +static spinlock_t l2ptbl_lock[N_L2PTBL_MTX];
9451 +
9452 +#define        L1PTBL_MTX_HASH(p)   \
9453 +       ((((uintptr_t)(p) >> 12) ^ ((uintptr_t)(p) >> 2)) & (N_L1PTBL_MTX - 1))
9454 +static spinlock_t l1ptbl_lock[N_L1PTBL_MTX];
9455 +
9456 +
9457 +#define        BASE2VA(p)      ((E3_Addr)((p)->ptbl_base << 16))
9458 +#define        VA2BASE(v)      ((u_short)(((uintptr_t)(v)) >> 16))
9459 +
9460 +ELAN3MMU_GLOBAL_STATS  elan3mmu_global_stats;
9461 +
9462 +static void          elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *);
9463 +static void          elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags);
9464 +
9465 +static ELAN3_PTBL    *elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep);
9466 +static ELAN3_PTBL    *elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp);
9467 +
9468 +static ELAN3_PTBL    *elan3mmu_alloc_pte    (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx);
9469 +void                 elan3mmu_free_lXptbl  (ELAN3_DEV *dev, ELAN3_PTBL *ptbl);
9470 +
9471 +void                 elan3mmu_free_pte  (ELAN3_DEV *dev,  ELAN3MMU *elan3mmu,  ELAN3_PTBL *ptbl_ptr, int idx);
9472 +
9473 +static ELAN3_PTBL    *elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu);
9474 +static ELAN3_PTBL    *elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu,
9475 +                                           E3_Addr base, spinlock_t **plock, unsigned long *flags);
9476 +static ELAN3_PTBL    *elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu,
9477 +                                           E3_Addr base, spinlock_t **plock, unsigned long *flags);
9478 +
9479 +static int          elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl);
9480 +static ELAN3_PTBL    *elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr);
9481 +
9482 +static spinlock_t   *elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl);
9483 +
9484 +/*
9485 + * Encoding of MMU permissions against access type,
9486 + * to allow quick permission checking against access 
9487 + * type.
9488 + */
9489 +u_char elan3mmu_permissionTable[] =
9490 +{
9491 +    0xcc,      /* 11001100 ELAN3_PERM_NULL        */
9492 +    0x01,      /* 00000001 ELAN3_PERM_LOCALREAD   */
9493 +    0x05,      /* 00000101 ELAN3_PERM_READ        */
9494 +    0x33,      /* 00110011 ELAN3_PERM_NOREMOTE    */
9495 +    0x37,      /* 00110111 ELAN3_PERM_REMOTEREAD  */
9496 +    0x3f,      /* 00111111 ELAN3_PERM_REMOTEWRITE */
9497 +    0xf7,      /* 11110111 ELAN3_PERM_REMOTEEVENT */
9498 +    0xff,      /* 11111111 ELAN3_PERM_REMOTEALL          */
9499 +} ;
9500 +
9501 +void
9502 +elan3mmu_init()
9503 +{
9504 +    register int i;
9505 +
9506 +    HAT_PRINTF0 (1, "elan3mmu_init: initialising elan mmu\n");
9507 +
9508 +    for (i = 0; i < N_L1PTBL_MTX; i++)
9509 +       spin_lock_init (&l1ptbl_lock[i]);
9510 +
9511 +    for (i = 0; i < N_L2PTBL_MTX; i++)
9512 +       spin_lock_init (&l2ptbl_lock[i]);
9513 +
9514 +    for (i = 0; i < N_L3PTBL_MTX; i++)
9515 +       spin_lock_init (&l3ptbl_lock[i]);
9516 +
9517 +    elan3mmu_global_stats.version = ELAN3MMU_STATS_VERSION;
9518 +
9519 +    elan3mmu_init_osdep();
9520 +}
9521 +
9522 +void
9523 +elan3mmu_fini()
9524 +{
9525 +    register int i;
9526 +
9527 +    HAT_PRINTF0 (1, "elan3mmu_fini: finalising elan mmu\n");
9528 +
9529 +    for (i = 0; i < N_L1PTBL_MTX; i++)
9530 +       spin_lock_destroy (&l1ptbl_lock[i]);
9531 +
9532 +    for (i = 0; i < N_L2PTBL_MTX; i++)
9533 +       spin_lock_destroy (&l2ptbl_lock[i]);
9534 +
9535 +    for (i = 0; i < N_L3PTBL_MTX; i++)
9536 +       spin_lock_destroy (&l3ptbl_lock[i]);
9537 +
9538 +    elan3mmu_fini_osdep();
9539 +}
9540 +
9541 +ELAN3MMU *
9542 +elan3mmu_alloc (ELAN3_CTXT *ctxt)
9543 +{
9544 +    ELAN3MMU  *elan3mmu;
9545 +    ELAN3_PTBL *l1ptbl;
9546 +
9547 +    ALLOC_ELAN3MMU (elan3mmu, TRUE);
9548 +    
9549 +    spin_lock_init (&elan3mmu->elan3mmu_lock);
9550 +
9551 +    spin_lock (&elan3mmu->elan3mmu_lock);                      /* lock_lint */
9552 +
9553 +    elan3mmu->elan3mmu_ergns    = NULL;
9554 +    elan3mmu->elan3mmu_etail    = NULL;
9555 +    elan3mmu->elan3mmu_ergnlast = NULL;
9556 +    elan3mmu->elan3mmu_mrgns    = NULL;
9557 +    elan3mmu->elan3mmu_mtail    = NULL;
9558 +    elan3mmu->elan3mmu_mrgnlast = NULL;
9559 +    elan3mmu->elan3mmu_ctxt     = ctxt;
9560 +
9561 +    spin_lock_init (&elan3mmu->elan3mmu_lXptbl_lock);
9562 +    elan3mmu->elan3mmu_lXptbl   = NULL;
9563 +
9564 +    spin_unlock (&elan3mmu->elan3mmu_lock);                    /* lock_lint */
9565 +
9566 +    l1ptbl = elan3mmu_alloc_l1ptbl(ctxt->Device, 0, elan3mmu);
9567 +
9568 +    elan3mmu->elan3mmu_ctp      = (sdramaddr_t) 0;
9569 +    elan3mmu->elan3mmu_dev      = ctxt->Device;
9570 +    elan3mmu->elan3mmu_l1ptbl   = l1ptbl;
9571 +
9572 +    /* Ensure that there are at least some level 3 page tables,  since if a level 2 and */
9573 +    /* a level 3 table are allocated together, then the level 3 is allocated with the NO_ALLOC */
9574 +    /* flag,  thus there MUST be at least one that can be stolen or on the free list */
9575 +    if (elan3mmu->elan3mmu_dev->Level[PTBL_LEVEL_3].PtblFreeList == NULL)
9576 +       elan3mmu_create_ptbls (elan3mmu->elan3mmu_dev, PTBL_LEVEL_3, 0, 0);
9577 +
9578 +    HAT_PRINTF1 (1, "elan3mmu_alloc: elan3mmu %p\n", elan3mmu);
9579 +
9580 +    elan3mmu_alloc_osdep (elan3mmu);
9581 +
9582 +    return (elan3mmu);
9583 +}
9584 +
9585 +void 
9586 +elan3mmu_free (ELAN3MMU *elan3mmu)
9587 +{
9588 +    ELAN3MMU_RGN   *rgn;
9589 +    ELAN3_PTBL    *l1ptbl;
9590 +    spinlock_t    *l1lock;
9591 +    unsigned long   l1flags;
9592 +    unsigned long   flags;
9593 +
9594 +    HAT_PRINTF1 (1, "elan3mmu_free : elan3mmu %p\n", elan3mmu);
9595 +    
9596 +    /*
9597 +     * Invalidate the level1 page table,  since it's already removed
9598 +     * from the context table, there is no need to flush the tlb.
9599 +     */
9600 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
9601 +    elan3mmu->elan3mmu_l1ptbl = NULL;
9602 +    
9603 +    if (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, PTBL_LEVEL_1, &l1lock, &l1flags) == LK_PTBL_OK)
9604 +    {
9605 +       elan3mmu_l1inval (elan3mmu, l1ptbl, PTE_UNLOAD_NOFLUSH);
9606 +       elan3mmu_free_l1ptbl (elan3mmu->elan3mmu_dev, l1ptbl, l1lock, l1flags);
9607 +    }
9608 +
9609 +    /*
9610 +     * Free of any permission regions.
9611 +     */
9612 +    spin_lock (&elan3mmu->elan3mmu_lock);                                      /* lock_lint */
9613 +    while ((rgn = elan3mmu->elan3mmu_mrgns) != NULL)
9614 +    {
9615 +       spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);           /* lock_lint */
9616 +       elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase);
9617 +       elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase);
9618 +       spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);      /* lock_lint */
9619 +       
9620 +       FREE_ELAN3MMU_RGN (rgn);
9621 +    }
9622 +    elan3mmu->elan3mmu_mrgnlast = NULL;
9623 +    elan3mmu->elan3mmu_ergnlast = NULL;
9624 +
9625 +    /* 
9626 +     * Free the lXptbl list
9627 +     */
9628 +    ASSERT (elan3mmu->elan3mmu_lXptbl == NULL); /* XXXX MRH need to add list removal */
9629
9630 +    elan3mmu->elan3mmu_lXptbl = NULL;
9631 +    spin_lock_destroy (&elan3mmu->elan3mmu_lXptbl_lock);
9632 +
9633 +
9634 +    spin_unlock (&elan3mmu->elan3mmu_lock);                                    /* lock_lint */
9635 +
9636 +    spin_lock_destroy (&elan3mmu->elan3mmu_lock);
9637 +
9638 +    FREE_ELAN3MMU (elan3mmu);
9639 +}
9640 +
9641 +/*================================================================================*/
9642 +/* Interface routines to device driver */
9643 +static void
9644 +elan3mmu_flush_context_filter (ELAN3_DEV *dev, void *arg)
9645 +{
9646 +    unsigned long flags;
9647 +
9648 +    spin_lock_irqsave (&dev->IntrLock, flags);
9649 +    ASSERT ((read_reg32 (dev, Exts.InterruptReg) & (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)) == 
9650 +           (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx));
9651 +
9652 +    dev->FilterHaltQueued = 0;
9653 +
9654 +    write_reg32 (dev, Input_Context_Fil_Flush, 0);
9655 +
9656 +    HAT_PRINTF0 (1, "elan3mmu_flush_context_filter completed\n");
9657 +
9658 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
9659 +}
9660 +
9661 +void
9662 +elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp)
9663 +{
9664 +    int         mctx = ctx & MAX_ROOT_CONTEXT_MASK;
9665 +    sdramaddr_t ctp  = dev->ContextTable + mctx * sizeof (E3_ContextControlBlock);
9666 +
9667 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
9668 +
9669 +    ASSERT ((mctx < 32 || mctx >= ELAN3_KCOMM_BASE_CONTEXT_NUM) ? (ctx & SYS_CONTEXT_BIT) : ! (ctx & SYS_CONTEXT_BIT));
9670 +
9671 +    elan3_sdram_writel (dev, ctp + offsetof (E3_ContextControlBlock, filter), 
9672 +                 ((ctx & SYS_CONTEXT_BIT) ? E3_CCB_CNTX0 : 0) | (disabled ? E3_CCB_DISCARD_ALL : 0));
9673 +
9674 +    HAT_PRINTF4 (1, "elan3mmu_set_context_filter: ctx %x [%lx] -> %s (%x)\n", ctx, ctp,
9675 +                disabled ? "up" : "down", elan3_sdram_readl (dev, ctp + offsetof (E3_ContextControlBlock, filter)));
9676 +
9677 +    /* queue a halt operation to flush the context filter while the inputter is halted */
9678 +    if (dev->FilterHaltQueued == 0)
9679 +    {
9680 +       dev->FilterHaltQueued = 1;
9681 +       QueueHaltOperation (dev, Pend, Maskp, INT_DiscardingSysCntx | INT_DiscardingNonSysCntx, 
9682 +                           elan3mmu_flush_context_filter, NULL);
9683 +    }
9684 +}
9685 +
9686 +int
9687 +elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask)
9688 +{
9689 +    sdramaddr_t ctp;
9690 +    ELAN3_PTP    trootptp;
9691 +
9692 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
9693 +
9694 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
9695 +    
9696 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
9697 +       return (EINVAL);
9698 +
9699 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
9700 +    
9701 +    trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP));
9702 +    
9703 +    if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID)
9704 +       return (EBUSY);
9705 +
9706 +    elan3mmu->elan3mmu_ctp = ctp;
9707 +    
9708 +    trootptp = PTBL_TO_PTADDR (elan3mmu->elan3mmu_l1ptbl) | ELAN3_ET_PTP;
9709 +    
9710 +    HAT_PRINTF4 (1, "elan3mmu_attach: ctp at %08lx : trootptp=%08x VPT_ptr=%08lx VPT_mask=%08x\n",
9711 +                ctp, trootptp, routeTable, routeMask);
9712 +    
9713 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), trootptp);
9714 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), routeTable);
9715 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), routeMask);
9716 +    
9717 +    return (ESUCCESS);
9718 +}
9719 +
9720 +void
9721 +elan3mmu_detach (ELAN3_DEV *dev, int ctx)
9722 +{
9723 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
9724 +    sdramaddr_t ctp;
9725 +    
9726 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
9727 +    
9728 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
9729 +       return;
9730 +    
9731 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
9732 +    
9733 +    HAT_PRINTF1 (1, "elan3mmu_detach: clearing ptp at %lx\n", ctp);
9734 +    
9735 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP), invalidptp);
9736 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_mask), 0);
9737 +    elan3_writeptp (dev, ctp + offsetof (E3_ContextControlBlock, VPT_ptr), 0);
9738 +    
9739 +    ElanFlushTlb (dev);
9740 +}
9741 +
9742 +int
9743 +elan3mmu_reference (ELAN3MMU *elan3mmu, int ctx)
9744 +{
9745 +    ELAN3_DEV              *dev = elan3mmu->elan3mmu_dev;
9746 +    sdramaddr_t            ctp;
9747 +    E3_ContextControlBlock ccb;
9748 +    ELAN3_PTP               trootptp;
9749 +
9750 +    ctx &= MAX_ROOT_CONTEXT_MASK;                                              /* Mask out all high bits in context */
9751 +    
9752 +    if (ctx < 0 || ctx >= dev->ContextTableSize)
9753 +       return (EINVAL);
9754 +
9755 +    ctp = dev->ContextTable + ctx * sizeof (E3_ContextControlBlock);
9756 +
9757 +    trootptp = elan3_readptp (dev, ctp + offsetof (E3_ContextControlBlock, rootPTP));
9758 +    
9759 +    if (ELAN3_PTP_TYPE(trootptp) != ELAN3_ET_INVALID)
9760 +       return (EBUSY);
9761 +    
9762 +    elan3_sdram_copyl_from_sdram (dev, elan3mmu->elan3mmu_ctp, &ccb, sizeof (E3_ContextControlBlock));
9763 +    elan3_sdram_copyl_to_sdram (dev, &ccb, ctp, sizeof (E3_ContextControlBlock));
9764 +    
9765 +    return (ESUCCESS);
9766 +    
9767 +}
9768 +/*================================================================================*/
9769 +/* Elan permission regions. */
9770 +
9771 +/* elan address region management */
9772 +ELAN3MMU_RGN *
9773 +elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu,
9774 +                      E3_Addr addr, int tail)
9775 +{
9776 +    ELAN3MMU_RGN *next = NULL;
9777 +    ELAN3MMU_RGN *rgn;
9778 +    ELAN3MMU_RGN *hirgn;
9779 +    ELAN3MMU_RGN *lorgn;
9780 +    E3_Addr       base;
9781 +    E3_Addr       lastaddr;
9782 +    int                  forward;
9783 +
9784 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9785 +
9786 +    if (elan3mmu->elan3mmu_ergns == NULL)
9787 +       return (NULL);
9788 +
9789 +    rgn = elan3mmu->elan3mmu_ergnlast;
9790 +    if (rgn == NULL)
9791 +       rgn = elan3mmu->elan3mmu_ergns;
9792 +
9793 +    forward = 0;
9794 +    if ((u_long) (base = rgn->rgn_ebase) < (u_long)addr)
9795 +    {
9796 +       if ((u_long)addr <= ((u_long) base + rgn->rgn_len - 1))
9797 +           return (rgn);                                       /* ergnlast contained addr */
9798 +
9799 +       hirgn = elan3mmu->elan3mmu_etail;
9800 +
9801 +       if ((u_long) (lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < (u_long) addr)
9802 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
9803 +       
9804 +       if ((u_long) (addr - base) > (u_long) (lastaddr - addr))
9805 +           rgn = hirgn;
9806 +       else
9807 +       {
9808 +           rgn = rgn->rgn_enext;
9809 +           forward++;
9810 +       }
9811 +    }
9812 +    else
9813 +    {
9814 +       lorgn = elan3mmu->elan3mmu_ergns;
9815 +
9816 +       if ((u_long)lorgn->rgn_ebase > (u_long) addr)
9817 +           return (lorgn);                                     /* lowest regions is higher than addr */
9818 +       if ((u_long)(addr - lorgn->rgn_ebase) < (u_long) (base - addr))
9819 +       {
9820 +           rgn = lorgn;                                        /* search forward from head */
9821 +           forward++;
9822 +       }
9823 +    }
9824 +    if (forward)
9825 +    {
9826 +       while ((u_long)(rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr)
9827 +           rgn = rgn->rgn_enext;
9828 +
9829 +       if ((u_long)rgn->rgn_ebase <= (u_long)addr)
9830 +           elan3mmu->elan3mmu_ergnlast = rgn;
9831 +       return (rgn);
9832 +    }
9833 +    else
9834 +    {
9835 +       while ((u_long)rgn->rgn_ebase > (u_long)addr)
9836 +       {
9837 +           next = rgn;
9838 +           rgn = rgn->rgn_eprev;
9839 +       }
9840 +
9841 +       if ((u_long) (rgn->rgn_ebase + rgn->rgn_len - 1) < (u_long)addr)
9842 +           return (next);
9843 +       else
9844 +       {
9845 +           elan3mmu->elan3mmu_ergnlast = rgn;
9846 +           return (rgn);
9847 +       }
9848 +    }
9849 +}
9850 +
9851 +int
9852 +elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn)
9853 +{
9854 +    ELAN3MMU_RGN *rgn   = elan3mmu_findrgn_elan (elan3mmu, nrgn->rgn_ebase, 1);
9855 +    E3_Addr       nbase = nrgn->rgn_ebase;
9856 +    E3_Addr      ntop  = nbase + nrgn->rgn_len - 1; /* avoid wrap */
9857 +    E3_Addr      base;
9858 +
9859 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9860 +
9861 +    if (rgn == NULL)
9862 +    {
9863 +       elan3mmu->elan3mmu_ergns = elan3mmu->elan3mmu_etail = nrgn;
9864 +       nrgn->rgn_enext = nrgn->rgn_eprev = NULL;
9865 +    }
9866 +    else
9867 +    {
9868 +       base = rgn->rgn_ebase;
9869 +
9870 +       if ((u_long)(base + rgn->rgn_len - 1) < (u_long)nbase)  /* top of region below requested address */
9871 +       {                                                       /* so insert after region (and hence at end */
9872 +           nrgn->rgn_eprev = rgn;                              /* of list */
9873 +           nrgn->rgn_enext = NULL;
9874 +           rgn->rgn_enext = elan3mmu->elan3mmu_etail = nrgn;
9875 +       }
9876 +       else
9877 +       {
9878 +           if ((u_long)nbase >= (u_long)base || (u_long)ntop >= (u_long)base)
9879 +               return (-1);                                    /* overlapping region */
9880 +
9881 +           nrgn->rgn_enext = rgn;                              /* insert before region */
9882 +           nrgn->rgn_eprev = rgn->rgn_eprev;
9883 +           rgn->rgn_eprev  = nrgn;
9884 +           if (elan3mmu->elan3mmu_ergns == rgn)
9885 +               elan3mmu->elan3mmu_ergns = nrgn;
9886 +           else
9887 +               nrgn->rgn_eprev->rgn_enext = nrgn;
9888 +       }
9889 +    }
9890 +    elan3mmu->elan3mmu_ergnlast = nrgn;
9891 +    
9892 +    return (0);
9893 +}
9894 +
9895 +ELAN3MMU_RGN *
9896 +elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr)
9897 +{
9898 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0);
9899 +    
9900 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9901 +
9902 +    if (rgn == NULL || rgn->rgn_ebase != addr)
9903 +       return (NULL);
9904 +    
9905 +    elan3mmu->elan3mmu_ergnlast = rgn->rgn_enext;
9906 +    if (rgn == elan3mmu->elan3mmu_etail)
9907 +       elan3mmu->elan3mmu_etail = rgn->rgn_eprev;
9908 +    else
9909 +       rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev;
9910 +    
9911 +    if (rgn == elan3mmu->elan3mmu_ergns)
9912 +       elan3mmu->elan3mmu_ergns = rgn->rgn_enext;
9913 +    else
9914 +       rgn->rgn_eprev->rgn_enext = rgn->rgn_enext;
9915 +
9916 +    return (rgn);
9917 +}
9918 +
9919 +ELAN3MMU_RGN *
9920 +elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr)
9921 +{
9922 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0);
9923 +    E3_Addr       base;
9924 +
9925 +    if (rgn != NULL && (u_long)(base = rgn->rgn_ebase) <= (u_long)addr && (u_long)addr <= (u_long)(base + rgn->rgn_len - 1))
9926 +       return (rgn);
9927 +    return (NULL);
9928 +}
9929 +
9930 +/* main address region management */
9931 +ELAN3MMU_RGN *
9932 +elan3mmu_findrgn_main (ELAN3MMU *elan3mmu,
9933 +                      caddr_t addr, int tail)
9934 +{
9935 +    ELAN3MMU_RGN *next = NULL;
9936 +    ELAN3MMU_RGN *rgn;
9937 +    ELAN3MMU_RGN *hirgn;
9938 +    ELAN3MMU_RGN *lorgn;
9939 +    caddr_t       lastaddr;
9940 +    caddr_t       base;
9941 +    int                  forward;
9942 +
9943 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) || SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
9944 +
9945 +    if (elan3mmu->elan3mmu_mrgns == NULL)
9946 +       return (NULL);
9947 +
9948 +    rgn = elan3mmu->elan3mmu_mrgnlast;
9949 +    if (rgn == NULL)
9950 +       rgn = elan3mmu->elan3mmu_mrgns;
9951 +
9952 +    forward = 0;
9953 +    if ((base = rgn->rgn_mbase) < addr)
9954 +    {
9955 +       if (addr <= (base + rgn->rgn_len - 1))
9956 +           return (rgn);                                       /* ergnlast contained addr */
9957 +
9958 +       hirgn = elan3mmu->elan3mmu_mtail;
9959 +       if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr)
9960 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
9961 +       
9962 +       if ((addr - base) > (lastaddr - addr))
9963 +           rgn = hirgn;
9964 +       else
9965 +       {
9966 +           rgn = rgn->rgn_mnext;
9967 +           forward++;
9968 +       }
9969 +    }
9970 +    else
9971 +    {
9972 +       lorgn = elan3mmu->elan3mmu_mrgns;
9973 +       if (lorgn->rgn_mbase > addr)
9974 +           return (lorgn);                                     /* lowest regions is higher than addr */
9975 +       if ((addr - lorgn->rgn_mbase) < (base - addr))
9976 +       {
9977 +           rgn = lorgn;                                        /* search forward from head */
9978 +           forward++;
9979 +       }
9980 +    }
9981 +    if (forward)
9982 +    {
9983 +       while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
9984 +           rgn = rgn->rgn_mnext;
9985 +
9986 +       if (rgn->rgn_mbase <= addr)
9987 +           elan3mmu->elan3mmu_mrgnlast = rgn;
9988 +       return (rgn);
9989 +    }
9990 +    else
9991 +    {
9992 +       while (rgn->rgn_mbase > addr)
9993 +       {
9994 +           next = rgn;
9995 +           rgn = rgn->rgn_mprev;
9996 +       }
9997 +       if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
9998 +           return (next);
9999 +       else
10000 +       {
10001 +           elan3mmu->elan3mmu_mrgnlast = rgn;
10002 +           return (rgn);
10003 +       }
10004 +    }
10005 +}
10006 +
10007 +int
10008 +elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn)
10009 +{
10010 +    ELAN3MMU_RGN *rgn   = elan3mmu_findrgn_main (elan3mmu, nrgn->rgn_mbase, 1);
10011 +    caddr_t       nbase = nrgn->rgn_mbase;
10012 +    caddr_t      ntop  = nbase + nrgn->rgn_len - 1;
10013 +    caddr_t      base;
10014 +
10015 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
10016 +
10017 +    if (rgn == NULL)
10018 +    {
10019 +       elan3mmu->elan3mmu_mrgns = elan3mmu->elan3mmu_mtail = nrgn;
10020 +       nrgn->rgn_mnext = nrgn->rgn_mprev = NULL;
10021 +    }
10022 +    else
10023 +    {
10024 +       base = rgn->rgn_mbase;
10025 +
10026 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
10027 +       {                                                       /* so insert after region (and hence at end */
10028 +           nrgn->rgn_mprev = rgn;                              /* of list */
10029 +           nrgn->rgn_mnext = NULL;
10030 +           rgn->rgn_mnext = elan3mmu->elan3mmu_mtail = nrgn;
10031 +       }
10032 +       else
10033 +       {
10034 +           if (nbase >= base || ntop >= base)
10035 +               return (-1);                                    /* overlapping region */
10036 +
10037 +           nrgn->rgn_mnext = rgn;                              /* insert before region */
10038 +           nrgn->rgn_mprev = rgn->rgn_mprev;
10039 +           rgn->rgn_mprev  = nrgn;
10040 +           if (elan3mmu->elan3mmu_mrgns == rgn)
10041 +               elan3mmu->elan3mmu_mrgns = nrgn;
10042 +           else
10043 +               nrgn->rgn_mprev->rgn_mnext = nrgn;
10044 +       }
10045 +    }
10046 +    elan3mmu->elan3mmu_mrgnlast = nrgn;
10047 +    
10048 +    return (0);
10049 +}
10050 +
10051 +ELAN3MMU_RGN *
10052 +elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr)
10053 +{
10054 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
10055 +    
10056 +    ASSERT (SPINLOCK_HELD (&elan3mmu->elan3mmu_dev->IntrLock) && SPINLOCK_HELD (&elan3mmu->elan3mmu_lock));
10057 +
10058 +    if (rgn == NULL || rgn->rgn_mbase != addr)
10059 +       return (NULL);
10060 +    
10061 +    elan3mmu->elan3mmu_mrgnlast = rgn->rgn_mnext;
10062 +    if (rgn == elan3mmu->elan3mmu_mtail)
10063 +       elan3mmu->elan3mmu_mtail = rgn->rgn_mprev;
10064 +    else
10065 +       rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev;
10066 +    
10067 +    if (rgn == elan3mmu->elan3mmu_mrgns)
10068 +       elan3mmu->elan3mmu_mrgns = rgn->rgn_mnext;
10069 +    else
10070 +       rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext;
10071 +
10072 +    return (rgn);
10073 +}
10074 +
10075 +ELAN3MMU_RGN *
10076 +elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr)
10077 +{
10078 +    ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
10079 +    caddr_t       base;
10080 +
10081 +    if (rgn != NULL && (base = rgn->rgn_mbase) <= addr && addr <= (base + rgn->rgn_len - 1))
10082 +       return (rgn);
10083 +    return (NULL);
10084 +}
10085 +
10086 +int
10087 +elan3mmu_setperm (ELAN3MMU *elan3mmu,
10088 +                 caddr_t   maddr,
10089 +                 E3_Addr   eaddr,
10090 +                 u_int     len,
10091 +                 u_int     perm)
10092 +{
10093 +    ELAN3_DEV     *dev = elan3mmu->elan3mmu_dev;
10094 +    ELAN3MMU_RGN *nrgn;
10095 +    unsigned long  flags;
10096 +
10097 +    HAT_PRINTF4 (1, "elan3mmu_setperm: user %p elan %08x len %x perm %x\n", maddr, eaddr, len, perm);
10098 +
10099 +    if ((((uintptr_t) maddr) & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET)) 
10100 +    {
10101 +        HAT_PRINTF0 (1, "elan3mmu_setperm:  alignment failure\n");
10102 +       return (EINVAL);
10103 +    }
10104 +
10105 +    if (((uintptr_t) maddr + len - 1) < (uintptr_t) maddr || ((u_long)eaddr + len - 1) < (u_long)eaddr) 
10106 +    {
10107 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  range failure\n");
10108 +       return (EINVAL);
10109 +    }
10110 +
10111 +    ALLOC_ELAN3MMU_RGN(nrgn, TRUE);
10112 +    
10113 +    spin_lock (&elan3mmu->elan3mmu_lock);
10114 +    nrgn->rgn_mbase = maddr;
10115 +    nrgn->rgn_ebase = eaddr;
10116 +    nrgn->rgn_len   = len;
10117 +    nrgn->rgn_perm  = perm;
10118 +
10119 +    spin_lock_irqsave (&dev->IntrLock, flags);
10120 +    if (elan3mmu_addrgn_elan (elan3mmu, nrgn) < 0)
10121 +    {
10122 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  elan address exists\n");
10123 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
10124 +       spin_unlock (&elan3mmu->elan3mmu_lock);
10125 +
10126 +       FREE_ELAN3MMU_RGN (nrgn);
10127 +       return (EINVAL);
10128 +    }
10129 +    
10130 +    if (elan3mmu_addrgn_main (elan3mmu, nrgn) < 0)
10131 +    {
10132 +       HAT_PRINTF0 (1, "elan3mmu_setperm:  main address exists\n");
10133 +       elan3mmu_removergn_elan (elan3mmu, eaddr);
10134 +
10135 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
10136 +       spin_unlock (&elan3mmu->elan3mmu_lock);
10137 +
10138 +       FREE_ELAN3MMU_RGN (nrgn);
10139 +       return (EINVAL);
10140 +    }
10141 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
10142 +    spin_unlock (&elan3mmu->elan3mmu_lock);
10143 +
10144 +    return (ESUCCESS);
10145 +}
10146 +
10147 +void
10148 +elan3mmu_clrperm (ELAN3MMU *elan3mmu,
10149 +                 E3_Addr   addr,
10150 +                 u_int     len)
10151 +{
10152 +    E3_Addr       raddr;
10153 +    E3_Addr       rtop;
10154 +    ELAN3MMU_RGN *nrgn;
10155 +    ELAN3MMU_RGN *rgn;
10156 +    ELAN3MMU_RGN *rgn_next;
10157 +    u_int        ssize;
10158 +    unsigned long flags;
10159 +    int                  res;
10160 +
10161 +    HAT_PRINTF2 (1, "elan3mmu_clrperm: elan %08x len %x\n", addr, len);
10162 +
10163 +    raddr = (addr & PAGEMASK);
10164 +    rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET;
10165 +
10166 +    ALLOC_ELAN3MMU_RGN (nrgn, TRUE);
10167 +
10168 +    spin_lock (&elan3mmu->elan3mmu_lock);
10169 +    
10170 +    for (rgn = elan3mmu_findrgn_elan (elan3mmu, addr, 0); rgn != NULL; rgn = rgn_next)
10171 +    {
10172 +       if (rtop < rgn->rgn_ebase)                              /* rtop was in a gap */
10173 +           break;
10174 +       
10175 +       rgn_next = rgn->rgn_enext;                              /* Save next region pointer */
10176 +       
10177 +       if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1)) 
10178 +       {
10179 +           /* whole region is cleared */
10180 +           elan3mmu_unload (elan3mmu, rgn->rgn_ebase, rgn->rgn_len, PTE_UNLOAD);
10181 +           
10182 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10183 +           elan3mmu_removergn_elan (elan3mmu, rgn->rgn_ebase);
10184 +           elan3mmu_removergn_main (elan3mmu, rgn->rgn_mbase);
10185 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10186 +
10187 +           FREE_ELAN3MMU_RGN (rgn);
10188 +       }
10189 +       else if (raddr <= rgn->rgn_ebase)
10190 +       {
10191 +           /* clearing at beginning, so shrink size and increment base ptrs */
10192 +           ssize = rtop - rgn->rgn_ebase + 1;
10193 +
10194 +           elan3mmu_unload (elan3mmu, rgn->rgn_ebase, ssize, PTE_UNLOAD);
10195 +           
10196 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10197 +           rgn->rgn_mbase += ssize;
10198 +           rgn->rgn_ebase += ssize;
10199 +           rgn->rgn_len   -= ssize;
10200 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10201 +           
10202 +       }
10203 +       else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
10204 +       {
10205 +           /* clearing at end, so just shrink length of region */
10206 +           ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
10207 +
10208 +           elan3mmu_unload (elan3mmu, raddr, ssize, PTE_UNLOAD);
10209 +
10210 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10211 +           rgn->rgn_len -= ssize;
10212 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10213 +       }
10214 +       else
10215 +       {
10216 +           /* the section to go is in the middle,  so need to  */
10217 +           /* split it into two regions */
10218 +           elan3mmu_unload (elan3mmu, raddr, rtop - raddr + 1, PTE_UNLOAD);
10219 +
10220 +           spin_lock_irqsave (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10221 +
10222 +           ASSERT (nrgn != NULL);
10223 +
10224 +           nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1);;
10225 +           nrgn->rgn_ebase = rtop + 1;
10226 +           nrgn->rgn_len   = ((rgn->rgn_ebase + rgn->rgn_len - 1) - rtop);
10227 +           nrgn->rgn_perm  = rgn->rgn_perm;
10228 +
10229 +           rgn->rgn_len = (raddr - rgn->rgn_ebase);            /* shrink original region */
10230 +
10231 +           res = elan3mmu_addrgn_elan (elan3mmu, nrgn);        /* insert new region */
10232 +           ASSERT (res == 0);                                  /* which cannot fail */
10233 +
10234 +           res = elan3mmu_addrgn_main (elan3mmu, nrgn);        
10235 +           ASSERT (res == 0);
10236 +           spin_unlock_irqrestore (&elan3mmu->elan3mmu_dev->IntrLock, flags);
10237 +
10238 +           nrgn = NULL;
10239 +       }
10240 +    }
10241 +    spin_unlock (&elan3mmu->elan3mmu_lock);
10242 +
10243 +    if (nrgn != NULL)
10244 +       FREE_ELAN3MMU_RGN (nrgn);
10245 +}
10246 +
10247 +int
10248 +elan3mmu_checkperm (ELAN3MMU *elan3mmu,
10249 +                   E3_Addr   addr,
10250 +                   u_int     len,
10251 +                   u_int     access)
10252 +{
10253 +    E3_Addr     raddr = (((E3_Addr) addr) & PAGEMASK);
10254 +    u_int        rtop = ((addr + len - 1) & PAGEMASK) + PAGEOFFSET;
10255 +    u_int       rsize = rtop - raddr + 1;
10256 +    ELAN3MMU_RGN *rgn;
10257 +
10258 +    HAT_PRINTF3 (1, "elan3mmu_checkperm: user %08x len %x access %x\n", addr, len, access);
10259 +    
10260 +    
10261 +    if ((raddr + rsize - 1) < raddr)
10262 +       return (ENOMEM);
10263 +    
10264 +    spin_lock (&elan3mmu->elan3mmu_lock);
10265 +    if ((rgn = elan3mmu_rgnat_elan (elan3mmu, raddr)) == (ELAN3MMU_RGN *) NULL)
10266 +    {
10267 +       spin_unlock (&elan3mmu->elan3mmu_lock);
10268 +       return (ENOMEM);
10269 +    }
10270 +    else
10271 +    {
10272 +       register int ssize;
10273 +       
10274 +       for (; rsize != 0; rsize -= ssize, raddr += ssize)
10275 +       {
10276 +           if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1))
10277 +           {
10278 +               rgn  = rgn->rgn_enext;
10279 +               
10280 +               if (rgn == NULL || raddr != rgn->rgn_ebase)
10281 +               {
10282 +                   spin_unlock (&elan3mmu->elan3mmu_lock);
10283 +                   return (ENOMEM);
10284 +               }
10285 +           }
10286 +           if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1))
10287 +               ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
10288 +           else
10289 +               ssize = rsize;
10290 +           
10291 +           HAT_PRINTF4 (1, "elan3mmu_checkperm : rgn %x -> %x perm %x access %x\n",
10292 +                        rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len, rgn->rgn_perm, access);
10293 +
10294 +           if (ELAN3_INCOMPAT_ACCESS (rgn->rgn_perm, access))
10295 +           {
10296 +               spin_unlock (&elan3mmu->elan3mmu_lock);
10297 +               return (EACCES);
10298 +           }
10299 +       }
10300 +    }
10301 +    
10302 +    spin_unlock (&elan3mmu->elan3mmu_lock);
10303 +    
10304 +    return (ESUCCESS);
10305 +}
10306 +
10307 +caddr_t
10308 +elan3mmu_mainaddr (ELAN3MMU *elan3mmu, E3_Addr addr)
10309 +{
10310 +    ELAN3MMU_RGN *rgn;
10311 +    caddr_t      raddr;
10312 +    
10313 +    spin_lock (&elan3mmu->elan3mmu_lock);
10314 +    if ((rgn = elan3mmu_rgnat_elan (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL)
10315 +       raddr = NULL;
10316 +    else
10317 +       raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
10318 +    spin_unlock (&elan3mmu->elan3mmu_lock);
10319 +
10320 +    return (raddr);
10321 +}
10322 +
10323 +E3_Addr
10324 +elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr)
10325 +{
10326 +    ELAN3MMU_RGN *rgn;
10327 +    E3_Addr       raddr;
10328 +
10329 +    spin_lock (&elan3mmu->elan3mmu_lock);
10330 +    if ((rgn = elan3mmu_rgnat_main (elan3mmu, addr)) == (ELAN3MMU_RGN *) NULL)
10331 +       raddr = (E3_Addr) 0;
10332 +    else
10333 +       raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
10334 +    spin_unlock (&elan3mmu->elan3mmu_lock);
10335 +
10336 +    return (raddr);
10337 +}
10338 +
10339 +void
10340 +elan3mmu_displayrgns(ELAN3MMU *elan3mmu)
10341 +{
10342 +    ELAN3MMU_RGN *rgn;
10343 +
10344 +    spin_lock (&elan3mmu->elan3mmu_lock);
10345 +    HAT_PRINTF0 (1, "elan3mmu_displayrgns: main regions\n");
10346 +    for (rgn = elan3mmu->elan3mmu_mrgns; rgn; rgn = (rgn->rgn_mnext == elan3mmu->elan3mmu_mrgns) ? NULL : rgn->rgn_mnext)
10347 +       HAT_PRINTF5 (1, "    RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm);
10348 +    HAT_PRINTF0 (1, "elan3mmu_displayrgns: elan regions\n");
10349 +    for (rgn = elan3mmu->elan3mmu_ergns; rgn; rgn = (rgn->rgn_enext == elan3mmu->elan3mmu_ergns) ? NULL : rgn->rgn_enext)
10350 +       HAT_PRINTF5 (1, "    RGN %p ebase %08x mbase %p len %08x perm %08x\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len, rgn->rgn_perm);
10351 +
10352 +    spin_unlock (&elan3mmu->elan3mmu_lock);
10353 +}
10354 +
10355 +/*============================================================================*/
10356 +/* Private functions */
10357 +#define ELAN3_PTE_IS_VALID(ptbl, pte)  \
10358 +          ((ptbl->ptbl_flags & PTBL_KERNEL) ? \
10359 +          (pte&(~ELAN3_PTE_REF)) != elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu) : \
10360 +          ELAN3_PTE_VALID(pte))
10361 +
10362 +void
10363 +elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr)
10364 +{
10365 +    ELAN3_PTBL          *ptbl;
10366 +    sdramaddr_t                pte;
10367 +    spinlock_t        *lock;
10368 +    u_int              span;
10369 +    unsigned long       flags;
10370 +
10371 +    HAT_PRINTF3 (1, "elan3mmu_expand: elan3mmu %p %08x to %08x\n", elan3mmu, 
10372 +                addr, addr + len);
10373 +
10374 +    for ( ; len != 0; addr += span, len -= span)
10375 +    {
10376 +       /* as we asked for level 3 we know its a pte */
10377 +       pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
10378 +
10379 +       switch (level)
10380 +       {
10381 +       case PTBL_LEVEL_3:
10382 +           span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10383 +           break;
10384 +       case PTBL_LEVEL_2:
10385 +           span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET));
10386 +           break;
10387 +       default:
10388 +           span = len;
10389 +           break;
10390 +       }
10391 +       
10392 +       if (pte != (sdramaddr_t) 0)
10393 +           elan3mmu_unlock_ptbl (ptbl, lock, flags);
10394 +    }
10395 +}
10396 +
10397 +void
10398 +elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes)
10399 +{
10400 +    ELAN3_PTBL          *ptbl;
10401 +    sdramaddr_t                pte;
10402 +    spinlock_t        *lock;
10403 +    u_int              span;
10404 +    int                        len;
10405 +    int                        i;
10406 +    unsigned long       flags;
10407 +
10408 +    HAT_PRINTF3 (1, "elan3mmu_reserve: elan3mmu %p %08x to %08x\n", elan3mmu, 
10409 +                addr, addr + (npages << ELAN3_PAGE_SHIFT));
10410 +
10411 +    for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span)
10412 +    {
10413 +       /* as we asked for level 3 we know its a pte */
10414 +       pte = elan3mmu_ptealloc (elan3mmu, addr, 3, &ptbl, &lock, 0, &flags);
10415 +
10416 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10417 +       
10418 +       if (ptes != NULL)
10419 +       {
10420 +           for (i = 0; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE)
10421 +               *ptes++ = pte;
10422 +           ptbl->ptbl_valid += (span >> ELAN3_PAGE_SHIFT);
10423 +
10424 +           HAT_PRINTF4 (2, "elan3mmu_reserve: inc valid for level %d ptbl %p to %d   (%d)\n", 
10425 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid, (span >> ELAN3_PAGE_SHIFT));
10426 +
10427 +       }
10428 +
10429 +       elan3mmu_unlock_ptbl (ptbl, lock, flags);
10430 +    }
10431 +}
10432 +
10433 +void
10434 +elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *ptes)
10435 +{
10436 +    ELAN3_DEV           *dev = elan3mmu->elan3mmu_dev;
10437 +    ELAN3_PTBL          *ptbl;
10438 +    sdramaddr_t                pte;
10439 +    ELAN3_PTE          tpte;
10440 +    spinlock_t        *lock;
10441 +    u_int              span;
10442 +    int                        len;
10443 +    int                        i;
10444 +    int                        level;
10445 +    unsigned long       flags;
10446 +    
10447 +    HAT_PRINTF3 (1, "elan3mmu_release: elan3mmu %p %08x to %08x\n", elan3mmu, 
10448 +                addr, addr + (npages << ELAN3_PAGE_SHIFT));
10449 +
10450 +    if (ptes == NULL)
10451 +       return;
10452 +
10453 +    tpte = elan3mmu_kernel_invalid_pte (elan3mmu);
10454 +
10455 +    for (len = (npages << ELAN3_PAGE_SHIFT); len != 0; addr += span, len -= span)
10456 +    {
10457 +       /* as we asked for level 3 we know its a pte */
10458 +       pte = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags);
10459 +       ASSERT (level == PTBL_LEVEL_3);
10460 +
10461 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10462 +
10463 +
10464 +       for (i = 0 ; i < span; i += ELAN3_PAGE_SIZE, pte += ELAN3_PTE_SIZE)
10465 +           elan3_writepte (dev, pte, tpte);
10466 +       ptbl->ptbl_valid -= (span >> ELAN3_PAGE_SHIFT);
10467 +
10468 +       HAT_PRINTF3 (2, "elan3mmu_release: dec valid for level %d ptbl %p to %d\n", 
10469 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
10470 +
10471 +       elan3mmu_unlock_ptbl (ptbl, lock, flags);
10472 +    }
10473 +    ElanFlushTlb (elan3mmu->elan3mmu_dev);
10474 +}
10475 +
10476 +void
10477 +elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr)
10478 +    
10479 +{
10480 +    ELAN3_DEV     *dev;
10481 +    ELAN3_PTBL    *ptbl;
10482 +    spinlock_t   *lock;
10483 +    unsigned long flags;
10484 +    ELAN3_PTE      newpte;
10485 +    ELAN3_PTE      oldpte;
10486 +    sdramaddr_t   pte;
10487 +
10488 +    ASSERT((level == PTBL_LEVEL_2) || (level == PTBL_LEVEL_3));
10489 +
10490 +    /* Generate the new pte which we're going to load */
10491 +    dev = elan3mmu->elan3mmu_dev;
10492 +
10493 +    newpte = elan3mmu_phys_to_pte (dev, paddr, perm);
10494 +    
10495 +    if (attr & PTE_LOAD_BIG_ENDIAN)
10496 +       newpte |= ELAN3_PTE_BIG_ENDIAN;
10497 +
10498 +    HAT_PRINTF4 (1, "elan3mmu_pteload: elan3mmu %p level %d addr %x pte %llx\n", elan3mmu, level, addr, (long long) newpte);
10499 +    HAT_PRINTF5 (1, "elan3mmu_pteload:%s%s%s perm=%d phys=%llx\n",
10500 +                (newpte & ELAN3_PTE_LOCAL)  ? " local" : "",
10501 +                (newpte & ELAN3_PTE_64_BIT)     ? " 64 bit" : "",
10502 +                (newpte & ELAN3_PTE_BIG_ENDIAN) ? " big-endian" : " little-endian",
10503 +                (u_int) (newpte & ELAN3_PTE_PERM_MASK) >> ELAN3_PTE_PERM_SHIFT,
10504 +                (unsigned long long) (newpte & ELAN3_PTE_PFN_MASK));
10505 +                 
10506 +    if (level == PTBL_LEVEL_3)
10507 +       pte = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
10508 +    else
10509 +    {
10510 +       sdramaddr_t ptp = elan3mmu_ptealloc (elan3mmu, addr, level, &ptbl, &lock, attr, &flags);
10511 +
10512 +       pte = elan3mmu_ptp2pte (elan3mmu, ptp, level);
10513 +
10514 +       HAT_PRINTF3 (2, "elan3mmu_pteload: level %d ptp at %lx => pte at %lx\n", level, ptp, pte);
10515 +    }
10516 +
10517 +    if (pte == (sdramaddr_t) 0)
10518 +    {
10519 +       ASSERT (level == PTBL_LEVEL_3 && (attr & (PTE_NO_SLEEP | PTE_NO_STEAL)) == (PTE_NO_SLEEP | PTE_NO_STEAL));
10520 +       return;
10521 +    }
10522 +
10523 +    ASSERT (ptbl->ptbl_elan3mmu == elan3mmu);
10524 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == level);
10525 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
10526 +    
10527 +    oldpte = elan3_readpte (dev, pte);
10528 +
10529 +    HAT_PRINTF3 (2, "elan3mmu_pteload: modify pte at %lx from %llx to %llx\n", pte, (long long) oldpte, (long long) newpte);
10530 +
10531 +    if (ELAN3_PTE_IS_VALID(ptbl, oldpte))
10532 +    {
10533 +       ELAN3MMU_STAT(ptereload);
10534 +
10535 +       ASSERT ((newpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK)) == (oldpte & ~((E3_uint64)ELAN3_PTE_PERM_MASK | ELAN3_RM_MASK)));
10536 +       
10537 +       if ((newpte & ~ELAN3_RM_MASK) != (oldpte & ~ELAN3_RM_MASK))
10538 +       {
10539 +           /* We're modifying a valid translation, it must be mapping the same page */
10540 +           /* so we use elan3_modifypte to not affect the referenced and modified bits */
10541 +           elan3_modifypte (dev, pte, newpte);
10542 +
10543 +
10544 +           ElanFlushTlb (elan3mmu->elan3mmu_dev);
10545 +       }
10546 +    }
10547 +    else
10548 +    {
10549 +       ELAN3MMU_STAT(pteload);
10550 +
10551 +       ptbl->ptbl_valid++;
10552 +
10553 +       HAT_PRINTF3 (2, "elan3mmu_pteload: inc valid for level %d ptbl %p to %d\n", 
10554 +                    PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
10555 +
10556 +       HAT_PRINTF2 (2, "elan3mmu_pteload: write pte %lx to %llx\n", pte, (long long) newpte);
10557 +
10558 +       elan3_writepte (dev, pte, newpte);
10559 +
10560 +       if (ptbl->ptbl_flags & PTBL_KERNEL)
10561 +           ElanFlushTlb (elan3mmu->elan3mmu_dev);
10562 +
10563 +    }
10564 +
10565 +    elan3mmu_unlock_ptbl (ptbl, lock, flags);
10566 +}
10567 +
10568 +void
10569 +elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int attr)
10570 +{
10571 +    ELAN3_PTBL          *ptbl;
10572 +    sdramaddr_t         ptp;
10573 +    spinlock_t        *lock;
10574 +    int                        level;
10575 +    u_int              span;
10576 +    unsigned long      flags;
10577 +
10578 +    HAT_PRINTF3(1, "elan3mmu_unload (elan3mmu %p addr %x -> %x)\n", elan3mmu, addr, addr+len-1);
10579 +
10580 +    for (; len != 0; addr += span, len -= span)
10581 +    {
10582 +       ptp  = elan3mmu_ptefind(elan3mmu, addr, &level, &ptbl, &lock, &flags);
10583 +
10584 +       span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10585 +
10586 +       if (ptp != (sdramaddr_t) 0)
10587 +       {
10588 +           HAT_PRINTF2 (2, "elan3mmu_unload: unload [%x,%x]\n", addr, addr + span);
10589 +           
10590 +           if ( level ==  PTBL_LEVEL_3 ) 
10591 +               elan3mmu_unload_loop (elan3mmu, ptbl, ptp - PTBL_TO_PTADDR(ptbl), span >> ELAN3_PAGE_SHIFT, attr);
10592 +           else
10593 +           {
10594 +               ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
10595 +               ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
10596 +               ELAN3_PTBL  *lXptbl;
10597 +               ELAN3_PTP    tptp;
10598 +               int         idx;
10599 +
10600 +               tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp);
10601 +
10602 +               ASSERT (ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE);
10603 +
10604 +               lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tptp);
10605 +               idx    = (PTP_TO_PT_PADDR(tptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;
10606 +
10607 +               if ( level == PTBL_LEVEL_1) 
10608 +                   span = MIN(len, ELAN3_L2_PTSIZE - ((E3_Addr) addr & ELAN3_L2_PTOFFSET));
10609 +               else
10610 +                   span = MIN(len, ELAN3_L3_PTSIZE - ((E3_Addr) addr & ELAN3_L3_PTOFFSET));
10611 +
10612 +               /* invalidate the ptp. */
10613 +               elan3_writeptp (dev, ptp, invalidptp);
10614 +               if (! (attr & PTE_UNLOAD_NOFLUSH))
10615 +                   ElanFlushTlb (dev);     
10616 +    
10617 +               elan3mmu_free_pte ( dev, elan3mmu, lXptbl, idx); 
10618 +
10619 +               ptbl->ptbl_valid--;
10620 +
10621 +               HAT_PRINTF3 (2, "elan3mmu_unload: dec valid for level %d ptbl %p to %d\n", 
10622 +                            PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);     
10623 +
10624 +           }
10625 +           elan3mmu_unlock_ptbl (ptbl, lock, flags);
10626 +       }
10627 +    }
10628 +}
10629 +
10630 +static void
10631 +elan3mmu_unload_loop (ELAN3MMU *elan3mmu, ELAN3_PTBL *ptbl, int first_valid, int nptes, int flags)
10632 +{
10633 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
10634 +    sdramaddr_t pte;
10635 +    ELAN3_PTE    tpte;
10636 +    int         last_valid = first_valid + nptes;
10637 +    int                i;
10638 +    
10639 +    HAT_PRINTF3 (1, "elan3mmu_unloadloop: ptbl %p entries [%d->%d]\n", ptbl, first_valid, last_valid);
10640 +
10641 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
10642 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3);
10643 +    
10644 +    pte = PTBL_TO_PTADDR(ptbl) + first_valid;
10645 +    
10646 +    for (i = first_valid; i < last_valid; i++, pte += ELAN3_PTE_SIZE)
10647 +    {
10648 +       if (ptbl->ptbl_valid == 0)
10649 +           break;
10650 +
10651 +       tpte = elan3_readpte (dev, pte);
10652 +       if (! ELAN3_PTE_IS_VALID(ptbl, tpte))
10653 +           continue;
10654 +       
10655 +       elan3mmu_pteunload (ptbl, pte, flags, NO_MLIST_LOCK);
10656 +    }
10657 +}
10658 +
10659 +void
10660 +elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock)
10661 +{
10662 +    ELAN3_DEV   *dev = ptbl->ptbl_elan3mmu->elan3mmu_dev;
10663 +    ELAN3_PTE    tpte;
10664 +
10665 +    ASSERT (PTBL_LEVEL (ptbl->ptbl_flags) == PTBL_LEVEL_3);
10666 +    ASSERT (PTBL_IS_LOCKED (ptbl->ptbl_flags));
10667 +
10668 +    HAT_PRINTF2 (1, "elan3mmu_pteunload: ptbl %p pte %lx\n", ptbl, pte);
10669 +
10670 +    ELAN3MMU_STAT (pteunload);
10671 +
10672 +    elan3_invalidatepte (dev, pte);
10673 +
10674 +    if (! (flags & PTE_UNLOAD_NOFLUSH))
10675 +       ElanFlushTlb (dev);
10676 +    
10677 +    tpte = ELAN3_INVALID_PTE;
10678 +    elan3_writepte (dev, pte, tpte);
10679 +    
10680 +    if (ptbl->ptbl_flags & PTBL_KERNEL)
10681 +    {
10682 +       tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu);
10683 +
10684 +       elan3_writepte (dev, pte, tpte);
10685 +    }
10686 +
10687 +    ptbl->ptbl_valid--;
10688 +
10689 +    HAT_PRINTF3 (2, "elan3mmu_pteunload: dec valid for level %d ptbl %p to %d\n", 
10690 +                PTBL_LEVEL(ptbl->ptbl_flags), ptbl, ptbl->ptbl_valid);
10691 +
10692 +}
10693 +
10694 +void
10695 +elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock)
10696 +{
10697 +
10698 +}
10699 +
10700 +/*
10701 + * Create more page tables at a given level for this Elan.
10702 + */
10703 +static ELAN3_PTBL *
10704 +elan3mmu_create_ptbls (ELAN3_DEV *dev, int level, int attr, int keep)
10705 +{
10706 +    sdramaddr_t          pts;
10707 +    ELAN3_PTBL    *ptbl;
10708 +    ELAN3_PTBL    *first;
10709 +    ELAN3_PTBL    *last;
10710 +    ELAN3_PTBL_GR *ptg;
10711 +    register int  i;
10712 +    register int  inc;
10713 +    
10714 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: create level %d ptbls\n", level);
10715 +
10716 +    pts = elan3_sdram_alloc (dev, PTBL_GROUP_SIZE);
10717 +    if (pts == (sdramaddr_t) 0)
10718 +    {
10719 +       HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot map elan pages\n");
10720 +
10721 +       ELAN3MMU_STAT (create_ptbl_failed);
10722 +       return (NULL);
10723 +    }
10724 +    
10725 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: pts at %lx\n", pts);
10726 +    
10727 +    ALLOC_PTBL_GR (ptg, !(attr & PTE_NO_SLEEP));               /* Allocate the group of page tables */
10728 +    if (ptg == NULL)                                           /* for this page */
10729 +    {
10730 +       HAT_PRINTF0 (2, "elan3mmu_create_ptbls: cannot allocate page table group\n");
10731 +
10732 +       elan3_sdram_free (dev, pts, PTBL_GROUP_SIZE);
10733 +
10734 +       ELAN3MMU_STAT (create_ptbl_failed);
10735 +       return (NULL);
10736 +    }
10737 +
10738 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: ptg is %p\n", ptg);
10739 +    
10740 +    ElanSetPtblGr (dev, pts, ptg);
10741 +    
10742 +    HAT_PRINTF4 (2, "elan3mmu_create_ptbls: zeroing %d bytes at %lx, %d bytes at %p\n",
10743 +                PTBL_GROUP_SIZE, pts, (int) sizeof (ELAN3_PTBL_GR), ptg);
10744 +
10745 +#ifndef zero_all_ptbls
10746 +    elan3_sdram_zeroq_sdram (dev, pts, PTBL_GROUP_SIZE);               /* Ensure that all PTEs/PTPs are invalid */
10747 +#endif
10748 +    bzero ((caddr_t) ptg, sizeof (ELAN3_PTBL_GR));
10749 +    
10750 +    ptg->pg_addr  = pts;
10751 +    ptg->pg_level = level;
10752 +
10753 +    ptbl = ptg->pg_ptbls;                                      /* Initialise the index in all page tables */
10754 +    for (i = 0; i < PTBLS_PER_GROUP_MAX; i++)
10755 +    {
10756 +       ptbl->ptbl_index = (u_char) i;
10757 +       ptbl->ptbl_next  = (ELAN3_PTBL *) 0xdeaddead;
10758 +       ptbl++;
10759 +    }
10760 +    
10761 +    switch (level)                                             /* Determine the number of ptbls we can  */
10762 +    {                                                          /* allocate from this page, by jumping  */
10763 +    case PTBL_LEVEL_X: inc = PTBLS_PER_PTBL_LX; break;         /* multiples of the smallest. */
10764 +    case PTBL_LEVEL_1: inc = PTBLS_PER_PTBL_L1; break;
10765 +    case PTBL_LEVEL_2: inc = PTBLS_PER_PTBL_L2; break;
10766 +    case PTBL_LEVEL_3: inc = PTBLS_PER_PTBL_L3; break;
10767 +    default:           inc = PTBLS_PER_PTBL_L3; break;
10768 +    }
10769 +
10770 +    ptbl = ptg->pg_ptbls;                                      /* Chain them together */
10771 +    for (i = 0; i < PTBLS_PER_GROUP_MAX; i += inc, ptbl += inc)
10772 +       ptbl->ptbl_next = ptbl + inc;
10773 +
10774 +    first = ptg->pg_ptbls;                                     /* Determine list of */
10775 +    last  = first + PTBLS_PER_GROUP_MAX - inc;                 /* ptbls to add to free list */
10776 +    if (! keep)
10777 +       ptbl = NULL;
10778 +    else
10779 +    {
10780 +       ptbl  = first;
10781 +       first = first->ptbl_next;
10782 +    }
10783 +    
10784 +    spin_lock (&dev->Level[level].PtblLock);
10785 +    dev->Level[level].PtblTotal     += PTBLS_PER_GROUP_MAX/inc;                /* Increment the counts */
10786 +    dev->Level[level].PtblFreeCount += PTBLS_PER_GROUP_MAX/inc;
10787 +
10788 +    ELAN3MMU_SET_STAT (num_ptbl_level[level], dev->Level[level].PtblTotal);
10789 +
10790 +    if (keep)
10791 +       dev->Level[level].PtblFreeCount--;
10792 +    
10793 +    last->ptbl_next = dev->Level[level].PtblFreeList;                  /* And add to free list */
10794 +    dev->Level[level].PtblFreeList = first;
10795 +    spin_unlock (&dev->Level[level].PtblLock);
10796 +    
10797 +    spin_lock (&dev->PtblGroupLock);
10798 +    ptg->pg_next = dev->Level[level].PtblGroupList;
10799 +    dev->Level[level].PtblGroupList = ptg;
10800 +    spin_unlock (&dev->PtblGroupLock);
10801 +
10802 +    HAT_PRINTF1 (2, "elan3mmu_create_ptbls: returning ptbl %p\n", ptbl);
10803 +    
10804 +    return (ptbl);
10805 +}
10806 +
10807 +static ELAN3_PTBL *
10808 +elan3mmu_ta_to_ptbl (ELAN3MMU *elan3mmu, ELAN3_PTP *ptp)
10809 +{
10810 +    E3_Addr      ptpa  = PTP_TO_PT_PADDR(*ptp);
10811 +    ELAN3_PTBL_GR *pg    = ElanGetPtblGr (elan3mmu->elan3mmu_dev, (sdramaddr_t)ptpa & ~(PTBL_GROUP_SIZE-1));
10812 +    
10813 +    return (pg->pg_ptbls + ((ptpa - pg->pg_addr) >> ELAN3_PT_SHIFT));
10814 +}
10815 +
10816 +static ELAN3_PTBL *
10817 +elan3mmu_alloc_lXptbl (ELAN3_DEV *dev, int attr,  ELAN3MMU *elan3mmu)
10818 +{
10819 +    ELAN3_PTBL *ptbl = NULL;
10820 +
10821 +    spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10822 +    if (dev->Level[PTBL_LEVEL_X].PtblFreeList)
10823 +    {
10824 +       ptbl = dev->Level[PTBL_LEVEL_X].PtblFreeList;
10825 +
10826 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: found ptbl %p on free list\n", ptbl);
10827 +
10828 +       dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl->ptbl_next;
10829 +       dev->Level[PTBL_LEVEL_X].PtblFreeCount--;
10830 +    }
10831 +    spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock);
10832 +    
10833 +    if (ptbl == NULL) 
10834 +    {
10835 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_X, attr, 1);
10836 +
10837 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: created level X ptbl %p\n", ptbl);
10838 +    }
10839 +
10840 +    if (ptbl == NULL)
10841 +    {
10842 +       if ((attr & PTE_NO_STEAL))
10843 +       {
10844 +           HAT_PRINTF0 (2, "elan3mmu_alloc_lXptbl: not allowed to steal ptbl for use at level 2\n");
10845 +           return NULL;
10846 +       }
10847 +
10848 +       ELAN3MMU_STAT(lX_alloc_l3);
10849 +
10850 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
10851 +       
10852 +       HAT_PRINTF1 (2, "elan3mmu_alloc_lXptbl: stolen level3 ptbl %p used as level 2\n", ptbl);
10853 +    }
10854 +
10855 +    ptbl->ptbl_elan3mmu = elan3mmu;
10856 +    ptbl->ptbl_base     = 0;
10857 +    ptbl->ptbl_parent   = 0;
10858 +    ptbl->ptbl_flags    = PTBL_LEVEL_X | PTBL_ALLOCED;
10859 +    
10860 +    HAT_PRINTF2 (2, "elan3mmu_alloc_lXptbl: ptbl %p dev %p\n", ptbl, dev);
10861 +
10862 +#ifdef zero_all_ptbls
10863 +    elan3_sdram_zero_sdarm (dev, PTBL_TO_PTADDR(ptbl), ELAN3_LX_ENTRIES*ELAN3_PTE_SIZE);
10864 +#endif
10865 +
10866 +    return (ptbl);
10867 +}
10868 +
10869 +static ELAN3_PTBL *
10870 +elan3mmu_alloc_pte (ELAN3_DEV *dev, ELAN3MMU *elan3mmu, int *idx)
10871 +{
10872 +    ELAN3_PTBL   * ptbl_ptr;
10873 +    int           index;
10874 +
10875 +    /* lock whilst looking for space */
10876 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10877 +    
10878 +    /* walk the lXptbl list */
10879 +    ptbl_ptr = elan3mmu->elan3mmu_lXptbl;
10880 +    while ( ptbl_ptr != NULL ) 
10881 +    {
10882 +       /* does this ptbl have any free ones */
10883 +       if (  (index = ptbl_ptr->ptbl_valid) < ELAN3_LX_ENTRIES) 
10884 +       {
10885 +           /* better to search from the valid count as it's likely to be free */
10886 +           index = ptbl_ptr->ptbl_valid; 
10887 +           do {
10888 +               if ((ptbl_ptr->ptbl_base & (1 << index)) == 0)
10889 +                   goto found;
10890 +
10891 +               /* move index on and wrap back to start if needed */
10892 +               if ((++index) == ELAN3_LX_ENTRIES) 
10893 +                   index = 0;
10894 +           } while (index != ptbl_ptr->ptbl_valid);
10895 +
10896 +           panic ("elan3mmu_alloc_pte: ptbl has valid < 32 but no free ptes");
10897 +       }
10898 +       ptbl_ptr = ptbl_ptr->ptbl_parent;
10899 +    }
10900 +       
10901 +    /* unlock so we can create space */
10902 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); 
10903 +
10904 +    /* if none found, create some more */
10905 +    ptbl_ptr = elan3mmu_alloc_lXptbl(dev, 0, elan3mmu);
10906 +
10907 +    /* get the lock again */
10908 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
10909 +       
10910 +    /* add to front of list as its obviously got free ones on it */
10911 +    ptbl_ptr->ptbl_parent     = elan3mmu->elan3mmu_lXptbl;
10912 +    elan3mmu->elan3mmu_lXptbl = ptbl_ptr;
10913 +
10914 +    /* grab the first one */
10915 +    index = 0;
10916 +    
10917 + found:
10918 +    ptbl_ptr->ptbl_base |= (1 << index);
10919 +    ptbl_ptr->ptbl_valid++;
10920 +
10921 +    HAT_PRINTF3 (2, "elan3mmu_alloc_pte: inc valid for level %d ptbl %p to %d\n", 
10922 +                PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid);
10923 +
10924 +    /* release the lock and return it */
10925 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock); 
10926 +
10927 +    *idx = index;
10928 +    return (ptbl_ptr);
10929 +}
10930 +
10931 +static ELAN3_PTBL *
10932 +elan3mmu_alloc_l1ptbl (ELAN3_DEV *dev, int attr, ELAN3MMU *elan3mmu)
10933 +{
10934 +    ELAN3_PTBL *ptbl = NULL;
10935 +    ELAN3_PTBL *p;
10936 +    int i,j;
10937 +    
10938 +    spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10939 +    if (dev->Level[PTBL_LEVEL_1].PtblFreeList)
10940 +    {
10941 +       ptbl = dev->Level[PTBL_LEVEL_1].PtblFreeList;
10942 +       dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl->ptbl_next;
10943 +       dev->Level[PTBL_LEVEL_1].PtblFreeCount--;
10944 +    }
10945 +    spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock);
10946 +    
10947 +    if (ptbl == NULL)
10948 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_1, attr, 1);
10949 +    
10950 +    if (ptbl == NULL)
10951 +       panic ("elan3mmu_alloc_l1ptbl: cannot alloc ptbl");
10952 +    
10953 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L1; i++, p++)
10954 +    {
10955 +       p->ptbl_elan3mmu = elan3mmu;
10956 +       p->ptbl_base     = VA2BASE (j);
10957 +       p->ptbl_flags    = PTBL_LEVEL_1 | PTBL_GROUPED;
10958 +       p->ptbl_parent   = NULL;
10959 +       
10960 +       j += L1_VA_PER_PTBL;
10961 +    }
10962 +    
10963 +    /* Now mark the real page table as allocated */
10964 +    /* level 1 ptbls are returned unlocked */
10965 +    ptbl->ptbl_flags = PTBL_LEVEL_1 | PTBL_ALLOCED;
10966 +    
10967 +    HAT_PRINTF2 (2, "elan3mmu_alloc_l1ptbl: ptbl %p dev %p\n", ptbl, dev);
10968 +
10969 +#ifdef zero_all_ptbls
10970 +    elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L1_ENTRIES*ELAN3_PTP_SIZE);
10971 +#endif
10972 +
10973 +    return (ptbl);
10974 +}
10975 +
10976 +static ELAN3_PTBL *
10977 +elan3mmu_alloc_l2ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags)
10978 +{
10979 +    ELAN3_PTBL *ptbl = NULL;
10980 +    ELAN3_PTBL *p;
10981 +    int        i;
10982 +    int        j;
10983 +    unsigned long ptbl_flags;
10984 +
10985 +    spin_lock_irqsave (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags);
10986 +    if (dev->Level[PTBL_LEVEL_2].PtblFreeList)
10987 +    {
10988 +       ptbl = dev->Level[PTBL_LEVEL_2].PtblFreeList;
10989 +
10990 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: found ptbl %p on free list\n", ptbl);
10991 +
10992 +       dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl->ptbl_next;
10993 +       dev->Level[PTBL_LEVEL_2].PtblFreeCount--;
10994 +    }
10995 +    spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_2].PtblLock, ptbl_flags);
10996 +    
10997 +    if (ptbl == NULL) 
10998 +    {
10999 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_2, attr, 1);
11000 +
11001 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: created level 2 ptbl %p\n", ptbl);
11002 +    }
11003 +
11004 +    if (ptbl == NULL)
11005 +    {
11006 +       if ((attr & PTE_NO_STEAL))
11007 +       {
11008 +           HAT_PRINTF0 (2, "elan3mmu_alloc_l2ptbl: not allowed to steal ptbl for use at level 2\n");
11009 +           return (NULL);
11010 +       }
11011 +
11012 +       ELAN3MMU_STAT(l2_alloc_l3);
11013 +
11014 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
11015 +       
11016 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l2ptbl: stolen level3 ptbl %p used as level 2\n", ptbl);
11017 +    }
11018 +    
11019 +    *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_2, ptbl);
11020 +    spin_lock_irqsave (*plock, *flags);
11021 +    
11022 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L2; i++, p++)
11023 +    {
11024 +       p->ptbl_elan3mmu = elan3mmu;
11025 +       p->ptbl_base     = VA2BASE (base + j);
11026 +       p->ptbl_flags    = PTBL_LEVEL_2 | PTBL_GROUPED;
11027 +       p->ptbl_parent   = parent;
11028 +       
11029 +       j += L2_VA_PER_PTBL;
11030 +    }
11031 +    
11032 +    ptbl->ptbl_flags  = PTBL_LEVEL_2 | PTBL_ALLOCED | PTBL_LOCKED;
11033 +    
11034 +    HAT_PRINTF3 (2, "elan3mmu_alloc_l2ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base);
11035 +
11036 +#ifdef zero_all_ptbls
11037 +    elan3_sdram_zero_sdarm (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L2_ENTRIES*ELAN3_PTP_SIZE);
11038 +#endif
11039 +
11040 +    return (ptbl);
11041 +}
11042 +
11043 +static ELAN3_PTBL *
11044 +elan3mmu_alloc_l3ptbl (ELAN3_DEV *dev, int attr, ELAN3_PTBL *parent, ELAN3MMU *elan3mmu, E3_Addr base, spinlock_t **plock, unsigned long *flags)
11045 +{
11046 +    ELAN3_PTBL *ptbl = NULL;
11047 +    ELAN3_PTBL *p;
11048 +    int               i;
11049 +    int               j;
11050 +    unsigned long ptbl_flags;
11051 +
11052 +    spin_lock_irqsave (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags);
11053 +    if (dev->Level[PTBL_LEVEL_3].PtblFreeList)
11054 +    {
11055 +       ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList;
11056 +
11057 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: found ptbl %p on free list\n", ptbl);
11058 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next;
11059 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount--;
11060 +    }
11061 +    spin_unlock_irqrestore (&dev->Level[PTBL_LEVEL_3].PtblLock, ptbl_flags);
11062 +    
11063 +    if (ptbl == NULL)
11064 +    {
11065 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1);
11066 +
11067 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: created level 3 ptbl %p\n", ptbl);
11068 +    }
11069 +
11070 +    if (ptbl == NULL)
11071 +    {
11072 +       if ((attr & PTE_NO_STEAL))
11073 +       {
11074 +           HAT_PRINTF0 (2, "elan3mmu_alloc_l3ptbl: not allowed to steal ptbl for use at level 3\n");
11075 +           return (NULL);
11076 +       }
11077 +
11078 +       ptbl = elan3mmu_steal_l3ptbl (dev, attr);
11079 +
11080 +       HAT_PRINTF1 (2, "elan3mmu_alloc_l3ptbl: stolen level3 ptbl %p\n", ptbl);
11081 +    }
11082 +    
11083 +    *plock = elan3mmu_ptbl_to_lock (PTBL_LEVEL_3, ptbl);
11084 +    spin_lock_irqsave (*plock,*flags);
11085 +    
11086 +    for (p = ptbl, j = i = 0; i < PTBLS_PER_PTBL_L3; i++, p++)
11087 +    {
11088 +       p->ptbl_elan3mmu = elan3mmu;
11089 +       p->ptbl_base     = VA2BASE (base + j);
11090 +       p->ptbl_flags    = PTBL_LEVEL_3 | PTBL_GROUPED;
11091 +       p->ptbl_parent   = parent;
11092 +       
11093 +       j += L3_VA_PER_PTBL;
11094 +    }
11095 +    
11096 +    ptbl->ptbl_flags = PTBL_LEVEL_3 | PTBL_ALLOCED | PTBL_LOCKED;
11097 +    
11098 +    HAT_PRINTF3 (2, "elan3mmu_alloc_l3ptbl: ptbl %p dev %p base %x\n", ptbl, dev, base);
11099 +
11100 +#ifdef zero_all_ptbls
11101 +    elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE);
11102 +#endif
11103 +
11104 +    return (ptbl);
11105 +}
11106 +
11107 +void 
11108 +elan3mmu_free_pte  (ELAN3_DEV *dev,  ELAN3MMU *elan3mmu,  ELAN3_PTBL *ptbl_ptr, int idx)
11109 +{  
11110 +    sdramaddr_t pte  = PTBL_TO_PTADDR (ptbl_ptr) | (idx * sizeof (ELAN3_PTE));
11111 +    ELAN3_PTE    tpte = ELAN3_INVALID_PTE;
11112 +    ELAN3_PTBL *prev;
11113 +
11114 +    /* ensure that the pte is invalid when free */
11115 +    elan3_writepte (dev, pte, tpte);
11116 +
11117 +    /* lock whilst removing */
11118 +    spin_lock (&elan3mmu->elan3mmu_lXptbl_lock);
11119 +
11120 +    HAT_PRINTF4 (2, "elan3mmu_free_pte idx %d   ptbl_ptr %p ptbl_base  %x  ptbl_ptr->ptbl_valid %d \n", 
11121 +                idx, ptbl_ptr, ptbl_ptr->ptbl_base, ptbl_ptr->ptbl_valid);
11122 +    /* make sure it was set */
11123 +    ASSERT ( ptbl_ptr->ptbl_base & (1 << idx) ); 
11124 +    ASSERT ( ptbl_ptr->ptbl_valid > 0  );
11125 +
11126 +    ptbl_ptr->ptbl_base &= ~(1 << idx);
11127 +    ptbl_ptr->ptbl_valid--;
11128 +
11129 +    HAT_PRINTF3 (2, "elan3mmu_free_pte: dec valid for level %d ptbl %p to %d\n", 
11130 +                PTBL_LEVEL(ptbl_ptr->ptbl_flags), ptbl_ptr, ptbl_ptr->ptbl_valid); 
11131
11132 +    /* was that the last one on this page */
11133 +    if ( ! ptbl_ptr->ptbl_valid ) 
11134 +    {
11135 +       /* so no bits should be set then */
11136 +       ASSERT ( ptbl_ptr->ptbl_base == 0 );
11137 +
11138 +       /* is this the first page ?? */
11139 +       if ( elan3mmu->elan3mmu_lXptbl == ptbl_ptr ) 
11140 +       {
11141 +           /* make the list start at the second element */
11142 +            elan3mmu->elan3mmu_lXptbl = ptbl_ptr->ptbl_parent;
11143 +
11144 +            /* put ptbl back on free list */
11145 +            elan3mmu_free_lXptbl(dev, ptbl_ptr);
11146 +
11147 +            /* unlock and return */
11148 +            spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
11149 +            return ;
11150 +       }
11151 +
11152 +       /* scan thro list looking for this page */
11153 +       prev = elan3mmu->elan3mmu_lXptbl;
11154 +       while ( prev->ptbl_parent != NULL ) 
11155 +       {
11156 +           if ( prev->ptbl_parent == ptbl_ptr ) /* its the next one */
11157 +           {
11158 +               /* remove element from chain */
11159 +               prev->ptbl_parent =  ptbl_ptr->ptbl_parent;
11160 +
11161 +               /* put ptbl back on free list */
11162 +               elan3mmu_free_lXptbl(dev, ptbl_ptr);
11163 +
11164 +               /* unlock and return */
11165 +               spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
11166 +               return ;
11167 +           }           
11168 +           prev = prev->ptbl_parent;
11169 +       }
11170 +       
11171 +               panic ("elan3mmu_free_pte: failed to find ptbl in chain");
11172 +       /* NOTREACHED */
11173 +    }
11174 +    
11175 +    spin_unlock (&elan3mmu->elan3mmu_lXptbl_lock);
11176 +}
11177 +
11178 +void
11179 +elan3mmu_free_lXptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl)
11180 +{
11181 +    ELAN3_PTBL_GR *ptg;
11182 +
11183 +    HAT_PRINTF2 (2, "elan3mmu_free_lXptbl: dev %p ptbl %p\n", dev, ptbl);
11184 +
11185 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
11186 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
11187 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_X);
11188 +    ASSERT (ptbl->ptbl_valid == 0);
11189 +   
11190 +    ptbl->ptbl_flags = 0;
11191 +
11192 +    ptg = PTBL_TO_GR(ptbl);
11193 +
11194 +    if (ptg->pg_level == PTBL_LEVEL_3)
11195 +    {
11196 +       ELAN3MMU_STAT(lX_freed_l3);
11197 +
11198 +       HAT_PRINTF1 (2, "elan3mmu_free_lXptbl: freeing stolen level 3 ptbl %p\n", ptbl);
11199 +
11200 +       /* this was really a level 3 ptbl which we had to steal */
11201 +       spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11202 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
11203 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
11204 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
11205 +       spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11206 +    }
11207 +    else
11208 +    {
11209 +       spin_lock (&dev->Level[PTBL_LEVEL_X].PtblLock);
11210 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_X].PtblFreeList;
11211 +       dev->Level[PTBL_LEVEL_X].PtblFreeList = ptbl;
11212 +       dev->Level[PTBL_LEVEL_X].PtblFreeCount++;
11213 +       spin_unlock (&dev->Level[PTBL_LEVEL_X].PtblLock);
11214 +    }
11215 +}
11216 +
11217 +void
11218 +elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
11219 +{
11220 +    HAT_PRINTF3 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p ptbl->ptbl_valid %x \n", dev, ptbl, ptbl->ptbl_valid);
11221 +
11222 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
11223 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
11224 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_1);
11225 +    ASSERT (ptbl->ptbl_valid == 0);
11226 +    
11227 +    HAT_PRINTF2 (2, "elan3mmu_free_l1ptbl: dev %p ptbl %p\n", dev, ptbl);
11228 +
11229 +    ptbl->ptbl_flags = 0;
11230 +    spin_unlock (lock);
11231 +    
11232 +    spin_lock (&dev->Level[PTBL_LEVEL_1].PtblLock);
11233 +    ptbl->ptbl_next = dev->Level[PTBL_LEVEL_1].PtblFreeList;
11234 +    dev->Level[PTBL_LEVEL_1].PtblFreeList = ptbl;
11235 +    dev->Level[PTBL_LEVEL_1].PtblFreeCount++;
11236 +    spin_unlock (&dev->Level[PTBL_LEVEL_1].PtblLock);
11237 +
11238 +    local_irq_restore (flags);
11239 +}
11240 +
11241 +void
11242 +elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
11243 +{
11244 +    ELAN3_PTBL_GR *ptg;
11245 +
11246 +    HAT_PRINTF2 (2, "elan3mmu_free_l2ptbl: dev %p ptbl %p\n", dev, ptbl);
11247 +
11248 +    ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags));
11249 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
11250 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
11251 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_2);
11252 +    ASSERT (ptbl->ptbl_valid == 0);
11253 +   
11254 +    ptbl->ptbl_flags = 0;
11255 +    spin_unlock (lock);
11256 +
11257 +    ptg = PTBL_TO_GR(ptbl);
11258 +
11259 +    if (ptg->pg_level == PTBL_LEVEL_3)
11260 +    {
11261 +       ELAN3MMU_STAT(l2_freed_l3);
11262 +
11263 +       HAT_PRINTF1 (2, "elan3mmu_free_l2ptbl: freeing stolen level 3 ptbl %p\n", ptbl);
11264 +
11265 +       /* this was really a level 3 ptbl which we had to steal */
11266 +       spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11267 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
11268 +       dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
11269 +       dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
11270 +       spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11271 +    }
11272 +    else
11273 +    {
11274 +       spin_lock (&dev->Level[PTBL_LEVEL_2].PtblLock);
11275 +       ptbl->ptbl_next = dev->Level[PTBL_LEVEL_2].PtblFreeList;
11276 +       dev->Level[PTBL_LEVEL_2].PtblFreeList = ptbl;
11277 +       dev->Level[PTBL_LEVEL_2].PtblFreeCount++;
11278 +       spin_unlock (&dev->Level[PTBL_LEVEL_2].PtblLock);
11279 +    }  
11280 +    local_irq_restore (flags);
11281 +}
11282 +
11283 +void
11284 +elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
11285 +{
11286 +    ASSERT (PTBL_IS_LOCKED(ptbl->ptbl_flags));
11287 +    ASSERT (ptbl->ptbl_flags & PTBL_ALLOCED);
11288 +    ASSERT ((ptbl->ptbl_flags & PTBL_KEEP) == 0);
11289 +    ASSERT (PTBL_LEVEL(ptbl->ptbl_flags) == PTBL_LEVEL_3);
11290 +    ASSERT (ptbl->ptbl_valid == 0);
11291 +    
11292 +    HAT_PRINTF2 (2, "elan3mmu_free_l3ptbl: dev %p ptbl %p\n", dev, ptbl);
11293 +
11294 +    if (ptbl->ptbl_flags & PTBL_KERNEL)                                /* if the ptbl has been used by the kernel */
11295 +    {                                                          /* then zero all the pte's, since they will */
11296 +       elan3_sdram_zeroq_sdram (dev, PTBL_TO_PTADDR(ptbl), ELAN3_L3_ENTRIES*ELAN3_PTE_SIZE);
11297 +    }
11298 +
11299 +    ptbl->ptbl_flags = 0;
11300 +    spin_unlock (lock);
11301 +    
11302 +    spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11303 +    ptbl->ptbl_next = dev->Level[PTBL_LEVEL_3].PtblFreeList;
11304 +    dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl;
11305 +    dev->Level[PTBL_LEVEL_3].PtblFreeCount++;
11306 +    spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11307 +
11308 +    local_irq_restore (flags);
11309 +}
11310 +
11311 +void
11312 +elan3mmu_kernel_l3ptbl (ELAN3_PTBL *ptbl)
11313 +{
11314 +    ELAN3_DEV   *dev  = ptbl->ptbl_elan3mmu->elan3mmu_dev;
11315 +    sdramaddr_t pte  = PTBL_TO_PTADDR(ptbl);
11316 +    ELAN3_PTE    tpte = elan3mmu_kernel_invalid_pte(ptbl->ptbl_elan3mmu);
11317 +    int                i;
11318 +
11319 +    ptbl->ptbl_flags |= PTBL_KERNEL;
11320 +    for (i = 0; i < ELAN3_L3_ENTRIES; i++, pte += ELAN3_PTE_SIZE)
11321 +    {
11322 +       elan3_writepte (dev, pte, tpte);
11323 +    }
11324 +}
11325 +       
11326 +#define PTBL_CAN_STEAL(flag)   (((flag) & (PTBL_KERNEL|PTBL_KEEP)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3))
11327 +#define PTBL_MAY_STEAL(flag)   (((flag) & (PTBL_KERNEL|PTBL_KEEP|PTBL_LOCKED)) == 0 && (((flag) & PTBL_ALLOCED) && PTBL_LEVEL(flag) == PTBL_LEVEL_3))
11328 +
11329 +static int
11330 +elan3mmu_steal_this_ptbl (ELAN3_DEV *dev, ELAN3_PTBL *l3ptbl)
11331 +{
11332 +    ELAN3_PTBL  *l2ptbl     = l3ptbl->ptbl_parent;
11333 +    E3_Addr     l2addr     = BASE2VA(l2ptbl);
11334 +    E3_Addr     l3addr     = BASE2VA(l3ptbl);
11335 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
11336 +    sdramaddr_t l2ptp;
11337 +    spinlock_t *l2lock;
11338 +    unsigned long l2flags;
11339 +
11340 +    HAT_PRINTF5 (1, "elan3mmu_steal_this_ptbl: l3ptbl %p (%x) l2ptbl %p (%x) l2addr %x\n",
11341 +                l3ptbl, l3ptbl->ptbl_flags, l2ptbl, l2ptbl->ptbl_flags, l2addr);
11342 +
11343 +    if (PTBL_CAN_STEAL (l3ptbl->ptbl_flags) &&
11344 +       elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_NOWAIT, l3ptbl->ptbl_elan3mmu, l2addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_OK)
11345 +    {
11346 +       ELAN3MMU_STAT(stolen_ptbls);
11347 +
11348 +       /* Locked both L3 and L2 page tables. */
11349 +       l2ptp = PTBL_TO_PTADDR (l2ptbl) + ELAN3_L2_INDEX(l3addr)*ELAN3_PTP_SIZE;
11350 +       
11351 +       /* detach the level 3 page table */
11352 +       elan3_writeptp (dev, l2ptp, invalidptp);
11353 +       ElanFlushTlb (dev);
11354 +
11355 +       l2ptbl->ptbl_valid--;
11356 +
11357 +       HAT_PRINTF3 (2, "elan3mmu_steal_this_ptbl: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
11358 +
11359 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11360 +
11361 +       elan3mmu_unload_loop (l3ptbl->ptbl_elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, PTE_UNLOAD_NOFLUSH);
11362 +
11363 +       ASSERT (l3ptbl->ptbl_valid == 0);
11364 +
11365 +       l3ptbl->ptbl_flags = 0;
11366 +       return (1);
11367 +    }
11368 +    return (0);
11369 +}
11370 +
11371 +static ELAN3_PTBL *
11372 +elan3mmu_steal_l3ptbl (ELAN3_DEV *dev, int attr)
11373 +{
11374 +    ELAN3_PTBL_GR      *ptg;
11375 +    ELAN3_PTBL         *ptbl;
11376 +    spinlock_t         *lock;
11377 +    unsigned long        group_flags;
11378 +    unsigned long        ptbl_flags;
11379 +    register int        i;
11380 +
11381 +    HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: attr %x\n", attr);
11382 +
11383 +    spin_lock_irqsave (&dev->PtblGroupLock, group_flags);
11384 +
11385 +    ptg = dev->Level3PtblGroupHand;
11386 +
11387 +    if (ptg == NULL)
11388 +       ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList;
11389 +    
11390 +    for (;;)
11391 +    {
11392 +       while (ptg)
11393 +       {
11394 +           for (i = 0, ptbl = ptg->pg_ptbls; i < PTBLS_PER_GROUP_MAX; i++, ptbl++)
11395 +           {
11396 +               if (PTBL_MAY_STEAL (ptbl->ptbl_flags) &&
11397 +                   elan3mmu_lock_this_ptbl (ptbl, LK_PTBL_NOWAIT, &lock, &ptbl_flags) == LK_PTBL_OK)
11398 +               {
11399 +                   if (elan3mmu_steal_this_ptbl (dev, ptbl ))
11400 +                   {
11401 +                       HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: stolen ptbl %p\n", ptbl);
11402 +
11403 +                       elan3mmu_unlock_ptbl (ptbl, lock,ptbl_flags);
11404 +
11405 +                       dev->Level3PtblGroupHand = ptg->pg_next;
11406 +
11407 +                       spin_unlock_irqrestore (&dev->PtblGroupLock, group_flags);
11408 +
11409 +                       return (ptbl);
11410 +                   }
11411 +                   elan3mmu_unlock_ptbl (ptbl, lock, ptbl_flags);
11412 +               }
11413 +           }
11414 +           ptg = ptg->pg_next;
11415 +       }
11416 +       
11417 +       if (dev->Level[PTBL_LEVEL_3].PtblFreeList)
11418 +       {
11419 +           spin_lock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11420 +           ptbl = dev->Level[PTBL_LEVEL_3].PtblFreeList;
11421 +           if (ptbl != NULL)
11422 +           {
11423 +               dev->Level[PTBL_LEVEL_3].PtblFreeList = ptbl->ptbl_next;
11424 +               dev->Level[PTBL_LEVEL_3].PtblFreeCount--;
11425 +           }
11426 +           spin_unlock (&dev->Level[PTBL_LEVEL_3].PtblLock);
11427 +
11428 +           if (ptbl != NULL)
11429 +           {
11430 +               HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: found ptbl %p on free list\n", ptbl);
11431 +               break;
11432 +           }
11433 +       }
11434 +
11435 +       ptbl = elan3mmu_create_ptbls (dev, PTBL_LEVEL_3, attr, 1);
11436 +
11437 +       if (ptbl != NULL)
11438 +       {
11439 +           HAT_PRINTF1 (2, "elan3mmu_steal_l3ptbl: created new ptbl %p\n", ptbl);
11440 +           break;
11441 +       }
11442 +       
11443 +       HAT_PRINTF0 (1, "elan3mmu_steal_l3ptbl: cannot find a ptbl, retrying\n");
11444 +       ptg = dev->Level[PTBL_LEVEL_3].PtblGroupList;
11445 +    }
11446 +
11447 +    spin_unlock (&dev->PtblGroupLock);
11448 +    return (ptbl);
11449 +}
11450 +
11451 +sdramaddr_t
11452 +elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr addr, int *level, 
11453 +                 ELAN3_PTBL **pptbl, spinlock_t **plock, unsigned long *flags)
11454 +{
11455 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
11456 +    ELAN3_PTBL  *l1ptbl;
11457 +    sdramaddr_t l1ptp;
11458 +    ELAN3_PTP    tl1ptp;
11459 +    E3_Addr     l1base;
11460 +    ELAN3_PTBL  *l2ptbl;
11461 +    sdramaddr_t l2ptp;
11462 +    ELAN3_PTP    tl2ptp;
11463 +    E3_Addr     l2base;
11464 +    ELAN3_PTBL  *l3ptbl;
11465 +    sdramaddr_t l3pte;
11466 +    spinlock_t *l1lock;
11467 +    spinlock_t *l2lock;
11468 +    spinlock_t *l3lock;
11469 +    unsigned long l1flags;
11470 +    unsigned long l2flags;
11471 +    unsigned long l3flags;
11472 +
11473 +    HAT_PRINTF2 (2, "elan3mmu_ptefind: elan3mmu %p addr %x\n", elan3mmu, addr);
11474 +
11475 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
11476 +    *level = 0;
11477 +
11478 +    if (l1ptbl == NULL)
11479 +       return ((sdramaddr_t) NULL);
11480 +
11481 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11482 +    l1base = ELAN3_L1_BASE(addr);
11483 +    
11484 +retryl1:
11485 +    tl1ptp = elan3_readptp (dev, l1ptp);
11486 +    
11487 +    HAT_PRINTF4 (2, "elan3mmu_ptefind: l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp);
11488 +
11489 +    switch (ELAN3_PTP_TYPE(tl1ptp))
11490 +    {
11491 +    case ELAN3_ET_PTE:
11492 +       elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11493 +
11494 +       tl1ptp = elan3_readptp (dev, l1ptp);
11495 +       if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE)
11496 +       {
11497 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11498 +           goto retryl1;
11499 +       }
11500 +       
11501 +       *level = 1;
11502 +       *pptbl = l1ptbl;
11503 +       *plock = l1lock;
11504 +       *flags = l1flags;
11505 +       
11506 +       /* return with l1lock */
11507 +       return (l1ptp);  
11508 +
11509 +    case ELAN3_ET_INVALID:
11510 +       return ((sdramaddr_t) 0);
11511 +       
11512 +    case ELAN3_ET_PTP:
11513 +       break;
11514 +
11515 +    default:
11516 +       panic ("elan3mmu_ptefind: found bad entry in level 1 page table");
11517 +       /* NOTREACHED */
11518 +    }
11519 +    
11520 +    HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 2 ptbl from ptp %x\n", tl1ptp);
11521 +
11522 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11523 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11524 +    l2base = ELAN3_L2_BASE(addr);
11525 +    
11526 +    tl2ptp = elan3_readptp (dev, l2ptp);
11527 +    
11528 +    HAT_PRINTF4 (2, "elan3mmu_ptefind: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n", l2ptbl, l2ptp, l2base, tl2ptp);
11529 +
11530 +    switch (ELAN3_PTP_TYPE(tl2ptp))
11531 +    {
11532 +    case ELAN3_ET_PTE:
11533 +       switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11534 +       {
11535 +       case LK_PTBL_OK:
11536 +           tl2ptp = elan3_readptp (dev, l2ptp);
11537 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE)
11538 +           {
11539 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11540 +               goto retryl1;
11541 +           }
11542 +           
11543 +           *level = 2;
11544 +           *pptbl = l2ptbl;
11545 +           *plock = l2lock;
11546 +           *flags = l2flags;
11547 +           
11548 +           /* return with l2lock */
11549 +           return (l2ptp); 
11550 +           
11551 +       case LK_PTBL_MISMATCH:
11552 +           HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11553 +                        l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11554 +           
11555 +           /*
11556 +            * We've trogged down to this ptbl,  but someone has just
11557 +            * stolen it,  so try all over again.
11558 +            */
11559 +           goto retryl1;
11560 +           
11561 +       default:
11562 +           panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value");
11563 +           /* NOTREACHED */
11564 +       }
11565 +    case ELAN3_ET_INVALID:
11566 +       return ((sdramaddr_t) 0);
11567 +       
11568 +    case ELAN3_ET_PTP:
11569 +       break;
11570 +    default:
11571 +       panic ("elan3mmu_ptefind: found bad entry in level 2 page table");
11572 +       /* NOTREACHED */
11573 +    }
11574 +    
11575 +    HAT_PRINTF1 (2, "elan3mmu_ptefind: chain to level 3 page table from ptp %x\n", tl2ptp);
11576 +
11577 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
11578 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11579 +    
11580 +    HAT_PRINTF2 (2, "elan3mmu_ptefind: l3ptbl %p l3pte %lx\n", l3ptbl, l3pte);
11581 +                
11582 +    switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags))
11583 +    {
11584 +    case LK_PTBL_OK:
11585 +       *level = 3;
11586 +       *plock = l3lock;
11587 +       *pptbl = l3ptbl;
11588 +       *flags = l3flags;
11589 +
11590 +       return (l3pte);
11591 +       
11592 +    case LK_PTBL_FAILED:
11593 +       panic ("elan3mmu_ptefind: l3 lock failed");
11594 +       /* NOTREACHED */
11595 +
11596 +    case LK_PTBL_MISMATCH:
11597 +       HAT_PRINTF6 (2, "elan3mmu_ptefind: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11598 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr);
11599 +                    
11600 +       /*
11601 +        * We've trogged down to this ptbl,  but someone has just
11602 +        * stolen it,  so try all over again.
11603 +        */
11604 +       goto retryl1;
11605 +       
11606 +    default:
11607 +       panic ("elan3mmu_ptefind: elan3mmu_lock_ptbl returned bad value");
11608 +       /* NOTREACHED */
11609 +    }
11610 +    /* NOTREACHED */
11611 +    return ((sdramaddr_t) 0);
11612 +}
11613 +
11614 +sdramaddr_t 
11615 +elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level)
11616 +{
11617 +    ELAN3_PTP tptp = elan3_readptp (elan3mmu->elan3mmu_dev, ptp);
11618 +
11619 +    ASSERT (level != 3 && ELAN3_PTP_TYPE(tptp) == ELAN3_ET_PTE);
11620 +
11621 +    return PTP_TO_PT_PADDR(tptp);
11622 +}
11623 +
11624 +sdramaddr_t
11625 +elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr addr, int level, 
11626 +                  ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags)
11627 +{
11628 +    ELAN3_DEV   *dev     = elan3mmu->elan3mmu_dev;
11629 +    ELAN3_PTBL  *l1ptbl;
11630 +    ELAN3_PTBL  *lXptbl;
11631 +    int         idx;
11632 +    sdramaddr_t l1ptp;
11633 +    ELAN3_PTP    tl1ptp;
11634 +    E3_Addr     l1base;
11635 +    spinlock_t *l1lock;
11636 +    ELAN3_PTBL  *l2ptbl;
11637 +    sdramaddr_t l2ptp;
11638 +    ELAN3_PTP    tl2ptp;
11639 +    E3_Addr     l2base;
11640 +    spinlock_t *l2lock;
11641 +    ELAN3_PTBL  *l3ptbl;
11642 +    sdramaddr_t l3pte;
11643 +    E3_Addr     l3base;
11644 +    spinlock_t *l3lock;
11645 +
11646 +    unsigned long l1flags;
11647 +    unsigned long l2flags;
11648 +    unsigned long l3flags;
11649 +
11650 +    HAT_PRINTF2 (2, "elan3mmu_ptealloc: elan3mmu %p addr %x\n", elan3mmu, addr);
11651 +
11652 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
11653 +    if (l1ptbl == NULL)
11654 +       return ((sdramaddr_t) 0);
11655 +
11656 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11657 +    l1base = ELAN3_L1_BASE(addr);
11658 +               
11659 +retryl1:
11660 +    tl1ptp = elan3_readptp (dev, l1ptp);
11661 +
11662 +    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l1ptbl %p 1ptp %lx l1base %x (%x) : tl1ptp %x\n",
11663 +                l1ptbl, l1ptp, l1base, l1ptbl->ptbl_base, tl1ptp);
11664 +
11665 +    switch (ELAN3_PTP_TYPE(tl1ptp))
11666 +    {
11667 +    case ELAN3_ET_PTE:
11668 +       if (level == PTBL_LEVEL_1)
11669 +       {
11670 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11671 +
11672 +           tl1ptp = elan3_readptp (dev, l1ptp);
11673 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_PTE)
11674 +           {
11675 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11676 +               goto retryl1;
11677 +           }
11678 +           
11679 +           *pptbl = l1ptbl;
11680 +           *plock = l1lock;
11681 +           *flags = l1flags;
11682 +
11683 +           /* return holding l1lock */
11684 +           return (l1ptp);
11685 +       }
11686 +       panic ("elan3mmu_ptealloc: found pte in level 1 page table");
11687 +       /* NOTREACHED */
11688 +
11689 +    case ELAN3_ET_PTP:
11690 +       if (level == PTBL_LEVEL_1)
11691 +           panic ("elan3mmu_ptealloc: found PTP when loading a level 1 PTE\n");
11692 +       break;
11693 +
11694 +    case ELAN3_ET_INVALID:
11695 +       if (level == PTBL_LEVEL_1)
11696 +       {
11697 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu,  &idx)) == NULL)
11698 +               return ((sdramaddr_t) 0);
11699 +
11700 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11701 +
11702 +           tl1ptp = elan3_readptp (dev, l1ptp);
11703 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
11704 +           {
11705 +               /* raced with someone else, whose got there first */
11706 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
11707 +
11708 +               /* drop the l1lock and retry */
11709 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11710 +               goto retryl1;
11711 +           }
11712 +           
11713 +           tl1ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
11714 +           
11715 +           elan3_writeptp (dev, l1ptp, tl1ptp);
11716 +
11717 +           *pptbl = l1ptbl;
11718 +           *plock = l1lock;
11719 +           *flags = l1flags;
11720 +
11721 +           /* return holding l1lock */
11722 +           return (l1ptp);
11723 +       }
11724 +
11725 +       if (level == PTBL_LEVEL_2)
11726 +       {
11727 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
11728 +               return ((sdramaddr_t) 0);
11729 +
11730 +           if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL)
11731 +           {
11732 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx); 
11733 +               return ((sdramaddr_t) 0);
11734 +           }
11735 +
11736 +           /* Connect l2ptbl to the new LX pte */
11737 +           l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
11738 +           tl2ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
11739 +
11740 +           elan3_writeptp (dev, l2ptp, tl2ptp);
11741 +
11742 +           /* Now need to lock the l1 ptbl */
11743 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11744 +
11745 +           elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11746 +           elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags);
11747 +
11748 +           tl1ptp = elan3_readptp (dev, l1ptp);
11749 +           if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
11750 +           {
11751 +               HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free l2 ptbl/lx pte\n");
11752 +               
11753 +               tl2ptp = ELAN3_INVALID_PTP;
11754 +               elan3_writeptp (dev, l2ptp, tl2ptp);
11755 +               
11756 +               HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11757 +               HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11758 +               
11759 +               elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags);
11760 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
11761 +
11762 +               elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11763 +
11764 +               goto retryl1;
11765 +           }
11766 +           
11767 +           /* Now have L1 locked,  so install the L2 ptbl */
11768 +           l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11769 +           tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP;
11770 +           l1ptbl->ptbl_valid++;
11771 +
11772 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
11773 +                        PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11774 +           
11775 +           elan3_writeptp (dev, l1ptp, tl1ptp);
11776 +           
11777 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp);
11778 +
11779 +           /* unordered unlock - lock l1ptbl, lock l2ptbl, unlock l1ptbl */
11780 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l2flags); /* need to unlock with the l2flags to keep irq order correct */
11781 +
11782 +           *pptbl = l2ptbl;
11783 +           *plock = l2lock;
11784 +           *flags = l1flags; /* return the l1flags here as we have released the l2flags already to keep order */
11785 +
11786 +           /* return holding l2lock */
11787 +           return (l2ptp);
11788 +       }
11789 +
11790 +       HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocating level 2 and level 3 page tables\n");
11791 +
11792 +       /* Allocate a level 2 and level 3 page table and link them together */
11793 +       if ((l2ptbl = elan3mmu_alloc_l2ptbl (dev, attr, l1ptbl, elan3mmu, ELAN3_L2_BASE(addr), &l2lock, &l2flags)) == NULL)
11794 +           return ((sdramaddr_t) 0);
11795 +
11796 +       if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr | PTE_NO_SLEEP, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL)
11797 +       {
11798 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11799 +           return ((sdramaddr_t) 0);
11800 +       }
11801 +
11802 +       ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags));
11803 +       ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2);
11804 +       ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags));
11805 +       ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3);
11806 +
11807 +       HAT_PRINTF6 (2, "elan3mmu_ptealloc: l2ptbl %p (%x,%x) l3ptbl %p (%x,%x)\n",
11808 +                    l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_base,
11809 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_base);
11810 +
11811 +       if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt))
11812 +       {
11813 +           l2ptbl->ptbl_flags |= PTBL_KERNEL;
11814 +           elan3mmu_kernel_l3ptbl (l3ptbl);
11815 +       }
11816 +       
11817 +       /*
11818 +        * Connect L3 ptbl to the new L2 ptbl.
11819 +        */
11820 +       l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr) * ELAN3_PTP_SIZE;
11821 +       tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP;
11822 +
11823 +       l2ptbl->ptbl_valid = 1;
11824 +
11825 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: set valid for level %d ptbl %p to %d\n", 
11826 +                    PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
11827 +
11828 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11829 +
11830 +       elan3_writeptp (dev, l2ptp, tl2ptp);
11831 +
11832 +       /* 
11833 +        * Now need to lock the l1 ptbl - to maintain lock ordering
11834 +        * we set the PTBL_KEEP bit to stop the l3 ptbl from being 
11835 +        * stolen and drop the locks in the order we aquired them
11836 +        */
11837 +       l3ptbl->ptbl_flags |= PTBL_KEEP;
11838 +
11839 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
11840 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11841 +
11842 +       elan3mmu_lock_ptbl (l1ptbl, 0, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &l1flags);
11843 +       elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
11844 +           
11845 +       l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11846 +          
11847 +       /* Now have l1 and l3 ptbls locked,  so install the new l2 ptbl into the l1. */
11848 +       tl1ptp = elan3_readptp (dev, l1ptp);
11849 +
11850 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: l1ptp %lx is %x\n", l1ptp, tl1ptp);
11851 +
11852 +       if (ELAN3_PTP_TYPE(tl1ptp) != ELAN3_ET_INVALID)
11853 +       {
11854 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free l2/l3 ptbls\n");
11855 +
11856 +           /* free off the level 3 page table */
11857 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l3 ptbl %p (%x)\n", l3ptbl, l3ptbl->ptbl_flags);
11858 +
11859 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
11860 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
11861 +
11862 +           /* and unlock the level 1 ptbl */
11863 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, l1flags);
11864 +           
11865 +           /* lock the level 2 page table, and clear out the PTP, then free it */
11866 +           (void) elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags);
11867 +
11868 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: locked l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11869 +           
11870 +           tl2ptp = ELAN3_INVALID_PTP;
11871 +           elan3_writeptp (dev, l2ptp, tl2ptp);
11872 +           l2ptbl->ptbl_valid = 0;
11873 +
11874 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: set to 0 valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
11875 +
11876 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
11877 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: freeing l2 ptbl %p (%x)\n", l2ptbl, l2ptbl->ptbl_flags);
11878 +
11879 +           elan3mmu_free_l2ptbl (dev, l2ptbl, l2lock, l2flags);
11880 +
11881 +           goto retryl1;
11882 +       }
11883 +       
11884 +       HAT_PRINTF4 (2, "elan3mmu_ptealloc: l1ptbl is %p (%x), l3ptbl is %p (%x)\n", 
11885 +                    l1ptbl, l1ptbl->ptbl_flags, l3ptbl, l3ptbl->ptbl_flags);
11886 +
11887 +       /* Now have L1 and L3 locked,  so install the L2 ptbl */
11888 +       l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
11889 +       tl1ptp = PTBL_TO_PTADDR(l2ptbl) | ELAN3_ET_PTP;
11890 +       l1ptbl->ptbl_valid++;
11891 +
11892 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
11893 +                    PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
11894 +
11895 +       elan3_writeptp (dev, l1ptp, tl1ptp);
11896 +
11897 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l1ptp %lx to %x\n", l1ptp, tl1ptp);
11898 +
11899 +       /* unordered unlock - lock l1ptbl, lock l3ptbl, unlock l1ptbl */
11900 +       elan3mmu_unlock_ptbl (l1ptbl, l1lock, l3flags); /* free using l3flags to keep irq ordering */
11901 +
11902 +       l3pte = PTBL_TO_PTADDR (l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
11903 +
11904 +       /* Level 3 ptbl is already locked,  so just return the pte */
11905 +       *pptbl = l3ptbl;
11906 +       *plock = l3lock;
11907 +       *flags = l1flags; /* return l1flags to keep irq ordering */
11908 +
11909 +       return (l3pte);
11910 +
11911 +    default:
11912 +       panic ("elan3mmu_ptealloc: found bad entry in level 1 page table");
11913 +       /* NOTREACHED */
11914 +    }
11915 +
11916 +    HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 2 ptbl from ptp %x\n", tl1ptp);
11917 +
11918 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
11919 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
11920 +    l2base = ELAN3_L2_BASE(addr);
11921 +
11922 +    tl2ptp = elan3_readptp (dev, l2ptp);
11923 +
11924 +    HAT_PRINTF5 (2, "elan3mmu_ptealloc: l2ptbl %p l2ptp %lx l2base %x (%x) : tl2ptp %x\n",
11925 +                l2ptbl, l2ptp, l2base, l2ptbl->ptbl_base, tl2ptp);
11926 +
11927 +    switch (ELAN3_PTP_TYPE(tl2ptp))
11928 +    {
11929 +    case ELAN3_ET_PTE:
11930 +       if (level == PTBL_LEVEL_2) {
11931 +           /* this is a pointer to a pte,  we should just return it */
11932 +
11933 +           switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11934 +           {
11935 +           case LK_PTBL_OK:
11936 +               break;
11937 +       
11938 +           case LK_PTBL_FAILED:
11939 +               panic ("elan3mmu_ptealloc: l2 lock failed");
11940 +               /* NOTREACHED */
11941 +               
11942 +           case LK_PTBL_MISMATCH:
11943 +               HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11944 +                            l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11945 +               
11946 +               /*
11947 +                * We've trogged down to this ptbl,  but someone has just
11948 +                * stolen it,  so try all over again.
11949 +                */
11950 +               goto retryl1;
11951 +               
11952 +           default:
11953 +               panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
11954 +               /* NOTREACHED */
11955 +           }
11956 +
11957 +
11958 +           tl2ptp = elan3_readptp (dev, l2ptp);
11959 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_PTE)
11960 +           {
11961 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
11962 +               goto retryl1;
11963 +           }
11964 +
11965 +           *pptbl = l2ptbl;
11966 +           *plock = l2lock;
11967 +           *flags = l2flags;
11968 +
11969 +           /* return holding l2lock */
11970 +           return (l2ptp);
11971 +       }
11972 +       panic ("elan3mmu: found pte in level 2 page table");
11973 +       /* NOTREACHED */
11974 +
11975 +    case ELAN3_ET_PTP:
11976 +       break;
11977 +
11978 +    case ELAN3_ET_INVALID:
11979 +       if (level == PTBL_LEVEL_2) 
11980 +       {
11981 +           if ((lXptbl = elan3mmu_alloc_pte (dev, elan3mmu, &idx)) == NULL)
11982 +               return ((sdramaddr_t) 0);
11983 +
11984 +           switch (elan3mmu_lock_ptbl (l2ptbl, 0, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags))
11985 +           {
11986 +           case LK_PTBL_OK:
11987 +               break;
11988 +       
11989 +           case LK_PTBL_FAILED:
11990 +               panic ("elan3mmu_ptealloc: l2 lock failed");
11991 +               /* NOTREACHED */
11992 +               
11993 +           case LK_PTBL_MISMATCH:
11994 +               HAT_PRINTF6 (2, "elan3mmu_ptealloc: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x)\n",
11995 +                            l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr);
11996 +               
11997 +               /*
11998 +                * We've trogged down to this ptbl,  but someone has just
11999 +                * stolen it,  so try all over again.
12000 +                */
12001 +               goto retryl1;
12002 +               
12003 +           default:
12004 +               panic ("elan3mmu_ptealloc: elan3mmu_lock_ptbl returned bad value");
12005 +               /* NOTREACHED */
12006 +           }
12007 +
12008 +           tl2ptp = elan3_readptp (dev, l2ptp);
12009 +           if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID)
12010 +           {
12011 +               HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it,  free lx pte\n");
12012 +
12013 +               elan3mmu_free_pte (dev, elan3mmu, lXptbl, idx);
12014 +
12015 +               elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
12016 +               goto retryl1;
12017 +           }
12018 +
12019 +           /* Connect l2ptbl to the new LX pte */
12020 +           tl2ptp = PTBL_TO_PTADDR(lXptbl) | (idx * ELAN3_PTE_SIZE) | ELAN3_ET_PTE;
12021 +                  
12022 +           HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n", 
12023 +                        PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
12024 +           
12025 +           elan3_writeptp (dev, l2ptp, tl2ptp);
12026 +           
12027 +           HAT_PRINTF2 (2, "elan3mmu_ptealloc: write l2ptp %lx to %x\n", l2ptp, tl2ptp);
12028 +
12029 +           *pptbl = l2ptbl;
12030 +           *plock = l2lock;
12031 +           *flags = l2flags;
12032 +
12033 +           /* return holding l2lock */
12034 +           return (l2ptp);
12035 +       }
12036 +       HAT_PRINTF0 (2, "elan3mmu_ptealloc: allocate level 3 page table\n");
12037 +
12038 +       if ((l3ptbl = elan3mmu_alloc_l3ptbl (dev, attr, l2ptbl, elan3mmu, ELAN3_L3_BASE(addr), &l3lock, &l3flags)) == NULL)
12039 +           return ((sdramaddr_t) 0);
12040 +
12041 +       if (CTXT_IS_KERNEL (elan3mmu->elan3mmu_ctxt))
12042 +           elan3mmu_kernel_l3ptbl (l3ptbl);
12043 +
12044 +       /* 
12045 +        * Now need to lock the l2 ptbl - to maintain lock ordering
12046 +        * we set the PTBL_KEEP bit to stop the l3 ptbl from being 
12047 +        * stolen and drop the locks in the order we acquired them
12048 +        */
12049 +       l3ptbl->ptbl_flags |= PTBL_KEEP;
12050 +
12051 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
12052 +
12053 +       if (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &l2flags) == LK_PTBL_MISMATCH)
12054 +       {
12055 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: l2ptbl freed, free l3 ptbl and try again\n");
12056 +             
12057 +           elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
12058 +
12059 +           /* free off the level 3 page table, and try again */
12060 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
12061 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
12062 +           
12063 +           goto retryl1;
12064 +       }
12065 +
12066 +       elan3mmu_lock_ptbl (l3ptbl, 0, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags);
12067 +
12068 +       l3ptbl->ptbl_flags &= ~PTBL_KEEP;
12069 +
12070 +       /* Now have L2 and L3 ptbls locked, see if someone has beaten us to it. */
12071 +       tl2ptp = elan3_readptp (dev, l2ptp);
12072 +
12073 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: l2ptp at %lx is %x\n", l2ptp, tl2ptp);
12074 +
12075 +       if (ELAN3_PTP_TYPE(tl2ptp) != ELAN3_ET_INVALID)
12076 +       {
12077 +           HAT_PRINTF0 (2, "elan3mmu_ptealloc: beaten to it, free l3 ptbl and try again\n");
12078 +
12079 +           /* free off the level 3 page table, and try again */
12080 +           l3ptbl->ptbl_flags &= ~PTBL_KEEP;
12081 +           elan3mmu_free_l3ptbl (dev, l3ptbl, l3lock, l3flags);
12082 +           
12083 +           /* Someone has allocated the ptbl before us */
12084 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, l2flags);
12085 +           
12086 +           goto retryl1;
12087 +       }
12088 +
12089 +       ASSERT (PTBL_IS_LOCKED (l2ptbl->ptbl_flags));
12090 +
12091 +       /* Install the L3 ptbl into the L2 one */
12092 +       l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
12093 +       tl2ptp = PTBL_TO_PTADDR(l3ptbl) | ELAN3_ET_PTP;
12094 +       l2ptbl->ptbl_valid++;
12095 +
12096 +       HAT_PRINTF3 (2, "elan3mmu_ptealloc: inc valid for level %d ptbl %p to %d\n",
12097 +                    PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
12098 +
12099 +       elan3_writeptp (dev, l2ptp, tl2ptp);
12100 +
12101 +       HAT_PRINTF2 (2, "elan3mmu_ptealloc: write level 2 ptp %lx to %x\n", l2ptp, tl2ptp);
12102 +
12103 +       /* unordered unlock - lock l2ptbl, lock l3ptbl, unlock l2ptbl */
12104 +       elan3mmu_unlock_ptbl (l2ptbl, l2lock, l3flags); /* free with the l3flags to keep irq ordering */
12105 +
12106 +       l3pte = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
12107 +       
12108 +       /* Level 3 ptbl is already locked, so just return the pte */
12109 +       *pptbl = l3ptbl;
12110 +       *plock = l3lock;
12111 +       *flags = l2flags; /* return l2flags to keep irq ordering */
12112 +
12113 +       return (l3pte);
12114 +
12115 +    default:
12116 +       panic ("elan3mmu_ptealloc: found bad entry in level 2 page table");
12117 +       /* NOTREACHED */
12118 +    }
12119 +
12120 +    HAT_PRINTF1 (2, "elan3mmu_ptealloc: chain to level 3 page table from ptp %x\n", tl2ptp);
12121 +
12122 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
12123 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
12124 +    l3base = ELAN3_L3_BASE(addr);
12125 +
12126 +    HAT_PRINTF4 (2, "elan3mmu_ptealloc: l3ptbl %p 3pte %lx l3base %x (%x)\n",
12127 +                l3ptbl, l3pte, l3base, l3ptbl->ptbl_base);
12128 +                
12129 +    if (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &l3flags) == LK_PTBL_OK)
12130 +    {
12131 +       *pptbl = l3ptbl;
12132 +       *plock = l3lock;
12133 +       *flags = l3flags;
12134 +
12135 +       return (l3pte);
12136 +    }
12137 +
12138 +    /* got all the way down here,  but its been nicked before we could lock it */
12139 +    /* so try all over again */
12140 +    goto retryl1;
12141 +}
12142 +
12143 +void
12144 +elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int attr)
12145 +{
12146 +    ELAN3_DEV     *dev = elan3mmu->elan3mmu_dev;
12147 +    ELAN3_PTP      invalidptp = ELAN3_INVALID_PTP;
12148 +    ELAN3_PTP      tl1ptp;
12149 +    sdramaddr_t   l1ptp;
12150 +    E3_Addr       addr;
12151 +    spinlock_t   *l2lock;
12152 +    ELAN3_PTBL    *l2ptbl;
12153 +    ELAN3_PTBL    *lXptbl;
12154 +    int           idx;
12155 +    int                  i;
12156 +    int                  ret;
12157 +    unsigned long flags;
12158 +
12159 +    l1ptp = PTBL_TO_PTADDR(l1ptbl);
12160 +
12161 +    HAT_PRINTF2 (1, "elan3mmu_l1inval: l1ptbl %p l1ptp %lx\n", l1ptbl, l1ptp);
12162 +
12163 +    for (i = 0, addr = 0; i < ELAN3_L1_ENTRIES; i++, l1ptp += ELAN3_PTP_SIZE)
12164 +    {
12165 +       tl1ptp = elan3_readptp (dev, l1ptp);
12166 +       switch (ELAN3_PTP_TYPE(tl1ptp))
12167 +       {
12168 +       case ELAN3_ET_PTE:
12169 +           lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
12170 +           idx    = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
12171 +
12172 +           HAT_PRINTF3 (2, "elan3mmu_l1inval: l1ptbl %p : lXptbl %p idx %d\n",
12173 +                        l1ptbl, lXptbl, idx);
12174 +
12175 +           /* invalidate the L1 pte. */
12176 +           elan3_writeptp (dev, l1ptp, invalidptp);
12177 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
12178 +               ElanFlushTlb (dev);         
12179 +
12180 +           l1ptbl->ptbl_valid--;
12181 +           elan3mmu_free_pte ( dev, elan3mmu,  lXptbl, idx); 
12182 +
12183 +           HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n",
12184 +                    PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
12185 +           
12186 +           break;
12187 +
12188 +       case ELAN3_ET_PTP:
12189 +           HAT_PRINTF5 (2, "elan3mmu_l1inval: l1ptbl %p : ptp %lx (%x) addr %x (%d)\n",
12190 +                        l1ptbl, l1ptp, tl1ptp, addr, i);
12191 +
12192 +           /* invalidate the L1 ptp. */
12193 +           elan3_writeptp (dev, l1ptp, invalidptp);
12194 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
12195 +               ElanFlushTlb (dev);
12196 +
12197 +           /* invalidate the level 2 page table */
12198 +           l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
12199 +           ret    = elan3mmu_l2inval (elan3mmu, l2ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l2lock, &flags);
12200 +
12201 +           ASSERT ((l2ptbl->ptbl_flags & PTBL_KEEP) == 0);
12202 +           
12203 +           if (ret == LK_PTBL_OK)
12204 +           {
12205 +               if (((l2ptbl->ptbl_flags & PTBL_KEEP) == 0) && l2ptbl->ptbl_valid == 0)
12206 +               {
12207 +                   HAT_PRINTF1 (2, "elan3mmu_l1inval: free l2ptbl %p\n", l2ptbl);
12208 +                   
12209 +                   l1ptbl->ptbl_valid--;
12210 +                   elan3mmu_free_l2ptbl (elan3mmu->elan3mmu_dev, l2ptbl, l2lock, flags);
12211 +
12212 +                   HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n", 
12213 +                                PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
12214 +               }
12215 +               else
12216 +               {
12217 +                   /* need to keep this page table,  so even though its now empty, */
12218 +                   /* chain it back in */
12219 +                   HAT_PRINTF1 (2, "elan3mmu_l1inval: keep l2ptbl %p\n", l2ptbl);
12220 +
12221 +                   elan3_writeptp (dev, l1ptp, tl1ptp);
12222 +                   elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
12223 +               }    
12224 +           }
12225 +           else
12226 +           {
12227 +               l1ptbl->ptbl_valid--;
12228 +
12229 +               HAT_PRINTF3 (2, "elan3mmu_l1inval: dec valid for level %d ptbl %p to %d\n", 
12230 +                            PTBL_LEVEL(l1ptbl->ptbl_flags), l1ptbl, l1ptbl->ptbl_valid);
12231 +           }
12232 +           break;
12233 +           
12234 +       case ELAN3_ET_INVALID:
12235 +           break;
12236 +
12237 +       default:
12238 +           panic ("elan3mmu_l1inval: found invalid entry in level 1 page table");
12239 +           /* NOTREACHED */
12240 +       }
12241 +
12242 +       if (l1ptbl->ptbl_valid == 0)
12243 +           break;
12244 +
12245 +       addr += ELAN3_L1_SIZE;
12246 +    }
12247 +}
12248 +
12249 +int
12250 +elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int attr, E3_Addr addr, spinlock_t **pl2lock, unsigned long *flags)
12251 +{
12252 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
12253 +    ELAN3_PTP    invalidptp = ELAN3_INVALID_PTP;
12254 +    ELAN3_PTP    tl2ptp;
12255 +    sdramaddr_t l2ptp;
12256 +    spinlock_t *l3lock;
12257 +    unsigned long l3flags;
12258 +    ELAN3_PTBL  *l3ptbl;
12259 +    ELAN3_PTBL  *lXptbl;
12260 +    int         idx;
12261 +    int                i;
12262 +    int                ret;
12263 +
12264 +    HAT_PRINTF2 (1, "elan3mmu_l2inval: l2ptbl %p addr %x\n", l2ptbl, addr);
12265 +
12266 +    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_flags) == PTBL_LEVEL_2);
12267 +    ASSERT (PTBL_LEVEL (l2ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_1);
12268 +
12269 +    ret = elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, pl2lock, flags);
12270 +
12271 +    ASSERT (ret == LK_PTBL_OK);
12272 +    ASSERT (l2ptbl->ptbl_elan3mmu == elan3mmu);
12273 +    ASSERT (l2ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
12274 +
12275 +    l2ptp = PTBL_TO_PTADDR(l2ptbl);
12276 +
12277 +    for (i = 0; i < ELAN3_L2_ENTRIES; i++, l2ptp += ELAN3_PTP_SIZE)
12278 +    {
12279 +       tl2ptp = elan3_readptp (dev, l2ptp);
12280 +       switch (ELAN3_PTP_TYPE(tl2ptp))
12281 +       {
12282 +       case ELAN3_ET_PTE:
12283 +           lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
12284 +           idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
12285 +
12286 +           HAT_PRINTF3 (2, "elan3mmu_l2inval: l2ptbl %p : lXptbl %p idx %d\n",
12287 +                        l2ptbl, lXptbl, idx);
12288 +
12289 +           /* invalidate the L2 pte. */
12290 +           elan3_writeptp (dev, l2ptp, invalidptp);
12291 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
12292 +               ElanFlushTlb (dev);
12293 +
12294 +           l2ptbl->ptbl_valid--;
12295 +           elan3mmu_free_pte ( dev, elan3mmu, lXptbl, idx); 
12296 +
12297 +           HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid); 
12298 +
12299 +           break;
12300 +           
12301 +       case ELAN3_ET_PTP:
12302 +           HAT_PRINTF5 (2, "elan3mmu_l2inval: l2ptbl %p : ptp %lx (%x) addr %x (%d)\n",
12303 +                        l2ptbl, l2ptp, tl2ptp, addr, i);
12304 +
12305 +           /* invalidate the L2 ptp. */
12306 +           elan3_writeptp (dev, l2ptp, invalidptp);
12307 +           if (! (attr & PTE_UNLOAD_NOFLUSH))
12308 +               ElanFlushTlb (dev);
12309 +           
12310 +           /* unload the level 3 page table */
12311 +           l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
12312 +           ret = elan3mmu_l3inval (elan3mmu, l3ptbl, attr | PTE_UNLOAD_NOFLUSH, addr, &l3lock, &l3flags);
12313 +           
12314 +           if (ret == LK_PTBL_OK)
12315 +           {
12316 +               if ((l3ptbl->ptbl_flags & PTBL_KEEP) == 0 && l3ptbl->ptbl_valid == 0)
12317 +               {
12318 +                   /* decrement the valid count of the level 2 page table, and */
12319 +                   /* free off the level 3 page table */
12320 +                   HAT_PRINTF1 (2, "elan3mmu_l2inval: free l3ptbl %p\n", l3ptbl);
12321 +
12322 +                   l2ptbl->ptbl_valid--;
12323 +                   elan3mmu_free_l3ptbl (elan3mmu->elan3mmu_dev, l3ptbl, l3lock, l3flags);
12324 +
12325 +                   HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", 
12326 +                                PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
12327 +               }
12328 +               else
12329 +               {
12330 +                   /* need to keep this page table,  so even though its now empty, */
12331 +                   /* chain it back in */
12332 +                   HAT_PRINTF1 (2, "elan3mmu_l2inval: keep l3ptbl %p\n", l3ptbl);
12333 +
12334 +                   elan3_writeptp (dev, l2ptp, tl2ptp);
12335 +                   elan3mmu_unlock_ptbl (l3ptbl, l3lock, l3flags);
12336 +               }
12337 +           }
12338 +           else
12339 +           {
12340 +               l2ptbl->ptbl_valid--;
12341 +
12342 +               HAT_PRINTF3 (2, "elan3mmu_l2inval: dec valid for level %d ptbl %p to %d\n", 
12343 +                            PTBL_LEVEL(l2ptbl->ptbl_flags), l2ptbl, l2ptbl->ptbl_valid);
12344 +           }
12345 +           break;
12346 +           
12347 +       case ELAN3_ET_INVALID:
12348 +           break;
12349 +
12350 +       default:
12351 +           panic ("elan3mmu_l2inval: found pte in level 2 page table");
12352 +           /* NOTREACHED */
12353 +       }
12354 +
12355 +       if (l2ptbl->ptbl_valid == 0)
12356 +           break;
12357 +
12358 +       addr += ELAN3_L2_SIZE;
12359 +    }
12360 +
12361 +    ASSERT (PTBL_IS_LOCKED(l2ptbl->ptbl_flags));
12362 +
12363 +    return (ret);
12364 +}
12365 +
12366 +int 
12367 +elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int attr, E3_Addr addr, spinlock_t **pl3lock, unsigned long *flags)
12368 +{
12369 +    int ret;
12370 +
12371 +    HAT_PRINTF3 (2, "elan3mmu_l3inval: l3ptbl %p parent %p addr %x\n", l3ptbl, l3ptbl->ptbl_parent, addr);
12372 +
12373 +    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_parent->ptbl_flags));
12374 +    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_parent->ptbl_flags) == PTBL_LEVEL_2);
12375 +    ASSERT (l3ptbl->ptbl_parent->ptbl_elan3mmu == elan3mmu);
12376 +    ASSERT (l3ptbl->ptbl_parent->ptbl_base == VA2BASE (ELAN3_L2_BASE(addr)));
12377 +    
12378 +    ret = elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, pl3lock, flags);
12379 +
12380 +    ASSERT (ret == LK_PTBL_OK);
12381 +    ASSERT (PTBL_LEVEL (l3ptbl->ptbl_flags) == PTBL_LEVEL_3);
12382 +
12383 +    elan3mmu_unload_loop (elan3mmu, l3ptbl, 0, ELAN3_L3_ENTRIES, attr);
12384 +
12385 +    ASSERT (PTBL_IS_LOCKED (l3ptbl->ptbl_flags));
12386 +
12387 +    return (ret);
12388 + }
12389 +
12390 +int
12391 +elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags)
12392 +{
12393 +    int         level = PTBL_LEVEL (ptbl->ptbl_flags);
12394 +    spinlock_t *lock  = elan3mmu_ptbl_to_lock (level, ptbl);
12395 +
12396 +    local_irq_save (*flags);
12397 +
12398 +    if ((flag & LK_PTBL_NOWAIT) == 0)
12399 +       spin_lock (lock);
12400 +    else if (! spin_trylock (lock)) {
12401 +       local_irq_restore (*flags);
12402 +       return (LK_PTBL_FAILED);
12403 +    }
12404 +    
12405 +    if (level != PTBL_LEVEL (ptbl->ptbl_flags))
12406 +    {
12407 +       spin_unlock (lock);     
12408 +       local_irq_restore (*flags);
12409 +       return (LK_PTBL_MISMATCH);
12410 +    }
12411 +
12412 +    ptbl->ptbl_flags |= PTBL_LOCKED;
12413 +    *plock = lock;
12414 +    return (LK_PTBL_OK);
12415 +}
12416 +
12417 +int
12418 +elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags)
12419 +{
12420 +    spinlock_t *lock = elan3mmu_ptbl_to_lock (level, ptbl);
12421 +    int         res  = LK_PTBL_MISMATCH;
12422 +
12423 +    local_irq_save (*flags);
12424 +    
12425 +    if ((flag & LK_PTBL_NOWAIT) == 0)
12426 +       spin_lock (lock);
12427 +    else if (spin_trylock (lock) == 0) {
12428 +       local_irq_restore(*flags);
12429 +       return (LK_PTBL_FAILED);
12430 +    }
12431 +    
12432 +    if (PTBL_LEVEL (ptbl->ptbl_flags) != level)
12433 +    {
12434 +       res = LK_PTBL_MISMATCH;
12435 +       goto mismatch;
12436 +    }
12437 +    
12438 +    /* We have the right mutex,  so check that its the ptbl we want. */
12439 +    switch (level)
12440 +    {
12441 +    case PTBL_LEVEL_1: va = ELAN3_L1_BASE(va); break;
12442 +    case PTBL_LEVEL_2: va = ELAN3_L2_BASE(va); break;
12443 +    case PTBL_LEVEL_3: va = ELAN3_L3_BASE(va); break;
12444 +    }
12445 +
12446 +    if (ptbl->ptbl_elan3mmu != elan3mmu || ptbl->ptbl_base != VA2BASE(va))
12447 +    {
12448 +       res = LK_PTBL_MISMATCH;
12449 +       goto mismatch;
12450 +    }
12451 +
12452 +    ASSERT ((ptbl->ptbl_flags & PTBL_LOCKED) == 0);
12453 +    ptbl->ptbl_flags |= PTBL_LOCKED;
12454 +
12455 +    *plock = lock;
12456 +    return (LK_PTBL_OK);
12457 +
12458 +mismatch:
12459 +    if (! (flag & LK_PTBL_FAILOK))
12460 +       panic ("elan3mmu: failed to lock ptbl\n");
12461 +       
12462 +    spin_unlock (lock);
12463 +    local_irq_restore(*flags);
12464 +    return (res);
12465 +}
12466 +
12467 +void
12468 +elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags)
12469 +{
12470 +    ptbl->ptbl_flags &= ~PTBL_LOCKED;
12471 +    spin_unlock_irqrestore (lock,flags);
12472 +}
12473 +
12474 +static spinlock_t *
12475 +elan3mmu_ptbl_to_lock (int level, ELAN3_PTBL *ptbl)
12476 +{
12477 +    switch (level)
12478 +    {
12479 +    case PTBL_LEVEL_3: return (&l3ptbl_lock[L3PTBL_MTX_HASH(ptbl)]);
12480 +    case PTBL_LEVEL_2: return (&l2ptbl_lock[L2PTBL_MTX_HASH(ptbl)]);
12481 +    case PTBL_LEVEL_1: return (&l1ptbl_lock[L1PTBL_MTX_HASH(ptbl)]);
12482 +    case PTBL_LEVEL_X: 
12483 +       panic ("elan3mmu: ptbl_to_lock, bad level X");
12484 +    default:
12485 +       panic ("elan3mmu: ptbl_to_lock, bad level");
12486 +       /* NOTREACHED */
12487 +    }
12488 +    return (NULL);
12489 +}
12490 +
12491 +void
12492 +elan3mmu_display (ELAN3MMU *elan3mmu, E3_Addr addr)
12493 +{
12494 +    ELAN3_DEV   *dev = elan3mmu->elan3mmu_dev;
12495 +    ELAN3_PTBL  *l1ptbl;
12496 +    sdramaddr_t l1ptp;
12497 +    spinlock_t *l1lock;
12498 +    ELAN3_PTE    tl1pte;
12499 +    ELAN3_PTP    tl1ptp;
12500 +    E3_Addr     l1base;
12501 +    ELAN3_PTBL  *l2ptbl;
12502 +    sdramaddr_t l2ptp;
12503 +    ELAN3_PTE    tl2pte;
12504 +    spinlock_t *l2lock;
12505 +    ELAN3_PTP    tl2ptp;
12506 +    E3_Addr     l2base;
12507 +    ELAN3_PTBL  *l3ptbl;
12508 +    sdramaddr_t l3pte;
12509 +    ELAN3_PTE    tl3pte;
12510 +    spinlock_t *l3lock;
12511 +    ELAN3_PTBL  *lXptbl;
12512 +    int         idx;
12513 +    unsigned long flags;
12514 +
12515 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: elan3mmu %p addr %x\n", elan3mmu, addr);
12516 +
12517 +    l1ptbl = elan3mmu->elan3mmu_l1ptbl;
12518 +
12519 +    if (l1ptbl == NULL)
12520 +       return;
12521 +
12522 +    l1ptp  = PTBL_TO_PTADDR(l1ptbl) + ELAN3_L1_INDEX(addr)*ELAN3_PTP_SIZE;
12523 +    l1base = ELAN3_L1_BASE(addr);
12524 +    
12525 +    tl1ptp = elan3_readptp (dev, l1ptp);
12526 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l1ptbl %p l1ptp %lx l1base %x : tl1ptp %x\n", l1ptbl, l1ptp, l1base, tl1ptp);
12527 +    
12528 +    switch (ELAN3_PTP_TYPE(tl1ptp))
12529 +    {
12530 +    case ELAN3_ET_PTE:
12531 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 1 page table for pte %x\n", tl1ptp);
12532 +    
12533 +       lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
12534 +       idx    = (PTP_TO_PT_PADDR(tl1ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
12535 +       
12536 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx %d\n",lXptbl, idx);
12537 +
12538 +       tl1pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
12539 +
12540 +       switch (elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_1, &l1lock, &flags))
12541 +       {
12542 +       case LK_PTBL_OK:
12543 +           elan3mmu_unlock_ptbl (l1ptbl, l1lock, flags);
12544 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 1 l1pte matches value %llx\n", (long long) tl1pte);
12545 +           break;
12546 +           
12547 +       case LK_PTBL_FAILED:
12548 +           panic ("elan3mmu_display: l1 lock failed");
12549 +           /* NOTREACHED */
12550 +           
12551 +       case LK_PTBL_MISMATCH:
12552 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 1 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
12553 +                         l1ptbl, l1ptbl->ptbl_flags, l1ptbl->ptbl_elan3mmu, l1ptbl->ptbl_base, elan3mmu, addr, (long long)tl1pte);
12554 +           
12555 +           break;
12556 +       default:
12557 +           panic ("elan3mmu_display: lvl 1 elan3mmu_lock_ptbl returned bad value");
12558 +           /* NOTREACHED */
12559 +       }
12560 +       return;
12561 +       
12562 +    case ELAN3_ET_INVALID:
12563 +       return;
12564 +       
12565 +    case ELAN3_ET_PTP:
12566 +       break;
12567 +       
12568 +    default:
12569 +       panic ("elan3mmu_display: found bad entry in level 1 page table");
12570 +       /* NOTREACHED */
12571 +    }
12572 +    
12573 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 2 ptbl from ptp %x\n", tl1ptp);
12574 +    
12575 +    l2ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl1ptp);
12576 +    l2ptp  = PTBL_TO_PTADDR(l2ptbl) + ELAN3_L2_INDEX(addr)*ELAN3_PTP_SIZE;
12577 +    l2base = ELAN3_L2_BASE(addr);
12578 +    
12579 +    tl2ptp = elan3_readptp (dev, l2ptp);
12580 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l2ptbl %p l2ptp %lx l2base %x : tl2ptp %x\n",
12581 +                l2ptbl, l2ptp, l2base, tl2ptp);
12582 +    
12583 +    switch (ELAN3_PTP_TYPE(tl2ptp))
12584 +    {
12585 +    case ELAN3_ET_PTE:
12586 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: level 2 page table for pte %x\n", tl2ptp);
12587 +    
12588 +       lXptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
12589 +       idx    = (PTP_TO_PT_PADDR(tl2ptp) - PTBL_TO_PTADDR(lXptbl))/ELAN3_PTE_SIZE;  
12590 +       
12591 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lXptbl %p idx %d\n",lXptbl, idx);
12592 +
12593 +       tl2pte = elan3_readpte (dev,(PTBL_TO_PTADDR (lXptbl) + idx * ELAN3_PTE_SIZE));
12594 +
12595 +       switch (elan3mmu_lock_ptbl (l2ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_2, &l2lock, &flags))
12596 +       {
12597 +       case LK_PTBL_OK:
12598 +           elan3mmu_unlock_ptbl (l2ptbl, l2lock, flags);
12599 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: lvl 2 l1pte matches value %llx\n", (long long)tl2pte);
12600 +           break;
12601 +           
12602 +       case LK_PTBL_FAILED:
12603 +           panic ("elan3mmu_display: l2 lock failed");
12604 +           /* NOTREACHED */
12605 +           
12606 +       case LK_PTBL_MISMATCH:
12607 +           elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : lvl 2 ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
12608 +                         l2ptbl, l2ptbl->ptbl_flags, l2ptbl->ptbl_elan3mmu, l2ptbl->ptbl_base, elan3mmu, addr, (long long) tl2pte);
12609 +           
12610 +           break;
12611 +       default:
12612 +           panic ("elan3mmu_display: lvl 2 elan3mmu_lock_ptbl returned bad value");
12613 +           /* NOTREACHED */
12614 +       }
12615 +       return;
12616 +       
12617 +    case ELAN3_ET_INVALID:
12618 +       return;
12619 +       
12620 +    case ELAN3_ET_PTP:
12621 +       break;
12622 +
12623 +    default:
12624 +       panic ("elan3mmu_display: found bad entry in level 2 page table");
12625 +       /* NOTREACHED */
12626 +    }
12627 +    
12628 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: chain to level 3 page table from ptp %x\n", tl2ptp);
12629 +    
12630 +    l3ptbl = elan3mmu_ta_to_ptbl (elan3mmu, &tl2ptp);
12631 +    l3pte  = PTBL_TO_PTADDR(l3ptbl) + ELAN3_L3_INDEX(addr)*ELAN3_PTE_SIZE;
12632 +    
12633 +    elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l3ptbl %p l3pte %lx\n",l3ptbl, l3pte);
12634 +    
12635 +    tl3pte = elan3_readpte (dev, l3pte);
12636 +    switch (elan3mmu_lock_ptbl (l3ptbl, LK_PTBL_FAILOK, elan3mmu, addr, PTBL_LEVEL_3, &l3lock, &flags))
12637 +    {
12638 +    case LK_PTBL_OK:
12639 +       elan3mmu_unlock_ptbl (l3ptbl, l3lock, flags);
12640 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: l3pte matches value %llx\n", (long long) tl3pte);
12641 +       break;
12642 +       
12643 +    case LK_PTBL_FAILED:
12644 +       panic ("elan3mmu_display: l3 lock failed");
12645 +       /* NOTREACHED */
12646 +       
12647 +    case LK_PTBL_MISMATCH:
12648 +       elan3_debugf (NULL, DBG_HAT, "elan3mmu_display: PTBL_MISMATCH : ptbl %p flags %x elan3mmu %p base %x (%p %x) %llx\n",
12649 +                    l3ptbl, l3ptbl->ptbl_flags, l3ptbl->ptbl_elan3mmu, l3ptbl->ptbl_base, elan3mmu, addr, (long long) tl3pte);
12650 +       
12651 +       break;
12652 +       
12653 +    default:
12654 +       panic ("elan3mmu_display: elan3mmu_lock_ptbl returned bad value");
12655 +       /* NOTREACHED */
12656 +    }
12657 +}
12658 +
12659 +
12660 +/*
12661 + * Local variables:
12662 + * c-file-style: "stroustrup"
12663 + * End:
12664 + */
12665 diff -urN clean/drivers/net/qsnet/elan3/elan3mmu_linux.c linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_linux.c
12666 --- clean/drivers/net/qsnet/elan3/elan3mmu_linux.c      1969-12-31 19:00:00.000000000 -0500
12667 +++ linux-2.6.9/drivers/net/qsnet/elan3/elan3mmu_linux.c        2004-12-14 05:19:38.000000000 -0500
12668 @@ -0,0 +1,284 @@
12669 +/*
12670 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12671 + *
12672 + *    For licensing information please see the supplied COPYING file
12673 + *
12674 + */
12675 +
12676 +#ident "@(#)$Id: elan3mmu_linux.c,v 1.53 2004/12/14 10:19:38 mike Exp $"
12677 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/vm/elan3mmu_linux.c,v $*/
12678 +
12679 +#include <qsnet/kernel.h>
12680 +#include <qsnet/kpte.h>
12681 +
12682 +#include <elan3/elanregs.h>
12683 +#include <elan3/elandev.h>
12684 +#include <elan3/elanvp.h>
12685 +#include <elan3/elan3mmu.h>
12686 +#include <elan3/elanctxt.h>
12687 +#include <elan3/elandebug.h>
12688 +#include <elan3/urom_addrs.h>
12689 +#include <elan3/thread.h>
12690 +
12691 +/*
12692 + * Strategy for syncing main <-> elan pte's:
12693 + * 
12694 + *   Install callbacks for linux flush_tlb_page(), flush_tlb_range(),
12695 + *   flush_tlb_all(), and flush_tlb_mm() so when a main PTE changes,
12696 + *   the elan translations, if any, are invalidated.  They can then be
12697 + *   faulted in again with the correct physical page, perms, etc., on demand. 
12698 + * 
12699 + *   Callbacks are stacked on the mm_struct, one per context.  We also stack
12700 + *   a ctxt pointer so we don't have to do lookups on every call.
12701 + *
12702 + *   Sanity check -- we clearly want to flush the elan PTEs in these 
12703 + *   situations, all of which are covered by tlb_flush_{page,range}()
12704 + *
12705 + *     1) kernel/vmscan.c::try_to_swap_out() swaps out a page
12706 + *
12707 + *     2) kernel/mremap.c::copy_one_pte() moves a page as a result of the 
12708 + *     mremap system call
12709 + * 
12710 + *     3) kernel/mprotect.c::change_pte_range() changes the permissions of a 
12711 + *     page as the result of the mprotect system call
12712 + *
12713 + * Other Notes: 
12714 + * 
12715 + *   Dirty a page in the mains page tables when it is faulted into the elan.
12716 + *   This way it will not be thrown away by the swapper.
12717 + * 
12718 + *   Pages write protected for COW are copied by elan3mmu_main_pagefault()
12719 + *   when a writeable translation is loaded into the elan.
12720 + */
12721 +
12722 +caddr_t             elan3mmu_kernel_invalid_space;
12723 +ELAN3_PTE     elan3mmu_kernel_invalid_pte_val;
12724 +
12725 +void
12726 +elan3mmu_init_osdep (void)
12727 +{
12728 +    pte_t *pte;
12729 +
12730 +    KMEM_GETPAGES (elan3mmu_kernel_invalid_space, caddr_t, 1, TRUE);
12731 +
12732 +    ASSERT(elan3mmu_kernel_invalid_space != NULL);
12733 +
12734 +    pte = find_pte_kernel ((unsigned long) elan3mmu_kernel_invalid_space);
12735 +
12736 +    elan3mmu_kernel_invalid_pte_val = ELAN3_PTE_64_BIT | (pte_phys(*pte) & ELAN3_PTE_PFN_MASK) | ELAN3_PERM_REMOTEREAD | ELAN3_ET_PTE;
12737 +
12738 +#ifdef __alpha
12739 +    /*
12740 +     * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
12741 +     *       set any of bits 63:48, then we will set them all by setting bit 48/
12742 +     */
12743 +    if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
12744 +        elan3mmu_kernel_invalid_pte_val |= (1ull << 48);
12745 +    else
12746 +       elan3mmu_kernel_invalid_pte_val |= alpha_mv.pci_dac_offset;
12747 +#endif
12748 +
12749 +    HAT_PRINTF(0x10, "elan3mmu_invalid_space at %p phys=%llx pte=%llx\n", elan3mmu_kernel_invalid_space, 
12750 +              (unsigned long long) pte_phys(*pte), (unsigned long long) elan3mmu_kernel_invalid_pte_val);
12751 +}
12752 +
12753 +void
12754 +elan3mmu_fini_osdep()
12755 +{
12756 +    KMEM_FREEPAGES (elan3mmu_kernel_invalid_space, 1);
12757 +}
12758 +
12759 +void
12760 +elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu)
12761 +{
12762 +    elan3mmu->elan3mmu_coproc_mm = current->mm;
12763 +}
12764 +
12765 +/*
12766 + * Convert physical page frame number to elan pte.
12767 + */
12768 +ELAN3_PTE
12769 +elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm)
12770 +{
12771 +    ELAN3_PTE newpte;
12772 +    
12773 +    ASSERT (paddr != 0);
12774 +    
12775 +    if ((paddr & dev->SdramPhysMask) == dev->SdramPhysBase)            /* SDRAM, turn on PTE_LOCAL bit */
12776 +    {
12777 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx SDRAM\n", (unsigned long long) paddr);
12778 +       
12779 +       newpte = ELAN3_PTE_LOCAL | (paddr & ELAN3_PTE_PFN_MASK & ~dev->SdramPhysMask) | perm | ELAN3_ET_PTE;
12780 +    }
12781 +#if defined(LINUX_ALPHA)
12782 +    else if ((paddr & dev->PciPhysMask) == dev->PciPhysBase)
12783 +    {
12784 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx PCI\n", (unsigned long long) paddr);
12785 +       newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK & ~dev->PciPhysMask) | perm | ELAN3_ET_PTE;
12786 +    }
12787 +#endif
12788 +    else                                               /* main memory, must convert to PCI view */
12789 +    {
12790 +       PRINTF(NULL, DBG_HAT, "elan3mmu_phys_to_pte: phys %llx is main memory\n", (unsigned long long) paddr);
12791 +
12792 +       /* main memory, just set the architecture specific PTE_BYPASS bit */
12793 +       /* This requires the Tsunami chipset being programmed to support
12794 +        * the monster window option. This is in linux-2.4.5 and later kernels 
12795 +        * and is also patched into the RH 7.1/2.4.3-12 Alpha kernel
12796 +        */
12797 +       newpte = ELAN3_PTE_64_BIT | (paddr & ELAN3_PTE_PFN_MASK) | perm | ELAN3_ET_PTE;
12798 +
12799 +#ifdef __alpha
12800 +       /*
12801 +        * NOTE: Elan sign-extends bit 48 of the physical address, so if we need to
12802 +        *       set any of bits 63:48, then we will set them all by setting bit 48/
12803 +        */
12804 +       if (alpha_mv.pci_dac_offset & 0xFFFF000000000000ull)
12805 +            newpte |= (1ull << 48);
12806 +        else
12807 +           newpte |= alpha_mv.pci_dac_offset;
12808 +#endif
12809 +    }
12810 +
12811 +    if ( ELAN3_PERM_WRITEABLE( perm )) 
12812 +       newpte |= ( ELAN3_PTE_MOD | ELAN3_PTE_REF );
12813 +    else
12814 +       newpte |= ( ELAN3_PTE_REF ) ;
12815 +
12816 +    return (newpte);
12817 +}
12818 +
12819 +ELAN3_PTE
12820 +elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu)
12821 +{
12822 +    if (elan3mmu->elan3mmu_dev->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVB)
12823 +       return (elan3mmu_kernel_invalid_pte_val);
12824 +    return (ELAN3_INVALID_PTE);
12825 +}
12826 +
12827 +/* 
12828 + * Invalidate a range of addresses for specified context.
12829 + */
12830 +void
12831 +elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len)
12832 +{
12833 +    E3_Addr       eaddr;
12834 +    ELAN3MMU_RGN *rgn;
12835 +    unsigned long span;
12836 +
12837 +    spin_lock (&elan3mmu->elan3mmu_lock);
12838 +
12839 +    for (; len; len -= span, addr += span)
12840 +    {
12841 +       rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
12842 +
12843 +       if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr)
12844 +           span = len;
12845 +       else if (rgn->rgn_mbase > addr)
12846 +           span = MIN(len, rgn->rgn_mbase - addr);
12847 +       else
12848 +       {
12849 +           span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr);
12850 +           eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
12851 +
12852 +            HAT_PRINTF(0x10, "  unloading eaddr %x main %p (%ld pages)\n", 
12853 +             eaddr, addr, btopr(span));
12854 +           elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD);
12855 +       }                       /* takes care of elan tlb flush also */
12856 +    }
12857 +
12858 +    spin_unlock (&elan3mmu->elan3mmu_lock);
12859 +}
12860 +
12861 +/*
12862 + *
12863 + */
12864 +void
12865 +elan3mmu_update_range (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t vaddr, E3_Addr eaddr, u_int len, u_int perm)
12866 +{
12867 +    u_int roperm = ELAN3_PERM_READONLY(perm & ELAN3_PTE_PERM_MASK) | (perm & ~ELAN3_PTE_PERM_MASK);
12868 +    u_int off;
12869 +
12870 +    HAT_PRINTF3(1, "elan3mmu_update_range (elan3mmu %p addr %p -> %p)\n", elan3mmu, vaddr, vaddr+len-1);
12871 +
12872 +    while (len > 0)
12873 +    {
12874 +       pte_t *pte_ptr;
12875 +       pte_t  pte_value;
12876 +
12877 +       pte_ptr = find_pte_map(mm, (unsigned long)vaddr);
12878 +       if (pte_ptr) {
12879 +           pte_value = *pte_ptr;
12880 +           pte_unmap(pte_ptr);
12881 +       }
12882 +
12883 +       HAT_PRINTF(0x10, "  elan3mmu_update_range %x (%p) %s\n", eaddr, vaddr, 
12884 +               !pte_ptr ? "invalid" : pte_none(pte_value) ? "none " : !pte_present(pte_value) ? "swapped " : 
12885 +               !pte_write(pte_value) ? "RO/COW" : "OK");
12886 +       
12887 +       if (pte_ptr && !pte_none(pte_value) && pte_present(pte_value))
12888 +           for (off = 0; off < PAGE_SIZE; off += ELAN3_PAGE_SIZE)
12889 +               elan3mmu_pteload (elan3mmu, PTBL_LEVEL_3, eaddr + off, pte_phys(pte_value) + off, pte_write(pte_value) ? perm : roperm, PTE_LOAD|PTE_NO_SLEEP|PTE_NO_STEAL);
12890 +       vaddr += PAGESIZE;
12891 +       eaddr += PAGESIZE;
12892 +       len   -= PAGESIZE;
12893 +    }
12894 +}
12895 +
12896 +/* 
12897 + * Update a range of addresses for specified context.
12898 + */
12899 +void
12900 +elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm,caddr_t vaddr, unsigned long len)
12901 +{
12902 +    E3_Addr       eaddr;
12903 +    ELAN3MMU_RGN *rgn;
12904 +    unsigned long span;
12905 +
12906 +    spin_lock (&elan3mmu->elan3mmu_lock);
12907 +
12908 +    for (; len; len -= span, vaddr += span)
12909 +    {
12910 +       rgn = elan3mmu_findrgn_main (elan3mmu, vaddr, 0);
12911 +
12912 +       if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < vaddr)
12913 +           span = len;
12914 +       else if (rgn->rgn_mbase > vaddr)
12915 +           span = MIN(len, rgn->rgn_mbase - vaddr);
12916 +       else
12917 +       {
12918 +           span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - vaddr);
12919 +           eaddr = rgn->rgn_ebase + (vaddr - rgn->rgn_mbase);
12920 +
12921 +            HAT_PRINTF(0x10, "  updating eaddr %u main %p (%ld pages)\n", 
12922 +             eaddr, vaddr, btopr(span));
12923 +           
12924 +           elan3mmu_update_range(elan3mmu, mm, vaddr, eaddr, span, rgn->rgn_perm);
12925 +       }                       
12926 +    }
12927 +
12928 +    spin_unlock (&elan3mmu->elan3mmu_lock);
12929 +}
12930 +
12931 +/* 
12932 + * Invalidate all ptes for the given context.
12933 + */
12934 +void
12935 +elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu)
12936 +{
12937 +    ELAN3_PTBL  *l1ptbl   = (elan3mmu ? elan3mmu->elan3mmu_l1ptbl : NULL);
12938 +    spinlock_t *l1mtx;
12939 +    unsigned long flags;
12940 +
12941 +    if (l1ptbl && elan3mmu_lock_ptbl (l1ptbl, LK_PTBL_FAILOK, elan3mmu, (E3_Addr) 0, 1, &l1mtx, &flags) == LK_PTBL_OK)
12942 +    {
12943 +       elan3mmu_l1inval(elan3mmu, elan3mmu->elan3mmu_l1ptbl, 0);
12944 +       elan3mmu_unlock_ptbl (l1ptbl, l1mtx, flags);
12945 +    }
12946 +}
12947 +
12948 +/*
12949 + * Local variables:
12950 + * c-file-style: "stroustrup"
12951 + * End:
12952 + */
12953 diff -urN clean/drivers/net/qsnet/elan3/elan3ops.c linux-2.6.9/drivers/net/qsnet/elan3/elan3ops.c
12954 --- clean/drivers/net/qsnet/elan3/elan3ops.c    1969-12-31 19:00:00.000000000 -0500
12955 +++ linux-2.6.9/drivers/net/qsnet/elan3/elan3ops.c      2003-09-24 09:57:25.000000000 -0400
12956 @@ -0,0 +1,170 @@
12957 +/*
12958 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
12959 + *
12960 + *    For licensing information please see the supplied COPYING file
12961 + *
12962 + */
12963 +
12964 +#ident "@(#)$Id: elan3ops.c,v 1.4 2003/09/24 13:57:25 david Exp $"
12965 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elan3ops.c,v $*/
12966 +
12967 +#include <qsnet/kernel.h>
12968 +#include <elan/elanmod.h>
12969 +
12970 +#include <elan3/elanregs.h>
12971 +#include <elan3/elandev.h>
12972 +#include <elan3/elan3ops.h>
12973 +
12974 +extern ELAN_STATS_OPS elan3_device_stats_ops;
12975 +
12976 +ELAN_DEV_OPS elan3_dev_ops = {
12977 +
12978 +       get_position,
12979 +       set_position,   
12980 +
12981 +       ELAN_DEV_OPS_VERSION
12982 +};
12983 +
12984 +ELAN_STATS_OPS elan3_device_stats_ops = {
12985 +       ELAN_STATS_OPS_VERSION,
12986 +
12987 +       stats_get_index_name,
12988 +       stats_get_block,
12989 +       stats_clear_block
12990 +};
12991 +
12992 +static char *elan3_device_stats_names[ELAN3_NUM_STATS] = 
12993 +{
12994 +               "version field",                 /* not cleared */
12995 +               "elan interrupts",
12996 +               "tlb flushes",
12997 +               "traps with invalid context",
12998 +               "interrupts com queue half full",
12999 +               "cproc traps",
13000 +               "dproc traps",
13001 +               "tproc traps",
13002 +               "iproc traps",
13003 +               "event interrupts",
13004 +               "elan page faults",
13005 +               "EopBadAcks",
13006 +               "EopResets",
13007 +               "InputterBadLength",
13008 +               "InputterCRCDiscards",
13009 +               "InputterCRCErrors",
13010 +               "InputterCRCBad",       
13011 +               "errors in dma data",
13012 +               "errors after dma identify",
13013 +               "errors after thread identify",
13014 +               "dma retries",
13015 +               "dma output timeouts",
13016 +               "dma packet ack errors",
13017 +               "forced tproc traps",
13018 +               "too many instruction traps",
13019 +               "output timeouts",
13020 +               "packet ack errors",
13021 +               "LockError",
13022 +               "DeskewError",
13023 +               "PhaseError",
13024 +               "DataError",
13025 +               "FifoOvFlow0",
13026 +               "FifoOvFlow1",
13027 +               "link error value on data error",
13028 +               "correctable ecc errors",
13029 +               "uncorrectable ecc errors",
13030 +               "multiple ecc errors",
13031 +               "sdram bytes free",              /* not cleared */
13032 +               "longest interrupt in ticks",
13033 +               "punts of event int's to thread",
13034 +               "reschedules of event int's thread"
13035 +};
13036 +
13037 +int 
13038 +stats_get_index_name (void *arg, uint  index, caddr_t name)
13039 +{
13040 +       copyout (elan3_device_stats_names[index], name, strlen (elan3_device_stats_names[index]) + 1  /* with \0 */);
13041 +
13042 +       return (0);
13043 +}
13044 +
13045 +int
13046 +stats_get_block (void *arg, uint entries, ulong *value)
13047 +{
13048 +       ELAN3_DEV *dev = (ELAN3_DEV *) arg;
13049 +
13050 +       if ( entries >  ELAN3_NUM_STATS ) /* if space too big only send valid portion */
13051 +               entries = ELAN3_NUM_STATS;
13052 +       
13053 +       copyout(&dev->Stats, value, sizeof(ulong) * entries);
13054 +
13055 +       return (0);
13056 +}
13057 +
13058 +int 
13059 +stats_clear_block (void *arg)
13060 +{
13061 +       ELAN3_DEV *dev = (ELAN3_DEV *) arg;
13062 +       u_long   *ptr = (u_long *) &dev->Stats;
13063 +       int                n;
13064 +       
13065 +       for (n = 0; n < ELAN3_NUM_STATS; n++)
13066 +       {
13067 +               switch (n) 
13068 +               {
13069 +               case offsetof (ELAN3_STATS, Version)/sizeof(u_long):
13070 +               case offsetof (ELAN3_STATS, SdramBytesFree)/sizeof(u_long):
13071 +                       break;
13072 +               default:
13073 +                       ptr[n] = (ulong)0;
13074 +               }
13075 +       }
13076 +       return (0);
13077 +}
13078 +
13079 +int 
13080 +get_position (void *user_data, ELAN_POSITION *position)
13081 +{
13082 +       ELAN3_DEV *dev = (ELAN3_DEV *)user_data;
13083 +
13084 +       copyout(&dev->Position, position, sizeof(ELAN_POSITION));
13085 +
13086 +       return (0);     
13087 +}
13088 +
13089 +int 
13090 +set_position (void *user_data, unsigned short nodeId, unsigned short numNodes)
13091 +{
13092 +       ELAN3_DEV *dev = (ELAN3_DEV *)user_data;
13093 +
13094 +       if (ComputePosition (&dev->Position, nodeId, numNodes, dev->Devinfo.dev_num_down_links_value) != 0)
13095 +               return (EINVAL);
13096 +       
13097 +       return (0);     
13098 +}
13099 +
13100 +int
13101 +elan3_register_dev_stats(ELAN3_DEV * dev) 
13102 +{
13103 +       char name[ELAN_STATS_NAME_MAX_LEN+1];
13104 +
13105 +       sprintf (name, ELAN3_STATS_DEV_FMT, dev->Instance);
13106 +
13107 +       elan_stats_register(&dev->StatsIndex,
13108 +                              name,
13109 +                              sizeof (elan3_device_stats_names)/sizeof (elan3_device_stats_names[0]),
13110 +                              &elan3_device_stats_ops,
13111 +                              (void *)dev);
13112 +
13113 +       return (0);
13114 +}
13115 +
13116 +void
13117 +elan3_deregister_dev_stats(ELAN3_DEV * dev) 
13118 +{
13119 +       elan_stats_deregister(dev->StatsIndex);
13120 +}
13121 +
13122 +/*
13123 + * Local variables:
13124 + * c-file-style: "linux"
13125 + * End:
13126 + */
13127 diff -urN clean/drivers/net/qsnet/elan3/elandebug.c linux-2.6.9/drivers/net/qsnet/elan3/elandebug.c
13128 --- clean/drivers/net/qsnet/elan3/elandebug.c   1969-12-31 19:00:00.000000000 -0500
13129 +++ linux-2.6.9/drivers/net/qsnet/elan3/elandebug.c     2003-09-24 09:57:25.000000000 -0400
13130 @@ -0,0 +1,151 @@
13131 +/*
13132 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
13133 + * 
13134 + *    For licensing information please see the supplied COPYING file
13135 + *
13136 + */
13137 +
13138 +#ident "@(#)$Id: elandebug.c,v 1.25 2003/09/24 13:57:25 david Exp $"
13139 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandebug.c,v $*/
13140 +
13141 +#include <qsnet/kernel.h>
13142 +#include <elan3/elanregs.h>
13143 +#include <elan3/elandev.h>
13144 +#include <elan3/elanvp.h>
13145 +#include <elan3/elan3mmu.h>
13146 +#include <elan3/elanctxt.h>
13147 +#include <elan3/elandebug.h>
13148 +
13149 +
13150 +void
13151 +elan3_debugf (void *p, unsigned int mode, char *fmt,...)
13152 +{
13153 +    char prefix[128];
13154 +
13155 +#if defined (DIGITAL_UNIX)
13156 +#define PREFIX_FMT     "[%lx.%08x]"
13157 +#define PREFIX_VAL     (int)CURTHREAD()
13158 +#else
13159 +#define PREFIX_FMT     "[%lx.%04d]"
13160 +#define PREFIX_VAL     (current->pid)
13161 +#endif
13162 +
13163 +    if ((unsigned long) p > DBG_NTYPES)
13164 +    {
13165 +       ELAN3_CTXT *ctxt = (ELAN3_CTXT *) p;
13166 +
13167 +        if (elan3_debug_display_ctxt && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) != elan3_debug_display_ctxt)
13168 +            return;
13169 +        if (elan3_debug_ignore_ctxt  && (ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK) == elan3_debug_ignore_ctxt)
13170 +            return;
13171
13172 +       if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
13173 +           sprintf (prefix, PREFIX_FMT " (XXX) ", lbolt, PREFIX_VAL);
13174 +       else
13175 +           sprintf (prefix, PREFIX_FMT " (%03x) ", lbolt, PREFIX_VAL,
13176 +                    ctxt->Capability.cap_mycontext & MAX_ROOT_CONTEXT_MASK);
13177 +    }
13178 +    else
13179 +    {
13180 +       char *what;
13181 +
13182 +       if (elan3_debug_ignore_dev & (1 << ((unsigned long) p)))
13183 +           return;
13184 +
13185 +       switch ((unsigned long) p)
13186 +       {
13187 +       case (int) DBG_DEVICE: what = "dev"; break;
13188 +       case (int) DBG_KCOMM:  what = "kcm"; break;
13189 +       case (int) DBG_ICS:    what = "ics"; break;
13190 +       case (int) DBG_USER:   what = "usr"; break;
13191 +       default:               what = NULL; break;
13192 +       }
13193 +           
13194 +       if (what)
13195 +           sprintf (prefix, PREFIX_FMT " [%s] ", lbolt,  PREFIX_VAL, what);
13196 +       else
13197 +           sprintf (prefix, PREFIX_FMT " [%3d] ", lbolt,  PREFIX_VAL, (int)(long)what);
13198 +    }
13199 +
13200 +    {
13201 +       va_list       ap;
13202 +
13203 +       va_start (ap, fmt);
13204 +       qsnet_vdebugf ((((mode & elan3_debug_buffer)?QSNET_DEBUG_BUFFER:0)|((mode & elan3_debug_console)?QSNET_DEBUG_CONSOLE:0)) , prefix, fmt, ap);
13205 +       va_end (ap);
13206 +    }
13207 +}
13208 +
13209 +
13210 +void
13211 +elan3_alloc_panicstate (ELAN3_DEV *dev, int allocsdram)
13212 +{
13213 +    register int bank;
13214 +
13215 +    if (dev->PanicState.RegPtr == NULL)
13216 +       KMEM_ZALLOC (dev->PanicState.RegPtr, E3_Regs *, sizeof (E3_Regs), 1);
13217 +
13218 +    if (allocsdram)
13219 +       for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
13220 +           if (dev->PanicState.Sdram[bank] == NULL && dev->SdramBanks[bank].Size)
13221 +               KMEM_ZALLOC (dev->PanicState.Sdram[bank], char *, dev->SdramBanks[bank].Size, 1);
13222 +}
13223 +
13224 +void
13225 +elan3_free_panicstate (ELAN3_DEV *dev)
13226 +{
13227 +    register int bank;
13228 +
13229 +    if (dev->PanicState.RegPtr != NULL)
13230 +       KMEM_FREE (dev->PanicState.RegPtr, sizeof (E3_Regs));
13231 +
13232 +    for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
13233 +       if (dev->PanicState.Sdram[bank] != NULL && dev->SdramBanks[bank].Size)
13234 +           KMEM_FREE (dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size);
13235 +
13236 +    bzero (&dev->PanicState, sizeof (dev->PanicState));
13237 +}
13238 +
13239 +void
13240 +elan3_save_panicstate (ELAN3_DEV *dev)
13241 +{
13242 +    register int bank;
13243 +    
13244 +    if (dev->PanicState.RegPtr)
13245 +    {
13246 +       printk ("elan%d: saving state on panic .....\n", dev->Devinfo.dev_instance);
13247 +
13248 +       bcopy ((void *) dev->RegPtr, (void *) dev->PanicState.RegPtr, sizeof (E3_Regs));
13249 +       
13250 +       for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
13251 +           if (dev->SdramBanks[bank].Size && dev->PanicState.Sdram[bank])
13252 +               elan3_sdram_copyq_from_sdram (dev, (bank << ELAN3_SDRAM_BANK_SHIFT), dev->PanicState.Sdram[bank], dev->SdramBanks[bank].Size);
13253 +       
13254 +    }
13255 +}
13256 +
13257 +int
13258 +elan3_assfail (ELAN3_DEV *dev, char *string, char *file, int line)
13259 +{
13260 +    if (panicstr)
13261 +       return (0);
13262 +
13263 +    printk ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
13264 +
13265 +#if defined(LINUX)
13266 +    elan3_save_panicstate (dev);
13267 +
13268 +    panic ("elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
13269 +#else
13270 +    cmn_err (CE_PANIC, "elan: assertion failed '%s' File '%s' Line %d\n", string, file, line);
13271 +#endif
13272 +    /*NOTREACHED*/
13273 +    return (0);
13274 +}
13275 +
13276 +
13277 +/*
13278 + * Local variables:
13279 + * c-file-style: "stroustrup"
13280 + * End:
13281 + */
13282 diff -urN clean/drivers/net/qsnet/elan3/elandev_generic.c linux-2.6.9/drivers/net/qsnet/elan3/elandev_generic.c
13283 --- clean/drivers/net/qsnet/elan3/elandev_generic.c     1969-12-31 19:00:00.000000000 -0500
13284 +++ linux-2.6.9/drivers/net/qsnet/elan3/elandev_generic.c       2005-07-20 07:35:36.000000000 -0400
13285 @@ -0,0 +1,1867 @@
13286 +/*
13287 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
13288 + * 
13289 + *    For licensing information please see the supplied COPYING file
13290 + *
13291 + */
13292 +
13293 +#ident "@(#)$Id: elandev_generic.c,v 1.115.2.2 2005/07/20 11:35:36 mike Exp $"
13294 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_generic.c,v $*/
13295 +
13296 +#include <qsnet/kernel.h>
13297 +#include <qsnet/kthread.h>
13298 +
13299 +#include <elan3/dma.h>
13300 +#include <elan3/elanregs.h>
13301 +#include <elan3/elandev.h>
13302 +#include <elan3/elanvp.h>
13303 +#include <elan3/elan3mmu.h>
13304 +#include <elan3/elanctxt.h>
13305 +#include <elan3/elandebug.h>
13306 +#include <elan3/elansyscall.h>
13307 +#include <elan3/urom_addrs.h>
13308 +#include <elan3/elan3ops.h>
13309 +
13310 +/*
13311 + * Module globals, configurable from system file.
13312 + */
13313 +u_int  elan3_debug                  = 0;
13314 +u_int  elan3_debug_console       = 0;
13315 +u_int  elan3_debug_buffer           = -1;
13316 +u_int  elan3_debug_ignore_dev       = 0;
13317 +u_int  elan3_debug_ignore_kcomm     = 0;
13318 +u_int  elan3_debug_ignore_ctxt      = 0;
13319 +u_int  elan3_debug_display_ctxt     = 0;
13320 +
13321 +int    eventint_punt_loops;
13322 +int    eventint_punt_ticks;
13323 +int    eventint_resched_ticks;
13324 +
13325 +static void InitialiseDmaBuffers (ELAN3_DEV *dev, ioaddr_t CmdPort);
13326 +static int  ProbeSdram (ELAN3_DEV *dev);
13327 +static void InitialiseSdram (ELAN3_DEV *dev);
13328 +static void ReEnableErrorInterrupts (void *arg);
13329 +void        PollForDmaHungup (void *arg);
13330 +static void elan3_event_interrupt (ELAN3_DEV *dev);
13331 +
13332 +/*
13333 + * BaseAddr is ptr to the start of a table aligned on a power of two byte address.
13334 + * SizePower must be in the range of 6 to 12. It defines the number of valid contexts as
13335 + * shown below.
13336 + *
13337 + * SizePower   Valid Contexts  Table size in bytes.
13338 + *     6            64               1k
13339 + *     7           128               2k
13340 + *     8           256               4K
13341 + *     9           512               8k
13342 + *    10          1024              16k
13343 + *    11          2048              32k
13344 + *    12          4096              64k
13345 + */
13346 +#define GEN_CONTEXT_PTR(BaseAddr, SizePower) (((E3_uint32) BaseAddr) | \
13347 +                                             (~((1 << ((SizePower) - 6)) - 1) & 0x3f))
13348 +
13349 +int
13350 +InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort)
13351 +{
13352 +    E3_IprocTrapHeader_BE   TrapCleanup[4];
13353 +    E3_ContextControlBlock  ContextControlBlock;
13354 +    sdramaddr_t             ptr;
13355 +    int                            res;
13356 +    int                            i;
13357 +
13358 +    eventint_punt_loops    = 100;
13359 +    eventint_punt_ticks    = (hz/100);
13360 +    eventint_resched_ticks = (hz/4);
13361 +    
13362 +    dev->Stats.Version     = ELAN3_STATS_VERSION;
13363 +    dev->Position.pos_mode = ELAN_POS_UNKNOWN;
13364 +
13365 +    /*
13366 +     * The elan should have already been reset, so the interrupt mask
13367 +     * should be 0 and the schedule status register should be set to
13368 +     * its initial state
13369 +     */
13370 +    ASSERT (dev->InterruptMask == 0);
13371 +    ASSERT ((read_reg32 (dev, Exts.SchCntReg) & HaltStopAndExtTestMask) == Sched_Initial_Value);
13372 +
13373 +    /*
13374 +     * Write any value here to clear out the half full and error bits of the command
13375 +     * overflow queues.
13376 +     */
13377 +    write_reg32 (dev, ComQueueStatus, 0);
13378 +
13379 +    /* Initialise the cache tags before touching the SDRAM */
13380 +    /* we initialise them to "map" the bottom of SDRAM */
13381 +    for (i = 0; i < E3_NumCacheLines; i++)
13382 +    {
13383 +       write_cache_tag (dev, Tags[i][0].Value, 0x0000000000000000ULL);
13384 +       write_cache_tag (dev, Tags[i][1].Value, 0x0000080000000000ULL);
13385 +       write_cache_tag (dev, Tags[i][2].Value, 0x0000100000000000ULL);
13386 +       write_cache_tag (dev, Tags[i][3].Value, 0x0000180000000000ULL);
13387 +    }
13388 +
13389 +#ifndef CONFIG_MPSAS
13390 +    for (i = 0; i < E3_NumCacheLines*(E3_CACHELINE_SIZE/sizeof(E3_uint64)); i++)
13391 +    {
13392 +       write_cache_set (dev, Set0[i], 0xcac1ecac1ecac1e0ULL);
13393 +       write_cache_set (dev, Set1[i], 0xcac1ecac1ecac1e1ULL);
13394 +       write_cache_set (dev, Set2[i], 0xcac1ecac1ecac1e2ULL);
13395 +       write_cache_set (dev, Set3[i], 0xcac1ecac1ecac1e3ULL);
13396 +    }
13397 +#endif
13398 +
13399 +    if ((res = ProbeSdram(dev)) != ESUCCESS)
13400 +       return (res);
13401 +
13402 +    /* Enable all cache sets before initialising the sdram allocators */
13403 +    write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg |= CONT_EN_ALL_SETS));
13404 +
13405 +    InitialiseSdram (dev);
13406 +
13407 +    dev->TAndQBase              = elan3_sdram_alloc (dev, ELAN3_TANDQ_SIZE);
13408 +    dev->ContextTable           = elan3_sdram_alloc (dev, ELAN3_CONTEXT_SIZE);
13409 +    dev->ContextTableSize       = ELAN3_NUM_CONTEXTS;
13410 +    dev->CommandPortTraps[0]    = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE);
13411 +    dev->CommandPortTraps[1]    = elan3_sdram_alloc (dev, ELAN3_COMMAND_TRAP_SIZE);
13412 +    dev->CurrentCommandPortTrap = 0;
13413 +
13414 +    PRINTF3 (DBG_DEVICE, DBG_CONFIG, "InitialiseElan: ContextTable %08lx TAndQ %08lx CommandPortTrap %08lx\n",
13415 +            dev->ContextTable, dev->TAndQBase, dev->CommandPortTraps[0]);
13416 +
13417 +    /* Allocate the thread amd dma trap areas */
13418 +    KMEM_ZALLOC (dev->ThreadTrap, THREAD_TRAP *, sizeof (THREAD_TRAP), TRUE);
13419 +    KMEM_ZALLOC (dev->DmaTrap, DMA_TRAP *, sizeof (DMA_TRAP), TRUE);
13420 +
13421 +    /* Allocate the ctxt table */
13422 +    KMEM_ZALLOC (dev->CtxtTable,  ELAN3_CTXT **, dev->ContextTableSize * sizeof ( ELAN3_CTXT *), TRUE);
13423 +
13424 +    /* Initialise halt queue list */
13425 +    dev->HaltOperationsTailpp   = &dev->HaltOperations;
13426 +
13427 +    /* From elan3/code/harness/elanstuff.c */
13428 +    /* Init the clock. */
13429 +    write_ureg64 (dev, Clock.NanoSecClock, 0);
13430 +    
13431 +    /* Init the instruction count reg. */
13432 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
13433 +    
13434 +    /* Init the stats control reg. Must be done before the count regs.*/
13435 +    write_ureg32 (dev, StatCont.StatsControl, 0);
13436 +    
13437 +    /* Init the stats count regs. */
13438 +    write_ureg32 (dev, StatCounts[0].s.StatsCount, 0);
13439 +    write_ureg32 (dev, StatCounts[1].s.StatsCount, 0);
13440 +    write_ureg32 (dev, StatCounts[2].s.StatsCount, 0);
13441 +    write_ureg32 (dev, StatCounts[3].s.StatsCount, 0);
13442 +    write_ureg32 (dev, StatCounts[4].s.StatsCount, 0);
13443 +    write_ureg32 (dev, StatCounts[5].s.StatsCount, 0);
13444 +    write_ureg32 (dev, StatCounts[6].s.StatsCount, 0);
13445 +    write_ureg32 (dev, StatCounts[7].s.StatsCount, 0);
13446 +    
13447 +    /*
13448 +     * Initialise the Context_Ptr and Fault_Base_Ptr
13449 +     */
13450 +    write_reg32 (dev, Fault_Base_Ptr, dev->TAndQBase + offsetof(E3_TrapAndQueue, IProcSysCntx));
13451 +    write_reg32 (dev, Context_Ptr, GEN_CONTEXT_PTR (dev->ContextTable, ELAN3_LN2_NUM_CONTEXTS));
13452 +
13453 +    /* scrub the TProc Registers */
13454 +    for (i = 0; i < 8; i++)
13455 +       write_reg32 (dev, Globals[i], 0xdeadbabe);
13456 +    for (i = 0; i < 8; i++)
13457 +       write_reg32 (dev, Outs[i], 0xdeadbabe);
13458 +    for (i = 0; i < 8; i++)
13459 +       write_reg32 (dev, Locals[i], 0xdeadbabe);
13460 +    for (i = 0; i < 8; i++)
13461 +       write_reg32 (dev, Ins[i], 0xdeadbabe);
13462 +
13463 +    /*
13464 +     * Initialise the Queue pointers.  Arrange them so that the starting positions are
13465 +     * farthest apart in one set of the cache. Thus 512 bytes apart,  but with cntx0
13466 +     * thread the same as the interrupt queue.
13467 +     */
13468 +    write_reg32 (dev, TProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0]));
13469 +    write_reg32 (dev, TProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxThreadQueue[0xc0]));
13470 +    write_reg32 (dev, TProc_SysCntx_FPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80]));
13471 +    write_reg32 (dev, TProc_SysCntx_BPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0x80]));
13472 +    
13473 +    write_reg32 (dev, DProc_NonSysCntx_FPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]));
13474 +    write_reg32 (dev, DProc_NonSysCntx_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]));
13475 +    write_reg32 (dev, DProc_SysCntx_FPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10]));
13476 +    write_reg32 (dev, DProc_SysCntx_BPtr,    dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0x10]));
13477 +    
13478 +    dev->Event_Int_Queue_FPtr = dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80]);
13479 +    write_reg32 (dev, Event_Int_Queue_FPtr, dev->Event_Int_Queue_FPtr);
13480 +    write_reg32 (dev, Event_Int_Queue_BPtr, dev->TAndQBase + offsetof (E3_TrapAndQueue, EventIntQueue[0x80]));
13481 +    
13482 +    
13483 +    /* Initialise Input_Trap_Base to last 8 Kbytes of trap area, uCode adds the right offset */
13484 +    write_reg32 (dev, Input_Trap_Base, dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxThreadQueue[0]));
13485 +    
13486 +    /* Ptr to word used to save the SP to when a thread deschedules */
13487 +    write_reg32 (dev, Thread_SP_Save_Ptr, dev->TAndQBase + offsetof (E3_TrapAndQueue, Thread_SP_Save));
13488 +    
13489 +    /* Initialise the command trap base */
13490 +    write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[0]);
13491 +    
13492 +    /* Initialise the set event tracing registers */
13493 +    write_reg32 (dev, Event_Trace_Ptr, 0);
13494 +    write_reg32 (dev, Event_Trace_Mask, 0);
13495 +    
13496 +    /* Initialise Tlb_Line_Value to zero. The TLB cannot be read while either the */
13497 +    /* uCode or thread proc might be running. Must be set to 0. */
13498 +    write_reg64 (dev, Tlb_Line_Value, 0);
13499 +
13500 +    /* Control register. Cache everything, Enable MMU, RefreshRate=3, CasLatency=1, StartSDR */
13501 +    dev->Cache_Control_Reg |= CONT_MMU_ENABLE | CONT_EN_ALL_SETS | CONT_CACHE_ALL | CONT_ENABLE_ECC;
13502 +
13503 +#if ELAN3_PAGE_SHIFT == 13
13504 +    dev->Cache_Control_Reg |= CONT_ENABLE_8K_PAGES;
13505 +#endif
13506 +
13507 +    write_reg32 (dev, Cache_Control_Reg.ContReg,  dev->Cache_Control_Reg);
13508 +
13509 +    /*
13510 +     * Initialise the context table to be discard for all contexts
13511 +     */
13512 +    ContextControlBlock.rootPTP  = 0;
13513 +    ContextControlBlock.filter   = E3_CCB_DISCARD_ALL;
13514 +    ContextControlBlock.VPT_mask = 0;
13515 +    ContextControlBlock.VPT_ptr  = 0;
13516 +
13517 +    for (i = 0, ptr = dev->ContextTable; i < ELAN3_NUM_CONTEXTS; i++, ptr += sizeof (E3_ContextControlBlock))
13518 +       elan3_sdram_copyl_to_sdram (dev, &ContextControlBlock, ptr, sizeof (E3_ContextControlBlock));
13519 +
13520 +    /* From elan3/code/trap_handler/init.c */
13521 +    /*
13522 +     * Initialise the Trap And Queue area in Elan SDRAM.
13523 +     */
13524 +    TrapCleanup[0].s.TrTypeCntx.TypeContext = 0;
13525 +    TrapCleanup[0].s.TrAddr                = 0;
13526 +    TrapCleanup[0].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13527 +    TrapCleanup[0].s.TrData0               = 0;
13528 +    TrapCleanup[1].s.TrTypeCntx.TypeContext = 0;
13529 +    TrapCleanup[1].s.TrAddr                = 0;
13530 +    TrapCleanup[1].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13531 +    TrapCleanup[1].s.TrData0               = 0;
13532 +    TrapCleanup[2].s.TrTypeCntx.TypeContext = 0;
13533 +    TrapCleanup[2].s.TrAddr                = 0;
13534 +    TrapCleanup[2].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13535 +    TrapCleanup[2].s.TrData0               = 0;
13536 +    TrapCleanup[3].s.TrTypeCntx.TypeContext = 0;
13537 +    TrapCleanup[3].s.TrAddr                = 0;
13538 +    TrapCleanup[3].s.IProcTrapStatus.Status = CRC_STATUS_GOOD;
13539 +    TrapCleanup[3].s.TrData0               = 0;
13540 +
13541 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FaultContext),  0);
13542 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx.s.FSR.Status), 0);
13543 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FaultContext), 0);
13544 +    elan3_sdram_writel (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx.s.FSR.Status), 0);
13545 +    
13546 +    /* Must now zero all the FSRs so that a subsequent Fault can be seen */ 
13547 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, CProc), 16);
13548 +
13549 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc), 16);
13550 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0), 64);
13551 +    
13552 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16);
13553 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16);
13554 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16);
13555 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16);
13556 +
13557 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]), 64);
13558 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]), 64);
13559 +
13560 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]), 64);
13561 +    elan3_sdram_copyq_to_sdram (dev, TrapCleanup, dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]), 64);
13562 +
13563 +    InitialiseDmaBuffers(dev, CmdPort);
13564 +
13565 +    /* reserve a halt operation for flushing the context filter */
13566 +    ReserveHaltOperations (dev, 1, TRUE);
13567 +
13568 +    /* Allow the Thread/Dma to run */
13569 +    CLEAR_SCHED_STATUS (dev, HaltThread | HaltDmas);
13570 +
13571 +    /* Enable All Interrupts */
13572 +    SET_INT_MASK (dev, (INT_PciMemErr | INT_SDRamInt | INT_EventInterrupt | INT_LinkError | INT_ComQueue |
13573 +                       INT_TProc | INT_CProc | INT_DProc | INT_IProcCh1NonSysCntx | 
13574 +                       INT_IProcCh1SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh0SysCntx));
13575 +
13576 +    /* Take the link out of boundary scan */
13577 +    SET_SCHED_LINK_VALUE (dev, 0, 0);
13578 +    
13579 +    /* And clear any link errors */
13580 +    PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
13581 +
13582 +    /* XXXX: clear discard context 0,  AFTER setting up the kernel comms */
13583 +    CLEAR_SCHED_STATUS (dev, DiscardSysCntxIn | DiscardNonSysCntxIn);
13584 +
13585 +    /* Start a thread to handle excessive Event Interrupts */
13586 +    if (kernel_thread_create (elan3_event_interrupt, (caddr_t) dev) == NULL)
13587 +    {
13588 +       panic ("InitialiseElan: cannot start elan3_event_interrupt\n");
13589 +       return (EFAIL);
13590 +    }
13591 +    dev->EventInterruptThreadStarted = 1;
13592 +
13593 +    ReserveHaltOperations (dev, 1, TRUE);
13594 +
13595 +    PollForDmaHungup (dev);
13596 +
13597 +#if defined(IOPROC_PATCH_APPLIED) && !defined(NO_PTRACK)
13598 +    /* Register the device and stats with elanmod for RMS
13599 +     * but only if we've got the coproc patch applied */
13600 +    dev->DeviceIdx = elan_dev_register(&dev->Devinfo, &elan3_dev_ops, (void *) dev);
13601 +    
13602 +    elan3_register_dev_stats(dev);
13603 +#endif
13604 +
13605 +    return (ESUCCESS);
13606 +}
13607 +
13608 +static void
13609 +InitialiseDmaBuffers(ELAN3_DEV *dev, ioaddr_t CmdPort)
13610 +{
13611 +   register int i;
13612 +
13613 +   /* GNAT sw-elan3/3908:
13614 +    * Clear down the power on state of the Dma_Desc registers to make sure we don't
13615 +    * try and interpret them when a trap happens.
13616 +    */
13617 +   write_reg32 (dev, Dma_Desc.dma_type,            0);
13618 +   write_reg32 (dev, Dma_Desc.dma_size,            0);
13619 +   write_reg32 (dev, Dma_Desc.dma_source,          0);
13620 +   write_reg32 (dev, Dma_Desc.dma_dest,            0);
13621 +   write_reg32 (dev, Dma_Desc.dma_destEvent,       0);
13622 +   write_reg32 (dev, Dma_Desc.dma_destCookieVProc, 0);
13623 +   write_reg32 (dev, Dma_Desc.dma_srcEvent,        0);
13624 +   write_reg32 (dev, Dma_Desc.dma_srcCookieVProc,  0);
13625 +   
13626 +   /*
13627 +    * The following is a sequence of writes to remove X's from the dma buffers and 
13628 +    * registers. It is only safe to write these registers after reset and before any
13629 +    * dma's have been issued. The chip will NOT function correctly if they are written at
13630 +    * any other time or in a different order.
13631 +    */
13632 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
13633 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdDmaType, 0);
13634 +   write_reg64 (dev, Exts.Dmas.DmaWrs.ResetAckNLdBytesToWr, ((u_longlong_t)0x1000) << 32);
13635 +   write_reg64 (dev, Exts.Dmas.DmaWrs.LdBytesToRd, ((u_longlong_t)0x100) << 32);
13636 +
13637 +   for (i=0;i<(4*8);i++)
13638 +       write_reg64 (dev, Dma_Alignment_Port[0], 0);
13639 +
13640 +   /*
13641 +    * This is used to clear out X's from some of the trap registers. This is required to
13642 +    * prevent the first traps from possibly writing X's into the SDram and upsetting the
13643 +    * ECC value. It requires that the trap save area registers have been set up but does
13644 +    * not require any translations to be ready.
13645 +    */
13646 +   writel (-1, (void *)(CmdPort + offsetof (E3_CommandPort, SetEvent)));
13647 +   while ((read_reg32 (dev, Exts.InterruptReg) & INT_CProc) == 0)
13648 +   {
13649 +       mb();
13650 +       DELAY (1);
13651 +   }
13652 +
13653 +   write_reg32 (dev, CProc_TrapSave_Addr, dev->CommandPortTraps[dev->CurrentCommandPortTrap]);
13654 +   
13655 +   PULSE_SCHED_STATUS(dev, RestartCProc);
13656 +}
13657 +
13658 +void
13659 +FinaliseElan (ELAN3_DEV *dev)
13660 +{
13661 +    ELAN3_PTBL_GR *ptg;
13662 +    ELAN3_HALTOP  *op;
13663 +    ELAN3_HALTOP  *chain = NULL;
13664 +    int           bank;
13665 +    int                  indx;
13666 +    int                  size;
13667 +    unsigned long flags;
13668 +    int           level;
13669 +
13670 +#if defined(IOPROC_PATCH_APPLIED) && !defined(NO_PTRACK)
13671 +    elan_stats_deregister (dev->StatsIndex);
13672 +    elan_dev_deregister(&dev->Devinfo);
13673 +#endif
13674 +
13675 +    /* Cancel the dma poller */
13676 +    cancel_timer_fn (&dev->DmaPollTimeoutId);
13677 +
13678 +    /* release it's halt operation */
13679 +    ReleaseHaltOperations (dev, 1);
13680 +
13681 +    /* stop all kernel threads */
13682 +    dev->ThreadsShouldStop = 1;
13683 +
13684 +    spin_lock_irqsave (&dev->IntrLock, flags);
13685 +    while (dev->EventInterruptThreadStarted && !dev->EventInterruptThreadStopped)
13686 +    {
13687 +       kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock);
13688 +       kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags);
13689 +    }
13690 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
13691 +
13692 +    /* Set the interrupt mask to 0 and the schedule control register to run nothing */
13693 +    SET_INT_MASK (dev, 0);
13694 +    SET_SCHED_STATUS (dev, DiscardNonSysCntxIn | DiscardSysCntxIn | HaltThread | HaltDmas);
13695 +
13696 +    /* Cancel any link error timeout */
13697 +    if (timer_fn_queued(&dev->ErrorTimeoutId))
13698 +       cancel_timer_fn (&dev->ErrorTimeoutId);
13699 +
13700 +    /* Free of and page tables that have been allocated */
13701 +    spin_lock (&dev->PtblGroupLock);
13702 +    for(level=0; level<4; level++) 
13703 +    {
13704 +       while ((ptg = dev->Level[level].PtblGroupList) != NULL)
13705 +       {
13706 +           dev->Level[level].PtblGroupList = ptg->pg_next;
13707 +
13708 +           elan3_sdram_free (dev, ptg->pg_addr, PTBL_GROUP_SIZE);
13709 +           FREE_PTBL_GR(ptg);
13710 +       }
13711 +    }
13712
13713 +    spin_unlock (&dev->PtblGroupLock);
13714 +
13715 +    /* Free of all halt operations */
13716 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
13717 +    while ((op = dev->FreeHaltOperations) != NULL)
13718 +    {
13719 +       dev->FreeHaltOperations = op->Next;
13720 +
13721 +       /* Keep a list of 'freed' ops for later KMEM_FREE call */
13722 +       op->Next = chain;
13723 +       chain = op;
13724 +    }
13725 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
13726 +
13727 +    /* Have now dropped the spinlock - can call KMEM_FREE */
13728 +    while ((op = chain) != NULL)
13729 +    {
13730 +       chain = op->Next;
13731 +
13732 +       KMEM_FREE (op, sizeof (ELAN3_HALTOP));
13733 +    }
13734 +       
13735 +    /* Free of the ctxt table */
13736 +    KMEM_FREE (dev->CtxtTable,  dev->ContextTableSize * sizeof (ELAN3_CTXT *));
13737 +
13738 +    /* Free of the thread and dma trap areas */
13739 +    KMEM_FREE (dev->ThreadTrap, sizeof (THREAD_TRAP));
13740 +    KMEM_FREE (dev->DmaTrap, sizeof (DMA_TRAP));
13741 +
13742 +    /* Free of the memsegs and pages */
13743 +    for (bank = 0; bank < ELAN3_SDRAM_NUM_BANKS; bank++)
13744 +    {
13745 +       if (dev->SdramBanks[bank].Size)
13746 +       {
13747 +           UnmapDeviceRegister (dev, &dev->SdramBanks[bank].Handle);
13748 +
13749 +           KMEM_FREE (dev->SdramBanks[bank].PtblGroups, sizeof (ELAN3_PTBL_GR *) * (dev->SdramBanks[bank].Size / PTBL_GROUP_SIZE));
13750 +
13751 +           for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= dev->SdramBanks[bank].Size; indx++, size <<= 1)
13752 +               KMEM_FREE (dev->SdramBanks[bank].Bitmaps[indx], sizeof (bitmap_t)*BT_BITOUL(dev->SdramBanks[bank].Size/size));
13753 +       }
13754 +    }
13755 +    elan3_sdram_fini (dev);
13756 +}
13757 +
13758 +#define INIT_PATTERN(offset)   (0xBEEC000000000011ull | ((u_longlong_t)(offset)) << 16)
13759 +#define FREE_PATTERN(offset)   (0xBEEC000000000022ull | ((u_longlong_t)(offset)) << 16)
13760 +
13761 +static int
13762 +ProbeSdram (ELAN3_DEV *dev)
13763 +{
13764 +    int                          Instance;
13765 +    u_int                Bank;
13766 +    int                          MemSpaceSize;
13767 +    int                          BankMaxSize;
13768 +    int                          BankOffset;
13769 +    int                          BankSize;
13770 +    ioaddr_t             BankBase;
13771 +    ioaddr_t             PageBase;
13772 +    ioaddr_t             PageBase1;
13773 +    ioaddr_t             PageBase2;
13774 +    DeviceMappingHandle   BankHandle;
13775 +    DeviceMappingHandle   PageHandle;
13776 +    DeviceMappingHandle   PageHandle1;
13777 +    DeviceMappingHandle   PageHandle2;
13778 +    register int          i;
13779 +    u_longlong_t         value;
13780 +    extern int            sdram_bank_limit;
13781 +
13782 +    /* NOTE: The Cache control register is set to only enable cache set 0 */
13783 +    /*       and has ECC disabled */
13784 +    Instance = dev->Instance;
13785 +    
13786 +    /* Determine the size of the SDRAM from the BAR register */
13787 +    if (DeviceRegisterSize (dev, ELAN3_BAR_SDRAM, &MemSpaceSize) != ESUCCESS)
13788 +    {
13789 +       printk ("elan%d: cannot determine SDRAM size\n", Instance);
13790 +       return (EFAIL);
13791 +    }
13792 +
13793 +    elan3_sdram_init (dev);
13794 +
13795 +    BankMaxSize = MemSpaceSize / ELAN3_SDRAM_NUM_BANKS;
13796 +
13797 +    for (Bank = 0; Bank < ELAN3_SDRAM_NUM_BANKS; Bank++)
13798 +    {
13799 +       BankOffset = Bank * BankMaxSize;
13800 +       
13801 +       PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: Probing RAM Bank %d (max size %08x)\n", Instance, Bank, BankMaxSize);
13802 +       
13803 +       /* Probe the memory bank by mapping two pages that are the size of the cache apart */
13804 +       /* this guarantees that when we store the second pattern we displace the first pattern */
13805 +       /* from the cache, also store the second pattern again the size of the cache up again */
13806 +       /* to ensure that the SDRAM wires don't stay floating at pattern1 */
13807 +
13808 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, PAGESIZE, &BankHandle) != ESUCCESS)
13809 +       {
13810 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
13811 +           continue;
13812 +       }
13813 +       
13814 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase1, BankOffset + ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle1) != ESUCCESS)
13815 +       {
13816 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
13817 +           UnmapDeviceRegister (dev, &BankHandle);
13818 +           continue;
13819 +       }
13820 +
13821 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase2, BankOffset + 2*ELAN3_MAX_CACHE_SIZE, PAGESIZE, &PageHandle2) != ESUCCESS)
13822 +       {
13823 +           printk ("elan%d: Cannot probe memory bank %d\n", Instance, Bank);
13824 +           UnmapDeviceRegister (dev, &BankHandle);
13825 +           UnmapDeviceRegister (dev, &PageHandle1);
13826 +           continue;
13827 +       }
13828 +
13829 +#define PATTERN0       (0x5555555555555555ull)
13830 +#define PATTERN1       (0xAAAAAAAAAAAAAAAAull)
13831 +       writeq (PATTERN0, (u_longlong_t *) BankBase);
13832 +       writeq (PATTERN1, (u_longlong_t *) PageBase1);
13833 +       writeq (PATTERN1, (u_longlong_t *) PageBase2);
13834 +
13835 +       mmiob();
13836 +
13837 +       value = readq ((u_longlong_t *) BankBase);
13838 +
13839 +       if (value != PATTERN0)
13840 +       {
13841 +           UnmapDeviceRegister (dev, &BankHandle);
13842 +           UnmapDeviceRegister (dev, &PageHandle1);
13843 +           UnmapDeviceRegister (dev, &PageHandle2);
13844 +           continue;
13845 +       }
13846 +
13847 +       writeq (PATTERN1, (u_longlong_t *) BankBase);
13848 +       writeq (PATTERN0, (u_longlong_t *) PageBase1);
13849 +       writeq (PATTERN0, (u_longlong_t *) PageBase2);
13850 +
13851 +       mmiob();
13852 +       
13853 +       value = readq ((u_longlong_t *) BankBase);
13854 +       if (value != PATTERN1)
13855 +       {
13856 +           UnmapDeviceRegister (dev, &BankHandle);
13857 +           UnmapDeviceRegister (dev, &PageHandle1);
13858 +           UnmapDeviceRegister (dev, &PageHandle2);
13859 +           continue;
13860 +       }
13861 +       UnmapDeviceRegister (dev, &PageHandle1);
13862 +       UnmapDeviceRegister (dev, &PageHandle2);
13863 +
13864 +       /* Bank is present, so work out its size,  we store the maximum size at the base */
13865 +       /* and then store the address at each address  on every power of two address until */
13866 +       /* we reach the minimum mappable size (PAGESIZE), we then read back the value at the */
13867 +       /* base to determine the bank size */
13868 +       writeq ((u_longlong_t) BankMaxSize, (u_longlong_t *) BankBase);
13869 +
13870 +       for (BankSize = (BankMaxSize>>1); BankSize > PAGESIZE; BankSize >>= 1)
13871 +       {
13872 +           if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &PageBase, BankOffset + BankSize, PAGESIZE, &PageHandle) == ESUCCESS)
13873 +           {
13874 +               writeq (BankSize, (u_longlong_t *) PageBase);
13875 +               UnmapDeviceRegister (dev, &PageHandle);
13876 +           }
13877 +       }
13878 +       mmiob();
13879 +
13880 +       BankSize = (u_long) readq ((u_longlong_t *) BankBase);
13881 +       
13882 +       if (sdram_bank_limit == 0 || BankSize <= (sdram_bank_limit * 1024 * 1024))
13883 +           printk ("elan%d: memory bank %d is %dK\n", Instance, Bank, BankSize / 1024);
13884 +       else
13885 +       {
13886 +           BankSize = (sdram_bank_limit * 1024 * 1024);
13887 +           printk ("elan%d: limit memory bank %d to %dK\n", Instance, Bank, BankSize / 1024);
13888 +       }
13889 +
13890 +       UnmapDeviceRegister (dev, &BankHandle);
13891 +       
13892 +       /* Now map all of this bank into the kernel */
13893 +       if (MapDeviceRegister (dev, ELAN3_BAR_SDRAM, &BankBase, BankOffset, BankSize, &BankHandle) != ESUCCESS)
13894 +       {
13895 +           printk ("elan%d: Cannot initialise memory bank %d\n", Instance, Bank);
13896 +           continue;
13897 +       }
13898 +       
13899 +       dev->SdramBanks[Bank].Size    = BankSize;
13900 +       dev->SdramBanks[Bank].Mapping = BankBase;
13901 +       dev->SdramBanks[Bank].Handle  = BankHandle;
13902 +
13903 +#ifndef CONFIG_MPSAS
13904 +       /* Initialise it for ECC */
13905 +       preemptable_start {
13906 +           for (i = 0; i < BankSize; i += 8)
13907 +           {
13908 +               elan3_sdram_writeq (dev, (Bank << ELAN3_SDRAM_BANK_SHIFT) | i, INIT_PATTERN(BankOffset+i));
13909 +
13910 +               preemptable_check();
13911 +           }
13912 +       } preemptable_end;
13913 +#endif
13914 +    }
13915 +    
13916 +    return (ESUCCESS);
13917 +}
13918 +
13919 +static void
13920 +InitialiseSdram (ELAN3_DEV *dev)
13921 +{
13922 +    int indx, size, b;
13923 +
13924 +    for (b = 0; b < ELAN3_SDRAM_NUM_BANKS; b++)
13925 +    {
13926 +       ELAN3_SDRAM_BANK *bank = &dev->SdramBanks[b];
13927 +
13928 +       if (bank->Size == 0)
13929 +           continue;
13930 +
13931 +       /* allocate a ptbl group pointer for each possible ptbl group in this bank */
13932 +       KMEM_ZALLOC (bank->PtblGroups, ELAN3_PTBL_GR **, sizeof (ELAN3_PTBL_GR *) * bank->Size/PTBL_GROUP_SIZE, TRUE);
13933 +           
13934 +       /* allocate the buddy allocator bitmaps */
13935 +       for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->Size; indx++, size <<= 1)
13936 +           KMEM_ZALLOC (bank->Bitmaps[indx], bitmap_t *, sizeof (bitmap_t)*BT_BITOUL(bank->Size/size), TRUE);
13937 +           
13938 +       /* and add it to the sdram buddy allocator */
13939 +       elan3_sdram_add (dev, (b << ELAN3_SDRAM_BANK_SHIFT), (b << ELAN3_SDRAM_BANK_SHIFT) + bank->Size);
13940 +    }
13941 +}
13942 +
13943 +#include <elan3/vpd.h>
13944 +
13945 +int
13946 +ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency)
13947 +{
13948 +    DeviceMappingHandle RomHandle;
13949 +    unsigned char      *RomBase;
13950 +    unsigned char      *PCIDataPtr;
13951 +    unsigned char      *VPDPtr;
13952 +    unsigned char      *lim;
13953 +    int                        type;
13954 +    int                        i, len, len2;
13955 +    char               name[3] = "XX";
13956 +    char               value[256];
13957 +    int                        finished = 0;
13958 +
13959 +    
13960 +    /* default value for CAS latency is 3 */
13961 +    (*CasLatency) = CAS_LATENCY_3;
13962 +
13963 +    if (MapDeviceRegister (dev, ELAN3_BAR_EBUS, (ioaddr_t *) &RomBase, ELAN3_EBUS_ROM_OFFSET, ELAN3_EBUS_ROM_SIZE, &RomHandle) != ESUCCESS)
13964 +    {
13965 +       printk ("elan%d: Cannot map ROM\n", dev->Instance);
13966 +       return (EFAIL);
13967 +    }
13968 +    
13969 +    /* Check the ROM signature */
13970 +    if (RomBase[0] != 0x55 || RomBase[1] != 0xAA)
13971 +    {
13972 +       printk ("elan%d: Invalid ROM signature %02x %02x\n", dev->Instance, RomBase[0], RomBase[1]);
13973 +       return (ESUCCESS);
13974 +    }
13975 +    
13976 +    PCIDataPtr = RomBase + ((RomBase[0x19] << 8) | RomBase[0x18]);
13977 +
13978 +    /* check the pci data structure */
13979 +    if (PCIDataPtr[0] != 'P' || PCIDataPtr[1] != 'C' || PCIDataPtr[2] != 'I' || PCIDataPtr[3] != 'R')
13980 +    {
13981 +       printk ("elan%d: Invalid PCI Data structure\n", dev->Instance);
13982 +       return (ESUCCESS);
13983 +    }
13984 +    
13985 +    /* Extract the VPD pointer */
13986 +    VPDPtr = RomBase + ((PCIDataPtr[9] << 8) | PCIDataPtr[8]);
13987 +
13988 +    if (VPDPtr == RomBase)
13989 +    {
13990 +       printk ("elan%d: No Vital Product Data\n", dev->Instance);
13991 +       return (ESUCCESS);
13992 +    }
13993 +
13994 +    while (! finished)
13995 +    {
13996 +       type = *VPDPtr++;
13997 +       
13998 +       if (type & LARGE_RESOURCE_BIT)
13999 +       {
14000 +           len = *(VPDPtr++);
14001 +           len += *(VPDPtr++) << 8;
14002 +
14003 +           switch (type & ~LARGE_RESOURCE_BIT)
14004 +           {
14005 +           case LARGE_RESOURCE_STRING:
14006 +               printk ("elan%d: ", dev->Instance);
14007 +               for (i = 0; i < len; i++)
14008 +                   printk ("%c", *VPDPtr++);
14009 +               printk ("\n");
14010 +               break;
14011 +               
14012 +           case LARGE_RESOURCE_VENDOR_DEFINED:
14013 +               VPDPtr += len;
14014 +               break;
14015 +               
14016 +           case LARGE_RESOURCE_VITAL_PRODUCT_DATA:
14017 +               for (lim = VPDPtr + len; VPDPtr < lim; )
14018 +               {
14019 +                   name[0] = *VPDPtr++;
14020 +                   name[1] = *VPDPtr++;
14021 +                   len2    = *VPDPtr++;
14022 +
14023 +                   for (i = 0; i < len2 && VPDPtr < lim; i++)
14024 +                       value[i] = *VPDPtr++;
14025 +                   value[i] = '\0';
14026 +
14027 +                   if (! strcmp (name, "SN"))
14028 +                       printk ("elan%d: Serial Number - %s\n", dev->Instance, value);
14029 +
14030 +                   if (! strcmp (name, "Z0"))
14031 +                       (*CasLatency) = (strcmp (value, "CAS_LATENCY_2") ? CAS_LATENCY_3 : CAS_LATENCY_2);
14032 +               }
14033 +               break;
14034 +               
14035 +           default:
14036 +               printk ("elan%d: unknown large resource %x\n", dev->Instance, type);
14037 +               finished = 1;
14038 +               break;
14039 +           }
14040 +       }
14041 +       else
14042 +       {
14043 +           len = type & 0x7;
14044 +
14045 +           switch (type >> 3)
14046 +           {
14047 +           case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID:
14048 +               VPDPtr += len;
14049 +               break;
14050 +
14051 +           case SMALL_RESOURCE_VENDOR_DEFINED:
14052 +               VPDPtr += len;
14053 +               break;
14054 +               
14055 +           case SMALL_RESOURCE_END_TAG:
14056 +               finished = 1;
14057 +               break;
14058 +               
14059 +           default:
14060 +               printk ("elan%d: unknown small resource %x\n", dev->Instance, type >> 3);
14061 +               finished = 1;
14062 +               break;
14063 +           }
14064 +       }
14065 +    }
14066 +    
14067 +    UnmapDeviceRegister (dev, &RomHandle);
14068 +    return (ESUCCESS);
14069 +}
14070 +
14071 +void
14072 +ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, ELAN3_PTBL_GR *ptg)
14073 +{
14074 +    int bank = offset >> ELAN3_SDRAM_BANK_SHIFT;
14075 +    
14076 +    dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE] = ptg;
14077 +}
14078 +
14079 +ELAN3_PTBL_GR *
14080 +ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset)
14081 +{
14082 +    int bank = offset >> ELAN3_SDRAM_BANK_SHIFT;
14083 +    
14084 +    return (dev->SdramBanks[bank].PtblGroups[(offset & (ELAN3_SDRAM_BANK_SIZE-1)) / PTBL_GROUP_SIZE]);
14085 +}
14086 +
14087 +void
14088 +ElanFlushTlb (ELAN3_DEV *dev)
14089 +{
14090 +    unsigned long flags;
14091 +
14092 +    spin_lock_irqsave (&dev->TlbLock, flags);
14093 +    BumpStat (dev, TlbFlushes);
14094 +
14095 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH);
14096 +    mmiob();
14097 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
14098 +
14099 +    while (! (read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED))
14100 +       mb();
14101 +}
14102 +
14103 +void
14104 +KillNegativeDma (ELAN3_DEV *dev, void *arg)
14105 +{
14106 +    DMA_TRAP     *trap    = dev->DmaTrap;
14107 +    E3_Status_Reg status;
14108 +    sdramaddr_t   FPtr, BPtr;
14109 +    sdramaddr_t   Base, Top;
14110 +    unsigned long flags;
14111 +
14112 +    spin_lock_irqsave (&dev->IntrLock, flags);
14113 +
14114 +    ASSERT (read_reg32 (dev, Exts.InterruptReg) & INT_DProcHalted);
14115 +
14116 +    /* Initialise the trap to deliver to the offending user process */
14117 +    trap->Status.Status   = read_reg32 (dev, Exts.DProcStatus.Status);
14118 +    trap->PacketInfo.Value = 0;
14119 +    
14120 +    bzero (&trap->FaultSave, sizeof (trap->FaultSave));
14121 +    bzero (&trap->Data0, sizeof (trap->Data0));
14122 +    bzero (&trap->Data1, sizeof (trap->Data1));
14123 +    bzero (&trap->Data2, sizeof (trap->Data2));
14124 +    bzero (&trap->Data3, sizeof (trap->Data3));
14125 +
14126 +    /* run down the kernel dma run queue and panic on a -ve length dma */
14127 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
14128 +    BPtr  = read_reg32 (dev, DProc_SysCntx_BPtr);
14129 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
14130 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
14131 +    
14132 +    while (FPtr != BPtr)
14133 +    {
14134 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
14135 +       
14136 +       if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
14137 +           panic ("KillNegativeDma: -ve sized kernel dma\n");
14138 +
14139 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
14140 +    }
14141 +
14142 +    /* run down the user dma run queue and "remove" any -ve length dma's */
14143 +    FPtr  = read_reg32 (dev, DProc_NonSysCntx_FPtr);
14144 +    BPtr  = read_reg32 (dev, DProc_NonSysCntx_BPtr);
14145 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[0]);
14146 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, NonSysCntxDmaQueue[E3_NonSysCntxQueueSize-1]);
14147 +    
14148 +    while (FPtr != BPtr)
14149 +    {
14150 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &trap->Desc, sizeof (E3_DMA_BE));
14151 +       
14152 +       if (trap->Desc.s.dma_size > E3_MAX_DMA_SIZE)
14153 +       {
14154 +           PRINTF3 (NULL, DBG_INTR, "KillNegativeDma: remove dma - context %d size %d SuspendAddr %x\n", 
14155 +                    trap->Desc.s.dma_u.s.Context, trap->Desc.s.dma_size, trap->Status.s.SuspendAddr);
14156 +
14157 +           trap->Status.s.TrapType = trap->Status.s.SuspendAddr;
14158 +           trap->Status.s.Context  = trap->Desc.s.dma_u.s.Context;
14159 +
14160 +           DeliverDProcTrap (dev, trap, 0);
14161 +
14162 +           /*
14163 +            * Remove the DMA from the queue by replacing it with one with
14164 +            * zero size and no events.
14165 +            *
14166 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
14167 +            * to mark the appropriate run queue as empty.
14168 +            */
14169 +           trap->Desc.s.dma_type            = 0;
14170 +           trap->Desc.s.dma_size            = 0;
14171 +           trap->Desc.s.dma_source          = (E3_Addr) 0;
14172 +           trap->Desc.s.dma_dest            = (E3_Addr) 0;
14173 +           trap->Desc.s.dma_destCookieVProc = (E3_Addr) 0;
14174 +           trap->Desc.s.dma_srcEvent        = (E3_Addr) 0;
14175 +           trap->Desc.s.dma_srcCookieVProc  = (E3_Addr) 0;
14176 +
14177 +           elan3_sdram_copyq_to_sdram (dev, &trap->Desc, FPtr, sizeof (E3_DMA_BE));
14178 +       }
14179 +
14180 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
14181 +    }
14182 +
14183 +    status.Status = read_reg32 (dev, Exts.DProcStatus.Status);
14184 +
14185 +    if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || 
14186 +       status.s.SuspendAddr == MI_DequeueSysCntxDma ||
14187 +       status.s.SuspendAddr == MI_DmaLoop)
14188 +    {
14189 +       PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: unlock dma processor\n");
14190 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
14191 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType,   0);
14192 +       mmiob();
14193 +       
14194 +       DELAY (10);
14195 +       
14196 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdAlignment, 0);
14197 +       write_reg32 (dev, Exts.Dmas.DmaWrs.LdDmaType,   0);
14198 +       mmiob();
14199 +    }
14200 +
14201 +    PRINTF0 (NULL, DBG_INTR, "KillNegativeDma: dma processor restarted\n");
14202 +
14203 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14204 +
14205 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1);
14206 +}
14207 +
14208 +void
14209 +ForceTProcTrap (ELAN3_DEV *dev, void *arg)
14210 +{
14211 +    printk ("elan%d: forced tproc trap .....\n", dev->Instance);
14212 +
14213 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 1);
14214 +}
14215 +
14216 +void
14217 +PollForDmaHungup (void *arg)
14218 +{
14219 +    ELAN3_DEV     *dev   = (ELAN3_DEV *) arg;
14220 +    unsigned long flags;
14221 +    E3_Status_Reg status;
14222 +    E3_uint32     insn1, insn3;
14223 +    register int  i;
14224 +
14225 +    if (read_reg32 (dev, Dma_Desc.dma_size) > E3_MAX_DMA_SIZE)
14226 +    {
14227 +       status.Status = read_reg32 (dev, Exts.DProcStatus);
14228 +
14229 +       PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: size %x SuspendAddr %x\n", read_reg32 (dev, Dma_Desc.dma_size), status.s.SuspendAddr);
14230 +
14231 +       if (status.s.SuspendAddr == MI_DequeueNonSysCntxDma || 
14232 +           status.s.SuspendAddr == MI_DequeueSysCntxDma ||
14233 +           status.s.SuspendAddr == MI_DmaLoop)
14234 +       {
14235 +           printk ("elan%d: PollForDmaHungup: size %x context %d SuspendAddr %x\n", 
14236 +                   dev->Instance, read_reg32 (dev, Dma_Desc.dma_size),
14237 +                   status.s.Context, status.s.SuspendAddr);
14238 +       
14239 +           PRINTF2 (NULL, DBG_INTR, "PollForDmaHungup: dma_size %x status %x\n",
14240 +                    read_reg32 (dev, Dma_Desc.dma_size), status.Status);
14241 +           
14242 +           spin_lock_irqsave (&dev->IntrLock, flags);
14243 +           QueueHaltOperation (dev, 0, NULL, INT_DProcHalted, KillNegativeDma, NULL);
14244 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
14245 +           
14246 +           return;
14247 +       }
14248 +    }
14249 +
14250 +    status.Status = read_reg32 (dev, Exts.TProcStatus);
14251 +    if (status.s.WakeupFunction == WakeupStopped)
14252 +    {
14253 +       E3_uint32 PC = read_reg32 (dev, ExecutePC);
14254 +
14255 +       /* See if it's likely that the thread is really "stuck" on a waitevent/break 
14256 +        * instruction ......... */
14257 +       for (i = 0; i < 10; i++)
14258 +       {
14259 +           status.Status = read_reg32 (dev, Exts.TProcStatus);
14260 +           insn1         = read_reg32 (dev, IBufferReg[1]);
14261 +           insn3         = read_reg32 (dev, IBufferReg[3]);
14262 +           
14263 +           if (! (status.s.WakeupFunction == WakeupStopped && read_reg32 (dev, ExecutePC) == PC &&     /* stopping and it could be a break/waitevent */
14264 +                  (insn1 == 0x81a00000 || insn3 == 0x81a00000 ||                                       /* break instruction */
14265 +                   insn1 == 0x81b00000 || insn3 == 0x81b00000)))                                       /* waitevent instruction  */
14266 +               break;
14267 +       }
14268 +
14269 +       if (i == 10)
14270 +       {
14271 +           printk ("elan%d: forcing tproc trap from %s instruction at pc %x\n", dev->Instance, 
14272 +                   (insn1 == 0x81a00000 || insn3 == 0x81a00000) ? "break" : "waitevent", PC);
14273 +
14274 +           spin_lock_irqsave (&dev->IntrLock, flags);
14275 +           QueueHaltOperation (dev, 0, NULL, INT_TProcHalted, ForceTProcTrap, NULL);
14276 +           spin_unlock_irqrestore (&dev->IntrLock, flags);
14277 +           return;
14278 +       }
14279 +    }
14280 +
14281 +    schedule_timer_fn (&dev->DmaPollTimeoutId, PollForDmaHungup, (void *) dev, 10);
14282 +}
14283 +
14284 +/*=======================================================================================*/
14285 +/*
14286 + * Interrupt handler.
14287 + */
14288 +static void
14289 +ReEnableErrorInterrupts (void *arg)
14290 +{
14291 +    ELAN3_DEV     *dev = (ELAN3_DEV *) arg;
14292 +    unsigned long flags;
14293 +
14294 +    spin_lock_irqsave (&dev->IntrLock, flags);
14295 +
14296 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
14297 +       ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
14298 +
14299 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "ReEnableErrorInterrupts: IntMask=%x\n", read_reg32 (dev, Exts.InterruptMask));
14300 +
14301 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14302 +}
14303 +
14304 +void
14305 +CheckForExcessiveErrorRate (ELAN3_DEV *dev)
14306 +{
14307 +    if (dev->ErrorTime == (lbolt/hz))
14308 +    {
14309 +       if (dev->ErrorsPerTick++ > 100)
14310 +       {
14311 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "CheckForExcessiveErrorRate: too many links errors, disabling interrupt\n");
14312 +
14313 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
14314 +
14315 +           schedule_timer_fn (&dev->ErrorTimeoutId, ReEnableErrorInterrupts, (void *) dev, hz);
14316 +       }
14317 +    }
14318 +    else
14319 +    {
14320 +       dev->ErrorTime     = (lbolt/hz);
14321 +       dev->ErrorsPerTick = 0;
14322 +    }
14323 +}
14324 +/*=======================================================================================*/
14325 +/*
14326 + * Interrupt handler.
14327 + */
14328 +static void
14329 +HandlePciMemErr (ELAN3_DEV *dev)
14330 +{
14331 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "HandlePciMemErr : masking out interrupt\n");
14332 +    
14333 +    ElanBusError (dev);
14334 +    panic ("elan pci memory error\n");
14335 +}
14336 +
14337 +static void
14338 +HandleSDRamInterrupt (ELAN3_DEV *dev)
14339 +{
14340 +    E3_uint32     EccStatus0 = read_reg32 (dev, ECC_STATUS0);
14341 +    E3_uint32     EccStatus1 = read_reg32 (dev, ECC_STATUS1);
14342 +    unsigned long flags;
14343 +
14344 +    PRINTF5 (DBG_DEVICE, DBG_INTR, "elan: ECC error - Addr=%x UE=%x CE=%x ME=%x Syn=%x\n",
14345 +            EccStatus0 & ECC_ADDR_MASK, EccStatus0 & ECC_UE_MASK, 
14346 +            EccStatus0 & ECC_CE_MASK, EccStatus0 & ECC_ME_MASK, 
14347 +            EccStatus1 & ECC_SYN_MASK);
14348 +
14349 +    if (EccStatus0 & (ECC_UE_MASK|ECC_CE_MASK))
14350 +    {
14351 +       printk ("elan%d: ECC memory error (Address=%08x Syndrome=%02x %s%s%s)\n",
14352 +               dev->Instance, 
14353 +               (EccStatus0 & ECC_ADDR_MASK), (EccStatus1 & ECC_SYN_MASK), 
14354 +               (EccStatus0 & ECC_UE_MASK) ? "Uncorrectable "   : "",
14355 +               (EccStatus0 & ECC_CE_MASK) ? "Correctable "     : "",
14356 +               (EccStatus0 & ECC_ME_MASK) ? "Multiple Errors " : "");
14357 +    }
14358 +
14359 +    if (EccStatus0 & ECC_UE_MASK)
14360 +       panic ("elan: Uncorrectable ECC memory error");
14361 +    if (EccStatus0 & ECC_CE_MASK)
14362 +       BumpStat (dev, CorrectableErrors);
14363 +    if (EccStatus0 & ECC_ME_MASK)
14364 +       BumpStat (dev, MultipleErrors);
14365 +
14366 +    /*
14367 +     * Clear the interrupt and reset the error flags.
14368 +     * Note. Might lose a UE or CE if it occurs between reading the status and
14369 +     *       clearing the interrupt. I don't think this matters very much as the
14370 +     *   status reg will only be used to identify a bad simm.
14371 +     */
14372 +
14373 +    spin_lock_irqsave (&dev->TlbLock, flags);
14374 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | CLEAR_SDRAM_ERROR);
14375 +    mmiob();
14376 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
14377 +
14378 +    CheckForExcessiveErrorRate (dev);
14379 +}
14380 +
14381 +static int
14382 +HandleEventInterrupt (ELAN3_DEV *dev, int nticks, unsigned long *flags)
14383 +{
14384 +    E3_uint32 Fptr  = dev->Event_Int_Queue_FPtr;
14385 +    E3_uint32 Bptr  = read_reg32 (dev, Event_Int_Queue_BPtr);                                          /* PCI read */
14386 +    long      tlim  = lbolt + nticks;
14387 +    long      count = 0;
14388 +    ELAN3_CTXT *ctxt;
14389 +
14390 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
14391 +    ASSERT ((dev->InterruptMask & INT_EventInterrupt) == 0);
14392 +           
14393 +    while (Fptr != Bptr)
14394 +    {
14395 +       while (Fptr != Bptr)
14396 +       {
14397 +           E3_EventInt_BE  EvInt;
14398 +           E3_uint32       Context;
14399 +
14400 +           /* If we're running in the interrupt handler and have seen a high
14401 +            * rate of event interrupts then punt to the thread  - however on 
14402 +            * Linux the elan interrupt handler can block the timer interrupt,
14403 +            * and so lbolt (jiffies) is not incremented, hence we punt after
14404 +            a number of loops instead */
14405 +#if defined(LINUX)
14406 +           if (in_interrupt() && ++count > eventint_punt_loops)
14407 +               return (EAGAIN);
14408 +#endif
14409 +
14410 +           if (nticks && ((int) (lbolt - tlim)) > 0)
14411 +           {
14412 +               PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x punting to thread\n", Fptr, Bptr);
14413 +               return (EAGAIN);
14414 +           }
14415 +
14416 +           elan3_sdram_copyq_from_sdram (dev, Fptr, (void *) &EvInt, 8);                               /* PCI read */
14417 +           
14418 +           /* The context number is held in the top 16 bits of the EventContext */
14419 +           Context = (EvInt.s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK;
14420 +           
14421 +           PRINTF2 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Context %d : Cookie %x\n", Context, EvInt.s.IntCookie);
14422 +           
14423 +           ctxt = ELAN3_DEV_CTX_TABLE(dev, Context);
14424 +
14425 +           /* Work out new fptr, and store it in the device, since we'll be dropping the IntrLock */
14426 +           Fptr = E3_EVENT_INTQ_NEXT(Fptr);
14427 +           dev->Event_Int_Queue_FPtr = Fptr;
14428 +
14429 +           if (ctxt == NULL)
14430 +           {
14431 +               PRINTF3 (DBG_DEVICE, DBG_INTR, "HandleEventInterrupt: Fptr %x Bptr %x context %d invalid\n",
14432 +                        Fptr, Bptr, Context);
14433 +               BumpStat (dev, InvalidContext);
14434 +           }
14435 +           else
14436 +           {
14437 +               BumpStat (dev, EventInterrupts);
14438 +               
14439 +               spin_unlock_irqrestore (&dev->IntrLock, *flags);
14440 +               QueueEventInterrupt (ctxt, EvInt.s.IntCookie);
14441 +               spin_lock_irqsave (&dev->IntrLock, *flags);
14442 +           }
14443 +           
14444 +           /* Re-read the FPtr, since we've dropped the IntrLock */
14445 +           Fptr = dev->Event_Int_Queue_FPtr;
14446 +           
14447 +           /* Store the new FPtr to the elan, this also clears the interrupt. */
14448 +           write_reg32 (dev, Event_Int_Queue_FPtr, Fptr);                                      /* PCI write */
14449 +
14450 +           mmiob();
14451 +       }
14452 +
14453 +       mb();
14454 +       Bptr = read_reg32 (dev, Event_Int_Queue_BPtr);                                          /* PCI read */
14455 +    }
14456 +
14457 +    return (ESUCCESS);
14458 +}
14459 +
14460 +int
14461 +SetLinkBoundaryScan (ELAN3_DEV *dev)
14462 +{
14463 +    int           res = ESUCCESS;
14464 +    unsigned long flags;
14465 +
14466 +    spin_lock_irqsave (&dev->IntrLock, flags);
14467 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
14468 +       res = EAGAIN;
14469 +    else
14470 +    {
14471 +       PRINTF0 (DBG_DEVICE, DBG_BSCAN, "SetLinkBoundaryScan: setting link into boundary scan mode\n");
14472 +
14473 +       /*
14474 +        * We're going to set the link into boundary scan mode,  so firstly
14475 +        * set the inputters to discard everything.
14476 +        */
14477 +       if (dev->DiscardAllCount++ == 0)
14478 +           SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL);
14479 +
14480 +       /*
14481 +        * Now disable the error interrupts
14482 +        */
14483 +       DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
14484 +       
14485 +       /*
14486 +        * And set the link into boundary scan mode, and drive
14487 +        * a reset token onto the link.
14488 +        */
14489 +       SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken);
14490 +    }
14491 +
14492 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14493 +
14494 +    return (res);
14495 +}
14496 +
14497 +void
14498 +ClearLinkBoundaryScan (ELAN3_DEV *dev)
14499 +{
14500 +    unsigned long flags;
14501 +
14502 +    spin_lock_irqsave (&dev->IntrLock, flags);
14503 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
14504 +    {
14505 +       PRINTF0 (DBG_DEVICE, DBG_BSCAN, "ClearLinkBoundaryScan: taking link out of boundary scan mode\n");
14506 +
14507 +       /*
14508 +        * Take the link out of boundary scan 
14509 +        */
14510 +       SET_SCHED_LINK_VALUE (dev, 0, 0);
14511 +
14512 +       /*
14513 +        * Clear any link errors.
14514 +        */
14515 +       PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
14516 +
14517 +       /*
14518 +        * Re-enable the error interrupts.
14519 +        */
14520 +       if (! timer_fn_queued(&dev->ErrorTimeoutId))
14521 +           ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
14522 +
14523 +       /*
14524 +        * And stop the inputter from discarding all packets.
14525 +        */
14526 +       if (--dev->DiscardAllCount == 0)
14527 +           SetSchedStatusRegister (dev, 0, NULL);
14528 +    }
14529 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14530 +}
14531 +
14532 +int
14533 +WriteBoundaryScanValue (ELAN3_DEV *dev, int value)
14534 +{
14535 +    int           res = 0;
14536 +    unsigned long flags;
14537 +
14538 +    spin_lock_irqsave (&dev->IntrLock, flags);
14539 +    if ((dev->SchCntReg & LinkBoundaryScan) != 0)
14540 +    {
14541 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: driving value 0x%x onto link\n", value);
14542 +       SET_SCHED_LINK_VALUE (dev, 1, value);
14543 +
14544 +       res = read_reg32 (dev, Exts.LinkState);
14545 +
14546 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "WriteBoundaryScanValue: return 0x%x\n", res);
14547 +    }
14548 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14549 +
14550 +    return (res);
14551 +}
14552 +
14553 +int
14554 +ReadBoundaryScanValue(ELAN3_DEV *dev, int link)
14555 +{
14556 +    int           res;
14557 +    unsigned long flags;
14558 +
14559 +    spin_lock_irqsave (&dev->IntrLock, flags);
14560 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
14561 +    {
14562 +       PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: set linkval 0x%x\n",  link);
14563 +       SET_SCHED_LINK_VALUE (dev, 0, link);
14564 +    }
14565 +    res = read_reg32 (dev, Exts.LinkState);
14566 +    PRINTF1 (DBG_DEVICE, DBG_BSCAN, "ReadBoundaryScanValue: return 0x%x\n", res);
14567 +
14568 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14569 +
14570 +    return (res);
14571 +}
14572 +
14573 +static int
14574 +ReadLinkVal (ELAN3_DEV *dev, int link)
14575 +{
14576 +    if ((dev->SchCntReg & LinkBoundaryScan) == 0)
14577 +       SET_SCHED_LINK_VALUE (dev, 0, link);
14578 +    
14579 +    return (read_reg32 (dev, Exts.LinkState));
14580 +}
14581 +
14582 +static void
14583 +HandleLinkError (ELAN3_DEV *dev)
14584 +{
14585 +    E3_uint32 value = read_reg32 (dev, Exts.LinkErrorTypes);
14586 +
14587 +    PRINTF1 (DBG_DEVICE, DBG_LINKERR, "HandleLinkError: LinkErrorTypes %08x - clearing\n", value);
14588 +    
14589 +    if (value & LS_LockError)   BumpStat (dev, LockError);
14590 +    if (value & LS_DeskewError) BumpStat (dev, DeskewError);
14591 +    if (value & LS_PhaseError)  BumpStat (dev, PhaseError);
14592 +    if (value & LS_DataError)   BumpStat (dev, DataError);
14593 +    if (value & LS_FifoOvFlow0) BumpStat (dev, FifoOvFlow0);
14594 +    if (value & LS_FifoOvFlow1) BumpStat (dev, FifoOvFlow1);
14595 +
14596 +    if (value & LS_DataError)
14597 +       dev->Stats.LinkErrorValue = ReadLinkVal (dev, 12) | (ReadLinkVal (dev, 13) << 9);
14598 +
14599 +    PULSE_SCHED_STATUS (dev, ClearLinkErrorInt);
14600 +
14601 +    CheckForExcessiveErrorRate (dev);
14602 +}
14603 +
14604 +static void
14605 +HandleErrorInterrupt (ELAN3_DEV *dev, E3_uint32 Pend)
14606 +{
14607 +    if (Pend & INT_PciMemErr)
14608 +       HandlePciMemErr (dev);
14609 +    
14610 +    if (Pend & INT_SDRamInt)
14611 +       HandleSDRamInterrupt (dev);
14612 +    
14613 +    if (Pend & INT_LinkError)
14614 +       HandleLinkError (dev);
14615 +}
14616 +       
14617 +static void
14618 +HandleAnyIProcTraps (ELAN3_DEV *dev, E3_uint32 Pend)
14619 +{
14620 +    E3_uint32       RestartBits = 0;
14621 +    
14622 +    if (Pend & INT_IProcCh0SysCntx)
14623 +    {
14624 +       HandleIProcTrap (dev, 0, Pend,
14625 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx),
14626 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrHead[0]),
14627 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_C0_TrData[0]));
14628 +                        
14629 +       RestartBits |= RestartCh0SysCntx;
14630 +    }
14631 +    
14632 +    if (Pend & INT_IProcCh1SysCntx)
14633 +    {
14634 +       HandleIProcTrap (dev, 1, Pend,
14635 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcSysCntx),
14636 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrHead[0]),
14637 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_C0_TrData[0]));
14638 +                        
14639 +       RestartBits |= RestartCh1SysCntx;
14640 +    }
14641 +
14642 +    if (Pend & INT_IProcCh0NonSysCntx)
14643 +    {
14644 +       HandleIProcTrap (dev, 0, Pend,
14645 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx),
14646 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrHead[0]),
14647 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh0_NonC0_TrData[0]));
14648 +
14649 +       RestartBits |= RestartCh0NonSysCntx;
14650 +    }
14651 +    
14652 +
14653 +    if (Pend & INT_IProcCh1NonSysCntx)
14654 +    {
14655 +       HandleIProcTrap (dev, 1, Pend,
14656 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, IProcNonSysCntx),
14657 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrHead[0]),
14658 +                        dev->TAndQBase + offsetof (E3_TrapAndQueue, VCh1_NonC0_TrData[0]));
14659 +       RestartBits |= RestartCh1NonSysCntx;
14660 +    }
14661 +
14662 +    PULSE_SCHED_STATUS (dev, RestartBits);
14663 +}
14664 +
14665 +static void
14666 +elan3_event_interrupt (ELAN3_DEV *dev)
14667 +{
14668 +    unsigned long flags;
14669 +
14670 +    kernel_thread_init("elan3_event_int");
14671 +
14672 +    spin_lock_irqsave (&dev->IntrLock, flags);
14673 +    for (;;)
14674 +    {
14675 +       /* Make sure we never sleep with the EventInterrupt disabled */
14676 +       if (! (dev->InterruptMask & INT_EventInterrupt))
14677 +       {
14678 +           if (HandleEventInterrupt (dev, eventint_resched_ticks, &flags) != ESUCCESS)
14679 +               BumpStat (dev, EventRescheds);
14680 +           
14681 +           ENABLE_INT_MASK (dev, INT_EventInterrupt);
14682 +       }
14683 +
14684 +       if (dev->ThreadsShouldStop)
14685 +           break;
14686 +
14687 +       kcondvar_wait (&dev->IntrWait, &dev->IntrLock, &flags);
14688 +    }
14689 +    
14690 +    dev->EventInterruptThreadStopped = 1;
14691 +    kcondvar_wakeupall (&dev->IntrWait, &dev->IntrLock);
14692 +
14693 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14694 +
14695 +    kernel_thread_exit ();
14696 +}
14697 +
14698 +int
14699 +InterruptHandler (ELAN3_DEV *dev)
14700 +{
14701 +    E3_uint32     Mask;
14702 +    E3_uint32     Pend;
14703 +    E3_uint32     RestartBits;
14704 +    int           deliverDProcTrap;
14705 +    int                  deliverTProcTrap;
14706 +    static long   lboltsave;
14707 +    int           loop_count = 0; 
14708 +    unsigned long flags;
14709 +    int  tproc_delivered;
14710 +
14711 +    spin_lock_irqsave (&dev->IntrLock, flags);
14712 +
14713 +    BumpStat (dev, Interrupts);
14714 +
14715 +    Mask = dev->InterruptMask;
14716 +    Pend = read_reg32 (dev, Exts.InterruptReg);                                                /* PCI read */
14717 +
14718 +    /* Save the lbolt so we know how long in do loop or in event handling */
14719 +    lboltsave = lbolt;
14720 +
14721 +    if ((Pend & Mask) == INT_EventInterrupt)
14722 +    {
14723 +       DISABLE_INT_MASK (dev, INT_EventInterrupt);
14724 +
14725 +       if (HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS)
14726 +           ENABLE_INT_MASK (dev, INT_EventInterrupt);
14727 +       else
14728 +       {
14729 +           BumpStat (dev, EventPunts);
14730 +
14731 +           kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock);
14732 +       }
14733 +
14734 +        if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
14735 +            dev->Stats.LongestInterrupt = (lbolt - lboltsave);
14736 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
14737 +       return (ESUCCESS);
14738 +    }
14739 +
14740 +    if ((Pend & Mask) == 0)
14741 +    {
14742 +       PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Spurious Pend %x Mask %x SchedStatus %x\n", 
14743 +                Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
14744 +
14745 +        if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
14746 +            dev->Stats.LongestInterrupt = (lbolt - lboltsave);
14747 +       spin_unlock_irqrestore (&dev->IntrLock, flags);
14748 +       return (EFAIL);
14749 +    }
14750 +
14751 +    PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", 
14752 +            Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
14753 +
14754 +    do {
14755 +       loop_count++;
14756 +       RestartBits = 0;
14757 +
14758 +       if (Pend & Mask & (INT_CProc | INT_ComQueue))
14759 +           HandleCProcTrap (dev, Pend, &Mask);
14760 +
14761 +       tproc_delivered = 0;
14762 +
14763 +       if (Pend & Mask & INT_TProc) {
14764 +           ELAN_REG_REC(Pend);
14765 +           tproc_delivered = 1;
14766 +           deliverTProcTrap = HandleTProcTrap (dev, &RestartBits);
14767 +       }
14768 +       else
14769 +           deliverTProcTrap = 0;
14770 +
14771 +       if (Pend & Mask & INT_DProc)
14772 +           deliverDProcTrap = HandleDProcTrap (dev, &RestartBits);
14773 +       else
14774 +           deliverDProcTrap = 0;
14775 +
14776 +       ASSERT ((RestartBits & RestartDProc) == 0 || (read_reg32 (dev, Exts.DProcStatus.Status) >> 29) == 4);
14777 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR.Status))      == 0);
14778 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
14779 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
14780 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
14781 +       ASSERT ((RestartBits & RestartDProc) == 0 || elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
14782 +
14783 +       PULSE_SCHED_STATUS (dev, RestartBits);          /* Restart any processors which had trapped. */
14784 +       SET_INT_MASK (dev, Mask);                       /* And install the new interrupt mask */
14785 +
14786 +       if ((Pend & Mask & INT_TProc) && deliverTProcTrap)
14787 +           DeliverTProcTrap (dev, dev->ThreadTrap, Pend);
14788 +
14789 +       if ((Pend & Mask & INT_DProc) && deliverDProcTrap)
14790 +           DeliverDProcTrap (dev, dev->DmaTrap, Pend);
14791 +
14792 +       if (Pend & Mask & INT_Inputters)
14793 +           HandleAnyIProcTraps (dev, Pend);
14794 +       
14795 +       if (Pend & Mask & INT_EventInterrupt)
14796 +       {
14797 +           DISABLE_INT_MASK (dev, INT_EventInterrupt);
14798 +           
14799 +           if (loop_count == 1 && HandleEventInterrupt (dev, eventint_punt_ticks, &flags) == ESUCCESS) /* always punt to the thread if we've */
14800 +               ENABLE_INT_MASK (dev, INT_EventInterrupt);                                              /* been round the loop once */
14801 +           else
14802 +           {
14803 +               BumpStat (dev, EventPunts);
14804 +
14805 +               kcondvar_wakeupone (&dev->IntrWait, &dev->IntrLock);
14806 +           }
14807 +       }
14808 +
14809 +       if (Pend & (INT_Halted | INT_Discarding))
14810 +           ProcessHaltOperations (dev, Pend);
14811 +
14812 +       if (Pend & Mask & INT_ErrorInterrupts)
14813 +           HandleErrorInterrupt (dev, Pend);
14814 +
14815 +       Mask = dev->InterruptMask;
14816 +       Pend = read_reg32 (dev, Exts.InterruptReg);     /* PCI read */
14817 +       
14818 +       if (tproc_delivered)
14819 +           ELAN_REG_REC(Pend);
14820 +
14821 +       PRINTF3 (DBG_DEVICE, DBG_INTR, "InterruptHandler: Pend %x Mask %08x SchedStatus %x\n", 
14822 +                Pend, Mask, read_reg32 (dev, Exts.SchCntReg));
14823 +    }  while ((Pend & Mask) != 0);
14824 +
14825 +    if ((lbolt - lboltsave) > dev->Stats.LongestInterrupt)
14826 +        dev->Stats.LongestInterrupt = (lbolt - lboltsave);
14827 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
14828 +
14829 +    PRINTF2 (DBG_DEVICE, DBG_INTR, "InterruptHandler: lbolt is %lx; start lbolt is %lx\n", 
14830 +            lbolt, lboltsave);
14831 +
14832 +    return (ESUCCESS);
14833 +}
14834 +
14835 +void
14836 +SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp)
14837 +{
14838 +    E3_uint32 HaltMask  = dev->HaltOperationsMask;
14839 +    E3_uint32 Mask      = Maskp ? *Maskp : dev->InterruptMask;
14840 +    E3_uint32 ClearBits = 0;
14841 +    E3_uint32 SetBits   = 0;
14842 +
14843 +    PRINTF5 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: HaltOperationsMask=%x HaltAll=%d HaltDmaDequeue=%d HaltThread=%d DiscardAll=%d\n",
14844 +            HaltMask, dev->HaltAllCount, dev->HaltDmaDequeueCount, dev->HaltThreadCount, dev->DiscardAllCount);
14845 +
14846 +    if (dev->FlushCommandCount)
14847 +       SetBits |= FlushCommandQueues;
14848 +    
14849 +    if ((HaltMask & INT_DProcHalted) || dev->HaltAllCount)
14850 +    {
14851 +       SetBits |= HaltDmas | HaltDmaDequeue;
14852 +       if (Pend & INT_DProcHalted)
14853 +           Mask &= ~INT_DProcHalted;
14854 +       else
14855 +           Mask |= INT_DProcHalted;
14856 +    }
14857 +
14858 +    if (dev->HaltDmaDequeueCount)
14859 +    {
14860 +       SetBits |= HaltDmaDequeue;
14861 +       if (Pend & INT_DProcHalted)
14862 +           Mask &= ~INT_DProcHalted;
14863 +       else
14864 +           Mask |= INT_DProcHalted;
14865 +    }
14866 +
14867 +    if ((HaltMask & INT_TProcHalted) || dev->HaltAllCount || dev->HaltThreadCount)
14868 +    {
14869 +       SetBits |= HaltThread;
14870 +       if (Pend & INT_TProcHalted)
14871 +           Mask &= ~INT_TProcHalted;
14872 +       else
14873 +           Mask |= INT_TProcHalted;
14874 +    }
14875 +
14876 +    if ((HaltMask & INT_DiscardingSysCntx) || dev->DiscardAllCount)
14877 +    {
14878 +       SetBits |= DiscardSysCntxIn;
14879 +       if (Pend & INT_DiscardingSysCntx)
14880 +           Mask &= ~INT_DiscardingSysCntx;
14881 +       else
14882 +           Mask |= INT_DiscardingSysCntx;
14883 +    }
14884 +
14885 +    if ((HaltMask & INT_DiscardingNonSysCntx) || dev->DiscardNonContext0Count || dev->DiscardAllCount)
14886 +    {
14887 +       SetBits |= DiscardNonSysCntxIn;
14888 +       if (Pend & INT_DiscardingNonSysCntx)
14889 +           Mask &= ~INT_DiscardingNonSysCntx;
14890 +       else
14891 +           Mask |= INT_DiscardingNonSysCntx;
14892 +    }
14893 +
14894 +    if (dev->HaltNonContext0Count)
14895 +       SetBits |= StopNonSysCntxs;
14896 +
14897 +    ClearBits = SetBits ^ (FlushCommandQueues | HaltDmas | HaltDmaDequeue | HaltThread |
14898 +                          DiscardSysCntxIn | DiscardNonSysCntxIn | StopNonSysCntxs);
14899 +
14900 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "SetSchedStatusRegister: SetBits=%x InterruptMask=%x InterruptReg=%x Mask=%x\n",
14901 +            SetBits, dev->InterruptMask, read_reg32 (dev, Exts.InterruptReg), Mask);
14902 +
14903 +    MODIFY_SCHED_STATUS (dev, SetBits, ClearBits);
14904 +
14905 +    if (Maskp)
14906 +       *Maskp = Mask;                                          /* copyback new interrupt mask */
14907 +    else
14908 +       SET_INT_MASK(dev, Mask);
14909 +}
14910 +
14911 +void
14912 +FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op)
14913 +{
14914 +    unsigned long flags;
14915 +
14916 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14917 +    op->Next = dev->FreeHaltOperations;
14918 +    dev->FreeHaltOperations = op;
14919 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14920 +}
14921 +
14922 +int
14923 +ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep)
14924 +{
14925 +    ELAN3_HALTOP   *op;
14926 +    unsigned long flags;
14927 +
14928 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14929 +    while ((dev->NumHaltOperations - dev->ReservedHaltOperations) < count)
14930 +    {
14931 +       spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14932 +
14933 +       KMEM_ZALLOC (op, ELAN3_HALTOP *, sizeof (ELAN3_HALTOP), cansleep);
14934 +
14935 +       if (op == NULL)
14936 +           return (FALSE);
14937 +
14938 +       spin_lock_irqsave (&dev->FreeHaltLock, flags);
14939 +
14940 +       dev->NumHaltOperations++;
14941 +
14942 +       op->Next = dev->FreeHaltOperations;
14943 +       dev->FreeHaltOperations = op;
14944 +    }
14945 +                   
14946 +    dev->ReservedHaltOperations += count;
14947 +    
14948 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14949 +
14950 +    return (TRUE);
14951 +}
14952 +
14953 +void
14954 +ReleaseHaltOperations (ELAN3_DEV *dev, int count)
14955 +{
14956 +    unsigned long flags;
14957 +
14958 +    spin_lock_irqsave (&dev->FreeHaltLock, flags);
14959 +    dev->ReservedHaltOperations -= count;
14960 +    spin_unlock_irqrestore (&dev->FreeHaltLock, flags);
14961 +}
14962 +
14963 +void
14964 +QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp, 
14965 +                   E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement)
14966 +{
14967 +    ELAN3_HALTOP *op;
14968 +
14969 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
14970 +    
14971 +    spin_lock (&dev->FreeHaltLock);
14972 +    op = dev->FreeHaltOperations;
14973 +
14974 +    ASSERT (op != NULL);
14975 +
14976 +    dev->FreeHaltOperations = op->Next;
14977 +    spin_unlock (&dev->FreeHaltLock);
14978 +
14979 +    op->Mask      = ReqMask;
14980 +    op->Function  = (void (*)(void *, void *))Function;
14981 +    op->Arguement = Arguement;
14982 +
14983 +    dev->HaltOperationsMask |= ReqMask;                                /* Add our bits to the global bits needed. */
14984 +    SetSchedStatusRegister (dev, Pend, Maskp);                 /* Set the control register and the interrupt mask */
14985 +
14986 +    /*
14987 +     * If the condition is already satisfied, then SetSchedStatusRegister will
14988 +     * have masked out the interrupt, so re-enable it now to take it straight
14989 +     * away
14990 +     */
14991 +    if (Maskp == NULL)
14992 +    {
14993 +       if ((read_reg32 (dev, Exts.InterruptReg) & ReqMask) == ReqMask)
14994 +           ENABLE_INT_MASK (dev, ReqMask);
14995 +    }
14996 +    else
14997 +    {
14998 +       if ((Pend & ReqMask) == ReqMask)
14999 +           *Maskp |= ReqMask;
15000 +    }
15001 +
15002 +    *dev->HaltOperationsTailpp = op;                           /* Queue at end of list, since ProcessHaltOperations */
15003 +    dev->HaltOperationsTailpp = &op->Next;                     /* drops the IntrLock while running down the list */
15004 +    op->Next = NULL;
15005 +}
15006 +                   
15007 +void
15008 +ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend)
15009 +{
15010 +    E3_uint32     Mask;
15011 +    ELAN3_HALTOP  *op;
15012 +    ELAN3_HALTOP **prevp;
15013 +    E3_uint32     haltMask;
15014 +    ELAN3_HALTOP  *next;
15015 +
15016 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: Pend %x\n", Pend);
15017 +
15018 +    for (;;)
15019 +    {
15020 +       ELAN3_HALTOP  *head = NULL;
15021 +       ELAN3_HALTOP **tailp = &head;
15022 +
15023 +       /*
15024 +        * Generate a list of halt operations which can be called now.
15025 +        */
15026 +       for (haltMask = 0, prevp = &dev->HaltOperations; (op = *prevp) != NULL; )
15027 +       {
15028 +           if ((Pend & op->Mask) != op->Mask)
15029 +           {
15030 +               haltMask |= op->Mask;
15031 +               prevp = &op->Next;
15032 +           }
15033 +           else
15034 +           {
15035 +               *prevp = op->Next;                              /* remove from list */
15036 +               if (op->Next == NULL)
15037 +                   dev->HaltOperationsTailpp = prevp;
15038 +               
15039 +               *tailp = op;                                    /* add to local list */
15040 +               op->Next = NULL;
15041 +               tailp = &op->Next;
15042 +           }
15043 +       }
15044 +
15045 +       if (head == NULL)                                       /* nothing to do, so update */
15046 +       {                                                       /* the schedule status register */
15047 +           dev->HaltOperationsMask = haltMask;                 /* and the interrupt mask */
15048 +           SetSchedStatusRegister (dev, Pend, NULL);
15049 +           return;
15050 +       }
15051 +
15052 +       /*
15053 +        * flush the command queues, before calling any operations
15054 +        */
15055 +       Mask = dev->InterruptMask;
15056 +       
15057 +       if (dev->FlushCommandCount++ == 0)
15058 +           SetSchedStatusRegister (dev, Pend, &Mask);
15059 +       
15060 +       if ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
15061 +       {
15062 +           if (dev->HaltThreadCount++ == 0)
15063 +               SetSchedStatusRegister (dev, Pend, &Mask);
15064 +
15065 +           CAPTURE_CPUS();
15066 +
15067 +           while ((read_reg32 (dev, ComQueueStatus) & ComQueueNotEmpty) != 0)
15068 +               mb();
15069 +
15070 +           RELEASE_CPUS();
15071 +                   
15072 +           if (--dev->HaltThreadCount == 0)
15073 +               SetSchedStatusRegister (dev, Pend, &Mask);
15074 +       }
15075 +               
15076 +       if (read_reg32 (dev, Exts.InterruptReg) & INT_CProc)
15077 +       {
15078 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: command processor has trapped\n");
15079 +           HandleCProcTrap (dev, Pend, &Mask);
15080 +       }
15081 +       
15082 +       if (--dev->FlushCommandCount == 0)
15083 +           SetSchedStatusRegister (dev, Pend, &Mask);
15084 +       
15085 +       PRINTF2 (DBG_DEVICE, DBG_INTR, "ProcessHaltOperations: interrupt mask %08x -> %08x\n", 
15086 +                dev->InterruptMask, Mask);
15087 +       
15088 +       SET_INT_MASK (dev, Mask);
15089 +       spin_unlock (&dev->IntrLock);
15090 +
15091 +       /*
15092 +        * now process the list of operations
15093 +        * we have
15094 +        */
15095 +       for (op = head; op != NULL; op = next)
15096 +       {
15097 +           next = op->Next;
15098 +
15099 +           op->Function (dev, op->Arguement);
15100 +           
15101 +           FreeHaltOperation (dev, op);
15102 +       }
15103 +
15104 +       spin_lock (&dev->IntrLock);
15105 +    }
15106 +}
15107 +
15108 +int
15109 +ComputePosition (ELAN_POSITION *pos, unsigned nodeId, unsigned numNodes, unsigned numDownLinksVal)
15110 +{
15111 +    int i, lvl, n;
15112 +    char numDownLinks[ELAN_MAX_LEVELS];
15113 +
15114 +    if (nodeId >= numNodes)
15115 +       return (EINVAL);
15116 +
15117 +    for (i = 0; i < ELAN_MAX_LEVELS; i++, numDownLinksVal >>= 4)
15118 +       numDownLinks[i] = numDownLinksVal & 7;
15119 +    
15120 +    for (lvl = 0, n = numNodes; n > ((lvl % 3) == 2 ? 8 : 4) && lvl < ELAN_MAX_LEVELS; lvl++)
15121 +    {
15122 +       if (numDownLinks[lvl] == 0)
15123 +           numDownLinks[lvl] = 4;
15124 +       
15125 +       if ((n % numDownLinks[lvl]) != 0)
15126 +           return (EINVAL);
15127 +       
15128 +       n /= numDownLinks[lvl];
15129 +    }
15130 +
15131 +    if (numDownLinks[lvl] == 0)
15132 +       numDownLinks[lvl] = n;
15133 +
15134 +    if (numDownLinks[lvl] != n)
15135 +       return (EINVAL);
15136 +
15137 +    for (i = 0; i <= lvl; i++)
15138 +       pos->pos_arity[i] = numDownLinks[lvl - i];
15139 +
15140 +    pos->pos_nodes  = numNodes;
15141 +    pos->pos_levels = lvl + 1;
15142 +    pos->pos_nodeid = nodeId;
15143 +    pos->pos_mode   = ELAN_POS_MODE_SWITCHED;
15144 +
15145 +    return (0);
15146 +}
15147 +
15148 +/*
15149 + * Local variables:
15150 + * c-file-style: "stroustrup"
15151 + * End:
15152 + */
15153 diff -urN clean/drivers/net/qsnet/elan3/elandev_linux.c linux-2.6.9/drivers/net/qsnet/elan3/elandev_linux.c
15154 --- clean/drivers/net/qsnet/elan3/elandev_linux.c       1969-12-31 19:00:00.000000000 -0500
15155 +++ linux-2.6.9/drivers/net/qsnet/elan3/elandev_linux.c 2005-09-07 10:35:03.000000000 -0400
15156 @@ -0,0 +1,2395 @@
15157 +/*
15158 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
15159 + *
15160 + *    For licensing information please see the supplied COPYING file
15161 + *
15162 + */
15163 +
15164 +#ident "$Id: elandev_linux.c,v 1.112.2.7 2005/09/07 14:35:03 mike Exp $"
15165 +/*     $Source: /cvs/master/quadrics/elan3mod/elan3/os/elandev_linux.c,v $*/
15166 +
15167 +#include <qsnet/kernel.h>
15168 +#include <qsnet/kpte.h>
15169 +
15170 +#include <linux/config.h>
15171 +#include <linux/mm.h>
15172 +#include <linux/pci.h>
15173 +#include <linux/reboot.h>
15174 +#include <linux/notifier.h>
15175 +
15176 +#include <linux/init.h>
15177 +#include <qsnet/module.h>
15178 +#include <linux/pci.h>
15179 +
15180 +#include <asm/uaccess.h>
15181 +#include <asm/io.h>
15182 +#include <asm/pgalloc.h>
15183 +#include <asm/pgtable.h>
15184 +
15185 +#include <elan/devinfo.h>
15186 +#include <elan/elanmod.h>
15187 +
15188 +#include <elan3/elanregs.h>
15189 +#include <elan3/elandev.h>
15190 +#include <elan3/elanvp.h>
15191 +#include <elan3/elanio.h>
15192 +#include <elan3/elan3mmu.h>
15193 +#include <elan3/elanctxt.h>
15194 +#include <elan3/elandebug.h>
15195 +#include <elan3/elansyscall.h>
15196 +
15197 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,0)
15198 +#error please use a 2.2 series kernel or newer
15199 +#endif
15200 +
15201 +/* Minor numbers encoded as :
15202 + *   [5:0]     device number
15203 + *   [15:6]    function number
15204 + */
15205 +#define ELAN3_DEVICE_MASK          0x3F
15206 +
15207 +#define ELAN3_MINOR_CONTROL      0
15208 +#define ELAN3_MINOR_MEM          1
15209 +#define ELAN3_MINOR_USER        2
15210 +#define ELAN3_MINOR_SHIFT        6
15211 +
15212 +#define ELAN3_DEVICE(inode)    (MINOR(inode->i_rdev) & ELAN3_DEVICE_MASK)
15213 +#define ELAN3_MINOR(inode)     (MINOR(inode->i_rdev) >> ELAN3_MINOR_SHIFT)
15214 +
15215 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
15216 +#      define SetPageReserved(page)    set_bit(PG_reserved, &(page)->flags)
15217 +#      define ClearPageReserved(page)  clear_bit(PG_reserved, &(page)->flags)
15218 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
15219 +typedef void irqreturn_t;
15220 +#endif
15221 +#       define IRQ_NONE
15222 +#       define IRQ_HANDLED
15223 +#       define IRQ_RETVAL(x)
15224 +#endif
15225 +
15226 +#if defined(LINUX_SPARC) || defined(LINUX_PPC64)
15227 +#define __io_remap_page_range(from,offset,size,prot)   remap_page_range(from,offset,size,prot)
15228 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
15229 +#elif defined(NO_RMAP)
15230 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(from,offset,size,prot)
15231 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
15232 +#else
15233 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(vma,from,offset,size,prot)
15234 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
15235 +#define __remap_page_range(from,offset,size,prot)      remap_pfn_range(vma,from,(offset)>>PAGE_SHIFT,size,prot)
15236 +#else
15237 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(vma,from,offset,size,prot)
15238 +#endif
15239 +#endif
15240 +
15241 +/*
15242 + * Function prototypes.
15243 + */
15244 +static int     elanattach(int instance, struct pci_dev *pcidev);
15245 +static int     elandetach(int instance);
15246 +
15247 +static int     elan3_open (struct inode *inode, struct file *file);
15248 +static int     elan3_ioctl (struct inode *inode, struct file *file, 
15249 +                            unsigned int cmd, unsigned long arg);
15250 +static int     elan3_mmap (struct file *file, struct vm_area_struct *vm_area);
15251 +static int     elan3_release (struct inode *inode, struct file *file);
15252 +
15253 +static int      elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer);
15254 +static int      elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer);
15255 +
15256 +static irqreturn_t InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs);
15257 +
15258 +static int     ConfigurePci(ELAN3_DEV *dev);
15259 +static int     ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr);
15260 +
15261 +static void     elan3_shutdown_devices(int panicing);
15262 +
15263 +/*
15264 + * Globals. 
15265 + */
15266 +static ELAN3_DEV       *elan3_devices[ELAN3_MAX_CONTROLLER];
15267 +static int       NodeId = ELAN3_INVALID_NODE;
15268 +static int       NumNodes;
15269 +static int       DownLinks;
15270 +static int       RandomRoutingDisabled;
15271 +int              BackToBackMaster;
15272 +int              BackToBackSlave;
15273 +int              enable_sdram_writecombining;
15274 +int             sdram_bank_limit;
15275 +extern int       LwpNice;
15276 +
15277 +char *    elan_reg_rec_file [ELAN_REG_REC_MAX];
15278 +int       elan_reg_rec_line [ELAN_REG_REC_MAX];
15279 +long      elan_reg_rec_lbolt[ELAN_REG_REC_MAX];
15280 +int       elan_reg_rec_cpu  [ELAN_REG_REC_MAX];
15281 +E3_uint32 elan_reg_rec_reg  [ELAN_REG_REC_MAX];
15282 +int       elan_reg_rec_index;
15283 +
15284 +MODULE_AUTHOR("Quadrics Ltd.");
15285 +MODULE_DESCRIPTION("Elan3 Device Driver");
15286 +
15287 +MODULE_LICENSE("GPL");
15288 +
15289 +module_param(NodeId,uint, 0);
15290 +module_param(NumNodes,uint, 0);
15291 +module_param(RandomRoutingDisabled,uint, 0);
15292 +module_param(DownLinks,uint, 0);
15293 +module_param(BackToBackMaster,uint, 0);
15294 +module_param(BackToBackSlave,uint, 0);
15295 +module_param(LwpNice, uint, 0);
15296 +module_param(elan3_debug, uint, 0);
15297 +module_param(elan3_debug_console, uint, 0);
15298 +module_param(elan3_debug_buffer, uint, 0);
15299 +module_param(elan3mmu_debug, uint, 0);
15300 +module_param(sdram_bank_limit, uint, 0);
15301 +
15302 +/* elan3/os/context.c */
15303 +EXPORT_SYMBOL(elan3_alloc);
15304 +EXPORT_SYMBOL(elan3_attach);
15305 +EXPORT_SYMBOL(elan3_doattach);
15306 +EXPORT_SYMBOL(elan3_free);
15307 +EXPORT_SYMBOL(elan3_detach);
15308 +EXPORT_SYMBOL(elan3_dodetach);
15309 +EXPORT_SYMBOL(elan3_block_inputter);
15310 +EXPORT_SYMBOL(CheckCommandQueueFlushed);
15311 +
15312 +/* elan3/os/sdram.c */
15313 +EXPORT_SYMBOL(elan3_sdram_alloc);
15314 +EXPORT_SYMBOL(elan3_sdram_free);
15315 +EXPORT_SYMBOL(elan3_sdram_to_phys);
15316 +EXPORT_SYMBOL(elan3_sdram_writeb);
15317 +EXPORT_SYMBOL(elan3_sdram_writew);
15318 +EXPORT_SYMBOL(elan3_sdram_writel);
15319 +EXPORT_SYMBOL(elan3_sdram_writeq);
15320 +EXPORT_SYMBOL(elan3_sdram_readb);
15321 +EXPORT_SYMBOL(elan3_sdram_readw);
15322 +EXPORT_SYMBOL(elan3_sdram_readl);
15323 +EXPORT_SYMBOL(elan3_sdram_readq);
15324 +EXPORT_SYMBOL(elan3_sdram_zerob_sdram);
15325 +EXPORT_SYMBOL(elan3_sdram_zerow_sdram);
15326 +EXPORT_SYMBOL(elan3_sdram_zerol_sdram);
15327 +EXPORT_SYMBOL(elan3_sdram_zeroq_sdram);
15328 +EXPORT_SYMBOL(elan3_sdram_copyb_to_sdram);
15329 +EXPORT_SYMBOL(elan3_sdram_copyw_to_sdram);
15330 +EXPORT_SYMBOL(elan3_sdram_copyl_to_sdram);
15331 +EXPORT_SYMBOL(elan3_sdram_copyq_to_sdram);
15332 +EXPORT_SYMBOL(elan3_sdram_copyb_from_sdram);
15333 +EXPORT_SYMBOL(elan3_sdram_copyw_from_sdram);
15334 +EXPORT_SYMBOL(elan3_sdram_copyl_from_sdram);
15335 +EXPORT_SYMBOL(elan3_sdram_copyq_from_sdram);
15336 +
15337 +/* elan3/os/tproc.c */
15338 +EXPORT_SYMBOL(DeliverTProcTrap);
15339 +EXPORT_SYMBOL(HandleTProcTrap);
15340 +EXPORT_SYMBOL(SaveThreadToStack);
15341 +
15342 +/* elan3/os/tprocinsts.c */
15343 +EXPORT_SYMBOL(RollThreadToClose);
15344 +
15345 +/* elan3/os/iproc.c */
15346 +EXPORT_SYMBOL(InspectIProcTrap);
15347 +EXPORT_SYMBOL(IProcTrapString);
15348 +EXPORT_SYMBOL(SimulateUnlockQueue);
15349 +
15350 +/* elan3/os/cproc.c */
15351 +EXPORT_SYMBOL(HandleCProcTrap);
15352 +
15353 +/* elan3/os/route_table.c */
15354 +EXPORT_SYMBOL(GenerateRoute);
15355 +EXPORT_SYMBOL(LoadRoute);
15356 +EXPORT_SYMBOL(InvalidateRoute);
15357 +EXPORT_SYMBOL(ValidateRoute);
15358 +EXPORT_SYMBOL(ClearRoute);
15359 +EXPORT_SYMBOL(GenerateProbeRoute);
15360 +EXPORT_SYMBOL(GenerateCheckRoute);
15361 +
15362 +/* elan3/os/elandev_generic.c */
15363 +EXPORT_SYMBOL(elan3_debug);
15364 +EXPORT_SYMBOL(QueueHaltOperation);
15365 +EXPORT_SYMBOL(ReleaseHaltOperations);
15366 +EXPORT_SYMBOL(ReserveHaltOperations);
15367 +
15368 +/* elan3/vm/elan3mmu_generic.c */
15369 +EXPORT_SYMBOL(elan3mmu_pteload);
15370 +EXPORT_SYMBOL(elan3mmu_unload);
15371 +EXPORT_SYMBOL(elan3mmu_set_context_filter);
15372 +EXPORT_SYMBOL(elan3mmu_reserve);
15373 +EXPORT_SYMBOL(elan3mmu_attach);
15374 +EXPORT_SYMBOL(elan3mmu_detach);
15375 +EXPORT_SYMBOL(elan3mmu_release);
15376 +/* elan3/vm/elan3mmu_linux.c */
15377 +EXPORT_SYMBOL(elan3mmu_phys_to_pte);
15378 +EXPORT_SYMBOL(elan3mmu_kernel_invalid_pte);
15379 +
15380 +/* elan3/os/elan3_debug.c */
15381 +EXPORT_SYMBOL(elan3_debugf);
15382 +
15383 +/* elan3/os/minames.c */
15384 +EXPORT_SYMBOL(MiToName);
15385 +
15386 +/* elan3/os/elandev_generic.c */
15387 +EXPORT_SYMBOL(MapDeviceRegister);
15388 +EXPORT_SYMBOL(UnmapDeviceRegister);
15389 +
15390 +EXPORT_SYMBOL(elan_reg_rec_lbolt);
15391 +EXPORT_SYMBOL(elan_reg_rec_file);
15392 +EXPORT_SYMBOL(elan_reg_rec_index);
15393 +EXPORT_SYMBOL(elan_reg_rec_cpu);
15394 +EXPORT_SYMBOL(elan_reg_rec_reg);
15395 +EXPORT_SYMBOL(elan_reg_rec_line);
15396 +
15397 +/*
15398 + * Standard device entry points.
15399 + */
15400 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
15401 +
15402 +#include <linux/dump.h>
15403 +
15404 +static int      elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer);
15405 +
15406 +static struct notifier_block elan3_dump_notifier = 
15407 +{
15408 +    notifier_call:     elan3_dump_event,
15409 +    priority:          0,
15410 +};
15411 +
15412 +static int
15413 +elan3_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
15414 +{
15415 +    if ( event == DUMP_BEGIN )
15416 +       elan3_shutdown_devices (FALSE);
15417 +
15418 +    return (NOTIFY_DONE);
15419 +}
15420 +
15421 +#endif
15422 +
15423 +static struct file_operations elan3_fops = {
15424 +        ioctl:   elan3_ioctl,          /* ioctl */
15425 +        mmap:    elan3_mmap,           /* mmap */
15426 +        open:    elan3_open,           /* open */
15427 +        release: elan3_release,                /* release */
15428 +};
15429 +
15430 +static struct notifier_block elan3_reboot_notifier = 
15431 +{
15432 +    notifier_call:     elan3_reboot_event,
15433 +    priority:          0,
15434 +};
15435 +
15436 +#if !defined(NO_PANIC_NOTIFIER)
15437 +
15438 +static int elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer);
15439 +
15440 +static struct notifier_block elan3_panic_notifier = 
15441 +{
15442 +    notifier_call:     elan3_panic_event,
15443 +    priority:          0,
15444 +};
15445 +
15446 +static int
15447 +elan3_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
15448 +{
15449 +    elan3_shutdown_devices (TRUE);
15450 +
15451 +    return (NOTIFY_DONE);
15452 +}
15453 +
15454 +#endif /* !defined(NO_PANIC_NOTIFIER) */
15455 +
15456 +ELAN3_DEV *
15457 +elan3_device (int instance)
15458 +{
15459 +       if (instance < 0 || instance >= ELAN3_MAX_CONTROLLER)
15460 +           return ((ELAN3_DEV *) NULL);
15461 +       return elan3_devices[instance];
15462 +}
15463 +EXPORT_SYMBOL(elan3_device);
15464 +
15465 +/*
15466 + * Called at rmmod time.  elandetach() for each card + general cleanup.
15467 + */
15468 +#ifdef MODULE
15469 +static void __exit elan3_exit(void)
15470 +{
15471 +       int i;
15472 +
15473 +       printk("elan: preparing to remove module\n");
15474 +
15475 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
15476 +       unregister_dump_notifier (&elan3_dump_notifier);
15477 +#endif
15478 +       unregister_reboot_notifier (&elan3_reboot_notifier);
15479 +#if !defined(NO_PANIC_NOTIFIER)
15480 +       notifier_chain_unregister (&panic_notifier_list, &elan3_panic_notifier);
15481 +#endif
15482 +
15483 +       /* call elandetach() for each device configured. */
15484 +       for (i = 0; i < ELAN3_MAX_CONTROLLER; i++)
15485 +               if (elan3_devices[i] != NULL)
15486 +                       elandetach(i);
15487 +
15488 +       FinaliseNetworkErrorResolver();
15489 +       elan3mmu_fini();
15490 +
15491 +       cookie_fini();
15492 +       unregister_chrdev(ELAN3_MAJOR, ELAN3_NAME);
15493 +
15494 +       elan3_procfs_fini();
15495 +
15496 +       printk("elan: module removed\n");
15497 +}
15498 +
15499 +/*
15500 + * Called at insmod time.  First we perform general driver initialization,
15501 + * then call elanattach() for each card.
15502 + */
15503 +#ifdef MODULE
15504 +static int __init elan3_init(void)
15505 +#else
15506 +__initfunc(int elan3_init(void))
15507 +#endif
15508 +{
15509 +       int e;
15510 +       int boards;
15511 +       struct pci_dev *dev;
15512 +       char revid;
15513 +
15514 +       elan_reg_rec_index=0;
15515 +       {
15516 +           int i;
15517 +           for(i=0;i<ELAN_REG_REC_MAX;i++)
15518 +               elan_reg_rec_file[i] = NULL;
15519 +       }       
15520 +
15521 +       /* register major/minor num */
15522 +       e = register_chrdev(ELAN3_MAJOR, ELAN3_NAME, &elan3_fops);
15523 +       if (e < 0)
15524 +               return e;
15525 +
15526 +       elan3_procfs_init ();
15527 +
15528 +       cookie_init();
15529 +       elan3mmu_init();
15530 +       InitialiseNetworkErrorResolver();
15531 +
15532 +       /* call elanattach() for each device found on PCI */
15533 +       memset(elan3_devices, 0, sizeof(elan3_devices));
15534 +       boards = 0;
15535 +       for (dev = NULL; (dev = pci_find_device(PCI_VENDOR_ID_QUADRICS, PCI_DEVICE_ID_ELAN3, dev)) != NULL ;) 
15536 +       {
15537 +           pci_read_config_byte (dev, PCI_REVISION_ID, &revid);
15538 +
15539 +           if (revid == PCI_REVISION_ID_ELAN3_REVA)
15540 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
15541 +               printk ("elan at pci %s - RevA device not supported\n", dev->slot_name);
15542 +#else
15543 +               printk ("elan at pci %s - RevA device not supported\n", pci_name(dev));
15544 +#endif
15545 +           else
15546 +           {
15547 +               if (boards < ELAN3_MAX_CONTROLLER)
15548 +                       /* Count successfully attached devices */ 
15549 +                       boards += ((elanattach(boards, dev) == 0) ? 1 : 0);
15550 +               else
15551 +               {
15552 +                   printk ("elan: max controllers = %d\n", ELAN3_MAX_CONTROLLER);
15553 +                   break;
15554 +               }
15555 +           }
15556 +       }
15557 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
15558 +       register_dump_notifier (&elan3_dump_notifier);
15559 +#endif
15560 +       register_reboot_notifier (&elan3_reboot_notifier);
15561 +#if !defined(NO_PANIC_NOTIFIER)
15562 +       notifier_chain_register (&panic_notifier_list, &elan3_panic_notifier);
15563 +#endif
15564 +
15565 +       return 0;
15566 +}
15567 +
15568 +/* Declare the module init and exit functions */
15569 +module_init(elan3_init);
15570 +module_exit(elan3_exit);
15571 +
15572 +#endif
15573 +
15574 +static void
15575 +elan3_shutdown_devices(int panicing)
15576 +{
15577 +    ELAN3_DEV *dev;
15578 +    unsigned long flags;
15579 +    register int i;
15580 +
15581 +    local_irq_save (flags);
15582 +    for (i = 0; i < ELAN3_MAX_CONTROLLER; i++)
15583 +    {
15584 +       if ((dev = elan3_devices[i]) != NULL)
15585 +       {
15586 +           if (! panicing) spin_lock (&dev->IntrLock);
15587 +           
15588 +           printk(KERN_INFO "elan%d: forcing link into reset\n", dev->Instance);
15589 +
15590 +           /*
15591 +            * We're going to set the link into boundary scan mode,  so firstly
15592 +            * set the inputters to discard everything.
15593 +            */
15594 +           if (dev->DiscardAllCount++ == 0)
15595 +               SetSchedStatusRegister (dev, read_reg32 (dev, Exts.InterruptReg), NULL);
15596 +
15597 +           dev->LinkShutdown = 1;
15598 +           
15599 +           /*
15600 +            * Now disable the error interrupts
15601 +            */
15602 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
15603 +           
15604 +           /*
15605 +            * And set the link into boundary scan mode, and drive
15606 +            * a reset token onto the link.
15607 +            */
15608 +           SET_SCHED_LINK_VALUE (dev, 1, LinkResetToken);
15609 +
15610 +           if (! panicing) spin_unlock (&dev->IntrLock);
15611 +       }
15612 +    }
15613 +    local_irq_restore (flags);
15614 +}
15615 +
15616 +static int
15617 +elan3_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
15618 +{
15619 +    if (! (event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
15620 +       return (NOTIFY_DONE);
15621 +
15622 +    elan3_shutdown_devices (FALSE);
15623 +
15624 +    return (NOTIFY_DONE);
15625 +}
15626 +
15627 +#include <elan3/elan3ops.h>
15628 +/*
15629 + * Called by init_module() for each card discovered on PCI.
15630 + */
15631 +static int
15632 +elanattach(int instance, struct pci_dev *pcidev)
15633 +{
15634 +       ELAN3_DEV *dev;
15635 +       int ramSize;
15636 +       int level;
15637 +       ioaddr_t sdramAddr, cmdPortAddr, intPalAddr;
15638 +       DeviceMappingHandle handle;
15639 +
15640 +       printk("elan%d: attach, irq=%d\n", instance, pcidev->irq);
15641 +
15642 +       /*
15643 +        * Allocate the ELAN3_DEV structure.
15644 +        */
15645 +       KMEM_ZALLOC(dev, ELAN3_DEV *, sizeof(ELAN3_DEV), TRUE);
15646 +       if (dev == NULL) {
15647 +               printk ("elan%d: KMEM_ALLOC failed\n", instance);
15648 +               return (-ENOMEM);
15649 +       }
15650 +       elan3_devices[instance] = dev;
15651 +       dev->Osdep.pci = pcidev;
15652 +
15653 +       dev->Instance = instance;
15654 +
15655 +       /* Initialise the device information */
15656 +       pci_read_config_word (pcidev, PCI_VENDOR_ID,   &dev->Devinfo.dev_vendor_id);
15657 +       pci_read_config_word (pcidev, PCI_DEVICE_ID,   &dev->Devinfo.dev_device_id);
15658 +       pci_read_config_byte (pcidev, PCI_REVISION_ID, &dev->Devinfo.dev_revision_id);
15659 +
15660 +       dev->Devinfo.dev_instance             = instance;
15661 +       dev->Devinfo.dev_rail                 = instance;
15662 +       dev->Devinfo.dev_driver_version       = 0;
15663 +       dev->Devinfo.dev_num_down_links_value = DownLinks;
15664 +
15665 +       dev->Position.pos_mode                = ELAN_POS_UNKNOWN;
15666 +       dev->Position.pos_random_disabled     = RandomRoutingDisabled;
15667 +       
15668 +       /*
15669 +        * Set up PCI config regs.
15670 +        */
15671 +       if (ConfigurePci(dev) != ESUCCESS)
15672 +           goto fail0;
15673 +
15674 +       /*
15675 +        * Determine the PFnums of the SDRAM and command port
15676 +        */
15677 +       if (MapDeviceRegister(dev, ELAN3_BAR_SDRAM, &sdramAddr, 0, PAGESIZE, &handle) != ESUCCESS)
15678 +           goto fail1;
15679 +
15680 +       DeviceRegisterSize(dev, ELAN3_BAR_SDRAM, &ramSize);
15681 +       
15682 +       dev->SdramPhysMask = ~((physaddr_t) ramSize - 1);
15683 +       dev->SdramPhysBase = kmem_to_phys((void *) sdramAddr);
15684 +
15685 +       UnmapDeviceRegister (dev, &handle);
15686 +
15687 +#if defined(LINUX_ALPHA)
15688 +       /*
15689 +        * consider a physical address to be on the same pci bus
15690 +        * as us if it's physical address is "close" to our sdram
15691 +        * physical address.
15692 +        * this is almost certainly incorrect for large memory (> 2Gb)
15693 +        * i386 machines - and is only correct for alpha for 32 bit
15694 +        * base address registers.
15695 +        *
15696 +        * Modified this to match the Tru64 driver value;
15697 +        * i.e. PciPhysMask = 0xfffffffffffc0000
15698 +        */
15699 +#  define PCI_ADDR_MASK (0x7FFFFFFFl)
15700 +
15701 +       dev->PciPhysMask = ~PCI_ADDR_MASK;
15702 +       dev->PciPhysBase = dev->SdramPhysBase & dev->PciPhysMask;
15703 +#endif
15704 +       /*
15705 +        * Now reset the elan chip.
15706 +        */
15707 +       if (MapDeviceRegister(dev, ELAN3_BAR_REGISTERS, &dev->RegPtr, 0, 0, &dev->RegHandle) != ESUCCESS)
15708 +           goto fail1;
15709 +
15710 +       if (MapDeviceRegister(dev, ELAN3_BAR_EBUS, &intPalAddr, ELAN3_EBUS_INTPAL_OFFSET, PAGESIZE,
15711 +                             &handle) != ESUCCESS)
15712 +           goto fail2;
15713 +
15714 +       ResetElan(dev, intPalAddr);     
15715 +
15716 +       UnmapDeviceRegister (dev, &handle);
15717 +
15718 +       /* 
15719 +        * Initialise the device mutex's which must be accessible from the 
15720 +        * interrupt handler.  
15721 +        */
15722 +       kcondvar_init (&dev->IntrWait);
15723 +       spin_lock_init (&dev->IntrLock);
15724 +       spin_lock_init (&dev->TlbLock);
15725 +       spin_lock_init (&dev->CProcLock);
15726 +       spin_lock_init (&dev->FreeHaltLock);
15727 +       for(level=0; level<4; level++)
15728 +           spin_lock_init (&dev->Level[level].PtblLock);
15729 +       spin_lock_init (&dev->PtblGroupLock);
15730 +
15731 +       /*
15732 +        * Add the interrupt handler,  
15733 +        */
15734 +       if (request_irq(dev->Osdep.pci->irq, InterruptHandlerWrapper, 
15735 +           SA_SHIRQ, "elan3", dev) != 0) {
15736 +               printk ("elan%d: request_irq failed\n", instance);
15737 +               goto fail3;
15738 +       }
15739 +
15740 +       if (MapDeviceRegister(dev, ELAN3_BAR_COMMAND_PORT, &cmdPortAddr, 0, PAGESIZE, &handle) != ESUCCESS)
15741 +           goto fail4;
15742 +       
15743 +       if (InitialiseElan(dev, cmdPortAddr) == EFAIL) {
15744 +               printk ("elan%d: InitialiseElan failed\n", instance);
15745 +               UnmapDeviceRegister (dev, &handle);
15746 +               goto fail4;
15747 +       }
15748 +       UnmapDeviceRegister (dev, &handle);
15749 +
15750 +       /* If our nodeid is defined, then set it now */
15751 +       if (NodeId != ELAN3_INVALID_NODE && ComputePosition (&dev->Position, NodeId, NumNodes, DownLinks) == 0)
15752 +       {
15753 +           if (RandomRoutingDisabled & ((1 << (dev->Position.pos_levels-1))-1))
15754 +               printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing disabled 0x%x)\n", 
15755 +                       dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes, RandomRoutingDisabled);
15756 +           else
15757 +               printk ("elan%d: NodeId=%d NodeLevel=%d NumNodes=%d (random routing ok)\n",
15758 +                       dev->Instance, dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes);
15759 +       }
15760 +
15761 +       if (BackToBackMaster || BackToBackSlave)
15762 +       {
15763 +           dev->Position.pos_mode     = ELAN_POS_MODE_BACKTOBACK;
15764 +           dev->Position.pos_nodeid   = (BackToBackMaster == 0);
15765 +           dev->Position.pos_nodes    = 2;
15766 +           dev->Position.pos_levels   = 1;
15767 +           dev->Position.pos_arity[0] = 2;
15768 +
15769 +           printk ("elan%d: back-to-back %s - elan node %d\n", dev->Instance,
15770 +                   BackToBackMaster ? "master" : "slave", dev->Position.pos_nodeid);
15771 +       }
15772 +
15773 +       elan3_procfs_device_init (dev);
15774 +       
15775 +       /* Success */
15776 +       return (0);
15777 +
15778 +fail4:
15779 +       free_irq(dev->Osdep.pci->irq, dev);
15780 +
15781 +fail3:
15782 +       kcondvar_destroy (&dev->IntrWait);
15783 +       spin_lock_destroy (&dev->IntrLock);
15784 +       spin_lock_destroy (&dev->InfoLock);
15785 +       spin_lock_destroy (&dev->TlbLock);
15786 +       spin_lock_destroy (&dev->CProcLock);
15787 +       spin_lock_destroy (&dev->FreeHaltLock);
15788 +       spin_lock_destroy (&dev->Level1PtblLock);
15789 +       spin_lock_destroy (&dev->Level2PtblLock);
15790 +       spin_lock_destroy (&dev->Level3PtblLock);
15791 +       spin_lock_destroy (&dev->PtblGroupLock);
15792 +
15793 +fail2:
15794 +       UnmapDeviceRegister (dev, &dev->RegHandle);
15795 +
15796 +fail1:
15797 +       pci_disable_device (dev->Osdep.pci);
15798 +fail0:
15799 +       KMEM_FREE(dev, sizeof(ELAN3_DEV));
15800 +
15801 +       elan3_devices[instance] = NULL;
15802 +       
15803 +       /* Failure */
15804 +       return (-ENODEV);
15805 +}
15806 +
15807 +/*
15808 + * Called by elan3_exit() for each board found on PCI.
15809 + */
15810 +static int
15811 +elandetach(int instance)
15812 +{
15813 +       ELAN3_DEV *dev = elan3_devices[instance];
15814 +
15815 +       printk("elan%d: detach\n", instance);
15816 +
15817 +       elan3_procfs_device_fini (dev);
15818 +
15819 +       FinaliseElan (dev);
15820 +
15821 +       UnmapDeviceRegister (dev, &dev->RegHandle);
15822 +
15823 +       free_irq(dev->Osdep.pci->irq, dev);
15824 +
15825 +       pci_disable_device(dev->Osdep.pci);
15826 +
15827 +       kcondvar_destroy (&dev->IntrWait);
15828 +       spin_lock_destroy (&dev->IntrLock);
15829 +       spin_lock_destroy (&dev->InfoLock);
15830 +       spin_lock_destroy (&dev->TlbLock);
15831 +       spin_lock_destroy (&dev->CProcLock);
15832 +       spin_lock_destroy (&dev->FreeHaltLock);
15833 +       spin_lock_destroy (&dev->Level1PtblLock);
15834 +       spin_lock_destroy (&dev->Level2PtblLock);
15835 +       spin_lock_destroy (&dev->Level3PtblLock);
15836 +       spin_lock_destroy (&dev->PtblGroupLock);
15837 +
15838 +       KMEM_FREE(dev, sizeof(ELAN3_DEV));
15839 +       elan3_devices[instance] = NULL; 
15840 +
15841 +       return 0;
15842 +}
15843 +
15844 +/*
15845 + * generic ioctls - available on control and user devices.
15846 + */
15847 +
15848 +static int
15849 +device_stats_ioctl (ELAN3_DEV *dev, unsigned long arg)
15850 +{
15851 +    ELAN3IO_STATS_STRUCT *args;
15852 +
15853 +    KMEM_ALLOC(args, ELAN3IO_STATS_STRUCT *, sizeof(ELAN3IO_STATS_STRUCT), TRUE);
15854 +       
15855 +    if (args == NULL)
15856 +       return (-ENOMEM);
15857 +
15858 +    if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_STATS_STRUCT)))
15859 +    {
15860 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15861 +       return (-EFAULT);
15862 +    }
15863 +
15864 +    switch (args->which)
15865 +    {
15866 +    case ELAN3_SYS_STATS_DEVICE:
15867 +       if (copy_to_user (args->ptr, &dev->Stats, sizeof (ELAN3_STATS)))
15868 +       {
15869 +           KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15870 +           return (-EFAULT);
15871 +       }
15872 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15873 +       return (0);
15874 +
15875 +    case ELAN3_SYS_STATS_MMU:
15876 +       if (copy_to_user (args->ptr, &elan3mmu_global_stats, sizeof (ELAN3MMU_GLOBAL_STATS)))
15877 +       {
15878 +           KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15879 +           return (-EFAULT);
15880 +       }
15881 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15882 +       return (0);
15883 +           
15884 +    default:
15885 +       KMEM_FREE(args, sizeof(ELAN3IO_STATS_STRUCT));
15886 +       return (-EINVAL);
15887 +    }
15888 +}
15889 +
15890 +/*
15891 + * /dev/elan3/controlX - control device
15892 + *
15893 + */
15894 +
15895 +typedef struct control_private
15896 +{
15897 +    u_int              pr_boundary_scan;
15898 +} CONTROL_PRIVATE;
15899 +
15900 +static int
15901 +control_open (struct inode *inode, struct file *file)
15902 +{
15903 +    CONTROL_PRIVATE *pr;
15904 +
15905 +    KMEM_ALLOC(pr, CONTROL_PRIVATE *, sizeof (CONTROL_PRIVATE), TRUE);
15906 +
15907 +    if (pr == NULL)
15908 +       return (-ENOMEM);
15909 +
15910 +    pr->pr_boundary_scan = 0;
15911 +    
15912 +    file->private_data = (void *) pr;
15913 +
15914 +    MOD_INC_USE_COUNT;
15915 +
15916 +    return (0);
15917 +}
15918 +
15919 +static int
15920 +control_release (struct inode *inode, struct file *file)
15921 +{
15922 +    ELAN3_DEV        *dev = elan3_devices[ELAN3_DEVICE(inode)];
15923 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
15924 +
15925 +    if (pr->pr_boundary_scan)
15926 +       ClearLinkBoundaryScan(dev);
15927 +
15928 +    KMEM_FREE (pr, sizeof(CONTROL_PRIVATE));
15929 +
15930 +    MOD_DEC_USE_COUNT;
15931 +    return (0);
15932 +}
15933 +
15934 +static int
15935 +control_ioctl (struct inode *inode, struct file *file, 
15936 +              unsigned int cmd, unsigned long arg)
15937 +{
15938 +    ELAN3_DEV        *dev = elan3_devices[ELAN3_DEVICE(inode)];
15939 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
15940 +    int                     res;
15941 +
15942 +    switch (cmd) 
15943 +    {
15944 +    case ELAN3IO_SET_BOUNDARY_SCAN:
15945 +       if (SetLinkBoundaryScan (dev) == 0)
15946 +           pr->pr_boundary_scan = 1;
15947 +       return (0);
15948 +
15949 +    case ELAN3IO_CLEAR_BOUNDARY_SCAN:
15950 +       if (pr->pr_boundary_scan == 0)
15951 +           return (-EINVAL);
15952 +
15953 +       pr->pr_boundary_scan = 0;
15954 +
15955 +       ClearLinkBoundaryScan (dev);
15956 +       return (0);
15957 +
15958 +    case ELAN3IO_READ_LINKVAL:
15959 +    {
15960 +       E3_uint32 val;
15961 +
15962 +       if (pr->pr_boundary_scan == 0)
15963 +           return (-EINVAL);
15964 +
15965 +       if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32)))
15966 +           return (-EFAULT);
15967 +
15968 +       val = ReadBoundaryScanValue (dev, val);
15969 +
15970 +       if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32)))
15971 +           return (-EFAULT);
15972 +       return (0);
15973 +    }
15974 +       
15975 +    case ELAN3IO_WRITE_LINKVAL:
15976 +    {
15977 +       E3_uint32 val;
15978 +
15979 +       if (pr->pr_boundary_scan == 0)
15980 +           return (-EINVAL);
15981 +
15982 +       if (copy_from_user(&val, (E3_uint32 *)arg, sizeof(E3_uint32)))
15983 +           return (-EFAULT);
15984 +
15985 +       val = WriteBoundaryScanValue (dev, val);
15986 +
15987 +       if (copy_to_user((E3_uint32 *)arg, &val, sizeof(E3_uint32)))
15988 +           return (-EFAULT);
15989 +       
15990 +       return (0);
15991 +    }
15992 +
15993 +    case ELAN3IO_SET_POSITION:
15994 +    {
15995 +       ELAN3IO_SET_POSITION_STRUCT args;
15996 +
15997 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_POSITION_STRUCT)))
15998 +           return (-EFAULT);
15999 +       
16000 +       if (ComputePosition (&dev->Position, args.nodeId, args.numNodes, dev->Devinfo.dev_num_down_links_value) != 0)
16001 +           return (-EINVAL);
16002 +
16003 +       return (0);
16004 +    }
16005 +
16006 +    case ELAN3IO_SET_DEBUG:
16007 +    {
16008 +       ELAN3IO_SET_DEBUG_STRUCT args;
16009 +
16010 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SET_DEBUG_STRUCT)))
16011 +           return (-EFAULT);
16012 +
16013 +       if (! strcmp (args.what, "elan3_debug"))
16014 +           elan3_debug = args.value;
16015 +       else if (! strcmp (args.what, "elan3_debug_console"))
16016 +           elan3_debug_console = args.value;
16017 +       else if (! strcmp (args.what, "elan3_debug_buffer"))
16018 +           elan3_debug_buffer = args.value;
16019 +       else if (! strcmp (args.what, "elan3_debug_ignore_dev"))
16020 +           elan3_debug_ignore_dev = args.value;
16021 +       else if (! strcmp (args.what, "elan3_debug_ignore_ctxt"))
16022 +           elan3_debug_ignore_ctxt = args.value;
16023 +       else if (! strcmp (args.what, "elan3mmu_debug"))
16024 +           elan3mmu_debug = args.value;
16025 +       
16026 +       return (0);
16027 +    }
16028 +
16029 +    case ELAN3IO_NETERR_SERVER:
16030 +    {
16031 +       ELAN3IO_NETERR_SERVER_STRUCT args;
16032 +
16033 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_NETERR_SERVER_STRUCT)))
16034 +           return (-EFAULT);
16035 +       
16036 +       res = AddNeterrServerSyscall (args.elanid, args.addr, args.name, NULL);
16037 +       return (set_errno (res));
16038 +    }
16039 +    
16040 +    case ELAN3IO_NETERR_FIXUP:
16041 +    {
16042 +       NETERR_MSG *msg;
16043 +
16044 +       KMEM_ALLOC(msg, NETERR_MSG *, sizeof (NETERR_MSG), TRUE);
16045 +
16046 +       if (msg == NULL)
16047 +           return (set_errno (ENOMEM));
16048 +       
16049 +       if (copy_from_user (msg, (void *) arg, sizeof (NETERR_MSG)))
16050 +           res = EFAULT;
16051 +       else
16052 +           res = ExecuteNetworkErrorFixup (msg);
16053 +
16054 +       KMEM_FREE (msg, sizeof (NETERR_MSG));
16055 +       return (set_errno (res));
16056 +    }
16057 +
16058 +    case ELAN3IO_STATS:
16059 +       return (device_stats_ioctl (dev, arg));
16060 +
16061 +    case ELAN3IO_GET_DEVINFO:
16062 +    {
16063 +       if (copy_to_user ((void *) arg, &dev->Devinfo, sizeof (ELAN_DEVINFO)))
16064 +           return (-EFAULT);
16065 +       return (0);
16066 +    }
16067 +
16068 +    case ELAN3IO_GET_POSITION:
16069 +    {
16070 +       if (copy_to_user ((void *) arg, &dev->Position, sizeof (ELAN_POSITION)))
16071 +           return (-EFAULT);
16072 +       return (0);
16073 +    }
16074 +    default:
16075 +       return (-EINVAL);
16076 +    }
16077 +}
16078 +
16079 +static int
16080 +control_mmap (struct file *file, struct vm_area_struct *vma)
16081 +{
16082 +    ELAN3_DEV         *dev   = elan3_devices[ELAN3_DEVICE(file->f_dentry->d_inode)];
16083 +    int                space = OFF_TO_SPACE(vma->vm_pgoff << PAGE_SHIFT);
16084 +    int                off   = OFF_TO_OFFSET(vma->vm_pgoff << PAGE_SHIFT);
16085 +    int                size;
16086 +    ioaddr_t           addr;
16087 +    DeviceMappingHandle handle;
16088 +    physaddr_t         phys;
16089 +    
16090 +    if (space < ELAN3_BAR_SDRAM || space > ELAN3_BAR_EBUS)
16091 +       return (-EINVAL);
16092 +
16093 +    if (off < 0 || DeviceRegisterSize (dev, space, &size) != ESUCCESS || off > size)
16094 +       return (-EINVAL);
16095 +
16096 +    if (MapDeviceRegister(dev, space, &addr, off, PAGESIZE, &handle) != ESUCCESS)
16097 +       return (-EINVAL);
16098 +
16099 +    phys = kmem_to_phys((caddr_t) addr);
16100 +    UnmapDeviceRegister(dev, &handle);
16101 +
16102 +    if (__remap_page_range(vma->vm_start, phys, vma->vm_end - vma->vm_start, vma->vm_page_prot))
16103 +       return (-EAGAIN);
16104 +
16105 +    return (0);
16106 +}
16107 +
16108 +/*
16109 + * /dev/elan3/sdramX - sdram access device
16110 + */
16111 +typedef struct mem_page
16112 +{
16113 +    struct mem_page *pg_next;
16114 +    sdramaddr_t      pg_addr;
16115 +    u_long          pg_pgoff;
16116 +    u_int           pg_ref;
16117 +} MEM_PAGE;
16118 +
16119 +#define MEM_HASH_SIZE  32
16120 +#define MEM_HASH(pgoff)        ((pgoff) & (MEM_HASH_SIZE-1))
16121 +
16122 +typedef struct mem_private
16123 +{
16124 +    ELAN3_DEV   *pr_dev;
16125 +    MEM_PAGE   *pr_pages[MEM_HASH_SIZE];
16126 +    spinlock_t  pr_lock;
16127 +} MEM_PRIVATE;
16128 +
16129 +static void 
16130 +mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
16131 +{
16132 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
16133 +
16134 +    elan3_sdram_free (pr->pr_dev, pg->pg_addr, PAGE_SIZE);
16135 +    KMEM_FREE (pg, sizeof(MEM_PAGE));
16136 +}
16137 +
16138 +static MEM_PAGE *
16139 +mem_getpage (MEM_PRIVATE *pr, u_long pgoff, virtaddr_t addr)
16140 +{
16141 +    int       hashval = MEM_HASH (pgoff);
16142 +    MEM_PAGE *npg = NULL;
16143 +    MEM_PAGE *pg;
16144 +
16145 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx\n", pr, pgoff, addr);
16146 +    
16147 + again:
16148 +    spin_lock (&pr->pr_lock);
16149 +    for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
16150 +       if (pg->pg_pgoff == pgoff)
16151 +           break;
16152 +    
16153 +    if (pg != NULL)
16154 +    {
16155 +       PRINTF (DBG_DEVICE, DBG_SEG, "mem_getpage: pr=%p pgoff=%lx addr=%lx -> found %p addr=%lx\n", pr, pgoff, addr, pg, pg->pg_addr);
16156 +
16157 +       pg->pg_ref++;
16158 +       spin_unlock (&pr->pr_lock);
16159 +
16160 +       if (npg != NULL)                                        /* we'd raced and someone else had created */
16161 +           mem_freepage (pr, npg);                             /* this page - so free off our new one */
16162 +       return (pg);
16163 +    }
16164 +    
16165 +    if (npg != NULL)                                           /* didn't find the page, so insert the */
16166 +    {                                                          /* new one we've just created */
16167 +       npg->pg_next = pr->pr_pages[hashval];
16168 +       pr->pr_pages[hashval] = npg;
16169 +       
16170 +       spin_unlock (&pr->pr_lock);
16171 +       return (npg);
16172 +    }
16173 +    
16174 +    spin_unlock (&pr->pr_lock);                                        /* drop spinlock before creating a new page */
16175 +    
16176 +    KMEM_ALLOC(npg, MEM_PAGE *, sizeof (MEM_PAGE), TRUE);
16177 +
16178 +    if (npg == NULL)
16179 +       return (NULL);
16180 +
16181 +    if ((npg->pg_addr = elan3_sdram_alloc (pr->pr_dev, PAGE_SIZE)) == 0)
16182 +    {
16183 +       KMEM_FREE (npg, sizeof (MEM_PAGE));
16184 +       return (NULL);
16185 +    }
16186 +
16187 +    /* zero the page before returning it to the user */
16188 +    elan3_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, PAGE_SIZE);
16189 +    
16190 +    npg->pg_pgoff = pgoff;
16191 +    npg->pg_ref   = 1;
16192 +    
16193 +    /* created a new page - so have to rescan before inserting it */
16194 +    goto again;
16195 +}
16196 +
16197 +static void
16198 +mem_droppage (MEM_PRIVATE *pr, u_long pgoff, int dontfree)
16199 +{
16200 +    MEM_PAGE **ppg;
16201 +    MEM_PAGE  *pg;
16202 +
16203 +    spin_lock (&pr->pr_lock);
16204 +    for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
16205 +       if ((*ppg)->pg_pgoff == pgoff)
16206 +           break;
16207 +
16208 +    pg = *ppg;
16209 +
16210 +    ASSERT (*ppg != NULL);
16211 +    
16212 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
16213 +
16214 +    if (--pg->pg_ref == 0 && !dontfree)
16215 +    {
16216 +       *ppg = pg->pg_next;
16217 +
16218 +       mem_freepage (pr, pg);
16219 +    }
16220 +
16221 +    spin_unlock (&pr->pr_lock);
16222 +}
16223 +
16224 +static int
16225 +mem_open (struct inode *inode, struct file *file)
16226 +{
16227 +    ELAN3_DEV    *dev = elan3_devices[ELAN3_DEVICE(inode)];
16228 +    MEM_PRIVATE *pr;
16229 +    register int i;
16230 +
16231 +    KMEM_ALLOC(pr, MEM_PRIVATE *, sizeof (MEM_PRIVATE), TRUE);
16232 +
16233 +    if (pr == NULL)
16234 +       return (-ENOMEM);
16235 +
16236 +    spin_lock_init (&pr->pr_lock);
16237 +    pr->pr_dev = dev;
16238 +    for (i = 0; i < MEM_HASH_SIZE; i++)
16239 +       pr->pr_pages[i] = NULL;
16240 +
16241 +    file->private_data = (void *) pr;
16242 +    
16243 +    MOD_INC_USE_COUNT;
16244 +    return (0);
16245 +}
16246 +
16247 +static int
16248 +mem_release (struct inode *node, struct file *file)
16249 +{
16250 +    MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data;
16251 +    MEM_PAGE    *pg, *next;
16252 +    int          i;
16253 +
16254 +    /* free off any pages that we'd allocated */
16255 +    spin_lock (&pr->pr_lock);
16256 +    for (i = 0; i < MEM_HASH_SIZE; i++)
16257 +    {
16258 +       for (pg = pr->pr_pages[i]; pg; pg = next)
16259 +       {
16260 +           next = pg->pg_next;
16261 +           mem_freepage (pr, pg);
16262 +       }
16263 +    }
16264 +    spin_unlock (&pr->pr_lock);
16265 +
16266 +    KMEM_FREE (pr, sizeof (MEM_PRIVATE));
16267 +
16268 +    MOD_DEC_USE_COUNT;
16269 +    return (0);
16270 +}
16271 +
16272 +static int
16273 +mem_ioctl (struct inode *inode, struct file *file, 
16274 +                 unsigned int cmd, unsigned long arg)
16275 +{
16276 +    return (-EINVAL);
16277 +}
16278 +
16279 +static void mem_vma_open(struct vm_area_struct *vma)
16280 +{
16281 +    MEM_PRIVATE   *pr = (MEM_PRIVATE *) vma->vm_private_data;
16282 +    unsigned long addr;
16283 +    unsigned long pgoff;
16284 +
16285 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
16286 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
16287 +
16288 +    preemptable_start {
16289 +       for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++) {
16290 +           mem_getpage (pr, pgoff, addr);
16291 +           preemptable_check();
16292 +       }
16293 +    } preemptable_end;
16294 +}
16295 +
16296 +static void mem_vma_close(struct vm_area_struct *vma)
16297 +{
16298 +    MEM_PRIVATE  *pr  = (MEM_PRIVATE *) vma->vm_private_data;
16299 +    unsigned long addr;
16300 +    unsigned long pgoff;
16301 +
16302 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
16303 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
16304 +
16305 +    /* NOTE: the call to close may not have the same vm_start/vm_end values as 
16306 +     *       were passed into mmap()/open() - since if a partial unmap had occurred
16307 +     *       then the vma could have been shrunk or even split.
16308 +     *
16309 +     *       if the vma is split then a vma_open() will be called for the top
16310 +     *       portion - thus causing the reference counts to become incorrect.
16311 +     *
16312 +     * We drop the reference to any pages we're notified about - so they get freed
16313 +     * earlier than when the device is finally released.
16314 +     */
16315 +    for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
16316 +       mem_droppage (pr, pgoff, 0);
16317 +}
16318 +
16319 +static struct vm_operations_struct mem_vm_ops = {
16320 +    open:              mem_vma_open,
16321 +    close:             mem_vma_close,
16322 +};
16323 +
16324 +static int
16325 +mem_mmap (struct file *file, struct vm_area_struct *vma)
16326 +{
16327 +    MEM_PRIVATE  *pr = (MEM_PRIVATE *) file->private_data;
16328 +    MEM_PAGE     *pg;
16329 +    unsigned long addr;
16330 +    unsigned long pgoff;
16331 +
16332 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx prot=%llx file=%p\n",
16333 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, (long long)vma->vm_page_prot.pgprot , file);
16334 +
16335 +    preemptable_start {
16336 +       for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
16337 +       {
16338 +           if ((pg = mem_getpage (pr, pgoff, addr)) == NULL)
16339 +               goto failed;
16340 +           
16341 +#ifdef LINUX_SPARC
16342 +           pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
16343 +           pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
16344 +#elif defined(pgprot_noncached)
16345 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
16346 +#endif
16347 +           
16348 +#if defined(__ia64__)
16349 +           if (enable_sdram_writecombining)
16350 +               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
16351 +#endif
16352 +           PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: addr %lx -> pg=%p addr=%lx phys=%llx flags=%lx prot=%llx\n",
16353 +                   addr, pg, pg->pg_addr, (long long) elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), vma->vm_flags,  (long long)vma->vm_page_prot.pgprot);
16354 +           
16355 +           if (__remap_page_range(addr, elan3_sdram_to_phys (pr->pr_dev, pg->pg_addr), PAGE_SIZE, vma->vm_page_prot))
16356 +           {
16357 +               mem_droppage (pr, pgoff, 0);                    /* drop our reference to this page */
16358 +               goto failed;
16359 +           }
16360 +
16361 +           preemptable_check();
16362 +       }
16363 +    } preemptable_end;
16364 +
16365 +    /* Don't try to swap out Elan SDRAM pages.. */
16366 +    vma->vm_flags |= VM_RESERVED;
16367 +    
16368 +    /*
16369 +     * Don't dump SDRAM pages to a core file 
16370 +     * (Pity I would really like to do this but it crashes in elf_core_dump() as
16371 +     * it can only handle pages that are in the mem_map area (addy 11/01/2002))
16372 +     */
16373 +    vma->vm_flags |= VM_IO;
16374 +
16375 +    vma->vm_ops          = &mem_vm_ops;
16376 +    vma->vm_file         = file;
16377 +    vma->vm_private_data = (void *) pr;
16378 +
16379 +    return (0);
16380 +
16381 + failed:
16382 +    PRINTF (DBG_DEVICE, DBG_SEG, "mem_mmap: failed\n");
16383 +
16384 +    /* free off any pages we've already allocated/referenced */
16385 +    while (pgoff-- > vma->vm_pgoff)
16386 +       mem_droppage (pr, pgoff, 0);
16387 +
16388 +    return (-ENOMEM);
16389 +}
16390 +
16391 +#if !defined(NO_PTRACK) && defined(IOPROC_PATCH_APPLIED)
16392 +
16393 +#include <linux/ptrack.h>
16394 +
16395 +/*
16396 + * /dev/elan3/userX - control device
16397 + *
16398 + * "user_private" can be referenced from a number of places
16399 + *   1) the "file" structure.
16400 + *   2) the "mm" ioproc ops
16401 + *   3) the "mmap" of the command port.
16402 + *
16403 + */
16404 +typedef struct user_private
16405 +{
16406 +    spinlock_t        pr_lock;
16407 +    atomic_t         pr_mappings;
16408 +    atomic_t          pr_ref;
16409 +    ELAN3_CTXT        *pr_ctxt;
16410 +    struct mm_struct *pr_mm;
16411 +    struct ioproc_ops pr_ioproc;
16412 +} USER_PRIVATE;
16413 +
16414 +static void
16415 +user_free (USER_PRIVATE *pr)
16416 +{
16417 +    /* Have to unreserve the FlagPage or else we leak memory like a sieve! */
16418 +    ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) pr->pr_ctxt->FlagPage)));
16419 +
16420 +    elan3_detach(pr->pr_ctxt);
16421 +    elan3_free (pr->pr_ctxt);
16422 +
16423 +    KMEM_FREE (pr, sizeof(USER_PRIVATE));
16424 +
16425 +    MOD_DEC_USE_COUNT;
16426 +}
16427 +
16428 +static void
16429 +user_ioproc_release (void *arg, struct mm_struct *mm)
16430 +{
16431 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16432 +
16433 +    PRINTF3 (pr->pr_ctxt, DBG_SEG, "user_ioproc_release: ctxt=%p pr=%p ref=%d\n",
16434 +            pr->pr_ctxt, pr, atomic_read (&pr->pr_ref));
16435 +
16436 +    elan3mmu_pte_ctxt_unload (pr->pr_ctxt->Elan3mmu);
16437 +
16438 +    pr->pr_mm = NULL;
16439 +
16440 +    if (atomic_dec_and_test (&pr->pr_ref))
16441 +       user_free (pr);
16442 +}
16443 +
16444 +/*
16445 + * On 2.4 kernels we get passed a mm_struct, whereas on 2.6 kernels
16446 + * we get the vma which is more useful
16447 + */
16448 +#if defined(IOPROC_MM_STRUCT_ARG)
16449 +static void
16450 +user_ioproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
16451 +{
16452 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16453 +
16454 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
16455 +
16456 +    ASSERT(start <= end);
16457 +
16458 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
16459 +}
16460 +
16461 +static void
16462 +user_ioproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
16463 +{
16464 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16465 +
16466 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
16467 +
16468 +    ASSERT(start <= end);
16469 +
16470 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
16471 +}
16472 +
16473 +static void
16474 +user_ioproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
16475 +{
16476 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16477 +
16478 +    ASSERT(start <= end && ((start & PAGEOFFSET) == 0) && ((end & PAGEOFFSET) == 0));
16479 +
16480 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
16481 +
16482 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu, mm,(caddr_t) start, end-start);
16483 +}
16484 +
16485 +static void
16486 +user_ioproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
16487 +{
16488 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16489 +
16490 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
16491 +
16492 +    ASSERT(start <= end);
16493 +
16494 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, mm, (caddr_t) start, end-start);
16495 +}
16496 +
16497 +#else
16498 +
16499 +static void
16500 +user_ioproc_sync_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
16501 +{
16502 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16503 +
16504 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
16505 +
16506 +    ASSERT(start <= end);
16507 +
16508 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16509 +}
16510 +
16511 +static void
16512 +user_ioproc_invalidate_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
16513 +{
16514 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16515 +
16516 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
16517 +
16518 +    ASSERT(start <= end);
16519 +
16520 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16521 +}
16522 +
16523 +static void
16524 +user_ioproc_update_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
16525 +{
16526 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16527 +
16528 +    ASSERT(start <= end && ((start & PAGEOFFSET) == 0) && ((end & PAGEOFFSET) == 0));
16529 +
16530 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
16531 +
16532 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16533 +}
16534 +
16535 +static void
16536 +user_ioproc_change_protection (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
16537 +{
16538 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16539 +
16540 +    PRINTF2 (pr->pr_ctxt, DBG_SEG, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
16541 +
16542 +    ASSERT(start <= end);
16543 +
16544 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) start, end-start);
16545 +}
16546 +#endif /* defined(IOPROC_MM_STRUCT_ARG) */
16547 +
16548 +static void
16549 +user_ioproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
16550 +{
16551 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16552 +
16553 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_sync_page: addr=%lx\n", addr);
16554 +
16555 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
16556 +}
16557 +
16558 +static void
16559 +user_ioproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
16560 +{
16561 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16562 +    
16563 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_invalidate_page: addr=%lx\n", addr);
16564 +
16565 +    elan3mmu_pte_range_unload(pr->pr_ctxt->Elan3mmu, vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
16566 +}
16567 +
16568 +static void
16569 +user_ioproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
16570 +{
16571 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
16572 +
16573 +    PRINTF1 (pr->pr_ctxt, DBG_SEG, "user_ioproc_update_page: addr=%lx\n", addr);
16574 +
16575 +    elan3mmu_pte_range_update (pr->pr_ctxt->Elan3mmu,vma->vm_mm, (caddr_t) (addr & PAGE_MASK), PAGE_SIZE);
16576 +}
16577 +
16578 +int
16579 +user_ptrack_handler (void *arg, int phase, struct task_struct *child)
16580 +{
16581 +    USER_PRIVATE *pr   = (USER_PRIVATE *) arg;
16582 +    ELAN3_CTXT    *ctxt = pr->pr_ctxt;
16583 +
16584 +    PRINTF5 (pr->pr_ctxt, DBG_FN, "user_ptrack_handler: ctxt=%p pr=%p ref=%d phase %d mm->ref %d\n", 
16585 +            pr->pr_ctxt, pr, atomic_read (&pr->pr_ref), phase, atomic_read (&current->mm->mm_count));
16586 +
16587 +    if (phase == PTRACK_PHASE_EXIT)
16588 +    {
16589 +       /* this will force the helper thread to exit */
16590 +       elan3_swapout (ctxt, CTXT_EXITING);
16591 +       
16592 +       if (atomic_dec_and_test (&pr->pr_ref))
16593 +           user_free (pr);
16594 +    }  
16595 +    return PTRACK_FINISHED;
16596 +}
16597 +
16598 +static int
16599 +user_open (struct inode *inode, struct file *file)
16600 +{
16601 +    ELAN3_DEV     *dev = elan3_devices[ELAN3_DEVICE(inode)];
16602 +    USER_PRIVATE *pr;
16603 +    ELAN3_CTXT    *ctxt;
16604 +
16605 +    if (dev == NULL)
16606 +       return (-ENXIO);
16607 +
16608 +    KMEM_ALLOC(pr, USER_PRIVATE *, sizeof (USER_PRIVATE), TRUE);
16609 +
16610 +    if (pr == NULL)
16611 +       return (-ENOMEM);
16612 +    
16613 +    if ((ctxt = elan3_alloc (dev, 0)) == NULL)
16614 +    {
16615 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
16616 +       return (-ENOMEM);
16617 +    }
16618 +
16619 +    if (sys_init (ctxt) == NULL)
16620 +    {
16621 +       elan3_detach(ctxt);
16622 +       elan3_free (ctxt);
16623 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
16624 +       return (-ENOMEM);
16625 +    }
16626 +
16627 +    /* initialise refcnt to 3 - one for "file", one for XA handler, one for the ioproc ops */
16628 +    atomic_set (&pr->pr_ref, 3);
16629 +
16630 +    atomic_set (&pr->pr_mappings, 0);
16631 +    spin_lock_init (&pr->pr_lock);
16632 +
16633 +    pr->pr_ctxt = ctxt;
16634 +    pr->pr_mm   = current->mm;
16635 +
16636 +    /* register an ptrack handler to force the helper thread to exit when we do */
16637 +    if (ptrack_register (user_ptrack_handler, pr) < 0)
16638 +    {
16639 +       elan3_detach(ctxt);
16640 +       elan3_free (ctxt);
16641 +       KMEM_FREE (pr, sizeof (USER_PRIVATE));
16642 +       return (-ENOMEM);
16643 +    }
16644 +
16645 +    /* register a ioproc callback to notify us of translation changes */
16646 +    
16647 +    pr->pr_ioproc.arg               = (void *) pr;
16648 +    pr->pr_ioproc.release           = user_ioproc_release;
16649 +    pr->pr_ioproc.sync_range        = user_ioproc_sync_range;
16650 +    pr->pr_ioproc.invalidate_range  = user_ioproc_invalidate_range;
16651 +    pr->pr_ioproc.update_range      = user_ioproc_update_range;
16652 +    pr->pr_ioproc.change_protection = user_ioproc_change_protection;
16653 +    pr->pr_ioproc.sync_page         = user_ioproc_sync_page;
16654 +    pr->pr_ioproc.invalidate_page   = user_ioproc_invalidate_page;
16655 +    pr->pr_ioproc.update_page       = user_ioproc_update_page;
16656 +    
16657 +    spin_lock (&current->mm->page_table_lock);
16658 +    ioproc_register_ops (current->mm, &pr->pr_ioproc);
16659 +    spin_unlock (&current->mm->page_table_lock);
16660 +
16661 +    file->private_data = (void *) pr;
16662 +
16663 +    PRINTF2 (pr->pr_ctxt, DBG_FN, "user_open: done ctxt=%p pr=%p\n", ctxt, pr);
16664 +
16665 +    MOD_INC_USE_COUNT;
16666 +    return (0);
16667 +}
16668 +
16669 +static int
16670 +user_release (struct inode *inode, struct file *file)
16671 +{
16672 +    USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
16673 +    
16674 +    PRINTF3 (pr->pr_ctxt, DBG_FN, "user_release: ctxt=%p pr=%p ref=%d\n", pr->pr_ctxt, pr,
16675 +            atomic_read (&pr->pr_ref));
16676 +
16677 +    if (atomic_dec_and_test (&pr->pr_ref))
16678 +       user_free (pr);
16679 +
16680 +    return (0);
16681 +}
16682 +
16683 +static int
16684 +user_ioctl (struct inode *inode, struct file *file, 
16685 +           unsigned int cmd, unsigned long arg)
16686 +{
16687 +    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
16688 +    ELAN3_CTXT    *ctxt = pr->pr_ctxt;
16689 +    SYS_CTXT     *sctx = (SYS_CTXT *) ctxt->Private;
16690 +    int           res  = 0;
16691 +
16692 +    if (current->mm != pr->pr_mm)
16693 +       return (-EINVAL);
16694 +    
16695 +    PRINTF4 (ctxt, DBG_FN, "user_ioctl: ctxt=%p cmd=%x(%d) arg=%lx\n", ctxt, cmd, _IOC_NR(cmd), arg);
16696 +
16697 +    switch (cmd)
16698 +    {
16699 +    case ELAN3IO_FREE:
16700 +       if (atomic_read (&pr->pr_mappings) > 0)
16701 +           return (-EINVAL);
16702 +       
16703 +       spin_lock (&current->mm->page_table_lock);
16704 +       if (pr->pr_mm != current->mm)
16705 +           spin_unlock (&current->mm->page_table_lock);
16706 +       else
16707 +       {
16708 +           ioproc_unregister_ops (current->mm, &pr->pr_ioproc);
16709 +           spin_unlock (&current->mm->page_table_lock);
16710 +
16711 +           user_ioproc_release (pr, current->mm);
16712 +       }
16713 +
16714 +       if (ptrack_registered (user_ptrack_handler, pr))
16715 +       {
16716 +           ptrack_deregister (user_ptrack_handler, pr);
16717 +           user_ptrack_handler (pr, PTRACK_PHASE_EXIT, NULL);
16718 +       }
16719 +       break;
16720 +       
16721 +    case ELAN3IO_ATTACH:
16722 +    {
16723 +       ELAN_CAPABILITY *cap;
16724 +
16725 +       KMEM_ALLOC(cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE);
16726 +
16727 +       if (cap == NULL)
16728 +           return (set_errno (EFAULT));
16729 +
16730 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
16731 +           res = EFAULT;
16732 +       else
16733 +       {
16734 +           if ((res = elan3_attach (ctxt, cap)) == 0)
16735 +           {
16736 +               if (copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
16737 +               {
16738 +                   elan3_detach (ctxt);
16739 +                   res = EFAULT;
16740 +               }
16741 +           }
16742 +       }
16743 +       KMEM_FREE (cap, sizeof(ELAN_CAPABILITY));
16744 +       break;
16745 +    }
16746 +    
16747 +    case ELAN3IO_DETACH:
16748 +       spin_lock (&pr->pr_lock);
16749 +       if (atomic_read (&pr->pr_mappings) > 0)
16750 +           res = EINVAL;
16751 +       else
16752 +           elan3_detach (ctxt);
16753 +       spin_unlock (&pr->pr_lock);
16754 +       break;
16755 +
16756 +    case ELAN3IO_ADDVP:
16757 +    {
16758 +       ELAN3IO_ADDVP_STRUCT *args;
16759 +
16760 +       KMEM_ALLOC(args, ELAN3IO_ADDVP_STRUCT *, sizeof (ELAN3IO_ADDVP_STRUCT), TRUE);
16761 +
16762 +       if (args == NULL)
16763 +           return (set_errno (ENOMEM));
16764 +       
16765 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN3IO_ADDVP_STRUCT)))
16766 +           res = EFAULT;
16767 +       else
16768 +       {
16769 +           if ( (res=elan3_addvp (ctxt, args->process, &args->capability)) != 0)
16770 +               PRINTF0 (ctxt, DBG_FN, "ELAN3IO_ADDVP elan3_addvp failed \n");  
16771 +       }
16772 +
16773 +       KMEM_FREE (args, sizeof (ELAN3IO_ADDVP_STRUCT));
16774 +       break;
16775 +    }
16776 +
16777 +    case ELAN3IO_REMOVEVP:
16778 +       res = elan3_removevp (ctxt, arg);
16779 +       break;
16780 +       
16781 +    case ELAN3IO_BCASTVP:
16782 +    {
16783 +       ELAN3IO_BCASTVP_STRUCT args;
16784 +
16785 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_BCASTVP_STRUCT)))
16786 +           return (-EFAULT);
16787 +       
16788 +       res = elan3_addbcastvp (ctxt, args.process, args.lowvp, args.highvp);
16789 +       break;
16790 +    }
16791 +
16792 +    case ELAN3IO_LOAD_ROUTE:
16793 +    {
16794 +       ELAN3IO_LOAD_ROUTE_STRUCT args;
16795 +
16796 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
16797 +           return (-EFAULT);
16798 +       
16799 +       res = elan3_load_route (ctxt, args.process, args.flits);
16800 +       break;
16801 +    }
16802 +
16803 +    case ELAN3IO_CHECK_ROUTE:
16804 +    {
16805 +       ELAN3IO_CHECK_ROUTE_STRUCT args;
16806 +
16807 +       args.routeError = 0;
16808 +
16809 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
16810 +           return (-EFAULT);
16811 +       
16812 +       if ((res = elan3_check_route (ctxt, args.process, args.flits, & args.routeError)) ==  ESUCCESS)
16813 +       {
16814 +           if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_LOAD_ROUTE_STRUCT)))
16815 +               return (-EFAULT);
16816 +       }
16817 +       break;
16818 +    }
16819 +
16820 +    case ELAN3IO_PROCESS_2_LOCATION:
16821 +    {
16822 +       ELAN3IO_PROCESS_2_LOCATION_STRUCT args;
16823 +       ELAN_LOCATION                    loc;
16824 +
16825 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT)))
16826 +           return (-EFAULT);
16827 +
16828 +       krwlock_write (&ctxt->VpLock);
16829 +       loc = ProcessToLocation (ctxt, NULL, args.process , NULL);
16830 +       krwlock_done (&ctxt->VpLock);
16831 +
16832 +       args.loc = loc;
16833 +
16834 +       if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_PROCESS_2_LOCATION_STRUCT)))
16835 +           return (-EFAULT);
16836 +
16837 +       break;
16838 +    }
16839 +
16840 +    case ELAN3IO_GET_ROUTE:
16841 +    {
16842 +       ELAN3IO_GET_ROUTE_STRUCT args;
16843 +
16844 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_ROUTE_STRUCT)))
16845 +           return (-EFAULT);
16846 +       
16847 +       if ((res = elan3_get_route (ctxt, args.process, args.flits)) ==  ESUCCESS)
16848 +       {
16849 +           if (copy_to_user ( (void *) arg, &args,sizeof (ELAN3IO_GET_ROUTE_STRUCT)))
16850 +               return (-EFAULT);
16851 +       }
16852 +       break;
16853 +    }
16854 +
16855 +    case ELAN3IO_RESET_ROUTE:
16856 +    {
16857 +       ELAN3IO_RESET_ROUTE_STRUCT args;
16858 +
16859 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_RESET_ROUTE_STRUCT)))
16860 +           return (-EFAULT);
16861 +       
16862 +       res = elan3_reset_route (ctxt, args.process);
16863 +       break;
16864 +    }
16865 +
16866 +    case ELAN3IO_VP2NODEID:
16867 +    {
16868 +       ELAN3IO_VP2NODEID_STRUCT *vp2nodeId;
16869 +       ELAN_LOCATION           location;
16870 +
16871 +       KMEM_ALLOC (vp2nodeId, ELAN3IO_VP2NODEID_STRUCT *, sizeof(ELAN3IO_VP2NODEID_STRUCT), TRUE);
16872 +       if (vp2nodeId == NULL) 
16873 +           return (set_errno (ENOMEM));
16874 +
16875 +       if (copy_from_user (vp2nodeId, (void *) arg, sizeof (ELAN3IO_VP2NODEID_STRUCT))) {
16876 +           KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
16877 +           return (-EFAULT);
16878 +       }
16879 +
16880 +       krwlock_write (&ctxt->VpLock);
16881 +       location = ProcessToLocation (ctxt, NULL, vp2nodeId->process , NULL);
16882 +       krwlock_done (&ctxt->VpLock);
16883 +
16884 +       vp2nodeId->nodeId = location.loc_node;
16885 +       if (copy_to_user ( (void *) arg, vp2nodeId, sizeof (ELAN3IO_VP2NODEID_STRUCT))) {
16886 +           KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
16887 +           return (-EFAULT);
16888 +       }
16889 +
16890 +       KMEM_FREE (vp2nodeId, sizeof(ELAN3IO_VP2NODEID_STRUCT));
16891 +
16892 +       break;
16893 +    }
16894 +
16895 +    case ELAN3IO_PROCESS:
16896 +       return (elan3_process (ctxt));
16897 +
16898 +    case ELAN3IO_SETPERM:
16899 +    {
16900 +       ELAN3IO_SETPERM_STRUCT args;
16901 +
16902 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_SETPERM_STRUCT)))
16903 +           return (-EFAULT);
16904 +
16905 +       res = elan3mmu_setperm (ctxt->Elan3mmu, args.maddr, args.eaddr, args.len, args.perm);
16906 +       break;
16907 +    }
16908 +
16909 +    case ELAN3IO_CLEARPERM:
16910 +    {
16911 +       ELAN3IO_CLEARPERM_STRUCT args;
16912 +
16913 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CLEARPERM_STRUCT)))
16914 +           return (-EFAULT);
16915 +
16916 +       elan3mmu_clrperm (ctxt->Elan3mmu, args.eaddr, args.len);
16917 +       break;
16918 +    }
16919 +
16920 +    case ELAN3IO_CHANGEPERM:
16921 +    {
16922 +       ELAN3IO_CHANGEPERM_STRUCT args;
16923 +
16924 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_CHANGEPERM_STRUCT)))
16925 +           return (-EFAULT);
16926 +
16927 +       res = EINVAL;
16928 +       break;
16929 +    }
16930 +
16931 +    case ELAN3IO_HELPER_THREAD:
16932 +       res = elan3_lwp (ctxt);
16933 +       break;
16934 +       
16935 +    case ELAN3IO_WAITCOMMAND:
16936 +       res = WaitForCommandPort (ctxt);
16937 +       break;
16938 +
16939 +    case ELAN3IO_BLOCK_INPUTTER:
16940 +       elan3_block_inputter (ctxt, arg);
16941 +       break;
16942 +
16943 +    case ELAN3IO_SET_FLAGS:
16944 +       sctx->Flags = arg;
16945 +       break;
16946 +
16947 +    case ELAN3IO_SET_SIGNAL:
16948 +       sctx->signal = arg;
16949 +       break;
16950 +
16951 +    case ELAN3IO_WAITEVENT:
16952 +       res = sys_waitevent (ctxt, (E3_Event *) arg);
16953 +       break;
16954 +
16955 +    case ELAN3IO_ALLOC_EVENTCOOKIE:
16956 +       res = cookie_alloc_cookie (sctx->Table, arg);
16957 +       break;
16958 +
16959 +    case ELAN3IO_FREE_EVENTCOOKIE:
16960 +       res = cookie_free_cookie (sctx->Table, arg);
16961 +       break;
16962 +
16963 +    case ELAN3IO_ARM_EVENTCOOKIE:
16964 +       res = cookie_arm_cookie (sctx->Table, arg);
16965 +       break;
16966 +
16967 +    case ELAN3IO_WAIT_EVENTCOOKIE:
16968 +       res = cookie_wait_cookie (sctx->Table, arg);
16969 +       break;
16970 +
16971 +    case ELAN3IO_SWAPSPACE:
16972 +       if (fuword (&((SYS_SWAP_SPACE *) arg)->Magic) != SYS_SWAP_MAGIC)
16973 +           return (set_errno (EINVAL));
16974 +       
16975 +       ((SYS_CTXT *) ctxt->Private)->Swap = (SYS_SWAP_SPACE *) arg;
16976 +       break;
16977 +
16978 +    case ELAN3IO_EXCEPTION_SPACE:
16979 +       if (fuword (&((SYS_EXCEPTION_SPACE *) arg)->Magic) != SYS_EXCEPTION_MAGIC)
16980 +           return (set_errno (EINVAL));
16981 +
16982 +       ((SYS_CTXT *) ctxt->Private)->Exceptions = (SYS_EXCEPTION_SPACE *) arg;
16983 +       break;
16984 +
16985 +    case ELAN3IO_GET_EXCEPTION:
16986 +    {
16987 +       SYS_EXCEPTION *exception;
16988 +
16989 +       if (((SYS_CTXT *) ctxt->Private)->Exceptions == NULL)
16990 +           return (set_errno (EINVAL));
16991 +       
16992 +       KMEM_ALLOC(exception, SYS_EXCEPTION *, sizeof (SYS_EXCEPTION), TRUE);
16993 +
16994 +       if (exception == NULL)
16995 +           return (set_errno (ENOMEM));
16996 +
16997 +       if ((res = sys_getException (((SYS_CTXT *) ctxt->Private), exception)) == 0 &&
16998 +           copy_to_user ((void *) arg, exception, sizeof (SYS_EXCEPTION)))
16999 +           res = EFAULT;
17000 +       
17001 +       KMEM_FREE (exception, sizeof (SYS_EXCEPTION));
17002 +       break;
17003 +    }
17004 +    
17005 +    case ELAN3IO_UNLOAD:
17006 +    {
17007 +       ELAN3MMU             *elan3mmu = ctxt->Elan3mmu;
17008 +       ELAN3IO_UNLOAD_STRUCT args;
17009 +       int                   span;
17010 +       unsigned long         flags;
17011 +       E3_Addr               eaddr;
17012 +       caddr_t               addr;
17013 +       size_t                len;
17014 +
17015 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_UNLOAD_STRUCT)))
17016 +           return (-EFAULT);
17017 +
17018 +       addr = (caddr_t) args.addr;
17019 +       len  = args.len;
17020 +
17021 +       if (((unsigned long) addr & PAGEMASK) || (len & PAGEMASK) || (len < 0))
17022 +           return -EINVAL;
17023 +
17024 +       spin_lock_irqsave (&elan3mmu->elan3mmu_lock, flags);
17025 +       for (; len; len -= span, addr += span)
17026 +       {
17027 +           ELAN3MMU_RGN *rgn = elan3mmu_findrgn_main (elan3mmu, addr, 0);
17028 +           
17029 +           if (rgn == NULL || (rgn->rgn_mbase + rgn->rgn_len) < addr)
17030 +               span = len;
17031 +           else if (rgn->rgn_mbase > addr)
17032 +               span = MIN(len, rgn->rgn_mbase - addr);
17033 +           else
17034 +           {
17035 +               span  = MIN(len, (rgn->rgn_mbase + rgn->rgn_len) - addr);
17036 +               eaddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
17037 +               
17038 +               elan3mmu_unload (elan3mmu, eaddr, span, PTE_UNLOAD);
17039 +           }
17040 +       }
17041 +       spin_unlock_irqrestore (&elan3mmu->elan3mmu_lock, flags);
17042 +       
17043 +       return 0;
17044 +    }
17045 +
17046 +    case ELAN3IO_GET_DEVINFO:
17047 +    {
17048 +       ELAN3IO_GET_DEVINFO_STRUCT args;
17049 +
17050 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_DEVINFO_STRUCT)))
17051 +           return (-EFAULT);
17052 +       
17053 +       if (copy_to_user ((void *) args.devinfo, &ctxt->Device->Devinfo, sizeof (ELAN_DEVINFO))) 
17054 +           res = EFAULT;
17055 +       break;
17056 +    }
17057 +
17058 +    case ELAN3IO_GET_POSITION:
17059 +    {
17060 +       ELAN3IO_GET_POSITION_STRUCT args;
17061 +
17062 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN3IO_GET_POSITION_STRUCT)))
17063 +           return (-EFAULT);   
17064 +
17065 +       if (copy_to_user ((void *) args.position, &ctxt->Device->Position, sizeof (ELAN_POSITION)))
17066 +           res = EFAULT;
17067 +       break;
17068 +    }
17069 +
17070 +    default:
17071 +       return (-EINVAL);
17072 +    }
17073 +
17074 +    return (res ? set_errno (res) : 0);
17075 +}
17076 +
17077 +static void user_vma_open(struct vm_area_struct *vma)
17078 +{
17079 +    USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
17080 +
17081 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
17082 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
17083 +
17084 +    if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
17085 +       if (atomic_dec_and_test (&pr->pr_mappings))
17086 +           pr->pr_ctxt->CommandPageMapping = NULL;
17087 +}
17088 +
17089 +static void user_vma_close(struct vm_area_struct *vma)
17090 +{
17091 +    USER_PRIVATE *pr = (USER_PRIVATE *) vma->vm_private_data;
17092 +
17093 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
17094 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
17095 +
17096 +    if (vma->vm_pgoff == ELAN3IO_OFF_COMMAND_PAGE)
17097 +       atomic_inc (&pr->pr_mappings);
17098 +}
17099 +
17100 +static struct vm_operations_struct user_vm_ops = {
17101 +    open:              user_vma_open,
17102 +    close:             user_vma_close,
17103 +};
17104 +
17105 +static int
17106 +user_mmap (struct file *file, struct vm_area_struct *vma)
17107 +{
17108 +    USER_PRIVATE  *pr   = (USER_PRIVATE *) file->private_data;
17109 +    ELAN3_CTXT     *ctxt = pr->pr_ctxt; 
17110 +    ioaddr_t       ioaddr;
17111 +
17112 +    /* 
17113 +     * NOTE - since we need to maintain the reference count on
17114 +     *        the user_private we only permit single page 
17115 +     *        mmaps - this means that we will certainly see
17116 +     *        the correct number of closes to maintain the
17117 +     *        the reference count correctly.
17118 +     */
17119 +    
17120 +    if ((vma->vm_end - vma->vm_start) != PAGE_SIZE)
17121 +       return (-EINVAL);
17122 +
17123 +    PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: vm_mm=%p start=%lx end=%lx pgoff=%lx flags=%lx prot=%llx file=%p\n",
17124 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags,  (long long)vma->vm_page_prot.pgprot, vma->vm_file);
17125 +
17126 +    switch (vma->vm_pgoff)
17127 +    {
17128 +    default:
17129 +       return (-EINVAL);
17130 +       
17131 +    case ELAN3IO_OFF_COMMAND_PAGE:
17132 +       spin_lock (&pr->pr_lock);
17133 +       if (ctxt->CommandPage == (ioaddr_t) 0 || atomic_read (&pr->pr_mappings) != 0)
17134 +       {
17135 +           PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: command port - %s\n", ctxt->CommandPort ? "already mapped" : "not attached");
17136 +           spin_unlock (&pr->pr_lock);
17137 +           return (-EINVAL);
17138 +       }
17139 +#ifdef LINUX_SPARC
17140 +       pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
17141 +       pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
17142 +#elif defined(pgprot_noncached)
17143 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
17144 +#endif
17145 +
17146 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: commandport at %lx phys %llx prot %llx\n", 
17147 +               vma->vm_start, (unsigned long long) kmem_to_phys ((void *) ctxt->CommandPort),  (long long)vma->vm_page_prot.pgprot);
17148 +
17149 +       /* Don't try to swap out physical pages.. */
17150 +       vma->vm_flags |= VM_RESERVED;
17151 +    
17152 +       /*
17153 +        * Don't dump addresses that are not real memory to a core file.
17154 +        */
17155 +       vma->vm_flags |= VM_IO;
17156 +
17157 +       if (__remap_page_range(vma->vm_start, kmem_to_phys ((void *) ctxt->CommandPage), vma->vm_end - vma->vm_start, vma->vm_page_prot))
17158 +       {
17159 +           spin_unlock (&pr->pr_lock);
17160 +           return (-EAGAIN);
17161 +       }
17162 +       ctxt->CommandPageMapping = (void *) vma->vm_start;
17163 +       
17164 +       atomic_inc (&pr->pr_mappings);
17165 +       
17166 +       spin_unlock (&pr->pr_lock);
17167 +       break;
17168 +
17169 +    case ELAN3IO_OFF_UREG_PAGE:
17170 +#ifdef LINUX_SPARC
17171 +       pgprot_val(vma->vm_page_prot) &= ~(_PAGE_CACHE);
17172 +       pgprot_val(vma->vm_page_prot) |= _PAGE_IE;
17173 +#elif defined(pgprot_noncached)
17174 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
17175 +#endif
17176 +       ioaddr = ctxt->Device->RegPtr + (offsetof (E3_Regs, URegs) & PAGEMASK);
17177 +
17178 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: user_regs at %lx phys %llx prot %llx\n", vma->vm_start, 
17179 +               (unsigned long long) kmem_to_phys ((void *) ioaddr),  (long long)vma->vm_page_prot.pgprot);
17180 +
17181 +       /* Don't try to swap out physical pages.. */
17182 +       vma->vm_flags |= VM_RESERVED;
17183 +    
17184 +       /*
17185 +        * Don't dump addresses that are not real memory to a core file.
17186 +        */
17187 +       vma->vm_flags |= VM_IO;
17188 +       if (__remap_page_range (vma->vm_start, kmem_to_phys ((void *) ioaddr),
17189 +                               vma->vm_end - vma->vm_start, vma->vm_page_prot))
17190 +           return (-EAGAIN);
17191 +       break;
17192 +       
17193 +    case ELAN3IO_OFF_FLAG_PAGE:
17194 +       PRINTF (DBG_DEVICE, DBG_SEG, "user_mmap: flag page at %lx phys %llx\n", vma->vm_start, 
17195 +               (unsigned long long) kmem_to_phys ((void *) ctxt->FlagPage));
17196 +
17197 +       /* we do not want to have this area swapped out, lock it */
17198 +       vma->vm_flags |= VM_LOCKED;
17199 +
17200 +       /* Mark the page as reserved or else the remap_page_range() doesn't remap it */
17201 +       SetPageReserved(pte_page(*find_pte_kernel((unsigned long) ctxt->FlagPage)));
17202 +       
17203 +       if (__remap_page_range (vma->vm_start, kmem_to_phys ((void *) ctxt->FlagPage),
17204 +                             vma->vm_end - vma->vm_start, vma->vm_page_prot))
17205 +           return (-EAGAIN);
17206 +       break;
17207 +    }
17208 +
17209 +    ASSERT (vma->vm_ops == NULL);
17210 +    
17211 +    vma->vm_ops          = &user_vm_ops;
17212 +    vma->vm_file         = file;
17213 +    vma->vm_private_data = (void *) pr;
17214 +    
17215 +    return (0);
17216 +}
17217 +
17218 +#else /* defined(NO_PTRACK) || !defined(IOPROC_PATCH_APPLIED) */
17219 +
17220 +static int
17221 +user_open (struct inode *inode, struct file *file)
17222 +{
17223 +    return -ENXIO;
17224 +}
17225 +
17226 +static int
17227 +user_release (struct inode *inode, struct file *file)
17228 +{
17229 +    return 0;
17230 +}
17231 +
17232 +static int
17233 +user_ioctl (struct inode *inode, struct file *file, 
17234 +           unsigned int cmd, unsigned long arg)
17235 +{
17236 +    return 0;
17237 +}
17238 +
17239 +static int
17240 +user_mmap (struct file *file, struct vm_area_struct *vma)
17241 +{
17242 +    return 0;
17243 +}
17244 +#endif
17245 +
17246 +/* driver entry points */
17247 +static int
17248 +elan3_open (struct inode *inode, struct file *file)
17249 +{
17250 +    if (elan3_devices[ELAN3_DEVICE(inode)] == NULL)
17251 +       return (-ENXIO);
17252 +
17253 +    PRINTF (DBG_DEVICE, DBG_FN, "elan3_open: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file);
17254 +    
17255 +    switch (ELAN3_MINOR (inode))
17256 +    {
17257 +    case ELAN3_MINOR_CONTROL:
17258 +       return (control_open (inode, file));
17259 +    case ELAN3_MINOR_MEM:
17260 +       return (mem_open (inode, file));
17261 +    case ELAN3_MINOR_USER:
17262 +       return (user_open (inode, file));
17263 +    default:
17264 +       return (-ENXIO);
17265 +    }
17266 +}
17267 +
17268 +static int
17269 +elan3_release (struct inode *inode, struct file *file)
17270 +{
17271 +    PRINTF (DBG_DEVICE, DBG_FN, "elan3_release: device %d minor %d file=%p\n", ELAN3_DEVICE(inode), ELAN3_MINOR(inode), file);
17272 +    
17273 +    switch (ELAN3_MINOR (inode))
17274 +    {
17275 +    case ELAN3_MINOR_CONTROL:
17276 +       return (control_release (inode, file));
17277 +    case ELAN3_MINOR_MEM:
17278 +       return (mem_release (inode, file));
17279 +    case ELAN3_MINOR_USER:
17280 +       return (user_release (inode, file));
17281 +    default:
17282 +       return (-ENXIO);
17283 +    }
17284 +}
17285 +
17286 +static int
17287 +elan3_ioctl (struct inode *inode, struct file *file, 
17288 +            unsigned int cmd, unsigned long arg)
17289 +{
17290 +    switch (ELAN3_MINOR (inode))
17291 +    {
17292 +    case ELAN3_MINOR_CONTROL:
17293 +       return (control_ioctl (inode, file, cmd, arg));
17294 +    case ELAN3_MINOR_MEM:
17295 +       return (mem_ioctl (inode, file, cmd, arg));
17296 +    case ELAN3_MINOR_USER:
17297 +       return (user_ioctl (inode, file, cmd, arg));
17298 +    default:
17299 +       return (-ENXIO);
17300 +    }
17301 +}
17302 +
17303 +
17304 +static int
17305 +elan3_mmap (struct file *file, struct vm_area_struct *vma)
17306 +{
17307 +    PRINTF (DBG_DEVICE, DBG_SEG, "elan3_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx flags=%lx prot=%llx\n", 
17308 +           ELAN3_DEVICE (file->f_dentry->d_inode), ELAN3_MINOR (file->f_dentry->d_inode),
17309 +           vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_flags,  (long long)vma->vm_page_prot.pgprot);
17310 +
17311 +    switch (ELAN3_MINOR (file->f_dentry->d_inode))
17312 +    {
17313 +    case ELAN3_MINOR_CONTROL:
17314 +       return (control_mmap (file, vma));
17315 +    case ELAN3_MINOR_MEM:
17316 +       return (mem_mmap (file, vma));
17317 +    case ELAN3_MINOR_USER:
17318 +       return (user_mmap (file, vma));
17319 +    default:
17320 +       return (-ENXIO);
17321 +    }
17322 +}
17323 +
17324 +static irqreturn_t
17325 +InterruptHandlerWrapper(int irq, void *dev_id, struct pt_regs *regs)
17326 +{
17327 +    if (InterruptHandler ((ELAN3_DEV *)dev_id) == 0)
17328 +       return IRQ_HANDLED;
17329 +    else
17330 +       return IRQ_NONE;
17331 +}
17332 +
17333 +
17334 +/* 
17335 + * Elan specific PCI configuration registers.
17336 + */
17337 +
17338 +#define PCI_CONF_PARITY_PHYS_LO         0x40
17339 +#define PCI_CONF_PARITY_PHYS_HI         0x44
17340 +#define PCI_CONF_PARITY_PHASE_ADDR      0x46
17341 +#define PCI_CONF_PARITY_MASTER_TYPE     0x47
17342 +#define PCI_CONF_ELAN3_CTRL              0x48
17343
17344 +#define ECTRL_EXTEND_LATENCY            (1 << 0)
17345 +#define ECTRL_ENABLE_PREFETCH           (1 << 1)
17346 +#define ECTRL_SOFTWARE_INTERNAL_RESET   (1 << 2)
17347 +#define ECTRL_REDUCED_RETRY_RATE        (1 << 3)
17348 +#define ECTRL_CLOCK_DIVIDE_RATE_SHIFT   4
17349 +#define ECTRL_COMMS_DIVIDE_RATE_SHIFT   10
17350 +#define ECTRL_FORCE_COMMSCLK_LOCAL      (1 << 14)
17351 +
17352 +/*
17353 + * Configure PCI.
17354 + */
17355 +static int
17356 +ConfigurePci(ELAN3_DEV *dev)
17357 +{
17358 +       struct pci_dev *pci = dev->Osdep.pci;
17359 +       u32 rom_address;
17360 +
17361 +       if (pci_enable_device(pci))
17362 +           return (ENXIO);
17363 +
17364 +       /* disable ROM */
17365 +       pci_read_config_dword(pci, PCI_ROM_ADDRESS, &rom_address);
17366 +       rom_address &= ~PCI_ROM_ADDRESS_ENABLE;
17367 +       pci_write_config_dword(pci, PCI_ROM_ADDRESS, rom_address);
17368 +       mb();
17369 +
17370 +       /* this is in 32-bit WORDS */
17371 +       pci_write_config_byte(pci, PCI_CACHE_LINE_SIZE, (64 >> 2));
17372 +       mb();
17373 +
17374 +       /* allow 40 ticks to respond, 16 data phases */
17375 +       pci_write_config_byte(pci, PCI_LATENCY_TIMER, 255);
17376 +       mb();
17377 +
17378 +       /* don't enable PCI_COMMAND_SERR--see note in elandev_dunix.c */
17379 +       pci_write_config_word(pci, PCI_COMMAND, PCI_COMMAND_MEMORY 
17380 +           | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY);
17381 +       mb();
17382 +
17383 +       return ESUCCESS;
17384 +}
17385 +
17386 +/* 
17387 + * Reset chip to a known state.
17388 + */
17389 +static int
17390 +ResetElan(ELAN3_DEV *dev, ioaddr_t intPalAddr)
17391 +{
17392 +       struct pci_dev *pci = dev->Osdep.pci;
17393 +       int instance = dev->Instance;
17394 +       u32 val;
17395 +       u8 revid;
17396 +       int CasLatency;
17397 +       int res;
17398 +
17399 +       /* determine rev of board */
17400 +       pci_read_config_byte(pci, PCI_REVISION_ID, &revid);
17401 +
17402 +       /* GNAT 2328 - don't set ECTRL_ENABLE_PREFETCH on Elan rev A */
17403 +       val = ECTRL_EXTEND_LATENCY | (39 << ECTRL_CLOCK_DIVIDE_RATE_SHIFT)
17404 +           | (6 << ECTRL_COMMS_DIVIDE_RATE_SHIFT);
17405 +       switch (revid) 
17406 +       {
17407 +               case PCI_REVISION_ID_ELAN3_REVA:
17408 +                       printk("elan%d: is an elan3 (revision a) - not supported\n", instance);
17409 +                       return (EFAIL);
17410 +
17411 +               case PCI_REVISION_ID_ELAN3_REVB:        
17412 +                       val |= ECTRL_ENABLE_PREFETCH;
17413 +                       if (BackToBackMaster)
17414 +                               val |= ECTRL_FORCE_COMMSCLK_LOCAL;
17415 +                       printk("elan%d: is an elan3 (revision b)\n", instance);
17416 +                       break;
17417 +               default:
17418 +                       printk("elan%d: unsupported elan3 revision %d\n", 
17419 +                           instance, revid);
17420 +                       return EFAIL;
17421 +       }
17422 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
17423 +       mb();
17424 +
17425 +       /*
17426 +        * GNAT: 2474
17427 +        * Hit reset on the Elan, then we MUST initialise the schedule status
17428 +        * register to drive reset on the link before the link can come out
17429 +        * of reset (15 uS). We need to keep it like this until we've 
17430 +        * initialised SDRAM
17431 +        */
17432 +       pci_read_config_dword(pci, PCI_CONF_ELAN3_CTRL, &val);
17433 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, 
17434 +           val | ECTRL_SOFTWARE_INTERNAL_RESET);
17435 +       mb();
17436 +
17437 +       /* Read the Vital Product Data to determine the cas latency */
17438 +       if ((res = ReadVitalProductData (dev, &CasLatency)) != ESUCCESS)
17439 +           return (res);
17440 +
17441 +       /*
17442 +        * Now clear the Software internal reset bit, and start the sdram
17443 +        */
17444 +       pci_write_config_dword(pci, PCI_CONF_ELAN3_CTRL, val);
17445 +       mb();
17446 +
17447 +       /* 
17448 +        * Enable SDRAM before sizing and initalising it for ECC.
17449 +        * NOTE: don't enable all sets of the cache (yet), nor ECC 
17450 +        */
17451 +       dev->Cache_Control_Reg = (CasLatency | REFRESH_RATE_16US);
17452 +
17453 +       write_reg32 (dev, Cache_Control_Reg.ContReg, (dev->Cache_Control_Reg | SETUP_SDRAM));
17454 +       mb();
17455 +
17456 +       INIT_SCHED_STATUS(dev, Sched_Initial_Value);
17457 +
17458 +       /*
17459 +        * Set the interrupt mask to 0 and enable the interrupt PAL
17460 +        * by writing any value to it.
17461 +        */
17462 +       SET_INT_MASK (dev, 0);
17463 +       writeb (0, (void *) intPalAddr);
17464
17465 +       return ESUCCESS;
17466 +}
17467 +
17468 +/*
17469 + * Determine the size of elan PCI address spaces.  EFAIL is returned if 
17470 + * unused or invalid BAR is specified, or if board reports I/O mapped space.
17471 + */
17472 +int
17473 +DeviceRegisterSize(ELAN3_DEV *dev, int rnumber, int *sizep)
17474 +{
17475 +       struct pci_dev *pdev = dev->Osdep.pci;
17476 +
17477 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
17478 +       *sizep = pci_resource_size(pdev, rnumber);
17479 +#else
17480 +       *sizep = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
17481 +#endif
17482 +       return ESUCCESS;
17483 +}
17484 +
17485 +/*
17486 + * Map PCI memory into kernel virtual address space.  On the alpha, 
17487 + * we just return appropriate kseg address, and Unmap is a no-op.
17488 + */
17489 +int
17490 +MapDeviceRegister(ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp,
17491 +                 int off, int len, DeviceMappingHandle *handlep)
17492 +{      
17493 +       struct pci_dev *pdev = dev->Osdep.pci;
17494 +
17495 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
17496 +       u64 base = pci_get_base_address(pdev, rnumber);
17497 +       *addrp = (ioaddr_t) pci_base_to_kseg(base + off, pdev->bus->number);
17498 +
17499 +#else
17500 +       if (len == 0)
17501 +           len = pci_resource_end(pdev, rnumber) - pci_resource_start(pdev, rnumber) + 1;
17502 +       
17503 +       if (len == 0)
17504 +           return (EINVAL);
17505 +
17506 +       *addrp = (ioaddr_t) ioremap_nocache (pci_resource_start(pdev, rnumber) + off, len);
17507 +#endif
17508 +
17509 +       *handlep = (void *) *addrp;
17510 +
17511 +       return (*addrp ? ESUCCESS : ENOMEM);
17512 +}
17513 +void
17514 +UnmapDeviceRegister(ELAN3_DEV *dev, DeviceMappingHandle *handlep)
17515 +{
17516 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
17517 +    iounmap (*handlep);
17518 +#endif
17519 +}
17520 +
17521 +void
17522 +ElanBusError (ELAN3_DEV *dev)
17523 +{
17524 +       struct pci_dev  *pci = dev->Osdep.pci;  
17525 +       u8  phaseaddr, type;
17526 +       u16 status, cmd, physhi;
17527 +       u32 physlo;
17528
17529 +       printk("elan%d: bus error occured\n", dev->Instance);
17530 +
17531 +       pci_read_config_word (pci, PCI_STATUS,                  &status);
17532 +       pci_read_config_word (pci, PCI_COMMAND,                 &cmd);
17533 +       pci_read_config_dword(pci, PCI_CONF_PARITY_PHYS_LO,     &physlo);
17534 +       pci_read_config_word (pci, PCI_CONF_PARITY_PHYS_HI,     &physhi);
17535 +       pci_read_config_byte (pci, PCI_CONF_PARITY_PHASE_ADDR,  &phaseaddr); 
17536 +       pci_read_config_byte (pci, PCI_CONF_PARITY_MASTER_TYPE, &type);
17537 +
17538 +#define PCI_CONF_STAT_FORMAT   "\20" \
17539 +       "\6SIXTY_SIX_MHZ\7UDF\10FAST_BACK\11PARITY" \
17540 +       "\14SIG_TARGET_ABORT\15REC_TARGET_ABORT\16REC_MASTER_ABORT" \
17541 +       "\17SIG_SYSTEM_ERROR\20DETECTED_PARITY"
17542 +
17543 +       printk ("elan%d: status %x cmd %4x physaddr %04x%08x phase %x type %x\n",
17544 +               dev->Instance, status, cmd, physhi, physlo, phaseaddr, type);
17545 +}
17546 +
17547 +/*
17548 + * Local variables:
17549 + * c-file-style: "stroustrup"
17550 + * End:
17551 + */
17552 diff -urN clean/drivers/net/qsnet/elan3/elansyscall.c linux-2.6.9/drivers/net/qsnet/elan3/elansyscall.c
17553 --- clean/drivers/net/qsnet/elan3/elansyscall.c 1969-12-31 19:00:00.000000000 -0500
17554 +++ linux-2.6.9/drivers/net/qsnet/elan3/elansyscall.c   2004-11-01 13:01:51.000000000 -0500
17555 @@ -0,0 +1,1230 @@
17556 +/*
17557 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
17558 + * 
17559 + *    For licensing information please see the supplied COPYING file
17560 + *
17561 + */
17562 +
17563 +#ident "@(#)$Id: elansyscall.c,v 1.100 2004/11/01 18:01:51 robin Exp $"
17564 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/elansyscall.c,v $*/
17565 +
17566 +#include <qsnet/kernel.h>
17567 +#include <qsnet/autoconf.h>
17568 +
17569 +#include <elan/elanmod.h>
17570 +#include <elan3/elanregs.h>
17571 +#include <elan3/elandev.h>
17572 +#include <elan3/elanvp.h>
17573 +#include <elan3/elan3mmu.h>
17574 +#include <elan3/elanctxt.h>
17575 +#include <elan3/elandebug.h>
17576 +#include <elan3/elansyscall.h>
17577 +#include <elan/devinfo.h>
17578 +
17579 +static int       sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap);
17580 +static int       sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep);
17581 +static int       sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep);
17582 +static void      sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_uint32 value);
17583 +static void      sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr);
17584 +static void      sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *item);
17585 +static void      sys_freeWordItem (ELAN3_CTXT *ctxt, void *item);
17586 +static void      sys_freeBlockItem (ELAN3_CTXT *ctxt, void *item);
17587 +static int       sys_countItems (ELAN3_CTXT *ctxt, int list);
17588 +static int       sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
17589 +static void      sys_swapin (ELAN3_CTXT *ctxt);
17590 +static void      sys_swapout (ELAN3_CTXT *ctxt);
17591 +static void      sys_freePrivate (ELAN3_CTXT *ctxt);
17592 +static int       sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef);
17593 +static int       sys_startFaultCheck (ELAN3_CTXT *ctxt);
17594 +static void      sys_endFaultCheck (ELAN3_CTXT *ctxt);
17595 +static E3_uint8  sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr);
17596 +static void      sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
17597 +static E3_uint16 sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr);
17598 +static void      sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
17599 +static E3_uint32 sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr);
17600 +static void      sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
17601 +static E3_uint64 sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr);
17602 +static void      sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
17603 +
17604 +static ELAN3_OPS elan3_sys_ops = {
17605 +    ELAN3_OPS_VERSION,         /* Version */
17606 +
17607 +    sys_exception,             /* Exception */
17608 +    sys_getWordItem,           /* GetWordItem */
17609 +    sys_getBlockItem,          /* GetBlockItem */
17610 +    sys_putWordItem,           /* PutWordItem */
17611 +    sys_putBlockItem,          /* PutBlockItem */
17612 +    sys_putbackItem,           /* PutbackItem */
17613 +    sys_freeWordItem,          /* FreeWordItem */
17614 +    sys_freeBlockItem,         /* FreeBlockItem */
17615 +    sys_countItems,            /* CountItems */
17616 +    sys_event,                 /* Event */
17617 +    sys_swapin,                        /* Swapin */
17618 +    sys_swapout,               /* Swapout */
17619 +    sys_freePrivate,           /* FreePrivate */
17620 +    sys_fixupNetworkError,     /* FixupNetworkError */
17621 +    NULL,                      /* DProcTrap */
17622 +    NULL,                      /* TProcTrap */
17623 +    NULL,                      /* IProcTrap */
17624 +    NULL,                      /* CProcTrap */
17625 +    NULL,                      /* CProcReissue */
17626 +    sys_startFaultCheck,       /* StartFaultCheck */
17627 +    sys_endFaultCheck,          /* EndFaultCheck */
17628 +    sys_load8,                 /* Load8 */
17629 +    sys_store8,                        /* Store8 */
17630 +    sys_load16,                        /* Load16 */
17631 +    sys_store16,               /* Store16 */
17632 +    sys_load32,                        /* Load32 */
17633 +    sys_store32,               /* Store32 */
17634 +    sys_load64,                        /* Load64 */
17635 +    sys_store64                        /* Store64 */
17636 +};
17637 +
17638 +va_list null_valist;
17639 +
17640 +SYS_CTXT *
17641 +sys_init (ELAN3_CTXT *ctxt)
17642 +{
17643 +    SYS_CTXT *sctx;
17644 +
17645 +    /* Allocate and initialise the context private data */
17646 +    KMEM_ZALLOC (sctx, SYS_CTXT *, sizeof  (SYS_CTXT), TRUE);
17647 +
17648 +    if (sctx == NULL)
17649 +       return ((SYS_CTXT *) NULL);
17650 +
17651 +    sctx->Swap    = NULL;
17652 +    sctx->Armed   = 0;
17653 +    sctx->Backoff = 1;
17654 +    sctx->Table   = cookie_alloc_table ((unsigned long) ELAN3_MY_TASK_HANDLE(), 0);
17655 +    sctx->signal  = SIGSEGV;
17656 +
17657 +    if (sctx->Table == NULL)
17658 +    {
17659 +       KMEM_FREE (sctx, sizeof (SYS_CTXT));
17660 +       return ((SYS_CTXT *) NULL);
17661 +    }
17662 +
17663 +    kmutex_init  (&sctx->Lock);
17664 +    spin_lock_init (&sctx->WaitLock);
17665 +    kcondvar_init (&sctx->NetworkErrorWait);
17666 +    
17667 +    /* Install my context operations and private data */
17668 +    ctxt->Operations = &elan3_sys_ops;
17669 +    ctxt->Private    = (void *) sctx;
17670 +    
17671 +    return (sctx);
17672 +}
17673 +
17674 +/* returns -ve on error or ELAN_CAP_OK or ELAN_CAP_RMS */
17675 +/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */
17676 +int 
17677 +elan3_validate_cap(ELAN3_DEV *dev, ELAN_CAPABILITY *cap ,int use)
17678 +{
17679 +     /* Don't allow a user process to attach to system context */
17680 +    if (ELAN3_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN3_SYSTEM_CONTEXT (cap->cap_highcontext)
17681 +       || cap->cap_highcontext <= ELAN_USER_BASE_CONTEXT_NUM  || cap->cap_highcontext <= ELAN_USER_BASE_CONTEXT_NUM)
17682 +    {
17683 +       PRINTF2 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: lctx %x hctx %x \n",cap->cap_lowcontext,  cap->cap_highcontext);
17684 +       PRINTF3 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: bit %x  low %x high %x\n", ((cap->cap_lowcontext) & SYS_CONTEXT_BIT),
17685 +                E3_NUM_CONTEXT_0, ELAN3_KCOMM_BASE_CONTEXT_NUM);
17686 +
17687 +
17688 +       PRINTF0 (DBG_DEVICE, DBG_VP,"elan3_validate_cap: user process cant attach to system cap\n");
17689 +       return (-EINVAL);
17690 +    }
17691
17692 +    if (cap->cap_type & ELAN_CAP_TYPE_HWTEST)
17693 +    {
17694 +       if (!(cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP)) /* cant have a bit map */
17695 +       {
17696 +           PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST must have ELAN_CAP_TYPE_NO_BITMAP\n");
17697 +           return (-EINVAL);
17698 +       }
17699 +       
17700 +       if (cap->cap_lowcontext != cap->cap_highcontext) 
17701 +       {
17702 +           PRINTF2 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST (cap->cap_lowcontext != cap->cap_highcontext) %d %d\n",cap->cap_lowcontext , cap->cap_highcontext) ;
17703 +           return (-EINVAL);
17704 +       }
17705 +       
17706 +       if ( ! (ELAN3_HWTEST_CONTEXT(cap->cap_lowcontext) && ELAN3_HWTEST_CONTEXT(cap->cap_highcontext)))
17707 +       {
17708 +           PRINTF3 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST HWTEST_BASE_CONTEXT %d %d %d \n" , ELAN3_HWTEST_BASE_CONTEXT_NUM,cap->cap_lowcontext ,ELAN3_HWTEST_TOP_CONTEXT_NUM);
17709 +           return (-EINVAL);
17710 +       }
17711 +       
17712 +       if (cap->cap_lownode != ELAN_CAP_UNINITIALISED || cap->cap_highnode != ELAN_CAP_UNINITIALISED)
17713 +       {
17714 +           PRINTF0 (DBG_DEVICE, DBG_VP, "elanmod_classify_cap: ELAN_CAP_TYPE_HWTEST nodes != ELAN_CAP_UNINITIALISED\n");
17715 +           return (-EINVAL);
17716 +       }
17717 +
17718 +       return ELAN_CAP_OK;
17719 +    }
17720 +
17721 +    return elanmod_classify_cap(&dev->Position, cap, use);
17722 +}
17723 +
17724 +int
17725 +sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event)
17726 +{
17727 +    SYS_CTXT    *sctx = (SYS_CTXT *) ctxt->Private;
17728 +    EVENT_COOKIE cookie;
17729 +
17730 +    if (ctxt->Device->Devinfo.dev_revision_id == PCI_REVISION_ID_ELAN3_REVA)
17731 +       return (EINVAL);
17732 +
17733 +    cookie = fuword ((int *) &event->ev_Type) & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY);
17734 +
17735 +    if (cookie_alloc_cookie (sctx->Table, cookie) != ESUCCESS)
17736 +       return (EINVAL);
17737 +
17738 +    cookie_arm_cookie (sctx->Table, cookie);
17739 +
17740 +    if (fuword ((int *) &event->ev_Count) > 0)
17741 +       cookie_wait_cookie (sctx->Table, cookie);
17742 +    
17743 +    cookie_free_cookie (sctx->Table, cookie);
17744 +    
17745 +    return (ESUCCESS);
17746 +}
17747 +
17748 +static void *
17749 +sys_getItem (SYS_SWAP_SPACE *sp, int list)
17750 +{
17751 +    void *itemp = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]);
17752 +    void *next;
17753 +    
17754 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_getItem: sp=%p list=%d head=%p itemp=%p\n",
17755 +            sp, list, &sp->ItemListsHead[list], itemp);
17756 +    
17757 +    if (itemp == NULL)
17758 +       return (NULL);
17759 +
17760 +    next = (void *) fuptr_noerr ((void *) itemp);
17761 +
17762 +    suptr_noerr ((void *) &sp->ItemListsHead[list], (void *) next);
17763 +    if (next == NULL)
17764 +       suptr_noerr ((void *) &sp->ItemListsTailp[list], (void *)&sp->ItemListsHead[list]);
17765 +    return (itemp);
17766 +}
17767 +
17768 +static void
17769 +sys_putItemBack (SYS_SWAP_SPACE *sp, int list, void *itemp)
17770 +{
17771 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemBack: sp=%p list=%d itemp=%p value=%08x\n",
17772 +            sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value));
17773 +
17774 +    suptr_noerr ((void **) itemp, NULL);                                                       /* item->Next = NULL */
17775 +    suptr_noerr ((void **) fuptr_noerr ((void **) &sp->ItemListsTailp[list]), (void *)itemp);  /* *Tailp = item */
17776 +    suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp);                         /* Tailp = &item->Next */
17777 +}
17778 +
17779 +static void
17780 +sys_putItemFront (SYS_SWAP_SPACE *sp, int list, void *itemp)
17781 +{
17782 +    PRINTF4 (DBG_DEVICE, DBG_SYSCALL, "sys_putItemFront: sp=%p list=%d itemp=%p value=%08x\n",
17783 +            sp, list, itemp, fuword_noerr ((int *) &((SYS_WORD_ITEM *) itemp)->Value));
17784 +
17785 +    suptr_noerr ((void **) itemp, fuptr_noerr ((void **) &sp->ItemListsHead[list]));           /* item->Next = Head */
17786 +    suptr_noerr ((void **) &sp->ItemListsHead[list], (void *) itemp);                          /* Head = item */
17787 +
17788 +    if (fuptr_noerr ((void **) &sp->ItemListsTailp[list]) == (void *) &sp->ItemListsHead[list])        /* if (Tailp == &Head) */
17789 +       suptr_noerr ((void **) &sp->ItemListsTailp[list], (void *) itemp);                      /*    Tailp = &Item->Next */
17790 +}
17791 +
17792 +
17793 +static int
17794 +sys_getWordItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep)
17795 +{
17796 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17797 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17798 +    SYS_WORD_ITEM  *item;
17799 +    int                    res;
17800 +    label_t        ljb;
17801 +
17802 +    kmutex_lock (&sctx->Lock);
17803 +    
17804 +    if (on_fault (&ljb))
17805 +    {
17806 +       no_fault();
17807 +       kmutex_unlock (&sctx->Lock);
17808 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17809 +       return (0);
17810 +    }
17811 +
17812 +    item = (SYS_WORD_ITEM *) sys_getItem (sp, list);
17813 +
17814 +    if (item == NULL)
17815 +       res = 0;
17816 +    else
17817 +    {
17818 +       if (list == LIST_DMA_PTR)
17819 +           sctx->Armed = TRUE;
17820 +
17821 +       *itemp  = (void *) item;
17822 +       *valuep = (E3_Addr) fuword_noerr ((E3_int32 *) &item->Value);
17823 +
17824 +       PRINTF3 (ctxt, DBG_SYSCALL, "sys_getWordItem: list=%d -> item=%p value=%08x\n", list, *itemp, *valuep);
17825 +
17826 +       res = 1;
17827 +    }
17828 +    
17829 +    no_fault();
17830 +    kmutex_unlock (&sctx->Lock);
17831 +
17832 +    return (res);
17833 +}
17834 +
17835 +static int
17836 +sys_getBlockItem (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep)
17837 +{
17838 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17839 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17840 +    SYS_BLOCK_ITEM *item;
17841 +    int                    res;
17842 +    label_t        ljb;
17843 +
17844 +    kmutex_lock (&sctx->Lock);
17845 +    
17846 +    if (on_fault (&ljb))
17847 +    {
17848 +       no_fault();
17849 +       kmutex_unlock (&sctx->Lock);
17850 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17851 +       return (0);
17852 +    }
17853 +
17854 +    item = sys_getItem (sp, list);
17855 +
17856 +    if (item == NULL)
17857 +       res = 0;
17858 +    else
17859 +    {
17860 +       E3_uint32 *dest = fuptr_noerr ((void **) &item->Pointer);
17861 +
17862 +       if (list == LIST_DMA_DESC)
17863 +           sctx->Armed = TRUE;
17864 +
17865 +       *itemp  = (void *) item;
17866 +       *valuep = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t) dest);
17867 +
17868 +       PRINTF3 (ctxt, DBG_SYSCALL, "sys_getBlockItem: list=%d -> item=%p addr=%08x\n", list, *itemp, *valuep);
17869 +       PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17870 +                fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), 
17871 +                fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3]));
17872 +       PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17873 +                fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]),
17874 +                fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7]));
17875 +
17876 +       
17877 +       res = 1;
17878 +    }
17879 +    
17880 +    no_fault();
17881 +    kmutex_unlock (&sctx->Lock);
17882 +
17883 +    return (res);
17884 +}
17885 +
17886 +static void
17887 +sys_putWordItem (ELAN3_CTXT *ctxt, int list, E3_Addr value)
17888 +{
17889 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17890 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17891 +    SYS_WORD_ITEM  *item;
17892 +    label_t        ljp;
17893 +
17894 +    kmutex_lock (&sctx->Lock);
17895 +
17896 +    PRINTF2 (ctxt,DBG_SYSCALL, "sys_putWordItem: list=%x value=%x\n", list, value);
17897 +
17898 +    if (on_fault (&ljp))
17899 +    {
17900 +       no_fault();
17901 +       kmutex_unlock (&sctx->Lock);
17902 +       
17903 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17904 +       return;
17905 +    }
17906 +
17907 +    item = sys_getItem (sp, LIST_FREE_WORD);
17908 +
17909 +    PRINTF1 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p\n", item);
17910 +
17911 +    if (item == NULL)
17912 +    {
17913 +       no_fault();
17914 +       kmutex_unlock (&sctx->Lock);
17915 +       
17916 +       sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist);
17917 +       return;
17918 +    }
17919 +    
17920 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: storing value=%08x at %p\n", value, &item->Value);
17921 +
17922 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putWordItem: item=%p value=%08x\n", item, value);
17923 +
17924 +    suword_noerr ((E3_int32 *) &item->Value, value);                                           /* write "value" into item */
17925 +
17926 +    sys_putItemBack (sp, list, item);
17927 +
17928 +    no_fault();
17929 +    kmutex_unlock (&sctx->Lock);
17930 +}
17931 +
17932 +static void
17933 +sys_putBlockItem (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr)
17934 +{
17935 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17936 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
17937 +    SYS_BLOCK_ITEM *item;
17938 +    label_t        ljp;
17939 +    E3_uint32      *source;
17940 +    E3_uint32      *dest;
17941 +
17942 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: list=%x ptr=%p\n", list, ptr);
17943 +
17944 +    kmutex_lock (&sctx->Lock);
17945 +    
17946 +    if (on_fault (&ljp))
17947 +    {
17948 +       no_fault();
17949 +       kmutex_unlock (&sctx->Lock);
17950 +       
17951 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
17952 +       return;
17953 +    }
17954 +
17955 +    item = sys_getItem (sp, LIST_FREE_BLOCK);                  /* get an item from the freelist. */
17956 +
17957 +    if (item == NULL)
17958 +    {
17959 +       no_fault();
17960 +       kmutex_unlock (&sctx->Lock);
17961 +       
17962 +       sys_exception (ctxt, EXCEPTION_SWAP_FAILED, list, (void *) NULL, null_valist);
17963 +       return;
17964 +    }
17965 +
17966 +    /*
17967 +     * The block will have been read using 64 bit reads,  since we have
17968 +     * to write it to user memory using 32 bit writes, we need to perform
17969 +     * an endian swap on the Ultrasparc.
17970 +     */
17971 +    dest   = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer);
17972 +    source = (E3_uint32 *) ptr;
17973 +
17974 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_putBlockItem: item=%p dest=%p\n",item, dest);
17975 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17976 +           source[0^WordEndianFlip], source[1^WordEndianFlip], source[2^WordEndianFlip], source[3^WordEndianFlip]);
17977 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
17978 +            source[4^WordEndianFlip], source[5^WordEndianFlip], source[6^WordEndianFlip], source[7^WordEndianFlip]);
17979 +
17980 +    suword_noerr ((E3_int32 *) &dest[7], (E3_int32) source[7^WordEndianFlip]);
17981 +    suword_noerr ((E3_int32 *) &dest[6], (E3_int32) source[6^WordEndianFlip]);
17982 +    suword_noerr ((E3_int32 *) &dest[5], (E3_int32) source[5^WordEndianFlip]);
17983 +    suword_noerr ((E3_int32 *) &dest[4], (E3_int32) source[4^WordEndianFlip]);
17984 +    suword_noerr ((E3_int32 *) &dest[3], (E3_int32) source[3^WordEndianFlip]);
17985 +    suword_noerr ((E3_int32 *) &dest[2], (E3_int32) source[2^WordEndianFlip]);
17986 +    suword_noerr ((E3_int32 *) &dest[1], (E3_int32) source[1^WordEndianFlip]);
17987 +    suword_noerr ((E3_int32 *) &dest[0], (E3_int32) source[0^WordEndianFlip]);
17988 +
17989 +    sys_putItemBack (sp, list, item);                          /* chain onto list of items. */
17990 +
17991 +    no_fault();
17992 +    kmutex_unlock (&sctx->Lock);
17993 +}
17994 +
17995 +static void
17996 +sys_freeWordItem (ELAN3_CTXT *ctxt, void *itemp)
17997 +{
17998 +    SYS_CTXT      *sctx = (SYS_CTXT *) ctxt->Private;
17999 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
18000 +    label_t        ljp;
18001 +
18002 +    kmutex_lock (&sctx->Lock);
18003 +    
18004 +    if (on_fault (&ljp))
18005 +    {
18006 +       no_fault();
18007 +       kmutex_unlock (&sctx->Lock);
18008 +       
18009 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_WORD, (void *) NULL, null_valist);
18010 +       return;
18011 +    }
18012 +
18013 +    sys_putItemBack (sp, LIST_FREE_WORD, itemp);
18014 +
18015 +    no_fault();
18016 +    kmutex_unlock (&sctx->Lock);
18017 +}
18018 +
18019 +static void
18020 +sys_freeBlockItem (ELAN3_CTXT *ctxt, void *itemp)
18021 +{
18022 +    SYS_CTXT       *sctx = (SYS_CTXT *) ctxt->Private;
18023 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
18024 +    SYS_BLOCK_ITEM *item = (SYS_BLOCK_ITEM *)itemp;
18025 +    E3_uint32      *dest;
18026 +    label_t        ljp;
18027 +
18028 +    kmutex_lock (&sctx->Lock);
18029 +    
18030 +    if (on_fault (&ljp))
18031 +    {
18032 +       no_fault();
18033 +       kmutex_unlock (&sctx->Lock);
18034 +       
18035 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, LIST_FREE_BLOCK, (void *) NULL, null_valist);
18036 +       return;
18037 +    }
18038 +#ifdef DEBUG_PRINTF
18039 +    dest = (E3_uint32 *) fuptr_noerr ((void **) &item->Pointer);
18040 +
18041 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_freeBlockItem: item=%p dest=%p\n", item, dest);
18042 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
18043 +            fuword_noerr ((int *) &dest[0]), fuword_noerr ((int *) &dest[1]), 
18044 +            fuword_noerr ((int *) &dest[2]), fuword_noerr ((int *) &dest[3]));
18045 +    PRINTF4 (ctxt, DBG_SYSCALL, "                  %08x %08x %08x %08x\n",
18046 +            fuword_noerr ((int *) &dest[4]), fuword_noerr ((int *) &dest[5]),
18047 +            fuword_noerr ((int *) &dest[6]), fuword_noerr ((int *) &dest[7]));
18048 +#endif
18049 +
18050 +    sys_putItemBack (sp, LIST_FREE_BLOCK, itemp);
18051 +
18052 +    no_fault();
18053 +    kmutex_unlock (&sctx->Lock);
18054 +}
18055 +
18056 +static void
18057 +sys_putbackItem (ELAN3_CTXT *ctxt, int list, void *itemp)
18058 +{
18059 +    SYS_CTXT       *sctx = (SYS_CTXT *) ctxt->Private;
18060 +    SYS_SWAP_SPACE *sp   = sctx->Swap;
18061 +    label_t        ljp;
18062 +
18063 +    kmutex_lock (&sctx->Lock);
18064 +    
18065 +    if (on_fault (&ljp))
18066 +    {
18067 +       no_fault();
18068 +       kmutex_unlock (&sctx->Lock);
18069 +       
18070 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
18071 +       return;
18072 +    }
18073 +
18074 +    sys_putItemFront (sp, list, itemp);
18075 +
18076 +    no_fault();
18077 +    kmutex_unlock (&sctx->Lock);
18078 +}
18079 +
18080 +static int
18081 +sys_countItems (ELAN3_CTXT *ctxt, int list)
18082 +{
18083 +    SYS_CTXT      *sctx  = (SYS_CTXT *) ctxt->Private;
18084 +    SYS_SWAP_SPACE *sp    = sctx->Swap;
18085 +    int                    count = 0;
18086 +    void          *item;
18087 +    label_t        ljb;
18088 +
18089 +    kmutex_lock (&sctx->Lock);
18090 +    
18091 +    if (on_fault (&ljb))
18092 +    {
18093 +       no_fault();
18094 +       kmutex_unlock (&sctx->Lock);
18095 +       sys_exception (ctxt, EXCEPTION_SWAP_FAULT, list, (void *) NULL, null_valist);
18096 +       return (0);
18097 +    }
18098 +
18099 +    for (item = (void *) fuptr_noerr ((void **) &sp->ItemListsHead[list]); 
18100 +        item != NULL;
18101 +        item = (void *) fuptr_noerr ((void **) item))
18102 +    {
18103 +       count++;
18104 +    }
18105 +
18106 +    no_fault();
18107 +    kmutex_unlock (&sctx->Lock);
18108 +
18109 +    return (count);
18110 +}
18111 +
18112 +
18113 +long sys_longTime;
18114 +long sys_shortTime;
18115 +int  sys_waitTicks;
18116 +int  sys_maxBackoff;
18117 +
18118 +#define SYS_LONG_TIME          MAX((hz * 5) / 1000, 1)         /* 5 ms */
18119 +#define SYS_SHORT_TIME         MAX((hz * 2) / 1000, 1)         /* 2 ms */
18120 +#define SYS_WAIT_TICKS         MAX((hz * 1) / 1000, 1)         /* 1 ms  - backoff granularity */
18121 +#define SYS_MAX_BACKOFF                MAX((hz * 5) / 1000, 1)         /* 5 ms  - max backoff for "nacked" packets*/
18122 +#define SYS_TIMEOUT_BACKOFF    MAX((hz * 10) / 1000, 1)        /* 10 ms - backoff for output timeout (point to point) */
18123 +#define SYS_BCAST_BACKOFF      MAX((hz * 50) / 1000, 1)        /* 50 ms - backoff for output timeout (broadcast) */
18124 +#define SYS_NETERR_BACKOFF     MAX((hz * 10) / 1000, 1)        /* 10 ms - delay for network error in dma data */
18125 +
18126 +static void
18127 +sys_backoffWait (ELAN3_CTXT *ctxt, int ticks)
18128 +{
18129 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
18130 +    long      t;
18131 +
18132 +    spin_lock (&sctx->WaitLock);
18133 +
18134 +    t = lbolt - sctx->Time;
18135 +
18136 +    if (sys_longTime   == 0) sys_longTime   = SYS_LONG_TIME;
18137 +    if (sys_shortTime  == 0) sys_shortTime  = SYS_SHORT_TIME;
18138 +    if (sys_waitTicks  == 0) sys_waitTicks  = SYS_WAIT_TICKS;
18139 +    if (sys_maxBackoff == 0) sys_maxBackoff = SYS_MAX_BACKOFF;
18140 +
18141 +    if (t > sys_longTime)                                      /* It's a long time since the last trap */
18142 +       sctx->Backoff = 0;                                      /* so set the backoff back down to 0 */
18143 +
18144 +    if (ticks)
18145 +    {
18146 +       PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d ticks [%lx]\n", ticks, t);
18147 +       kcondvar_timedwait (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + ticks);
18148 +    }
18149 +    else if (sctx->Armed)
18150 +    {
18151 +       if (t < sys_shortTime)                                  /* It's been a short time since the last */
18152 +       {                                                       /* trap, so increase the backoff */
18153 +           sctx->Backoff++;
18154 +           
18155 +           if (sctx->Backoff > sys_maxBackoff)
18156 +               sctx->Backoff = sys_maxBackoff;
18157 +       }
18158 +
18159 +       PRINTF2 (ctxt, DBG_DPROC, "sys_backoffWait : Waiting - %d [%lx]\n", sctx->Backoff, t);
18160 +
18161 +       if (sctx->Backoff)
18162 +           kcondvar_timedwaitsig (&sctx->NetworkErrorWait, &sctx->WaitLock, NULL, lbolt + sctx->Backoff * sys_waitTicks);
18163 +
18164 +       sctx->Armed = 0;
18165 +    }
18166 +    else
18167 +    {
18168 +       PRINTF1 (ctxt, DBG_DPROC, "sys_backoffWait : Not Waiting - %d\n", sctx->Backoff);
18169 +
18170 +    }
18171 +    sctx->Time = lbolt;
18172 +
18173 +    spin_unlock (&sctx->WaitLock);
18174 +}
18175 +
18176 +static int
18177 +trapSize (int proc)
18178 +{
18179 +    switch (proc)
18180 +    {
18181 +    case DMA_PROC:     return (sizeof (DMA_TRAP));
18182 +    case THREAD_PROC:  return (sizeof (THREAD_TRAP));
18183 +    case COMMAND_PROC: return (sizeof (COMMAND_TRAP));
18184 +    case INPUT_PROC:   return (sizeof (INPUT_TRAP));
18185 +    default:           return (0);
18186 +    }
18187 +}
18188 +
18189 +static int
18190 +sys_exception (ELAN3_CTXT *ctxt, int type, int proc, void *trapp, va_list ap)
18191 +{
18192 +    SYS_CTXT *sctx  = (SYS_CTXT *) ctxt->Private;
18193 +    int              res;
18194 +
18195 +    PRINTF2 (ctxt, DBG_SYSCALL, "sys_exception: type %d proc %d\n", type, proc);
18196 +
18197 +    switch (type)
18198 +    {
18199 +    case EXCEPTION_INVALID_ADDR:
18200 +    {
18201 +       E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *);
18202 +       int              res       = va_arg (ap, int);
18203 +       
18204 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, res, 0);
18205 +       break;
18206 +    }
18207 +    
18208 +    case EXCEPTION_UNIMP_INSTR:
18209 +    {
18210 +       E3_uint32 instr = va_arg (ap, E3_uint32);
18211 +       
18212 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, instr);
18213 +       break;
18214 +    }
18215 +    
18216 +    case EXCEPTION_INVALID_PROCESS:
18217 +    {
18218 +       E3_uint32 vproc = va_arg (ap, E3_uint32);
18219 +       int       res  = va_arg (ap, int);
18220 +
18221 +       switch (proc)
18222 +       {
18223 +       case DMA_PROC:
18224 +           if (sctx->Flags & ELAN3_SYS_FLAG_DMA_BADVP)
18225 +           {
18226 +               DMA_TRAP *trap = (DMA_TRAP *) trapp;
18227 +
18228 +               if (trap->Desc.s.dma_direction != DMA_WRITE)
18229 +                   trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent;
18230 +
18231 +               trap->Desc.s.dma_direction       = DMA_WRITE;
18232 +               trap->Desc.s.dma_size            = 0;
18233 +               trap->Desc.s.dma_source          = (E3_Addr) 0;
18234 +               trap->Desc.s.dma_dest            = (E3_Addr) 0;
18235 +               trap->Desc.s.dma_destEvent       = (E3_Addr) 0;
18236 +               trap->Desc.s.dma_destCookieVProc = 0;
18237 +               trap->Desc.s.dma_srcCookieVProc  = 0;
18238 +               
18239 +               return (OP_IGNORE);
18240 +           }
18241 +           break;
18242 +
18243 +       case THREAD_PROC:
18244 +           if (sctx->Flags & ELAN3_SYS_FLAG_THREAD_BADVP)
18245 +           {
18246 +               THREAD_TRAP *trap = (THREAD_TRAP *) trapp;
18247 +
18248 +               trap->TrapBits.s.PacketAckValue = E3_PAckError;
18249 +               
18250 +               return (OP_IGNORE);
18251 +           }
18252 +           break;
18253 +       }
18254 +           
18255 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, vproc);
18256 +       break;
18257 +    }
18258 +    
18259 +    case EXCEPTION_FAULTED:
18260 +    {
18261 +       E3_Addr addr = va_arg (ap, E3_Addr);
18262 +
18263 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, addr);
18264 +       break;
18265 +    }
18266 +    
18267 +    case EXCEPTION_QUEUE_OVERFLOW:
18268 +    {
18269 +       E3_FaultSave_BE *faultSave = va_arg (ap, E3_FaultSave_BE *);
18270 +       int              trapType  = va_arg (ap, int);
18271 +       
18272 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), faultSave, 0, trapType);
18273 +       break;
18274 +    }
18275 +    
18276 +    case EXCEPTION_COMMAND_OVERFLOW:
18277 +    {
18278 +       int count = va_arg (ap, int);
18279 +       
18280 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, count);
18281 +       break;
18282 +    }
18283 +    
18284 +    case EXCEPTION_CHAINED_EVENT:
18285 +    {
18286 +       E3_Addr addr = va_arg (ap, E3_Addr);
18287 +       
18288 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, addr);
18289 +       break;
18290 +    }
18291 +    
18292 +    case EXCEPTION_DMA_RETRY_FAIL:
18293 +    case EXCEPTION_PACKET_TIMEOUT:
18294 +       if (proc != DMA_PROC)
18295 +           sys_backoffWait (ctxt, SYS_TIMEOUT_BACKOFF);
18296 +       else
18297 +       {
18298 +           DMA_TRAP *trap = (DMA_TRAP *) trapp;
18299 +           
18300 +           if (sctx->Flags & ELAN3_SYS_FLAG_DMAFAIL)
18301 +           {
18302 +               E3_BlockCopyEvent *event;
18303 +
18304 +               if (trap->Desc.s.dma_direction != DMA_WRITE)
18305 +                   trap->Desc.s.dma_srcEvent = trap->Desc.s.dma_destEvent;
18306 +
18307 +               /* change the source word to be E3_EVENT_FAILED */
18308 +               if ((event = (E3_BlockCopyEvent *) elan3mmu_mainaddr (ctxt->Elan3mmu, trap->Desc.s.dma_srcEvent)) == NULL)
18309 +               {
18310 +                   sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
18311 +                   break;
18312 +               }
18313 +
18314 +               suword (&event->ev_Source, E3_EVENT_FAILED);
18315 +               wmb(); mmiob();
18316 +               
18317 +               trap->Desc.s.dma_direction       = DMA_WRITE;
18318 +               trap->Desc.s.dma_size            = 0;
18319 +               trap->Desc.s.dma_source          = (E3_Addr) 0;
18320 +               trap->Desc.s.dma_dest            = (E3_Addr) 0;
18321 +               trap->Desc.s.dma_destEvent       = (E3_Addr) 0;
18322 +               trap->Desc.s.dma_destCookieVProc = 0;
18323 +               trap->Desc.s.dma_srcCookieVProc  = 0;
18324 +               
18325 +               return (OP_IGNORE);
18326 +           }
18327 +
18328 +           if (type == EXCEPTION_DMA_RETRY_FAIL)
18329 +               sys_backoffWait (ctxt, 0);
18330 +           else
18331 +           {
18332 +               ELAN_LOCATION location;
18333 +
18334 +               krwlock_read (&ctxt->VpLock);
18335 +               location = ProcessToLocation (ctxt, NULL, trap->Desc.s.dma_direction == DMA_WRITE ? 
18336 +                                             trap->Desc.s.dma_destVProc : trap->Desc.s.dma_srcVProc, NULL);
18337 +               krwlock_done (&ctxt->VpLock);
18338 +               
18339 +               sys_backoffWait (ctxt, location.loc_node == ELAN3_INVALID_NODE ? SYS_BCAST_BACKOFF : SYS_TIMEOUT_BACKOFF);
18340 +           }
18341 +       }
18342 +       return (OP_IGNORE);
18343 +       
18344 +    case EXCEPTION_NETWORK_ERROR:
18345 +    {
18346 +       INPUT_TRAP       *trap  = (INPUT_TRAP *) trapp;
18347 +       NETERR_RESOLVER **rvpp  = va_arg (ap, NETERR_RESOLVER **);
18348 +
18349 +       ASSERT (trap->State == CTXT_STATE_NETWORK_ERROR);
18350 +
18351 +       if (! (sctx->Flags & ELAN3_SYS_FLAG_NETERR) && (trap->DmaIdentifyTransaction || trap->ThreadIdentifyTransaction))
18352 +       {
18353 +           if ((*rvpp) != (NETERR_RESOLVER *) NULL)
18354 +               res = (*rvpp)->Status;
18355 +           else if ((res = QueueNetworkErrorResolver (ctxt, trap, rvpp)) == ESUCCESS)
18356 +           {
18357 +               /* Successfully queued the network error resolver */
18358 +               return (OP_HANDLED);
18359 +           }
18360 +
18361 +           /* network error resolution has failed - either a bad cookie or */
18362 +           /* an RPC error has occurred */
18363 +           sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, res, 0);
18364 +       }
18365 +       else
18366 +       {
18367 +           /* Must be an overlapped dma packet. Must wait long enough to
18368 +            * ensure that the sending dma'er has tried to send the next
18369 +            * packet and had it discarded. In the real world this should
18370 +            * be greater than an output timeout. (About 8mSec) */
18371 +           
18372 +           sys_backoffWait (ctxt, SYS_NETERR_BACKOFF);
18373 +           
18374 +           /* set this inputter state to be ok, since we've been called 
18375 +            * by the lwp it will lower the context filter for us, so 
18376 +            * re-enabling the inputter,  note we don't need to execute
18377 +            * any of the packet since the dma process will re-transmit
18378 +            * it after receiving a nack for the next packet */
18379 +           trap->State = CTXT_STATE_OK;
18380 +           
18381 +           return (OP_HANDLED);
18382 +       }
18383 +       break;
18384 +    }
18385 +    
18386 +    default:
18387 +       sys_addException (sctx, type, proc, trapp, trapSize(proc), NULL, 0, 0);
18388 +       break;
18389 +    }
18390 +    
18391 +    if (type != EXCEPTION_DEBUG)
18392 +#ifdef LINUX
18393 +#ifdef NO_NPTL
18394 +       psignal (CURPROC()->p_opptr, sctx->signal);
18395 +#else
18396 +       psignal (CURPROC()->parent, sctx->signal);
18397 +#endif
18398 +#else
18399 +       psignal (CURPROC(), sctx->signal);
18400 +#endif
18401 +    return (OP_HANDLED);
18402 +}
18403 +
18404 +static int
18405 +sys_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)
18406 +{
18407 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
18408 +
18409 +    cookie_fire_cookie (sctx->Table, cookie);
18410 +
18411 +    return (OP_HANDLED); 
18412 +}
18413 +
18414 +static void
18415 +sys_swapin (ELAN3_CTXT *ctxt)
18416 +{
18417 +    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapin\n");
18418 +}
18419 +
18420 +static void
18421 +sys_swapout (ELAN3_CTXT *ctxt)
18422 +{
18423 +    PRINTF0 (ctxt, DBG_SYSCALL, "sys_swapout\n");
18424 +}
18425 +
18426 +static void
18427 +sys_freePrivate (ELAN3_CTXT *ctxt)
18428 +{
18429 +    SYS_CTXT *sctx = (SYS_CTXT *) ctxt->Private;
18430 +
18431 +    cookie_free_table (sctx->Table);
18432 +
18433 +    kmutex_destroy (&sctx->Lock);
18434 +    spin_lock_destroy (&sctx->WaitLock);
18435 +    kcondvar_destroy (&sctx->NetworkErrorWait);
18436 +
18437 +    KMEM_FREE (sctx, sizeof (SYS_CTXT));
18438 +    ctxt->Private = NULL;
18439 +}
18440 +
18441 +static int
18442 +sys_checkThisDma (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, E3_DMA *dma)
18443 +{
18444 +    E3_DmaType type;
18445 +    E3_uint32  cookie;
18446 +    E3_uint32  cvproc;
18447 +    int               ignore;
18448 +    int               match;
18449 +
18450 +    type.type = fuword_noerr ((int *) &dma->dma_type);
18451 +
18452 +    if (type.s.direction == DMA_WRITE)
18453 +    {
18454 +       cookie = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
18455 +       cvproc = fuword_noerr ((int *) &dma->dma_destCookieVProc);
18456 +    }
18457 +    else
18458 +    {
18459 +       cookie = fuword_noerr ((int *) &dma->dma_destCookieVProc);
18460 +       cvproc = fuword_noerr ((int *) &dma->dma_srcCookieVProc);
18461 +    }
18462 +
18463 +    PRINTF5 (ctxt, DBG_NETERR, "sys_checkThisDma: dir = %d cookie = %08x cvproc = %08x CookieVProc %08x DstProcess %04x\n",
18464 +            type.s.direction, cookie, cvproc, nef->Message.CookieVProc, nef->Message.DstProcess);
18465 +
18466 +    /* A DMA matches a network error fixup if it's going to the right place (or is a broadcast)
18467 +     * and the appropriate cookie matches, except that we ignore DMA's which don't have a destEvent
18468 +     * since they don't have any atomic behaviour (though they still send the identify) */
18469 +
18470 +    ignore = (type.s.direction == DMA_WRITE && cookie == 0 &&
18471 +             fuword_noerr ((int *) &dma->dma_destEvent) == 0);
18472 +    match  = (nef->Message.CookieVProc == cookie &&
18473 +             (nef->Message.DstProcess == (cvproc & DMA_PROCESS_MASK) || nef->Message.WaitForEop));
18474 +
18475 +    PRINTF2 (ctxt, DBG_NETERR, "  -> %s %s\n", ignore ? "ignore" : match ? "matched" : "not-matched", nef->Message.WaitForEop ? "wait for eop" : "");
18476 +
18477 +    if (match && !ignore && !nef->Message.WaitForEop)
18478 +    {
18479 +       PRINTF0 (ctxt, DBG_NETERR, "sys_checkThisDma: nuking the dma\n");
18480 +
18481 +       /* NOTE - we access the dma descriptor backwards since it could exist in sdram */
18482 +       if (type.s.direction != DMA_WRITE)
18483 +           suword_noerr ((int *) &dma->dma_srcEvent, 0);
18484 +
18485 +       suword_noerr ((int *) &dma->dma_destEvent, 0);
18486 +       suword_noerr ((int *) &dma->dma_dest,      0);
18487 +       suword_noerr ((int *) &dma->dma_source,    0);
18488 +       suword_noerr ((int *) &dma->dma_size,      0);
18489 +
18490 +       if (type.s.direction != DMA_WRITE)
18491 +           suword_noerr ((int *) &dma->dma_type, fuword_noerr ((int *) &dma->dma_type) & E3_DMA_CONTEXT_MASK);
18492 +
18493 +       wmb(); mmiob();
18494 +    }
18495 +
18496 +    return (match && !ignore);
18497 +}
18498 +
18499 +static int
18500 +sys_fixupNetworkError (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef)
18501 +{
18502 +    SYS_CTXT       *sctx    = (SYS_CTXT *) ctxt->Private;
18503 +    SYS_SWAP_SPACE *sp      = sctx->Swap;
18504 +    int                    matched = 0;
18505 +    SYS_WORD_ITEM  *wordp;
18506 +    SYS_BLOCK_ITEM *blockp;
18507 +    label_t        ljb;
18508 +    int                    res;
18509 +
18510 +    PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError %08x %08x %08x\n", 
18511 +            nef->Message.CookieAddr, nef->Message.CookieVProc, nef->Message.NextCookie);
18512 +
18513 +    if (nef->Message.CookieAddr == (E3_Addr) 0)                        /* It's a DMA which requires fixing up */
18514 +    {
18515 +       kmutex_lock (&sctx->Lock);
18516 +
18517 +       if (on_fault (&ljb))
18518 +           res = EFAULT;
18519 +       else
18520 +       {
18521 +           /* scan the dma ptr list */
18522 +           for (wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_PTR]);
18523 +                wordp != NULL; 
18524 +                wordp = (SYS_WORD_ITEM *) fuptr_noerr ((void **) &wordp->Next))
18525 +           {
18526 +               E3_uint32 value = fuword_noerr ((int *) &wordp->Value);
18527 +               E3_DMA    *dma  = (E3_DMA *) elan3mmu_mainaddr (ctxt->Elan3mmu, value);
18528 +
18529 +               PRINTF3 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Value %08x dma %p\n", wordp, value, dma);
18530 +
18531 +               matched += sys_checkThisDma (ctxt, nef, dma);
18532 +           }
18533 +       
18534 +           /* scan the dma desc list */
18535 +           for (blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &sp->ItemListsHead[LIST_DMA_DESC]);
18536 +                blockp != NULL; 
18537 +                blockp = (SYS_BLOCK_ITEM *) fuptr_noerr ((void **) &blockp->Next))
18538 +           {
18539 +               E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &blockp->Pointer);
18540 +               
18541 +               PRINTF2 (ctxt, DBG_NETERR, "sys_fixupnetworkError: check block item %p Pointer %p\n", blockp, dma);
18542 +
18543 +               matched += sys_checkThisDma (ctxt, nef, dma);
18544 +           }
18545 +           
18546 +           /* If we've still not found it, then check the command port item */
18547 +           /* it MUST be present as a command waiting to be executed, as */
18548 +           /* otherwise it could have already happened and we will claim to */
18549 +           /* have found it, but not really */
18550 +           if (ctxt->CommandPortItem != NULL)
18551 +           {
18552 +               E3_DMA *dma = (E3_DMA *) fuptr_noerr ((void *) &((SYS_BLOCK_ITEM *) ctxt->CommandPortItem)->Pointer);
18553 +
18554 +               if (sys_checkThisDma (ctxt, nef, dma))
18555 +               {
18556 +                   printk ("!!! it's the command port item - need to ensure that the command exists\n");
18557 +                   matched++;
18558 +               }
18559 +           }
18560 +
18561 +           res = matched ? ESUCCESS : ESRCH;
18562 +       }
18563 +       no_fault();
18564 +       kmutex_unlock (&sctx->Lock);
18565 +
18566 +       if (matched > 1)
18567 +           ElanException (ctxt, EXCEPTION_COOKIE_ERROR, DMA_PROC, NULL, NULL, nef->Message.CookieVProc);
18568 +    }
18569 +    else                                                       /* It's a thread which requires fixing up */
18570 +    {
18571 +       E3_int32  *cookiePtr = (E3_int32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, nef->Message.CookieAddr);
18572 +       E3_uint32  curval    = fuword_noerr (cookiePtr);
18573 +
18574 +       if (curval == nef->Message.CookieVProc)         /* thread doesn't think it's been done */
18575 +       {
18576 +           if (! nef->Message.WaitForEop)
18577 +           {
18578 +               suword_noerr (cookiePtr, nef->Message.NextCookie);
18579 +               mb(); mmiob();
18580 +           }
18581 +           
18582 +           res = ESUCCESS;
18583 +       }
18584 +       else                                                    /* thread thinks that it's been executed */
18585 +       {
18586 +           res = ESRCH;
18587 +       }
18588 +    }
18589 +    
18590 +    CompleteNetworkErrorFixup (ctxt, nef, res);
18591 +
18592 +    return (OP_HANDLED);
18593 +}
18594 +
18595 +
18596 +static int
18597 +sys_startFaultCheck (ELAN3_CTXT *ctxt)
18598 +{
18599 +    return (0);
18600 +}
18601 +
18602 +static void
18603 +sys_endFaultCheck (ELAN3_CTXT *ctxt)
18604 +{
18605 +    wmb();
18606 +}
18607 +
18608 +static E3_uint8
18609 +sys_load8 (ELAN3_CTXT *ctxt, E3_Addr addr)
18610 +{
18611 +    E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18612 +
18613 +    return (fubyte_noerr (maddr));
18614 +}
18615 +
18616 +static void
18617 +sys_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val)
18618 +{
18619 +    E3_uint8 *maddr = (E3_uint8 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18620 +
18621 +    subyte_noerr (maddr, val);
18622 +    wmb(); mmiob();
18623 +}
18624 +
18625 +static E3_uint16
18626 +sys_load16 (ELAN3_CTXT *ctxt, E3_Addr addr)
18627 +{
18628 +    E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18629 +
18630 +    return (fusword_noerr (maddr));
18631 +}
18632 +
18633 +static void
18634 +sys_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val)
18635 +{
18636 +    E3_uint16 *maddr = (E3_uint16 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18637 +
18638 +    susword_noerr (maddr, val);
18639 +    wmb(); mmiob();
18640 +}
18641 +
18642 +static E3_uint32
18643 +sys_load32 (ELAN3_CTXT *ctxt, E3_Addr addr)
18644 +{
18645 +    E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18646 +
18647 +    return (fuword_noerr (maddr));
18648 +}
18649 +
18650 +static void
18651 +sys_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val)
18652 +{
18653 +    E3_uint32 *maddr = (E3_uint32 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18654 +
18655 +    suword_noerr (maddr, val);
18656 +    wmb(); mmiob();
18657 +}
18658 +
18659 +static E3_uint64
18660 +sys_load64 (ELAN3_CTXT *ctxt, E3_Addr addr)
18661 +{
18662 +    E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18663 +
18664 +    return (fulonglong_noerr ((long long *) maddr));
18665 +}
18666 +
18667 +static void
18668 +sys_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val)
18669 +{
18670 +    E3_uint64 *maddr = (E3_uint64 *) elan3mmu_mainaddr (ctxt->Elan3mmu, addr);
18671 +
18672 +    sulonglong_noerr ((long long *) maddr, val);
18673 +    wmb(); mmiob();
18674 +}
18675 +
18676 +
18677 +void
18678 +sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t trapp, int size,
18679 +                 E3_FaultSave_BE *faultSave, u_long res, u_long value)
18680 +{
18681 +    SYS_EXCEPTION      *ex_ptr;
18682 +    int                        front;
18683 +    int                        back;
18684 +    int                        count;
18685 +    label_t            ljp;
18686 +
18687 +    PRINTF4 (DBG_DEVICE, DBG_FN, "sys_addException: type %d proc %d res %ld value %ld\n",
18688 +            type, proc, res, value);
18689 +
18690 +    KMEM_ZALLOC (ex_ptr, SYS_EXCEPTION *, sizeof  (SYS_EXCEPTION), TRUE);
18691 +
18692 +    if (ex_ptr != NULL)
18693 +    {
18694 +       bzero ((caddr_t) ex_ptr, sizeof (SYS_EXCEPTION));
18695 +
18696 +       ex_ptr->Type  = type;
18697 +       ex_ptr->Proc  = proc;
18698 +       ex_ptr->Res   = res;
18699 +       ex_ptr->Value = value;
18700 +       
18701 +       if (trapp && size)
18702 +           bcopy (trapp, (caddr_t) &ex_ptr->Union, size);
18703 +       if (faultSave)
18704 +           bcopy ((caddr_t) faultSave, (caddr_t) &ex_ptr->FaultArea, sizeof (E3_FaultSave_BE));
18705 +    }
18706 +
18707 +    kmutex_lock (&sctx->Lock);
18708 +    if (! on_fault (&ljp))
18709 +    {
18710 +       front = fuword_noerr (&sctx->Exceptions->Front);
18711 +       back  = fuword_noerr (&sctx->Exceptions->Back);
18712 +       count = fuword_noerr (&sctx->Exceptions->Count);
18713 +
18714 +       if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count)
18715 +           suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1);
18716 +       else if (((front+1) % count ) == back)
18717 +           suword_noerr (&sctx->Exceptions->Overflow, fuword_noerr (&sctx->Exceptions->Overflow) + 1);
18718 +       else
18719 +       {
18720 +           if (ex_ptr != NULL)
18721 +               copyout_noerr ((caddr_t) ex_ptr, (caddr_t) &sctx->Exceptions->Exceptions[front], sizeof (SYS_EXCEPTION));
18722 +           else
18723 +           {
18724 +               suword_noerr (&sctx->Exceptions->Exceptions[front].Type, EXCEPTION_ENOMEM);
18725 +               suword_noerr (&sctx->Exceptions->Exceptions[front].Proc, 0);
18726 +           }
18727 +           suword_noerr (&sctx->Exceptions->Front, (front + 1) % count);
18728 +       }
18729 +
18730 +       /* always reset the magic number in case it's been overwritten */
18731 +       /* so that 'edb' can find the exception page in the core file */
18732 +       suword_noerr (&sctx->Exceptions->Magic, SYS_EXCEPTION_MAGIC);
18733 +    }
18734 +    no_fault();
18735 +    kmutex_unlock (&sctx->Lock);
18736 +    
18737 +    if (ex_ptr != NULL)
18738 +       KMEM_FREE (ex_ptr, sizeof  (SYS_EXCEPTION));
18739 +}
18740 +
18741 +int
18742 +sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex)
18743 +{
18744 +    int     front;
18745 +    int     back;
18746 +    int     count;
18747 +    int     res;
18748 +    label_t ljp;
18749 +
18750 +    if (sctx->Exceptions == NULL)
18751 +       return (EINVAL);
18752 +
18753 +    kmutex_lock (&sctx->Lock);
18754 +    if (on_fault (&ljp))
18755 +    {
18756 +       no_fault();
18757 +       kmutex_unlock (&sctx->Lock);
18758 +       return (EFAULT);
18759 +    }
18760 +    
18761 +    front = fuword_noerr (&sctx->Exceptions->Front);
18762 +    back  = fuword_noerr (&sctx->Exceptions->Back);
18763 +    count = fuword_noerr (&sctx->Exceptions->Count);
18764 +
18765 +    if (count <= 0 || front < 0 || back < 0 || front >= count || back >= count || back == front)
18766 +       res = EINVAL;
18767 +    else
18768 +    {
18769 +       copyin_noerr ((caddr_t) &sctx->Exceptions->Exceptions[back], (caddr_t) ex, sizeof (SYS_EXCEPTION));
18770 +       suword_noerr (&sctx->Exceptions->Back, (back+1) % count);
18771 +
18772 +       res = ESUCCESS;
18773 +    }
18774 +    no_fault();
18775 +    kmutex_unlock (&sctx->Lock);
18776 +
18777 +    return (res);
18778 +}
18779 +
18780 +
18781 +/*
18782 + * Local variables:
18783 + * c-file-style: "stroustrup"
18784 + * End:
18785 + */
18786 diff -urN clean/drivers/net/qsnet/elan3/eventcookie.c linux-2.6.9/drivers/net/qsnet/elan3/eventcookie.c
18787 --- clean/drivers/net/qsnet/elan3/eventcookie.c 1969-12-31 19:00:00.000000000 -0500
18788 +++ linux-2.6.9/drivers/net/qsnet/elan3/eventcookie.c   2003-08-13 06:03:03.000000000 -0400
18789 @@ -0,0 +1,324 @@
18790 +/*
18791 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
18792 + * 
18793 + *    For licensing information please see the supplied COPYING file
18794 + *
18795 + */
18796 +
18797 +#ident "@(#)$Id: eventcookie.c,v 1.7 2003/08/13 10:03:03 fabien Exp $"
18798 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/eventcookie.c,v $*/
18799 +
18800 +#include <qsnet/kernel.h>
18801 +#include <elan3/elanregs.h>
18802 +#include <elan3/elandev.h>
18803 +#include <elan3/elanvp.h>
18804 +#include <elan3/elan3mmu.h>
18805 +#include <elan3/elanctxt.h>
18806 +#include <elan3/elandebug.h>
18807 +#include <elan3/urom_addrs.h>
18808 +#include <elan3/thread.h>
18809 +#include <elan3/vmseg.h>
18810 +
18811 +static EVENT_COOKIE_TABLE *cookie_tables;
18812 +static spinlock_t         cookie_table_lock;
18813 +
18814 +/*
18815 + * cookie_drop_entry:
18816 + *   drop the reference to a cookie held 
18817 + *   by the cookie table
18818 + */
18819 +static void
18820 +cookie_drop_entry (EVENT_COOKIE_ENTRY *ent)
18821 +{
18822 +    unsigned long flags;
18823 +
18824 +    spin_lock_irqsave (&ent->ent_lock, flags);
18825 +    if (--ent->ent_ref != 0)
18826 +    {
18827 +       ent->ent_fired = ent->ent_cookie;
18828 +       kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
18829 +
18830 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18831 +    }
18832 +    else
18833 +    {
18834 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
18835 +
18836 +       spin_lock_destroy (&ent->ent_lock);
18837 +       kcondvar_destroy (&ent->ent_wait);
18838 +
18839 +       KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY));
18840 +    }
18841 +}
18842 +
18843 +void
18844 +cookie_init()
18845 +{
18846 +    spin_lock_init (&cookie_table_lock);
18847 +}
18848 +
18849 +void
18850 +cookie_fini()
18851 +{
18852 +    spin_lock_destroy (&cookie_table_lock);
18853 +}
18854 +
18855 +EVENT_COOKIE_TABLE *
18856 +cookie_alloc_table (unsigned long task, unsigned long handle)
18857 +{
18858 +    EVENT_COOKIE_TABLE *tbl, *ntbl;
18859 +
18860 +    KMEM_ZALLOC (ntbl, EVENT_COOKIE_TABLE *, sizeof (EVENT_COOKIE_TABLE), TRUE);
18861 +
18862 +    if (ntbl == NULL)
18863 +       return (NULL);
18864 +
18865 +    spin_lock (&cookie_table_lock);
18866 +    
18867 +    for (tbl = cookie_tables; tbl; tbl = tbl->tbl_next)
18868 +       if (tbl->tbl_task == task && tbl->tbl_handle == handle)
18869 +           break;
18870 +    
18871 +    if (tbl != NULL)
18872 +       tbl->tbl_ref++;
18873 +    else
18874 +    {
18875 +       spin_lock_init (&ntbl->tbl_lock);
18876 +
18877 +       ntbl->tbl_task    = task;
18878 +       ntbl->tbl_handle  = handle;
18879 +       ntbl->tbl_ref     = 1;
18880 +       ntbl->tbl_entries = NULL;
18881 +
18882 +       if ((ntbl->tbl_next = cookie_tables) != NULL)
18883 +           cookie_tables->tbl_prev = ntbl;
18884 +       cookie_tables = ntbl;
18885 +       ntbl->tbl_prev = NULL;
18886 +    }
18887 +    spin_unlock (&cookie_table_lock);
18888 +
18889 +    if (tbl == NULL)
18890 +       return (ntbl);
18891 +    else
18892 +    {
18893 +       KMEM_FREE (ntbl, sizeof (EVENT_COOKIE_TABLE));
18894 +       return (tbl);
18895 +    }    
18896 +}
18897 +
18898 +void
18899 +cookie_free_table (EVENT_COOKIE_TABLE *tbl)
18900 +{
18901 +    EVENT_COOKIE_ENTRY *ent;
18902 +
18903 +    spin_lock (&cookie_table_lock);
18904 +    if (tbl->tbl_ref > 1)
18905 +    {
18906 +       tbl->tbl_ref--;
18907 +       spin_unlock (&cookie_table_lock);
18908 +       return;
18909 +    }
18910 +    
18911 +    if (tbl->tbl_prev)
18912 +       tbl->tbl_prev->tbl_next = tbl->tbl_next;
18913 +    else
18914 +       cookie_tables = tbl->tbl_next;
18915 +    if (tbl->tbl_next)
18916 +       tbl->tbl_next->tbl_prev = tbl->tbl_prev;
18917 +    
18918 +    spin_unlock (&cookie_table_lock);
18919 +    
18920 +    /* NOTE - table no longer visible to other threads
18921 +     *        no need to acquire tbl_lock */
18922 +    while ((ent = tbl->tbl_entries) != NULL)
18923 +    {
18924 +       if ((tbl->tbl_entries = ent->ent_next) != NULL)
18925 +           ent->ent_next->ent_prev = NULL;
18926 +       
18927 +       cookie_drop_entry (ent);
18928 +    }
18929 +    spin_lock_destroy (&tbl->tbl_lock);
18930 +
18931 +    KMEM_FREE (tbl, sizeof (EVENT_COOKIE_TABLE));
18932 +}
18933 +
18934 +int
18935 +cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18936 +{
18937 +    EVENT_COOKIE_ENTRY *ent, *nent;
18938 +    unsigned long flags;
18939 +
18940 +    KMEM_ZALLOC (nent, EVENT_COOKIE_ENTRY *, sizeof (EVENT_COOKIE_ENTRY), TRUE);
18941 +    
18942 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18943 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18944 +       if (ent->ent_cookie == cookie)
18945 +           break;
18946 +
18947 +    if (ent == NULL)
18948 +    {
18949 +       kcondvar_init (&nent->ent_wait);
18950 +       spin_lock_init (&nent->ent_lock);
18951 +
18952 +       nent->ent_ref    = 1;
18953 +       nent->ent_cookie = cookie;
18954 +
18955 +       if ((nent->ent_next = tbl->tbl_entries) != NULL)
18956 +           tbl->tbl_entries->ent_prev = nent;
18957 +       tbl->tbl_entries = nent;
18958 +       nent->ent_prev = NULL;
18959 +    }
18960 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18961 +
18962 +    if (ent == NULL)
18963 +       return (ESUCCESS);
18964 +    else
18965 +    {
18966 +       KMEM_FREE (nent, sizeof (EVENT_COOKIE_ENTRY));
18967 +       return (EINVAL);
18968 +    }
18969 +}
18970 +
18971 +int
18972 +cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
18973 +{
18974 +    EVENT_COOKIE_ENTRY *ent;
18975 +    unsigned long flags;
18976 +
18977 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
18978 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
18979 +       if (ent->ent_cookie == cookie)
18980 +           break;
18981 +    
18982 +    if (ent == NULL)
18983 +    {
18984 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18985 +       return (EINVAL);
18986 +    }
18987 +
18988 +    if (ent->ent_prev == NULL)
18989 +       tbl->tbl_entries = ent->ent_next;
18990 +    else
18991 +       ent->ent_prev->ent_next = ent->ent_next;
18992 +
18993 +    if (ent->ent_next != NULL)
18994 +       ent->ent_next->ent_prev = ent->ent_prev;
18995 +    
18996 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
18997 +
18998 +    cookie_drop_entry (ent);
18999 +
19000 +    return (ESUCCESS);
19001 +}
19002 +
19003 +/*
19004 + * cookie_fire_cookie:
19005 + *    fire the cookie - this is called from the event interrupt.
19006 + */
19007 +int
19008 +cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
19009 +{
19010 +    EVENT_COOKIE_ENTRY *ent;
19011 +    unsigned long flags;
19012 +
19013 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
19014 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
19015 +       if (ent->ent_cookie == cookie)
19016 +           break;
19017 +    
19018 +    if (ent == NULL)
19019 +    {
19020 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
19021 +       return (EINVAL);
19022 +    }
19023 +           
19024 +    spin_lock (&ent->ent_lock);
19025 +    ent->ent_fired = cookie;
19026 +    kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
19027 +    spin_unlock (&ent->ent_lock);
19028 +
19029 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
19030 +
19031 +    return (ESUCCESS);
19032 +}    
19033 +
19034 +/*
19035 + * cookie_wait_cookie:
19036 + *    deschedule on a cookie if it has not already fired.
19037 + *    note - if the cookie is removed from the table, then
19038 + *           we free it off when we're woken up.
19039 + */
19040 +int
19041 +cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
19042 +{
19043 +    EVENT_COOKIE_ENTRY *ent;
19044 +    unsigned long flags;
19045 +    
19046 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
19047 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
19048 +       if (ent->ent_cookie == cookie)
19049 +           break;
19050 +    
19051 +    if (ent == NULL)
19052 +    {
19053 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
19054 +       return (EINVAL);
19055 +    }
19056 +
19057 +    spin_lock (&ent->ent_lock);
19058 +    spin_unlock (&tbl->tbl_lock);
19059 +
19060 +    if (ent->ent_fired != 0)
19061 +    {
19062 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
19063 +       return (ESUCCESS);
19064 +    }
19065 +
19066 +    ent->ent_ref++;
19067 +    kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags);
19068 +    
19069 +    if (--ent->ent_ref > 0)
19070 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
19071 +    else
19072 +    {
19073 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
19074 +       
19075 +       spin_lock_destroy (&ent->ent_lock);
19076 +       kcondvar_destroy (&ent->ent_wait);
19077 +
19078 +       KMEM_FREE (ent, sizeof (EVENT_COOKIE_ENTRY));
19079 +    }
19080 +    return (ESUCCESS);
19081 +}
19082 +
19083 +int
19084 +cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie)
19085 +{
19086 +    EVENT_COOKIE_ENTRY *ent;
19087 +    unsigned long flags;
19088 +
19089 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
19090 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
19091 +       if (ent->ent_cookie == cookie)
19092 +           break;
19093 +    
19094 +    if (ent == NULL)
19095 +    {
19096 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
19097 +       return (EINVAL);
19098 +    }
19099 +           
19100 +    spin_lock (&ent->ent_lock);
19101 +    ent->ent_fired = 0;
19102 +    spin_unlock (&ent->ent_lock);
19103 +
19104 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
19105 +
19106 +    return (ESUCCESS);
19107 +}
19108 +
19109 +/*
19110 + * Local variables:
19111 + * c-file-style: "stroustrup"
19112 + * End:
19113 + */
19114 diff -urN clean/drivers/net/qsnet/elan3/iproc.c linux-2.6.9/drivers/net/qsnet/elan3/iproc.c
19115 --- clean/drivers/net/qsnet/elan3/iproc.c       1969-12-31 19:00:00.000000000 -0500
19116 +++ linux-2.6.9/drivers/net/qsnet/elan3/iproc.c 2003-09-24 09:57:25.000000000 -0400
19117 @@ -0,0 +1,925 @@
19118 +/*
19119 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
19120 + * 
19121 + *    For licensing information please see the supplied COPYING file
19122 + *
19123 + */
19124 +
19125 +#ident "@(#)$Id: iproc.c,v 1.47 2003/09/24 13:57:25 david Exp $"
19126 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/iproc.c,v $ */
19127 +
19128 +#include <qsnet/kernel.h>
19129 +
19130 +#include <elan3/elanregs.h>
19131 +#include <elan3/elandev.h>
19132 +#include <elan3/elanvp.h>
19133 +#include <elan3/elan3mmu.h>
19134 +#include <elan3/elanctxt.h>
19135 +#include <elan3/elandebug.h>
19136 +#include <elan3/urom_addrs.h>
19137 +#include <elan3/trtype.h>
19138 +#include <elan3/vmseg.h>
19139 +
19140 +
19141 +static int TrSizeTable[] = {0, 8, 16, 32, 64};
19142 +
19143 +static void  ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr);
19144 +static void  SimulateBlockWrite  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
19145 +static void  SimulateWriteWord   (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
19146 +static void  SimulateWriteDWord  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
19147 +static void  SimulateTraceRoute  (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap);
19148 +static void  BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp);
19149 +
19150 +void
19151 +HandleIProcTrap (ELAN3_DEV           *dev, 
19152 +                int                 Channel,
19153 +                E3_uint32           Pend,
19154 +                sdramaddr_t         FaultSaveOff,
19155 +                sdramaddr_t         TransactionsOff,
19156 +                sdramaddr_t         DataOff)
19157 +{
19158 +    E3_IprocTrapHeader_BE Transaction0;
19159 +    ELAN3_CTXT          *ctxt;
19160 +    INPUT_TRAP           *trap;
19161 +    register int          i;
19162 +
19163 +    /*
19164 +     * Read the 1st set of transactions, so we can determine the 
19165 +     * context for the trap 
19166 +     */
19167 +    elan3_sdram_copyq_from_sdram (dev, TransactionsOff, (void *) &Transaction0, 16);
19168 +    
19169 +    BumpStat (dev, IProcTraps);
19170 +    BumpInputterStats (dev, &Transaction0);
19171 +
19172 +    if (Transaction0.s.TrTypeCntx.s.TypeCntxInvalid)
19173 +    {
19174 +       /*
19175 +        * The context is not valid. This will occur if the packet
19176 +        * trapped for an EopError with no IdentTrans or an error corrupted the context
19177 +        * giving a CRC error on the first transaction and the Ack had not been returned.
19178 +        */
19179 +       if (Transaction0.s.TrTypeCntx.s.LastTrappedTrans)
19180 +       {
19181 +           PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: Error on EOP without a good context, ignoring trap\n");
19182 +       }
19183 +       else
19184 +       {
19185 +           /* Check that only crap has been received.  If not then die. */
19186 +           if (! Transaction0.s.IProcTrapStatus.s.BadLength &&
19187 +               (Transaction0.s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_GOOD)
19188 +           {
19189 +               printk ("iproc: Did not have a valid context for the trap area.\n");
19190 +               printk ("iproc: TrTypeCntx=%x TrAddr=%x TrData0=%x IProcTrapStatus=%x\n",
19191 +                        Transaction0.s.TrTypeCntx.TypeContext, Transaction0.s.TrAddr,
19192 +                        Transaction0.s.TrData0, Transaction0.s.IProcTrapStatus.Status);
19193 +               panic ("elan3: iproc did not have a valid context");
19194 +               /* NOTREACHED */
19195 +           }
19196 +           PRINTF0 (DBG_DEVICE, DBG_IPROC, "iproc: First transaction is bad, ignoring trap\n");
19197 +       }
19198 +    }
19199 +    else
19200 +    {
19201 +       ctxt = ELAN3_DEV_CTX_TABLE(dev, Transaction0.s.TrTypeCntx.s.Context);
19202 +       
19203 +       if (ctxt == NULL)
19204 +       {
19205 +           PRINTF1 (DBG_DEVICE, DBG_INTR, "HandleIProcTrap: context %x invalid\n", 
19206 +                    Transaction0.s.TrTypeCntx.s.Context);
19207 +
19208 +           BumpStat (dev, InvalidContext);
19209 +       }
19210 +       else
19211 +       {
19212 +           trap = (Channel == 0) ? &ctxt->Input0Trap : &ctxt->Input1Trap;
19213 +
19214 +           ASSERT (trap->State == CTXT_STATE_OK);
19215 +           
19216 +           trap->Transactions[0] = Transaction0;
19217 +
19218 +           PRINTF1 (ctxt, DBG_INTR, "HandleIProcTrap: %s\n", IProcTrapString (&trap->Transactions[0], NULL));
19219 +           /*
19220 +            * Copy the rest of the transactions into the trap area.
19221 +            */
19222 +           for (i = 0; !(trap->Transactions[i].s.TrTypeCntx.s.LastTrappedTrans);)
19223 +           {
19224 +               if (++i >= MAX_TRAPPED_TRANS)
19225 +               {
19226 +                   trap->Overflow = 1;
19227 +                   break;
19228 +               }
19229 +
19230 +               elan3_sdram_copyq_from_sdram (dev, TransactionsOff + i*sizeof (E3_IprocTrapHeader), (void *) &trap->Transactions[i], 16);
19231 +
19232 +               PRINTF1 (ctxt, DBG_INTR, "                 %s\n", IProcTrapString (&trap->Transactions[i], NULL));
19233 +
19234 +               BumpInputterStats (dev, &trap->Transactions[i]);
19235 +           }
19236 +           
19237 +           /*
19238 +            * Remember the number of transactions we've copied.
19239 +            */
19240 +           trap->NumTransactions = i+1;
19241 +
19242 +           PRINTF1 (ctxt, DBG_INTR, "                 NumTransactions = %d\n", trap->NumTransactions);
19243 +           
19244 +           /*
19245 +            * Copy all the data blocks in one go to let the Elan prefetcher work 
19246 +            */
19247 +           elan3_sdram_copyq_from_sdram (dev, DataOff, trap->DataBuffers, trap->NumTransactions*sizeof (E3_IprocTrapData));
19248 +
19249 +           /*
19250 +            * Copy fault save area and clear out for next time round.
19251 +            */
19252 +           elan3_sdram_copyq_from_sdram (dev, FaultSaveOff, (void *) &trap->FaultSave, 16);
19253 +           elan3_sdram_zeroq_sdram (dev, FaultSaveOff, 16);
19254 +
19255 +           if (ELAN3_OP_IPROC_TRAP (ctxt, trap, Channel) == OP_DEFER)
19256 +           {
19257 +               /*
19258 +                * Mark the trap as valid and set the inputter state to 
19259 +                * raise the context filter.
19260 +                */
19261 +               trap->State = CTXT_STATE_TRAPPED;
19262 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
19263 +               
19264 +               SetInputterStateForContext (ctxt, Pend, NULL);
19265 +           }
19266 +       }
19267 +    }
19268 +}
19269 +
19270 +void
19271 +InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap)
19272 +{
19273 +    int              i;
19274 +    int              StatusValid;
19275 +
19276 +    trap->AckSent                  = 0;
19277 +    trap->BadTransaction            = 0;
19278 +    
19279 +    trap->TrappedTransaction        = NULL;
19280 +    trap->TrappedDataBuffer        = NULL;
19281 +    trap->WaitForEopTransaction     = NULL;
19282 +    trap->WaitForEopDataBuffer      = NULL;
19283 +    trap->DmaIdentifyTransaction    = NULL;
19284 +    trap->ThreadIdentifyTransaction = NULL;
19285 +    trap->LockQueuePointer          = (E3_Addr) 0;
19286 +    trap->UnlockQueuePointer        = (E3_Addr) 0;
19287 +
19288 +    /*
19289 +     * Now scan all the transactions received 
19290 +     */
19291 +    for (i = 0; i < trap->NumTransactions ; i++)
19292 +    {
19293 +       E3_IprocTrapHeader_BE *hdrp = &trap->Transactions[i];
19294 +       E3_IprocTrapData_BE   *datap = &trap->DataBuffers[i];
19295 +
19296 +       StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid != 0;
19297 +       
19298 +       if (StatusValid && hdrp->s.IProcTrapStatus.s.AckSent)   /* Remember if we've sent the ack back */
19299 +           trap->AckSent = 1;
19300 +       
19301 +       if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)              /* Check for EOP */
19302 +       {
19303 +           ASSERT (i == trap->NumTransactions - 1);
19304 +
19305 +           switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
19306 +           {
19307 +           case EOP_GOOD:
19308 +               /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */  
19309 +               /* unless it was a flood, in which case someone must have sent an ack */
19310 +               /* but not necessarily us */
19311 +               break;
19312 +
19313 +           case EOP_BADACK:
19314 +               BumpUserStat (ctxt, EopBadAcks);
19315 +
19316 +               /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if
19317 +                * we sent a PAckOk. We can clear tinfo.AckSent. */
19318 +               if (trap->AckSent == 1)
19319 +               {
19320 +                   PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: Network error destroyed PAckOk\n");
19321 +                   trap->AckSent = 0;
19322 +               }
19323 +               break;
19324 +
19325 +           case EOP_ERROR_RESET:
19326 +               BumpUserStat (ctxt, EopResets);
19327 +
19328 +               /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */
19329 +               trap->BadTransaction = 1;
19330 +               break;
19331 +
19332 +           default:
19333 +               panic ("InspectIProcTrap: invalid EOP type in status register\n");
19334 +               /* NOTREACHED */
19335 +           }
19336 +           continue;
19337 +       }
19338 +
19339 +       PRINTF2 (ctxt, DBG_IPROC, "InspectIProcTrap: %2d: %s\n", i, IProcTrapString (hdrp, datap));
19340 +       
19341 +       if (! StatusValid)                                      /* We're looking at transactions stored before the trap */
19342 +       {                                                       /* these should only be identifies and lock transactions */
19343 +
19344 +           if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
19345 +               panic ("InspectIProcTrap: writeblock transaction found in input trap header before trap occured\n");
19346 +
19347 +           switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
19348 +           {
19349 +           case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK:
19350 +               if (trap->LockQueuePointer)                             /* Already seen a LOCKQUEUE transaction in this packet, */
19351 +               {                                               /* the user program should not have done this !! */
19352 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19353 +                   return;
19354 +               }
19355 +
19356 +               trap->LockQueuePointer = (E3_Addr) hdrp->s.TrAddr;      /* Remember the queue pointer in case we need to unlock it */
19357 +               break;
19358 +
19359 +           case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK:
19360 +               if (trap->DmaIdentifyTransaction ||             /* Already seen an identify transaction in this packet */
19361 +                   trap->ThreadIdentifyTransaction)            /* the user program should not have done this */
19362 +               {                                                       
19363 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19364 +                   return;
19365 +               }
19366 +               trap->DmaIdentifyTransaction = hdrp;
19367 +               break;
19368 +
19369 +           case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK:
19370 +               if (trap->DmaIdentifyTransaction ||             /* Already seen an identify transaction in this packet */
19371 +                   trap->ThreadIdentifyTransaction)            /* the user program should not have done this */
19372 +               {                                                       
19373 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19374 +                   return;
19375 +               }
19376 +               trap->ThreadIdentifyTransaction = hdrp;
19377 +               break;
19378 +               
19379 +           default:
19380 +               panic ("InspectIProcTrap: invalid transaction found in input trap header before trap occured\n");
19381 +               /* NOTREACHED */
19382 +           }
19383 +           continue;
19384 +       }
19385 +
19386 +       if (StatusValid && trap->TrappedTransaction == NULL)    /* Remember the transaction which caused the */
19387 +       {                                                       /* trap */
19388 +           trap->TrappedTransaction = hdrp;
19389 +           trap->TrappedDataBuffer  = datap;
19390 +       }
19391 +
19392 +       if(hdrp->s.IProcTrapStatus.s.BadLength ||
19393 +          ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR) ||
19394 +          ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD))
19395 +       {
19396 +           int j;
19397 +           PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: transaction has a bad crc\n");
19398 +           for (j=0; j<TRANS_DATA_WORDS; j+=4)
19399 +              PRINTF5 (ctxt, DBG_IPROC, "InspectIProcTrap: Data %0d %8x %8x %8x %8x\n",
19400 +                       j, datap->TrData[j], datap->TrData[j+1], datap->TrData[j+2], datap->TrData[j+3]);
19401 +           trap->BadTransaction = 1;
19402 +           continue;
19403 +       }
19404 +       
19405 +       /* No more to do if it's a writeblock transaction */
19406 +       if (hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT)
19407 +           continue;
19408 +
19409 +       
19410 +       if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap &&
19411 +           (hdrp->s.TrTypeCntx.s.Type & TR_WAIT_FOR_EOP) != 0)
19412 +       {
19413 +           /*
19414 +            * This is a wait for eop transaction that has trapped because the inputer
19415 +            * then received a EopError. The next transaction saved should always be an
19416 +            * EopError.
19417 +            */
19418 +           PRINTF0 (ctxt, DBG_IPROC, "InspectIProcTrap: got a trapped WaitForEop transaction due to EopError\n");
19419 +           
19420 +           trap->WaitForEopTransaction = hdrp;
19421 +           trap->WaitForEopDataBuffer  = datap;
19422 +           continue;
19423 +       }
19424 +
19425 +       switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
19426 +       {
19427 +       case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:
19428 +           if (trap->UnlockQueuePointer)
19429 +           {
19430 +               ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19431 +               return;
19432 +           }
19433 +           trap->UnlockQueuePointer = (E3_Addr) hdrp->s.TrAddr;
19434 +           break;
19435 +       }
19436 +    }
19437 +}
19438 +
19439 +void
19440 +ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp)
19441 +{
19442 +    ELAN3_DEV     *dev = ctxt->Device;
19443 +    int           res;
19444 +    unsigned long flags;
19445 +
19446 +    ASSERT (! CTXT_IS_KERNEL (ctxt));
19447 +
19448 +    BumpUserStat (ctxt, IProcTraps);
19449 +
19450 +    InspectIProcTrap (ctxt, trap);
19451 +
19452 +    /*
19453 +     * fixup page fault if we've trapped because of one.
19454 +     */
19455 +    if (trap->FaultSave.s.FaultContext != 0)
19456 +    {
19457 +       /*
19458 +        * If it's a WRITEBLOCK transaction, then see if we remember faulting
19459 +        * before it, and try and prefault in a sensible amount past it.
19460 +        */
19461 +       int                fixedFault = FALSE;
19462 +       INPUT_FAULT_SAVE  *entry;
19463 +       INPUT_FAULT_SAVE **predp;
19464 +       int                npages;
19465 +
19466 +       if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0 && /* a DMA packet */
19467 +           trap->LockQueuePointer == (E3_Addr) 0 &&                                    /* but not a queueing DMA */
19468 +           trap->TrappedTransaction->s.TrAddr != 0)                                    /* and not a DMA to 0 */
19469 +       {
19470 +           spin_lock (&ctxt->InputFaultLock);
19471 +           
19472 +           for (predp = &ctxt->InputFaultList; (entry = *predp)->Next != NULL ; predp = &entry->Next)
19473 +           {
19474 +               if (entry->Addr == trap->TrappedTransaction->s.TrAddr)
19475 +                   break;
19476 +           }
19477 +           
19478 +           *predp = entry->Next;
19479 +           entry->Next = ctxt->InputFaultList;
19480 +           ctxt->InputFaultList = entry;
19481 +           
19482 +           if (entry->Addr == trap->TrappedTransaction->s.TrAddr)
19483 +           {
19484 +               if ((entry->Count <<= 1) > MAX_INPUT_FAULT_PAGES)
19485 +                   entry->Count = MAX_INPUT_FAULT_PAGES;
19486 +           }
19487 +           else
19488 +           {
19489 +               entry->Count = MIN_INPUT_FAULT_PAGES;
19490 +           }
19491 +           
19492 +           entry->Addr = trap->TrappedTransaction->s.TrAddr + (entry->Count * PAGESIZE);
19493 +           npages = entry->Count;
19494 +           
19495 +           spin_unlock (&ctxt->InputFaultLock);
19496 +           
19497 +           if (elan3_pagefault (ctxt, &trap->FaultSave, npages) != ESUCCESS)
19498 +           {
19499 +               PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - failed\n", 
19500 +                        npages, trap->TrappedTransaction->s.TrAddr);
19501 +           }
19502 +           else
19503 +           {
19504 +               PRINTF2 (ctxt, DBG_IPROC, "ResolveIProcTrap: pagefaulting %d pages at %08x - succeeded\n", 
19505 +                        npages, trap->TrappedTransaction->s.TrAddr);
19506 +               
19507 +               fixedFault = TRUE;
19508 +           }
19509 +       }
19510 +
19511 +       /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */
19512 +       /* the packet will have been nacked */
19513 +       if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) &&      /* a DMA packet */
19514 +           trap->LockQueuePointer == 0 && trap->UnlockQueuePointer &&                  /* a queueing DMA */
19515 +           trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress)       /* and missed lockqueue */
19516 +       {
19517 +           fixedFault = TRUE;
19518 +       }
19519 +
19520 +       if (! fixedFault)
19521 +       {
19522 +           if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
19523 +           {
19524 +               PRINTF1 (ctxt, DBG_IPROC, "ResolveIProcTrap: elan3_pagefault failed at %x\n", 
19525 +                        trap->FaultSave.s.FaultAddress);
19526 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, INPUT_PROC, trap, &trap->FaultSave, res);
19527 +               return;
19528 +           }
19529 +       }
19530 +    }
19531 +
19532 +    if (! trap->AckSent && trap->LockQueuePointer)                     /* Queued DMA */
19533 +    {                                                                  /* The ack was not sent, so the queue will be locked. */
19534 +       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE);      /* We must unlock it. */
19535 +    }
19536 +
19537 +    if (trap->AckSent && trap->BadTransaction)
19538 +    {
19539 +       if (trap->DmaIdentifyTransaction)
19540 +       {
19541 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Dma identify needs network resultion\n");
19542 +
19543 +           BumpStat (dev, DmaIdentifyNetworkErrors);
19544 +           BumpUserStat (ctxt, DmaIdentifyNetworkErrors);
19545 +
19546 +           if (trap->WaitForEopTransaction)
19547 +               PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n");
19548 +       }
19549 +       else if (trap->ThreadIdentifyTransaction)
19550 +       {
19551 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: Thread identify needs network resolution\n");
19552 +
19553 +           BumpStat (dev, ThreadIdentifyNetworkErrors);
19554 +           BumpUserStat (ctxt, ThreadIdentifyNetworkErrors);
19555 +
19556 +           if (trap->WaitForEopTransaction)
19557 +               PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: have delayed wait for eop transaction\n");
19558 +       }
19559 +       else
19560 +       {
19561 +           BumpStat (dev, DmaNetworkErrors);
19562 +           BumpUserStat (ctxt, DmaNetworkErrors);
19563 +       }
19564 +    }
19565 +
19566 +    spin_lock_irqsave (&dev->IntrLock, flags);
19567 +    
19568 +    if (! trap->AckSent)
19569 +    {
19570 +       PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack not sent, lowering context filter\n");
19571 +
19572 +       trap->State = CTXT_STATE_OK;
19573 +    }
19574 +    else
19575 +    {
19576 +       if (trap->BadTransaction)
19577 +       {
19578 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on bad transaction\n");
19579 +           trap->State = CTXT_STATE_NETWORK_ERROR;
19580 +       }
19581 +       else
19582 +       {
19583 +           PRINTF0 (ctxt, DBG_IPROC, "ResolveIProcTrap: ack sent, waiting on packet to be re-executed\n");
19584 +           trap->State = CTXT_STATE_NEEDS_RESTART;
19585 +       }
19586 +    }
19587 +
19588 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
19589 +
19590 +    if (trap->AckSent && trap->BadTransaction)
19591 +       ElanException (ctxt, EXCEPTION_NETWORK_ERROR, INPUT_PROC, trap, rvpp);
19592 +}
19593 +
19594 +int
19595 +RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap)
19596 +{
19597 +    PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: %d transactions\n", trap->NumTransactions);
19598 +
19599 +    if (trap->TrappedTransaction == NULL)                      /* No transaction trapped - probably a network */
19600 +       return (ESUCCESS);                                      /* error */
19601 +
19602 +    while (! trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans)
19603 +    {
19604 +       E3_IprocTrapHeader_BE *hdrp = trap->TrappedTransaction;
19605 +       E3_IprocTrapData_BE   *datap = trap->TrappedDataBuffer;
19606 +       
19607 +       ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0);
19608 +
19609 +       PRINTF2 (ctxt, DBG_IPROC, "RestartIProc: TrType=0x%x Status=0x%x\n",
19610 +                hdrp->s.TrTypeCntx.TypeContext, hdrp->s.IProcTrapStatus.Status);
19611 +       
19612 +       if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0)
19613 +       {
19614 +           PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr);
19615 +           SimulateBlockWrite (ctxt, hdrp, datap);
19616 +       }
19617 +       else
19618 +       {
19619 +           switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
19620 +           {
19621 +           case TR_SETEVENT & TR_OPCODE_TYPE_MASK:
19622 +               PRINTF1 (ctxt, DBG_IPROC, "RestartIProc: SETEVENT : %x\n", hdrp->s.TrAddr);
19623 +
19624 +               if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_InputDoTrap)
19625 +                   FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus), &trap->FaultSave, FALSE);
19626 +               else if (hdrp->s.TrAddr)
19627 +               {
19628 +                   if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent), hdrp->s.TrAddr, FALSE) != ISSUE_COMMAND_OK)
19629 +                       return (EAGAIN);
19630 +               }
19631 +               break;
19632 +
19633 +           case TR_WRITEWORD & TR_OPCODE_TYPE_MASK:
19634 +               SimulateWriteWord (ctxt, hdrp, datap);
19635 +               break;
19636 +
19637 +           case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK:
19638 +               SimulateWriteDWord (ctxt, hdrp, datap);
19639 +               break;
19640 +               
19641 +           case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:
19642 +               if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) == MI_InputDoTrap)
19643 +                   ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19644 +               else
19645 +               {
19646 +                   switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus))
19647 +                   {
19648 +                   case MI_WaitForUnLockDescRead:
19649 +                       /*
19650 +                        * Fault occured on the read of the queue descriptor - since the ack
19651 +                        * has been sent we need to move the queue on one slot.
19652 +                        */
19653 +                       PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TR_UNLOCKQUEUE : desc read fault\n");
19654 +
19655 +                       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE);
19656 +                       
19657 +                       if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent),
19658 +                                         hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK)
19659 +                       {
19660 +                           /* Failed to issue setevent to complete queue unlock, since we've already unlocked */
19661 +                           /* the queue, we should "convert" this transaction into a setevent transaction that */
19662 +                           /* hasn't trapped */
19663 +                           PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n");
19664 +
19665 +                           ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET);
19666 +                           return (EAGAIN);
19667 +                       }
19668 +                       break;
19669 +                       
19670 +                   case MI_DoSetEvent:
19671 +                       /*
19672 +                        * Fault occured on either the write to unlock the queue or during 
19673 +                        * processing of the event.  Test the fault address against the
19674 +                        * queue address to find out which - in this case, since the ack
19675 +                        * has been sent we need to move the queue on one slot.
19676 +                        */
19677 +                       if (trap->FaultSave.s.FaultAddress == trap->LockQueuePointer)
19678 +                       {
19679 +                           PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: fixed unlock queue write to unlock fault\n");
19680 +
19681 +                           SimulateUnlockQueue (ctxt, trap->LockQueuePointer, TRUE);
19682 +                           
19683 +                           if (IssueCommand (ctxt, offsetof (E3_CommandPort, SetEvent),
19684 +                                             hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET, FALSE) != ISSUE_COMMAND_OK)
19685 +                           {
19686 +                               /* Failed to issue setevent to complete queue unlock, since we've already unlocked */
19687 +                               /* the queue, we should "convert" this transaction into a setevent transaction that */
19688 +                               /* hasn't trapped */
19689 +                               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: could not issue setevent for SimulateUnlockQueue\n");
19690 +                               
19691 +                               ConvertTransactionToSetEvent (ctxt, hdrp, hdrp->s.TrAddr + E3_QUEUE_EVENT_OFFSET);
19692 +                               return (EFAIL);
19693 +                           }
19694 +                           break;
19695 +                       }
19696 +                       /*DROPTHROUGH*/
19697 +                       
19698 +                   default:
19699 +                       FixupEventTrap (ctxt, INPUT_PROC, trap, GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus),
19700 +                                       &trap->FaultSave, FALSE);
19701 +                       break;
19702 +                   }
19703 +                   trap->LockQueuePointer = trap->UnlockQueuePointer = 0;
19704 +               }
19705 +               break;
19706 +
19707 +           case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:
19708 +               /* Just ignore send-discard transactions */
19709 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: ignore SENDDISCARD\n");
19710 +               break;
19711 +
19712 +           case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:
19713 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: REMOTEDMA\n");         
19714 +
19715 +               /* modify the dma type since it will still be a "read" dma */
19716 +               ((E3_DMA_BE *) datap)->s.dma_type &= ~(DMA_TYPE_READ | E3_DMA_CONTEXT_MASK);
19717 +               ((E3_DMA_BE *) datap)->s.dma_type |= DMA_TYPE_ISREMOTE;
19718 +
19719 +               RestartDmaDesc (ctxt, (E3_DMA_BE *) datap);
19720 +               break;
19721 +
19722 +           case TR_TRACEROUTE & TR_OPCODE_TYPE_MASK:
19723 +               PRINTF0 (ctxt, DBG_IPROC, "RestartIProc: TRACEROUTE\n");
19724 +               SimulateTraceRoute (ctxt, hdrp, datap);
19725 +               break;
19726 +
19727 +           default:
19728 +               ElanException (ctxt, EXCEPTION_BAD_PACKET, INPUT_PROC, trap);
19729 +               break;
19730 +           }
19731 +       }
19732 +
19733 +       /*
19734 +        * We've successfully processed this transaction, so move onto the 
19735 +        * next one.
19736 +        */
19737 +       trap->TrappedTransaction++;
19738 +       trap->TrappedDataBuffer++;
19739 +    }
19740 +    
19741 +    return (ESUCCESS);
19742 +}
19743 +
19744 +static void
19745 +ConvertTransactionToSetEvent (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_Addr Addr)
19746 +{
19747 +    hdrp->s.TrTypeCntx.s.Type           = TR_SETEVENT;
19748 +    hdrp->s.TrTypeCntx.s.StatusRegValid = 0;
19749 +    hdrp->s.TrAddr                      = Addr;
19750 +}
19751 +
19752 +void
19753 +SimulateBlockWrite (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19754 +{
19755 +    void     *saddr  = (void *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f));
19756 +    unsigned  nbytes = (hdrp->s.TrTypeCntx.s.Type) & TR_PARTSIZE_MASK;
19757 +    int       i;
19758 +
19759 +    if (nbytes == 0)
19760 +       nbytes = sizeof (E3_IprocTrapData_BE);
19761 +
19762 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19763 +    {
19764 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19765 +
19766 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateBlockWrite: faulted at %x\n", hdrp->s.TrAddr);
19767 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19768 +       return;
19769 +    }
19770 +
19771 +    /*
19772 +     * NOTE: since the block copy could be to sdram, we issue the writes backwards,
19773 +     *       except we MUST ensure that the last item in the block is written last.
19774 +     */
19775 +    switch (((hdrp->s.TrTypeCntx.s.Type) >> TR_TYPE_SHIFT) & TR_TYPE_MASK)
19776 +    {
19777 +    case TR_TYPE_BYTE:                                         /* 8 bit */
19778 +       for (i = nbytes - (2*sizeof (E3_uint8)); i >= 0; i -= sizeof (E3_uint8))
19779 +           ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]);
19780 +       i = nbytes - sizeof (E3_uint8);
19781 +       ELAN3_OP_STORE8 (ctxt, hdrp->s.TrAddr + i, ((E3_uint8 *) saddr)[i]);
19782 +       break;
19783 +       
19784 +    case TR_TYPE_SHORT:                                                /* 16 bit */
19785 +       for (i = nbytes - (2*sizeof (E3_uint16)); i >= 0; i -= sizeof (E3_uint16))
19786 +       ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]);
19787 +       i = nbytes - sizeof (E3_uint16);
19788 +       ELAN3_OP_STORE16 (ctxt, hdrp->s.TrAddr + i, ((E3_uint16 *) saddr)[i]);
19789 +       break;
19790 +       
19791 +    case TR_TYPE_WORD:                                         /* 32 bit */
19792 +       for (i = nbytes - (2*sizeof (E3_uint32)); i >= 0; i -= sizeof (E3_uint32))
19793 +           ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]);
19794 +       i = nbytes - sizeof (E3_uint32);
19795 +       ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + i, ((E3_uint32 *) saddr)[i]);
19796 +       break;
19797 +       
19798 +    case TR_TYPE_DWORD:                                                /* 64 bit  */
19799 +       for (i = nbytes - (2*sizeof (E3_uint64)); i >= 0; i -= sizeof (E3_uint64))
19800 +           ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]);
19801 +       i = nbytes - sizeof (E3_uint64);
19802 +       ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr + i, ((E3_uint64 *) saddr)[i]);
19803 +       break;
19804 +    }
19805 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19806 +}
19807 +
19808 +void
19809 +SimulateWriteWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19810 +{
19811 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19812 +    {
19813 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19814 +
19815 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteWord: faulted at %x\n", hdrp->s.TrAddr);
19816 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19817 +       return;
19818 +    }
19819 +
19820 +    ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr, ((E3_uint32 *) datap)[WordEndianFlip]);
19821 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19822 +}
19823 +
19824 +void
19825 +SimulateWriteDWord (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19826 +{
19827 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19828 +    {
19829 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19830 +
19831 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateWriteDWord: faulted at %x\n", hdrp->s.TrAddr);
19832 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19833 +       return;
19834 +    }
19835 +
19836 +    ELAN3_OP_STORE64 (ctxt, hdrp->s.TrAddr, ((E3_uint64 *) datap)[0]);
19837 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19838 +}
19839 +
19840 +void
19841 +SimulateTraceRoute (ELAN3_CTXT *ctxt, E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19842 +{
19843 +    E3_uint32 *saddr  = (E3_uint32 *) ((unsigned long) datap + (hdrp->s.TrAddr & 0x3f));
19844 +    unsigned   nwords = TrSizeTable[(hdrp->s.TrTypeCntx.s.Type >> TR_SIZE_SHIFT) & TR_SIZE_MASK] / sizeof (E3_uint32);
19845 +    int        i;
19846 +
19847 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19848 +    {
19849 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19850 +
19851 +       PRINTF1 (ctxt, DBG_IPROC, "SimulateTraceRoute: faulted at %x\n", hdrp->s.TrAddr);
19852 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, hdrp->s.TrAddr);
19853 +       return;
19854 +    }
19855 +    
19856 +    for (i = nwords-2; i >= 0; i--)
19857 +       ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]);
19858 +
19859 +    i = nwords-1;
19860 +    ELAN3_OP_STORE32 (ctxt, hdrp->s.TrAddr + (i * sizeof (E3_uint32)), saddr[i ^ WordEndianFlip]);
19861 +
19862 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
19863 +}
19864 +
19865 +void
19866 +SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck)
19867 +{
19868 +    E3_uint32 QueueLock;
19869 +    E3_Addr   QueueBPTR;
19870 +    E3_Addr   QueueFPTR;
19871 +    E3_uint64 QueueStateAndBPTR;
19872 +
19873 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
19874 +    {
19875 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
19876 +
19877 +       PRINTF1 (ctxt, DBG_IPROC, "UnlockQueue: faulted with QueuePointer %x\n", QueuePointer);
19878 +       ElanException (ctxt, EXCEPTION_FAULTED, INPUT_PROC, NULL, QueuePointer);
19879 +       return;
19880 +    }
19881 +    
19882 +    if (SentAck)
19883 +    {
19884 +       QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_bptr));
19885 +       QueueFPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_fptr));
19886 +
19887 +       if (QueueBPTR == ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_top)))     /* move on back pointer */
19888 +           QueueBPTR = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_base));
19889 +       else
19890 +           QueueBPTR += ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_size));
19891 +       
19892 +       QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state));
19893 +
19894 +       if (QueueBPTR == QueueFPTR)                             /* and set full bit if fptr == bptr */
19895 +           QueueLock |= E3_QUEUE_FULL;
19896 +       
19897 +       QueueLock &= ~E3_QUEUE_LOCKED;
19898 +       
19899 +       QueueStateAndBPTR = (E3_uint64)QueueLock << 32 | QueueBPTR;
19900 +
19901 +       ELAN3_OP_STORE64 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueStateAndBPTR);
19902 +    }
19903 +    else
19904 +    {
19905 +       QueueLock = ELAN3_OP_LOAD32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state));
19906 +
19907 +       QueueLock &= ~E3_QUEUE_LOCKED;
19908 +       
19909 +       ELAN3_OP_STORE32 (ctxt, QueuePointer + offsetof (E3_Queue, q_state), QueueLock);
19910 +    }
19911 +
19912 +    no_fault();
19913 +}
19914 +
19915 +static void
19916 +BumpInputterStats (ELAN3_DEV *dev, E3_IprocTrapHeader_BE *hdrp)
19917 +{
19918 +    if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)                 /* EOP */
19919 +    {
19920 +       switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
19921 +       {
19922 +       case EOP_BADACK:
19923 +           BumpStat (dev, EopBadAcks);
19924 +           break;
19925 +       case EOP_ERROR_RESET:
19926 +           BumpStat (dev, EopResets);
19927 +           break;
19928 +       }
19929 +    }
19930 +    else if (hdrp->s.TrTypeCntx.s.StatusRegValid)
19931 +    {
19932 +       /*
19933 +        * Errors are tested in order of badness. i.e. badlength will prevent a BadCrc and so on...
19934 +        */
19935 +       if (hdrp->s.IProcTrapStatus.s.BadLength)
19936 +           BumpStat (dev, InputterBadLength);
19937 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_BAD)
19938 +           BumpStat (dev, InputterCRCBad);
19939 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_ERROR)
19940 +           BumpStat (dev, InputterCRCErrors);
19941 +       else if ((hdrp->s.IProcTrapStatus.Status & CRC_MASK) == CRC_STATUS_DISCARD)
19942 +           BumpStat (dev, InputterCRCDiscards);
19943 +    }
19944 +}
19945 +
19946 +char *
19947 +IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData_BE *datap)
19948 +{
19949 +    static char buffer[256];
19950 +    static char typeString[256];
19951 +    static char statusString[256];
19952 +    char *ptr;
19953 +    E3_Addr     Addr        = hdrp->s.TrAddr;
19954 +    E3_uint32   Type        = hdrp->s.TrTypeCntx.s.Type;
19955 +    E3_uint32   Context     = hdrp->s.TrTypeCntx.s.Context;
19956 +    E3_uint32   StatusValid = hdrp->s.TrTypeCntx.s.StatusRegValid;
19957 +    
19958 +    if (hdrp->s.TrTypeCntx.s.LastTrappedTrans)
19959 +    {
19960 +       switch (hdrp->s.IProcTrapStatus.Status & E3_IPS_EopType)
19961 +       {
19962 +       case EOP_GOOD:          sprintf (typeString, "EOP GOOD"); break;
19963 +       case EOP_BADACK:        sprintf (typeString, "EOP BADACK"); break;
19964 +       case EOP_ERROR_RESET:   sprintf (typeString, "EOP ERROR RESET"); break;
19965 +       default:                sprintf (typeString, "EOP - bad status"); break;
19966 +       }
19967 +       sprintf (buffer, "%15s Cntx=%08x", typeString, Context);
19968 +    }
19969 +    else
19970 +    {
19971 +       if (Type & TR_WRITEBLOCK_BIT)
19972 +       {
19973 +           switch ((Type >> TR_TYPE_SHIFT) & TR_TYPE_MASK)
19974 +           {
19975 +           case TR_TYPE_BYTE:  ptr = "Byte";    break;
19976 +           case TR_TYPE_SHORT: ptr = "Short";   break;
19977 +           case TR_TYPE_WORD:  ptr = "Word";    break;
19978 +           case TR_TYPE_DWORD: ptr = "Double";  break;
19979 +           default:            ptr = "Unknown"; break;
19980 +           }
19981 +           
19982 +           sprintf (typeString, "WriteBlock Type=%s Size=%2d", ptr, Type & TR_PARTSIZE_MASK);
19983 +       }
19984 +       else
19985 +       {
19986 +           switch (Type & TR_OPCODE_TYPE_MASK)
19987 +           {
19988 +           case TR_SETEVENT & TR_OPCODE_TYPE_MASK:             sprintf (typeString, "Setevent"); break;
19989 +           case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Remote DMA"); break;
19990 +           case TR_LOCKQUEUE & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Lock Queue"); break;
19991 +           case TR_UNLOCKQUEUE & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "Unlock Queue"); break;
19992 +           case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "Send Discard"); break;
19993 +           case TR_DMAIDENTIFY & TR_OPCODE_TYPE_MASK:          sprintf (typeString, "DMA Identify"); break;
19994 +           case TR_THREADIDENTIFY & TR_OPCODE_TYPE_MASK:       sprintf (typeString, "Thread Identify"); break;
19995 +           case TR_GTE & TR_OPCODE_TYPE_MASK:                  sprintf (typeString, "GTE"); break;
19996 +           case TR_LT & TR_OPCODE_TYPE_MASK:                   sprintf (typeString, "LT"); break;
19997 +           case TR_EQ & TR_OPCODE_TYPE_MASK:                   sprintf (typeString, "EQ"); break;
19998 +           case TR_NEQ & TR_OPCODE_TYPE_MASK:                  sprintf (typeString, "NEQ"); break;
19999 +           case TR_WRITEWORD & TR_OPCODE_TYPE_MASK:            sprintf (typeString, "Write Word"); break;
20000 +           case TR_WRITEDOUBLEWORD & TR_OPCODE_TYPE_MASK:      sprintf (typeString, "Write Double"); break;
20001 +           case TR_ATOMICADDWORD & TR_OPCODE_TYPE_MASK:        sprintf (typeString, "Atomic Add"); break;
20002 +           case TR_TESTANDWRITE & TR_OPCODE_TYPE_MASK:         sprintf (typeString, "Test and Write"); break;
20003 +           default:                                            sprintf (typeString, "Type=%d", Type & TR_OPCODE_TYPE_MASK); break;
20004 +           }
20005 +       }
20006 +       sprintf (buffer, "%15s Addr=%08x Cntx=%08x", typeString, Addr, Context);
20007 +       /*(Type & TR_SENDACK)      ? " Sendack" : "", */
20008 +       /*(Type & TR_LAST_TRANS)   ? " LastTrans" : "", */
20009 +       /*(Type & TR_WAIT_FOR_EOP) ? " WaitForEop" : ""); */
20010 +    }
20011 +    
20012 +    if (StatusValid)
20013 +    {
20014 +       sprintf (statusString, " Type=%s %x", MiToName (hdrp->s.IProcTrapStatus.s.TrapType), hdrp->s.IProcTrapStatus.Status);
20015 +       strcat (buffer, statusString);
20016 +
20017 +       if (hdrp->s.IProcTrapStatus.s.BadLength)
20018 +           strcat (buffer, " BadLength");
20019 +       switch (hdrp->s.IProcTrapStatus.Status & CRC_MASK)
20020 +       {
20021 +       case CRC_STATUS_DISCARD:
20022 +           strcat (buffer, " CRC Discard");
20023 +           break;
20024 +       case CRC_STATUS_ERROR:
20025 +           strcat (buffer, " CRC Error");
20026 +           break;
20027 +
20028 +       case CRC_STATUS_BAD:
20029 +           strcat (buffer, " CRC Bad");
20030 +           break;
20031 +       }
20032 +    }
20033 +
20034 +    return (buffer);
20035 +}
20036 +
20037 +
20038 +/*
20039 + * Local variables:
20040 + * c-file-style: "stroustrup"
20041 + * End:
20042 + */
20043 diff -urN clean/drivers/net/qsnet/elan3/Makefile linux-2.6.9/drivers/net/qsnet/elan3/Makefile
20044 --- clean/drivers/net/qsnet/elan3/Makefile      1969-12-31 19:00:00.000000000 -0500
20045 +++ linux-2.6.9/drivers/net/qsnet/elan3/Makefile        2005-10-10 17:47:30.000000000 -0400
20046 @@ -0,0 +1,15 @@
20047 +#
20048 +# Makefile for Quadrics QsNet
20049 +#
20050 +# Copyright (c) 2002-2004 Quadrics Ltd
20051 +#
20052 +# File: drivers/net/qsnet/elan3/Makefile
20053 +#
20054 +
20055 +
20056 +#
20057 +
20058 +obj-$(CONFIG_ELAN3)    += elan3.o
20059 +elan3-objs     := context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o
20060 +
20061 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
20062 diff -urN clean/drivers/net/qsnet/elan3/Makefile.conf linux-2.6.9/drivers/net/qsnet/elan3/Makefile.conf
20063 --- clean/drivers/net/qsnet/elan3/Makefile.conf 1969-12-31 19:00:00.000000000 -0500
20064 +++ linux-2.6.9/drivers/net/qsnet/elan3/Makefile.conf   2005-09-07 10:39:38.000000000 -0400
20065 @@ -0,0 +1,10 @@
20066 +# Flags for generating QsNet Linux Kernel Makefiles
20067 +MODNAME                =       elan3.o
20068 +MODULENAME     =       elan3
20069 +KOBJFILES      =       context.o cproc.o dproc.o elandebug.o elandev_generic.o elansyscall.o eventcookie.o iproc.o sdram.o minames.o network_error.o route_table.o tproc.o tprocinsts.o routecheck.o virtual_process.o elan3ops.o context_linux.o elandev_linux.o procfs_linux.o tproc_linux.o elan3mmu_generic.o elan3mmu_linux.o
20070 +EXPORT_KOBJS   =       elandev_linux.o procfs_linux.o
20071 +CONFIG_NAME    =       CONFIG_ELAN3
20072 +SGALFC         =       
20073 +# EXTRALINES START
20074 +
20075 +# EXTRALINES END
20076 diff -urN clean/drivers/net/qsnet/elan3/minames.c linux-2.6.9/drivers/net/qsnet/elan3/minames.c
20077 --- clean/drivers/net/qsnet/elan3/minames.c     1969-12-31 19:00:00.000000000 -0500
20078 +++ linux-2.6.9/drivers/net/qsnet/elan3/minames.c       2003-06-07 11:57:49.000000000 -0400
20079 @@ -0,0 +1,38 @@
20080 +/*
20081 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20082 + *
20083 + *    For licensing information please see the supplied COPYING file
20084 + *
20085 + */
20086 +
20087 +#ident "@(#)$Id: minames.c,v 1.12 2003/06/07 15:57:49 david Exp $"
20088 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/minames.c,v $*/
20089 +
20090 +#include <qsnet/kernel.h>
20091 +#include <elan3/urom_addrs.h>
20092 +
20093 +caddr_t
20094 +MiToName (int mi)
20095 +{
20096 +    static char space[32];
20097 +    static struct {
20098 +       int   mi;
20099 +       char *name;
20100 +    } info[] = {
20101 +#include <elan3/minames.h>
20102 +    };
20103 +    register int i;
20104 +
20105 +
20106 +    for (i = 0; i < sizeof(info)/sizeof(info[0]); i++)
20107 +       if (info[i].mi == mi)
20108 +           return (info[i].name);
20109 +    sprintf (space, "MI %x", mi);
20110 +    return (space);
20111 +}
20112 +
20113 +/*
20114 + * Local variables:
20115 + * c-file-style: "stroustrup"
20116 + * End:
20117 + */
20118 diff -urN clean/drivers/net/qsnet/elan3/network_error.c linux-2.6.9/drivers/net/qsnet/elan3/network_error.c
20119 --- clean/drivers/net/qsnet/elan3/network_error.c       1969-12-31 19:00:00.000000000 -0500
20120 +++ linux-2.6.9/drivers/net/qsnet/elan3/network_error.c 2004-10-28 07:51:00.000000000 -0400
20121 @@ -0,0 +1,777 @@
20122 +/*
20123 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20124 + *
20125 + *    For licensing information please see the supplied COPYING file
20126 + *
20127 + */
20128 +
20129 +#ident "@(#)$Id: network_error.c,v 1.33 2004/10/28 11:51:00 david Exp $"
20130 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/network_error.c,v $*/
20131 +
20132 +#include <qsnet/kernel.h>
20133 +#include <qsnet/kthread.h>
20134 +
20135 +#include <elan3/elanregs.h>
20136 +#include <elan3/elandev.h>
20137 +#include <elan3/elanvp.h>
20138 +#include <elan3/elan3mmu.h>
20139 +#include <elan3/elanctxt.h>
20140 +#include <elan3/elan3mmu.h>
20141 +#include <elan3/elandebug.h>
20142 +
20143 +#ifdef DIGITAL_UNIX
20144 +#include <sys/cred.h>
20145 +#include <sys/mbuf.h>
20146 +#include <sys/utsname.h>
20147 +#include <net/if.h>
20148 +#include <netinet/in.h>
20149 +#include <netinet/in_var.h>
20150 +
20151 +#include <rpc/types.h>
20152 +#include <rpc/auth.h>
20153 +#include <rpc/xdr.h>
20154 +#include <rpc/clnt.h>
20155 +
20156 +typedef xdrproc_t kxdrproc_t;
20157 +#endif
20158 +
20159 +#ifdef LINUX
20160 +#include <linux/sunrpc/types.h>
20161 +#include <linux/sunrpc/auth.h>
20162 +#include <linux/sunrpc/xdr.h>
20163 +#include <linux/sunrpc/clnt.h>
20164 +
20165 +#include <linux/utsname.h>
20166 +#define SYS_NMLN       __NEW_UTS_LEN
20167 +#endif
20168 +
20169 +#include <elan3/neterr_rpc.h>
20170 +
20171 +spinlock_t       ResolveRequestLock;
20172 +kcondvar_t       ResolveRequestWait;
20173 +
20174 +NETERR_RESOLVER  *ResolveRequestHead;
20175 +NETERR_RESOLVER **ResolveRequestTailp = &ResolveRequestHead;
20176 +int              ResolveRequestCount;
20177 +int              ResolveRequestThreads;
20178 +int              ResolveRequestMaxThreads = 4;
20179 +int              ResolveRequestTimeout = 60;
20180 +
20181 +typedef struct neterr_server
20182 +{
20183 +    struct neterr_server *Next;
20184 +    struct neterr_server *Prev;
20185 +    unsigned             ElanId;
20186 +
20187 +    char                *Name;
20188 +    int                          RefCount;
20189 +    struct sockaddr_in    Addr;
20190 +} NETERR_SERVER;
20191 +
20192 +#define NETERR_HASH_ENTRIES    64
20193 +#define NETERR_HASH(elanid)    (((unsigned) elanid) % NETERR_HASH_ENTRIES)
20194 +NETERR_SERVER *NeterrServerHash[NETERR_HASH_ENTRIES];
20195 +kmutex_t       NeterrServerLock;
20196 +
20197 +static NETERR_SERVER *FindNeterrServer (int elanId);
20198 +static void           DereferenceNeterrServer (NETERR_SERVER *server);
20199 +static int            CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg);
20200 +
20201 +void
20202 +InitialiseNetworkErrorResolver ()
20203 +{
20204 +    spin_lock_init (&ResolveRequestLock);
20205 +    kcondvar_init (&ResolveRequestWait);
20206 +    
20207 +    ResolveRequestHead  = NULL;
20208 +    ResolveRequestTailp = &ResolveRequestHead;
20209 +
20210 +    kmutex_init (&NeterrServerLock);
20211 +}
20212 +
20213 +void
20214 +FinaliseNetworkErrorResolver ()
20215 +{
20216 +    spin_lock_destroy (&ResolveRequestLock);
20217 +    kcondvar_destroy (&ResolveRequestWait);
20218 +    
20219 +    kmutex_destroy (&NeterrServerLock);
20220 +}
20221 +
20222 +static NETERR_RESOLVER *
20223 +AllocateNetworkErrorResolver (void)
20224 +{
20225 +    NETERR_RESOLVER *rvp;
20226 +
20227 +    KMEM_ZALLOC (rvp, NETERR_RESOLVER *, sizeof (NETERR_RESOLVER), TRUE);
20228 +    spin_lock_init (&rvp->Lock);
20229 +
20230 +    return (rvp);
20231 +}
20232 +
20233 +void
20234 +FreeNetworkErrorResolver (NETERR_RESOLVER *rvp)
20235 +{
20236 +    spin_lock_destroy (&rvp->Lock);
20237 +    KMEM_FREE (rvp, sizeof (NETERR_RESOLVER));
20238 +}
20239 +
20240 +static void
20241 +elan3_neterr_resolver (void)
20242 +{
20243 +    NETERR_RESOLVER *rvp;
20244 +    NETERR_SERVER   *server;
20245 +    int                     status;
20246 +    unsigned long    flags;
20247 +
20248 +    kernel_thread_init("elan3_neterr_resolver");
20249 +    spin_lock (&ResolveRequestLock);
20250 +
20251 +    while ((rvp = ResolveRequestHead) != NULL)
20252 +    {
20253 +       if ((ResolveRequestHead = rvp->Next) == NULL)
20254 +           ResolveRequestTailp = &ResolveRequestHead;
20255 +       
20256 +       spin_unlock (&ResolveRequestLock);
20257 +
20258 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: rvp = %p\n", rvp);
20259 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      Rail          %d\n", rvp->Message.Rail);
20260 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      SrcCapability %s\n", CapabilityString (&rvp->Message.SrcCapability));
20261 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      DstCapability %s\n", CapabilityString (&rvp->Message.DstCapability));
20262 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieAddr    %08x\n", rvp->Message.CookieAddr);
20263 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieVProc   %08x\n", rvp->Message.CookieVProc);
20264 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      NextCookie    %08x\n", rvp->Message.NextCookie);
20265 +       PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      WaitForEop    %08x\n", rvp->Message.WaitForEop);
20266 +       
20267 +       if ((server = FindNeterrServer (rvp->Location.loc_node)) == NULL)
20268 +           status = ECONNREFUSED;
20269 +       else if (ResolveRequestTimeout && ((int)(lbolt - rvp->Timestamp)) > (ResolveRequestTimeout*HZ))
20270 +       {
20271 +           printk ("elan_neterr: rpc to '%s' timedout - context %d killed\n", server->Name, rvp->Message.SrcCapability.cap_mycontext);
20272 +           status = ECONNABORTED;
20273 +       }
20274 +       else
20275 +       {
20276 +           status = CallNeterrServer (server, &rvp->Message);
20277 +
20278 +           DereferenceNeterrServer (server);
20279 +       }
20280 +       
20281 +       if ((status == EINTR || status == ETIMEDOUT) && rvp->Ctxt != NULL)
20282 +       {
20283 +           PRINTF1 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: retry rvp=%p\n", rvp);
20284 +           spin_lock (&ResolveRequestLock);
20285 +           rvp->Next = NULL;
20286 +           *ResolveRequestTailp = rvp;
20287 +           ResolveRequestTailp = &rvp->Next;
20288 +       }
20289 +       else
20290 +       {
20291 +           rvp->Status = status;
20292 +           
20293 +           spin_lock (&rvp->Lock);
20294 +           
20295 +           if (rvp->Ctxt != NULL)
20296 +           {
20297 +               PRINTF2 (rvp->Ctxt, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for ctxt %p\n", rvp, rvp->Ctxt);
20298 +               spin_lock_irqsave (&rvp->Ctxt->Device->IntrLock, flags);
20299 +               
20300 +               rvp->Completed = TRUE;
20301 +               
20302 +               kcondvar_wakeupall (&rvp->Ctxt->Wait, &rvp->Ctxt->Device->IntrLock);
20303 +               
20304 +               /*
+                * drop the locks out of order since the rvp can get freed
20306 +                * as soon as we drop the IntrLock - so cannot reference the
20307 +                * rvp after this.
20308 +                */
20309 +               
20310 +               spin_unlock (&rvp->Lock);
20311 +               spin_unlock_irqrestore (&rvp->Ctxt->Device->IntrLock, flags);
20312 +           }
20313 +           else
20314 +           {
20315 +               PRINTF2 (DBG_DEVICE, DBG_NETERR, "elan3_neterr_resolver: completing rvp %p for deceased ctxt %p\n", rvp, rvp->Ctxt);
20316 +               spin_unlock (&rvp->Lock);
20317 +               FreeNetworkErrorResolver (rvp);
20318 +           }
20319 +           
20320 +           spin_lock (&ResolveRequestLock);
20321 +           ResolveRequestCount--;
20322 +       }
20323 +    }
20324 +
20325 +    ResolveRequestThreads--;
20326 +
20327 +    spin_unlock (&ResolveRequestLock);
20328 +    kernel_thread_exit();
20329 +}
20330 +
20331 +int
20332 +QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp)
20333 +{
20334 +    int                           isdma   = trap->DmaIdentifyTransaction != NULL;
20335 +    E3_IprocTrapHeader_BE *hdrp    = isdma ? trap->DmaIdentifyTransaction : trap->ThreadIdentifyTransaction;
20336 +    E3_uint32              process = isdma ? (hdrp->s.TrAddr & 0xFFFF) : (hdrp->s.TrData0 & 0xFFFF);
20337 +    NETERR_RESOLVER       *rvp;
20338 +
20339 +    PRINTF2 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: process = %d %s\n", process, isdma ? "(dma)" : "(thread)");
20340 +
20341 +    if ((rvp = AllocateNetworkErrorResolver()) == NULL)
20342 +    {
20343 +       PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot allocate resolver\n");
20344 +       return (ENOMEM);
20345 +    }
20346 +
20347 +    rvp->Message.Rail = ctxt->Device->Devinfo.dev_rail;
20348 +
20349 +    krwlock_read (&ctxt->VpLock);
20350 +    rvp->Location = ProcessToLocation (ctxt, NULL, process, &rvp->Message.SrcCapability);
20351 +    krwlock_done (&ctxt->VpLock);
20352 +
20353 +    if (rvp->Location.loc_node == ELAN3_INVALID_NODE)
20354 +    {
20355 +       PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: invalid elan id\n");
20356 +
20357 +       FreeNetworkErrorResolver (rvp);
20358 +       return (EINVAL);
20359 +    }
20360 +
20361 +    rvp->Message.DstCapability = ctxt->Capability;
20362 +    rvp->Message.DstProcess    = elan3_process (ctxt);
20363 +    rvp->Message.WaitForEop    = (trap->WaitForEopTransaction != NULL);
20364 +
20365 +    if (isdma)
20366 +    {
20367 +       rvp->Message.CookieAddr  = 0;
20368 +       rvp->Message.CookieVProc = hdrp->s.TrAddr;
20369 +       rvp->Message.NextCookie  = 0;
20370 +    }
20371 +    else
20372 +    {
20373 +       rvp->Message.CookieAddr  = hdrp->s.TrAddr;
20374 +       rvp->Message.CookieVProc = hdrp->s.TrData0;
20375 +       rvp->Message.NextCookie  = hdrp->s.TrData1;
20376 +    }
20377 +
20378 +    rvp->Completed = FALSE;
20379 +    rvp->Ctxt      = ctxt;
20380 +    rvp->Timestamp = lbolt;
20381 +
20382 +    spin_lock (&ResolveRequestLock);
20383 +
20384 +    rvp->Next = NULL;
20385 +    *ResolveRequestTailp = rvp;
20386 +    ResolveRequestTailp = &rvp->Next;
20387 +    ResolveRequestCount++;
20388 +
20389 +    kcondvar_wakeupone (&ResolveRequestWait, &ResolveRequestLock);
20390 +
20391 +    if (ResolveRequestCount < ResolveRequestThreads || ResolveRequestThreads >= ResolveRequestMaxThreads)
20392 +       spin_unlock (&ResolveRequestLock);
20393 +    else
20394 +    {
20395 +       ResolveRequestThreads++;
20396 +
20397 +       spin_unlock (&ResolveRequestLock);
20398 +       if (kernel_thread_create (elan3_neterr_resolver, NULL) == NULL)
20399 +       {
20400 +           spin_lock (&ResolveRequestLock);
20401 +           ResolveRequestThreads--;
20402 +           spin_unlock (&ResolveRequestLock);
20403 +           
20404 +           if (ResolveRequestThreads == 0)
20405 +           {
20406 +               PRINTF0 (ctxt, DBG_NETERR, "QueueNetworkErrorResolver: cannot thread pool\n");
20407 +
20408 +               FreeNetworkErrorResolver (rvp);
20409 +               return (ENOMEM);
20410 +           }
20411 +       }
20412 +    }
20413 +
20414 +    *rvpp = rvp;
20415 +    return (ESUCCESS);
20416 +}
20417 +
20418 +void
20419 +CancelNetworkErrorResolver (NETERR_RESOLVER *rvp)
20420 +{
20421 +    spin_lock (&rvp->Lock);
20422 +
20423 +    PRINTF2 (rvp->Ctxt, DBG_NETERR, "CancelNetworkErrorResolver: rvp=%p %s\n", rvp, rvp->Completed ? "Completed" : "Pending");
20424 +
20425 +    if (rvp->Completed)
20426 +    {
20427 +       spin_unlock (&rvp->Lock);
20428 +       FreeNetworkErrorResolver (rvp);
20429 +    }
20430 +    else
20431 +    {
20432 +       rvp->Ctxt = NULL;
20433 +       spin_unlock (&rvp->Lock);
20434 +    }
20435 +}
20436 +
20437 +static NETERR_FIXUP *
20438 +AllocateNetworkErrorFixup (void)
20439 +{
20440 +    NETERR_FIXUP *nef;
20441 +
20442 +    KMEM_ZALLOC (nef, NETERR_FIXUP *, sizeof (NETERR_FIXUP), TRUE);
20443 +
20444 +    if (nef == (NETERR_FIXUP *) NULL)
20445 +       return (NULL);
20446 +
20447 +    kcondvar_init (&nef->Wait);
20448 +
20449 +    return (nef);
20450 +}
20451 +
20452 +static void
20453 +FreeNetworkErrorFixup (NETERR_FIXUP *nef)
20454 +{
20455 +    kcondvar_destroy (&nef->Wait);
20456 +    KMEM_FREE (nef, sizeof (NETERR_FIXUP));
20457 +}
20458 +
20459 +int
20460 +ExecuteNetworkErrorFixup (NETERR_MSG *msg)
20461 +{
20462 +    ELAN3_DEV      *dev;
20463 +    ELAN3_CTXT   *ctxt;
20464 +    NETERR_FIXUP  *nef;
20465 +    NETERR_FIXUP **predp;
20466 +    int                   rc;
20467 +    unsigned long  flags;
20468 +
20469 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "ExecuteNetworkErrorFixup: msg = %p\n", msg);
20470 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      Rail          %d\n", msg->Rail);
20471 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      SrcCapability %s\n", CapabilityString (&msg->SrcCapability));
20472 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      DstCapability %s\n", CapabilityString (&msg->DstCapability));
20473 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieAddr    %08x\n", msg->CookieAddr);
20474 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      CookieVProc   %08x\n", msg->CookieVProc);
20475 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      NextCookie    %08x\n", msg->NextCookie);
20476 +    PRINTF1 (DBG_DEVICE, DBG_NETERR, "                      WaitForEop    %08x\n", msg->WaitForEop);
20477 +       
20478 +    if ((dev = elan3_device (msg->Rail)) == NULL)
20479 +       return (ESRCH);
20480 +
20481 +    if ((nef = AllocateNetworkErrorFixup()) == NULL)
20482 +       return (ENOMEM);
20483 +
20484 +    if (nef == (NETERR_FIXUP *) NULL)
20485 +       return (ENOMEM);
20486 +    
20487 +    bcopy (msg, &nef->Message, sizeof (NETERR_MSG));
20488 +
20489 +    spin_lock_irqsave (&dev->IntrLock, flags);
20490 +    
20491 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, msg->SrcCapability.cap_mycontext);
20492 +
20493 +    if (ctxt == NULL)
20494 +       rc = ESRCH;
20495 +    else if (!ELAN_CAP_MATCH (&msg->SrcCapability, &ctxt->Capability))
20496 +       rc = EPERM;
20497 +    else
20498 +    {  
20499 +       if (ctxt->Status & CTXT_NO_LWPS)
20500 +           rc = EAGAIN;
20501 +       else
20502 +       {
20503 +           for (predp = &ctxt->NetworkErrorFixups; *predp != NULL; predp = &(*predp)->Next)
20504 +               ;
20505 +           nef->Next = NULL;
20506 +           *predp = nef;
20507 +           
20508 +           kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
20509 +
20510 +           while (! nef->Completed)
20511 +               kcondvar_wait (&nef->Wait, &dev->IntrLock, &flags);
20512 +
20513 +           rc = nef->Status;
20514 +       }
20515 +    }
20516 +    
20517 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
20518 +
20519 +    FreeNetworkErrorFixup (nef);
20520 +
20521 +    return (rc);
20522 +}
20523 +
20524 +void
20525 +CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status)
20526 +{
20527 +    ELAN3_DEV *dev = ctxt->Device;
20528 +    unsigned long flags;
20529 +
20530 +    PRINTF2 (ctxt, DBG_NETERR, "CompleteNetworkErrorFixup: %p %d\n", nef, status);
20531 +
20532 +    spin_lock_irqsave (&dev->IntrLock, flags);
20533 +
20534 +    nef->Status = status;
20535 +    nef->Completed = TRUE;
20536 +    kcondvar_wakeupone (&nef->Wait, &dev->IntrLock);
20537 +
20538 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
20539 +}
20540 +
20541 +
20542 +static NETERR_SERVER *
20543 +NewNeterrServer (int elanId, struct sockaddr_in *addr, char *name)
20544 +{
20545 +    NETERR_SERVER *server;
20546 +
20547 +    KMEM_ZALLOC (server, NETERR_SERVER *, sizeof (NETERR_SERVER), TRUE);
20548 +    KMEM_ALLOC  (server->Name, char *, strlen (name)+1, TRUE);
20549 +
20550 +    bcopy (addr, &server->Addr, sizeof (struct sockaddr_in));
20551 +    bcopy (name, server->Name, strlen (name)+1);
20552 +
20553 +    server->ElanId   = elanId;
20554 +    server->RefCount = 1;
20555 +    
20556 +    return (server);
20557 +}
20558 +
20559 +static void
20560 +DeleteNeterrServer (NETERR_SERVER *server)
20561 +{
20562 +    KMEM_FREE (server->Name, strlen(server->Name)+1);
20563 +    KMEM_FREE (server, sizeof (NETERR_SERVER));
20564 +}
20565 +
20566 +static NETERR_SERVER *
20567 +FindNeterrServer (int elanId)
20568 +{
20569 +    NETERR_SERVER *server;
20570 +    
20571 +    kmutex_lock (&NeterrServerLock);
20572 +    
20573 +    for (server = NeterrServerHash[NETERR_HASH(elanId)]; server != NULL; server = server->Next)
20574 +       if (server->ElanId == elanId)
20575 +           break;
20576 +
20577 +    if (server != NULL)
20578 +       server->RefCount++;
20579 +    kmutex_unlock (&NeterrServerLock);
20580 +
20581 +    return (server);
20582 +}
20583 +
20584 +static void
20585 +DereferenceNeterrServer (NETERR_SERVER *server)
20586 +{
20587 +    kmutex_lock (&NeterrServerLock);
20588 +    if ((--server->RefCount) == 0)
20589 +       DeleteNeterrServer (server);
20590 +    kmutex_unlock  (&NeterrServerLock);
20591 +}
20592 +
20593 +int
20594 +AddNeterrServer (int elanId, struct sockaddr_in *addr, char *name)
20595 +{
20596 +    NETERR_SERVER *server;
20597 +    NETERR_SERVER *old;
20598 +    int            hashval = NETERR_HASH(elanId);
20599 +
20600 +    server = NewNeterrServer (elanId, addr, name);
20601 +    
20602 +    if (server == NULL)
20603 +       return (ENOMEM);
20604 +    
20605 +    kmutex_lock (&NeterrServerLock);
20606 +    for (old = NeterrServerHash[hashval]; old != NULL; old = old->Next)
20607 +       if (old->ElanId == elanId)
20608 +           break;
20609 +    
20610 +    /* remove "old" server from hash table */
20611 +    if (old != NULL)
20612 +    {
20613 +       if (old->Prev)
20614 +           old->Prev->Next = old->Next;
20615 +       else
20616 +           NeterrServerHash[hashval] = old->Next;
20617 +       if (old->Next)
20618 +           old->Next->Prev = old->Prev;
20619 +    }
20620 +
20621 +    /* insert "new" server into hash table */
20622 +    if ((server->Next = NeterrServerHash[hashval]) != NULL)
20623 +       server->Next->Prev = server;
20624 +    server->Prev = NULL;
20625 +    NeterrServerHash[hashval] = server;
20626 +
20627 +    kmutex_unlock (&NeterrServerLock);
20628 +
20629 +    if (old != NULL)
20630 +       DereferenceNeterrServer (old);
20631 +    
20632 +    return (ESUCCESS);
20633 +}
20634 +
20635 +int
20636 +AddNeterrServerSyscall (int elanId, void *addrp, void *namep, char *unused)
20637 +{
20638 +    struct sockaddr_in addr;
20639 +    char              *name;
20640 +    int                error;
20641 +    int                nob;
20642 +
20643 +    /* Sanity check the supplied elanId argument */
20644 +    if (elanId < 0)
20645 +       return ( set_errno(EINVAL) );
20646 +
20647 +    KMEM_ALLOC (name, caddr_t, SYS_NMLN, TRUE);
20648 +    
20649 +    if (copyin ((caddr_t) addrp, (caddr_t) &addr, sizeof (addr)) ||
20650 +       copyinstr ((caddr_t) namep, name, SYS_NMLN, &nob))
20651 +    {
20652 +       error = EFAULT;
20653 +    }
20654 +    else
20655 +    {
20656 +       PRINTF2 (DBG_DEVICE, DBG_NETERR, "AddNeterrServer: '%s' at elanid %d\n", name, elanId);
20657 +
20658 +       error = AddNeterrServer (elanId, &addr, name);
20659 +    }
20660 +    KMEM_FREE (name, SYS_NMLN);
20661 +
20662 +    return (error ? set_errno(error) : ESUCCESS);
20663 +}
20664 +
20665 +
20666 +#if defined(DIGITAL_UNIX)
20667 +static int
20668 +CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg)
20669 +{
20670 +    cred_t        *cr = crget();
20671 +    struct rpc_err  rpcerr;
20672 +    extern cred_t  *kcred;
20673 +    struct timeval  wait;
20674 +    enum clnt_stat  rc;
20675 +    int                    status;
20676 +    CLIENT         *clnt;
20677 +    int             error;
20678 +
20679 +    PRINTF4 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) - family=%d port=%d addr=%08x\n", server->Name,
20680 +            server->Addr.sin_family, server->Addr.sin_port, server->Addr.sin_addr.s_addr);
20681 +
20682 +    if ((clnt = clntkudp_create (&server->Addr, (struct sockaddr_in *)0, NETERR_PROGRAM, NETERR_VERSION, 1, cr)) == NULL)
20683 +    {
20684 +       PRINTF1 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): clntkudp_create error\n", server->Name);
20685 +
20686 +       return (ENOMEM);
20687 +    }
20688 +    
20689 +    wait.tv_sec  = NETERR_RPC_TIMEOUT;
20690 +    wait.tv_usec = 0;
20691 +    
20692 +    PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL timeout = %d\n", server->Name, NETERR_RPC_TIMEOUT);
20693 +   
20694 +    rc = CLNT_CALL(clnt, NETERR_FIXUP_RPC, xdr_neterr_msg, (void *)msg, xdr_int, (void *) &status, wait);
20695 +
20696 +    PRINTF3 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): CLNT_CALL -> %d (%s)\n", server->Name, rc, clnt_sperrno(rc));;
20697 +
20698 +    switch (rc)
20699 +    {
20700 +    case RPC_SUCCESS:
20701 +       break;
20702 +
20703 +    case RPC_INTR:
20704 +       status = EINTR;
20705 +       break;
20706 +
20707 +    case RPC_TIMEDOUT:
20708 +       status = ETIMEDOUT;
20709 +       break;
20710 +
20711 +    default:
20712 +       printf ("CallNeterrServer(%s): %s\n", server->Name, clnt_sperrno(status));
20713 +       status = ENOENT;
20714 +       break;
20715 +    }
20716 +
20717 +    CLNT_DESTROY(clnt);
20718 +
20719 +    crfree(cr);
20720 +    
20721 +    ASSERT(rc == RPC_SUCCESS || status != 0);
20722 +
20723 +    PRINTF2 (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): status=%d\n", server->Name, status);
20724 +
20725 +    return (status);
20726 +}
20727 +#endif
20728 +
20729 +#if defined(LINUX)
20730 +
20731 +#define xdrsize(type) ((sizeof(type) + 3) >> 2)
20732 +
20733 +static int
20734 +xdr_error(struct rpc_rqst *req, u32 *p, void *dummy)
20735 +{
20736 +    return -EIO;
20737 +}
20738 +
20739 +static int
20740 +xdr_decode_int(struct rpc_rqst *req, u32 *p, int *res)
20741 +{ 
20742 +    *res = ntohl(*p++);
20743 +    return 0;
20744 +}
20745 +
20746 +#define XDR_capability_sz ((12 + BT_BITOUL(ELAN3_MAX_VPS)) * sizeof (u32))
20747 +
20748 +static int
20749 +xdr_encode_capability(u32 *p, ELAN_CAPABILITY *cap)
20750 +{
20751 +    u32 *pp = p;
20752 +
20753 +    /* basic xdr unit is u32 - for opaque types we must round up to that */
20754 +    memcpy(p, &cap->cap_userkey, sizeof(cap->cap_userkey));
20755 +    p += xdrsize(cap->cap_userkey);
20756 +
20757 +    *p++ = htonl(cap->cap_version);
20758 +    ((u16 *) (p++))[1] = htons(cap->cap_type);
20759 +    *p++ = htonl(cap->cap_lowcontext);
20760 +    *p++ = htonl(cap->cap_highcontext);
20761 +    *p++ = htonl(cap->cap_mycontext);
20762 +    *p++ = htonl(cap->cap_lownode);
20763 +    *p++ = htonl(cap->cap_highnode);
20764 +    *p++ = htonl(cap->cap_railmask);
20765 +
20766 +    memcpy(p, &cap->cap_bitmap[0], sizeof(cap->cap_bitmap));
20767 +    p += xdrsize(cap->cap_bitmap);
20768 +
20769 +    ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_capability_sz);
20770 +
20771 +    return (p - pp);
20772 +}
20773 +
20774 +
20775 +#define XDR_neterr_sz  (((1 + 5) * sizeof (u32)) + (2*XDR_capability_sz))
20776 +
20777 +static int
20778 +xdr_encode_neterr_msg(struct rpc_rqst *req, u32 *p, NETERR_MSG *msg)
20779 +{
20780 +    u32 *pp = p;
20781 +
20782 +    *p++ = htonl(msg->Rail);
20783 +
20784 +    p += xdr_encode_capability(p, &msg->SrcCapability);
20785 +    p += xdr_encode_capability(p, &msg->DstCapability);
20786 +
20787 +    *p++ = htonl(msg->DstProcess);
20788 +    *p++ = htonl(msg->CookieAddr);
20789 +    *p++ = htonl(msg->CookieVProc);
20790 +    *p++ = htonl(msg->NextCookie);
20791 +    *p++ = htonl(msg->WaitForEop);
20792 +
20793 +    ASSERT (((unsigned long) p - (unsigned long) pp) == XDR_neterr_sz);
20794 +
20795 +    req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
20796 +
20797 +    return 0;
20798 +}
20799 +
20800 +static struct rpc_procinfo neterr_procedures[2] = 
20801 +{
20802 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
20803 +#      define RPC_ID_NULL      "neterr_null"
20804 +#      define RPC_ID_FIXUP_RPC "neterr_fixup_rpc"
20805 +#else
20806 +#      define RPC_ID_NULL      NETERR_NULL_RPC
20807 +#      define RPC_ID_FIXUP_RPC NETERR_FIXUP_RPC
20808 +#endif
20809 +    {  
20810 +       RPC_ID_NULL,                    /* procedure name or number*/
20811 +       (kxdrproc_t) xdr_error,         /* xdr encode fun */
20812 +        (kxdrproc_t) xdr_error,        /* xdr decode fun */
20813 +       0,                              /* req buffer size */
20814 +       0,                              /* call count */
20815 +    },
20816 +    {  
20817 +       RPC_ID_FIXUP_RPC,
20818 +        (kxdrproc_t) xdr_encode_neterr_msg,
20819 +        (kxdrproc_t) xdr_decode_int,
20820 +       XDR_neterr_sz,
20821 +       0,                      
20822 +    },
20823 +};
20824 +
20825 +static struct rpc_version neterr_version1 = 
20826 +{
20827 +    1,                         /* version */
20828 +    2,                         /* number of procedures */
20829 +    neterr_procedures  /* procedures */
20830 +};
20831 +
20832 +static struct rpc_version *neterr_version[] = 
20833 +{
20834 +    NULL,
20835 +    &neterr_version1,
20836 +};
20837 +
20838 +static struct rpc_stat neterr_stats;
20839 +
20840 +static struct rpc_program neterr_program = 
20841 +{
20842 +    NETERR_SERVICE,
20843 +    NETERR_PROGRAM,
20844 +    sizeof(neterr_version)/sizeof(neterr_version[0]),
20845 +    neterr_version,
20846 +    &neterr_stats,
20847 +};
20848 +
20849 +static int
20850 +CallNeterrServer (NETERR_SERVER *server, NETERR_MSG *msg)
20851 +{
20852 +    struct rpc_xprt   *xprt;
20853 +    struct rpc_clnt   *clnt;
20854 +    struct rpc_timeout to;
20855 +    int                rc, status;
20856 +    
20857 +    PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s)\n", server->Name);
20858 +
20859 +    xprt_set_timeout(&to, 1, NETERR_RPC_TIMEOUT * HZ);
20860 +
20861 +    if ((xprt = xprt_create_proto(IPPROTO_UDP, &server->Addr, &to)) == NULL)
20862 +    {
20863 +       PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) xprt_create_proto failed\n", server->Name);
20864 +       return EFAIL;
20865 +    }
20866 +
20867 +    if ((clnt = rpc_create_client(xprt, server->Name, &neterr_program, NETERR_VERSION, RPC_AUTH_NULL)) == NULL)
20868 +    {
20869 +       PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s) rpc_create_client failed\n", server->Name);
20870 +       xprt_destroy (xprt);
20871 +       
20872 +       return EFAIL;
20873 +    }
20874 +
20875 +    clnt->cl_softrtry = 1;
20876 +    clnt->cl_chatty   = 0;
20877 +    clnt->cl_oneshot  = 1;
20878 +    clnt->cl_intr     = 0;
20879 +
20880 +    if ((rc = rpc_call(clnt, NETERR_FIXUP_RPC, msg, &status, 0)) < 0)
20881 +    {
20882 +       /* RPC error has occured - determine whether we should retry */
20883 +
20884 +       status = ETIMEDOUT;
20885 +    }
20886 +
20887 +    PRINTF (DBG_DEVICE, DBG_NETRPC, "CallNeterrServer(%s): -> %d\n", server->Name, status);
20888 +
20889 +    return (status);
20890 +}
20891 +
20892 +#endif /* defined(LINUX) */
20893 +
20894 +/*
20895 + * Local variables:
20896 + * c-file-style: "stroustrup"
20897 + * End:
20898 + */
20899 diff -urN clean/drivers/net/qsnet/elan3/procfs_linux.c linux-2.6.9/drivers/net/qsnet/elan3/procfs_linux.c
20900 --- clean/drivers/net/qsnet/elan3/procfs_linux.c        1969-12-31 19:00:00.000000000 -0500
20901 +++ linux-2.6.9/drivers/net/qsnet/elan3/procfs_linux.c  2005-09-07 10:35:03.000000000 -0400
20902 @@ -0,0 +1,195 @@
20903 +/*
20904 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
20905 + *
20906 + *    For licensing information please see the supplied COPYING file
20907 + *
20908 + */
20909 +
20910 +#ident "@(#)$Id: procfs_linux.c,v 1.21.8.2 2005/09/07 14:35:03 mike Exp $"
20911 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/procfs_linux.c,v $*/
20912 +
20913 +#include <qsnet/kernel.h>
20914 +
20915 +#include <elan3/elanregs.h>
20916 +#include <elan3/elandev.h>
20917 +#include <elan3/elandebug.h>
20918 +#include <elan3/elan3mmu.h>
20919 +#include <elan3/elanvp.h>
20920 +
20921 +#include <qsnet/module.h>
20922 +#include <linux/ctype.h>
20923 +
20924 +#include <qsnet/procfs_linux.h>
20925 +
20926 +struct proc_dir_entry *elan3_procfs_root;
20927 +struct proc_dir_entry *elan3_config_root;
20928 +
20929 +static int
20930 +proc_read_position (char *page, char **start, off_t off,
20931 +                   int count, int *eof, void *data)
20932 +{
20933 +    ELAN3_DEV *dev = (ELAN3_DEV *) data;
20934 +    int       len;
20935 +
20936 +    if (dev->Position.pos_mode == ELAN_POS_UNKNOWN)
20937 +       len = sprintf (page, "<unknown>\n");
20938 +    else
20939 +       len = sprintf (page, 
20940 +                      "NodeId                 %d\n"
20941 +                      "NumLevels              %d\n"
20942 +                      "NumNodes               %d\n",
20943 +                      dev->Position.pos_nodeid, dev->Position.pos_levels, dev->Position.pos_nodes);
20944 +
20945 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
20946 +}
20947 +
20948 +static int
20949 +proc_write_position (struct file *file, const char *buf, unsigned long count, void *data)
20950 +{
20951 +    ELAN3_DEV *dev      = (ELAN3_DEV *) data;
20952 +    unsigned  nodeid   = ELAN3_INVALID_NODE;
20953 +    unsigned  numnodes = 0;
20954 +    char     *page, *p;
20955 +    int       res;
20956 +
20957 +    if (count == 0)
20958 +       return (0);
20959 +
20960 +    if (count >= PAGE_SIZE)
20961 +       return (-EINVAL);
20962 +
20963 +    if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
20964 +       return (-ENOMEM);
20965 +
20966 +    MOD_INC_USE_COUNT;
20967 +
20968 +    if (copy_from_user (page, buf, count))
20969 +       res = -EFAULT;
20970 +    else
20971 +    {
20972 +       page[count] = '\0';
20973 +       
20974 +       if (page[count-1] == '\n')
20975 +           page[count-1] = '\0';
20976 +
20977 +       if (! strcmp (page, "<unknown>"))
20978 +       {
20979 +           dev->Position.pos_mode      = ELAN_POS_UNKNOWN;
20980 +           dev->Position.pos_nodeid    = ELAN3_INVALID_NODE;
20981 +           dev->Position.pos_nodes     = 0;
20982 +           dev->Position.pos_levels    = 0;
20983 +       }
20984 +       else
20985 +       {
20986 +           for (p = page; *p; )
20987 +           {
20988 +               while (isspace (*p))
20989 +                   p++;
20990 +               
20991 +               if (! strncmp (p, "NodeId=", strlen("NodeId=")))
20992 +                   nodeid   = simple_strtoul (p + strlen ("NodeId="), NULL, 0);
20993 +               if (! strncmp (p, "NumNodes=", strlen ("NumNodes=")))
20994 +                   numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0);
20995 +               
20996 +               while (*p && !isspace(*p))
20997 +                   p++;
20998 +           }
20999 +
21000 +           if (ComputePosition (&dev->Position, nodeid, numnodes, dev->Devinfo.dev_num_down_links_value) != 0)
21001 +               printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->Instance, nodeid, numnodes);
21002 +           else
21003 +               printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->Instance, dev->Position.pos_nodeid,
21004 +                       dev->Position.pos_nodes, dev->Position.pos_levels);
21005 +       }
21006 +    }
21007 +
21008 +    MOD_DEC_USE_COUNT;
21009 +    free_page ((unsigned long) page);
21010 +
21011 +    return (count);
21012 +}
21013 +
21014 +
21015 +void
21016 +elan3_procfs_device_init (ELAN3_DEV *dev)
21017 +{
21018 +    struct proc_dir_entry *dir, *p;
21019 +    char name[NAME_MAX];
21020 +
21021 +    sprintf (name, "device%d", dev->Instance);
21022 +    dir = dev->Osdep.procdir = proc_mkdir (name, elan3_procfs_root);
21023 +
21024 +    if ((p = create_proc_entry ("position", 0, dir)) != NULL)
21025 +    {
21026 +       p->read_proc  = proc_read_position;
21027 +       p->write_proc = proc_write_position;
21028 +       p->data       = dev;
21029 +       p->owner      = THIS_MODULE;
21030 +    }
21031 +
21032 +}
21033 +
21034 +void
21035 +elan3_procfs_device_fini (ELAN3_DEV *dev)
21036 +{
21037 +    struct proc_dir_entry *dir = dev->Osdep.procdir;
21038 +    char name[NAME_MAX];
21039 +
21040 +    remove_proc_entry ("position", dir);
21041 +
21042 +    sprintf (name, "device%d", dev->Instance);
21043 +    remove_proc_entry (name, elan3_procfs_root);
21044 +}
21045 +
21046 +void
21047 +elan3_procfs_init()
21048 +{
21049 +    extern int eventint_punt_loops;
21050 +    extern int ResolveRequestTimeout;
21051 +
21052 +    elan3_procfs_root = proc_mkdir("elan3",  qsnet_procfs_root);
21053 +
21054 +    elan3_config_root = proc_mkdir("config", elan3_procfs_root);
21055 +
21056 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug",           &elan3_debug,           0);
21057 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug_console",   &elan3_debug_console,   0);
21058 +    qsnet_proc_register_hex (elan3_config_root, "elan3_debug_buffer",    &elan3_debug_buffer,    0);
21059 +    qsnet_proc_register_hex (elan3_config_root, "elan3mmu_debug",      &elan3mmu_debug,      0);
21060 +    qsnet_proc_register_int (elan3_config_root, "eventint_punt_loops", &eventint_punt_loops, 0);
21061 +    qsnet_proc_register_int (elan3_config_root, "neterr_timeout",      &ResolveRequestTimeout, 0);
21062 +
21063 +#if defined(__ia64__)
21064 +    {
21065 +       extern int enable_sdram_writecombining;
21066 +       qsnet_proc_register_int (elan3_config_root, "enable_sdram_writecombining", &enable_sdram_writecombining, 0);
21067 +    }
21068 +#endif
21069 +}
21070 +
21071 +void
21072 +elan3_procfs_fini()
21073 +{
21074 +#if defined(__ia64__)
21075 +    remove_proc_entry ("enable_sdram_writecombining", elan3_config_root);
21076 +#endif
21077 +    remove_proc_entry ("neterr_timeout",      elan3_config_root);
21078 +    remove_proc_entry ("eventint_punt_loops", elan3_config_root);
21079 +    remove_proc_entry ("elan3mmu_debug",      elan3_config_root);
21080 +    remove_proc_entry ("elan3_debug_buffer",    elan3_config_root);
21081 +    remove_proc_entry ("elan3_debug_console",   elan3_config_root);
21082 +    remove_proc_entry ("elan3_debug",           elan3_config_root);
21083 +
21084 +    remove_proc_entry ("config",  elan3_procfs_root);
21085 +    remove_proc_entry ("version", elan3_procfs_root);
21086 +
21087 +    remove_proc_entry ("elan3",  qsnet_procfs_root);
21088 +}
21089 +
21090 +EXPORT_SYMBOL(elan3_procfs_root);
21091 +EXPORT_SYMBOL(elan3_config_root);
21092 +
21093 +/*
21094 + * Local variables:
21095 + * c-file-style: "stroustrup"
21096 + * End:
21097 + */
21098 diff -urN clean/drivers/net/qsnet/elan3/quadrics_version.h linux-2.6.9/drivers/net/qsnet/elan3/quadrics_version.h
21099 --- clean/drivers/net/qsnet/elan3/quadrics_version.h    1969-12-31 19:00:00.000000000 -0500
21100 +++ linux-2.6.9/drivers/net/qsnet/elan3/quadrics_version.h      2005-09-07 10:39:49.000000000 -0400
21101 @@ -0,0 +1 @@
21102 +#define QUADRICS_VERSION "5.11.3qsnet"
21103 diff -urN clean/drivers/net/qsnet/elan3/routecheck.c linux-2.6.9/drivers/net/qsnet/elan3/routecheck.c
21104 --- clean/drivers/net/qsnet/elan3/routecheck.c  1969-12-31 19:00:00.000000000 -0500
21105 +++ linux-2.6.9/drivers/net/qsnet/elan3/routecheck.c    2003-09-24 09:57:25.000000000 -0400
21106 @@ -0,0 +1,313 @@
21107 +/*
21108 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
21109 + *
21110 + *    For licensing information please see the supplied COPYING file
21111 + *
21112 + */
21113 +
21114 +/* ------------------------------------------------------------- */
21115 +
21116 +#include <qsnet/kernel.h>
21117 +
21118 +#include <elan3/elanregs.h>
21119 +#include <elan3/elandev.h>
21120 +#include <elan3/elanvp.h>
21121 +#include <elan3/elan3mmu.h>
21122 +#include <elan3/elanctxt.h>
21123 +#include <elan3/elandebug.h>
21124 +#include <elan3/urom_addrs.h>
21125 +#include <elan3/thread.h>
21126 +#include <elan3/vmseg.h>
21127 +
21128 +/* ---------------------------------------------------------------------- */
21129 +typedef struct elan3_net_location {
21130 +    int netid;
21131 +    int plane;
21132 +    int level;
21133 +} ELAN3_NET_LOCATION;
21134 +/* ---------------------------------------------------------------------- */
21135 +#define FLIT_LINK_ARRAY_MAX (ELAN3_MAX_LEVELS*2)
21136 +/* ---------------------------------------------------------------------- */
21137 +int 
21138 +elan3_route_follow_link( ELAN3_CTXT *ctxt, ELAN3_NET_LOCATION *loc, int link)
21139 +{
21140 +    ELAN_POSITION *pos = &ctxt->Position;
21141 +
21142 +    if ((link<0) || (link>7)) 
21143 +    {
21144 +       PRINTF1 (ctxt, DBG_VP, "elan3_route_follow_link: link (%d) out of range \n",link);
21145 +       return (ELAN3_ROUTE_INVALID);
21146 +    }   
21147 +
21148 +    /* going up or down ? */
21149 +    if ( link >= pos->pos_arity[loc->level] ) 
21150 +    {
21151 +       /* Up */
21152 +       if (loc->level >= pos->pos_levels)
21153 +           loc->plane = 0;
21154 +       else
21155 +       {
21156 +           if ((loc->level == 1) && (pos->pos_arity[0]  == 8)) /* oddness in some machines ie 512 */
21157 +               loc->plane = (16 * ( loc->plane / 8 )) + (4 * ( loc->plane % 4)) 
21158 +                   +(link - pos->pos_arity[loc->level]);
21159 +           else
21160 +               loc->plane = (loc->plane * (8 - pos->pos_arity[loc->level]))
21161 +                   +(link - pos->pos_arity[loc->level]);
21162 +       }
21163 +       loc->level--; 
21164 +       if ( loc->level < 0 )
21165 +       {
21166 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the top\n");
21167 +           return (ELAN3_ROUTE_INVALID_LEVEL);
21168 +       }
21169 +       loc->netid = loc->netid / pos->pos_arity[loc->level];
21170 +    }
21171 +    else
21172 +    {
21173 +       /* going down */
21174 +       if ((loc->level == 0) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */
21175 +           loc->netid = link % 2;
21176 +       else
21177 +           loc->netid =(loc->netid * pos->pos_arity[loc->level])+link;
21178 +
21179 +       loc->level++;
21180 +       if (loc->level > pos->pos_levels)
21181 +       {
21182 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_follow_link: link goes off the bottom\n");
21183 +           return (ELAN3_ROUTE_INVALID_LEVEL); 
21184 +       }
21185 +
21186 +       if ( loc->level >= (pos->pos_levels-1))
21187 +           loc->plane = 0;
21188 +       else
21189 +           if ((loc->level == 1) && (pos->pos_arity[0] == 8)) /* oddness in some machines ie 512 */
21190 +               loc->plane = (((loc->plane)>>2)*2) - ( ((loc->plane)>>2) & 3  ) + ((link<2)?0:4); /* ((p/4) % 4) */
21191 +           else 
21192 +               loc->plane = loc->plane/(8-pos->pos_arity[loc->level]);
21193 +    }
21194 +    return (ELAN3_ROUTE_SUCCESS);
21195 +}
21196 +/* ---------------------------------------------------------------------- */
21197 +int /* assumes they are connected, really only used for finding the MyLink */
21198 +elan3_route_get_mylink (ELAN_POSITION *pos, ELAN3_NET_LOCATION *locA, ELAN3_NET_LOCATION *locB)
21199 +{
21200 +    /* whats the My Link for locA to LocB */
21201 +    if ( locA->level > locB->level ) 
21202 +       return locB->plane - (locA->plane * (8 - pos->pos_arity[locA->level])) + pos->pos_arity[locA->level];
21203 +    
21204 +    return  locB->netid - (locA->netid * pos->pos_arity[locA->level]);
21205 +}
21206 +/* ---------------------------------------------------------------------- */
21207 +#define FIRST_GET_HIGH_PRI(FLIT)    (FLIT & FIRST_HIGH_PRI)
21208 +#define FIRST_GET_AGE(FLIT)         ((FLIT & FIRST_AGE(15))>>11)
21209 +#define FIRST_GET_TIMEOUT(FLIT)     ((FLIT & FIRST_TIMEOUT(3))>>9)
21210 +#define FIRST_GET_NEXT(FLIT)        ((FLIT & FIRST_PACKED(3))>>7)
21211 +#define FIRST_GET_ROUTE(FLIT)       (FLIT & 0x7f)
21212 +#define FIRST_GET_BCAST(FLIT)       (FLIT & 0x40)
21213 +#define FIRST_GET_IS_INVALID(FLIT)  ((FLIT & 0x78) == 0x08)
21214 +#define FIRST_GET_TYPE(FLIT)        ((FLIT & 0x30)>>4)
21215 +#define PRF_GET_ROUTE(FLIT,N)       ((FLIT >> (N*4)) & 0x0F)
21216 +#define PRF_GET_IS_MYLINK(ROUTE)    (ROUTE == PACKED_MYLINK)
21217 +#define PRF_GET_IS_NORMAL(ROUTE)    (ROUTE & 0x8)
21218 +#define PRF_GET_NORMAL_LINK(ROUTE)  (ROUTE & 0x7)
21219 +#define PRF_MOVE_ON(INDEX,NEXT)     do { if (NEXT==3) {NEXT=0;INDEX++;} else {NEXT++; }} while (0);
21220 +/* ---------------------------------------------------------------------- */
21221 +int /* turn level needed or -1 if not possible */
21222 +elan3_route_get_min_turn_level( ELAN_POSITION *pos, int nodeId)
21223 +{
21224 +    int l,range = 1;
21225 +
21226 +    for(l=pos->pos_levels-1;l>=0;l--)
21227 +    {
21228 +       range = range * pos->pos_arity[l];
21229 +       
21230 +       if ( ((pos->pos_nodeid - (pos->pos_nodeid % range)) <= nodeId ) 
21231 +            && (nodeId <= (pos->pos_nodeid - (pos->pos_nodeid % range)+range -1))) 
21232 +           return l;
21233 +    }
21234 +    return -1;
21235 +}
21236 +/* ---------------------------------------------------------------------- */
21237 +int  
21238 +elan3_route_check(ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNodeId)
21239 +{
21240 +    ELAN3_NET_LOCATION lastLoc,currLoc;
21241 +    int               err;
21242 +    int               turnLevel;
21243 +    int               goingDown;
21244 +    int               lnk,index,next,val;
21245 +    ELAN_POSITION    *pos = &ctxt->Position;
21246 +   
21247 +    /* is the dest possible */
21248 +    if ( (destNodeId <0 ) || (destNodeId >= pos->pos_nodes))
21249 +       return  (ELAN3_ROUTE_PROC_RANGE);
21250 +
21251 +    /* 
21252 +     * walk the route, 
21253 +     * - to see if we get there 
21254 +     * - checking we dont turn around 
21255 +     */
21256 +    currLoc.netid = pos->pos_nodeid;         /* the elan */
21257 +    currLoc.plane = 0;
21258 +    currLoc.level = pos->pos_levels;
21259 +
21260 +    turnLevel = currLoc.level; /* track the how far the route goes in */
21261 +    goingDown = 0;             /* once set we cant go up again ie only one change of direction */
21262 +
21263 +    /* move onto the network from the elan */
21264 +    if ((err=elan3_route_follow_link(ctxt,&currLoc,4)) != ELAN3_ROUTE_SUCCESS) 
21265 +    {
21266 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: initial elan3_route_follow_link failed\n");
21267 +       return err;
21268 +    }
21269 +    /* do the first part of flit */
21270 +    switch ( FIRST_GET_TYPE(flits[0]) ) 
21271 +    {
21272 +    case 0  /* sent */   : { lnk = (flits[0] & 0x7);                                 break; }    
21273 +    case PACKED_MYLINK  : { lnk = pos->pos_nodeid % pos->pos_arity[pos->pos_levels-1];    break; }
21274 +    case PACKED_ADAPTIVE : { lnk = 7; /* all routes are the same just check one */    break; }
21275 +    default : 
21276 +       PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected first flit (%d)\n",flits[0]);
21277 +       return (ELAN3_ROUTE_INVALID); 
21278 +    }
21279 +    
21280 +    /* move along this link and check new location */
21281 +    memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */
21282 +    if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS ) 
21283 +    {
21284 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: elan3_route_follow_link failed\n");
21285 +       return err;
21286 +    }
21287 +    if ((currLoc.level > pos->pos_levels) || (currLoc.level < 0 )) 
21288 +    { 
21289 +       PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n");
21290 +       return (ELAN3_ROUTE_INVALID_LEVEL);
21291 +    }
21292 +    if ( lastLoc.level < currLoc.level ) 
21293 +    {
21294 +       turnLevel = lastLoc.level;
21295 +       goingDown = 1;
21296 +    }
21297 +    else 
21298 +    {
21299 +       if (turnLevel > currLoc.level)
21300 +           turnLevel =  currLoc.level;
21301 +       if  (goingDown) 
21302 +       {
21303 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n");
21304 +           return (ELAN3_ROUTE_OCILATES);
21305 +       }
21306 +    }   
21307 +
21308 +    /* loop on doing the remaining flits */
21309 +    index = 1;
21310 +    next  = FIRST_GET_NEXT(flits[0]);
21311 +    val   = PRF_GET_ROUTE(flits[index],next);
21312 +    while(val)
21313 +    {
21314 +       if (PRF_GET_IS_NORMAL(val) ) 
21315 +           lnk = PRF_GET_NORMAL_LINK(val);
21316 +       else
21317 +       {
21318 +         switch ( val ) 
21319 +         {
21320 +         case  PACKED_MYLINK : 
21321 +         {
21322 +             lnk = elan3_route_get_mylink(pos, &currLoc,&lastLoc);
21323 +             break;
21324 +         }
21325 +         default : 
21326 +             PRINTF1 (ctxt, DBG_VP, "elan3_route_check: unexpected packed flit (%d)\n",val);
21327 +             return (ELAN3_ROUTE_INVALID);
21328 +         }
21329 +       }
21330 +
21331 +       /* move along this link and check new location */
21332 +       memcpy(&lastLoc,&currLoc,sizeof(ELAN3_NET_LOCATION)); /* keep track of last loc */
21333 +       if ((err=elan3_route_follow_link(ctxt,&currLoc,lnk)) != ELAN3_ROUTE_SUCCESS) 
21334 +           return err;
21335 +       
21336 +       if ((currLoc.level > pos->pos_levels ) || ( currLoc.level < 0 ))
21337 +       { 
21338 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route leaves machine\n");
21339 +           return (ELAN3_ROUTE_INVALID_LEVEL);
21340 +       }
21341 +
21342 +       if ( lastLoc.level < currLoc.level ) 
21343 +           goingDown = 1;
21344 +       else 
21345 +       {
21346 +           if (turnLevel > currLoc.level)
21347 +               turnLevel =  currLoc.level;
21348 +           if  (goingDown) 
21349 +           {
21350 +               PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route ocilated\n");
21351 +               return (ELAN3_ROUTE_OCILATES);
21352 +           }
21353 +       }   
21354 +
21355 +       /* move to next part of flit */
21356 +       PRF_MOVE_ON(index,next);
21357 +       if ( index >= MAX_FLITS)
21358 +       {
21359 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: route too long\n");
21360 +           return (ELAN3_ROUTE_TOO_LONG);
21361 +       }
21362 +       /* extract the new value */
21363 +       val = PRF_GET_ROUTE(flits[index],next);
21364 +    }
21365 +
21366 +    /* have we got to where we want ? */
21367 +    if ((currLoc.level != pos->pos_levels) || (currLoc.netid != destNodeId))
21368 +    {
21369 +       PRINTF2 (ctxt, DBG_VP, "elan3_route_check: goes to %d instead of %d\n",currLoc.netid , destNodeId );
21370 +       return (ELAN3_ROUTE_WRONG_DEST);
21371 +    }
21372 +
21373 +    /*
21374 +     * there is the case of src == dest 
21375 +     * getTurnLevel returns pos->pos_levels, and turnLevel is (pos->pos_levels -1) 
21376 +     * then we assume they really want to  go onto the network.
21377 +     * otherwise we check that the turn at the appriate level
21378 +     */
21379 +    if ( (pos->pos_nodeid != destNodeId) || ( turnLevel != (pos->pos_levels -1)) )
21380 +    {
21381 +       int lev;
21382 +       if ((lev = elan3_route_get_min_turn_level(pos,destNodeId)) == -1) 
21383 +       {
21384 +           PRINTF0 (ctxt, DBG_VP, "elan3_route_check: cant calculate turn level\n");
21385 +           return (ELAN3_ROUTE_INVALID); /* not sure this can happen here as checks above should protect me */
21386 +       }
21387 +       if (turnLevel != lev) 
21388 +       {
21389 +           PRINTF2 (ctxt, DBG_VP, "elan3_route_check: turn level should be %d but is %d \n", lev, turnLevel);
21390 +           return (ELAN3_ROUTE_TURN_LEVEL);
21391 +       }
21392 +    }
21393 +    return (ELAN3_ROUTE_SUCCESS);
21394 +}
21395 +/* ---------------------------------------------------------------------- */
21396 +int
21397 +elan3_route_broadcast_check(ELAN3_CTXT *ctxt , E3_uint16 *flits, int lowNode, int highNode ) 
21398 +{
21399 +    E3_uint16 flitsTmp[MAX_FLITS];
21400 +    int       nflits,i;
21401 +    
21402 +    nflits = GenerateRoute (&ctxt->Position, flitsTmp, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
21403 +    
21404 +    for(i=0;i<nflits;i++)
21405 +       if ( flitsTmp[i] != flits[i] ) 
21406 +       {
21407 +           PRINTF3 (ctxt, DBG_VP, "elan3_route_broadcast_check:  flit[%d] %d (should be %d)\n",i,flits[i],flitsTmp[i]);
21408 +           return (ELAN3_ROUTE_INVALID);   
21409 +       }
21410 +    
21411 +    return (ELAN3_ROUTE_SUCCESS);
21412 +}
21413 +/* ---------------------------------------------------------------------- */
21414 +
21415 +/*
21416 + * Local variables:
21417 + * c-file-style: "stroustrup"
21418 + * End:
21419 + */
21420 diff -urN clean/drivers/net/qsnet/elan3/route_table.c linux-2.6.9/drivers/net/qsnet/elan3/route_table.c
21421 --- clean/drivers/net/qsnet/elan3/route_table.c 1969-12-31 19:00:00.000000000 -0500
21422 +++ linux-2.6.9/drivers/net/qsnet/elan3/route_table.c   2003-09-24 09:57:25.000000000 -0400
21423 @@ -0,0 +1,560 @@
21424 +/*
21425 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
21426 + *
21427 + *    For licensing information please see the supplied COPYING file
21428 + *
21429 + */
21430 +
21431 +#ident "$Id: route_table.c,v 1.23 2003/09/24 13:57:25 david Exp $"
21432 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/route_table.c,v $ */
21433 +
21434 +#include <qsnet/kernel.h>
21435 +
21436 +#include <elan3/elanregs.h>
21437 +#include <elan3/elandev.h>
21438 +#include <elan3/elanvp.h>
21439 +#include <elan3/elan3mmu.h>
21440 +#include <elan3/elanctxt.h>
21441 +#include <elan3/elandebug.h>
21442 +
21443 +static sdramaddr_t
21444 +AllocateLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int ctxnum, E3_uint64 *smallRoute)
21445 +{
21446 +    int                  bit = -1;
21447 +    ELAN3_ROUTES  *rent;
21448 +    unsigned long flags;
21449 +    
21450 +    spin_lock_irqsave (&tbl->Lock, flags);
21451 +    
21452 +    for (rent = tbl->LargeRoutes; rent; rent = rent->Next)
21453 +    {
21454 +       if ((bit = bt_freebit (rent->Bitmap, NROUTES_PER_BLOCK)) != -1)
21455 +           break;
21456 +    }
21457 +    
21458 +    if (bit == -1)                                             /* No spare entries in large routes */
21459 +    {                                                          /* so allocate a new page */
21460 +       PRINTF0 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: allocate route entries\n");
21461 +       
21462 +       spin_unlock_irqrestore (&tbl->Lock, flags);
21463 +
21464 +       KMEM_ZALLOC(rent, ELAN3_ROUTES *, sizeof (ELAN3_ROUTES), TRUE);
21465 +       
21466 +       if (rent == (ELAN3_ROUTES *) NULL)
21467 +           return ((sdramaddr_t) 0);
21468 +       
21469 +       rent->Routes = elan3_sdram_alloc (dev, PAGESIZE);
21470 +       if (rent->Routes == (sdramaddr_t) 0)
21471 +       {
21472 +           KMEM_FREE (rent, sizeof (ELAN3_ROUTES));
21473 +           return ((sdramaddr_t) 0);
21474 +       }
21475 +
21476 +       spin_lock_irqsave (&tbl->Lock, flags);
21477 +
21478 +       /* Add to list of large routes */
21479 +       rent->Next       = tbl->LargeRoutes;
21480 +       tbl->LargeRoutes = rent;
21481 +
21482 +       /* and use entry 0 */
21483 +       bit = 0;
21484 +    }
21485 +    
21486 +    /* Set the bit in the bitmap to mark this route as allocated */
21487 +    BT_SET (rent->Bitmap, bit);
21488 +    
21489 +    /* And generate the small route pointer and the pointer to the large routes */
21490 +    (*smallRoute) = BIG_ROUTE_PTR(rent->Routes + (bit*NBYTES_PER_LARGE_ROUTE), ctxnum);
21491 +
21492 +    PRINTF4 (DBG_DEVICE, DBG_VP, "AllocateLargeRoute: rent %p using entry %d at %lx with route pointer %llx\n",
21493 +            rent, bit, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), (long long) (*smallRoute));
21494 +
21495 +    /* Invalidate the large route */
21496 +    elan3_sdram_zeroq_sdram (dev, rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE), NBYTES_PER_LARGE_ROUTE);
21497 +
21498 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21499 +
21500 +    return (rent->Routes + (bit * NBYTES_PER_LARGE_ROUTE));
21501 +}
21502 +
21503 +static void
21504 +FreeLargeRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, E3_uint64 smallRoute)
21505 +{
21506 +    E3_Addr      addr = (E3_Addr) (smallRoute & ((1ULL << ROUTE_CTXT_SHIFT)-1));
21507 +    ELAN3_ROUTES *rent;
21508 +
21509 +    PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: free route %llx\n", (long long) smallRoute);
21510 +
21511 +    ASSERT (SPINLOCK_HELD (&tbl->Lock));
21512 +
21513 +    for (rent = tbl->LargeRoutes; rent; rent = rent->Next)
21514 +    {
21515 +       if (rent->Routes <= addr && (rent->Routes + ROUTE_BLOCK_SIZE) > addr)
21516 +       {
21517 +           int indx = (addr - rent->Routes)/NBYTES_PER_LARGE_ROUTE;
21518 +           
21519 +           PRINTF2 (DBG_DEVICE, DBG_VP, "FreeLargeRoute: rent=%p indx=%d\n", rent, indx);
21520 +           
21521 +           BT_CLEAR(rent->Bitmap, indx);
21522 +           return;
21523 +       }
21524 +    }
21525 +
21526 +    panic ("elan: FreeLargeRoute - route not found in large route tables");
21527 +}
21528 +
21529 +static void
21530 +FreeLargeRoutes (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
21531 +{
21532 +    ELAN3_ROUTES *rent;
21533 +
21534 +    while ((rent = tbl->LargeRoutes) != NULL)
21535 +    {
21536 +       PRINTF1 (DBG_DEVICE, DBG_VP, "FreeLargeRoutes: free rent %p\n", rent);
21537 +
21538 +       tbl->LargeRoutes = rent->Next;
21539 +
21540 +       elan3_sdram_free (dev, rent->Routes, PAGESIZE);
21541 +       
21542 +       KMEM_FREE (rent, sizeof(ELAN3_ROUTES));
21543 +    }
21544 +}
21545 +
21546 +int
21547 +GetRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits)
21548 +{
21549 +    E3_uint64  routeValue;
21550 +    sdramaddr_t largeRouteOff;
21551 +  
21552 +    if (process < 0 || process >= tbl->Size)
21553 +       return (EINVAL);
21554 +
21555 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21556 +
21557 +    if (routeValue & ROUTE_PTR)
21558 +    {
21559 +       largeRouteOff = (routeValue & ROUTE_PTR_MASK);
21560 +       
21561 +       routeValue = elan3_sdram_readq (dev, largeRouteOff + 0);
21562 +       flits[0] = routeValue & 0xffff;
21563 +       flits[1] = (routeValue >> 16)  & 0xffff;
21564 +       flits[2] = (routeValue >> 32)  & 0xffff;
21565 +       flits[3] = (routeValue >> 48)  & 0xffff;
21566 +       
21567 +       routeValue = elan3_sdram_readq (dev, largeRouteOff + 8);
21568 +       flits[4] = routeValue & 0xffff;
21569 +       flits[5] = (routeValue >> 16)  & 0xffff;
21570 +       flits[6] = (routeValue >> 32)  & 0xffff;
21571 +       flits[6] = (routeValue >> 48)  & 0xffff;
21572 +    }
21573 +    else
21574 +    {
21575 +       flits[0] = routeValue & 0xffff;
21576 +       flits[1] = (routeValue >> 16)  & 0xffff;
21577 +       flits[2] = (routeValue >> 32)  & 0xffff;
21578 +    }
21579 +
21580 +    return (ESUCCESS);
21581 +}
21582 +
21583 +ELAN3_ROUTE_TABLE *
21584 +AllocateRouteTable (ELAN3_DEV *dev, int size)
21585 +{
21586 +    ELAN3_ROUTE_TABLE *tbl;
21587 +
21588 +    KMEM_ZALLOC (tbl, ELAN3_ROUTE_TABLE *, sizeof (ELAN3_ROUTE_TABLE), TRUE);
21589 +
21590 +    if (tbl == (ELAN3_ROUTE_TABLE *) NULL)
21591 +       return (NULL);
21592 +    
21593 +    tbl->Size  = size;
21594 +    tbl->Table = elan3_sdram_alloc (dev, size*NBYTES_PER_SMALL_ROUTE);
21595 +
21596 +    if (tbl->Table == 0)
21597 +    {
21598 +       KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
21599 +       return (NULL);
21600 +    }
21601 +    spin_lock_init (&tbl->Lock);
21602 +
21603 +    /* zero the route table */
21604 +    elan3_sdram_zeroq_sdram (dev, tbl->Table, size*NBYTES_PER_SMALL_ROUTE);
21605 +
21606 +    return (tbl);
21607 +}
21608 +
/*
 * Destroy a route table created by AllocateRouteTable: release the
 * small-route SDRAM, any large-route blocks, the lock, and finally the
 * table structure itself.  Caller must ensure the table is no longer
 * in use by the device.
 */
void
FreeRouteTable (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl)
{
    elan3_sdram_free (dev, tbl->Table, tbl->Size*NBYTES_PER_SMALL_ROUTE);

    FreeLargeRoutes (dev, tbl);

    spin_lock_destroy (&tbl->Lock);

    KMEM_FREE (tbl, sizeof (ELAN3_ROUTE_TABLE));
}
21620 +
/*
 * Load a route for "process" into the route table.
 *
 * Routes of fewer than 4 flits are packed directly into the 64-bit
 * table entry (a "small" route); longer routes are written into a
 * separately allocated 16-byte "large" route and the table entry
 * becomes a pointer to it.
 *
 * Returns EINVAL if "process" is out of range, ENOMEM if a large-route
 * slot cannot be allocated, otherwise ESUCCESS.
 */
int
LoadRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, int ctxnum, int nflits, E3_uint16 *flits)
{
    E3_uint64    routeValue;
    E3_uint64    largeRouteValue;
    sdramaddr_t   largeRouteOff;
    unsigned long flags;

    if (process < 0 || process >= tbl->Size)
       return (EINVAL);

    PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: table %lx process %d ctxnum %x\n", tbl->Table ,process, ctxnum);

    if (nflits < 4)
    {
       spin_lock_irqsave (&tbl->Lock, flags);

       /* See if we're replacing a "large" route */
       routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
       if (routeValue & ROUTE_PTR)
           FreeLargeRoute (dev, tbl, routeValue);

       routeValue = SMALL_ROUTE(flits, ctxnum);

       /* a small route must never have the pointer bit set - diagnostic only */
       if ( routeValue &  ROUTE_PTR)
           PRINTF0 (DBG_DEVICE, DBG_VP, "SHOULD BE  A SMALL ROUTE !!!!!!!\n");

       PRINTF2 (DBG_DEVICE, DBG_VP, "LoadRoute: loading small route %d  %llx\n", process, (long long) routeValue);
       elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, routeValue);
    }
    else
    {
       E3_uint64 value0 = BIG_ROUTE0(flits);
       E3_uint64 value1 = BIG_ROUTE1(flits);

       /* allocate before taking the lock: AllocateLargeRoute drops
        * tbl->Lock before returning, and on failure we return with the
        * lock not held */
       if ((largeRouteOff = AllocateLargeRoute (dev, tbl, ctxnum, &largeRouteValue)) == (sdramaddr_t) 0)
           return (ENOMEM);

       spin_lock_irqsave (&tbl->Lock, flags);
           
       routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);

       if ((routeValue & ROUTE_PTR) == 0)
           elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, largeRouteValue);
       else
       {
           /* the entry already points at a large route - keep that one
            * and give back the slot we just allocated */
           FreeLargeRoute (dev, tbl, largeRouteValue);

           largeRouteOff   = (routeValue & ROUTE_PTR_MASK);
       }

       PRINTF3 (DBG_DEVICE, DBG_VP, "LoadRoute: loading large route %d - %llx %llx\n", process, 
                (long long) value0, (long long) value1);

       elan3_sdram_writeq (dev, largeRouteOff + 0, value0);
       elan3_sdram_writeq (dev, largeRouteOff + 8, value1);
    }

    spin_unlock_irqrestore (&tbl->Lock, flags);
    return (ESUCCESS);
}
21682 +void
21683 +InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
21684 +{
21685 +    E3_uint64 routeValue;
21686 +    unsigned long flags;
21687 +
21688 +    if (process < 0 || process >= tbl->Size)
21689 +       return;
21690 +
21691 +    spin_lock_irqsave (&tbl->Lock, flags);
21692 +
21693 +    /* unset ROUTE_VALID
21694 +     * does not matter if its short or long, will check when we re-use it
21695 +     */
21696 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21697 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue & (~ROUTE_VALID)));
21698 +
21699 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21700 +}
21701 +void
21702 +ValidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
21703 +{
21704 +    E3_uint64 routeValue;
21705 +    unsigned long flags;
21706 +
21707 +    if (process < 0 || process >= tbl->Size)
21708 +       return;
21709 +
21710 +    PRINTF2 (DBG_DEVICE, DBG_VP, "ValidateRoute: table %ld process %d  \n", tbl->Table ,process);
21711 +
21712 +    spin_lock_irqsave (&tbl->Lock, flags);
21713 +
21714 +    /* set ROUTE_VALID
21715 +     */
21716 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21717 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, (routeValue | ROUTE_VALID));
21718 +
21719 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21720 +}
21721 +void
21722 +ClearRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process)
21723 +{
21724 +    E3_uint64 routeValue;
21725 +    unsigned long flags;
21726 +
21727 +    if (process < 0 || process >= tbl->Size)
21728 +       return;
21729 +
21730 +    spin_lock_irqsave (&tbl->Lock, flags);
21731 +
21732 +    PRINTF2 (DBG_DEVICE, DBG_VP, "ClearRoute: table %ld process %d  \n", tbl->Table ,process);
21733 +
21734 +    routeValue = elan3_sdram_readq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE);
21735 +
21736 +    elan3_sdram_writeq (dev, tbl->Table + process * NBYTES_PER_SMALL_ROUTE, 0);
21737 +
21738 +    if (routeValue & ROUTE_PTR)
21739 +       FreeLargeRoute (dev, tbl, routeValue);
21740 +
21741 +    spin_unlock_irqrestore (&tbl->Lock, flags);
21742 +}
21743 +
21744 +static int
21745 +ElanIdEqual (ELAN_POSITION *pos, int level, int ida, int idb)
21746 +{
21747 +    int l;
21748 +
21749 +    for (l = pos->pos_levels-1; l >= level; l--)
21750 +    {
21751 +       ida /= pos->pos_arity[l];
21752 +       idb /= pos->pos_arity[l];
21753 +    }
21754 +       
21755 +    return (ida == idb);
21756 +}
21757 +
/*
 * Return the link number to route down at "level" in order to reach
 * "elanid": divide the id through the arities of the lower levels, then
 * take the remainder at this level.
 *
 * NOTE(review): the loop advances "level" while stepping "l" down the
 * arity array, so it iterates (pos_levels - 1 - level) times starting
 * from the deepest arity - confirm this pairing of levels to arities
 * is intended.
 */
static int
RouteDown (ELAN_POSITION *pos, int level, int elanid)
{
    int l;

    for (l = (pos->pos_levels - 1); level < pos->pos_levels - 1; level++, l--)
    {  
       if (  pos->pos_arity[l] )    /* skip a zero arity rather than divide by it */
           elanid /= pos->pos_arity[l];    
    }
    elanid %= pos->pos_arity[l];

    return elanid;
}
21772 +
21773 +static int
21774 +InitPackedAndFlits (u_char *packed, E3_uint16 *flits)
21775 +{
21776 +    int rb = 0;
21777 +
21778 +    bzero ((caddr_t) packed, MAX_PACKED+4);
21779 +    bzero ((caddr_t) flits, MAX_FLITS * sizeof (E3_uint16));
21780 +
21781 +    /* Initialise 4 bytes of packed, so that the "padding" */
21782 +    /* NEVER terminates with 00, as this is recognised as */
21783 +    /* as CRC flit */
21784 +    packed[rb++] = 0xF;
21785 +    packed[rb++] = 0xF;
21786 +    packed[rb++] = 0xF;
21787 +    packed[rb++] = 0xF;
21788 +
21789 +    return (rb);
21790 +}
21791 +
/*
 * Convert the packed 4-bit route bytes into 16-bit flits ready to be
 * written to the route table.  "rb" counts the packed bytes including
 * the 4 bytes of 0xF padding written by InitPackedAndFlits; flits[0]
 * already holds the first route byte and is annotated here with the
 * timeout, the priority, and the offset of the first packed byte.
 * Returns the total number of flits generated.
 */
static int
PackThemRoutesUp (E3_uint16 *flits, u_char *packed, int rb, int timeout, int highPri)
{
    int i, nflits;

    flits[0] |= FIRST_TIMEOUT(timeout);
    if (highPri)
       flits[0] |= FIRST_HIGH_PRI;

    /* round up the number of route bytes to flits */
    /* and subtract the 4 extra we've padded out with */
    nflits = (rb-1)/4;
    
    /* pack four route bytes into each 16-bit flit, filling the last flit first */
    for (i = nflits; i > 0; i--)
    {
       flits[i] = (packed[rb-1] << 12 |
                   packed[rb-2] << 8  |
                   packed[rb-3] << 4  |
                   packed[rb-4] << 0);
       rb -= 4;
    }
    
    /* Now set the position of the first packed route  */
    /* byte in the 2nd 16 bit flit, taking account of the */
    /* 4 byte padding */
    flits[0] |= FIRST_PACKED (4-rb);
    
    return (nflits+1);
}
21821 +
/*
 * Build a route (as 16-bit flits) from this node to the destination
 * range [lowid, highid]; lowid != highid produces a broadcast route.
 * The route climbs to the lowest switch level common to this node and
 * the whole destination range, then descends link by link.
 * Returns the number of flits generated (via PackThemRoutesUp).
 */
int
GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri)
{
    int     broadcast  = (lowid != highid);
    int     rb         = 0;
    int            first      = 1;
    int     noRandom   = 0;
    int     level;
    u_char  packed[MAX_PACKED+4];
    int     numDownLinks;

    rb = InitPackedAndFlits (packed, flits);

    /* first pass: check whether adaptive (random) routing is disabled
     * at any level we will climb through */
    for (level = pos->pos_levels-1;                            /* Move up out of the elan */
        level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
                        ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
    {
       noRandom |= pos->pos_random_disabled & (1 << (pos->pos_levels-1-level));
    }
    
    /* second pass: emit the "up" part of the route */
    for (level = pos->pos_levels-1;                            /* Move up out of the elan */
        level > 0 && ! (ElanIdEqual (pos, level, pos->pos_nodeid, lowid) &&
                        ElanIdEqual (pos, level, pos->pos_nodeid, highid)); level--)
    {
       numDownLinks = pos->pos_arity [level];
       if (first)
       {
           if (broadcast || noRandom)
               flits[0] = FIRST_BCAST_TREE;
           else
           {
               if (numDownLinks == 4) 
                   flits[0] = FIRST_ADAPTIVE;
               else
                   flits[0] = FIRST_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
           }
           first = 0;
       }
       else
       {
           if (broadcast || noRandom)
               packed[rb++] = PACKED_BCAST_TREE;
           else 
           {
               if (numDownLinks == 4) 
                   packed[rb++] = PACKED_ADAPTIVE;
               else
                   packed[rb++] = PACKED_ROUTE( numDownLinks + ( lowid % (8-numDownLinks) ));
           }               
       }
    }
    
    /* emit the "down" part of the route to the destination(s) */
    while (level < pos->pos_levels)
    {
       int lowRoute  = RouteDown (pos, level, lowid);
       int highRoute = RouteDown (pos, level, highid);

       if (first)
       {
           if (broadcast)
               flits[0] = FIRST_BCAST(highRoute, lowRoute);
           else
               flits[0] = FIRST_ROUTE(lowRoute);

           first = 0;
       }
       else
       {
           if (broadcast)
           {
               packed[rb++] = PACKED_BCAST0(highRoute, lowRoute);
               packed[rb++] = PACKED_BCAST1(highRoute, lowRoute);
           }
           else
               packed[rb++] = PACKED_ROUTE(lowRoute);
       }
       
       level++;
    }

#ifdef ELITE_REVA_SUPPORTED
    if (broadcast && (pos->pos_levels == 3))
    {
      packed[rb++] = PACKED_BCAST0(0, 0);
      packed[rb++] = PACKED_BCAST1(0, 0);
    }
#endif

    return (PackThemRoutesUp (flits, packed, rb, timeout, highPri));
}
21912 +
/*
 * Build a network-check route: climb from the top of the machine down
 * to "level" (adaptive or broadcast-tree per the "adaptive" flag),
 * cross via MYLINK, then route back down to this node.
 * Returns the number of flits generated.
 */
int
GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive)
{
    int     notfirst = 0;
    int     l, rb;
    u_char  packed[MAX_PACKED+4];

    rb = InitPackedAndFlits (packed, flits);

    /* "up" part of the route */
    for (l = pos->pos_levels-1; l > level; l--)
       if (! notfirst++)
           flits[0] = adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE;
       else
           packed[rb++] = adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE;

    if (! notfirst++ ) 
       flits[0] = FIRST_MYLINK;
    else
       packed[rb++] = PACKED_MYLINK;

    /* "down" part of the route back to this node */
    for (l++ /* consume mylink */; l < pos->pos_levels; l++)
       if (! notfirst++)
           flits[0] = FIRST_ROUTE (RouteDown (pos, l, pos->pos_nodeid));
       else
           packed[rb++] = PACKED_ROUTE (RouteDown (pos, l, pos->pos_nodeid));


    return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY));
}
21942 +
21943 +
21944 +/*
21945 + * In this case "level" is the number of levels counted from the bottom.
21946 + */
/*
 * Build a probe route: up "level" links (using linkup[] if supplied,
 * otherwise adaptive or broadcast-tree), across via MYLINK, then back
 * down through linkdown[].  Returns the number of flits generated.
 *
 * NOTE(review): linkdown is dereferenced unconditionally when
 * level > 0 while linkup may be NULL - confirm callers always supply
 * linkdown.
 */
int
GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive )
{
    int            first = 1;
    int     i, rb;
    u_char  packed[MAX_PACKED+4];

    rb = InitPackedAndFlits (packed, flits);

    /* Generate "up" routes */
    for (i = 0; i < level; i++)
    {
       if (first)
           flits[0] = linkup ? FIRST_ROUTE(linkup[i]) : adaptive ? FIRST_ADAPTIVE : FIRST_BCAST_TREE;
       else
           packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : adaptive ? PACKED_ADAPTIVE : PACKED_BCAST_TREE;
       first = 0;
    }

    /* Generate a "to-me" route down */
    if (first)
       flits[0] = FIRST_MYLINK;
    else
       packed[rb++] = PACKED_MYLINK;

    for (i = level-1; i >= 0; i--)
       packed[rb++] =  PACKED_ROUTE(linkdown[i]);

    return (PackThemRoutesUp (flits, packed, rb, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY));
}
21977 +
21978 +
21979 +/*
21980 + * Local variables:
21981 + * c-file-style: "stroustrup"
21982 + * End:
21983 + */
21984 diff -urN clean/drivers/net/qsnet/elan3/sdram.c linux-2.6.9/drivers/net/qsnet/elan3/sdram.c
21985 --- clean/drivers/net/qsnet/elan3/sdram.c       1969-12-31 19:00:00.000000000 -0500
21986 +++ linux-2.6.9/drivers/net/qsnet/elan3/sdram.c 2003-09-24 09:57:25.000000000 -0400
21987 @@ -0,0 +1,807 @@
21988 +/*
21989 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
21990 + * 
21991 + *    For licensing information please see the supplied COPYING file
21992 + *
21993 + */
21994 +
21995 +#ident "@(#)$Id: sdram.c,v 1.17 2003/09/24 13:57:25 david Exp $"
21996 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/sdram.c,v $*/
21997 +
21998 +
21999 +#include <qsnet/kernel.h>
22000 +
22001 +#include <elan3/elanregs.h>
22002 +#include <elan3/elandev.h>
22003 +#include <elan3/elandebug.h>
22004 +
22005 +/* sdram access functions */
22006 +#define sdram_off_to_bank(dev,off)     (&dev->SdramBanks[(off) >> ELAN3_SDRAM_BANK_SHIFT])
22007 +#define sdram_off_to_offset(dev,off)   ((off) & (ELAN3_SDRAM_BANK_SIZE-1))
22008 +#define sdram_off_to_bit(dev,indx,off) (sdram_off_to_offset(dev,off) >> (SDRAM_MIN_BLOCK_SHIFT+(indx)))
22009 +
22010 +#define sdram_off_to_mapping(dev,off)  (sdram_off_to_bank(dev,off)->Mapping + sdram_off_to_offset(dev,off))
22011 +    
22012 +unsigned char
22013 +elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t off)
22014 +{
22015 +    return (readb ((unsigned char *) sdram_off_to_mapping(dev, off)));
22016 +}
22017 +
22018 +unsigned short
22019 +elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t off)
22020 +{
22021 +    return (readw ((unsigned short *) sdram_off_to_mapping(dev, off)));
22022 +}
22023 +
22024 +unsigned int
22025 +elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t off)
22026 +{
22027 +    return (readl ((unsigned int *) sdram_off_to_mapping(dev, off)));
22028 +}
22029 +
22030 +unsigned long long
22031 +elan3_sdram_readq (ELAN3_DEV *dev, sdramaddr_t off)
22032 +{
22033 +    return (readq ((unsigned long long *) sdram_off_to_mapping(dev, off)));
22034 +}
22035 +
22036 +void
22037 +elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t off, unsigned char val)
22038 +{
22039 +    writeb (val, (unsigned char *) sdram_off_to_mapping(dev, off));
22040 +    wmb();
22041 +}
22042 +
22043 +void
22044 +elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t off, unsigned short val)
22045 +{
22046 +    writew (val, (unsigned short *) sdram_off_to_mapping(dev, off));
22047 +    wmb();
22048 +}
22049 +
22050 +void
22051 +elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t off, unsigned int val)
22052 +{
22053 +    writel (val, (unsigned int *) sdram_off_to_mapping(dev, off));
22054 +    wmb();
22055 +}
22056 +
22057 +void
22058 +elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t off, unsigned long long val)
22059 +{
22060 +    writeq (val, (unsigned long long *) sdram_off_to_mapping(dev, off));
22061 +    wmb();
22062 +}
22063 +
22064 +void
22065 +elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
22066 +{
22067 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
22068 +}
22069 +
22070 +void
22071 +elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
22072 +{
22073 +#ifdef __LITTLE_ENDIAN__
22074 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
22075 +#else
22076 +#error incorrect for big endian
22077 +#endif
22078 +}
22079 +
22080 +void
22081 +elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
22082 +{
22083 +#ifdef __LITTLE_ENDIAN__
22084 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
22085 +#else
22086 +#error incorrect for big endian
22087 +#endif
22088 +}
22089 +
22090 +void
22091 +elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes)
22092 +{
22093 +#ifdef __LITTLE_ENDIAN__
22094 +    bcopy ((void *)sdram_off_to_mapping(dev, from), to, nbytes);
22095 +#else
22096 +#error incorrect for big endian
22097 +#endif
22098 +}
22099 +
22100 +#define E3_WRITEBUFFER_SIZE            16
22101 +#define E3_WRITEBUFFER_OFFSET(x)       (((unsigned long) x) & (E3_WRITEBUFFER_SIZE-1))
22102 +#define E3_WRITEBUFFER_BASE(x)         (((unsigned long) x) & ~((unsigned long) (E3_WRITEBUFFER_SIZE-1)))
22103 +
/*
 * Copy nbytes from host memory at "from" into SDRAM at "to", one byte
 * at a time.  Writes are issued in units of the 16-byte elan write
 * buffer, working from the top of the range downwards, with a wmb()
 * after each buffer-full to drain it.
 */
void
elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
{
    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
    virtaddr_t slim  = (virtaddr_t) from + nbytes;
    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);  /* bytes in first (partial) buffer */
    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t); /* bytes in last (partial) buffer */
    int        i;

    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
    {
       /* whole range fits inside a single write buffer */
       for (i = 0; i < nbytes/sizeof(uint8_t); i++)
           writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]);
       wmb();
    }
    else
    {
       /* partial buffer at the top of the range */
       if (ntop < E3_WRITEBUFFER_SIZE)
       {
           slim -= ntop;
           dlim -= ntop;
           
           for (i = 0; i < ntop/sizeof(uint8_t); i++)
               writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]);
           wmb();
       }
       
       /* whole write buffers */
       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
       {
           dlim -= E3_WRITEBUFFER_SIZE;
           slim -= E3_WRITEBUFFER_SIZE;

           for (i = 0; i < E3_WRITEBUFFER_SIZE/sizeof (uint8_t); i++)
               writeb (((uint8_t *) slim)[i], &((uint8_t *) dlim)[i]);
           wmb();
       }
       
       /* partial buffer at the bottom of the range */
       if (nbase < E3_WRITEBUFFER_SIZE)
       {
           for (i = 0; i < nbase/sizeof(uint8_t); i++)
               writeb (((uint8_t *) from)[i], &((uint8_t *) dbase)[i]);
           wmb();
       }
    }
}
22150 +
/*
 * Zero nbytes of SDRAM at "to" at byte granularity, working in units of
 * the 16-byte elan write buffer (whole buffers are cleared with two
 * 64-bit stores), draining the buffer (wmb) after each unit.
 */
void
elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
{
    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint8_t)) + sizeof (uint8_t);
    int        i;

    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
    {
       /* whole range fits inside a single write buffer */
       for (i = 0; i < nbytes/sizeof(uint8_t); i++)
           writeb (0, &((uint8_t *) dbase)[i]);
       wmb();
    }
    else
    {
       /* partial buffer at the top of the range */
       if (ntop < E3_WRITEBUFFER_SIZE)
       {
           dlim -= ntop;
           
           for (i = 0; i < ntop/sizeof(uint8_t); i++)
               writeb (0, &((uint8_t *) dlim)[i]);
           wmb();
       }
       
       /* whole buffers, zeroed as two 64-bit stores */
       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
       {
           dlim -= E3_WRITEBUFFER_SIZE;

           writeq (0, &((uint64_t *) dlim)[0]);
           writeq (0, &((uint64_t *) dlim)[1]);

           wmb();
       }
       
       /* partial buffer at the bottom of the range */
       if (nbase < E3_WRITEBUFFER_SIZE)
       {
           for (i = 0; i < nbase/sizeof(uint8_t); i++)
               writeb (0, &((uint8_t *) dbase)[i]);
           wmb();
       }
    }
}
22195 +
/*
 * Copy nbytes from host memory into SDRAM using 16-bit stores, in units
 * of the 16-byte elan write buffer, draining (wmb) after each unit.
 * Assumes nbytes and "to" are 16-bit aligned - TODO confirm with callers.
 */
void
elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
{
    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
    virtaddr_t slim  = (virtaddr_t) from + nbytes;
    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t);
    int        i;

    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
    {
       /* whole range fits inside a single write buffer */
       for (i = 0; i < nbytes/sizeof(uint16_t); i++)
           writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]);
       wmb();
    }
    else
    {
       /* partial buffer at the top of the range */
       if (ntop < E3_WRITEBUFFER_SIZE)
       {
           slim -= ntop;
           dlim -= ntop;

           for (i = 0; i < ntop/sizeof(uint16_t); i++)
               writew (((uint16_t *) slim)[i], &((uint16_t *) dlim)[i]);
           wmb();
       }
       
       /* whole buffers: 8 x 16-bit stores, unrolled */
       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
       {
           dlim -= E3_WRITEBUFFER_SIZE;
           slim -= E3_WRITEBUFFER_SIZE;

           writew (((uint16_t *) slim)[0], &((uint16_t *) dlim)[0]);
           writew (((uint16_t *) slim)[1], &((uint16_t *) dlim)[1]);
           writew (((uint16_t *) slim)[2], &((uint16_t *) dlim)[2]);
           writew (((uint16_t *) slim)[3], &((uint16_t *) dlim)[3]);
           writew (((uint16_t *) slim)[4], &((uint16_t *) dlim)[4]);
           writew (((uint16_t *) slim)[5], &((uint16_t *) dlim)[5]);
           writew (((uint16_t *) slim)[6], &((uint16_t *) dlim)[6]);
           writew (((uint16_t *) slim)[7], &((uint16_t *) dlim)[7]);
           wmb();
       }
       
       /* partial buffer at the bottom of the range */
       if (nbase < E3_WRITEBUFFER_SIZE)
       {
           for (i = 0; i < nbase/sizeof(uint16_t); i++)
               writew (((uint16_t *) from)[i], &((uint16_t *) dbase)[i]);
           wmb();
       }
    }
}
22248 +
/*
 * Zero nbytes of SDRAM at 16-bit granularity, in units of the 16-byte
 * elan write buffer (whole buffers via two 64-bit stores), draining
 * (wmb) after each unit.
 */
void
elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
{
    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint16_t)) + sizeof (uint16_t);
    int        i;

    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
    {
       /* whole range fits inside a single write buffer */
       for (i = 0; i < nbytes/sizeof(uint16_t); i++)
           writew (0, &((uint16_t *) dbase)[i]);
       wmb();
    }
    else
    {
       /* partial buffer at the top of the range */
       if (ntop < E3_WRITEBUFFER_SIZE)
       {
           dlim -= ntop;
           
           for (i = 0; i < ntop/sizeof(uint16_t); i++)
               writew (0, &((uint16_t *) dlim)[i]);
           wmb();
       }
       
       /* whole buffers */
       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
       {
           dlim -= E3_WRITEBUFFER_SIZE;

           writeq (0, &((uint64_t *) dlim)[0]);
           writeq (0, &((uint64_t *) dlim)[1]);
           wmb();
       }
       
       /* partial buffer at the bottom of the range */
       if (nbase < E3_WRITEBUFFER_SIZE)
       {
           for (i = 0; i < nbase/sizeof(uint16_t); i++)
               writew (0, &((uint16_t *) dbase)[i]);
           wmb();
       }
    }
}
22292 +
/*
 * Copy nbytes from host memory into SDRAM using 32-bit stores, in units
 * of the 16-byte elan write buffer, draining (wmb) after each unit.
 * Assumes nbytes and "to" are 32-bit aligned - TODO confirm with callers.
 */
void
elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
{
    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
    virtaddr_t slim  = (virtaddr_t) from + nbytes;
    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t);
    int        i;

    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
    {
       /* whole range fits inside a single write buffer */
       for (i = 0; i < nbytes/sizeof(uint32_t); i++)
           writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]);
       wmb();
    }
    else
    {
       /* partial buffer at the top of the range */
       if (ntop < E3_WRITEBUFFER_SIZE)
       {
           slim -= ntop;
           dlim -= ntop;

           for (i = 0; i < ntop/sizeof(uint32_t); i++)
               writel (((uint32_t *) slim)[i], &((uint32_t *) dlim)[i]);
           wmb();
       }
       
       /* whole buffers: 4 x 32-bit stores, unrolled */
       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
       {
           dlim -= E3_WRITEBUFFER_SIZE;
           slim -= E3_WRITEBUFFER_SIZE;

           writel (((uint32_t *) slim)[0], &((uint32_t *) dlim)[0]);
           writel (((uint32_t *) slim)[1], &((uint32_t *) dlim)[1]);
           writel (((uint32_t *) slim)[2], &((uint32_t *) dlim)[2]);
           writel (((uint32_t *) slim)[3], &((uint32_t *) dlim)[3]);
           wmb();
       }
       
       /* partial buffer at the bottom of the range */
       if (nbase < E3_WRITEBUFFER_SIZE)
       {
           for (i = 0; i < nbase/sizeof(uint32_t); i++)
               writel (((uint32_t *) from)[i], &((uint32_t *) dbase)[i]);
           wmb();
       }
    }
}
22341 +
/*
 * Zero nbytes of SDRAM at 32-bit granularity, in units of the 16-byte
 * elan write buffer (whole buffers via two 64-bit stores), draining
 * (wmb) after each unit.
 */
void
elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
{
    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint32_t)) + sizeof (uint32_t);
    int        i;

    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
    {
       /* whole range fits inside a single write buffer */
       for (i = 0; i < nbytes/sizeof(uint32_t); i++)
           writel (0, &((uint32_t *) dbase)[i]);
       wmb();
    }
    else
    {
       /* partial buffer at the top of the range */
       if (ntop < E3_WRITEBUFFER_SIZE)
       {
           dlim -= ntop;

           for (i = 0; i < ntop/sizeof(uint32_t); i++)
               writel (0, &((uint32_t *) dlim)[i]);
           wmb();
       }
       
       /* whole buffers */
       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
       {
           dlim -= E3_WRITEBUFFER_SIZE;

           writeq (0, &((uint64_t *) dlim)[0]);
           writeq (0, &((uint64_t *) dlim)[1]);
           wmb();
       }
       
       /* partial buffer at the bottom of the range */
       if (nbase < E3_WRITEBUFFER_SIZE)
       {
           for (i = 0; i < nbase/sizeof(uint32_t); i++)
               writel (0, &((uint32_t *) dbase)[i]);
           wmb();
       }
    }
}
22385 +
/*
 * Copy nbytes from host memory into SDRAM using 64-bit stores, in units
 * of the 16-byte elan write buffer, draining (wmb) after each unit.
 * Assumes nbytes is a multiple of 8 and "to" is 64-bit aligned; the
 * single-buffer case copies exactly one quadword, which implies
 * nbytes == 8 there - TODO confirm with callers.
 */
void
elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes)
{
    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
    virtaddr_t slim  = (virtaddr_t) from + nbytes;
    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t);

    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
    {
       writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]);
       wmb();
    }
    else
    {
       /* partial buffer at the top of the range */
       if (ntop < E3_WRITEBUFFER_SIZE)
       {
           slim -= ntop;
           dlim -= ntop;

           writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]);
           wmb();
       }
       
       /* whole buffers: two 64-bit stores each */
       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
       {
           dlim -= E3_WRITEBUFFER_SIZE;
           slim -= E3_WRITEBUFFER_SIZE;

           writeq (((uint64_t *) slim)[0], &((uint64_t *) dlim)[0]);
           writeq (((uint64_t *) slim)[1], &((uint64_t *) dlim)[1]);
           wmb();
       }
       
       /* partial buffer at the bottom of the range */
       if (nbase < E3_WRITEBUFFER_SIZE)
       {
           writeq (((uint64_t *) from)[0], &((uint64_t *) dbase)[0]);
           wmb();
       }
    }
}
22428 +
22429 +void
22430 +elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t to, int nbytes)
22431 +{
22432 +    virtaddr_t dbase = (virtaddr_t) sdram_off_to_mapping (dev, to);
22433 +    virtaddr_t dlim  = (virtaddr_t) dbase + nbytes;
22434 +    unsigned   nbase = E3_WRITEBUFFER_SIZE - E3_WRITEBUFFER_OFFSET (dbase);
22435 +    unsigned   ntop  = E3_WRITEBUFFER_OFFSET (dlim - sizeof (uint64_t)) + sizeof (uint64_t);
22436 +
22437 +    if (E3_WRITEBUFFER_BASE(dbase) == E3_WRITEBUFFER_BASE(dlim))
22438 +    {
22439 +       writeq (0, &((uint64_t *) dbase)[0]);
22440 +       wmb();
22441 +    }
22442 +    else
22443 +    {
22444 +       if (ntop < E3_WRITEBUFFER_SIZE)
22445 +       {
22446 +           dlim -= ntop;
22447 +
22448 +           writeq (0, &((uint64_t *) dlim)[0]);
22449 +           wmb();
22450 +       }
22451 +       
22452 +       while (dlim >= (dbase + E3_WRITEBUFFER_SIZE))
22453 +       {
22454 +           dlim -= E3_WRITEBUFFER_SIZE;
22455 +           
22456 +           writeq (0, &((uint64_t *) dlim)[0]);
22457 +           writeq (0, &((uint64_t *) dlim)[1]);
22458 +           wmb();
22459 +       }
22460 +       
22461 +       if (nbase < E3_WRITEBUFFER_SIZE)
22462 +       {
22463 +           writeq (0, &((uint64_t *) dbase)[0]);
22464 +           wmb();
22465 +       }
22466 +    }
22467 +}
22468 +
22469 +physaddr_t
22470 +elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t off)
22471 +{
22472 +#if defined(DIGITAL_UNIX)
22473 +    return (KSEG_TO_PHYS (sdram_off_to_mapping (dev, off)));
22474 +#elif defined(LINUX)
22475 +    return (kmem_to_phys ((void *) sdram_off_to_mapping (dev, off)));
22476 +#endif    
22477 +}
22478 +
22479 +/* sdram buddy allocator */
22480 +#define read_next(dev, block)          elan3_sdram_readl(dev, block + 0)
22481 +#define read_prev(dev, block)          elan3_sdram_readl(dev, block + 4)
22482 +#define write_next(dev, block, val)    (elan3_sdram_writel(dev, block + 0, val), val)
22483 +#define write_prev(dev, block, val)    (elan3_sdram_writel(dev, block + 4, val), val)
22484 +
22485 +#define freelist_insert(dev,idx,block)\
22486 +do {\
22487 +    sdramaddr_t next = dev->SdramFreeLists[(idx)];\
22488 +\
22489 +    /*\
22490 +     * block->prev = NULL;\
22491 +     * block->next = next;\
22492 +     * if (next != NULL)\
22493 +     *    next->prev = block;\
22494 +     * freelist = block;\
22495 +     */\
22496 +    write_prev (dev, block, (sdramaddr_t) 0);\
22497 +    write_next (dev, block, next);\
22498 +    if (next != (sdramaddr_t) 0)\
22499 +       write_prev (dev, next, block);\
22500 +    dev->SdramFreeLists[idx] = block;\
22501 +\
22502 +    dev->SdramFreeCounts[idx]++;\
22503 +    dev->Stats.SdramBytesFree += (SDRAM_MIN_BLOCK_SIZE << idx);\
22504 +} while (0)
22505 +
22506 +#define freelist_remove(dev,idx,block)\
22507 +do {\
22508 +    /*\
22509 +     * if (block->prev)\
22510 +     *     block->prev->next = block->next;\
22511 +     * else\
22512 +     *     dev->SdramFreeLists[idx] = block->next;\
22513 +     * if (block->next)\
22514 +     *     block->next->prev = block->prev;\
22515 +     */\
22516 +    sdramaddr_t blocknext = read_next (dev, block);\
22517 +    sdramaddr_t blockprev = read_prev (dev, block);\
22518 +\
22519 +    if (blockprev)\
22520 +       write_next (dev, blockprev, blocknext);\
22521 +    else\
22522 +       dev->SdramFreeLists[idx] = blocknext;\
22523 +    if (blocknext)\
22524 +       write_prev (dev, blocknext, blockprev);\
22525 +\
22526 +    dev->SdramFreeCounts[idx]--;\
22527 +    dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\
22528 +} while (0)
22529 +
22530 +#define freelist_removehead(dev,idx,block)\
22531 +do {\
22532 +    sdramaddr_t blocknext = read_next (dev, block);\
22533 +\
22534 +    if ((dev->SdramFreeLists[idx] = blocknext) != 0)\
22535 +       write_prev (dev, blocknext, 0);\
22536 +\
22537 +    dev->SdramFreeCounts[idx]--;\
22538 +    dev->Stats.SdramBytesFree -= (SDRAM_MIN_BLOCK_SIZE << idx);\
22539 +} while (0)
22540 +
22541 +#if defined(DEBUG)
22542 +static int
22543 +display_blocks (ELAN3_DEV *dev, int indx, char *string)
22544 +{
22545 +    sdramaddr_t block;
22546 +    int nbytes = 0;
22547 +
22548 +    printk ("%s - indx %d\n", string, indx);
22549 +    for (block = dev->SdramFreeLists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block))
22550 +    {
22551 +       printk ("  %lx", block);
22552 +       nbytes += (SDRAM_MIN_BLOCK_SIZE << indx);
22553 +    }
22554 +    printk ("\n");
22555 +
22556 +    return (nbytes);
22557 +}
22558 +
22559 +
22560 +void
22561 +elan3_sdram_display (ELAN3_DEV *dev, char *string)
22562 +{
22563 +    int indx;
22564 +    int nbytes = 0;
22565 +
22566 +    printk ("elan3_sdram_display: dev=%p\n", dev);
22567 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
22568 +       if (dev->SdramFreeLists[indx] != (sdramaddr_t) 0)
22569 +           nbytes += display_blocks (dev, indx, string);
22570 +    printk ("\n%d bytes free\n", nbytes);
22571 +}
22572 +
22573 +void
22574 +elan3_sdram_verify (ELAN3_DEV *dev)
22575 +{
22576 +    int indx, size, nbits, i, b;
22577 +    sdramaddr_t block;
22578 +
22579 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
22580 +    {
22581 +       unsigned count = 0;
22582 +
22583 +       for (block = dev->SdramFreeLists[indx]; block; block = read_next (dev, block), count++)
22584 +       {
22585 +           ELAN3_SDRAM_BANK *bank = sdram_off_to_bank (dev, block);
22586 +           unsigned         off  = sdram_off_to_offset (dev, block);
22587 +           int              bit  = sdram_off_to_bit (dev, indx, block);
22588 +
22589 +           if ((block & (size-1)) != 0)
22590 +               printk ("elan3_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx);
22591 +           
22592 +           if (bank == NULL || off > bank->Size)
22593 +               printk ("elan3_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx);
22594 +           else if (BT_TEST (bank->Bitmaps[indx], bit) == 0)
22595 +               printk ("elan3_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx);
22596 +           else
22597 +           {
22598 +               for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1)
22599 +               {
22600 +                   bit = sdram_off_to_bit (dev, i, block);
22601 +
22602 +                   for (b = 0; b < nbits; b++)
22603 +                       if (BT_TEST(bank->Bitmaps[i], bit + b))
22604 +                           printk ("elan3_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b);
22605 +               }
22606 +           }
22607 +       }
22608 +
22609 +       if (dev->SdramFreeCounts[indx] != count)
22610 +           printk ("elan3_sdram_verify: indx=%x expected %d got %d\n", indx, dev->SdramFreeCounts[indx], count);
22611 +    }
22612 +}
22613 +
22614 +#endif /* defined(DEBUG) */
22615 +
22616 +static void
22617 +free_block (ELAN3_DEV *dev, sdramaddr_t block, int indx)
22618 +{
22619 +    ELAN3_SDRAM_BANK *bank = sdram_off_to_bank (dev, block);
22620 +    unsigned        bit  = sdram_off_to_bit(dev, indx, block);
22621 +    unsigned         size = SDRAM_MIN_BLOCK_SIZE << indx;
22622 +
22623 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%lx indx=%d bit=%x\n", block, indx, bit);
22624 +
22625 +    ASSERT ((block & (size-1)) == 0);
22626 +    ASSERT (BT_TEST (bank->Bitmaps[indx], bit) == 0);
22627 +    
22628 +    while (BT_TEST (bank->Bitmaps[indx], bit ^ 1))
22629 +    {
22630 +       sdramaddr_t buddy = block ^ size;
22631 +       
22632 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%lx buddy=%lx indx=%d\n", block, buddy, indx);
22633 +
22634 +       BT_CLEAR (bank->Bitmaps[indx], bit ^ 1);
22635 +
22636 +       freelist_remove (dev, indx, buddy);
22637 +       
22638 +       block = (block < buddy) ? block : buddy;
22639 +       indx++;
22640 +       size <<= 1;
22641 +       bit >>= 1;
22642 +    }
22643 +
22644 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%lx indx=%d bit=%x\n", block, indx, bit);
22645 +
22646 +    freelist_insert (dev, indx, block);
22647 +
22648 +    BT_SET (bank->Bitmaps[indx], bit);
22649 +}
22650 +
22651 +void
22652 +elan3_sdram_init (ELAN3_DEV *dev)
22653 +{
22654 +    int indx;
22655 +
22656 +    spin_lock_init (&dev->SdramLock);
22657 +
22658 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
22659 +    {
22660 +       dev->SdramFreeLists[indx]  = (sdramaddr_t) 0;
22661 +       dev->SdramFreeCounts[indx] = 0;
22662 +    }
22663 +}
22664 +
22665 +void
22666 +elan3_sdram_fini (ELAN3_DEV *dev)
22667 +{
22668 +    spin_lock_destroy (&dev->SdramLock);
22669 +}
22670 +
22671 +void
22672 +elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top)
22673 +{
22674 +    register int indx;
22675 +    register unsigned long size;
22676 +
22677 +    /* align to the minimum block size */
22678 +    base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
22679 +    top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
22680 +
22681 +    /* don't allow 0 as a valid "base" */
22682 +    if (base == 0)
22683 +       base = E3_CACHE_SIZE;
22684 +
22685 +    /* carve the bottom to the biggest boundary */
22686 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
22687 +    {
22688 +       if ((base & size) == 0)
22689 +           continue;
22690 +
22691 +       if ((base + size) > top)
22692 +           break;
22693 +
22694 +       free_block (dev, base, indx);
22695 +       
22696 +       base += size;
22697 +    }
22698 +
22699 +    /* carve the top down to the biggest boundary */
22700 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
22701 +    {
22702 +       if ((top & size) == 0)
22703 +           continue;
22704 +
22705 +       if ((top - size) < base)
22706 +           break;
22707 +
22708 +       free_block (dev, (top - size), indx);
22709 +       
22710 +       top -= size;
22711 +    }
22712 +
22713 +    /* now free the space in between */
22714 +    while (base < top)
22715 +    {
22716 +       free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
22717 +
22718 +       base += SDRAM_MAX_BLOCK_SIZE;
22719 +    }
22720 +}
22721 +
22722 +sdramaddr_t
22723 +elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes)
22724 +{
22725 +    sdramaddr_t block;
22726 +    register int i, indx;
22727 +    unsigned long size;
22728 +    unsigned long flags;
22729 +
22730 +    spin_lock_irqsave (&dev->SdramLock, flags);
22731 +
22732 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
22733 +       ;
22734 +
22735 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
22736 +
22737 +    /* find the smallest block which is big enough for this allocation */
22738 +    for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
22739 +       if (dev->SdramFreeLists[i])
22740 +           break;
22741 +    
22742 +    if (i == SDRAM_NUM_FREE_LISTS)
22743 +    {
22744 +       spin_unlock_irqrestore (&dev->SdramLock, flags);
22745 +       return ((sdramaddr_t) 0);
22746 +    }
22747 +    
22748 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: use block=%lx indx=%d\n", dev->SdramFreeLists[i], i);
22749 +
22750 +    /* remove the block from the free list */
22751 +    freelist_removehead (dev, i, (block = dev->SdramFreeLists[i]));
22752 +
22753 +    /* clear the appropriate bit in the bitmap */
22754 +    BT_CLEAR (sdram_off_to_bank (dev, block)->Bitmaps[i], sdram_off_to_bit (dev,i, block));
22755 +
22756 +    /* and split it up as required */
22757 +    while (i-- > indx)
22758 +       free_block (dev, block + (size >>= 1), i);
22759 +
22760 +    PRINTF1 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_alloc: return block=%lx\n", block);
22761 +
22762 +    spin_unlock_irqrestore (&dev->SdramLock, flags);
22763 +
22764 +    ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
22765 +
22766 +    return ((sdramaddr_t) block);
22767 +}
22768 +
22769 +void
22770 +elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t block, int nbytes)
22771 +{
22772 +    register int indx;
22773 +    unsigned long size;
22774 +    unsigned long flags;
22775 +
22776 +    spin_lock_irqsave (&dev->SdramLock, flags);
22777 +
22778 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
22779 +       ;
22780 +
22781 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan3_sdram_free: indx=%d block=%lx\n", indx, block);
22782 +    
22783 +    free_block (dev, block, indx);
22784 +
22785 +    spin_unlock_irqrestore (&dev->SdramLock, flags);
22786 +}
22787 +
22788 +
22789 +
22790 +/*
22791 + * Local variables:
22792 + * c-file-style: "stroustrup"
22793 + * End:
22794 + */
22795 diff -urN clean/drivers/net/qsnet/elan3/tproc.c linux-2.6.9/drivers/net/qsnet/elan3/tproc.c
22796 --- clean/drivers/net/qsnet/elan3/tproc.c       1969-12-31 19:00:00.000000000 -0500
22797 +++ linux-2.6.9/drivers/net/qsnet/elan3/tproc.c 2004-11-15 06:14:12.000000000 -0500
22798 @@ -0,0 +1,778 @@
22799 +/*
22800 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
22801 + *
22802 + *    For licensing information please see the supplied COPYING file
22803 + *
22804 + */
22805 +
22806 +#ident "@(#)$Id: tproc.c,v 1.52 2004/11/15 11:14:12 mike Exp $"
22807 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc.c,v $ */
22808 +
22809 +#include <qsnet/kernel.h>
22810 +
22811 +#include <elan3/elanregs.h>
22812 +#include <elan3/elandev.h>
22813 +#include <elan3/elanvp.h>
22814 +#include <elan3/elan3mmu.h>
22815 +#include <elan3/elanctxt.h>
22816 +#include <elan3/elandebug.h>
22817 +#include <elan3/urom_addrs.h>
22818 +#include <elan3/thread.h>
22819 +#include <elan3/elansyscall.h>
22820 +#include <elan3/threadsyscall.h>
22821 +#include <elan3/intrinsics.h>
22822 +#include <elan3/vmseg.h>
22823 +
22824 +int
22825 +HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits)
22826 +{
22827 +    THREAD_TRAP  *trap  = dev->ThreadTrap;
22828 +    int           delay = 1;
22829 +
22830 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
22831 +
22832 +    trap->Status.Status  = read_reg32 (dev, Exts.TProcStatus);
22833 +    trap->sp             = read_reg32 (dev, Thread_Desc_SP);
22834 +    trap->pc             = read_reg32 (dev, ExecutePC);
22835 +    trap->npc            = read_reg32 (dev, ExecuteNPC);
22836 +    trap->StartPC        = read_reg32 (dev, StartPC);
22837 +    trap->mi             = GET_STATUS_TRAPTYPE(trap->Status);
22838 +    trap->TrapBits.Bits  = read_reg32 (dev, TrapBits.Bits);
22839 +    trap->DirtyBits.Bits = read_reg32 (dev, DirtyBits.Bits);
22840 +
22841 +    if ( ! (trap->Status.s.WakeupFunction == SleepOneTick) ) {
22842 +       int p,i;
22843 +       E3_uint32 reg = read_reg32 (dev, Exts.InterruptReg);    
22844 +
22845 +       ELAN_REG_REC(reg);
22846 +       p = elan_reg_rec_index;
22847 +       for(i=0;i<ELAN_REG_REC_MAX;i++) {
22848 +           if (elan_reg_rec_file[i] != NULL ) 
22849 +               printk("Elan Reg Record[%2d](%ld): cpu %d  reg %x [%d:%s]\n", p, elan_reg_rec_lbolt[p], elan_reg_rec_cpu[p], elan_reg_rec_reg[p],
22850 +                      elan_reg_rec_line[p], elan_reg_rec_file[p]);
22851 +           p = ( (p+1) % ELAN_REG_REC_MAX);
22852 +       }
22853 +    }
22854 +    
22855 +    ASSERT(trap->Status.s.WakeupFunction == SleepOneTick);
22856 +
22857 +    /* copy the four access fault areas */
22858 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc),          (void *) &trap->FaultSave, 16);
22859 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), (void *) &trap->DataFaultSave, 16);
22860 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), (void *) &trap->InstFaultSave, 16);
22861 +    elan3_sdram_copyq_from_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), (void *) &trap->OpenFaultSave, 16);
22862 +    
22863 +    /* copy the registers,  note the endian swap flips the odd registers into the even registers
22864 +       and vice versa. */
22865 +    copy_thread_regs (dev, trap->Registers);
22866 +
22867 +    /*
22868 +     * If the output was open then the ack may not have returned yet. Must wait for the
22869 +     * ack to become valid and update trap_dirty with the new value. Will simulate the
22870 +     * instructions later.
22871 +     */
22872 +    if (trap->TrapBits.s.OutputWasOpen)
22873 +    {
22874 +       trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits);
22875 +       while (! trap->TrapBits.s.AckBufferValid)
22876 +       {
22877 +           PRINTF0 (DBG_DEVICE, DBG_INTR, "tproc: waiting for ack to become valid\n");
22878 +           trap->TrapBits.Bits = read_reg32 (dev, TrapBits.Bits);
22879 +           DELAY (delay);
22880 +
22881 +           if ((delay <<= 1) == 0) delay = 1;
22882 +       }
22883 +    }
22884 +    
22885 +    /* update device statistics */
22886 +    BumpStat (dev, TProcTraps);
22887 +    switch (trap->mi)
22888 +    {
22889 +    case MI_UnimplementedError:
22890 +       if (trap->TrapBits.s.ForcedTProcTrap)
22891 +           BumpStat (dev, ForcedTProcTraps);
22892 +       if (trap->TrapBits.s.ThreadTimeout)
22893 +       {
22894 +           if (trap->TrapBits.s.PacketTimeout)
22895 +               BumpStat (dev, ThreadOutputTimeouts);
22896 +           else if (trap->TrapBits.s.PacketAckValue == E3_PAckError)
22897 +               BumpStat (dev, ThreadPacketAckErrors);
22898 +       }
22899 +       if (trap->TrapBits.s.TrapForTooManyInsts)
22900 +           BumpStat (dev, TrapForTooManyInsts);
22901 +       break;
22902 +    }
22903 +
22904 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, TProc), 16);
22905 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcData), 16);
22906 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcInst), 16);
22907 +    elan3_sdram_zeroq_sdram (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, ThreadProcOpen), 16);
22908 +
22909 +    *RestartBits |= RestartTProc;
22910 +
22911 +    return (TRUE);
22912 +}
22913 +
22914 +void
22915 +DeliverTProcTrap (ELAN3_DEV *dev, THREAD_TRAP *threadTrap, E3_uint32 Pend)
22916 +{
22917 +    ELAN3_CTXT      *ctxt;
22918 +    THREAD_TRAP      *trap;
22919 +
22920 +    ASSERT(SPINLOCK_HELD (&dev->IntrLock));
22921 +
22922 +    ctxt = ELAN3_DEV_CTX_TABLE(dev, threadTrap->Status.s.Context);
22923 +
22924 +    if (ctxt == NULL)
22925 +    {
22926 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "DeliverTProcTrap: context %x invalid\n", threadTrap->Status.s.Context);
22927 +       BumpStat (dev, InvalidContext);
22928 +    }
22929 +    else
22930 +    {
22931 +       if (ELAN3_OP_TPROC_TRAP (ctxt, threadTrap) == OP_DEFER)
22932 +       {
22933 +           if (ELAN3_QUEUE_REALLY_FULL (ctxt->ThreadTrapQ))
22934 +           {
22935 +               ctxt->Status |= CTXT_COMMAND_OVERFLOW_ERROR;
22936 +               StartSwapoutContext (ctxt, Pend, NULL);
22937 +           }
22938 +           else
22939 +           {
22940 +               trap = ELAN3_QUEUE_BACK (ctxt->ThreadTrapQ, ctxt->ThreadTraps);
22941 +               
22942 +               bcopy (threadTrap, trap, sizeof (THREAD_TRAP));
22943 +               
22944 +               PRINTF4 (ctxt, DBG_INTR, "DeliverTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n",
22945 +                        trap->sp, trap->pc, trap->npc, trap->StartPC);
22946 +               PRINTF3 (ctxt, DBG_INTR, "       mi=%s trap=%08x dirty=%08x\n",
22947 +                        MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits);
22948 +               PRINTF3 (ctxt, DBG_INTR, "       FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
22949 +                        trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
22950 +               PRINTF3 (ctxt, DBG_INTR, "       DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22951 +                        trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
22952 +               PRINTF3 (ctxt, DBG_INTR, "       InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22953 +                        trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
22954 +               PRINTF3 (ctxt, DBG_INTR, "       OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
22955 +                        trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
22956 +               
22957 +               PRINTF4 (ctxt, DBG_INTR, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
22958 +                        trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
22959 +                        trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
22960 +               PRINTF4 (ctxt, DBG_INTR, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
22961 +                        trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
22962 +                        trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
22963 +               PRINTF4 (ctxt, DBG_INTR, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
22964 +                        trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
22965 +                        trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
22966 +               PRINTF4 (ctxt, DBG_INTR, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
22967 +                        trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
22968 +                        trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
22969 +               PRINTF4 (ctxt, DBG_INTR, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
22970 +                        trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)],
22971 +                        trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
22972 +               PRINTF4 (ctxt, DBG_INTR, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
22973 +                        trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)],
22974 +                        trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
22975 +               PRINTF4 (ctxt, DBG_INTR, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
22976 +                        trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)],
22977 +                        trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
22978 +               PRINTF4 (ctxt, DBG_INTR, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
22979 +                        trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)],
22980 +                        trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
22981 +               
22982 +               ELAN3_QUEUE_ADD (ctxt->ThreadTrapQ);
22983 +               kcondvar_wakeupone (&ctxt->Wait, &dev->IntrLock);
22984 +               
22985 +               if (ELAN3_QUEUE_FULL (ctxt->ThreadTrapQ))
22986 +               {
22987 +                   PRINTF0 (ctxt, DBG_INTR, "DeliverTProcTrap: thread queue full,  must swap out\n");
22988 +                   ctxt->Status |= CTXT_THREAD_QUEUE_FULL;
22989 +                   
22990 +                   StartSwapoutContext (ctxt, Pend, NULL);
22991 +               }
22992 +           }
22993 +       }
22994 +    }
22995 +}
22996 +
22997 +int
22998 +NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
22999 +{
23000 +    ELAN3_DEV *dev = ctxt->Device;
23001 +
23002 +    ASSERT (SPINLOCK_HELD (&dev->IntrLock));
23003 +    
23004 +    if (ELAN3_QUEUE_EMPTY (ctxt->ThreadTrapQ))
23005 +       return (0);
23006 +
23007 +    *trap = *ELAN3_QUEUE_FRONT (ctxt->ThreadTrapQ, ctxt->ThreadTraps);
23008 +    ELAN3_QUEUE_REMOVE (ctxt->ThreadTrapQ);
23009 +    
23010 +    return (1);
23011 +}
23012 +
23013 +void
23014 +ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
23015 +{
23016 +    int       i;
23017 +    int       res;
23018 +    E3_Addr   StackPointer;
23019 +
23020 +    PRINTF4 (ctxt, DBG_TPROC, "ResolveTProcTrap: SP=%08x PC=%08x NPC=%08x StartPC %08x\n",
23021 +            trap->sp, trap->pc, trap->npc, trap->StartPC);
23022 +    PRINTF3 (ctxt, DBG_TPROC, "       mi=%s trap=%08x dirty=%08x\n",
23023 +            MiToName (trap->mi), trap->TrapBits.Bits, trap->DirtyBits.Bits);
23024 +    PRINTF3 (ctxt, DBG_TPROC, "       FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
23025 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
23026 +    PRINTF3 (ctxt, DBG_TPROC, "       DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
23027 +            trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
23028 +    PRINTF3 (ctxt, DBG_TPROC, "       InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
23029 +            trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
23030 +    PRINTF3 (ctxt, DBG_TPROC, "       OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
23031 +            trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
23032 +    
23033 +    PRINTF4 (ctxt, DBG_TPROC, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
23034 +            trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
23035 +            trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
23036 +    PRINTF4 (ctxt, DBG_TPROC, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
23037 +            trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
23038 +            trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
23039 +    PRINTF4 (ctxt, DBG_TPROC, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
23040 +            trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
23041 +            trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
23042 +    PRINTF4 (ctxt, DBG_TPROC, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
23043 +            trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
23044 +            trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
23045 +    PRINTF4 (ctxt, DBG_TPROC, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
23046 +            trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)],
23047 +            trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
23048 +    PRINTF4 (ctxt, DBG_TPROC, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
23049 +            trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)],
23050 +            trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
23051 +    PRINTF4 (ctxt, DBG_TPROC, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
23052 +            trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)],
23053 +            trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
23054 +    PRINTF4 (ctxt, DBG_TPROC, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
23055 +            trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)],
23056 +            trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
23057 +           
23058 +
23059 +    BumpUserStat (ctxt, TProcTraps);
23060 +
23061 +    switch (trap->mi)
23062 +    {
23063 +    case MI_UnimplementedError:
23064 +    {
23065 +       /*
23066 +        * This occurs if the threads processor trapped. All other cases will be for the ucode
23067 +        * thread trapping.
23068 +        */
23069 +       int restart = 1;
23070 +       int skip    = 0;
23071 +       
23072 +       PRINTF1 (ctxt, DBG_TPROC, "TProc: Mi=Unimp. Using trap->TrapBits=%x\n", trap->TrapBits.Bits);
23073 +       
23074 +       /*
23075 +        * Data Access Exception.
23076 +        */
23077 +       if (trap->TrapBits.s.DataAccessException)
23078 +       {
23079 +           ASSERT (CTXT_IS_KERNEL(ctxt) || trap->DataFaultSave.s.FSR.Status == 0 ||
23080 +                   ctxt->Capability.cap_mycontext == trap->DataFaultSave.s.FaultContext);
23081 +
23082 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: DataAccessException %08x\n", trap->DataFaultSave.s.FaultAddress);
23083 +
23084 +           if ((res = elan3_pagefault (ctxt, &trap->DataFaultSave, 1)) != ESUCCESS)
23085 +           {
23086 +               PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for data %08x\n",
23087 +                        trap->DataFaultSave.s.FaultAddress);
23088 +
23089 +               if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE)
23090 +                   restart = 0;
23091 +           }
23092 +       }
23093 +       
23094 +       /* 
23095 +        * Instruction Access Exception.
23096 +        */
23097 +       if (trap->TrapBits.s.InstAccessException)
23098 +       {
23099 +           ASSERT (CTXT_IS_KERNEL (ctxt) || trap->InstFaultSave.s.FSR.Status == 0 ||
23100 +                   ctxt->Capability.cap_mycontext == trap->InstFaultSave.s.FaultContext);
23101 +           
23102 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: InstAccessException %08x\n", trap->InstFaultSave.s.FaultAddress);
23103 +
23104 +           if ((res = elan3_pagefault (ctxt, &trap->InstFaultSave, 1)) != ESUCCESS)
23105 +           {
23106 +               PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed for inst %08x\n",
23107 +                        trap->InstFaultSave.s.FaultAddress);
23108 +
23109 +               ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->InstFaultSave, res);
23110 +               restart = 0;
23111 +           }
23112 +       }
23113 +       
23114 +       /*
23115 +        * Forced TProc trap/Unimplemented instruction
23116 +        *
23117 +        *  If there is a force tproc trap then don't look at 
23118 +        *  the unimplemented instruction bit - since it can
23119 +        *  be set in obscure circumstances.
23120 +        */
23121 +       if (trap->TrapBits.s.ForcedTProcTrap)
23122 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: forced tproc trap, restarting\n");
23123 +       else if (trap->TrapBits.s.Unimplemented)
23124 +       {
23125 +           E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK);
23126 +
23127 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: unimplemented instruction %08x\n", instr);
23128 +
23129 +           if ((instr & OPCODE_MASK) == OPCODE_Ticc &&
23130 +               (instr & OPCODE_IMM)  == OPCODE_IMM &&
23131 +               (Ticc_COND(instr)     == Ticc_TA))
23132 +           {
23133 +               switch (INSTR_IMM(instr))
23134 +               {
23135 +               case ELAN3_ELANCALL_TRAPNUM:
23136 +                   /*
23137 +                    * Since the thread cannot easily access the global variable which holds
23138 +                    * the elan system call number, we provide a different trap for the elan
23139 +                    * system call, and copy the system call number into %g1 before calling
23140 +                    * ThreadSyscall().
23141 +                    */
23142 +                   BumpUserStat (ctxt, ThreadElanCalls);
23143 +
23144 +                   if (ThreadElancall (ctxt, trap, &skip) != ESUCCESS)
23145 +                   {
23146 +                       ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap);
23147 +                       restart = 0;
23148 +                   }
23149 +                   break;
23150 +
23151 +               case ELAN3_SYSCALL_TRAPNUM:
23152 +                   BumpUserStat (ctxt, ThreadSystemCalls);
23153 +
23154 +                   if (ThreadSyscall (ctxt, trap, &skip) != ESUCCESS)
23155 +                   {
23156 +                       ElanException (ctxt, EXCEPTION_BAD_SYSCALL, THREAD_PROC, trap);
23157 +                       restart = 0;
23158 +                   }
23159 +                   break;
23160 +
23161 +               case ELAN3_DEBUG_TRAPNUM:
23162 +                   ElanException (ctxt, EXCEPTION_DEBUG, THREAD_PROC, trap);
23163 +                   skip = 1;
23164 +                   break;
23165 +                   
23166 +               case ELAN3_ABORT_TRAPNUM:
23167 +               default:
23168 +                   ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr);
23169 +                   restart = 0;
23170 +                   break;
23171 +               }
23172 +                   
23173 +           }
23174 +           else
23175 +           {
23176 +               ElanException (ctxt, EXCEPTION_UNIMP_INSTR, THREAD_PROC, trap, instr);
23177 +               restart = 0;
23178 +           }
23179 +       }
23180 +       
23181 +       /*
23182 +        * Faulted fetching routes.
23183 +        */
23184 +       if (trap->TrapBits.s.OpenRouteFetch)
23185 +       {
23186 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: OpenRouteFetch %08x\n", trap->OpenFaultSave.s.FaultAddress);
23187 +           
23188 +           if ((res = ResolveVirtualProcess (ctxt, trap->OpenFaultSave.s.FaultAddress)) != ESUCCESS &&
23189 +               ElanException (ctxt, EXCEPTION_INVALID_PROCESS, THREAD_PROC, trap, trap->DataFaultSave.s.FaultAddress, res) != OP_IGNORE)
23190 +           {
23191 +               restart = 0;
23192 +           }
23193 +           else if (RollThreadToClose (ctxt, trap, E3_PAckDiscard) != ESUCCESS)        /* Force a discard */
23194 +           {
23195 +               restart = 0;
23196 +           }
23197 +       }
23198 +       
23199 +       /*
23200 +        * Thread Timeout
23201 +        */
23202 +       if (trap->TrapBits.s.ThreadTimeout)
23203 +       {
23204 +           if (ElanException (ctxt, EXCEPTION_PACKET_TIMEOUT, THREAD_PROC, trap) != OP_IGNORE)
23205 +               restart = 0;
23206 +           else
23207 +           {
23208 +               PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: timeout or PAckError!\n");
23209 +               
23210 +               /* Might deschedule the thread for a while or mark the link error here. */
23211 +               if (! trap->TrapBits.s.OutputWasOpen && RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
23212 +               {
23213 +                   restart = 0;
23214 +               }
23215 +           }
23216 +       }
23217 +       
23218 +       /*
23219 +        * Open exception
23220 +        */
23221 +       if (trap->TrapBits.s.OpenException)
23222 +       {
23223 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: open exception\n");
23224 +           if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
23225 +               restart = 0;
23226 +       }
23227 +       
23228 +       /*
23229 +        * Too many instructions.
23230 +        */
23231 +       if (trap->TrapBits.s.TrapForTooManyInsts)
23232 +       {
23233 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: too many instructions\n");
23234 +           if (ElanException (ctxt, EXCEPTION_THREAD_KILLED, THREAD_PROC, trap) != OP_IGNORE)
23235 +               restart = 0;
23236 +       }
23237 +       
23238 +       if (restart)
23239 +       {
23240 +           /*
23241 +            * If the output was open when the trap was taken then the trap code must move
23242 +            * the PC on past the close instruction and simulate the effect of all the instructions
23243 +            * that do not output onto the link. The value of the ack received is then used to
23244 +            * simulate the close instruction.
23245 +            */
23246 +           if (trap->TrapBits.s.OutputWasOpen && RollThreadToClose(ctxt, trap, trap->TrapBits.s.PacketAckValue) != ESUCCESS)
23247 +           {
23248 +               /*
23249 +                * Don't restart if we couldn't roll it forwards 
23250 +                * to a close instruction.
23251 +                */
23252 +               break;
23253 +           }
23254 +
23255 +           /*
23256 +            * We must check back 3 instructions from the PC,  and if we see the
23257 +            * c_close_cookie() sequence then we must execute the instructions to
23258 +            * the end of it.
23259 +            */
23260 +           /* XXXX: code to be written */
23261 +           
23262 +           StackPointer = SaveThreadToStack (ctxt, trap, skip);
23263 +           
23264 +           ReissueStackPointer (ctxt, StackPointer);
23265 +       }
23266 +       
23267 +       break;
23268 +    }
23269 +    
23270 +    /*
23271 +     * This case is different from the others as %o6 has been overwritten with
23272 +     * the SP. The real PC can be read from StartPC and written back
23273 +     * into %o6 on the stack.
23274 +     */
23275 +    case MI_TProcNext:                 /* Reading the outs block */
23276 +    {
23277 +       E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
23278 +
23279 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
23280 +       {
23281 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
23282 +
23283 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing StartPc to o6\n");
23284 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
23285 +           break;
23286 +       }
23287 +       ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->StartPC & PC_MASK);
23288 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
23289 +       /* DROPTHROUGH */
23290 +    }
23291 +    /*
23292 +     * all of these will be generated when starting up a thread.
23293 +     * Just re-issue the command after fixing the trap. The ucode keeps the startup
23294 +     * from trap information in Thread_Desc_SP while it is still loading the regs.
23295 +     */
23296 +    case MI_WaitForGlobalsRead:                /* Reading the globals block (trap restart) */
23297 +    case MI_WaitForNPCRead:            /* Reading the nPC, V and C (trap restart) */
23298 +    case MI_WaitForPCload:             /* Reading the PC, N and Z (trap restart) */
23299 +    case MI_WaitForInsRead:            /* Reading the ins block (trap restart) */
23300 +    case MI_WaitForLocals:             /* Reading the ins block (trap restart) */
23301 +    case MI_WaitForPCload2:            /* Reading the PC (normal thread start) */
23302 +    case MI_WaitForSpStore:            /* Writing the SP to the outs block */
23303 +       PRINTF2 (ctxt, DBG_TPROC, "ResolveTProcTrap: %s %08x\n", MiToName (trap->mi), trap->InstFaultSave.s.FaultAddress);
23304 +
23305 +       if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
23306 +       {
23307 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n",
23308 +                    trap->FaultSave.s.FaultAddress);
23309 +           if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, &trap->FaultSave, trap, res) != OP_IGNORE)
23310 +               break;
23311 +       }
23312 +
23313 +       ReissueStackPointer (ctxt, trap->sp);
23314 +       break;
23315 +       
23316 +       /*
23317 +        * These traps could occur after the threads proc has stopped (either for a wait,
23318 +        * break, or suspend, but not a trap). Must simulate the uCode's job.
23319 +        */
23320 +    case MI_WaitForOutsWrite:          /* Writing the outs block */
23321 +    case MI_WaitForNPCWrite:           /* Writing the nPC block */
23322 +    {
23323 +       E3_uint32 DeschedBits = (trap->TrapBits.Bits & E3_TProcDescheduleMask);
23324 +       E3_Addr   stack       = (trap->sp & SP_MASK) - sizeof (E3_Stack);
23325 +       
23326 +       PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: trapped on %s while stopping a thread\n", MiToName(trap->mi));
23327 +       
23328 +       /*
23329 +        * Copy npc into o6.
23330 +        */
23331 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)] = trap->npc;
23332 +       
23333 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
23334 +       {
23335 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
23336 +
23337 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing outs to stack\n");
23338 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
23339 +           break;
23340 +       }
23341 +       
23342 +       /*
23343 +        * Now write the outs back to the stack. NOTE the endian flip is undone.
23344 +        */
23345 +       for (i = 0; i < 8; i++)
23346 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]), trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
23347 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
23348 +       
23349 +       /*
23350 +        * thread has been saved. Now find out why the thread proc stopped.
23351 +        */
23352 +       if (DeschedBits == E3_TProcDescheduleSuspend)
23353 +       {
23354 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: suspend instruction executed\n");
23355 +           break;
23356 +       }
23357 +       
23358 +       /*
23359 +        * Break. Just reissue the command.
23360 +        */
23361 +       if (DeschedBits == E3_TProcDescheduleBreak)
23362 +       {
23363 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: break instruction, reissue sp %08x\n", trap->sp);
23364 +           ReissueStackPointer (ctxt, trap->sp);
23365 +           break;
23366 +       }
23367 +       
23368 +       ASSERT (DeschedBits == E3_TProcDescheduleWait);
23369 +        
23370 +       /* DROPTHROUGH to fix up a wait event */
23371 +    }
23372 +    
23373 +    /*
23374 +     * Trapped here trying to execute a wait instruction. All the thread state has already
23375 +     * been saved and the trap has been fixed so simplest thing to do is to start the
23376 +     * thread up at the wait instruction again.
23377 +     */
23378 +    case MI_WaitForEventWaitAddr:      /* Reading back the %o0,%o1 pair for a
23379 +                                          wait event instr. */
23380 +    case MI_WaitForWaitEventAccess:    /* Locked dword read of the event location.
23381 +                                          Note that this read is done with write
23382 +                                          permissions so we never get a trap on the write */
23383 +    {
23384 +       E3_Addr stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
23385 +       
23386 +       if ((res = elan3_pagefault (ctxt, &trap->FaultSave, 1)) != ESUCCESS)
23387 +       {
23388 +           PRINTF1 (ctxt, DBG_TPROC, "ResolveTProcTrap: elan3_pagefault failed at %08x\n", 
23389 +                    trap->FaultSave.s.FaultAddress);
23390 +           if (ElanException (ctxt, EXCEPTION_INVALID_ADDR, THREAD_PROC, trap, &trap->DataFaultSave, res) != OP_IGNORE)
23391 +               break;
23392 +       }
23393 +
23394 +       if (ELAN3_OP_START_FAULT_CHECK (ctxt))
23395 +       {
23396 +           ELAN3_OP_END_FAULT_CHECK (ctxt);
23397 +
23398 +           PRINTF0 (ctxt, DBG_TPROC, "ResolveTProcTrap: faulted writing pc to stack\n");
23399 +           ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
23400 +           break;
23401 +       }
23402 +
23403 +       ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[6]), trap->pc);
23404 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
23405 +       
23406 +       ReissueStackPointer (ctxt, trap->sp);
23407 +       break;
23408 +    }
23409 +    
23410 +    /*
23411 +     * Assume the fault will be fixed by FixupEventTrap.
23412 +     */
23413 +    default:
23414 +       FixupEventTrap (ctxt, THREAD_PROC, trap, trap->mi, &trap->FaultSave, 0);
23415 +       break;
23416 +    }
23417 +}
23418 +
23419 +int
23420 +TProcNeedsRestart (ELAN3_CTXT *ctxt)
23421 +{
23422 +    return (ctxt->ItemCount[LIST_THREAD] != 0);
23423 +}
23424 +
23425 +void
23426 +RestartTProcItems (ELAN3_CTXT *ctxt)
23427 +{
23428 +    void     *item;
23429 +    E3_uint32 StackPointer;
23430 +
23431 +    kmutex_lock (&ctxt->SwapListsLock);
23432 +    
23433 +    while (ctxt->ItemCount[LIST_THREAD])
23434 +    {
23435 +       if (! ELAN3_OP_GET_WORD_ITEM (ctxt, LIST_THREAD, &item, &StackPointer))
23436 +           ctxt->ItemCount[LIST_THREAD] = 0;
23437 +       else
23438 +       {
23439 +           if (IssueCommand (ctxt, offsetof (E3_CommandPort, RunThread), StackPointer, 0) == ISSUE_COMMAND_RETRY)
23440 +           {
23441 +               ELAN3_OP_PUTBACK_ITEM (ctxt, LIST_THREAD, item);
23442 +               kmutex_unlock (&ctxt->SwapListsLock);
23443 +               return;
23444 +           }
23445 +           
23446 +           ctxt->ItemCount[LIST_THREAD]--;
23447 +           ELAN3_OP_FREE_WORD_ITEM (ctxt, item);
23448 +       }
23449 +    }
23450 +    kmutex_unlock (&ctxt->SwapListsLock);
23451 +}
23452 +
23453 +E3_Addr
23454 +SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction)
23455 +{
23456 +    E3_Addr      stack = (trap->sp & SP_MASK) - sizeof (E3_Stack);
23457 +    E3_Addr      orflag;
23458 +    register int i;
23459 +
23460 +    /*
23461 +     * When the thread deschedules normally, the N & Z flags are written 
23462 +     * to the stack in o6, and the V & C flags are lost.
23463 +     * Since the Elan will store the NPC into o6 (to skip the instruction), 
23464 +     * the CC flags are visible to the trap handler in the trapped PC and NPC.   
23465 +     * If the instruction needs to be re-executed then the CC flags need to be
23466 +     * kept in the right place to be read in when the thread re-starts.
23467 +     *
23468 +     * PC  has N & Z from trapped NPC.
23469 +     * NPC has V & C from trapped PC.
23470 +     */
23471 +    if (SkipInstruction)
23472 +    {
23473 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)]    = trap->npc;
23474 +       trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = ((trap->npc & PC_MASK) + 4) | (trap->pc & CC_MASK);
23475 +    }
23476 +    else
23477 +    {
23478 +       trap->Registers[REG_OUTS+(6^WordEndianFlip)]    = (trap->pc & PC_MASK) | (trap->npc & CC_MASK);
23479 +       trap->Registers[REG_GLOBALS+(0^WordEndianFlip)] = (trap->npc & PC_MASK) | (trap->pc & CC_MASK);
23480 +    }
23481 +    
23482 +    if (ELAN3_OP_START_FAULT_CHECK(ctxt))
23483 +    {
23484 +       PRINTF0 (ctxt, DBG_TPROC, "RestartThread: faulted writing out thread\n");
23485 +       ELAN3_OP_END_FAULT_CHECK(ctxt);
23486 +
23487 +       ElanException (ctxt, EXCEPTION_CANNOT_SAVE_THREAD, THREAD_PROC, NULL);
23488 +       return ((E3_Addr) 0);
23489 +    }
23490 +
23491 +
23492 +#ifdef DEBUG_PRINTF
23493 +    PRINTF4 (ctxt, DBG_TPROC, "SaveThreadToStack: SP=%08x PC=%08x NPC=%08x DIRTY=%08x\n",
23494 +            trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits);
23495 +    if (trap->DirtyBits.s.GlobalsDirty)
23496 +    {
23497 +       PRINTF4 (ctxt, DBG_TPROC, "       g0=%08x g1=%08x g2=%08x g3=%08x\n", 
23498 +                trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
23499 +                trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
23500 +       PRINTF4 (ctxt, DBG_TPROC, "       g4=%08x g5=%08x g6=%08x g7=%08x\n", 
23501 +                trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
23502 +                trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
23503 +    }
23504 +    if (trap->DirtyBits.s.OutsDirty)
23505 +    {
23506 +       PRINTF4 (ctxt, DBG_TPROC, "       o0=%08x o1=%08x o2=%08x o3=%08x\n", 
23507 +                trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
23508 +                trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
23509 +       PRINTF4 (ctxt, DBG_TPROC, "       o4=%08x o5=%08x o6=%08x o7=%08x\n", 
23510 +                trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
23511 +                trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
23512 +    }
23513 +    if (trap->DirtyBits.s.LocalsDirty)
23514 +    {
23515 +       PRINTF4 (ctxt, DBG_TPROC, "       l0=%08x l1=%08x l2=%08x l3=%08x\n", 
23516 +                trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
23517 +                trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
23518 +       PRINTF4 (ctxt, DBG_TPROC, "       l4=%08x l5=%08x l6=%08x l7=%08x\n", 
23519 +                trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
23520 +                trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
23521 +    }
23522 +    if (trap->DirtyBits.s.InsDirty)
23523 +    {
23524 +       PRINTF4 (ctxt, DBG_TPROC, "       i0=%08x i1=%08x i2=%08x i3=%08x\n", 
23525 +                trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
23526 +                trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
23527 +       PRINTF4 (ctxt, DBG_TPROC, "       i4=%08x i5=%08x i6=%08x i7=%08x\n", 
23528 +                trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
23529 +                trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
23530 +    }
23531 +#endif 
23532 +    
23533 +    PRINTF1 (ctxt, DBG_TPROC, "flushing registers to stack %08x\n", stack);
23534 +
23535 +    /* 
23536 +     * NOTE - store the register to the stack in reverse order, since the stack 
23537 +     * will be allocated in sdram, and we cannot use the sdram accessing functions 
23538 +     * here, as it is "mapped" in user-space.
23539 +     */
23540 +    for (i = 0; i < 8; i++)
23541 +    {
23542 +       if (trap->DirtyBits.s.GlobalsDirty & (1 << i))
23543 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Globals[i]), trap->Registers[REG_GLOBALS+(i^WordEndianFlip)]);
23544 +       if (trap->DirtyBits.s.OutsDirty & (1 << i))
23545 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Outs[i]),    trap->Registers[REG_OUTS+(i^WordEndianFlip)]);
23546 +       if (trap->DirtyBits.s.LocalsDirty & (1 << i))
23547 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Locals[i]),  trap->Registers[REG_LOCALS+(i^WordEndianFlip)]);
23548 +       if (trap->DirtyBits.s.InsDirty & (1 << i))
23549 +           ELAN3_OP_STORE32 (ctxt, stack + offsetof (E3_Stack, Ins[i]),     trap->Registers[REG_INS+(i^WordEndianFlip)]);
23550 +    }
23551 +
23552 +    /* always restore all registers */
23553 +    orflag = ThreadRestartFromTrapBit | ThreadReloadAllRegs;
23554 +    
23555 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
23556 +
23557 +    return (trap->sp | orflag);
23558 +}
23559 +
23560 +void
23561 +ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer)
23562 +{
23563 +    PRINTF1 (ctxt, DBG_TPROC, "ReissueStackPointer : Queue SP %08x\n", StackPointer);
23564 +    
23565 +    kmutex_lock (&ctxt->SwapListsLock);
23566 +    ctxt->ItemCount[LIST_THREAD]++;
23567 +    ELAN3_OP_PUT_WORD_ITEM (ctxt, LIST_THREAD, StackPointer);
23568 +    kmutex_unlock (&ctxt->SwapListsLock);
23569 +}
23570 +
23571 +
23572 +/*
23573 + * Local variables:
23574 + * c-file-style: "stroustrup"
23575 + * End:
23576 + */
23577 diff -urN clean/drivers/net/qsnet/elan3/tprocinsts.c linux-2.6.9/drivers/net/qsnet/elan3/tprocinsts.c
23578 --- clean/drivers/net/qsnet/elan3/tprocinsts.c  1969-12-31 19:00:00.000000000 -0500
23579 +++ linux-2.6.9/drivers/net/qsnet/elan3/tprocinsts.c    2003-09-24 09:57:25.000000000 -0400
23580 @@ -0,0 +1,401 @@
23581 +/*
23582 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
23583 + *
23584 + *    For licensing information please see the supplied COPYING file
23585 + *
23586 + */
23587 +
23588 +#ident "@(#)$Id: tprocinsts.c,v 1.20 2003/09/24 13:57:25 david Exp $"
23589 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tprocinsts.c,v $*/
23590 +
23591 +#include <qsnet/kernel.h>
23592 +
23593 +#include <elan3/elanregs.h>
23594 +#include <elan3/elandev.h>
23595 +#include <elan3/elanvp.h>
23596 +#include <elan3/elan3mmu.h>
23597 +#include <elan3/elanctxt.h>
23598 +#include <elan3/elandebug.h>
23599 +#include <elan3/urom_addrs.h>
23600 +#include <elan3/thread.h>
23601 +#include <elan3/vmseg.h>
23602 +#include <elan3/elan3mmu.h>
23603 +
23604 +#define MAXINSTR       256             /* # Instructions to look at while looking for close */
23605 +
23606 +static E3_uint32 ALU (ELAN3_CTXT *ctxt,
23607 +                     E3_uint32 fcode, E3_uint32 X, E3_uint32 Y,
23608 +                     E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V);
23609 +
23610 +char *OpcodeNames[] =
23611 +{
23612 +   "ADD   ",
23613 +   "AND   ",
23614 +   "OR    ",
23615 +   "XOR   ",
23616 +   "SUB   ",
23617 +   "ANDN  ",
23618 +   "ORN   ",
23619 +   "XNOR  ",
23620 +   "ADDX  ",
23621 +   "UNIP  ",
23622 +   "UMUL  ",
23623 +   "SMUL  ",
23624 +   "SUBX  ",
23625 +   "UNIP  ",
23626 +   "UDIV  ",
23627 +   "SDIV  ",
23628 +   "ADDcc ",
23629 +   "ANDcc ",
23630 +   "ORcc  ",
23631 +   "XORcc ",
23632 +   "SUBcc ",
23633 +   "ANDNcc",
23634 +   "ORNcc ",
23635 +   "XNORcc",
23636 +   "ADDXcc",
23637 +   "UNIPcc",
23638 +   "UMULcc",
23639 +   "SMULcc",
23640 +   "SUBXcc",
23641 +   "UNIPcc",
23642 +   "UDIVcc",
23643 +   "SDIVcc"
23644 +};
23645 +
23646 +#define REGISTER_VALUE(trap, rN)               (((rN) == 0) ? 0 : (trap)->Registers[(rN)^WordEndianFlip])
23647 +#define ASSIGN_REGISTER(trap, rN, value)       ((rN) != 0 ? trap->Registers[(rN)^WordEndianFlip] = (value) : 0)
23648 +
23649 +int
23650 +RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal)
23651 +{
23652 +    E3_Addr   pc      = (trap->pc & PC_MASK);
23653 +    E3_Addr   npc     = (trap->npc & PC_MASK);
23654 +    E3_uint32 Z       = (trap->npc & PSR_Z_BIT) ? 1 : 0;
23655 +    E3_uint32 N       = (trap->npc & PSR_N_BIT) ? 1 : 0;
23656 +    E3_uint32 C       = (trap->pc  & PSR_C_BIT) ? 1 : 0;
23657 +    E3_uint32 V       = (trap->pc  & PSR_V_BIT) ? 1 : 0;
23658 +    E3_uint32 instr;
23659 +    E3_Addr   addr;
23660 +
23661 +    if (ELAN3_OP_START_FAULT_CHECK (ctxt))
23662 +    {
23663 +    failed:
23664 +       ELAN3_OP_END_FAULT_CHECK (ctxt);
23665 +
23666 +       ElanException (ctxt, EXCEPTION_SIMULATION_FAILED, THREAD_PROC, trap);
23667 +       return (EFAULT);
23668 +    }
23669 +
23670 +    /*
23671 +     * Thread trapped with output open, or while closing,
23672 +     * so roll the PC forwards to the instruction after the
23673 +     * next c_close, and execute that with the register
23674 +     * specified in c_close set to the trap which occurred.
23675 +     * (This is not 1 which means an ACK)
23676 +     */
23677 +    PRINTF1 (ctxt, DBG_TPROC, "RollThreadToClose: roll pc %x to c_close\n", pc);
23678 +    
23679 +    for (;;)
23680 +    {
23681 +       instr = ELAN3_OP_LOAD32 (ctxt, pc);
23682 +
23683 +       PRINTF2 (ctxt, DBG_TPROC, "RollThreadToClose: PC=%x INSTR=%x\n", pc, instr);
23684 +
23685 +       switch (OPCODE_CLASS(instr))
23686 +       {
23687 +       case OPCODE_CLASS_0:
23688 +           switch ((instr) & OPCODE_CLASS0_MASK)
23689 +           {
23690 +           case OPCODE_SETHI:
23691 +               PRINTF3 (ctxt, DBG_TPROC, "PC %x : sethi r%d = %x\n", pc, INSTR_RD(instr), instr << 10);
23692 +
23693 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), instr << 10);
23694 +               break;
23695 +
23696 +           case OPCODE_SENDREG:
23697 +               PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendreg\n", pc);
23698 +               break;
23699 +               
23700 +           case OPCODE_SENDMEM:
23701 +               PRINTF1 (ctxt, DBG_TPROC, "PC %x : sendmem\n", pc);
23702 +               break;
23703 +               
23704 +           case OPCODE_BICC:
23705 +           {
23706 +               int     DoBranch   = (instr >> 28) & 1;
23707 +               int     CondBranch = 1;
23708 +               E3_Addr OldnPC     = npc;
23709 +
23710 +               PRINTF5 (ctxt, DBG_TPROC, "PC %x : Bicc Z=%x N=%x C=%x V=%x ", pc, Z, N, C, V);
23711 +               switch (instr & OPCODE_BICC_MASK)
23712 +               {
23713 +               case OPCODE_BICC_BN:    CondBranch = 0;                 break;
23714 +               case OPCODE_BICC_BE:    DoBranch ^= Z;                  break;
23715 +               case OPCODE_BICC_BLE:   DoBranch ^= Z | (N ^ V);        break;
23716 +               case OPCODE_BICC_BL:    DoBranch ^= N ^ V;              break;
23717 +               case OPCODE_BICC_BLEU:  DoBranch ^= C | Z;              break;
23718 +               case OPCODE_BICC_BCS:   DoBranch ^= C;                  break;
23719 +               case OPCODE_BICC_BNEG:  DoBranch ^= N;                  break;
23720 +               case OPCODE_BICC_BVS:   DoBranch ^= V;                  break;
23721 +               }
23722 +
23723 +               /* Do the branch */
23724 +               if (DoBranch != 0)
23725 +               {
23726 +                   npc = pc + (((instr & 0x3fffff) << 2) |
23727 +                               (((instr & 0x200000) != 0) ? 0xff000000 : 0));
23728 +                   
23729 +                   PRINTF2 (ctxt, DBG_TPROC, "PC %x : branch taken to %x\n", pc, npc);
23730 +               }
23731 +               else
23732 +               {
23733 +                   npc = npc + 4;
23734 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch not taken\n", pc);
23735 +               }
23736 +               pc = OldnPC;
23737 +
23738 +               /* Test if the next instruction is annulled */
23739 +               if (((instr & OPCODE_BICC_ANNUL) != 0) &
23740 +                   ((DoBranch == 0) | (CondBranch == 0)))
23741 +               {
23742 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : branch annulled\n", pc);
23743 +
23744 +                   pc = npc;
23745 +                   npc += 4;
23746 +               }
23747 +
23748 +               /*
23749 +                * we've already consumed the instruction - so continue rather
23750 +                * than break;
23751 +                */
23752 +               continue;
23753 +           }
23754 +           
23755 +           default:
23756 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 0 instr %x\n", pc, instr);
23757 +               goto failed;
23758 +           }
23759 +           break;
23760 +
23761 +       case OPCODE_CLASS_1:
23762 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 1 instr %x\n", pc, instr);
23763 +               goto failed;
23764 +               
23765 +       case OPCODE_CLASS_2:
23766 +       {
23767 +           E3_uint32 X = REGISTER_VALUE (trap, INSTR_RS1(instr));
23768 +           E3_uint32 Y = (instr & OPCODE_IMM) ? INSTR_IMM(instr) : REGISTER_VALUE (trap, INSTR_RS2(instr));
23769 +           
23770 +           if ((instr & OPCODE_NOT_ALUOP) == 0)
23771 +           {
23772 +               E3_uint32 fcode  = (instr >> OPCODE_FCODE_SHIFT) & OPCODE_FCODE_MASK;
23773 +               E3_uint32 result = ALU (ctxt, fcode, X, Y, &Z, &N, &C, &V);
23774 +
23775 +               PRINTF5 (ctxt, DBG_TPROC, "PC %x : %s %x %x -> %x", pc, OpcodeNames[fcode], X, Y, result);
23776 +               PRINTF4 (ctxt, DBG_TPROC, "        Z=%x N=%x C=%x V=%x\n", Z, N, C, V);
23777 +               
23778 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), result);
23779 +           }
23780 +           else
23781 +           {
23782 +               switch (instr & OPCODE_MASK)
23783 +               {
23784 +               case OPCODE_OPEN:
23785 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_open\n", pc);
23786 +                   break;
23787 +
23788 +               case OPCODE_CLOSE:
23789 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc);
23790 +                   goto found_close;
23791 +
23792 +               case OPCODE_SLL:
23793 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SLL\n", pc);
23794 +
23795 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X << Y);
23796 +                   break;
23797 +                   
23798 +               case OPCODE_SRL:
23799 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRL\n", pc);
23800 +                   
23801 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y);
23802 +                   break;
23803 +                   
23804 +               case OPCODE_SRA:
23805 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SRA\n", pc);
23806 +                   
23807 +                   ASSIGN_REGISTER (trap, INSTR_RD(instr), X >> Y);
23808 +                   break;
23809 +                   
23810 +               case OPCODE_BREAKTEST:
23811 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAKTEST  not allowed while open\n", pc);
23812 +                   goto failed;
23813 +                   
23814 +               case OPCODE_BREAK:
23815 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : BREAK not allowed while open\n", pc);
23816 +                   goto failed;
23817 +
23818 +               case OPCODE_SUSPEND:
23819 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : SUSPEND not allowed while open\n", pc);
23820 +                   goto failed;
23821 +                   
23822 +               case OPCODE_WAIT:
23823 +                   PRINTF1 (ctxt, DBG_TPROC, "PC %x : WAIT not allowed while open\n", pc);
23824 +                   goto failed;
23825 +
23826 +               default:
23827 +                   PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 2 instr %x\n", pc, instr);
23828 +                   goto failed;
23829 +               }
23830 +           }
23831 +           break;
23832 +       }
23833 +       
23834 +       case OPCODE_CLASS_3:
23835 +       {
23836 +           if ((instr & OPCODE_IMM) != 0)
23837 +               addr = REGISTER_VALUE (trap, INSTR_RS1(instr)) + INSTR_IMM(instr);
23838 +           else
23839 +               addr = (REGISTER_VALUE (trap, INSTR_RS1(instr)) + 
23840 +                       REGISTER_VALUE (trap, INSTR_RS2(instr)));
23841 +
23842 +           switch (instr & OPCODE_MASK)
23843 +           {
23844 +           case OPCODE_LD:
23845 +               PRINTF3 (ctxt, DBG_TPROC, "PC %x : LD [%x], r%d\n", pc, addr, INSTR_RD(instr));
23846 +               
23847 +               ASSIGN_REGISTER (trap, INSTR_RD(instr), ELAN3_OP_LOAD32 (ctxt, addr));
23848 +               break;
23849 +               
23850 +           case OPCODE_LDD:
23851 +           case OPCODE_LDBLOCK16:
23852 +           case OPCODE_LDBLOCK32:
23853 +           case OPCODE_LDBLOCK64:
23854 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : LDBLOCKx @ %x is not possible while output open\n", pc, addr);
23855 +               goto failed;
23856 +           
23857 +           case OPCODE_ST:
23858 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : ST @ %x\n", pc, addr);
23859 +               
23860 +               ELAN3_OP_STORE32 (ctxt, addr, REGISTER_VALUE (trap, INSTR_RD(instr)));
23861 +               break;
23862 +                             
23863 +           case OPCODE_STD:
23864 +           case OPCODE_STBLOCK16:
23865 +           case OPCODE_STBLOCK32:
23866 +           case OPCODE_STBLOCK64:
23867 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : STD @ %x is not posisble while output open\n", pc, addr);
23868 +               goto failed;
23869 +
23870 +           case OPCODE_SWAP:
23871 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : SWAP @ %x is not posible while output open\n", pc, addr);
23872 +               goto failed;
23873 +               
23874 +           default:
23875 +               PRINTF2 (ctxt, DBG_TPROC, "PC %x : unknown class 3 instr %x\n", pc, instr);
23876 +               goto failed;
23877 +           }
23878 +           break;
23879 +       }}
23880 +
23881 +       pc = npc;
23882 +       npc += 4;
23883 +    }
23884 +    
23885 +found_close:
23886 +    ELAN3_OP_END_FAULT_CHECK (ctxt);
23887 +
23888 +    PRINTF1 (ctxt, DBG_TPROC, "PC %x : c_close\n", pc);
23889 +    
23890 +    /*
23891 +     * Found the new pc, and have the close instruction in *instr
23892 +     */
23893 +    ASSIGN_REGISTER (trap, INSTR_RD(instr), PAckVal);
23894 +    
23895 +    /*
23896 +     * Move to instruction after close.
23897 +    */
23898 +    trap->pc = npc;
23899 +    
23900 +    /* Insert the value of Z and N from the close inst */
23901 +    trap->npc = (npc + 4) | ((PAckVal == E3_PAckOk) ? 1 :
23902 +                            (PAckVal == E3_PAckTestFail) ? 2 : 0);
23903 +
23904 +    return (ESUCCESS);
23905 +}
23906 +
23907 +E3_uint32
23908 +ALU (ELAN3_CTXT *ctxt,
23909 +     E3_uint32 fcode, E3_uint32 X, E3_uint32 Y,
23910 +     E3_uint32 *Z, E3_uint32 *N, E3_uint32 *C, E3_uint32 *V)
23911 +{
23912 +    E3_uint32 XMSB, YMSB, ZMSB, Cprime;
23913 +    E3_uint32 Yprime;
23914 +    E3_uint32 Result=0;
23915 +    
23916 +    Yprime = ((fcode >> 2) & 1) ? ~Y : Y;
23917 +    Cprime = ((fcode >> 2) & 1) ^ (*C & ((fcode >> 3) & 1));
23918 +    XMSB             = (X >> 31) & 1;
23919 +    YMSB             = (Yprime >> 31) & 1;
23920 +    /* mul or div */
23921 +    if ((fcode & 0xa) == 0xa)
23922 +    {
23923 +       PRINTF0 (ctxt, DBG_TPROC, "ALU: tried a multiply or a divide\n");
23924 +       return (0);
23925 +    }
23926 +
23927 +    switch (fcode & 3)
23928 +    {
23929 +       /*ADD */
23930 +    case 0:
23931 +       Result = X + Yprime + Cprime ;
23932 +       if ((fcode & 0x10) == 0)
23933 +           return (Result);
23934 +       
23935 +       ZMSB   = Result >> 31;
23936 +       *V = ((XMSB & YMSB & ~ZMSB) | (~XMSB &~YMSB &  ZMSB));
23937 +       *C = ((fcode >> 2) & 1) ^ ( (XMSB & YMSB) | (~ZMSB & (XMSB | YMSB)));
23938 +       break;
23939 +       
23940 +       /*AND */
23941 +    case 1:
23942 +       Result = X & Yprime ;
23943 +       if ((fcode & 0x10) == 0)
23944 +           return (Result);
23945 +       
23946 +       *V = 0;
23947 +       *C = 0;
23948 +       break;
23949 +       
23950 +       /*OR  */
23951 +    case 2:
23952 +       Result = X | Yprime ;
23953 +       if ((fcode & 0x10) == 0)
23954 +           return (Result);
23955 +       
23956 +       *V = 0;
23957 +       *C = 0;
23958 +       break;
23959 +       
23960 +       /*XOR */
23961 +    case 3:
23962 +       Result = X ^ Yprime ;
23963 +       if ((fcode & 0x10) == 0)
23964 +           return (Result);
23965 +       
23966 +       *V = 0;
23967 +       *C = 0;
23968 +       break;
23969 +    }
23970 +    
23971 +    *Z = (Result == 0) ? 1 : 0;
23972 +    *N = (Result >> 31) & 1;
23973 +
23974 +    return (Result);
23975 +}
23976 +
23977 +/*
23978 + * Local variables:
23979 + * c-file-style: "stroustrup"
23980 + * End:
23981 + */
23982 diff -urN clean/drivers/net/qsnet/elan3/tproc_linux.c linux-2.6.9/drivers/net/qsnet/elan3/tproc_linux.c
23983 --- clean/drivers/net/qsnet/elan3/tproc_linux.c 1969-12-31 19:00:00.000000000 -0500
23984 +++ linux-2.6.9/drivers/net/qsnet/elan3/tproc_linux.c   2005-05-31 06:29:07.000000000 -0400
23985 @@ -0,0 +1,223 @@
23986 +/*
23987 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
23988 + *
23989 + *    For licensing information please see the supplied COPYING file
23990 + *
23991 + */
23992 +
23993 +#ident "$Id: tproc_linux.c,v 1.22.2.1 2005/05/31 10:29:07 addy Exp $"
23994 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/tproc_linux.c,v $*/
23995 +
23996 +#include <qsnet/kernel.h>
23997 +#include <qsnet/autoconf.h>
23998 +
23999 +#include <asm/mman.h>
24000 +#include <linux/file.h>
24001 +
24002 +#ifdef NO_ABI
24003 +#include <asm/poll.h>
24004 +extern asmlinkage long sys_open(const char *, int, int);
24005 +extern asmlinkage ssize_t sys_write(unsigned int, const char *, size_t);
24006 +extern asmlinkage ssize_t sys_read(unsigned int, char *, size_t);
24007 +extern asmlinkage off_t sys_lseek(unsigned int, off_t, unsigned int);
24008 +extern asmlinkage long sys_poll(struct pollfd *, unsigned int, long);
24009 +extern asmlinkage long sys_kill(int, int); 
24010 +#elif  LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
24011 +# include <linux/syscall.h>
24012 +#else
24013 +# include <linux/syscalls.h>
24014 +#endif
24015 +
24016 +#include <elan3/elanregs.h>
24017 +#include <elan3/elandev.h>
24018 +#include <elan3/elanvp.h>
24019 +#include <elan3/elan3mmu.h>
24020 +#include <elan3/elanctxt.h>
24021 +#include <elan3/elandebug.h>
24022 +#include <elan3/urom_addrs.h>
24023 +#include <elan3/thread.h>
24024 +#include <elan3/elansyscall.h>
24025 +#include <elan3/threadsyscall.h>
24026 +
24027 +/*
24028 + * NOTE: system calls from kernel on Linux are different on alpha and i386 
24029 + *       on alpha they return -errno on failure 
24030 + *       on i386  they return -1 on failure and set errno 
24031 + */
24032 +
24033 +static void
24034 +ReturnSyscall (THREAD_TRAP *trap, unsigned long rc, int *skip)
24035 +{
24036 +    if (rc >= (unsigned long) (-130))
24037 +    {
24038 +       trap->pc |= PSR_C_BIT;  /* set carry to indicate failure */
24039 +
24040 +       trap->Registers[REG_OUTS+(0^WordEndianFlip)] = -rc;
24041 +    } 
24042 +    else 
24043 +    {
24044 +       trap->pc &= ~PSR_C_BIT; /* clear carry to indicate success */
24045 +       trap->Registers[REG_OUTS+(0^WordEndianFlip)] = rc;
24046 +    }
24047 +    trap->Registers[REG_OUTS+(1^WordEndianFlip)] = 0;
24048 +    *skip = 1;
24049 +}
24050 +
24051 +static void 
24052 +dump_regs(ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
24053 +{
24054 +    PRINTF (ctxt, DBG_TPROC, "               OUTS %08x %08x %08x %08x\n",
24055 +      trap->Registers[REG_OUTS+(0^WordEndianFlip)], 
24056 +      trap->Registers[REG_OUTS+(1^WordEndianFlip)],
24057 +      trap->Registers[REG_OUTS+(2^WordEndianFlip)], 
24058 +      trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
24059 +    PRINTF (ctxt, DBG_TPROC, "                    %08x %08x %08x %08x\n",
24060 +      trap->Registers[REG_OUTS+(4^WordEndianFlip)], 
24061 +      trap->Registers[REG_OUTS+(5^WordEndianFlip)],
24062 +      trap->Registers[REG_OUTS+(6^WordEndianFlip)], 
24063 +      trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
24064 +}
24065 +
24066 +int
24067 +ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
24068 +{
24069 +    int                  code;
24070 +    caddr_t       maddr;
24071 +    struct file  *file;
24072 +    unsigned long rc;
24073 +    int           i;
24074 +    uintptr_t     av[6];
24075 +    uintptr_t     ptr;
24076 +   
24077 +    PRINTF (ctxt, DBG_TPROC, "ThreadSyscall: PC %08x G1 %08x\n", 
24078 +      trap->pc, trap->Registers[REG_GLOBALS+(1^WordEndianFlip)]);
24079 +    dump_regs(ctxt, trap);
24080 +
24081 +    code = trap->Registers[REG_GLOBALS+(1^WordEndianFlip)];
24082 +
24083 +    /* Copy the system call arguments from %o0-%o5 */
24084 +    for (i = 0; i < 6; i++)
24085 +       av[i] = trap->Registers[REG_OUTS+(i^WordEndianFlip)];
24086 +    
24087 +    rc = (unsigned long) -EINVAL;
24088 +
24089 +    switch (code) {
24090 +#if defined(IOPROC_PATCH_APPLIED)
24091 +       case ELAN3_SYS_open:
24092 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
24093 +           if (maddr != NULL)
24094 +               rc = sys_open((const char *)maddr, av[1], av[2]);
24095 +           break;
24096 +
24097 +       case ELAN3_SYS_close:
24098 +           rc = sys_close(av[0]);
24099 +           break;
24100 +
24101 +       case ELAN3_SYS_write:
24102 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
24103 +           if (maddr != NULL)
24104 +               rc = sys_write(av[0], (const char *)maddr, av[2]);
24105 +           break;
24106 +
24107 +       case ELAN3_SYS_read:
24108 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[1]);
24109 +           if (maddr != NULL)
24110 +               rc = sys_read(av[0], (char *)maddr, av[2]);
24111 +           break;
24112 +
24113 +       case ELAN3_SYS_poll:
24114 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
24115 +           if (maddr != NULL)
24116 +               rc = sys_poll((struct pollfd *)maddr, av[1], av[2]);
24117 +           break;
24118 +       
24119 +       case ELAN3_SYS_lseek:
24120 +           rc = sys_lseek(av[0], av[1], av[2]);
24121 +           break;
24122 +           
24123 +       case ELAN3_SYS_mmap:
24124 +           if ((E3_Addr) av[0] == (E3_Addr) 0)
24125 +               maddr = NULL;
24126 +           else if ((maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0])) == NULL)
24127 +               break;
24128 +       
24129 +           file = NULL;
24130 +           /* GNAT 5515: If *not* anonymous memory need to do fget */
24131 +           if ((av[3] & MAP_ANONYMOUS) == 0 && (file = fget (av[4])) == NULL)
24132 +           {
24133 +               rc = -EBADF;
24134 +               break;
24135 +           }
24136 +
24137 +           down_write (&current->mm->mmap_sem);
24138 +           ptr = do_mmap_pgoff (file, (unsigned long) maddr, av[1], av[2], av[3], av[5] >>PAGE_SHIFT);
24139 +           up_write (&current->mm->mmap_sem);
24140 +
24141 +           if (file)
24142 +               fput (file);
24143 +           
24144 +           if (IS_ERR((void *) ptr))
24145 +               rc = PTR_ERR((void *) ptr);
24146 +           else
24147 +               rc = elan3mmu_elanaddr (ctxt->Elan3mmu, (caddr_t)ptr);
24148 +
24149 +           break;
24150 +       
24151 +       case ELAN3_SYS_munmap:
24152 +           maddr = elan3mmu_mainaddr (ctxt->Elan3mmu, (E3_Addr) av[0]);
24153 +
24154 +#ifdef AC
24155 +           if (maddr != NULL)
24156 +               rc = do_munmap(current->mm, (unsigned long) maddr, av[1], 1);
24157 +#else
24158 +           if (maddr != NULL)
24159 +               rc = do_munmap(current->mm, (unsigned long) maddr, av[1]);
24160 +#endif
24161 +           break;
24162 +
24163 +       case ELAN3_SYS_kill:
24164 +           rc = sys_kill(av[0], av[1]);
24165 +           break;
24166 +
24167 +       case ELAN3_SYS_getpid:
24168 +           rc = current->pid;
24169 +           break;
24170 +#else
24171 +
24172 +#warning "NO IOPROC patch applied - thread cannot perform system calls"
24173 +
24174 +#endif /* defined(IOPROC_PATCH_APPLIED) */
24175 +
24176 +       default:
24177 +           return EINVAL;
24178 +    }
24179 +    ReturnSyscall(trap, rc, skip);
24180 +    return ESUCCESS;
24181 +}
24182 +
24183 +
24184 +int
24185 +ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip)
24186 +{
24187 +       int ret = ESUCCESS;
24188 +
24189 +       PRINTF (ctxt, DBG_TPROC, "ThreadElancall: PC %08x\n", trap->pc);
24190 +       dump_regs(ctxt, trap);
24191 +
24192 +       /*
24193 +        * Elan system call 'type' is passed in o0
24194 +        */
24195 +       switch (trap->Registers[REG_OUTS+(0^WordEndianFlip)]) 
24196 +       {
24197 +       default:
24198 +               ret = EINVAL;
24199 +               break;
24200 +       }
24201 +       return ret;
24202 +}
24203 +
24204 +/*
24205 + * Local variables:
24206 + * c-file-style: "stroustrup"
24207 + * End:
24208 + */
24209 diff -urN clean/drivers/net/qsnet/elan3/virtual_process.c linux-2.6.9/drivers/net/qsnet/elan3/virtual_process.c
24210 --- clean/drivers/net/qsnet/elan3/virtual_process.c     1969-12-31 19:00:00.000000000 -0500
24211 +++ linux-2.6.9/drivers/net/qsnet/elan3/virtual_process.c       2004-06-07 09:50:10.000000000 -0400
24212 @@ -0,0 +1,884 @@
24213 +/*
24214 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
24215 + *
24216 + *    For licensing information please see the supplied COPYING file
24217 + *
24218 + */
24219 +
24220 +#ident "@(#)$Id: virtual_process.c,v 1.68 2004/06/07 13:50:10 mike Exp $"
24221 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/os/virtual_process.c,v $*/
24222 +
24223 +#include <qsnet/kernel.h>
24224 +
24225 +#include <elan/elanmod.h>
24226 +#include <elan3/elanregs.h>
24227 +#include <elan3/elandev.h>
24228 +#include <elan3/elanvp.h>
24229 +#include <elan3/elan3mmu.h>
24230 +#include <elan3/elanctxt.h>
24231 +#include <elan3/elandebug.h>
24232 +#include <elan3/urom_addrs.h>
24233 +#include <elan3/thread.h>
24234 +#include <elan3/vmseg.h>
24235 +#include <elan3/elansyscall.h>
24236 +
24237 +static ELAN3_VPSEG *
24238 +InstallSegment (ELAN3_CTXT *ctxt, int process, int entries)
24239 +{
24240 +    ELAN3_VPSEG **prevSeg, *seg;
24241 +    int lastTop = -1;
24242 +    int        top     = process + entries-1;
24243 +
24244 +    ASSERT (krwlock_is_write_locked (&ctxt->VpLock));
24245 +
24246 +    for (prevSeg = &ctxt->VpSegs; (seg = (*prevSeg)) != NULL; prevSeg = &seg->Next)
24247 +    {
24248 +       int thisTop = seg->Process + seg->Entries - 1;
24249 +
24250 +        if (process < seg->Process && (process <= lastTop || top >= seg->Process))
24251 +       {
24252 +           /*
24253 +            * Overlaps with last segment, or this one 
24254 +            */
24255 +           return (NULL);
24256 +       }
24257 +       if (seg->Process > process)
24258 +           break;
24259 +       
24260 +       lastTop = thisTop;
24261 +    }
24262 +    
24263 +    KMEM_ZALLOC (seg, ELAN3_VPSEG *, sizeof (ELAN3_VPSEG), TRUE);
24264 +    
24265 +    if (seg == (ELAN3_VPSEG *) NULL)
24266 +       return (NULL);
24267 +    
24268 +    seg->Process = process;
24269 +    seg->Entries = entries;
24270 +
24271 +
24272 +    PRINTF2 (ctxt, DBG_VP, "InstallSegment: add seg %p before %p\n", seg, *prevSeg);
24273 +
24274 +    seg->Next = *prevSeg;
24275 +    *prevSeg = seg;
24276 +
24277 +    return (seg);
24278 +}
24279 +
24280 +static int 
24281 +RemoveSegment (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg)
24282 +{
24283 +    ELAN3_VPSEG **prevSeg, *thisSeg;
24284 +
24285 +    ASSERT (krwlock_is_write_locked (&ctxt->VpLock));
24286 +
24287 +    for (prevSeg = &ctxt->VpSegs; (thisSeg = (*prevSeg)) != NULL; prevSeg = &thisSeg->Next)
24288 +    {
24289 +       if (thisSeg == seg)
24290 +           break;
24291 +    }
24292 +
24293 +    if (thisSeg == (ELAN3_VPSEG *) NULL)
24294 +       return (EINVAL);
24295 +
24296 +
24297 +    PRINTF2 (ctxt, DBG_VP, "RemoveSegment: remove seg %p next %p\n", thisSeg, thisSeg->Next);
24298 +
24299 +    *prevSeg = thisSeg->Next;
24300 +    
24301 +    KMEM_FREE ((caddr_t) seg, sizeof (ELAN3_VPSEG));
24302 +
24303 +    return (ESUCCESS);
24304 +}
24305 +
24306 +static ELAN3_VPSEG *
24307 +FindSegment (ELAN3_CTXT *ctxt, int low, int high)
24308 +{
24309 +    ELAN3_VPSEG *seg;
24310 +
24311 +    ASSERT(krwlock_is_locked (&ctxt->VpLock));
24312 +    
24313 +    for (seg = ctxt->VpSegs; seg; seg = seg->Next)
24314 +    {
24315 +       if (seg->Process <= low && (seg->Process + seg->Entries) > high)
24316 +           return (seg);
24317 +    }
24318 +
24319 +    return ((ELAN3_VPSEG *) NULL);
24320 +}
24321 +
24322 +ELAN_LOCATION
24323 +ProcessToLocation (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap)
24324 +{
24325 +    ELAN_LOCATION location;
24326 +    int           nnodes,nctxs;
24327 +    int           node,ctx,i;
24328 +
24329 +    ASSERT(krwlock_is_locked (&ctxt->VpLock));
24330 +
24331 +    location.loc_node    = ELAN3_INVALID_NODE;
24332 +    location.loc_context = -1;
24333 +
24334 +    PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d seg %p cap %p\n", process, seg, cap);
24335 +
24336 +    if (seg == NULL)
24337 +       seg = FindSegment (ctxt, process, process);
24338 +
24339 +    if (!seg || (seg->Type != ELAN3_VPSEG_P2P))
24340 +       return (location);
24341 +
24342 +    cap    = &seg->SegCapability;
24343 +    nnodes = ELAN_CAP_NUM_NODES (cap);
24344 +    nctxs  = ELAN_CAP_NUM_CONTEXTS (cap);
24345 +
24346 +    switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
24347 +    {
24348 +    case ELAN_CAP_TYPE_BLOCK:
24349 +    {
24350 +       int entries = ELAN_CAP_ENTRIES(cap);
24351 +
24352 +       for (node = 0, i = 0; node < nnodes && i < entries; node++)
24353 +       {
24354 +           for (ctx = 0; ctx < nctxs && i < entries; ctx++)
24355 +           {
24356 +               if (( seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, ctx + (node * nctxs)))
24357 +               {
24358 +                   if (i++ == (process - seg->Process))
24359 +                   { 
24360 +                       location.loc_node    = seg->SegCapability.cap_lownode    + node;
24361 +                       location.loc_context = seg->SegCapability.cap_lowcontext + ctx;
24362 +                       goto found;
24363 +                   }
24364 +               }
24365 +           }
24366 +       }
24367 +       break;
24368 +    }
24369 +    case ELAN_CAP_TYPE_CYCLIC:
24370 +    {
24371 +       int entries = ELAN_CAP_ENTRIES(cap);
24372 +
24373 +       for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++)
24374 +       {
24375 +           for (node = 0; node < nnodes && i < entries; node++)
24376 +           {
24377 +               if ((seg->SegCapability.cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->SegCapability.cap_bitmap, node + (ctx * nnodes)))
24378 +               {                                   
24379 +                   if (i++ ==  (process - seg->Process))
24380 +                   { 
24381 +                       location.loc_node    = seg->SegCapability.cap_lownode    + node;
24382 +                       location.loc_context = seg->SegCapability.cap_lowcontext + ctx;
24383 +                       goto found;
24384 +                   }
24385 +               }
24386 +           }
24387 +       }
24388 +       break;  
24389 +    }
24390 +    default:
24391 +       break;
24392 +    }
24393 +       
24394 + found:
24395 +    
24396 +    PRINTF3 (ctxt, DBG_VP, "ProcessToLocation: process %d -> Node %d Context %d\n", process, location.loc_node,  location.loc_context);
24397 +
24398 +    if (cap != NULL)
24399 +    {
24400 +       bcopy ((caddr_t) &seg->SegCapability, (caddr_t) cap, sizeof (ELAN_CAPABILITY));
24401 +       cap->cap_mycontext = location.loc_context;
24402 +    }
24403 +
24404 +    return (location);
24405 +}
24406 +
24407 +int
24408 +LocationToProcess (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, ELAN_LOCATION loc, ELAN_CAPABILITY *cap)
24409 +{
24410 +    int nnodes,nctxs;
24411 +    int node,ctx,i;
24412 +
24413 +    if (seg == NULL)
24414 +       return ELAN3_INVALID_PROCESS;
24415 +
24416 +    if (!seg || (seg->Type != ELAN3_VPSEG_P2P))
24417 +       return ELAN3_INVALID_PROCESS;
24418 +
24419 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
24420 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
24421 +
24422 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
24423 +    {
24424 +    case ELAN_CAP_TYPE_BLOCK:
24425 +    {
24426 +       int entries = ELAN_CAP_ENTRIES(cap);
24427 +
24428 +       for (node = 0, i = 0; node < nnodes && i < entries; node++)
24429 +       {
24430 +           for (ctx = 0; ctx < nctxs && i < entries; ctx++)
24431 +           {
24432 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctx + (node * nctxs)))
24433 +               {
24434 +                   if ((loc.loc_node    == (cap->cap_lownode + node) ) 
24435 +                       && (loc.loc_context == (cap->cap_lowcontext + ctx) ))
24436 +                   {
24437 +                       return (i + seg->Process);
24438 +                   }
24439 +                   i++;
24440 +               }
24441 +           }
24442 +       }
24443 +       break;
24444 +    }  
24445 +    case ELAN_CAP_TYPE_CYCLIC:
24446 +    {
24447 +       int entries = ELAN_CAP_ENTRIES(cap);
24448 +
24449 +       for (ctx = 0, i = 0; ctx < nctxs && i < entries; ctx++)
24450 +       {
24451 +           for (node = 0; node < nnodes && i < entries; node++)
24452 +           {
24453 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, node + (ctx * nnodes)))
24454 +               {
24455 +                   if ((loc.loc_node   == (cap->cap_lownode + node) ) 
24456 +                       && (loc.loc_context == (cap->cap_lowcontext + ctx) ))
24457 +                   {
24458 +                       return (i + seg->Process);
24459 +                   }
24460 +                   i++;
24461 +                   
24462 +               }
24463 +           }
24464 +       }
24465 +       break;
24466 +    }  
24467 +    default:
24468 +       break;
24469 +    }
24470 +       
24471 +    return ELAN3_INVALID_PROCESS;
24472 +}
24473 +
24474 +int
24475 +elan3_addvp (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap)
24476 +{
24477 +    ELAN3_DEV       *dev = ctxt->Device;
24478 +    ELAN_POSITION    *pos = &ctxt->Position;
24479 +    ELAN3_VPSEG       *seg;
24480 +    int                      i;
24481 +    int                      nodeOff;
24482 +    int                      ctxOff;
24483 +    int                      nnodes;
24484 +    int                      nctxs;
24485 +    E3_uint16         flits[MAX_FLITS];
24486 +    int               nflits;
24487 +    int               entries;
24488 +
24489 +    PRINTF2 (ctxt, DBG_VP, "elan3_addvp: %d -> %s\n", process, CapabilityString (cap));
24490 +
24491 +    entries = ELAN_CAP_ENTRIES(cap);
24492 +    if (entries <= 0 || (process + entries) > ELAN3_MAX_VPS)
24493 +       return (EINVAL);
24494 +
24495 +    /*
24496 +     * Scan the virtual process segment list, to add this entry, and ensure that
24497 +     * the ranges don't overlap.
24498 +     */
24499 +    krwlock_write (&ctxt->VpLock);
24500 +
24501 +    /* check cap. */
24502 +    switch (elan3_validate_cap (ctxt->Device, cap, ELAN_USER_P2P))
24503 +    {
24504 +    case ELAN_CAP_OK:
24505 +       /* nothing */
24506 +       break;
24507 +
24508 +    case ELAN_CAP_RMS:
24509 +       if ( elan_validate_map(cap, cap) != ESUCCESS)
24510 +       {
24511 +           krwlock_done (&ctxt->VpLock);
24512 +           return (EINVAL);
24513 +       }
24514 +       break;
24515 +
24516 +    default:
24517 +       krwlock_done (&ctxt->VpLock);
24518 +       return (EINVAL);
24519 +    }
24520 +
24521 +    if ((seg = InstallSegment (ctxt, process, entries)) == NULL)
24522 +    {
24523 +       PRINTF0 (ctxt, DBG_VP, "elan3_addvp: failed to find a seg\n");
24524 +       krwlock_done (&ctxt->VpLock);
24525 +       return (EINVAL);
24526 +    }
24527 +    
24528 +    seg->Type                        = ELAN3_VPSEG_P2P;
24529 +    seg->SegCapability               = *cap;
24530 +    seg->SegCapability.cap_mycontext = ELAN_CAP_UNINITIALISED;
24531 +
24532 +    PRINTF3 (ctxt, DBG_VP, "elan3_addvp: segment type %x  %d %d\n",
24533 +           seg->SegCapability.cap_type, seg->Process, entries);
24534 +
24535 +
24536 +    nnodes = cap->cap_highnode - cap->cap_lownode + 1;
24537 +    nctxs  = cap->cap_highcontext - cap->cap_lowcontext + 1;
24538 +
24539 +    /* position not determined, so cannot load any routes, the hwtest
24540 +     * process must explicitly set its own routes */
24541 +    
24542 +    if (!(cap->cap_type & ELAN_CAP_TYPE_HWTEST) && (pos->pos_mode != ELAN_POS_UNKNOWN))
24543 +    {
24544 +       switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
24545 +       {
24546 +       case ELAN_CAP_TYPE_BLOCK:
24547 +           for (nodeOff = 0, i = 0; nodeOff < nnodes && i < entries; nodeOff++)
24548 +           {
24549 +               for (ctxOff = 0; ctxOff < nctxs && i < entries; ctxOff++)
24550 +               {
24551 +                   if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
24552 +                   {
24553 +                       /* Don't load a route if there's no switch and trying to talk to myself */
24554 +                       if (pos->pos_mode == ELAN_POS_MODE_SWITCHED ||
24555 +                           (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) ||
24556 +                           (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid))
24557 +                       {
24558 +                           PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n",
24559 +                                    seg->Process + i, cap->cap_lownode +nodeOff, cap->cap_lowcontext +ctxOff);
24560 +                           
24561 +                           nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff,
24562 +                                                   DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24563 +                           
24564 +
24565 +
24566 +                           LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext + ctxOff, nflits, flits);  
24567 +                       }
24568 +                       
24569 +                       i++;
24570 +                   }
24571 +               }
24572 +           }
24573 +           break;
24574 +           
24575 +       case ELAN_CAP_TYPE_CYCLIC:
24576 +           for (ctxOff = 0, i = 0; ctxOff < nctxs && i < entries; ctxOff++)
24577 +           {
24578 +               for (nodeOff = 0; nodeOff < nnodes && i < entries; nodeOff++)
24579 +               {
24580 +                   if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
24581 +                   {
24582 +                       /* Don't load a route if there's no switch and trying to talk to myself */
24583 +                       if (pos->pos_mode == ELAN_POS_MODE_SWITCHED ||
24584 +                           (pos->pos_mode == ELAN_POS_MODE_LOOPBACK && cap->cap_lownode + nodeOff == pos->pos_nodeid) ||
24585 +                           (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && cap->cap_lownode + nodeOff != pos->pos_nodeid))
24586 +                       {
24587 +                           PRINTF3 (ctxt, DBG_VP, "elan3_addvp: virtual process %d -> node %d context %d\n",
24588 +                                    seg->Process + i, cap->cap_lownode + nodeOff, cap->cap_lowcontext +ctxOff);
24589 +                       
24590 +                           nflits = GenerateRoute (pos, flits, cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff,
24591 +                                                   DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24592 +                           
24593 +
24594 +                           LoadRoute (dev, ctxt->RouteTable, seg->Process+i, cap->cap_lowcontext +ctxOff, nflits, flits);  
24595 +                       } 
24596 +                       i++;                
24597 +                   }
24598 +               }
24599 +           }
24600 +           break;      
24601 +       default:
24602 +           break;
24603 +       }
24604 +    }
24605 +  
24606 +    krwlock_done (&ctxt->VpLock);
24607 +
24608 +    return (ESUCCESS);
24609 +}
24610 +
24611 +int
24612 +elan3_removevp (ELAN3_CTXT *ctxt, int process)
24613 +{
24614 +    ELAN3_VPSEG *seg;
24615 +    ELAN3_VPSEG *next;
24616 +    int                i;
24617 +
24618 +    krwlock_write (&ctxt->VpLock);
24619 +
24620 +    PRINTF1 (ctxt, DBG_VP, "elan3_removevp: remove process %d\n", process);
24621 +
24622 +    if (process == ELAN3_INVALID_PROCESS)
24623 +       seg = ctxt->VpSegs;
24624 +    else
24625 +       seg = FindSegment (ctxt, process, process);
24626 +
24627 +    if (seg == (ELAN3_VPSEG *) NULL)
24628 +    {
24629 +       krwlock_done (&ctxt->VpLock);
24630 +       return (EINVAL);
24631 +    }
24632 +    
24633 +    do {
24634 +       PRINTF3 (ctxt, DBG_VP, "elan3_removevp: segment is %p [%x,%x]\n",
24635 +                seg, seg->Process, seg->Process+seg->Entries);
24636 +
24637 +       for (i = 0; i < seg->Entries; i++)
24638 +           ClearRoute (ctxt->Device, ctxt->RouteTable, seg->Process+i);
24639 +
24640 +        /* get Next pointer value before structure is free'd */
24641 +        next = seg->Next;      
24642 +       RemoveSegment (ctxt, seg);
24643 +
24644 +    } while (process == ELAN3_INVALID_PROCESS && (seg = next) != NULL);
24645 +    
24646 +    krwlock_done (&ctxt->VpLock);
24647 +
24648 +    return (ESUCCESS);
24649 +}
24650 +
24651 +int
24652 +elan3_addbcastvp (ELAN3_CTXT *ctxt, int process, int lowProc, int highProc)
24653 +{
24654 +    ELAN_POSITION *pos = &ctxt->Position;
24655 +    ELAN3_VPSEG    *seg;
24656 +    ELAN3_VPSEG    *aseg;
24657 +    int            virtualProcess;
24658 +    E3_uint64     routeValue;
24659 +
24660 +    PRINTF3 (ctxt, DBG_VP, "elan3_addbcastvp: process %d [%d,%d]\n", process, lowProc, highProc);
24661 +
24662 +    if (lowProc > highProc || pos->pos_mode != ELAN_POS_MODE_SWITCHED)
24663 +       return (EINVAL);
24664 +    
24665 +    krwlock_write (&ctxt->VpLock);
24666 +
24667 +    if ((aseg = FindSegment (ctxt, lowProc, highProc)) == NULL || (aseg->Type != ELAN3_VPSEG_P2P))
24668 +    {
24669 +       PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to p2p segment\n", lowProc, highProc);
24670 +       krwlock_done (&ctxt->VpLock);
24671 +       return (EINVAL);
24672 +    }
24673 +
24674 +    /* check aseg->SegCapability */    
24675 +    switch (elan3_validate_cap (ctxt->Device, &aseg->SegCapability, ELAN_USER_BROADCAST))
24676 +    {
24677 +    case ELAN_CAP_OK:
24678 +       /* nothing */
24679 +       break;
24680 +       
24681 +    case ELAN_CAP_RMS:
24682 +       if ( elan_validate_map(&ctxt->Capability, &aseg->SegCapability) != ESUCCESS )
24683 +       {
24684 +           krwlock_done (&ctxt->VpLock);
24685 +           return (EINVAL);
24686 +       }
24687 +       break;
24688 +
24689 +    default:
24690 +       krwlock_done (&ctxt->VpLock);
24691 +       return (EINVAL);
24692 +    }
24693 +
24694 +    if ( ProcessToLocation (ctxt, aseg, lowProc,  NULL).loc_context != 
24695 +        ProcessToLocation (ctxt, aseg, highProc, NULL).loc_context)
24696 +    {
24697 +       PRINTF2 (ctxt, DBG_VP, "elan3_addbcastvp: process [%d,%d] does not map to single context\n", lowProc, highProc);
24698 +       krwlock_done (&ctxt->VpLock);
24699 +       return (EINVAL);
24700 +    }
24701 +    
24702 +    if ((seg = InstallSegment (ctxt, process, 1)) == NULL)
24703 +    {
24704 +       krwlock_done (&ctxt->VpLock);
24705 +       return (EINVAL);
24706 +    }
24707 +
24708 +    seg->Type        = ELAN3_VPSEG_BROADCAST;
24709 +    seg->SegLowProc  = lowProc;
24710 +    seg->SegHighProc = highProc;
24711 +
24712 +    PRINTF4 (ctxt, DBG_VP, "elan3_addbcastvp: installed seg %p Type %d LowProc %d HighProc %d\n",
24713 +           seg, seg->Type, seg->SegLowProc, seg->SegHighProc);
24714 +
24715 +    for (virtualProcess = lowProc; virtualProcess <= highProc; virtualProcess++)
24716 +    {
24717 +       if (virtualProcess < 0 || virtualProcess >= ctxt->RouteTable->Size)
24718 +           routeValue = 0;
24719 +       else
24720 +           routeValue = elan3_sdram_readq ( ctxt->Device, ctxt->RouteTable->Table + virtualProcess * NBYTES_PER_SMALL_ROUTE);
24721 +       
24722 +       if (! (routeValue & ROUTE_VALID))
24723 +       {
24724 +           PRINTF2 (ctxt, DBG_VP, "loadvp[%x]: broadcast %x not valid\n", 
24725 +                    ctxt->Capability.cap_mycontext, virtualProcess);
24726 +           break;
24727 +       }
24728 +    }
24729 +           
24730 +    if (virtualProcess > highProc)                     /* All vps now present */
24731 +    {                                          /* so load up broadcast route */
24732 +       E3_uint16     flits[MAX_FLITS];
24733 +       ELAN_LOCATION low    = ProcessToLocation (ctxt, aseg, lowProc,   NULL);
24734 +       ELAN_LOCATION high   = ProcessToLocation (ctxt, aseg, highProc,  NULL);
24735 +       int           nflits = GenerateRoute (pos, flits, low.loc_node, high.loc_node, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24736 +       
24737 +       PRINTF6 (ctxt, DBG_VP, "loadvp[%x]: broadcast %d -> %x.%x [%x.%x]\n", ctxt->Capability.cap_mycontext,
24738 +                seg->Process, low.loc_node, high.loc_node, 
24739 +                low.loc_context, high.loc_context);
24740 +       
24741 +       LoadRoute ( ctxt->Device, ctxt->RouteTable, seg->Process, low.loc_context, nflits, flits);
24742 +    }
24743 +
24744 +    krwlock_done (&ctxt->VpLock);
24745 +
24746 +    return (ESUCCESS);
24747 +}
24748 +
24749 +int
24750 +elan3_process (ELAN3_CTXT *ctxt)
24751 +{
24752 +    int           res = ELAN3_INVALID_PROCESS;
24753 +    ELAN3_VPSEG   *seg;
24754 +    ELAN_LOCATION loc;
24755 +
24756 +    krwlock_write (&ctxt->VpLock);
24757 +
24758 +    loc.loc_node    = ctxt->Position.pos_nodeid;
24759 +    loc.loc_context = ctxt->Capability.cap_mycontext;
24760 +
24761 +    for (seg = ctxt->VpSegs ; seg; seg = seg->Next)
24762 +    {
24763 +       if (seg->Type == ELAN3_VPSEG_P2P &&
24764 +           seg->SegCapability.cap_lowcontext  <= ctxt->Capability.cap_mycontext &&
24765 +           seg->SegCapability.cap_highcontext >= ctxt->Capability.cap_mycontext &&
24766 +           seg->SegCapability.cap_lownode     <= ctxt->Position.pos_nodeid &&
24767 +           seg->SegCapability.cap_highnode    >= ctxt->Position.pos_nodeid)
24768 +       {
24769 +           if ((res=LocationToProcess (ctxt,seg,loc,&ctxt->Capability)) != ELAN3_INVALID_PROCESS)
24770 +           {
24771 +                krwlock_done (&ctxt->VpLock);
24772 +                return res;
24773 +           }
24774 +       }
24775 +    }
24776 +
24777 +    krwlock_done (&ctxt->VpLock);
24778 +
24779 +    return (res);
24780 +}
24781 +
24782 +int
24783 +elan3_check_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError)
24784 +{
24785 +    PRINTF5 (ctxt, DBG_VP, "elan3_check_route: vp=%d flits=%04x %04x %04x %04x\n",
24786 +            process, flits[0], flits[1], flits[2], flits[3]);
24787 +    PRINTF4 (ctxt, DBG_VP, "                            %04x %04x %04x %04x\n",
24788 +            flits[4], flits[5], flits[6], flits[7]);
24789 +
24790 +    krwlock_read (&ctxt->VpLock);
24791 +    *routeError=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node);
24792 +    krwlock_done (&ctxt->VpLock);
24793 +
24794 +    return (ESUCCESS); /* the call is a success tho the errorcode may be set */
24795 +}
24796 +
24797 +int
24798 +elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits)
24799 +{
24800 +    ELAN3_VPSEG *seg;
24801 +    int                res = 0;
24802 +    int                nflits;
24803 +    int         err;
24804 +
24805 +    PRINTF5 (ctxt, DBG_VP, "elan3_load_route: vp=%d flits=%04x %04x %04x %04x\n",
24806 +            process, flits[0], flits[1], flits[2], flits[3]);
24807 +    PRINTF4 (ctxt, DBG_VP, "                            %04x %04x %04x %04x\n",
24808 +            flits[4], flits[5], flits[6], flits[7]);
24809 +
24810 +    krwlock_write (&ctxt->VpLock);
24811 +
24812 +    /* check the route is valid */
24813 +    if (!(ctxt->Capability.cap_type & ELAN_CAP_TYPE_HWTEST))
24814 +    {
24815 +       /* must have already attached to define my context number */
24816 +       if (ctxt->Capability.cap_mycontext == ELAN_CAP_UNINITIALISED)
24817 +       {
24818 +           krwlock_done (&ctxt->VpLock);
24819 +           return (EINVAL);
24820 +       }
24821 +
24822 +       if ((err=elan3_route_check(ctxt,flits,ProcessToLocation (ctxt, NULL, process, NULL).loc_node)) != ELAN3_ROUTE_SUCCESS)
24823 +       {
24824 +           krwlock_done (&ctxt->VpLock);
24825 +           return (EINVAL);
24826 +       }
24827 +    }
24828 +
24829 +    if ((seg = FindSegment (ctxt, process, process)) == NULL || seg->Type != ELAN3_VPSEG_P2P)
24830 +    {
24831 +       krwlock_done (&ctxt->VpLock);
24832 +       return (EINVAL);
24833 +    }
24834 +
24835 +    /* Calculate number of flits in this route */
24836 +    for (nflits = 0; nflits < MAX_FLITS && flits[nflits]; nflits++)
24837 +       ;
24838 +    
24839 +    res = LoadRoute (ctxt->Device, ctxt->RouteTable, process, ProcessToLocation (ctxt, seg, process, NULL).loc_context, nflits, flits);
24840 +
24841 +    krwlock_done (&ctxt->VpLock);
24842 +
24843 +    return (res);
24844 +}
24845 +
24846 +int
24847 +elan3_get_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits)
24848 +{
24849 +    ELAN3_VPSEG *seg;
24850 +    int                res = 0;
24851 +
24852 +    PRINTF1 (ctxt, DBG_VP, "elan3_get_route: vp=%d \n",  process);
24853 +
24854 +    krwlock_write (&ctxt->VpLock);
24855 +
24856 +    if (ctxt->RouteTable == NULL)  /* is there a route table */
24857 +    {
24858 +       krwlock_done (&ctxt->VpLock);
24859 +       return (EINVAL);
24860 +    }
24861 +
24862 +    if ((seg = FindSegment (ctxt, process, process)) != NULL && seg->Type != ELAN3_VPSEG_P2P)
24863 +    {
24864 +       krwlock_done (&ctxt->VpLock);
24865 +       return (EINVAL);
24866 +    }
24867 +    
24868 +    if (seg == NULL)
24869 +    {
24870 +       krwlock_done (&ctxt->VpLock);
24871 +       return (EINVAL);
24872 +    }
24873 +    
24874 +    res = GetRoute (ctxt->Device, ctxt->RouteTable, process, flits);
24875 +    
24876 +    krwlock_done (&ctxt->VpLock);
24877 +
24878 +    return (res);
24879 +}
24880 +
24881 +int
24882 +elan3_reset_route (ELAN3_CTXT *ctxt, int process)
24883 +{
24884 +    E3_uint16     flits[MAX_FLITS];
24885 +
24886 +    PRINTF1 (ctxt, DBG_VP, "elan3_reset_route: vp=%d \n",  process);
24887 +
24888 +    GenerateRoute (&ctxt->Position, flits, process, process, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
24889 +    
24890 +    return elan3_load_route(ctxt,process,flits);
24891 +}
24892 +
24893 +int
24894 +ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process)
24895 +{
24896 +    E3_uint16    flits[MAX_FLITS];
24897 +    ELAN3_DEV     *dev = ctxt->Device;
24898 +    int                  res = ESUCCESS;
24899 +    ELAN3_VPSEG   *seg;
24900 +    ELAN3_VPSEG   *aseg;
24901 +    E3_uint64    routeValue;
24902 +
24903 +    krwlock_read (&ctxt->VpLock);
24904 +
24905 +    PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: vp=%d \n",  process);
24906 +
24907 +    if (ctxt->RouteTable == NULL || process < 0 || process >= ctxt->RouteTable->Size)
24908 +    {
24909 +       krwlock_done (&ctxt->VpLock);
24910 +       return (EINVAL);
24911 +    }
24912 +
24913 +    if (! (seg = FindSegment (ctxt, process, process)))
24914 +    {
24915 +       PRINTF1 (ctxt, DBG_VP, "ResolveVirtualProcess: cannot find segment for virtual process %d\n", process);
24916 +       krwlock_done (&ctxt->VpLock);
24917 +       return (EINVAL);
24918 +    }
24919 +    
24920 +    /* check cap. */
24921 +    switch (elan3_validate_cap (ctxt->Device, &seg->SegCapability, ((seg->Type == ELAN3_VPSEG_P2P) ? ELAN_USER_P2P : ELAN_USER_BROADCAST)))
24922 +    {
24923 +    case ELAN_CAP_OK:
24924 +       /* nothing */
24925 +       break;
24926 +
24927 +    case ELAN_CAP_RMS:
24928 +       if ( elan_validate_map(&ctxt->Capability, &seg->SegCapability) != ESUCCESS)
24929 +       {
24930 +           krwlock_done (&ctxt->VpLock);
24931 +           return (EINVAL);
24932 +       }
24933 +       break;
24934 +
24935 +    default:
24936 +       krwlock_done (&ctxt->VpLock);
24937 +       return (EINVAL);
24938 +    }
24939 +
24940 +    BumpUserStat (ctxt, LoadVirtualProcess);
24941 +
24942 +    routeValue = elan3_sdram_readq (dev, ctxt->RouteTable->Table + process * NBYTES_PER_SMALL_ROUTE);
24943 +    if (routeValue & ROUTE_VALID)                              /* Virtual process already */
24944 +    {                                                          /* loaded */
24945 +       krwlock_done (&ctxt->VpLock);
24946 +       return (ESUCCESS);                      
24947 +    }
24948 +    
24949 +    switch (seg->Type)
24950 +    {
24951 +    case ELAN3_VPSEG_P2P:
24952 +       switch (seg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
24953 +       {
24954 +       case ELAN_CAP_TYPE_BLOCK:
24955 +       case ELAN_CAP_TYPE_CYCLIC:
24956 +           if ((res = elan_validate_map (&ctxt->Capability,&seg->SegCapability)) == ESUCCESS &&
24957 +               (res = GetRoute(dev, ctxt->RouteTable ,process,  flits)) == ESUCCESS)
24958 +           {
24959 +               if (elan3_route_check(ctxt, flits, ProcessToLocation (ctxt, seg, process, NULL).loc_node))
24960 +                   res = EINVAL;
24961 +               else
24962 +                   ValidateRoute(dev, ctxt->RouteTable, process);
24963 +           }
24964 +           break;
24965 +       default:
24966 +           res = EINVAL;
24967 +           break;
24968 +       }
24969 +       break;
24970 +
24971 +    case ELAN3_VPSEG_BROADCAST:
24972 +       /* Find the segment that this broadcast range spans. */
24973 +       aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
24974 +       
24975 +       if (aseg == NULL || (aseg->Type != ELAN3_VPSEG_P2P) || !(aseg->SegCapability.cap_type & ELAN_CAP_TYPE_BROADCASTABLE))
24976 +       {
24977 +           PRINTF2 (ctxt, DBG_VP, "resolveVirtualProcess: %d -> EINVAL (%s)\n", process, 
24978 +                    (aseg == NULL ? "no segment" : ((seg->Type != ELAN3_VPSEG_P2P) ? "not point to point" :
24979 +                                                    "not broadcastable")));
24980 +           res = EINVAL;
24981 +           break;
24982 +       }
24983 +       
24984 +       switch (aseg->SegCapability.cap_type & ELAN_CAP_TYPE_MASK)
24985 +       {
24986 +       case ELAN_CAP_TYPE_BLOCK:
24987 +       case ELAN_CAP_TYPE_CYCLIC:
24988 +       {
24989 +           ELAN_LOCATION lowNode  = ProcessToLocation (ctxt,aseg,seg->SegLowProc  , NULL);
24990 +           ELAN_LOCATION highNode = ProcessToLocation (ctxt,aseg,seg->SegHighProc , NULL);
24991 +
24992 +
24993 +           if ((res = elan_validate_map (&ctxt->Capability,&aseg->SegCapability)) == ESUCCESS &&
24994 +               (res=GetRoute(dev, ctxt->RouteTable ,process,  flits)) == ESUCCESS)
24995 +           {
24996 +               if (elan3_route_broadcast_check(ctxt,flits, lowNode.loc_node , highNode.loc_node ) != ELAN3_ROUTE_SUCCESS )
24997 +                   res = EINVAL;
24998 +               else
24999 +                   ValidateRoute(dev, ctxt->RouteTable, process);
25000 +           }
25001 +           break;
25002 +       }
25003 +
25004 +       default:
25005 +           res = EINVAL;
25006 +           break;
25007 +       } break;        /* must not fall through to default: it would clobber res with EINVAL (cf. P2P case above) */
25008 +    default:
25009 +       res  = EINVAL;
25010 +       break;
25011 +    }
25012 +
25013 +    krwlock_done (&ctxt->VpLock);
25014 +    return (res);
25015 +}        
25016 +
25017 +void
25018 +UnloadVirtualProcess (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap)
25019 +{
25020 +    ELAN3_DEV        *dev  = ctxt->Device;
25021 +    ELAN3_VPSEG      *seg;
25022 +    ELAN_CAPABILITY *scap;
25023 +    int              i;
25024 +
25025 +    for (seg = ctxt->VpSegs; seg; seg = seg->Next)
25026 +    {
25027 +       switch (seg->Type)
25028 +       {
25029 +       case ELAN3_VPSEG_P2P:
25030 +           scap = &seg->SegCapability;
25031 +           
25032 +           if (cap == NULL || ELAN_CAP_MATCH (scap, cap))
25033 +           {
25034 +               PRINTF2 (ctxt, DBG_VP, "unloadvp: segment [%x.%x]\n", 
25035 +                        seg->Process, seg->Process + seg->Entries-1);
25036 +               
25037 +               for (i = 0; i < seg->Entries; i++)
25038 +                   InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
25039 +           }
25040 +           break;
25041 +
25042 +       case ELAN3_VPSEG_BROADCAST:
25043 +           for (i = 0; i < seg->Entries; i++)
25044 +           {
25045 +               ELAN3_VPSEG *aseg = FindSegment (ctxt, seg->SegLowProc, seg->SegHighProc);
25046 +               
25047 +               if (aseg != NULL && ELAN_CAP_MATCH(&aseg->SegCapability, cap))
25048 +               {
25049 +                   PRINTF1 (ctxt, DBG_VP, "unloadvp: broadcast vp %d\n", seg->Process);
25050 +               
25051 +                   InvalidateRoute (dev, ctxt->RouteTable, seg->Process+i);
25052 +               }
25053 +           }
25054 +       }
25055 +    }
25056 +}
25057 +
25058 +caddr_t
25059 +CapabilityString (ELAN_CAPABILITY *cap)
25060 +{
25061 +#define CAPSTR_LEN     200
25062 +#define NCAPSTRS       4
25063 +    static char       space[CAPSTR_LEN*NCAPSTRS];
25064 +    static int        bufnum;
25065 +    static spinlock_t lock;
25066 +    static int       lockinitialised;
25067 +    int                      num;
25068 +    unsigned long     flags;
25069 +
25070 +    if (! lockinitialised)
25071 +    {
25072 +       spin_lock_init (&lock);
25073 +       lockinitialised = 1;
25074 +    }
25075 +
25076 +    spin_lock_irqsave (&lock, flags);
25077 +    
25078 +    if ((num = ++bufnum) == NCAPSTRS)
25079 +       num = bufnum = 0;
25080 +    spin_unlock_irqrestore (&lock, flags);
25081 +
25082 +    sprintf (space + (num * CAPSTR_LEN), "%4x %4x %4x %4x %4x %4x %4x [%x.%x.%x.%x]", cap->cap_type,
25083 +            cap->cap_lownode, cap->cap_highnode, 
25084 +            cap->cap_lowcontext, cap->cap_mycontext, cap->cap_highcontext,  ELAN_CAP_ENTRIES(cap),
25085 +            cap->cap_userkey.key_values[0],  cap->cap_userkey.key_values[1],
25086 +            cap->cap_userkey.key_values[2],  cap->cap_userkey.key_values[3]);
25087 +
25088 +    return (space + (num * CAPSTR_LEN));
25089 +}
25090 +
25091 +
25092 +/*
25093 + * Local variables:
25094 + * c-file-style: "stroustrup"
25095 + * End:
25096 + */
25097 diff -urN clean/drivers/net/qsnet/elan4/debug.c linux-2.6.9/drivers/net/qsnet/elan4/debug.c
25098 --- clean/drivers/net/qsnet/elan4/debug.c       1969-12-31 19:00:00.000000000 -0500
25099 +++ linux-2.6.9/drivers/net/qsnet/elan4/debug.c 2005-03-23 06:06:15.000000000 -0500
25100 @@ -0,0 +1,146 @@
25101 +/*
25102 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
25103 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
25104 + * 
25105 + *    For licensing information please see the supplied COPYING file
25106 + *
25107 + */
25108 +
25109 +#ident "@(#)$Id: debug.c,v 1.17 2005/03/23 11:06:15 david Exp $"
25110 +/*      $Source: /cvs/master/quadrics/elan4mod/debug.c,v $*/
25111 +
25112 +#include <qsnet/kernel.h>
25113 +
25114 +#include <elan4/debug.h>
25115 +#include <elan4/device.h>
25116 +
25117 +unsigned       elan4_debug           = 0;
25118 +unsigned       elan4_debug_toconsole = 0;
25119 +unsigned       elan4_debug_tobuffer  = DBG_ALL;
25120 +
25121 +unsigned       elan4_debug_display_ctxt;
25122 +unsigned       elan4_debug_ignore_ctxt;
25123 +unsigned       elan4_debug_ignore_type;
25124 +
25125 +void
25126 +elan4_debug_init()
25127 +{
25128 +    if ((elan4_debug & elan4_debug_tobuffer) != 0)
25129 +       qsnet_debug_alloc();
25130 +}
25131 +
25132 +void
25133 +elan4_debug_fini()
25134 +{
25135 +}
25136 +
25137 +void
25138 +elan4_debugf (void *type, int mode, char *fmt,...)
25139 +{
25140 +    char    prefix[128];
25141 +    int     where = 0;
25142 +    va_list ap;
25143 +
25144 +    if ((mode & elan4_debug_tobuffer) != 0 || type == DBG_BUFFER)
25145 +       where |= QSNET_DEBUG_BUFFER;
25146 +    if ((mode & elan4_debug_toconsole) != 0 || type == DBG_CONSOLE)
25147 +       where |= QSNET_DEBUG_CONSOLE;
25148 +
25149 +    if (where == 0)
25150 +       return;
25151 +    
25152 +    if ((unsigned long) type > DBG_NTYPES)
25153 +    {
25154 +       ELAN4_CTXT *ctxt = (ELAN4_CTXT *) type;
25155 +
25156 +        if (elan4_debug_display_ctxt && ctxt->ctxt_num != elan4_debug_display_ctxt)
25157 +            return;
25158 +        if (elan4_debug_ignore_ctxt  && ctxt->ctxt_num == elan4_debug_ignore_ctxt)
25159 +            return;
25160 +
25161 +       sprintf (prefix, "[%08ld.%04d] elan4 (%03x) ", lbolt,  current->pid, ctxt->ctxt_num);
25162 +    }
25163 +    else if ((unsigned long) type == (int) DBG_CONSOLE)
25164 +       prefix[0] = '\0';
25165 +    else
25166 +    {
25167 +       char *what;
25168 +
25169 +       if (elan4_debug_ignore_type & (1 << ((unsigned long) type)))
25170 +           return;
25171 +
25172 +       switch ((unsigned long) type)
25173 +       {
25174 +       case (int) DBG_DEVICE: what = "dev"; break;
25175 +       case (int) DBG_USER:   what = "usr"; break;
25176 +       default:               what = NULL; break;
25177 +       }
25178 +           
25179 +       if (what)
25180 +           sprintf (prefix, "[%08ld.%04d] elan4 [%s] ", lbolt, current->pid, what);
25181 +       else
25182 +           sprintf (prefix, "[%08ld.%04d] elan4 [%3d] ", lbolt, current->pid, (int)(long)type);
25183 +    }
25184 +
25185 +    va_start(ap,fmt);
25186 +    qsnet_vdebugf (where, prefix, fmt, ap);
25187 +    va_end (ap);
25188 +}
25189 +
25190 +int
25191 +elan4_assfail (ELAN4_CTXT *ctxt, const char *ex, const char *func, const char *file, const int line)
25192 +{
25193 +    qsnet_debugf (QSNET_DEBUG_BUFFER, "elan%d: assertion failure: %s, function: %s, file %s, line: %d\n", 
25194 +                 ctxt->ctxt_dev->dev_instance, ex, func, file, line);
25195 +
25196 +    printk (KERN_EMERG "elan%d: assertion failure: %s, function: %s, file %s, line: %d\n", 
25197 +                  ctxt->ctxt_dev->dev_instance, ex, func, file, line);
25198 +
25199 +    if (panicstr)
25200 +       return 0;
25201 +    
25202 +    if (assfail_mode & 1)                              /* return to BUG() */
25203 +       return 1;
25204 +    
25205 +    if (assfail_mode & 2)
25206 +       panic ("elan%d: assertion failure: %s, function: %s, file %s, line: %d\n", 
25207 +              ctxt->ctxt_dev->dev_instance, ex, func, file, line);
25208 +
25209 +    if (assfail_mode & 4)
25210 +       elan4_debug = 0;
25211 +    
25212 +    return 0;
25213 +    
25214 +}
25215 +
25216 +int
25217 +elan4_debug_trigger (ELAN4_CTXT *ctxt, const char *func, const char *file, const int line, const char *fmt, ...)
25218 +{
25219 +    va_list ap;
25220 +
25221 +    va_start (ap, fmt);
25222 +    qsnet_vdebugf (QSNET_DEBUG_CONSOLE|QSNET_DEBUG_BUFFER, "", fmt, ap);
25223 +    va_end (ap);
25224 +
25225 +    printk (KERN_EMERG "elan%d: debug trigger: function: %s, file %s, line: %d\n", ctxt->ctxt_dev->dev_instance, func, file, line);
25226 +
25227 +    if (panicstr)
25228 +       return 0;
25229 +    
25230 +    if (assfail_mode & 1)                              /* return to BUG() */
25231 +       return 1;
25232 +    
25233 +    if (assfail_mode & 2)
25234 +       panic ("elan%d: debug trigger: function: %s, file %s, line: %d\n", ctxt->ctxt_dev->dev_instance, func, file, line);
25235 +
25236 +    if (assfail_mode & 4)
25237 +       elan4_debug = 0;
25238 +    
25239 +    return 0;
25240 +}
25241 +
25242 +/*
25243 + * Local variables:
25244 + * c-file-style: "stroustrup"
25245 + * End:
25246 + */
25247 diff -urN clean/drivers/net/qsnet/elan4/device.c linux-2.6.9/drivers/net/qsnet/elan4/device.c
25248 --- clean/drivers/net/qsnet/elan4/device.c      1969-12-31 19:00:00.000000000 -0500
25249 +++ linux-2.6.9/drivers/net/qsnet/elan4/device.c        2005-08-09 05:57:04.000000000 -0400
25250 @@ -0,0 +1,3127 @@
25251 +/*
25252 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
25253 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
25254 + * 
25255 + *    For licensing information please see the supplied COPYING file
25256 + *
25257 + */
25258 +
25259 +#ident "@(#)$Id: device.c,v 1.106.2.5 2005/08/09 09:57:04 mike Exp $"
25260 +/*      $Source: /cvs/master/quadrics/elan4mod/device.c,v $*/
25261 +
25262 +#include <qsnet/kernel.h>
25263 +#include <qsnet/kthread.h>
25264 +
25265 +#include <elan4/sdram.h>
25266 +#include <elan4/debug.h>
25267 +#include <elan4/device.h>
25268 +#include <elan4/commands.h>
25269 +#include <elan4/trtype.h>
25270 +#include <elan4/neterr.h>
25271 +
25272 +#include <elan4/i2c.h>
25273 +#include <elan3/vpd.h>
25274 +
25275 +/* allow this code to compile against an Eagle elanmod */
25276 +#ifdef __ELANMOD_DEVICE_H
25277 +#define ELAN_DEV_OPS           ELANMOD_DEV_OPS
25278 +#define ELAN_DEV_OPS_VERSION   ELANMOD_DEV_OPS_VERSION
25279 +#define elan_dev_register      elanmod_dev_register
25280 +#define elan_dev_deregister    elanmod_dev_deregister
25281 +#endif
25282 +
25283 +/* XXXX configurational defines */
25284 +
25285 +#if defined (CONFIG_MPSAS)
25286 +#define HASH_0_SIZE_VAL                        (12 + 6)
25287 +#define HASH_1_SIZE_VAL                        (2 + 6)
25288 +#define CTXT_TABLE_SHIFT               8
25289 +#define LN2_MAX_CQS                    8               /* 256 */
25290 +#else
25291 +#define HASH_0_SIZE_VAL                        (13 + 6)
25292 +#define HASH_1_SIZE_VAL                        (2 + 6)
25293 +#define CTXT_TABLE_SHIFT               12
25294 +#define LN2_MAX_CQS                    10              /* 1024 */
25295 +#endif
25296 +
25297 +unsigned int elan4_hash_0_size_val       = HASH_0_SIZE_VAL;
25298 +unsigned int elan4_hash_1_size_val       = HASH_1_SIZE_VAL;
25299 +unsigned int elan4_ctxt_table_shift      = CTXT_TABLE_SHIFT;
25300 +unsigned int elan4_ln2_max_cqs           = LN2_MAX_CQS;
25301 +unsigned int elan4_dmaq_highpri_size     = 2;                  /* 8192 entries */
25302 +unsigned int elan4_threadq_highpri_size  = 1;                  /* 1024 entries */
25303 +unsigned int elan4_dmaq_lowpri_size      = 2;                  /* 8192 entries */
25304 +unsigned int elan4_threadq_lowpri_size   = 1;                  /* 1024 entries */
25305 +unsigned int elan4_interruptq_size       = 0;                  /* 1024 entries */
25306 +unsigned int elan4_mainint_punt_loops    = 1;
25307 +unsigned int elan4_mainint_resched_ticks = 0;
25308 +unsigned int elan4_linkport_lock        = 0xbe0fcafe;          /* default link port lock */
25309 +unsigned int elan4_eccerr_recheck        = 1;
25310 +
25311 +static int 
25312 +elan4_op_get_position (void *arg, ELAN_POSITION *ptr)
25313 +{
25314 +    ELAN4_DEV     *dev = (ELAN4_DEV *)arg;
25315 +    ELAN_POSITION  pos;
25316 +
25317 +    elan4_get_position (dev, &pos);
25318 +
25319 +    return copyout (&pos, ptr, sizeof (ELAN_POSITION));
25320 +}
25321 +
25322 +static int 
25323 +elan4_op_set_position (void *arg, unsigned short nodeid, unsigned short numnodes)
25324 +{
25325 +    /* XXXXX 
25326 +
25327 +       ELAN4_DEV *dev = (ELAN4_DEV *) arg;
25328 +
25329 +       compute_position (&pos, nodeid, numnode, num_down_links_value);
25330 +
25331 +       return elan4_set_position (dev, pos);
25332 +    */
25333 +    return EINVAL;
25334 +}
25335 +
25336 +ELAN_DEV_OPS elan4_dev_ops = 
25337 +{
25338 +    elan4_op_get_position,
25339 +    elan4_op_set_position,
25340 +
25341 +    ELAN_DEV_OPS_VERSION
25342 +};
25343 +
25344 +static E4_uint32
25345 +elan4_read_filter (ELAN4_DEV *dev, unsigned networkctx)
25346 +{
25347 +    return (elan4_sdram_readl (dev, dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) + 
25348 +                              offsetof (E4_ContextControlBlock, Filter)));
25349 +}
25350 +
25351 +static void
25352 +elan4_write_filter (ELAN4_DEV *dev, unsigned networkctx, E4_uint32 value)
25353 +{
25354 +    elan4_sdram_writel (dev, (dev->dev_ctxtable + (networkctx * sizeof (E4_ContextControlBlock)) +
25355 +                       offsetof (E4_ContextControlBlock, Filter)), value);
25356 +    pioflush_sdram(dev);
25357 +}
25358 +
25359 +void
25360 +elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg)
25361 +{
25362 +    E4_uint32 setbits  = 0;
25363 +    E4_uint32 intmask  = 0;
25364 +    E4_uint32 haltmask;
25365 +    E4_uint32 next_sched;
25366 +    E4_uint32 next_intmask;
25367 +    unsigned long flags;
25368 +
25369 +    spin_lock_irqsave (&dev->dev_intmask_lock, flags);
25370 +
25371 +    haltmask = (dev->dev_haltop_mask | dev->dev_haltop_active);
25372 +
25373 +    if ((haltmask & INT_DProcHalted) || dev->dev_halt_all_count || dev->dev_halt_dproc_count)
25374 +       setbits |= SCH_DProcHalt;
25375 +    
25376 +    if ((haltmask & INT_TProcHalted) || dev->dev_halt_all_count || dev->dev_halt_tproc_count)
25377 +       setbits |= SCH_TProcHalt;
25378 +
25379 +    if ((haltmask & INT_CProcHalted) || dev->dev_halt_all_count || dev->dev_halt_cproc_count)
25380 +       setbits |= SCH_CProcHalt;
25381 +
25382 +    if ((haltmask & INT_DiscardingLowPri) || dev->dev_discard_all_count || dev->dev_discard_lowpri_count)
25383 +       setbits |= SCH_DiscardLowPriInput;
25384 +    
25385 +    if ((haltmask & INT_DiscardingHighPri) || dev->dev_discard_all_count || dev->dev_discard_highpri_count)
25386 +       setbits |= SCH_DiscardHighPriInput;
25387 +    
25388 +    if (dev->dev_halt_lowpri_count)
25389 +       setbits |= SCH_StopLowPriQueues;
25390 +    
25391 +    if (haltmask & INT_DProcHalted) intmask |= INT_DProcHalted;
25392 +    if (haltmask & INT_TProcHalted) intmask |= INT_TProcHalted;
25393 +    if (haltmask & INT_CProcHalted) intmask |= INT_CProcHalted;
25394 +    if (haltmask & INT_DiscardingLowPri) intmask |= INT_DiscardingLowPri;
25395 +    if (haltmask & INT_DiscardingHighPri) intmask |= INT_DiscardingHighPri;
25396 +
25397 +    next_intmask = (dev->dev_intmask     & ~(INT_Halted | INT_Discarding)) | (intmask & ~intreg);
25398 +    next_sched   = (dev->dev_schedstatus & ~(SCH_Halt | SCH_Discard))      | setbits;
25399 +
25400 +    PRINTF5 (DBG_DEVICE, DBG_REGISTER, "elan4_set_schedstatus: haltmask=%x setbits=%x intmask=%x next_sched=%x next_intmask=%x\n",
25401 +            haltmask, setbits, intmask, next_sched, next_intmask);
25402 +
25403 +    CHANGE_INT_MASK (dev, next_intmask);
25404 +    CHANGE_SCHED_STATUS (dev, next_sched);
25405 +
25406 +    spin_unlock_irqrestore (&dev->dev_intmask_lock, flags);
25407 +}
25408 +
25409 +int
25410 +elan4_route2str (E4_VirtualProcessEntry *route, char *routeStr)
25411 +{
25412 +    int        part = 0;
25413 +    int        shift;
25414 +    int        broadcast;
25415 +    E4_uint64  value;
25416 +    char      *ptr = routeStr;
25417 +    int        b;
25418 +
25419 +    /* unpack first */
25420 +    value = route->Values[part] & 0x7f;
25421 +    if ( (value & 0x78) == 0) {
25422 +        /* empty route */
25423 +        strcpy(routeStr,"Invalid lead route");
25424 +        return (-EINVAL);
25425 +    }
25426 +
25427 +    if ( value & 0x40 ) {
25428 +        /* broad cast */
25429 +       strcpy(routeStr,"Broadcast");
25430 +       return (-EINVAL);
25431 +    } else {
25432 +        switch ((value  & 0x30) >> 4) {
25433 +        case 0: { *ptr++ = '0' + (value & 0x7); break; }
25434 +        case 1: { *ptr++ = 'M';                 break; }
25435 +        case 2: { *ptr++ = 'U';                 break; }
25436 +        case 3: { *ptr++ = 'A';                 break; }
25437 +        }
25438 +    }
25439 +
25440 +    shift = 16;
25441 +    broadcast = 0;
25442 +    while ( 1 ) {
25443 +        b =  (route->Values[part] >> shift) & 0xf;
25444 +
25445 +        if ( broadcast ) {
25446 +            /* about to pick up the second byte of a broadcast pair */
25447 +            broadcast = 0;
25448 +        } else {
25449 +            if ( b & 0x8) {
25450 +                /*  output link */
25451 +                 *ptr++ = '0' + (b & 0x7);
25452 +            } else {
25453 +                if ( b & 0x4) {
25454 +                    /* broad cast */
25455 +                    broadcast = 1;
25456 +                } else {
25457 +                    switch ( b & 0x3 ) {
25458 +                    case 0: { *ptr++ =  0 ; return (0);     break; }
25459 +                    case 1: { *ptr++ = 'M';                 break; }
25460 +                    case 2: { *ptr++ = 'U';                 break; }
25461 +                    case 3: { *ptr++ = 'A';                 break; }
25462 +                    }
25463 +                }
25464 +            }
25465 +        }
25466 +
25467 +        shift += 4; 
25468 +        if ( part != 0 ) {
25469 +            if ( shift > 36) {
25470 +                /* too far, now in the crc value */
25471 +                strcpy(routeStr,"Invalid route length");
25472 +                return (-EINVAL);
25473 +            }
25474 +        } else { 
25475 +            if ( shift >= 64) { 
25476 +                /* move to the next 64 bits */
25477 +                part = 1;
25478 +                shift = 2;
25479 +            }
25480 +        }
25481 +    }
25482 +
25483 +    /* never reached */
25484 +    return (-EINVAL);
25485 +}
25486 +
25487 +static int elan4_hardware_lock_count = 0;
25488 +
25489 +void
25490 +elan4_hardware_lock_check(ELAN4_DEV *dev, char *from)
25491 +{
25492 +
25493 +    int reg = read_reg32 (dev, CommandSchedDataPort[2]);
25494 +
25495 +    /* dont spam too much */
25496 +    if ( elan4_hardware_lock_count++ > 10)  return;
25497 +
25498 +    printk ("elan%d: %s timed out intmask=0x%x InterruptReg=0x%x (%d)\n", dev->dev_instance, from, dev->dev_intmask, read_reg32 (dev, InterruptReg), elan4_hardware_lock_count);
25499 +
25500 +    /* an 0xF in either and we need to output more */
25501 +    if ((reg & 0xf0) || ( reg & 0x0f)) {
25502 +       ELAN4_ROUTE_RINGBUF *ringbuf;
25503 +       char                 routestr[33];
25504 +       
25505 +       printk ("elan%d: CommandSchedDataPort[0] 0x%016x 0x%016x 0x%016x 0x%016x\n", 
25506 +               dev->dev_instance, 
25507 +               read_reg32 (dev, CommandSchedDataPort[0]),
25508 +               read_reg32 (dev, CommandSchedDataPort[1]),
25509 +               reg,
25510 +               read_reg32 (dev, CommandSchedDataPort[3])
25511 +           );
25512 +       /* dump out /proc/qsnet/elan4/deviceN/stats/cproctimeoutroutes */
25513 +       printk ("elan%d: cat of /proc/qsnet/elan4/device%d/stats/cproctimeoutroutes\n", dev->dev_instance, dev->dev_instance);
25514 +
25515 +       ringbuf = &dev->dev_cproc_timeout_routes;
25516 +       
25517 +       if (!ringbuf) 
25518 +           printk ("elan%d: No stats available\n", dev->dev_instance);
25519 +       else
25520 +       {
25521 +           int start;
25522 +           int end;
25523 +           int i;
25524 +           
25525 +           memset(&routestr, 0, 33);
25526 +           
25527 +           start = ringbuf->start;
25528 +           end = ringbuf->end;
25529 +           
25530 +           if (end < start)
25531 +               end = DEV_STASH_ROUTE_COUNT;
25532 +           
25533 +           for (i=start; i<end; i++) 
25534 +           {
25535 +               elan4_route2str (&ringbuf->routes[i], routestr);
25536 +               printk ( "elan%d: Route %llx %llx->%s\n", dev->dev_instance, (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
25537 +           }
25538 +           
25539 +           if (ringbuf->end < start)
25540 +           {
25541 +               start = 0;
25542 +               end = ringbuf->end;
25543 +               for (i=start; i<end; i++)
25544 +               {
25545 +                   elan4_route2str (&ringbuf->routes[i], routestr);
25546 +                   printk ( "elan%d: Route %llx %llx->%s\n", dev->dev_instance, (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
25547 +               }
25548 +           }
25549 +           
25550 +       }    
25551 +    }
25552 +}
25553 +static void
25554 +dev_haltop_timer_func (unsigned long arg)
25555 +{
25556 +    ELAN4_DEV *dev  = (ELAN4_DEV *) arg;
25557 +
25558 +    elan4_hardware_lock_check(dev,"haltop");
25559 +}
25560 +
25561 +void
25562 +elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op)
25563 +{
25564 +    unsigned long flags;
25565 +
25566 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25567 +
25568 +    /* add to the end of the halt operations list */
25569 +    list_add_tail (&op->op_link, &dev->dev_haltop_list);
25570 +
25571 +    if ((dev->dev_haltop_mask & op->op_mask) != op->op_mask)
25572 +    {
25573 +       dev->dev_haltop_mask |= op->op_mask;
25574 +       
25575 +       elan4_set_schedstatus (dev, 0);
25576 +    }
25577 +
25578 +    mod_timer (&dev->dev_haltop_timer, (jiffies + (HZ*10))); /* 10 seconds */
25579 +
25580 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25581 +}
25582 +
25583 +void
25584 +elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op)
25585 +{
25586 +    unsigned long flags;
25587 +
25588 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
25589 +
25590 +    op->op_cookie = INTOP_ONESHOT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
25591 +
25592 +    list_add_tail (&op->op_link, &dev->dev_intop_list);
25593 +
25594 +    writeq ((op->op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, (void *)(cq->cq_mapping));
25595 +
25596 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
25597 +}
25598 +
25599 +void
25600 +elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
25601 +{
25602 +    unsigned long flags;
25603 +
25604 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
25605 +
25606 +    op->op_cookie = INTOP_PERSISTENT | ((dev->dev_intop_cookie++) & INTOP_VALUE_MASK);
25607 +
25608 +    list_add_tail (&op->op_link, &dev->dev_intop_list);
25609 +
25610 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
25611 +}
25612 +
25613 +void
25614 +elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op)
25615 +{
25616 +    unsigned long flags;
25617 +
25618 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
25619 +    list_del (&op->op_link);
25620 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
25621 +}
25622 +
25623 +static __inline__ void
25624 +__issue_dma_flushop_cmd (ELAN4_DEV *dev, ELAN4_CQ *cq)
25625 +{
25626 +    E4_uint64 eventaddr = dev->dev_tproc_space + 64;
25627 +
25628 +    writeq (WAIT_EVENT_CMD | eventaddr,   (void *)(cq->cq_mapping));
25629 +    writeq (0,                            (void *)(cq->cq_mapping));
25630 +    writeq (0,                            (void *)(cq->cq_mapping));
25631 +    writeq (0,                            (void *)(cq->cq_mapping));
25632 +
25633 +    writeq (DMA_ShMemWrite | RUN_DMA_CMD, (void *)(cq->cq_mapping));
25634 +    writeq (0 /* cookie */,               (void *)(cq->cq_mapping));
25635 +    writeq (0 /* vproc */,                (void *)(cq->cq_mapping));
25636 +    writeq (0 /* srcAddr */,              (void *)(cq->cq_mapping));
25637 +    writeq (0 /* dstAddr */,              (void *)(cq->cq_mapping));
25638 +    writeq (0 /* srcEvent */,             (void *)(cq->cq_mapping));
25639 +    writeq (0 /* dstEvent */,             (void *)(cq->cq_mapping));
25640 +    writeq (SET_EVENT_CMD,                (void *)(cq->cq_mapping));
25641 +}
25642 +
25643 +static void
25644 +handle_dma_flushops_intop (ELAN4_DEV *dev, void *arg)
25645 +{
25646 +    unsigned int  hipri        = ((unsigned long) arg & 1);
25647 +    E4_uint64     status       = dev->dev_dma_flushop[hipri].status;
25648 +    ELAN4_CQ     *cq           = dev->dev_dma_flushop[hipri].cq;
25649 +    sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
25650 +    E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
25651 +    E4_uint32     completedPtr = CQ_CompletedPtr(queuePtrs);
25652 +    E4_uint32     size         = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
25653 +    unsigned long flags;
25654 +
25655 +    /*
25656 +     * Since we're called from a main interrupt which was issued through the appropriate
25657 +     * flushcq the command queue descriptor for dma flushing can no longer be in the 
25658 +     * insert cache, nor can it be in the extractor (as it's trapped), hence it is
25659 +     * safe to modify the completed pointer
25660 +     */
25661 +
25662 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25663 +
25664 +    ASSERT (status != 0);
25665 +
25666 +    /* skip over either the DMA/SETEVENT or just the SETEVENT depending on the trap type */
25667 +    if (CPROC_TrapType (status) == CommandProcDmaQueueOverflow)
25668 +       completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 64) & (size - 1));
25669 +    else
25670 +       completedPtr = (completedPtr & ~(size-1)) | ((completedPtr + 8) & (size - 1));
25671 +    
25672 +    elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs) + 4,
25673 +                       ((queuePtrs >> 32) & ~CQ_PtrOffsetMask) | (completedPtr & CQ_PtrOffsetMask));
25674 +    
25675 +    elan4_restartcq (dev, dev->dev_dma_flushop[hipri].cq);
25676 +
25677 +    if (! list_empty (&dev->dev_dma_flushop[hipri].list))
25678 +       __issue_dma_flushop_cmd (dev, dev->dev_dma_flushop[hipri].cq);
25679 +
25680 +    dev->dev_dma_flushop[hipri].status = 0;
25681 +    
25682 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25683 +
25684 +}
25685 +
25686 +static void
25687 +handle_dma_flushops (ELAN4_DEV *dev, E4_uint64 status, int cqnum)
25688 +{
25689 +    unsigned int       hipri  = (cqnum == elan4_cq2num(dev->dev_dma_flushop[1].cq) ? 1 : 0);
25690 +    ELAN4_CQ          *cq     = dev->dev_dma_flushop[hipri].cq;
25691 +    ELAN4_CQ          *flushq = dev->dev_flush_cq[elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1)];
25692 +    struct list_head  *ops;
25693 +    unsigned long      flags;
25694 +    int                       qfull,count;
25695 +    E4_uint64         queuePtrs;
25696 +    LIST_HEAD(list);
25697 +    
25698 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25699 +    
25700 +    ASSERT (cqnum == elan4_cq2num (dev->dev_dma_flushop[hipri].cq));
25701 +    ASSERT (! list_empty (&dev->dev_dma_flushop[hipri].list));
25702 +    ASSERT (dev->dev_dma_flushop[hipri].status == 0);
25703 +    
25704 +    /* remove the whole list */
25705 +    ops = dev->dev_dma_flushop[hipri].list.next;
25706 +
25707 +    list_del_init (&dev->dev_dma_flushop[hipri].list);
25708 +    
25709 +    /* and add it to our local list */
25710 +    list_add_tail (&list, ops);
25711 +    
25712 +    /* now determine whether the queue was full - since it cannot be empty 
25713 +     * then if the front and back pointers are the same then it is full */
25714 +    queuePtrs = hipri ? read_reg64 (dev, DProcHighPriPtrs) : read_reg64 (dev, DProcLowPriPtrs);
25715 +    qfull     = (E4_QueueFrontPointer (queuePtrs) == E4_QueueBackPointer (queuePtrs));
25716 +    
25717 +    if (CPROC_TrapType(status) == CommandProcDmaQueueOverflow && !qfull)
25718 +       printk (" ******* queue overflow trap - but queue not full\n");
25719 +
25720 +    if (qfull && CPROC_TrapType(status) != CommandProcDmaQueueOverflow)
25721 +       printk (" ****** queue full - but not overflow trap : %llx %llx %x\n", 
25722 +               read_reg64 (dev, DProcLowPriPtrs), read_reg64 (dev, DProcHighPriPtrs), CPROC_TrapType(status));
25723 +
25724 +    /* Store the status register, this also indicates that the intop is pending */
25725 +    dev->dev_dma_flushop[hipri].status = status;
25726 +
25727 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25728 +
25729 +    /* Issue a main interrupt command to the appropriate flush command queue,
25730 +     * which will then safely update the completed pointer to skip over the
25731 +     * command which has trapped, also prevent any new commands to be issued
25732 +     * to the command queue.
25733 +     */
25734 +    dev->dev_dma_flushop[hipri].intop.op_function = handle_dma_flushops_intop;
25735 +    dev->dev_dma_flushop[hipri].intop.op_arg      = (void *) (unsigned long) hipri;
25736 +
25737 +    elan4_queue_intop (dev, flushq, &dev->dev_dma_flushop[hipri].intop);
25738 +    
25739 +    /* now execute all operations */
25740 +    for (count = 0; ! list_empty (&list); count++)
25741 +    {
25742 +       ELAN4_DMA_FLUSHOP *op = list_entry (list.next, ELAN4_DMA_FLUSHOP, op_link);
25743 +       
25744 +       list_del (&op->op_link);
25745 +       
25746 +       (*op->op_function) (dev, op->op_arg, qfull);
25747 +    }
25748 +
25749 +    /* finally release the "reasons" for halting */
25750 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25751 +    if ((dev->dev_halt_dproc_count -= count) == 0)
25752 +       elan4_set_schedstatus (dev, 0);
25753 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25754 +       
25755 +    return;
25756 +}
25757 +
25758 +void
25759 +elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri)
25760 +{
25761 +    unsigned long flags;
25762 +
25763 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
25764 +
25765 +    if (dev->dev_halt_dproc_count++ == 0)                      /* ensure that the DMA processor cannot */
25766 +       elan4_set_schedstatus (dev, 0);                         /* execute the DMA we issue. */
25767 +
25768 +    if (list_empty (&dev->dev_dma_flushop[hipri].list) && dev->dev_dma_flushop[hipri].status == 0)
25769 +       __issue_dma_flushop_cmd (dev, dev->dev_dma_flushop[hipri].cq);
25770 +       
25771 +    list_add_tail (&op->op_link, &dev->dev_dma_flushop[hipri].list);
25772 +
25773 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
25774 +}
25775 +
25776 +static void
25777 +enable_elan_errors (void *arg)
25778 +{
25779 +    ELAN4_DEV *dev = (ELAN4_DEV *) arg;
25780 +
25781 +    ENABLE_INT_MASK (dev, INT_ErrorInterrupts);
25782 +}
25783 +
25784 +#define ERROR_DISABLE_PERIOD   (hz/2)
25785 +#define ERROR_SAMPLE_PERIOD    (hz/10)
25786 +#define ERROR_LIMIT            (100)
25787 +
25788 +static __inline__ void
25789 +check_error_rate (ELAN4_DEV *dev)
25790 +{
25791 +    if (dev->dev_error_time == (lbolt/ERROR_SAMPLE_PERIOD))
25792 +    {
25793 +        if (++dev->dev_errors_per_period >= ERROR_LIMIT && (dev->dev_intmask & INT_ErrorInterrupts))
25794 +       {
25795 +           DISABLE_INT_MASK (dev, INT_ErrorInterrupts);
25796 +           
25797 +           schedule_timer_fn (&dev->dev_error_timeoutid, enable_elan_errors, (void *) dev, ERROR_DISABLE_PERIOD);
25798 +       }
25799 +    }
25800 +    else
25801 +    {
25802 +       dev->dev_error_time        = (lbolt/ERROR_SAMPLE_PERIOD);
25803 +       dev->dev_errors_per_period = 0;
25804 +    }
25805 +}
25806 +
25807 +static __inline__ int
25808 +handle_mainints (ELAN4_DEV *dev, int nticks, int nintr)
25809 +{
25810 +    E4_uint32 nfptr = dev->dev_interruptq_nfptr;
25811 +    E4_uint32 bptr  = read_reg32 (dev, MainIntQueuePtrs.s.Back);
25812 +    E4_uint32 qsize = E4_QueueSize(elan4_interruptq_size);
25813 +    E4_uint32 qmask = qsize - 1;
25814 +    long      tlim  = lbolt + nticks;
25815 +    int       done = 0;
25816 +    unsigned long flags;
25817 +
25818 +    do {
25819 +       int todo  = ((bptr - nfptr) & qmask) / E4_MainIntEntrySize;
25820 +
25821 +       ASSERT (todo > 0);
25822 +
25823 +       PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: fptr %x nfptr %x bptr %x : %d todo\n", 
25824 +                read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr, todo);
25825 +
25826 +       if (nintr >= 0 && (done + todo) > nintr)                /* punt because too may to do in interrupt */
25827 +       {
25828 +           PRINTF4 (DBG_DEVICE, DBG_MAININT, "handle_mainints: punting (done %d todo %d) (bptr %x fptr %x)\n",
25829 +                    done, todo, bptr, read_reg32 (dev, MainIntQueuePtrs.s.Front));
25830 +
25831 +           return 1;
25832 +       }
25833 +
25834 +       BucketDevStat (dev, s_mainints, todo, MainIntBuckets);
25835 +
25836 +       /* consume all the entries in the queue which we think are there */
25837 +       do {
25838 +           E4_uint64   value = elan4_sdram_readq (dev, nfptr);
25839 +           ELAN4_CTXT *ctxt  = elan4_localctxt (dev, E4_MAIN_INT_CTX (value));
25840 +           E4_uint32   fptr  = nfptr;
25841 +
25842 +           PRINTF2 (DBG_DEVICE, DBG_MAININT, "handle_mainints: process cookie %llx - write fptr=%x\n", value, nfptr);
25843 +
25844 +           if (ctxt == NULL)
25845 +               ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_mainints: context %d invalid\n", E4_MAIN_INT_CTX (value));
25846 +           else
25847 +               ctxt->ctxt_ops->op_interrupt (ctxt, E4_MAIN_INT_COOKIE(value));
25848 +
25849 +           /* compute the next queue front pointer, before updating the front pointer
25850 +            * since we need to ensure that elan4_queue_mainintop doesn't see the queue
25851 +            * as being empty if an extra interrupt is queued in between */
25852 +           dev->dev_interruptq_nfptr = nfptr = (nfptr & ~qmask) | ((nfptr + sizeof (E4_uint64)) & qmask);
25853 +    
25854 +           /* update the queue front pointer, doing this will clear the
25855 +            * interrupt for *all* interrupt cookies which have previously 
25856 +            * been added to the queue */
25857 +           write_reg32 (dev, MainIntQueuePtrs.s.Front, E4_QueueFrontValue (fptr, elan4_interruptq_size));
25858 +           pioflush_reg (dev);
25859 +       } while (bptr != nfptr);
25860 +       
25861 +       /* re-sample the back pointer and if it's different from the previous
25862 +        * queue front pointer, then the queue has something on it again */
25863 +       done += todo;
25864 +       
25865 +       if ((nticks > 0 && ((int) (lbolt - tlim)) > 0))         /* been executing for too long in thread */
25866 +           return 1;
25867 +
25868 +       bptr = read_reg32 (dev, MainIntQueuePtrs.s.Back);
25869 +
25870 +       PRINTF3 (DBG_DEVICE, DBG_MAININT, "handle_mainints: resample : fptr %x nfptr %x bptr %x\n", 
25871 +                read_reg32 (dev, MainIntQueuePtrs.s.Front), nfptr, bptr);
25872 +
25873 +       /* at this point we've made some space in the interrupt queue,
25874 +        * so check to see if we've got anything to restart */
25875 +       spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25876 +       while (! list_empty (&dev->dev_interruptq_list))
25877 +       {
25878 +           ELAN4_INTOP *op = list_entry (dev->dev_interruptq_list.next, ELAN4_INTOP, op_link);
25879 +           
25880 +           list_del (&op->op_link);
25881 +
25882 +           op->op_function (dev, op->op_arg);
25883 +       }
25884 +       spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25885 +
25886 +    } while (bptr != nfptr);
25887 +
25888 +    return 0;
25889 +}
25890 +
25891 +static void
25892 +elan4_mainint_thread (ELAN4_DEV *dev)
25893 +{
25894 +    unsigned long flags;
25895 +
25896 +    kernel_thread_init ("elan4_mainint");
25897 +    
25898 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25899 +    for (;;)
25900 +    {
25901 +       if (dev->dev_stop_threads)
25902 +           break;
25903 +       
25904 +       if (! (dev->dev_intmask & INT_MainInterrupt))
25905 +       {
25906 +           spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25907 +           
25908 +           if (handle_mainints (dev, elan4_mainint_resched_ticks, -1))
25909 +               BumpDevStat (dev, s_mainint_rescheds);
25910 +
25911 +           spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25912 +           ENABLE_INT_MASK (dev, INT_MainInterrupt);
25913 +       }
25914 +       
25915 +       kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
25916 +    }
25917 +
25918 +    dev->dev_mainint_stopped = 1;
25919 +    kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
25920 +
25921 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25922 +
25923 +    kernel_thread_exit();
25924 +}
25925 +
25926 +void
25927 +elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op)
25928 +{
25929 +    unsigned long flags;
25930 +
25931 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
25932 +    if (dev->dev_interruptq_nfptr == read_reg32 (dev, MainIntQueuePtrs.s.Back))
25933 +       op->op_function (dev, op->op_arg);
25934 +    else
25935 +       list_add_tail (&op->op_link, &dev->dev_interruptq_list);
25936 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
25937 +}
25938 +
25939 +static __inline__ E4_uint32
25940 +handle_cproc_trap (ELAN4_DEV *dev)
25941 +{
25942 +    E4_uint32   cqptr   = read_reg32 (dev, CommandControl.CommandQueueDescsBase) & E4_QueueDescPtrMask;
25943 +    unsigned    cqnum   = ((cqptr - dev->dev_cqaddr) / sizeof (E4_CommandQueueDesc));
25944 +    sdramaddr_t cqdesc  = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc));
25945 +    E4_uint64   control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
25946 +    E4_uint64   status  = read_reg64 (dev, CProcStatus);
25947 +    ELAN4_CTXT *ctxt    = elan4_localctxt (dev, CQ_Context (control));
25948 +
25949 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "handle_cproc_trap: cqnum=%d status=%016llx control=%016llx TrapType\n", 
25950 +            cqnum, status, control, CPROC_TrapType (status));
25951 +    PRINTF4 (DBG_DEVICE, DBG_INTR, "                   %016llx %016llx %016llx %016llx\n",
25952 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)),
25953 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue)),
25954 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers)),
25955 +            elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control)));
25956 +
25957 +    BumpDevStat (dev, s_cproc_traps);
25958 +
25959 +    if (ctxt == NULL)
25960 +       ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_cproc_trap: context %d is invalid\n", CQ_Context (control));
25961 +    else
25962 +       ctxt->ctxt_ops->op_cproc_trap (ctxt, status, cqnum);
25963 +
25964 +    return (CPROC_TrapType (status) == CommandProcWaitTrap ? SCH_RestartCProc | SCH_RestartEProc : SCH_RestartCProc);
25965 +}
25966 +
25967 +static __inline__ E4_uint32
25968 +handle_dproc_trap (ELAN4_DEV *dev, int unit)
25969 +{
25970 +    E4_uint64   status  = (unit == 0) ? read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status);
25971 +    E4_uint32   restart = (unit == 0) ? SCH_RestartDma0Proc : SCH_RestartDma1Proc;
25972 +    ELAN4_CTXT *ctxt    = elan4_localctxt (dev, DPROC_Context (status));
25973 +    
25974 +    PRINTF3 (DBG_DEVICE, DBG_INTR, "handle_dproc_trap: unit %d context %d%s\n", unit, DPROC_Context(status),
25975 +            DPROC_PrefetcherFault(status) ? " (prefetcher)" : "");
25976 +
25977 +    if (DPROC_PrefetcherFault (status))
25978 +       restart |= SCH_RestartDmaPrefetchProc;
25979 +                     
25980 +    BumpDevStat (dev, s_dproc_traps);
25981 +
25982 +    if (ctxt == NULL)
25983 +       ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_dproc_trap: context %d is invalid\n", DPROC_Context (status));
25984 +    else
25985 +       ctxt->ctxt_ops->op_dproc_trap (ctxt, status, unit);
25986 +
25987 +    return (restart);
25988 +}
25989 +
25990 +static __inline__ E4_uint32
25991 +handle_eproc_trap (ELAN4_DEV *dev)
25992 +{
25993 +    E4_uint64   status = read_reg64 (dev, EProcStatus);
25994 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, EPROC_Context (status));
25995 +
25996 +    BumpDevStat (dev, s_eproc_traps);
25997 +
25998 +    if (ctxt == NULL)
25999 +       ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_eproc_trap: context %d is invalid\n", EPROC_Context (status));
26000 +    else
26001 +       ctxt->ctxt_ops->op_eproc_trap (ctxt, status);
26002 +
26003 +    return (SCH_RestartEProc);
26004 +}
26005 +
26006 +static __inline__ E4_uint32
26007 +handle_tproc_trap (ELAN4_DEV *dev)
26008 +{
26009 +    E4_uint64   status = read_reg64 (dev, TProcStatus);
26010 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, TPROC_Context (status));
26011 +
26012 +    BumpDevStat (dev, s_tproc_traps);
26013 +
26014 +    if (ctxt == NULL)
26015 +       ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_tproc_trap: context %d is invalid\n", TPROC_Context (status));
26016 +    else
26017 +       ctxt->ctxt_ops->op_tproc_trap (ctxt, status);
26018 +    
26019 +    return (SCH_RestartTProc);
26020 +}
26021 +
26022 +static __inline__ void
26023 +handle_haltints (ELAN4_DEV *dev, E4_uint32 intreg)
26024 +{
26025 +    struct list_head  list   = LIST_HEAD_INIT(list);
26026 +    E4_uint32         mask   = 0;
26027 +    E4_uint32         active = 0;
26028 +    struct list_head *entry;
26029 +    struct list_head *next;
26030 +    unsigned long     flags;
26031 +
26032 +    BumpDevStat (dev, s_haltints);
26033 +
26034 +    spin_lock_irqsave (&dev->dev_haltop_lock, flags);
26035 +
26036 +    list_for_each_safe (entry, next, &dev->dev_haltop_list) {
26037 +       ELAN4_HALTOP *op = list_entry (entry, ELAN4_HALTOP, op_link);
26038 +
26039 +       PRINTF (DBG_DEVICE, DBG_INTR, "handle_haltints: op=%p op_mask=%x intreg=%x\n", op, op->op_mask, intreg);
26040 +
26041 +       if ((op->op_mask & intreg) != op->op_mask)
26042 +           mask |= op->op_mask;
26043 +       else
26044 +       {
26045 +           list_del (&op->op_link);                            /* remove from list */
26046 +           list_add_tail (&op->op_link, &list);                /* add to local list */
26047 +
26048 +           active |= op->op_mask;
26049 +       }
26050 +    }
26051 +
26052 +    ASSERT (dev->dev_haltop_mask == (mask | active));
26053 +
26054 +    dev->dev_haltop_mask = mask;
26055 +
26056 +    if (list_empty (&dev->dev_haltop_list)) {
26057 +       del_timer(&dev->dev_haltop_timer);
26058 +    }
26059 +
26060 +    if (list_empty (&list))
26061 +       elan4_set_schedstatus (dev, intreg);
26062 +    else
26063 +    {
26064 +       dev->dev_haltop_active = active;
26065 +       spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
26066 +
26067 +       while (! list_empty (&list)) 
26068 +       {
26069 +           ELAN4_HALTOP *op = list_entry (list.next, ELAN4_HALTOP, op_link);
26070 +           
26071 +           list_del (&op->op_link);
26072 +
26073 +           (*op->op_function) (dev, op->op_arg);
26074 +       }
26075 +
26076 +       spin_lock_irqsave (&dev->dev_haltop_lock, flags);
26077 +       dev->dev_haltop_active = 0;
26078 +
26079 +       elan4_set_schedstatus (dev, 0);
26080 +    }
26081 +
26082 +    spin_unlock_irqrestore (&dev->dev_haltop_lock, flags);
26083 +}
26084 +
26085 +static __inline__ E4_uint32
26086 +handle_iproc_trap (ELAN4_DEV *dev, unsigned unit)
26087 +{
26088 +    sdramaddr_t hdroff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
26089 +    E4_uint64   status = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, IProcStatusCntxAndTrType));
26090 +    E4_uint32   filter = elan4_read_filter (dev, IPROC_NetworkContext (status));
26091 +    ELAN4_CTXT *ctxt   = elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK);
26092 +
26093 +    /*
26094 +     * The context is not valid in the following case :
26095 +     *     ack not been sent AND bad CRC/bad length.
26096 +     *
26097 +     *  NOTE TransCRCStatus and BadLength only valid if NOT an EopTrap.
26098 +     */
26099 +    ASSERT ((IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status))) || IPROC_EOPTrap (status) ||
26100 +           (IPROC_TransCRCStatus (status) == CRC_STATUS_GOOD && !IPROC_BadLength (status)));
26101 +    
26102 +    BumpDevStat (dev, s_iproc_traps);
26103 +
26104 +    if (ctxt == NULL)
26105 +    {
26106 +       ELAN4_DEBUG_TRIGGER (&dev->dev_ctxt, "elan4:handle_iproc_trap: network %d context %d (%x) is invalid\n", IPROC_NetworkContext (status), 
26107 +                            filter & E4_FILTER_CONTEXT_MASK, filter);
26108 +       
26109 +       elan4_write_filter (dev, IPROC_NetworkContext (status), E4_FILTER_DISCARD_ALL);
26110 +    }
26111 +    else
26112 +       ctxt->ctxt_ops->op_iproc_trap (ctxt, status, unit);
26113 +
26114 +    return (SCH_RestartCh0LowPriInput << unit);
26115 +}
26116 +
26117 +void
26118 +handle_pcimemerr (ELAN4_DEV *dev)
26119 +{
26120 +    elan4_pcierror (dev);
26121 +
26122 +    check_error_rate (dev);
26123 +}
26124 +
26125 +void
26126 +handle_sdramint (ELAN4_DEV *dev)
26127 +{
26128 +    E4_uint64 status    = read_reg64 (dev, SDRamECCStatus);
26129 +    E4_uint64 ConfigRegValue = read_reg64 (dev, SDRamConfigReg);
26130 +    char      errstr[200];
26131 +    int              i;
26132 +    int              Found = 0;
26133 +
26134 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_sdramint\n");
26135 +
26136 +    printk ("elan%d: ECC Error %s status=%llx\n",
26137 +           dev->dev_instance, elan4_sdramerr2str (dev, status, ConfigRegValue, errstr), (long long)status);
26138 +
26139 +    if (!ECC_UncorrectableErr(status) && !ECC_MultUncorrectErrs(status))
26140 +       printk ("elan%d: ECC error data=%016llx\n", dev->dev_instance, elan4_sdram_readq (dev, ECC_Addr(status)));
26141 +
26142 +    if (ECC_CorrectableErr (status))
26143 +       BumpDevStat (dev, s_correctable_errors);
26144 +    if (ECC_MultCorrectErrs (status))
26145 +       BumpDevStat (dev, s_multiple_errors);
26146 +
26147 +    if (ECC_UncorrectableErr(status))
26148 +       panic ("elan%d: uncorrectable ECC error\n", dev->dev_instance);
26149 +    if (ECC_MultUncorrectErrs(status))
26150 +       panic ("elan%d: muliple uncorrectable ECC error\n", dev->dev_instance);
26151 +    
26152 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_SDRAM_ERROR);
26153 +
26154 +    /*
26155 +     * Now try to test for a read/write error type.
26156 +     * This can only be done if it was a correctable error as an uncorrectable error might lock up the node.
26157 +     * It should not be attempted if the data is in the dcache because fetching again would not generate an
26158 +     * error even if the problem was a read, and flushing the cache line would fix a write problem.
26159 +     * Reading the same location again should cause a new error if the problem was caused by a bad write.
26160 +     */
26161 +    if (elan4_eccerr_recheck &&
26162 +       (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA) &&
26163 +        ECC_CorrectableErr(status) && !ECC_UncorrectableErr(status))
26164 +    {
26165 +       E4_uint64 status2;
26166 +       E4_uint64 Addr = ECC_Addr(status) & ~(E4_CACHELINE_SIZE-1);
26167 +       E4_uint32 SetIndex = (Addr >> 6) & ~(E4_NumCacheLines-1);
26168 +       int       InCache = 0;
26169 +
26170 +       /* check the cache tags to see if the data has been read into a cache line. */
26171 +       for (i=0; i<E4_NumCacheSets; i++)
26172 +          if (((E4_uint32)__elan4_readq (dev, dev->dev_regs + offsetof(E4_Registers, Tags.Tags[i][SetIndex].Value)) & 0x7fffe000) == (Addr & 0x7fffe000))
26173 +          {
26174 +              InCache = 1;
26175 +              break;
26176 +          }
26177 +
26178 +       if (InCache == 0)
26179 +       {
26180 +           printk ("elan%d: checking if ECC error was read or write\n", dev->dev_instance);
26181 +
26182 +           /* Now read and throw away the answer. A read of a word will schedule a block read of sdram */
26183 +           elan4_sdram_readq (dev, Addr);
26184 +           status2 = read_reg64 (dev, SDRamECCStatus);
26185 +           if ((Addr == (ECC_Addr(status2) & ~(E4_CACHELINE_SIZE-1))) && ECC_CorrectableErr(status2))  // Write error.
26186 +           {
26187 +               status = (status & ~0x0030000000000000ULL) | 0x0010000000000000ULL;
26188 +               PULSE_SYSCONTROL (dev, CONT_CLEAR_SDRAM_ERROR);
26189 +           }
26190 +           else
26191 +               status = (status & ~0x0030000000000000ULL) | 0x0020000000000000ULL;
26192 +       }
26193 +       else
26194 +           status = status | 0x0030000000000000ULL;
26195 +    }
26196 +    else
26197 +       status &= ~0x0030000000000000ULL;
26198 +
26199 +    /* search for this error already being logged */
26200 +    for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i >= 0; i--)
26201 +        if ((dev->dev_sdramerrs[i].EccStatus == status) && (dev->dev_sdramerrs[i].ConfigReg == ConfigRegValue))
26202 +       {
26203 +            Found = 1;
26204 +           dev->dev_sdramerrs[i].ErrorCount += 1; // Keep a count.
26205 +           break;
26206 +       }
26207 +
26208 +    /* stash the status for /proc */
26209 +    if (!Found)
26210 +    {
26211 +       for (i = sizeof (dev->dev_sdramerrs)/sizeof (dev->dev_sdramerrs[0]) - 1; i > 0; i--)
26212 +           dev->dev_sdramerrs[i] = dev->dev_sdramerrs[i-1];
26213 +       dev->dev_sdramerrs[0].EccStatus = status;
26214 +       dev->dev_sdramerrs[0].ConfigReg = ConfigRegValue;
26215 +       dev->dev_sdramerrs[0].ErrorCount = 1; // First error
26216 +    }
26217 +    
26218 +    check_error_rate (dev);
26219 +}
26220 +
26221 +static void
26222 +clear_linkerr_led (void *arg)
26223 +{
26224 +    ELAN4_DEV *dev = (ELAN4_DEV *) arg;
26225 +
26226 +    write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError);
26227 +}
26228 +
26229 +void
26230 +handle_linkerror (ELAN4_DEV *dev)
26231 +{
26232 +    E4_uint32 LinkState;
26233 +    E4_uint32 CurrState = read_reg32 (dev, LinkControlReg);
26234 +
26235 +    /* Set for reading errors. */
26236 +    write_reg32 (dev, LinkControlReg,
26237 +                 (CurrState = CurrState & ~((LCONT_TEST_CONTROL_MASK << LCONT_TEST_CONTROL_SHIFT) |
26238 +                                           (LCONT_TEST_VALUE_MASK << LCONT_TEST_VALUE_SHIFT))));
26239 +    LinkState = LCONT_LINK_STATE(CurrState = read_reg32 (dev, LinkControlReg));
26240 +
26241 +#ifdef DEBUG
26242 +    {
26243 +       E4_uint8 ErrorMsg[256], DataErrorVal[64];
26244 +
26245 +       strcpy (ErrorMsg, "handle_linkerror:");
26246 +       if (LinkState & LS_LockError)    strcat (ErrorMsg, " LockError");
26247 +       if (LinkState & LS_DeskewError)  strcat (ErrorMsg, " DeskewError");
26248 +       if (LinkState & LS_PhaseError)   strcat (ErrorMsg, " PhaseError");
26249 +       if (LinkState & LS_DataError)
26250 +       {
26251 +           E4_uint32 error[4];
26252 +           E4_uint32 i;
26253 +           strcat (ErrorMsg, " DataError");
26254 +           /* Errors */
26255 +           for(i = LRS_ErrorVal8to0; i <= LRS_ErrorVal35to27; i++)
26256 +           {
26257 +               write_reg32 (dev, LinkControlReg,
26258 +                            CurrState | LCONT_TEST_VALUE(i) | (LCONT_READ_STATE << LCONT_TEST_CONTROL_SHIFT));
26259 +               error[i - LRS_ErrorVal8to0] = LCONT_LINK_STATE(read_reg32 (dev, LinkControlReg));
26260 +           }
26261 +           sprintf (DataErrorVal, " Link State Error Val: %09llx %03x %03x %03x %03x", 
26262 +                    (unsigned long long) ((error[0] & 0x1ffUL) | ((error[1] & 0x1ffUL) << 9)  |
26263 +                                 ((error[2] & 0x1ffUL) << 18) | ((error[3] & 0x1ffUL) << 27)),
26264 +                    error[3], error[2], error[1], error[0]);
26265 +           strcat (ErrorMsg, DataErrorVal);
26266 +       }
26267 +       if (LinkState & LS_FifoOvFlow0)  strcat (ErrorMsg, " FifoOvFlow0");
26268 +       if (LinkState & LS_FifoOvFlow1)  strcat (ErrorMsg, " FifoOvFlow1");
26269 +       if (LinkState & LS_Mod45Changed)         strcat (ErrorMsg, " Mod45Changed");
26270 +       if (LinkState & LS_PAckNotSeenError) strcat (ErrorMsg, " PAckNotSeenError");
26271 +       strcat (ErrorMsg, "\n");
26272 +       PRINTF0 (DBG_DEVICE, DBG_INTR, ErrorMsg);
26273 +    }
26274 +#endif
26275 +
26276 +    BumpDevStat (dev, s_link_errors);
26277 +    
26278 +    if (LinkState & LS_LockError)       BumpDevStat (dev, s_lock_errors);
26279 +    if (LinkState & LS_DeskewError)     BumpDevStat (dev, s_deskew_errors);
26280 +    if (LinkState & LS_PhaseError)      BumpDevStat (dev, s_phase_errors);
26281 +    if (LinkState & LS_DataError)       BumpDevStat (dev, s_data_errors);
26282 +    if (LinkState & LS_FifoOvFlow0)     BumpDevStat (dev, s_fifo_overflow0);
26283 +    if (LinkState & LS_FifoOvFlow1)     BumpDevStat (dev, s_fifo_overflow1);
26284 +    if (LinkState & LS_Mod45Changed)    BumpDevStat (dev, s_mod45changed);
26285 +    if (LinkState & LS_PAckNotSeenError) BumpDevStat (dev, s_pack_not_seen);
26286 +
26287 +    PULSE_SCHED_RESTART (dev, SCH_ClearLinkErrorInt);
26288 +    
26289 +    /* schedule a timer to clear the link error LED, so that it stays on 
26290 +     * for a second for every link error that occurs */
26291 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && !timer_fn_queued (&dev->dev_linkerr_timeoutid))
26292 +       schedule_timer_fn (&dev->dev_linkerr_timeoutid, clear_linkerr_led, (void *) dev, HZ);
26293 +
26294 +    /*
26295 +     * Signal the link error to the switch by
26296 +     * enabling the INT_LinkPortKeyFail bit.
26297 +     * Always clear the error bit as the switch
26298 +     * might have produced a spurious "ack" ...
26299 +     */
26300 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_LINKPORT_INT);
26301 +
26302 +    if (dev->dev_linkerr_signalled == 0)
26303 +       dev->dev_linkerr_signalled = 1;
26304 +    else
26305 +       dev->dev_linkerr_signalled = 2;
26306 +    
26307 +    ENABLE_INT_MASK (dev, INT_LinkPortKeyFail);
26308 +      
26309 +    check_error_rate (dev);
26310 +}
26311 +
26312 +void
26313 +handle_linkportkeyfail (ELAN4_DEV *dev)
26314 +{
26315 +    PRINTF0 (DBG_DEVICE, DBG_INTR, "handle_linkportkeyfail\n");
26316 +
26317 +    PULSE_SYSCONTROL (dev, CONT_CLEAR_LINKPORT_INT);
26318 +
26319 +    if (! dev->dev_linkerr_signalled)
26320 +    {
26321 +       /* Hmmm - they're not playing ball */
26322 +       BumpDevStat (dev, s_linkport_keyfail);
26323 +
26324 +       DISABLE_INT_MASK (dev, INT_LinkPortKeyFail);
26325 +    }
26326 +    else
26327 +    {
26328 +       /* If more link errors have occurred since we 
26329 +        * signalled the error, then leave it signalled. */
26330 +       if (--dev->dev_linkerr_signalled == 0)
26331 +           DISABLE_INT_MASK (dev, INT_LinkPortKeyFail);
26332 +    }
26333 +}
26334 +
26335 +
26336 +static __inline__ void
26337 +__elan4_4msi0 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
26338 +{
26339 +    unsigned long flags;
26340 +
26341 +    if (intreg & intmask & INT_MainInterrupt)
26342 +    {
26343 +       DISABLE_INT_MASK (dev, INT_MainInterrupt);
26344 +
26345 +       if (handle_mainints (dev, -1, elan4_mainint_punt_loops) == 0)
26346 +           ENABLE_INT_MASK (dev, INT_MainInterrupt);
26347 +       else
26348 +       {
26349 +           BumpDevStat (dev, s_mainint_punts);
26350 +           
26351 +           spin_lock_irqsave (&dev->dev_mainint_lock, flags);
26352 +           kcondvar_wakeupone (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
26353 +           spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
26354 +       }
26355 +    }
26356 +}
26357 +
26358 +static __inline__ void
26359 +__elan4_4msi1 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
26360 +{
26361 +    E4_uint32 restart = 0;
26362 +
26363 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi1: %x\n", intreg);
26364 +    
26365 +    spin_lock (&dev->dev_trap_lock);
26366 +    
26367 +    if (intreg & intmask & INT_CProc)
26368 +       restart |= handle_cproc_trap (dev);
26369 +    if (intreg & intmask & INT_EProc) 
26370 +       restart |= handle_eproc_trap (dev);
26371 +    if (intreg & intmask & INT_Dma0Proc) 
26372 +       restart |= handle_dproc_trap (dev, 0);
26373 +    if (intreg & intmask & INT_Dma1Proc) 
26374 +       restart |= handle_dproc_trap (dev, 1);
26375 +    if (intreg & intmask & INT_TProc)
26376 +       restart |= handle_tproc_trap (dev);
26377 +    
26378 +    PULSE_SCHED_RESTART (dev, restart);
26379 +    
26380 +    spin_unlock (&dev->dev_trap_lock);
26381 +    
26382 +    if (intreg & (INT_Halted|INT_Discarding))
26383 +       handle_haltints (dev, intreg);
26384 +}
26385 +
26386 +static __inline__ void
26387 +__elan4_4msi2 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
26388 +{
26389 +    E4_uint32 restart = 0;
26390 +
26391 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi2: %x\n", intreg);
26392 +    
26393 +    spin_lock (&dev->dev_trap_lock);
26394 +    if (intreg & intmask & INT_IProcCh0LowPri)
26395 +       restart |= handle_iproc_trap (dev, 0);
26396 +    
26397 +    if (intreg & intmask & INT_IProcCh1LowPri)
26398 +       restart |= handle_iproc_trap (dev, 1);
26399 +    
26400 +    if (intreg & intmask & INT_IProcCh0HighPri)
26401 +       restart |= handle_iproc_trap (dev, 2);
26402 +    
26403 +    if (intreg & intmask & INT_IProcCh1HighPri)
26404 +       restart |= handle_iproc_trap (dev, 3);
26405 +    
26406 +    PULSE_SCHED_RESTART (dev, restart);
26407 +    
26408 +    spin_unlock (&dev->dev_trap_lock);
26409 +}
26410 +
26411 +static __inline__ void
26412 +__elan4_4msi3 (ELAN4_DEV *dev, E4_uint32 intreg, E4_uint32 intmask)
26413 +{
26414 +    PRINTF1 (DBG_DEVICE, DBG_INTR, "__elan4_4msi3: %x\n", intreg);
26415 +    
26416 +    if (intreg & intmask & INT_PciMemErr)
26417 +       handle_pcimemerr (dev);
26418 +    
26419 +    if (intreg & intmask & INT_SDRamInt)
26420 +       handle_sdramint (dev);
26421 +    
26422 +    if (intreg & intmask & INT_LinkError)
26423 +       handle_linkerror (dev);
26424 +    
26425 +    if (intreg & intmask & INT_LinkPortKeyFail)
26426 +       handle_linkportkeyfail (dev);
26427 +}
26428 +
26429 +int
26430 +elan4_1msi0 (ELAN4_DEV *dev)
26431 +{
26432 +    E4_uint32 intmask = dev->dev_intmask;
26433 +    E4_uint32 intreg;
26434 +
26435 +    if (intmask == 0 || ((intreg = read_reg32 (dev, InterruptReg)) & intmask) == 0)
26436 +       return (0);
26437 +
26438 +    BumpDevStat (dev, s_interrupts);
26439 +    
26440 +    do {
26441 +       PRINTF1 (DBG_DEVICE, DBG_INTR, "elan4_1msi0: %x\n", intreg);
26442 +
26443 +       if (intreg & intmask & INT_MSI0)
26444 +           __elan4_4msi0(dev, intreg, intmask);
26445 +       if (intreg & intmask & INT_MSI1)
26446 +           __elan4_4msi1(dev, intreg, intmask);
26447 +       if (intreg & intmask & INT_MSI2)
26448 +           __elan4_4msi2(dev, intreg, intmask); 
26449 +       if (intreg & intmask & INT_MSI3)
26450 +           __elan4_4msi3(dev, intreg, intmask);
26451 +
26452 +       if (intreg & INT_LinkPortKeyFail)
26453 +           handle_linkportkeyfail (dev);
26454 +
26455 +       /* must ensure that the read of the interrupt mask
26456 +        * completes before the read of the interrupt register
26457 +        * since the main interrupt thread clears its interrupt
26458 +        * and then re-enables it in the interrupt mask. */
26459 +       intmask = dev->dev_intmask;
26460 +       mb();
26461 +       intreg = read_reg32 (dev, InterruptReg);
26462 +
26463 +    } while ((intreg & intmask) != 0);
26464 +
26465 +    return (1);
26466 +}
26467 +
26468 +/* local context management */
26469 +int
26470 +elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops)
26471 +{
26472 +    unsigned long flags;
26473 +    int tbl;
26474 +    int i;
26475 +
26476 +    ctxt->ctxt_dev      = dev;
26477 +    ctxt->ctxt_ops      = ops;
26478 +    ctxt->ctxt_features = dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES];
26479 +
26480 +    INIT_LIST_HEAD (&ctxt->ctxt_cqalist);
26481 +    spin_lock_init (&ctxt->ctxt_mmulock);
26482 +
26483 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
26484 +    {
26485 +       ctxt->shuffle_needed[tbl] = 0;
26486 +       for(i=0; i<ELAN4_CTXT_MAX_SHUFFLE; i++)
26487 +           ctxt->shuffle[tbl][i] = -1;
26488 +
26489 +       KMEM_ZALLOC (ctxt->ctxt_mmuhash[tbl], ELAN4_HASH_ENTRY **,  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *), 1);
26490 +       
26491 +       if (ctxt->ctxt_mmuhash[tbl] == NULL)
26492 +       {
26493 +           if (tbl != 0)
26494 +               KMEM_FREE (ctxt->ctxt_mmuhash[0], dev->dev_hashsize[0] * sizeof (ELAN4_HASH_ENTRY *));
26495 +           spin_lock_destroy (&ctxt->ctxt_mmulock);
26496 +           return (-ENOMEM);
26497 +       }
26498 +    }
26499 +
26500 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
26501 +
26502 +    if ((ctxt->ctxt_num = bt_freebit (dev->dev_ctxmap, (1 << dev->dev_ctxtableshift))) >= 0)
26503 +    {
26504 +       /* chain onto the lists of all contexts */
26505 +       list_add (&ctxt->ctxt_link, &dev->dev_ctxt_list);
26506 +
26507 +       BT_SET (dev->dev_ctxmap, ctxt->ctxt_num);
26508 +    }
26509 +    
26510 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
26511 +
26512 +    if (ctxt->ctxt_num >= 0)
26513 +       proc_insertctxt(dev, ctxt);
26514 +
26515 +    return (ctxt->ctxt_num < 0 ? -ENOMEM : 0);
26516 +}
26517 +
26518 +void
26519 +elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt)
26520 +{
26521 +    unsigned long flags;
26522 +    int tbl;
26523 +
26524 +    proc_removectxt(dev, ctxt);
26525 +
26526 +    /* remove from list of contexts */
26527 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
26528 +
26529 +    list_del (&ctxt->ctxt_link);
26530 +
26531 +    BT_CLEAR (dev->dev_ctxmap, ctxt->ctxt_num);
26532 +
26533 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
26534 +
26535 +    spin_lock_destroy (&ctxt->ctxt_info_lock);
26536 +
26537 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
26538 +       KMEM_FREE (ctxt->ctxt_mmuhash[tbl],  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY *));
26539 +
26540 +    spin_lock_destroy (&ctxt->ctxt_mmulock);
26541 +}
26542 +
26543 +ELAN4_CTXT *
26544 +elan4_localctxt (ELAN4_DEV *dev, unsigned num)
26545 +{
26546 +    struct list_head *entry;
26547 +    unsigned long flags;
26548 +
26549 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
26550 +
26551 +    list_for_each (entry, &dev->dev_ctxt_list) {
26552 +       ELAN4_CTXT *ctxt = list_entry (entry, ELAN4_CTXT, ctxt_link);
26553 +       
26554 +       if (ctxt->ctxt_num == num)
26555 +       {
26556 +           spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
26557 +           return (ctxt);
26558 +       }
26559 +    }
26560 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
26561 +
26562 +    return ((ELAN4_CTXT *) NULL);
26563 +}
26564 +
26565 +ELAN4_CTXT *
26566 +elan4_networkctxt (ELAN4_DEV *dev, unsigned num)
26567 +{
26568 +    E4_uint32 filter = elan4_read_filter (dev, num);
26569 +    
26570 +    if ((filter & E4_FILTER_CONTEXT_MASK) == INVALID_CONTEXT)
26571 +       return NULL;
26572 +    else
26573 +       return elan4_localctxt (dev, filter & E4_FILTER_CONTEXT_MASK);
26574 +}
26575 +
26576 +/* network context management */
26577 +int
26578 +elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum)
26579 +{
26580 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
26581 +    int               res = 0;
26582 +    E4_uint32         filter;
26583 +    unsigned long     flags;
26584 +    
26585 +    spin_lock_irqsave (&dev->dev_ctxt_lock, flags);
26586 +    
26587 +    filter = elan4_read_filter (dev, ctxnum);
26588 +    if ((filter & E4_FILTER_CONTEXT_MASK) != INVALID_CONTEXT)
26589 +    {
26590 +       PRINTF2 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d filter=%x -> EBUSY\n", ctxnum, filter);
26591 +       res = -EBUSY;
26592 +    }
26593 +    else
26594 +    {
26595 +       PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_attach_filter: ctx=%d - SUCCESS\n", ctxnum);
26596 +
26597 +       elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | E4_FILTER_DISCARD_ALL);
26598 +       PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
26599 +    }
26600 +    spin_unlock_irqrestore (&dev->dev_ctxt_lock, flags);
26601 +    
26602 +    return (res);
26603 +}
26604 +
26605 +void
26606 +elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum)
26607 +{
26608 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
26609 +
26610 +    PRINTF1 (ctxt, DBG_NETWORK_CTX, "elan4_detach_filter: detach from network context %d\n", ctxnum);
26611 +           
26612 +    elan4_write_filter (dev, ctxnum, INVALID_CONTEXT | E4_FILTER_DISCARD_ALL);
26613 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
26614 +}
26615 +
26616 +void
26617 +elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state)
26618 +{
26619 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
26620 +
26621 +    PRINTF6 (ctxt, DBG_NETWORK_CTX, "elan4_set_filter: set filter state %x for network context %d <%s%s%s%s>\n", state, ctxnum,
26622 +            (state & E4_FILTER_DISCARD_ALL) ? "discard,"  : "",
26623 +            (state & E4_FILTER_ACKOK_ALL)   ? "ack-ok,"   : "",
26624 +            (state & E4_FILTER_HIGH_PRI)    ? "high-pri," : "",
26625 +            (state & E4_FILTER_STATS)       ? "stats,"    : "");
26626 +           
26627 +    elan4_write_filter (dev, ctxnum, ctxt->ctxt_num | state);
26628 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush);
26629 +}
26630 +
26631 +void
26632 +elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl)
26633 +{
26634 +    ELAN4_DEV *dev   = ctxt->ctxt_dev;
26635 +    E4_uint32  value = tbl ? (E4_VPT_VALID | E4_VPT_VALUE(tbl->tbl_entries, tbl->tbl_size)) : 0;
26636 +    
26637 +    /* and insert into the vp table */
26638 +    elan4_sdram_writel (dev, (dev->dev_ctxtable + (ctxt->ctxt_num * sizeof (E4_ContextControlBlock)) +
26639 +                       offsetof (E4_ContextControlBlock, VirtualProcessTable)), value);
26640 +    pioflush_sdram(dev);
26641 +
26642 +    PULSE_SYSCONTROL (dev, CONT_ROUTE_FLUSH);
26643 +}
26644 +
26645 +/* command queue management */
26646 +ELAN4_CQA *
26647 +elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx)
26648 +{
26649 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
26650 +    struct list_head *el;
26651 +
26652 +    spin_lock (&dev->dev_cqlock);
26653 +    list_for_each (el, &ctxt->ctxt_cqalist) {
26654 +       ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
26655 +       
26656 +       if (cqa->cqa_idx == idx)
26657 +       {
26658 +           cqa->cqa_ref++;
26659 +
26660 +           spin_unlock (&dev->dev_cqlock);
26661 +           return cqa;
26662 +       }
26663 +    }
26664 +    spin_unlock (&dev->dev_cqlock);
26665 +    return NULL;
26666 +}
26667 +
26668 +void
26669 +elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx)
26670 +{
26671 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
26672 +    struct list_head *el, *nel;
26673 +
26674 +    spin_lock (&dev->dev_cqlock);
26675 +    list_for_each_safe (el, nel, &ctxt->ctxt_cqalist) {
26676 +       ELAN4_CQA *cqa = list_entry (el, ELAN4_CQA, cqa_link);
26677 +       
26678 +       if (cqa->cqa_idx == idx)
26679 +       {
26680 +           if (--cqa->cqa_ref || bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1)
26681 +               spin_unlock (&dev->dev_cqlock);
26682 +           else
26683 +           {
26684 +               list_del (&cqa->cqa_link);
26685 +               
26686 +               BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx);
26687 +               BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA);
26688 +               spin_unlock (&dev->dev_cqlock);
26689 +               
26690 +               KMEM_FREE (cqa, sizeof (ELAN4_CQA));
26691 +           }
26692 +           return;
26693 +       }
26694 +    }
26695 +    spin_unlock (&dev->dev_cqlock);
26696 +
26697 +    printk ("elan4_putcqa: idx %d not found\n", idx);
26698 +    BUG();
26699 +}
26700 +
26701 +static ELAN4_CQ *
26702 +elan4_getcq (ELAN4_CTXT *ctxt, unsigned int type)
26703 +{
26704 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
26705 +    ELAN4_CQA        *cqa;
26706 +    struct list_head *el;
26707 +    int                      cidx, didx;
26708 +
26709 +    spin_lock (&dev->dev_cqlock);
26710 +    list_for_each (el, &ctxt->ctxt_cqalist) {
26711 +       cqa = list_entry (el, ELAN4_CQA, cqa_link);
26712 +
26713 +       if (cqa->cqa_type == type && (cidx = bt_freebit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA)) >=0)
26714 +       {
26715 +           BT_SET (cqa->cqa_bitmap, cidx);
26716 +           
26717 +           spin_unlock (&dev->dev_cqlock);
26718 +           return &cqa->cqa_cq[cidx];
26719 +       }
26720 +    }
26721 +    spin_unlock (&dev->dev_cqlock);
26722 +
26723 +    /* allocate a new cqa and its chunk of command queue descriptors */
26724 +    KMEM_ZALLOC (cqa, ELAN4_CQA *, sizeof (ELAN4_CQA), 1);
26725 +    if (cqa == NULL)
26726 +       return NULL;
26727 +
26728 +    spin_lock (&dev->dev_cqlock);
26729 +    cidx = bt_freebit (ctxt->ctxt_cqamap, ELAN4_MAX_CQA);
26730 +
26731 +    /* On architectures which have MTRR registers for write-combining
26732 +     * the top command queues from dev->dev_cqreorder upwards are
26733 +     * used for reordered queues.  Without MTRR registers any page
26734 +     * sized group can use write combining through the ptes. */
26735 +    if (dev->dev_cqreorder == 0)
26736 +       didx = bt_freebit (dev->dev_cqamap, dev->dev_cqcount/ELAN4_CQ_PER_CQA);
26737 +    else
26738 +    {
26739 +       if ((type & CQ_Reorder) != 0)
26740 +           didx = bt_nextbit (dev->dev_cqamap, dev->dev_cqcount/ELAN4_CQ_PER_CQA, (dev->dev_cqreorder/ELAN4_CQ_PER_CQA) - 1, 0);
26741 +       else
26742 +           didx = bt_freebit (dev->dev_cqamap, dev->dev_cqreorder/ELAN4_CQ_PER_CQA);
26743 +    }
26744 +
26745 +    if (cidx < 0 || didx < 0)
26746 +    {
26747 +       spin_unlock (&dev->dev_cqlock);
26748 +       KMEM_FREE (cqa, sizeof (ELAN4_CQA));
26749 +       return NULL;
26750 +    }
26751 +
26752 +    BT_SET (ctxt->ctxt_cqamap, cidx);
26753 +    BT_SET (dev->dev_cqamap, didx);
26754 +
26755 +    cqa->cqa_idx   = cidx;
26756 +    cqa->cqa_type  = type;
26757 +    cqa->cqa_cqnum = (didx * ELAN4_CQ_PER_CQA);
26758 +    
26759 +    list_add_tail (&cqa->cqa_link, &ctxt->ctxt_cqalist);
26760 +    
26761 +    /* initialise the cqa struct */
26762 +    for (cidx = 0; cidx < ELAN4_CQ_PER_CQA; cidx++)
26763 +    {
26764 +       cqa->cqa_cq[cidx].cq_idx = cidx;
26765 +       cqa->cqa_cq[cidx].cq_cqa = cqa;
26766 +    }
26767 +
26768 +    /* no mappings yet */
26769 +    cqa->cqa_ref = 0;
26770 +
26771 +    /* we're going to return entry zero */
26772 +    BT_SET (cqa->cqa_bitmap, 0);
26773 +    spin_unlock (&dev->dev_cqlock);
26774 +    
26775 +    return &cqa->cqa_cq[0];
26776 +}
26777 +
26778 +static void
26779 +elan4_putcq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq)
26780 +{
26781 +    ELAN4_DEV        *dev = ctxt->ctxt_dev;
26782 +    ELAN4_CQA        *cqa = cq->cq_cqa;
26783 +
26784 +    spin_lock (&dev->dev_cqlock);
26785 +
26786 +    BT_CLEAR (cqa->cqa_bitmap, cq->cq_idx);
26787 +
26788 +    if (bt_lowbit (cqa->cqa_bitmap, ELAN4_CQ_PER_CQA) != -1 || cqa->cqa_ref)
26789 +       spin_unlock (&dev->dev_cqlock);
26790 +    else
26791 +    {
26792 +       list_del (&cqa->cqa_link);
26793 +       
26794 +       BT_CLEAR (ctxt->ctxt_cqamap, cqa->cqa_idx);
26795 +       BT_CLEAR (dev->dev_cqamap, cqa->cqa_cqnum/ELAN4_CQ_PER_CQA);
26796 +       spin_unlock (&dev->dev_cqlock);
26797 +       
26798 +       KMEM_FREE (cqa, sizeof (ELAN4_CQA));
26799 +    }
26800 +}
26801 +
26802 +ELAN4_CQ *
26803 +elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned perm, unsigned cqtype)
26804 +{
26805 +    ELAN4_DEV   *dev = ctxt->ctxt_dev;
26806 +    ELAN4_CQ    *cq;
26807 +    int         cqnum;
26808 +    sdramaddr_t cqdesc;
26809 +    unsigned    offset;
26810 +    E4_uint64   value;
26811 +
26812 +    if ((cq = elan4_getcq (ctxt, cqtype)) == NULL)
26813 +       return NULL;
26814 +
26815 +    cqnum = elan4_cq2num(cq);
26816 +    
26817 +    cq->cq_space = elan4_sdram_alloc (dev, CQ_Size(cqsize));
26818 +    if (cq->cq_space == (virtaddr_t) 0)
26819 +    {
26820 +       elan4_putcq (ctxt, cq);
26821 +       return (NULL);
26822 +    }
26823 +
26824 +    cq->cq_size   = cqsize;
26825 +    cq->cq_perm   = perm;
26826 +    
26827 +    /* and finally initialise the command queue descriptor */
26828 +    cqdesc = dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc));
26829 +
26830 +    value  = CQ_QueuePtrsValue (cqsize, cq->cq_space, cq->cq_space);
26831 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26832 +       value |= ((cqtype & CQ_Priority) ? CQ_RevA_Priority : 0);
26833 +    else
26834 +       value |= (((cqtype & CQ_Priority) ? CQ_RevB_Priority : 0) | 
26835 +                 ((cqtype & CQ_Reorder)  ? CQ_RevB_ReorderingQueue : CQ_RevB_32bitWriteQueue));
26836 +
26837 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs), value);
26838 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue), 0);
26839 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), 0);
26840 +    elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control), CQ_ControlValue (ctxt->ctxt_num, 2, perm));
26841 +    pioflush_sdram (dev);
26842 +
26843 +    offset = (cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize;
26844 +
26845 +    cq->cq_mapping = elan4_map_device (dev, ELAN4_BAR_REGISTERS, (offset & ~(PAGE_SIZE-1)), 
26846 +                                      PAGE_SIZE, &cq->cq_handle) + (offset & (PAGE_SIZE-1));
26847 +#ifdef CONFIG_MPSAS
26848 +    if (ctxt == &dev->dev_ctxt)
26849 +       return (cq);
26850 +#endif
26851 +
26852 +    elan4_sdram_flushcache (dev, cq->cq_space, CQ_Size(cqsize));
26853 +
26854 +    return (cq);
26855 +}
26856 +    
26857 +void
26858 +elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq)
26859 +{
26860 +    ELAN4_DEV *dev    = ctxt->ctxt_dev;
26861 +    unsigned   offset = (elan4_cq2num(cq) + dev->dev_cqoffset) * CQ_CommandMappingSize;
26862 +
26863 +    elan4_flushcq (dev, cq);
26864 +
26865 +    elan4_unmap_device (dev, cq->cq_mapping - (offset & (PAGE_SIZE-1)), PAGE_SIZE, &cq->cq_handle);
26866 +    elan4_sdram_free (dev, cq->cq_space, CQ_Size (cq->cq_size));
26867 +
26868 +    elan4_putcq (ctxt, cq);
26869 +}
26870 +
26871 +void
26872 +elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
26873 +{
26874 +    sdramaddr_t   cqdesc = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
26875 +    int           hipri;
26876 +    unsigned long flags;
26877 +    
26878 +    PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restarting cq %p\n", cq);
26879 +    
26880 +    spin_lock_irqsave (&dev->dev_requeue_lock, flags);
26881 +
26882 +    while (read_reg32 (dev, CommandControl.CommandRequeuePtr) & E4_CommandRequeueBusy)
26883 +       ;
26884 +    
26885 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26886 +       hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevA_Priority) != 0;
26887 +    else
26888 +       hipri = (elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs)) & CQ_RevB_Priority) != 0;
26889 +    
26890 +    if (hipri)
26891 +    {
26892 +       PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as high pri\n", elan4_cq2num(cq));
26893 +       write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc | E4_CommandRequeueHighPri);
26894 +    }
26895 +    else
26896 +    {
26897 +       PRINTF1 (DBG_DEVICE, DBG_CPROC, "restartcq: restart cq %d as low pri\n", elan4_cq2num(cq));
26898 +       write_reg32 (dev, CommandControl.CommandRequeuePtr, cqdesc);
26899 +    }
26900 +    pioflush_reg (dev);
26901 +    
26902 +    spin_unlock_irqrestore (&dev->dev_requeue_lock, flags);
26903 +}
26904 +
26905 +static void
26906 +flushcq_intop (ELAN4_DEV *dev, void *arg)
26907 +{
26908 +    unsigned long flags;
26909 +
26910 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26911 +    dev->dev_flush_finished |= (1 << (unsigned long) arg);
26912 +    kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
26913 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26914 +}
26915 +void
26916 +elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq)
26917 +{
26918 +    int                  flushqnum = elan4_cq2num(cq) & (COMMAND_INSERTER_CACHE_ENTRIES-1);
26919 +    ELAN4_CQ     *flushq    = dev->dev_flush_cq[flushqnum];
26920 +    unsigned long flags;
26921 +
26922 +    PRINTF (DBG_DEVICE, DBG_FLUSH, "elan4_flushcq: cqnum=%d\n", elan4_cq2num(cq));
26923 +
26924 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26925 +
26926 +    while (! (dev->dev_flush_finished & (1 << flushqnum)))
26927 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
26928 +    
26929 +    dev->dev_flush_finished &= ~(1 << flushqnum);
26930 +
26931 +    dev->dev_flush_op[flushqnum].op_function = flushcq_intop;
26932 +    dev->dev_flush_op[flushqnum].op_arg      = (void *) (unsigned long) flushqnum;
26933 +    
26934 +    elan4_queue_intop (dev, flushq, &dev->dev_flush_op[flushqnum]);
26935 +
26936 +    while (! (dev->dev_flush_finished & (1 << flushqnum)))
26937 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
26938 +    
26939 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26940 +}
26941 +
26942 +void
26943 +elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart)
26944 +{
26945 +    sdramaddr_t cqdesc  = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
26946 +    E4_uint32   control = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
26947 +
26948 +    /* Write the command queues control word, but ensure that the ChannelNotCompleted fields
26949 +     * are not modified.   We use this to just alter the RestartCount/Permissions fields */
26950 +
26951 +    elan4_sdram_writel (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control), 
26952 +                       CQ_ControlValue (CQ_Context (control), restart ? restart : CQ_RestartCount (control), perm));
26953 +}
26954 +
26955 +/* instruction cache flush */
26956 +static __inline__ void
26957 +elan4_flush_icache_locked (ELAN4_DEV *dev)
26958 +{
26959 +    int i, j;
26960 +
26961 +    PRINTF0 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache_locked: flushing icache\n");
26962 +
26963 +    for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
26964 +    {
26965 +        write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
26966 +        for (j = 0; j < E4_ICachePortSize; j++)
26967 +           write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
26968 +    }
26969 +
26970 +    /*
26971 +     * Initialise the top of the ICache Set0 with an instruction which will
26972 +     * cause a known trap fingerprint so that the application can identify it
26973 +     * and ignore the trap.
26974 +     */
26975 +    write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
26976 +
26977 +    /* Errata 24: must ensure that the DCache is flushed after loading 
26978 +     *            code for the thread processor. */
26979 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
26980 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
26981 +
26982 +    pioflush_reg (dev);
26983 +}
26984 +
26985 +static void
26986 +device_iflush_haltop (ELAN4_DEV *dev, void *arg)
26987 +{
26988 +    unsigned long flags;
26989 +
26990 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
26991 +    
26992 +    elan4_flush_icache_locked (dev);
26993 +
26994 +    dev->dev_iflush_queued = 0;
26995 +
26996 +    kcondvar_wakeupall (&dev->dev_flush_wait, &dev->dev_flush_lock);
26997 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
26998 +}
26999 +
27000 +void
27001 +elan4_flush_icache_halted (ELAN4_CTXT *ctxt)
27002 +{
27003 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
27004 +    unsigned long flags;
27005 +
27006 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
27007 +    
27008 +    elan4_flush_icache_locked (dev);
27009 +
27010 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
27011 +}
27012 +
27013 +void
27014 +elan4_flush_icache (ELAN4_CTXT *ctxt)
27015 +{
27016 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
27017 +    unsigned long flags;
27018 +    
27019 +    spin_lock_irqsave (&dev->dev_flush_lock, flags);
27020 +
27021 +    PRINTF1 (DBG_DEVICE, DBG_FLUSH, "elan4_flush_icache: queued=%d\n", dev->dev_iflush_queued);
27022 +
27023 +    if (! dev->dev_iflush_queued)
27024 +    {
27025 +       dev->dev_iflush_queued = 1;
27026 +       
27027 +       elan4_queue_haltop (dev, &dev->dev_iflush_haltop);
27028 +    }
27029 +
27030 +    while (dev->dev_iflush_queued)
27031 +       kcondvar_wait (&dev->dev_flush_wait, &dev->dev_flush_lock, &flags);
27032 +
27033 +    spin_unlock_irqrestore (&dev->dev_flush_lock, flags);
27034 +}
27035 +
27036 +/* device context operations */
27037 +static void
27038 +device_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
27039 +{
27040 +    ELAN4_DEV        *dev  = ctxt->ctxt_dev;
27041 +    ELAN4_CPROC_TRAP *trap = &dev->dev_cproc_trap;
27042 +
27043 +    elan4_extract_cproc_trap (dev, status, trap, cqnum);
27044 +
27045 +    DBGCMD (DBG_DEVICE, DBG_FLUSH, elan4_display_cproc_trap (DBG_DEVICE, DBG_FLUSH, "device_cproc_trap", trap));
27046 +
27047 +    switch (CPROC_TrapType (trap->tr_status))
27048 +    {
27049 +    case CommandProcInterruptQueueOverflow:
27050 +       PRINTF (ctxt, DBG_FLUSH, "device_cproc_trap: cqnum=%d\n", cqnum);
27051 +
27052 +       /* XXXX: we could either just hit restart (and hope) - or we could extract
27053 +        *       the event interrupt cookie out and "complete" the command before
27054 +        *       restarting it */
27055 +       elan4_restartcq (dev, dev->dev_flush_cq[cqnum]);
27056 +       return;
27057 +
27058 +    case CommandProcDmaQueueOverflow:
27059 +    case CommandProcPermissionTrap:
27060 +       handle_dma_flushops (dev, status, cqnum);
27061 +       return;
27062 +       
27063 +    default:
27064 +       printk ("device_cproc_trap: status=%llx control=%llx TrapType=%x cqnum=%d\n", (long long) trap->tr_status,
27065 +               elan4_sdram_readq (dev, dev->dev_cqaddr + cqnum * sizeof (E4_CommandQueueDesc) +
27066 +                                  offsetof (E4_CommandQueueDesc, CQ_Control)),
27067 +               (int) CPROC_TrapType(trap->tr_status), cqnum);
27068 +       ELAN4_DEBUG_TRIGGER (ctxt, "elan4:device_cproc_trap\n");
27069 +    }
27070 +}
27071 +
27072 +static void
27073 +device_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
27074 +{
27075 +    ELAN4_TPROC_TRAP trap;
27076 +
27077 +    elan4_extract_tproc_trap (ctxt->ctxt_dev, status, &trap);
27078 +
27079 +    elan4_display_tproc_trap (DBG_CONSOLE, DBG_TRAP, "device_tproc_trap", &trap);
27080 +    ELAN4_DEBUG_TRIGGER (ctxt, "elan4:device_tproc_trap\n");
27081 +}
27082 +
27083 +static void
27084 +device_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
27085 +{
27086 +    ELAN4_DPROC_TRAP trap;
27087 +
27088 +    elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit);
27089 +
27090 +    elan4_display_dproc_trap (DBG_CONSOLE, DBG_TRAP, "device_dproc_trap", &trap);
27091 +
27092 +    ELAN4_DEBUG_TRIGGER (ctxt, "elan4:device_dproc_trap\n");
27093 +}
27094 +
27095 +static void
27096 +device_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
27097 +{
27098 +    ELAN4_DEV *dev = (ELAN4_DEV *) ctxt;
27099 +    struct list_head *el,*nel;
27100 +    unsigned long flags;
27101 +
27102 +    PRINTF (ctxt, DBG_FLUSH, "device_interrupt: cookie=%llx\n", cookie);
27103 +
27104 +    spin_lock_irqsave (&dev->dev_intop_lock, flags);
27105 +    list_for_each_safe (el, nel, &dev->dev_intop_list) {
27106 +       ELAN4_INTOP *op = list_entry (el, ELAN4_INTOP, op_link);
27107 +
27108 +       if (op->op_cookie == cookie)
27109 +       {
27110 +           if ((op->op_cookie & INTOP_TYPE_MASK) == INTOP_ONESHOT)
27111 +               list_del (&op->op_link);
27112 +
27113 +           spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
27114 +           
27115 +           (*op->op_function)(dev, op->op_arg);
27116 +           return;
27117 +       }
27118 +    }
27119 +    spin_unlock_irqrestore (&dev->dev_intop_lock, flags);
27120 +
27121 +    panic ("device_interrupt: interrupt cookie %llx not found\n", (long long)cookie);
27122 +}
27123 +
27124 +static void
27125 +device_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
27126 +{
27127 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
27128 +    ELAN4_IPROC_TRAP *trap = &dev->dev_iproc_trap;
27129 +
27130 +    elan4_extract_iproc_trap (dev, status, trap, unit);
27131 +    elan4_inspect_iproc_trap (trap);
27132 +
27133 +    DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "device_iproc_trap", trap));
27134 +
27135 +    if (elan4_neterr_iproc_trap (dev, trap))
27136 +       return;
27137 +
27138 +    elan4_display_iproc_trap (DBG_CONSOLE, DBG_TRAP, "device_iproc_trap", trap);
27139 +    panic ("device_iproc_trap: unexpected trap\n");
27140 +}
27141 +
27142 +static void
27143 +device_needs_shuffle (ELAN4_CTXT *ctxt, int tbl, int hashidx)
27144 +{   
27145 +    /* XXXX currently this doesnt need to do anything 
27146 +       as the chains have only 2 entries */
27147 +}
27148 +
27149 +ELAN4_TRAP_OPS device_trap_ops = 
27150 +{
27151 +    NULL,
27152 +    device_cproc_trap,
27153 +    device_dproc_trap,
27154 +    device_tproc_trap,
27155 +    device_iproc_trap,
27156 +    device_interrupt,
27157 +    NULL,
27158 +    device_needs_shuffle,
27159 +};
27160 +
27161 +/*
27162 + * elan4_initialise_device
27163 + *    initialise the ELAN4_DEV struct - spinlocks,cvs etc.
27164 + *    map the registers, sdram etc
27165 + */
27166 +int
27167 +elan4_initialise_device (ELAN4_DEV *dev)
27168 +{
27169 +    int i, bit;
27170 +
27171 +    if (elan4_mainint_resched_ticks == 0)
27172 +       elan4_mainint_resched_ticks = (hz/4);
27173 +
27174 +    /* map the registers */
27175 +    switch (dev->dev_devinfo.dev_revision_id)
27176 +    {
27177 +    case PCI_REVISION_ID_ELAN4_REVA:
27178 +       dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle);
27179 +       
27180 +       dev->dev_rom  = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVA_EBUS_OFFSET + ELAN4_REVA_EBUS_ROM_OFFSET, 
27181 +                                         ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle);
27182 +       break;
27183 +       
27184 +    case PCI_REVISION_ID_ELAN4_REVB:
27185 +       dev->dev_regs = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_REG_OFFSET, ELAN4_REG_SIZE, &dev->dev_regs_handle);
27186 +       dev->dev_rom  = (ioaddr_t) 0;
27187 +       dev->dev_i2c  = elan4_map_device (dev, ELAN4_BAR_REGISTERS, ELAN4_REVB_I2C_OFFSET, ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle);
27188 +       break;
27189 +
27190 +    default:
27191 +       return -EINVAL;
27192 +    }
27193 +
27194 +    /* XXXX: parse the ebus rom to determine the sdram configuration */
27195 +    {
27196 +       extern long long       sdram_cfg;
27197 +
27198 +       if (sdram_cfg == 0)
27199 +           dev->dev_sdram_cfg = SDRAM_STARTUP_VALUE;
27200 +       else
27201 +           dev->dev_sdram_cfg = sdram_cfg;
27202 +    }
27203 +
27204 +    for (bit = 0; ((1 << bit) & elan4_resource_len (dev, ELAN4_BAR_SDRAM)) == 0; bit++)
27205 +       ;
27206 +
27207 +    switch ((dev->dev_sdram_cfg >> SDRAM_RamSize_SH) & 3)
27208 +    {
27209 +    case 0:                    /* 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output) */
27210 +       dev->dev_sdram_numbanks = 4; bit -= 2;
27211 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
27212 +       {
27213 +           dev->dev_sdram_banks[i].b_base = (i << bit);
27214 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
27215 +       }
27216 +       break;
27217 +
27218 +    case 1:                    /*  64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output) */
27219 +       dev->dev_sdram_numbanks = 4; bit -= 2;
27220 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
27221 +       {
27222 +           dev->dev_sdram_banks[i].b_base = ((i & 2) << (bit)) | ((i & 1) << (bit-1));
27223 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
27224 +       }
27225 +       break;
27226 +       
27227 +    case 2:                    /* 2Gbit (16-bit output) or 1Gbit (8-bit output) */
27228 +       dev->dev_sdram_numbanks = 2; bit--;
27229 +       for (i = 0; i < dev->dev_sdram_numbanks; i++)
27230 +       {
27231 +           dev->dev_sdram_banks[i].b_base = (i << bit);
27232 +           dev->dev_sdram_banks[i].b_size = (1 << bit);
27233 +       }
27234 +       break;
27235 +
27236 +    case 3:                    /* 4Gbit (16-bit output) or 2Gbit (8-bit output) */
27237 +       dev->dev_sdram_numbanks = 1;
27238 +       dev->dev_sdram_banks[0].b_base = 0;
27239 +       dev->dev_sdram_banks[0].b_size = (1 << bit);
27240 +       break;
27241 +    }
27242 +
27243 +    elan4_sdram_init (dev);
27244 +
27245 +    /* initialise locks for classes of interrupts */
27246 +    spin_lock_init (&dev->dev_trap_lock);
27247 +    spin_lock_init (&dev->dev_intop_lock);
27248 +    spin_lock_init (&dev->dev_haltop_lock);
27249 +    spin_lock_init (&dev->dev_mainint_lock);
27250 +
27251 +    init_timer (&dev->dev_haltop_timer);
27252 +    dev->dev_haltop_timer.function = dev_haltop_timer_func;
27253 +    dev->dev_haltop_timer.data     = (unsigned long) dev;
27254 +
27255 +    /* initialise other locks */
27256 +    spin_lock_init (&dev->dev_i2c_lock);
27257 +
27258 +    spin_lock_init (&dev->dev_mmulock);
27259 +    spin_lock_init (&dev->dev_cqlock);
27260 +    spin_lock_init (&dev->dev_ctxlock);
27261 +
27262 +    spin_lock_init (&dev->dev_intmask_lock);
27263 +    spin_lock_init (&dev->dev_syscontrol_lock);
27264 +
27265 +    spin_lock_init (&dev->dev_ctxt_lock);
27266 +    spin_lock_init (&dev->dev_flush_lock);
27267 +    spin_lock_init (&dev->dev_requeue_lock);
27268 +
27269 +    kmutex_init (&dev->dev_lock);
27270 +
27271 +    kcondvar_init (&dev->dev_mainint_wait);
27272 +    kcondvar_init (&dev->dev_flush_wait);
27273 +
27274 +    /* initialise lists */
27275 +    INIT_LIST_HEAD (&dev->dev_ctxt_list);
27276 +    INIT_LIST_HEAD (&dev->dev_intop_list);
27277 +    INIT_LIST_HEAD (&dev->dev_interruptq_list);
27278 +    INIT_LIST_HEAD (&dev->dev_hc_list);
27279 +    INIT_LIST_HEAD (&dev->dev_haltop_list);
27280 +    INIT_LIST_HEAD (&dev->dev_dma_flushop[0].list);
27281 +    INIT_LIST_HEAD (&dev->dev_dma_flushop[1].list);
27282 +
27283 +    dev->dev_state = ELAN4_STATE_STOPPED;
27284 +
27285 +    return (0);
27286 +}
27287 +
27288 +void
27289 +elan4_finalise_device (ELAN4_DEV *dev)
27290 +{
27291 +    kcondvar_destroy (&dev->dev_flush_wait);
27292 +    kcondvar_destroy (&dev->dev_mainint_wait);
27293 +
27294 +    kmutex_destroy (&dev->dev_lock);
27295 +
27296 +    spin_lock_destroy (&dev->dev_requeue_lock);
27297 +    spin_lock_destroy (&dev->dev_flush_lock);
27298 +    spin_lock_destroy (&dev->dev_ctxt_lock);
27299 +
27300 +    spin_lock_destroy (&dev->dev_syscontrol_lock);
27301 +    spin_lock_destroy (&dev->dev_intmask_lock);
27302 +
27303 +    spin_lock_destroy (&dev->dev_ctxlock);
27304 +    spin_lock_destroy (&dev->dev_cqlock);
27305 +    spin_lock_destroy (&dev->dev_mmulock);
27306 +
27307 +    spin_lock_destroy (&dev->dev_i2c_lock);
27308 +
27309 +    spin_lock_destroy (&dev->dev_mainint_lock);
27310 +    spin_lock_destroy (&dev->dev_haltop_lock);
27311 +    spin_lock_destroy (&dev->dev_intop_lock);
27312 +    spin_lock_destroy (&dev->dev_trap_lock);
27313 +
27314 +    del_timer_sync (&dev->dev_haltop_timer);
27315 +
27316 +    while (! list_empty (&dev->dev_hc_list))
27317 +    {
27318 +       ELAN4_HASH_CHUNK *hc = list_entry (dev->dev_hc_list.next, ELAN4_HASH_CHUNK, hc_link);
27319 +       
27320 +       list_del (&hc->hc_link);
27321 +
27322 +       KMEM_FREE(hc, sizeof (ELAN4_HASH_CHUNK));
27323 +    }
27324 +    
27325 +    elan4_sdram_fini (dev);
27326 +    
27327 +    switch (dev->dev_devinfo.dev_revision_id)
27328 +    {
27329 +    case PCI_REVISION_ID_ELAN4_REVA:
27330 +       elan4_unmap_device (dev, dev->dev_rom,  ELAN4_REVA_EBUS_ROM_SIZE, &dev->dev_rom_handle);
27331 +       elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
27332 +       break;
27333 +    case PCI_REVISION_ID_ELAN4_REVB:
27334 +       elan4_unmap_device (dev, dev->dev_i2c,  ELAN4_REVB_I2C_SIZE, &dev->dev_i2c_handle);
27335 +       elan4_unmap_device (dev, dev->dev_regs, ELAN4_REG_SIZE, &dev->dev_regs_handle);
27336 +       break;
27337 +    }
27338 +}
27339 +
27340 +static int
27341 +measure_sysclk (ELAN4_DEV *dev)        
27342 +{
27343 +    E4_uint64 val0, val1;
27344 +    E4_uint32 ticks, ns;
27345 +    
27346 +    write_ureg64 (dev, StatCont, STP_SYS_CLOCK_RATE0);
27347 +    
27348 +    val0 = read_ureg64 (dev, StatCounts[0]);
27349 +    udelay (1000);
27350 +    val1 = read_ureg64 (dev, StatCounts[0]);
27351 +    
27352 +    
27353 +    ticks = ((val1 >> 32) - (val0 >> 32));
27354 +    ns    = ((val1 & 0xffffffff) - (val0 & 0xffffffff));
27355 +    
27356 +    return (ticks / (ns / 1000));
27357 +}
27358 +
27359 +static void
27360 +initialise_cache (ELAN4_DEV *dev)
27361 +{
27362 +    register int set, line;
27363 +
27364 +    /* Initialise the cache to "map" the bottom of sdram - we will use
27365 +     * this space for cache flushing, so require the cache to be set
27366 +     * up so that cachelines for this are in the correct set.
27367 +     *
27368 +     * XXXX: for MPSAS we set bit 28, to ensure that any access to 
27369 +     *       sdram causes the line to be filled first to expunge any
27370 +     *       Xs. */
27371 +    for (set = 0; set < E4_NumCacheSets; set++)
27372 +       for (line = 0; line < E4_NumCacheLines; line++)
27373 +           write_tag (dev, Tags[set][line], (((E4_uint64) set) << 29) | (1 << 28) | (line << 16));
27374 +}
27375 +
27376 +#ifndef CONFIG_MPSAS
27377 +static void
27378 +initialise_cache_tags (ELAN4_DEV *dev, unsigned addr)
27379 +{
27380 +    register int set, line;
27381 +
27382 +    /* Initialise the whole cache to hold sdram at "addr" as direct mapped */
27383 +
27384 +    for (set = 0; set < E4_NumCacheSets; set++)
27385 +       for (line = 0; line < E4_NumCacheLines; line++)
27386 +           write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11));
27387 +}
27388 +
27389 +static void
27390 +initialise_ecc (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
27391 +{
27392 +    register int i, addr;
27393 +
27394 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
27395 +    {
27396 +        initialise_cache_tags (dev, E4_CacheSize);
27397 +        for (addr = 0; addr < bank->b_size; addr += E4_CacheSize)
27398 +        {
27399 +           for (i = 0; i < E4_CacheSize; i += sizeof (E4_uint64))
27400 +               writeq (0xbeec000000000000ull | addr | i, (void *)(bank->b_ioaddr + addr + i));
27401 +           initialise_cache_tags (dev, addr);
27402 +        }
27403 +    }
27404 +    else
27405 +    {
27406 +       /* Write the whole of this bank of sdram. */
27407 +        for (addr = 0; addr < bank->b_size; addr += sizeof (E4_uint64))
27408 +           writeq (0xbeec000000000000ull | addr, (void *)(bank->b_ioaddr + addr));
27409 +
27410 +       /* Now flush out the top out of the cache */
27411 +        for (addr = 0; addr < E4_CacheSize; addr += sizeof (E4_uint64))
27412 +           writeq (0xbeec000000000000ull | addr, (void *)(bank->b_ioaddr + addr));
27413 +
27414 +       /* Now read the top value of sdram to guarantee the write has occured before the ecc is enabled */
27415 +       __elan4_readq (dev, bank->b_ioaddr + bank->b_size - sizeof (E4_uint64));
27416 +    }
27417 +}
27418 +#endif
27419 +
27420 +#ifdef CONFIG_MPSAS
27421 +static void
27422 +do_initdma (ELAN4_DEV *dev)
27423 +{
27424 +#define VIRTUAL_ADDRESS        0x10000000ull
27425 +    ELAN4_CQ  *cq  = dev->dev_flush_cq[0];
27426 +    E4_uint64 value;
27427 +    E4_uint32 intreg;
27428 +    E4_uint64 status;
27429 +
27430 +    PRINTF (DBG_DEVICE, DBG_CONFIG, "elan: performing initialising dma\n");
27431 +    
27432 +    DISABLE_INT_MASK (dev, INT_Dma0Proc | INT_Dma1Proc);
27433 +    
27434 +    /* initialise the context filter */
27435 +    elan4_attach_filter (&dev->dev_ctxt, 0);
27436 +
27437 +    /* now issue a DMA - we expect this to trap */
27438 +    writeq (E4_DMA_TYPE_SIZE (128*4, DMA_DataTypeByte, 0, 0) | RUN_DMA_CMD, cq->cq_mapping + (0 << 3));
27439 +    writeq (0,                                                              cq->cq_mapping + (1 << 3));
27440 +    writeq (0,                                                              cq->cq_mapping + (2 << 3));
27441 +    writeq (dev->dev_tproc_space,                                           cq->cq_mapping + (3 << 3));
27442 +    writeq (dev->dev_tproc_space,                                           cq->cq_mapping + (4 << 3));
27443 +    writeq (0,                                                              cq->cq_mapping + (5 << 3));
27444 +    writeq (0,                                                              cq->cq_mapping + (6 << 3));
27445 +    
27446 +    /* spin waiting for it to trap - then restart the dma processor */
27447 +    do {
27448 +       value   = read_reg64 (dev, IntAndMaskReg);
27449 +       intreg  = (value >> E4_INTERRUPT_REG_SHIFT);
27450 +    } while ((intreg & (INT_Dma0Proc | INT_Dma1Proc)) == 0);
27451 +    
27452 +    /* check it trapped for the right reason */
27453 +    status = (intreg & INT_Dma0Proc) ? read_reg64 (dev, DProc0Status) : read_reg64 (dev, DProc1Status);
27454 +    
27455 +    if (DPROC_PrefetcherFault (status) || (DPROC_TrapType(status) != DmaProcFailCountError && DPROC_TrapType(status) != DmaProcPacketAckError))
27456 +    {
27457 +       printk ("elan: bad dma trap, status = %lx\n", (long)status);
27458 +       panic ("elan: bad dma trap\n");
27459 +    }
27460 +    
27461 +    PULSE_SCHED_RESTART (dev, SCH_RestartDma0Proc | SCH_RestartDma1Proc | SCH_RestartDmaPrefetchProc);
27462 +
27463 +    elan4_detach_filter (&dev->dev_ctxt, 0);
27464 +
27465 +    ENABLE_INT_MASK (dev, INT_Dma0Proc | INT_Dma1Proc);
27466 +#undef VIRTUAL_ADDRESS
27467 +}
27468 +#endif
27469 +
27470 +static int
27471 +ebus_read_vpd (ELAN4_DEV *dev, unsigned char *data, unsigned int nob)
27472 +{
27473 +    unsigned int pci_data_ptr;
27474 +    unsigned int vpd_ptr;
27475 +    register int i;
27476 +
27477 +    if (read_ebus_rom (dev, 0) != 0x55 || read_ebus_rom (dev, 1) != 0xaa)
27478 +    {
27479 +       printk ("elan%d: invalid rom signature in ebus rom\n", dev->dev_instance);
27480 +       return -EINVAL;
27481 +    }
27482 +
27483 +    pci_data_ptr = (read_ebus_rom (dev, 0x19) << 8) | read_ebus_rom (dev, 0x18);
27484 +
27485 +    /* check the pci data structure */
27486 +    if (read_ebus_rom (dev, pci_data_ptr + 0) != 'P' ||
27487 +       read_ebus_rom (dev, pci_data_ptr + 1) != 'C' ||
27488 +       read_ebus_rom (dev, pci_data_ptr + 2) != 'I' ||
27489 +       read_ebus_rom (dev, pci_data_ptr + 3) != 'R')
27490 +    {
27491 +       printk ("elan%d: invalid pci data structure in ebus rom\n", dev->dev_instance);
27492 +       return -EINVAL;
27493 +    }
27494 +    
27495 +    /* extract the VPD pointer */
27496 +    vpd_ptr = (read_ebus_rom (dev, pci_data_ptr + 9) << 8) | read_ebus_rom (dev, pci_data_ptr + 8);
27497 +
27498 +    if (vpd_ptr == 0)
27499 +    {
27500 +       printk ("elan%d: no vital product data in ebus rom\n", dev->dev_instance);
27501 +       return -EINVAL;
27502 +    }
27503 +    
27504 +    /* read the vpd data */
27505 +    for (i = 0; i < nob; i++)
27506 +       data[i] = read_ebus_rom (dev, vpd_ptr + i);
27507 +
27508 +    return 0;
27509 +}
27510 +
27511 +int
27512 +elan4_read_vpd (ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) 
27513 +{
27514 +    unsigned char vpd[I2C_ELAN_EEPROM_VPD_SIZE];
27515 +    unsigned char *ptr = vpd;
27516 +    unsigned int   finished = 0;
27517 +    unsigned char *lim;
27518 +    unsigned char  name[3];
27519 +    unsigned char  value[256];
27520 +    unsigned char  type;
27521 +    unsigned int   len, len2;
27522 +    register int   i;
27523 +
27524 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
27525 +    {
27526 +       if (ebus_read_vpd (dev, vpd, I2C_ELAN_EEPROM_VPD_SIZE) < 0)
27527 +       {
27528 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from EBUS rom\n", dev->dev_instance);
27529 +           return -EINVAL ;
27530 +       }       
27531 +    }
27532 +    else
27533 +    {
27534 +       if (i2c_read_rom (dev, I2C_ELAN_EEPROM_VPD_BASEADDR, I2C_ELAN_EEPROM_VPD_SIZE, vpd) < 0)
27535 +       {
27536 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unable to read serial number from I2C rom\n", dev->dev_instance);
27537 +           return  -EINVAL;
27538 +       }
27539 +    }
27540 +
27541 +    result[0] = 0;
27542 +    while (! finished)
27543 +    {
27544 +       type = *ptr++;
27545 +       
27546 +       if (type & LARGE_RESOURCE_BIT)
27547 +       {
27548 +           len = *(ptr++);
27549 +           len += *(ptr++) << 8;
27550 +           
27551 +           switch (type & ~LARGE_RESOURCE_BIT)
27552 +           {
27553 +           case LARGE_RESOURCE_STRING:
27554 +           case LARGE_RESOURCE_VENDOR_DEFINED:
27555 +               ptr += len;
27556 +               break;
27557 +               
27558 +           case LARGE_RESOURCE_VITAL_PRODUCT_DATA:
27559 +               for (lim = ptr + len; ptr < lim; )
27560 +               {
27561 +                   name[0] = *ptr++;
27562 +                   name[1] = *ptr++;
27563 +                   name[2] = '\0';
27564 +                   len2    = *ptr++;
27565 +                   
27566 +                   for (i = 0; i < len2 && ptr < lim; i++)
27567 +                       value[i] = *ptr++;
27568 +                   value[i] = '\0';
27569 +                                   
27570 +                   PRINTF3 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, %s: %s\n", dev->dev_instance, name, value);
27571 +
27572 +                   if (tag != NULL) 
27573 +                   { /* looking for just one tag */
27574 +                       if (!strcmp (name, tag))
27575 +                           strcpy(result, value);
27576 +                   } 
27577 +                   else 
27578 +                   { /* get all tags */
27579 +                       strcat(result,name);
27580 +                       strcat(result,": ");
27581 +                       strcat(result,value);
27582 +                       strcat(result,"\n");
27583 +                   }
27584 +               }
27585 +               break;
27586 +               
27587 +           default:
27588 +               PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown large resource %x\n", dev->dev_instance, type);
27589 +               finished = 1;
27590 +               break;
27591 +           }
27592 +       }
27593 +       else
27594 +       {
27595 +           len = type & 0x7;
27596 +           
27597 +           switch (type >> 3)
27598 +           {
27599 +           case SMALL_RESOURCE_COMPATIBLE_DEVICE_ID:
27600 +               ptr += len;
27601 +               break;
27602 +               
27603 +           case SMALL_RESOURCE_VENDOR_DEFINED:
27604 +               ptr += len;
27605 +               break;
27606 +               
27607 +           case SMALL_RESOURCE_END_TAG:
27608 +               finished = 1;
27609 +               break;
27610 +               
27611 +           default:
27612 +               PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, unknown small resource %x\n", dev->dev_instance, type >> 3);
27613 +               finished = 1;
27614 +               break;
27615 +           }
27616 +       }
27617 +    }
27618 +
27619 +    if ( result[0] == 0 ) {
27620 +       if ( tag != 0 ) 
27621 +           PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find tag %s\n", dev->dev_instance, tag);
27622 +       else
27623 +           PRINTF1 (DBG_DEVICE, DBG_CONFIG, "elan%d: elan4_read_vpd, failed to find any tags\n", dev->dev_instance);
27624 +       return -EINVAL;
27625 +    }
27626 +
27627 +    return (0);
27628 +}
27629 +
27630 +int
27631 +elan4_start_device (ELAN4_DEV *dev)
27632 +{
27633 +    E4_VirtualProcessEntry entry;
27634 +    unsigned              pagesizeval[2];
27635 +    unsigned              hashsizeval[2];
27636 +    register int           i, j, tbl, res;
27637 +    unsigned               attempts = 0;
27638 +    E4_PAGE_SIZE_TABLE;
27639 +    unsigned char          serial[256];
27640 +    unsigned int           sdram_factor = SDRAM_166_DLL_CORRECTION_FACTOR;
27641 +
27642 +    PRINTF (DBG_DEVICE, DBG_ALL, "elan4_start_device: entered\n");
27643 +
27644 +    dev->dev_state = ELAN4_STATE_STARTING;
27645 +
27646 + tryagain:
27647 +    /* Initialise the pci config space */
27648 +    if ((res = elan4_pciinit (dev)) < 0)
27649 +       return (res);
27650 +
27651 +    /* Display the serial number */
27652 +    if (elan4_read_vpd (dev, "SN", serial))
27653 +       printk("elan%d: SN: failed to read\n", dev->dev_instance);
27654 +    else
27655 +       printk("elan%d: SN: %s\n", dev->dev_instance, serial);
27656 +
27657 +    /* initialise the interrupt mask to zero */
27658 +    SET_INT_MASK (dev, 0);
27659 +
27660 +    /* Initialise the device registers */
27661 +    write_reg64 (dev, TlbLineValue, 0);
27662 +    write_reg64 (dev, SysControlReg, 0);
27663 +
27664 +    /* Initialise the SDRAM using the configuration value from the ROM */
27665 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_SETUP);
27666 +
27667 +    /* Setup the linkport registers */
27668 +    write_reg64 (dev, LinkPortLock, elan4_linkport_lock);
27669 +
27670 +    /* Setup the tick rates, start the clock, and init the stats registers */
27671 +    write_ureg32 (dev, ClockTickRate.s.TickRates, ELAN4_CLOCK_TICK_RATE);
27672 +    write_ureg64 (dev, Clock, 0);
27673 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
27674 +    for (i = 0; i < 8; i++)
27675 +       write_ureg32 (dev, StatCounts[i].s.StatsCount, 0);
27676 +
27677 +    /* Initialise the Link Control register - disable the TLB prefetcher on RevB
27678 +     * as it can cause very occasional data corruption. */
27679 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
27680 +       write_reg32 (dev, LinkControlReg, LCONT_EN_SYS_READS | LCONT_REVB_DISABLE_TLB_PREFETCH);
27681 +    else
27682 +       write_reg32 (dev, LinkControlReg, LCONT_EN_SYS_READS);
27683 +
27684 +    /* Initialise the Link Control Settings to set the PLL Reference Value */
27685 +    write_reg32 (dev, LinkContSettings, 
27686 +                (elan4_mod45disable ? LCONT_MOD45_DISABLE : 0) |
27687 +                (3 << LCONT_CONFIG_PHASE_SHIFT) |
27688 +                ((elan4_pll_div & LCONT_PLL_REF_VAL_BITS_MASK) << LCONT_PLL_REF_VAL_BITS_SHIFT) |
27689 +                (LCONT_VOD_360 << LCONT_LVDS_VOLTAGE_BITS_SHIFT) |
27690 +                (LCONT_TERM_AUTO_OHM << LCONT_LVDS_TERMINATION_SHIFT));
27691 +
27692 +    /* Clear the link error LED on RevB and above */
27693 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA)
27694 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_ClearLinkError);
27695 +
27696 +    /* Compute the SysClk frequency and update the PLL if necessary */
27697 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA)
27698 +    {
27699 +       int mhz = measure_sysclk (dev);
27700 +
27701 +       if (elan4_pll_cfg != 0 || mhz > 190 || mhz < 170)
27702 +           printk ("elan%d: SysClk running at %d Mhz\n", dev->dev_instance, measure_sysclk (dev));
27703 +       else
27704 +       {
27705 +           sdram_factor = SDRAM_150_DLL_CORRECTION_FACTOR;
27706 +
27707 +           elan4_updatepll (dev, ECTRL_SYS_CLOCK_RATIO_4_3);
27708 +           
27709 +           printk ("elan%d: SysClk now running at %d Mhz\n", dev->dev_instance, measure_sysclk (dev));
27710 +       }
27711 +    }
27712 +       
27713 +    initialise_cache (dev);
27714 +
27715 +    /* Initialise the MMU hash table parameters */
27716 +    /* Select the largest elan pagesize which is spanned by the
27717 +     * system pagesize for mmu table 0*/
27718 +    for (i = 0; i < E4_PAGE_SIZE_TABLE_SIZE; i++)
27719 +       if (PageSizeTable[i] > PAGE_SHIFT)
27720 +           break;
27721 +
27722 +    pagesizeval[0] = i - 1;
27723 +    hashsizeval[0] = elan4_hash_0_size_val;
27724 +       
27725 +    /* Select a suitable elan pagesize to match any "large" page
27726 +     * support that the OS provides. */
27727 +    pagesizeval[1] = PAGE_SIZE_4M;
27728 +    hashsizeval[1] = elan4_hash_1_size_val;
27729 +
27730 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
27731 +    {
27732 +       dev->dev_pagesizeval[tbl]   = pagesizeval[tbl];
27733 +       dev->dev_pageshift[tbl]     = PageSizeTable[pagesizeval[tbl]];
27734 +       dev->dev_hashsize[tbl]      = (1 << hashsizeval[tbl])/sizeof (E4_HashTableEntry);
27735 +       dev->dev_rsvd_hashmask[tbl] = ((1 << (27 - dev->dev_pageshift[tbl]))-1) & ~((1 << hashsizeval[tbl])-1);
27736 +       dev->dev_rsvd_hashval[tbl]  = 0xFFFFFFFF;
27737 +    }
27738 +
27739 +    PRINTF2 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: pageshifts %d,%d\n", dev->dev_pageshift[0], 
27740 +            NUM_HASH_TABLES == 2 ? dev->dev_pageshift[1] : 0);
27741 +
27742 +    /* Initialise the control register to the desired value */
27743 +    dev->dev_syscontrol = (CONT_EN_ALL_SETS | CONT_MMU_ENABLE | CONT_CACHE_ALL | CONT_2K_NOT_1K_DMA_PACKETS |
27744 +                          (pagesizeval[0] << CONT_TABLE0_PAGE_SIZE_SHIFT) | (hashsizeval[0] << CONT_TABLE0_MASK_SIZE_SHIFT));
27745 +
27746 +    if (NUM_HASH_TABLES == 2)
27747 +       dev->dev_syscontrol |= CONT_TWO_HASH_TABLES | (pagesizeval[1] << CONT_TABLE1_PAGE_SIZE_SHIFT) | (hashsizeval[1] << CONT_TABLE1_MASK_SIZE_SHIFT);
27748 +
27749 +    write_reg64 (dev, SysControlReg, dev->dev_syscontrol);
27750 +
27751 +    /* use direct mapped pci writes during sdram initialisation, since for 
27752 +     * cache flushing to work, we need to ensure that the cacheflush page
27753 +     * never gets lines into the incorrect cache set. */
27754 +    SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
27755 +
27756 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
27757 +       elan4_sdram_setup_delay_lines(dev, sdram_factor);
27758 +
27759 +    for (i = res = 0; i < dev->dev_sdram_numbanks; i++)
27760 +       if (dev->dev_sdram_banks[i].b_size)
27761 +           res |= elan4_sdram_init_bank (dev, &dev->dev_sdram_banks[i]);
27762 +
27763 +    if (! res)
27764 +    {
27765 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB && ++attempts < 5)
27766 +       {
27767 +           printk ("elan%d: sdram not working, resetting\n", dev->dev_instance);
27768 +           goto tryagain;
27769 +       }
27770 +
27771 +       printk ("elan%d: could not find any sdram banks\n", dev->dev_instance);
27772 +       goto failed;
27773 +    }
27774 +
27775 +#ifndef CONFIG_MPSAS
27776 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialising for ECC\n");
27777 +
27778 +    for (i = 0 ; i < dev->dev_sdram_numbanks; i++)
27779 +       if (dev->dev_sdram_banks[i].b_ioaddr)
27780 +           initialise_ecc (dev, &dev->dev_sdram_banks[i]);
27781 +#endif
27782 +
27783 +    dev->dev_sdram_initial_ecc_val = read_reg64 (dev, SDRamECCStatus);
27784 +
27785 +    /* Now enable ECC after we've scrubbed the memory */
27786 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg | SDRAM_ENABLE_ECC);
27787 +
27788 +    /* clear any errors, and flush the tlb/route cache */
27789 +    PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH | CONT_ROUTE_FLUSH | CONT_CLEAR_LINKPORT_INT | CONT_CLEAR_SDRAM_ERROR);
27790 +
27791 +    write_ureg32 (dev, InstCount.s.StatsCount, 0);
27792 +
27793 +    /* Initialise the thread processor's register file */
27794 +    for (i = 0; i < 64; i++)
27795 +       write_reg64 (dev, TProcRegs[i], 0);
27796 +
27797 +    /* Initialise the thread processor's ICache tags */
27798 +    for (i = 0; i < (E4_ICacheLines/E4_ICachePortSize); i++)
27799 +    {
27800 +        write_reg64 (dev, ICachePort_Cntl_Addr, i << E4_ICacheTagAddrShift);
27801 +        for (j = 0; j < E4_ICachePortSize; j++)
27802 +           write_reg64 (dev, ICachePort[j], E4_InvalidTagValue);
27803 +    }
27804 +
27805 +    /*
27806 +     * Initialise the ICache with a sethi %hi(addr << 7), %r0
27807 +     * writing 8 64 bit values per loop of sethi %g0 values ending in 77 for something different??
27808 +     */
27809 +    for (i = 0; i < E4_ICacheSizeInBytes; i += (E4_ICachePortSize << 3))
27810 +    {
27811 +       write_reg64 (dev, ICachePort_Cntl_Addr, E4_AccessICacheRams | (i >> 3));
27812 +
27813 +       for (j = 0; j < E4_ICachePortSize; j++)
27814 +           write_reg64 (dev, ICachePort[j], 
27815 +                        (E4_uint64) (((E4_uint64)i << (4+7))    + ((E4_uint64)j << (1+7))    + (0x077)) |
27816 +                        (E4_uint64) (((E4_uint64)i << (4+7+32)) + ((E4_uint64)j << (1+7+32)) + (0x0e7)) << 32);
27817 +    }
27818 +
27819 +    /*
27820 +     * Initialise the top of the ICache Set0 with an instruction which will
27821 +     * cause a known trap fingerprint so that the application can identify it
27822 +     * and ignore the trap.
27823 +     */
27824 +    write_reg64 (dev, ICachePort_Cntl_Addr, E4_ICacheFixupOffset | E4_AccessICacheRams);
27825 +    for (i = 0; i < E4_ICachePortSize; i++)
27826 +       write_reg64 (dev, ICachePort[i], E4_ICacheFixupInsn | (E4_ICacheFixupInsn << 32));
27827 +
27828 +    /* create the buddy allocator for SDRAM */
27829 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
27830 +       if (dev->dev_sdram_banks[i].b_ioaddr)
27831 +           elan4_sdram_add_bank (dev, &dev->dev_sdram_banks[i]);
27832 +
27833 +    dev->dev_ctxtableshift        = elan4_ctxt_table_shift;
27834 +    dev->dev_cqcount              = (1 << elan4_ln2_max_cqs);
27835 +    dev->dev_cqreorder            = 0;
27836 +
27837 +    /* allocate the sdram for cache flushing whilst still in direct mapped mode */
27838 +    dev->dev_cacheflush_space = elan4_sdram_alloc (dev, E4_CacheSize);
27839 +
27840 +    /* no longer need direct mapped pci writes */
27841 +    CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
27842 +
27843 +    /* allocate the hash tables, command queues, context tables etc */
27844 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: allocating hash tables, command queueus, context tables\n");
27845 +
27846 +    dev->dev_comqlowpri       = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
27847 +    dev->dev_comqhighpri      = elan4_sdram_alloc (dev, (1 << COMMAND_RUN_QUEUE_BITS));
27848 +    dev->dev_cqaddr           = elan4_sdram_alloc (dev, sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
27849 +    dev->dev_dmaqhighpri      = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_highpri_size));
27850 +    dev->dev_dmaqlowpri       = elan4_sdram_alloc (dev, E4_QueueSize(elan4_dmaq_lowpri_size));
27851 +    dev->dev_threadqhighpri   = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_highpri_size));
27852 +    dev->dev_threadqlowpri    = elan4_sdram_alloc (dev, E4_QueueSize(elan4_threadq_lowpri_size));
27853 +    dev->dev_interruptq       = elan4_sdram_alloc (dev, E4_QueueSize(elan4_interruptq_size));
27854 +
27855 +    dev->dev_ctxtable         = elan4_sdram_alloc (dev, (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock));
27856 +    dev->dev_faultarea        = elan4_sdram_alloc (dev, CUN_Entries * sizeof (E4_FaultSave));
27857 +    dev->dev_inputtraparea    = elan4_sdram_alloc (dev, sizeof (E4_IprocTrapState));
27858 +
27859 +    dev->dev_sdrampages[0]    = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
27860 +    dev->dev_sdrampages[1]    = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
27861 +
27862 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
27863 +    {
27864 +       dev->dev_hashtable[tbl] = elan4_sdram_alloc (dev, dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
27865 +#ifndef CONFIG_MPSAS
27866 +       /* Initialise hash tables to invalid (zero) */
27867 +       elan4_sdram_zeroq_sdram (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
27868 +#endif
27869 +    }
27870 +
27871 +    /* Initialise all context filters to discard */
27872 +#ifdef CONFIG_MPSAS
27873 +    if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, dev->dev_ctxtable, 
27874 +                       E4_FILTER_DISCARD_ALL, (1 << (dev->dev_ctxtableshift-1))) < 0)
27875 +    {
27876 +       for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
27877 +           elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
27878 +    }
27879 +#else
27880 +    for (i = 0; i < (1 << dev->dev_ctxtableshift); i++)
27881 +       elan4_write_filter (dev, i, E4_FILTER_DISCARD_ALL);
27882 +#endif
27883 +
27884 +    PRINTF4 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: hashtables %x,%x, %x,%x\n", dev->dev_hashtable[0], 
27885 +           dev->dev_hashsize[0], dev->dev_hashtable[1], dev->dev_hashsize[1]);
27886 +
27887 +    /* install the hash table pointers */
27888 +    PRINTF0 (DBG_DEVICE, DBG_CONFIG, "elan4_start_device: initialise registers with table addresses\n");
27889 +    write_reg64 (dev, MmuTableBasePtrs, (((E4_uint64) dev->dev_hashtable[0]) | ((E4_uint64) dev->dev_hashtable[1]) << 32));
27890 +    write_reg64 (dev, MmuFaultAndRootCntxPtr, (((E4_uint64) dev->dev_ctxtableshift) | 
27891 +                                              ((E4_uint64) dev->dev_ctxtable) |
27892 +                                              ((E4_uint64) dev->dev_faultarea) << 32));
27893 +    write_reg64 (dev, InputTrapAndFilter, (((E4_uint64) dev->dev_ctxtableshift) | 
27894 +                                          ((E4_uint64) dev->dev_ctxtable) |
27895 +                                          ((E4_uint64) dev->dev_inputtraparea) << 32));
27896 +    /*
27897 +     * The run ptrs have this format: (Front << 32) | Back
27898 +     * The base for both the front and back uses the high bits of the back pointer.
27899 +     * So writing just the base value is good enough.
27900 +     */
27901 +    write_reg64 (dev, CommandLowPriRunPtrs,  dev->dev_comqlowpri);
27902 +    write_reg64 (dev, CommandHighPriRunPtrs, dev->dev_comqhighpri);
27903 +
27904 +    /* Initialise the run queues */
27905 +    write_reg64 (dev, DProcHighPriPtrs,       E4_QueueValue (dev->dev_dmaqhighpri,    elan4_dmaq_highpri_size));
27906 +    write_reg64 (dev, DProcLowPriPtrs,        E4_QueueValue (dev->dev_dmaqlowpri,     elan4_dmaq_lowpri_size));
27907 +    write_reg64 (dev, TProcHighPriPtrs,       E4_QueueValue (dev->dev_threadqhighpri, elan4_threadq_highpri_size));
27908 +    write_reg64 (dev, TProcLowPriPtrs,        E4_QueueValue (dev->dev_threadqlowpri,  elan4_threadq_lowpri_size));
27909 +
27910 +    /* Initialise the interrupt queue as "empty" - this is actually with one entry on it */
27911 +    write_reg64 (dev, MainIntQueuePtrs.Value, (((E4_uint64) E4_QueueFrontValue (dev->dev_interruptq, elan4_interruptq_size) << 32) |
27912 +                                              ((E4_uint64) E4_QueueBackPointer(dev->dev_interruptq + E4_MainIntEntrySize))));
27913 +    
27914 +    dev->dev_interruptq_nfptr = dev->dev_interruptq + E4_MainIntEntrySize;
27915 +
27916 +    /*
27917 +     * Flush the context filter before dropping the Discard all bits in the schedule status register.
27918 +     * Also hit the SCH_RestartTProc to clear out X's from the trap state and
27919 +     * hit the SCH_RestartDmaPrefetchProc to clear out X's from the prev register.
27920 +     */
27921 +    PULSE_SCHED_RESTART (dev, SCH_ContextFilterFlush | SCH_RestartTProc | SCH_RestartDmaPrefetchProc);
27922 +
27923 +    /* setup the schedule status register. */
27924 +    SET_SCHED_STATUS (dev, SCH_CProcTimeout6p2us | SCH_DProcTimeslice512us);
27925 +
27926 +    /*
27927 +     * Now initialise the inserter cache.
27928 +     * Bit 31 of the first word of the descriptor is a valid bit. This must be cleared.
27929 +     * Bit 31 becomes a used bit in the descriptors in memory.
27930 +     */
27931 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
27932 +    {
27933 +       write_reg32 (dev, CommandControl.CommandQueueDescsBase, i);     /* select a cache line */
27934 +       write_reg64 (dev, CommandCacheTestPort, 0);                     /* Mark it invalid */
27935 +    }
27936 +    
27937 +    /* Setup the pointer to the command descriptors */
27938 +    /*   the table must be aligned on a CQ_CommandDescsAlignment boundary */
27939 +    /*   since we've allocated a small table - we work out the offset of the */
27940 +    /*   first entry in our table for mapping in the command ports later */
27941 +    dev->dev_cqoffset = (dev->dev_cqaddr & (CQ_CommandDescsAlignment-1)) / sizeof (E4_CommandQueueDesc);
27942 +
27943 +    write_reg32 (dev, CommandControl.CommandQueueDescsBase, (dev->dev_cqaddr & ~(CQ_CommandDescsAlignment-1)) | COM_ENABLE_DEQUEUE);
27944 +
27945 +    /* allocate the bitmaps for cq,ctxt allocation */
27946 +    KMEM_ZALLOC (dev->dev_cqamap, bitmap_t *, BT_BITOUL(dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t), 1);
27947 +    KMEM_ZALLOC (dev->dev_ctxmap, bitmap_t *, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t), 1);
27948 +
27949 +    if (dev->dev_cqamap == NULL || dev->dev_ctxmap == NULL)
27950 +       goto failed;
27951 +
27952 +    /* Make every fourth context be invalid for ICache fixup.
27953 +     * context 0 is also invalid - since it is used to indicate 
27954 +     * an invalid tag. */
27955 +    for (i = 0; i < (1 << dev->dev_ctxtableshift); i += 4)
27956 +       BT_SET (dev->dev_ctxmap, i);
27957 +    
27958 +    /* initialise the halt operations */
27959 +    dev->dev_haltop_mask   = 0;
27960 +    dev->dev_haltop_active = 0;
27961 +
27962 +    /* allocate the hash table shadow structures - and place all blocks on the free lists */
27963 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
27964 +    {
27965 +       KMEM_ZALLOC (dev->dev_mmuhash[tbl], ELAN4_HASH_ENTRY *,  dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY), 1);
27966 +
27967 +       if (dev->dev_mmuhash[tbl] == NULL)
27968 +           goto failed;
27969 +
27970 +       for (i = 0; i < dev->dev_hashsize[tbl]; i++)
27971 +           dev->dev_mmuhash[tbl][i].he_entry = dev->dev_hashtable[tbl] + (i * sizeof (E4_HashTableEntry));
27972 +    }
27973 +
27974 +    /* setup the interrupt mask register */
27975 +    SET_INT_MASK (dev, (INT_MSI0 | INT_MSI1 | INT_MSI2 | INT_MSI3) & ~(INT_Discarding | INT_Halted | INT_LinkPortKeyFail));
27976 +
27977 +    /* start a thread to handle excessive main interrupts */
27978 +    if (kernel_thread_create (elan4_mainint_thread, (caddr_t) dev) == NULL)
27979 +       goto failed;
27980 +    dev->dev_mainint_started = 1;
27981 +    
27982 +    /* install the device context - and allocate the first 16 command queues */
27983 +    if (elan4_insertctxt (dev, &dev->dev_ctxt, &device_trap_ops) != 0)
27984 +       goto failed;
27985 +
27986 +    /* Allocate command queues, one for each entry in the inserter cache, 
27987 +     * we'll use these queues to flush the insert cache */
27988 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
27989 +    {
27990 +       if ((dev->dev_flush_cq[i] = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit | CQ_InterruptEnableBit, 
27991 +                                                  CQ_Priority)) == NULL)
27992 +           goto failed;
27993 +       
27994 +       ASSERT (elan4_cq2num(dev->dev_flush_cq[i]) == i);
27995 +
27996 +       dev->dev_flush_finished |= (1 << i);
27997 +    }
27998 +
27999 +    /* Allocate command queues for dma halt operations */
28000 +    if ((dev->dev_dma_flushop[0].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit|CQ_WaitEventEnableBit, 0)) == NULL ||
28001 +       (dev->dev_dma_flushop[1].cq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_DmaStartEnableBit|CQ_WaitEventEnableBit, CQ_Priority)) == NULL)
28002 +       goto failed;
28003 +
28004 +#ifdef CONFIG_MPSAS
28005 +    elan4_sdram_flushcache (dev, 0, E4_CacheSize);
28006 +#endif
28007 +
28008 +    /* initialise halt operation for flushing the icache */
28009 +    dev->dev_iflush_haltop.op_function = device_iflush_haltop;
28010 +    dev->dev_iflush_haltop.op_arg      = dev;
28011 +    dev->dev_iflush_haltop.op_mask     = INT_TProcHalted;
28012 +
28013 +    /* Allocate a route table, and create a valid route for vp==0, this is used
28014 +     * when a DMA is removed from the dma run queue */
28015 +    if ((dev->dev_routetable = elan4_alloc_routetable (dev, 0)) == NULL)
28016 +       goto failed;
28017 +
28018 +    elan4_set_routetable (&dev->dev_ctxt, dev->dev_routetable);
28019 +
28020 +    entry.Values[0] = FIRST_MYLINK;
28021 +    entry.Values[1] = 0;
28022 +
28023 +    elan4_write_route (dev, dev->dev_routetable, 0, &entry);
28024 +
28025 +    /* map the sdram pages into the elan */
28026 +    dev->dev_tproc_suspend = DEVICE_TPROC_SUSPEND_ADDR;
28027 +    dev->dev_tproc_space   = DEVICE_TPROC_SPACE_ADDR;
28028 +
28029 +
28030 +    elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, HE_TYPE_SDRAM, (dev->dev_sdrampages[0] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocExecute));
28031 +    elan4mmu_pteload (&dev->dev_ctxt, 0, dev->dev_tproc_space,   HE_TYPE_SDRAM, (dev->dev_sdrampages[1] >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocEventWrite));
28032 +
28033 +    /* and store the thread suspend sequence in it for use when a thread is removed from the run queue */
28034 +    elan4_sdram_writel (dev, dev->dev_sdrampages[0], DEVICE_TPROC_SUSPEND_INSTR);
28035 +
28036 +    /* and initialise the dma flush event in sdrampage[1] */
28037 +    elan4_sdram_writeq (dev, dev->dev_sdrampages[1] + 64, E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
28038 +
28039 +#ifdef CONFIG_MPSAS
28040 +    do_initdma (dev);
28041 +#endif
28042 +    
28043 +    if (!elan4_neterr_init (dev))
28044 +       goto failed;
28045 +
28046 +    elan4_configure_writecombining (dev);
28047 +
28048 +    /* finally register the device with elanmod for rms */
28049 +    dev->dev_idx = elan_dev_register (&dev->dev_devinfo, &elan4_dev_ops, (void *) dev);
28050 +
28051 +    dev->dev_state = ELAN4_STATE_STARTED;
28052 +
28053 +    return (0);
28054 +
28055 + failed:
28056 +    printk ("elan%d: failed to start elan4 device - stopping\n", dev->dev_instance);
28057 +
28058 +    elan4_stop_device (dev);
28059 +    return (-ENOMEM);
28060 +}
28061 +
28062 +void
28063 +elan4_stop_device (ELAN4_DEV *dev)
28064 +{
28065 +    unsigned long flags;
28066 +    int i, tbl;
28067 +
28068 +    dev->dev_state = ELAN4_STATE_STOPPING;
28069 +
28070 +    elan_dev_deregister (&dev->dev_devinfo);
28071 +
28072 +    elan4_unconfigure_writecombining (dev);
28073 +
28074 +    elan4_neterr_destroy (dev);
28075 +
28076 +    if (dev->dev_tproc_suspend)
28077 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_suspend, 1 << dev->dev_pageshift[0]);
28078 +
28079 +    if (dev->dev_tproc_space)
28080 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, dev->dev_tproc_space,   1 << dev->dev_pageshift[0]);
28081 +
28082 +    if (dev->dev_routetable)
28083 +    {
28084 +       elan4_set_routetable (&dev->dev_ctxt, NULL);
28085 +       elan4_free_routetable (dev, dev->dev_routetable);
28086 +    }
28087 +
28088 +    for (i = 0; i < 2; i++)
28089 +       if (dev->dev_dma_flushop[i].cq)
28090 +           elan4_freecq (&dev->dev_ctxt, dev->dev_dma_flushop[i].cq);
28091 +
28092 +    /* free of the device context - and insert cache flushing command queues */
28093 +    for (i = 0; i < COMMAND_INSERTER_CACHE_ENTRIES; i++)
28094 +       if (dev->dev_flush_cq[i])
28095 +           elan4_freecq (&dev->dev_ctxt, dev->dev_flush_cq[i]);
28096 +
28097 +    if (dev->dev_ctxt.ctxt_dev)
28098 +       elan4_removectxt (dev, &dev->dev_ctxt);
28099 +
28100 +    /* stop the mainint thread */
28101 +    spin_lock_irqsave (&dev->dev_mainint_lock, flags);
28102 +    dev->dev_stop_threads = 1;
28103 +
28104 +    while (dev->dev_mainint_started && !dev->dev_mainint_stopped)
28105 +    {
28106 +       kcondvar_wakeupall (&dev->dev_mainint_wait, &dev->dev_mainint_lock);
28107 +       kcondvar_wait (&dev->dev_mainint_wait, &dev->dev_mainint_lock, &flags);
28108 +    }
28109 +    dev->dev_mainint_started = dev->dev_mainint_stopped = 0;
28110 +    spin_unlock_irqrestore (&dev->dev_mainint_lock, flags);
28111 +
28112 +    /* cancel any error interrupt timeouts */
28113 +    if (timer_fn_queued (&dev->dev_error_timeoutid))
28114 +       cancel_timer_fn (&dev->dev_error_timeoutid);
28115 +
28116 +    if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && timer_fn_queued (&dev->dev_linkerr_timeoutid))
28117 +       cancel_timer_fn (&dev->dev_linkerr_timeoutid);
28118 +    
28119 +    /* reset the interrupt mask register to zero */
28120 +    if (dev->dev_regs)
28121 +       SET_INT_MASK (dev, 0);
28122 +
28123 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
28124 +    {
28125 +       if (dev->dev_mmuhash[tbl])
28126 +           KMEM_FREE (dev->dev_mmuhash[tbl], dev->dev_hashsize[tbl] * sizeof (ELAN4_HASH_ENTRY));
28127 +       if (dev->dev_hashtable[tbl])
28128 +           elan4_sdram_free (dev, dev->dev_hashtable[tbl], dev->dev_hashsize[tbl] * sizeof (E4_HashTableEntry));
28129 +    }
28130 +
28131 +    if (dev->dev_cqamap)
28132 +       KMEM_FREE (dev->dev_cqamap, BT_BITOUL (dev->dev_cqcount/ELAN4_CQ_PER_CQA) * sizeof (bitmap_t));
28133 +    if (dev->dev_ctxmap)
28134 +       KMEM_FREE (dev->dev_ctxmap, BT_BITOUL(1 << dev->dev_ctxtableshift) * sizeof (bitmap_t));
28135 +
28136 +    if (dev->dev_comqlowpri)
28137 +       elan4_sdram_free (dev, dev->dev_comqlowpri,     (1 << COMMAND_RUN_QUEUE_BITS));
28138 +    if (dev->dev_comqhighpri)
28139 +       elan4_sdram_free (dev, dev->dev_comqhighpri,    (1 << COMMAND_RUN_QUEUE_BITS));
28140 +    if (dev->dev_cqaddr)
28141 +       elan4_sdram_free (dev, dev->dev_cqaddr,         sizeof (E4_CommandQueueDesc) * dev->dev_cqcount);
28142 +    if (dev->dev_dmaqhighpri)
28143 +       elan4_sdram_free (dev, dev->dev_dmaqhighpri,    E4_QueueSize(elan4_dmaq_highpri_size));
28144 +    if (dev->dev_dmaqlowpri)
28145 +       elan4_sdram_free (dev, dev->dev_dmaqlowpri,     E4_QueueSize(elan4_dmaq_lowpri_size));
28146 +    if (dev->dev_threadqhighpri)
28147 +       elan4_sdram_free (dev, dev->dev_threadqhighpri, E4_QueueSize(elan4_threadq_highpri_size));
28148 +    if (dev->dev_threadqlowpri)
28149 +       elan4_sdram_free (dev, dev->dev_threadqlowpri,  E4_QueueSize(elan4_threadq_lowpri_size));
28150 +    if (dev->dev_interruptq)
28151 +       elan4_sdram_free (dev, dev->dev_interruptq,     E4_QueueSize(elan4_interruptq_size));
28152 +    
28153 +    if (dev->dev_ctxtable)
28154 +       elan4_sdram_free (dev, dev->dev_ctxtable,       (1 << dev->dev_ctxtableshift) * sizeof (E4_ContextControlBlock));
28155 +    if (dev->dev_faultarea)
28156 +       elan4_sdram_free (dev, dev->dev_faultarea,      CUN_Entries * sizeof (E4_FaultSave));
28157 +    if (dev->dev_inputtraparea)
28158 +       elan4_sdram_free (dev, dev->dev_inputtraparea,  sizeof (E4_IprocTrapState));
28159 +
28160 +    if (dev->dev_sdrampages[0])
28161 +       elan4_sdram_free (dev, dev->dev_sdrampages[0],  SDRAM_PAGE_SIZE);
28162 +    if (dev->dev_sdrampages[1])
28163 +       elan4_sdram_free (dev, dev->dev_sdrampages[1],  SDRAM_PAGE_SIZE);
28164 +
28165 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
28166 +       if (dev->dev_sdram_banks[i].b_ioaddr)
28167 +               elan4_sdram_fini_bank (dev, &dev->dev_sdram_banks[i]);
28168 +
28169 +    elan4_pcifini (dev);
28170 +
28171 +    dev->dev_state = ELAN4_STATE_STOPPED;
28172 +
28173 +    if (dev->dev_ack_errors)
28174 +        kfree(dev->dev_ack_errors);
28175 +    if (dev->dev_dproc_timeout)
28176 +        kfree(dev->dev_dproc_timeout);
28177 +    if (dev->dev_cproc_timeout)
28178 +        kfree(dev->dev_cproc_timeout);
28179 +}
28180 +
28181 +static __inline__ int
28182 +compute_arity (int lvl, unsigned n, char *arity)
28183 +{
28184 +    if (arity[lvl] == 0)
28185 +    {
28186 +       if (n <= 8)
28187 +           arity[lvl] = n;
28188 +       else
28189 +           arity[lvl] = 4;
28190 +    }
28191 +
28192 +    return (arity[lvl]);
28193 +}
28194 +
28195 +int
28196 +elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned arityval)
28197 +{
28198 +    int i, lvl, n;
28199 +    char arity[ELAN_MAX_LEVELS];
28200 +
28201 +    if (nodeid >= numnodes)
28202 +       return -EINVAL;
28203 +
28204 +    for (i = 0; i < ELAN_MAX_LEVELS; i++, arityval >>= 4)
28205 +       arity[i] = arityval & 7;
28206 +    
28207 +    for (lvl = 0, n = numnodes; n > compute_arity(lvl, n, arity) && lvl < ELAN_MAX_LEVELS; lvl++)
28208 +    {
28209 +       if ((n % arity[lvl]) != 0)
28210 +           return -EINVAL;
28211 +       
28212 +       n /= arity[lvl];
28213 +    }
28214 +
28215 +    if (arity[lvl] != n)
28216 +       return -EINVAL;
28217 +
28218 +    for (i = 0; i <= lvl; i++)
28219 +       pos->pos_arity[i] = arity[lvl - i];
28220 +
28221 +    pos->pos_nodes  = numnodes;
28222 +    pos->pos_levels = lvl + 1;
28223 +    pos->pos_nodeid = nodeid;
28224 +    pos->pos_mode   = ELAN_POS_MODE_SWITCHED;
28225 +
28226 +    return 0;
28227 +}
28228 +
28229 +int
28230 +elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos)
28231 +{
28232 +    kmutex_lock (&dev->dev_lock);
28233 +    *pos = dev->dev_position;
28234 +    kmutex_unlock (&dev->dev_lock);
28235 +
28236 +    return (pos->pos_mode);
28237 +}
28238 +
28239 +int
28240 +elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos)
28241 +{
28242 +    int forceLocal = 0;
28243 +    int nnodes, i;
28244 +    unsigned int *ack_errors;
28245 +    unsigned int *dproc_timeout;
28246 +    unsigned int *cproc_timeout;
28247 +
28248 +    switch (pos->pos_mode)
28249 +    {
28250 +    case ELAN_POS_UNKNOWN:
28251 +       break;
28252 +       
28253 +    case ELAN_POS_MODE_SWITCHED:
28254 +       if (pos->pos_levels > ELAN_MAX_LEVELS)
28255 +           return (-EINVAL);
28256 +       
28257 +       for (i = 0, nnodes = 1; i < pos->pos_levels; i++)
28258 +       {
28259 +
28260 +           if (pos->pos_arity[i] <= 0 || (i == 0 ? pos->pos_arity[i] > 8 : pos->pos_arity[i] >= 8))  /* allow an 8 way top-switch */
28261 +               return (-EINVAL);
28262 +           
28263 +           nnodes *= pos->pos_arity[i];
28264 +       }
28265 +
28266 +       if (pos->pos_nodes > nnodes || pos->pos_nodeid >= pos->pos_nodes)
28267 +           return (-EINVAL);
28268 +       break;
28269 +       
28270 +    case ELAN_POS_MODE_LOOPBACK:
28271 +       if (pos->pos_levels != 1 || pos->pos_nodes != 1 || pos->pos_nodeid != 0 || pos->pos_arity[0] != 1)
28272 +           return (-EINVAL);
28273 +
28274 +       forceLocal = 1;
28275 +       break;
28276 +
28277 +    case ELAN_POS_MODE_BACKTOBACK:
28278 +       if (pos->pos_levels != 1 || pos->pos_nodes != 2 || pos->pos_nodeid >= 2 || pos->pos_arity[0] != 2)
28279 +           return (-EINVAL);
28280 +
28281 +       forceLocal = (pos->pos_nodeid == 0);
28282 +       break;
28283 +
28284 +    default:
28285 +       return (-EINVAL);
28286 +    }
28287 +
28288 +    ack_errors = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
28289 +    if (!ack_errors)
28290 +       return (-EINVAL);
28291 +    memset(ack_errors, 0, pos->pos_nodes * sizeof(unsigned int));
28292 +    dproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
28293 +    if (!dproc_timeout) 
28294 +    {
28295 +        kfree(ack_errors);
28296 +        return (-EINVAL);
28297 +    }
28298 +    memset(dproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
28299 +    cproc_timeout = kmalloc(pos->pos_nodes * sizeof(unsigned int), GFP_KERNEL);
28300 +    if (!cproc_timeout)
28301 +    {
28302 +        kfree(ack_errors);
28303 +        kfree(dproc_timeout);
28304 +        return (-EINVAL);
28305 +    }
28306 +    memset(cproc_timeout, 0, pos->pos_nodes * sizeof(unsigned int));
28307 +       
28308 +    kmutex_lock (&dev->dev_lock);
28309 +    dev->dev_position = *pos;
28310 +    dev->dev_ack_errors = ack_errors;
28311 +    dev->dev_dproc_timeout = dproc_timeout;
28312 +    dev->dev_cproc_timeout = cproc_timeout;
28313 +
28314 +    if (forceLocal)
28315 +       write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) | LCONT_FORCE_COMMSCLK_LOCAL);
28316 +    else
28317 +       write_reg32 (dev, LinkContSettings, read_reg32 (dev, LinkContSettings) & ~LCONT_FORCE_COMMSCLK_LOCAL);
28318 +
28319 +    pioflush_reg (dev);
28320 +    kmutex_unlock (&dev->dev_lock);
28321 +
28322 +    return (0);
28323 +}
28324 +
28325 +void
28326 +elan4_get_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask)
28327 +{
28328 +    kmutex_lock (&dev->dev_lock);
28329 +
28330 +    *mask = dev->dev_devinfo.dev_params_mask;
28331 +    memcpy (params, &dev->dev_devinfo.dev_params, sizeof (ELAN_PARAMS));
28332 +    
28333 +    kmutex_unlock (&dev->dev_lock);
28334 +}
28335 +
28336 +void
28337 +elan4_set_params (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask)
28338 +{      
28339 +    int i;
28340 +
28341 +    kmutex_lock (&dev->dev_lock);
28342 +    for (i = 0; i < ELAN4_PARAM_COUNT; i++)
28343 +       if (mask & (1 << i))
28344 +           dev->dev_devinfo.dev_params.values[i] = params->values[i];
28345 +    
28346 +    dev->dev_devinfo.dev_params_mask |= mask;
28347 +    kmutex_unlock (&dev->dev_lock);
28348 +}
28349 +
28350 +
28351 +EXPORT_SYMBOL(elan4_get_position);
28352 +EXPORT_SYMBOL(elan4_set_position);
28353 +
28354 +EXPORT_SYMBOL(elan4_queue_haltop);
28355 +EXPORT_SYMBOL(elan4_queue_dma_flushop);
28356 +EXPORT_SYMBOL(elan4_queue_mainintop);
28357 +
28358 +EXPORT_SYMBOL(elan4_insertctxt);
28359 +EXPORT_SYMBOL(elan4_removectxt);
28360 +
28361 +EXPORT_SYMBOL(elan4_attach_filter);
28362 +EXPORT_SYMBOL(elan4_detach_filter);
28363 +EXPORT_SYMBOL(elan4_set_filter);
28364 +EXPORT_SYMBOL(elan4_set_routetable);
28365 +
28366 +EXPORT_SYMBOL(elan4_alloccq);
28367 +EXPORT_SYMBOL(elan4_freecq);
28368 +EXPORT_SYMBOL(elan4_restartcq);
28369 +
28370 +EXPORT_SYMBOL(elan4_flush_icache);
28371 +EXPORT_SYMBOL(elan4_hardware_lock_check);
28372 +
28373 +/*
28374 + * Local variables:
28375 + * c-file-style: "stroustrup"
28376 + * End:
28377 + */
28378 diff -urN clean/drivers/net/qsnet/elan4/device_Linux.c linux-2.6.9/drivers/net/qsnet/elan4/device_Linux.c
28379 --- clean/drivers/net/qsnet/elan4/device_Linux.c        1969-12-31 19:00:00.000000000 -0500
28380 +++ linux-2.6.9/drivers/net/qsnet/elan4/device_Linux.c  2005-09-07 10:35:03.000000000 -0400
28381 @@ -0,0 +1,3034 @@
28382 +/*
28383 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
28384 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
28385 + * 
28386 + *    For licensing information please see the supplied COPYING file
28387 + *
28388 + */
28389 +
28390 +#ident "@(#)$Id: device_Linux.c,v 1.110.2.9 2005/09/07 14:35:03 mike Exp $"
28391 +/*      $Source: /cvs/master/quadrics/elan4mod/device_Linux.c,v $*/
28392 +
28393 +#include <qsnet/kernel.h>
28394 +#include <qsnet/kthread.h>
28395 +#include <qsnet/kpte.h>
28396 +
28397 +#include <asm/io.h>
28398 +#include <asm/irq.h>
28399 +#ifdef CONFIG_MTRR
28400 +#include <asm/mtrr.h>
28401 +#endif
28402 +
28403 +#include <linux/init.h>
28404 +#include <linux/pci.h>
28405 +#include <qsnet/module.h>
28406 +#include <linux/reboot.h>
28407 +#include <linux/notifier.h>
28408 +
28409 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
28410 +#include <linux/wrapper.h>
28411 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23)
28412 +typedef void irqreturn_t;
28413 +#endif
28414 +#       define IRQ_NONE
28415 +#       define IRQ_HANDLED
28416 +#endif
28417 +
28418 +#include <elan4/debug.h>
28419 +#include <elan4/device.h>
28420 +#include <elan4/user.h>
28421 +#include <elan4/ioctl.h>
28422 +#include <elan4/intcookie.h>
28423 +
28424 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
28425 +#error please use a 2.4.0 series kernel or newer
28426 +#endif
28427 +
28428 +
28429 +#if defined(LINUX_SPARC) || defined(LINUX_PPC64)
28430 +#define __io_remap_page_range(from,offset,size,prot)   remap_page_range(from,offset,size,prot)
28431 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
28432 +#elif defined(NO_RMAP)
28433 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(from,offset,size,prot)
28434 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(from,offset,size,prot)
28435 +#else
28436 +#define __io_remap_page_range(from,offset,size,prot)   io_remap_page_range(vma,from,offset,size,prot)
28437 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
28438 +#define __remap_page_range(from,offset,size,prot)      remap_pfn_range(vma,from,(offset)>>PAGE_SHIFT,size,prot)
28439 +#else
28440 +#define __remap_page_range(from,offset,size,prot)      remap_page_range(vma,from,offset,size,prot)
28441 +#endif
28442 +#endif
28443 +
28444 +#if defined (X86_FEATURE_PAT)
28445 +static unsigned int pat_pteval = -1;
28446 +#endif
28447 +
28448 +#if defined(__alpha)
28449 +static inline physaddr_t bar2phys (unsigned long addr)
28450 +{
28451 +    return virt_to_phys((void *) ioremap(addr, PAGE_SIZE));
28452 +}
28453 +#elif defined(__ia64)
28454 +static inline physaddr_t bar2phys (unsigned long addr)
28455 +{
28456 +    return ((addr) & ~__IA64_UNCACHED_OFFSET);
28457 +}
28458 +#elif defined(__powerpc64__)
28459 +
28460 +#ifdef CONFIG_PPC_PSERIES
28461 +#include <asm/eeh.h>
28462 +
28463 +static inline physaddr_t bar2phys (unsigned long addr)
28464 +{
28465 +    return eeh_token_to_phys (addr);
28466 +}
28467 +#endif
28468 +
28469 +#else
28470 +static inline physaddr_t bar2phys (unsigned long addr)
28471 +{
28472 +    return (addr);
28473 +}
28474 +#endif
28475 +
28476 +#ifndef pgprot_noncached
28477 +static inline pgprot_t pgprot_noncached(pgprot_t _prot)
28478 +{
28479 +       unsigned long prot = pgprot_val(_prot);
28480 +#if defined(__powerpc__)
28481 +       prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
28482 +#elif defined(__sparc__)
28483 +       prot &= ~(_PAGE_CACHE);
28484 +       prot |= _PAGE_IE;
28485 +#endif
28486 +       return __pgprot(prot);
28487 +}
28488 +#endif
28489 +
28490 +#ifndef pgprot_writecombine
28491 +static inline pgprot_t pgprot_writecombine (pgprot_t _prot)
28492 +{
28493 +    unsigned long prot = pgprot_val(_prot);
28494 +
28495 +#if defined (X86_FEATURE_PAT)
28496 +    if (pat_pteval != -1)
28497 +       prot = (prot & ~(_PAGE_PCD | _PAGE_PWT | _PAGE_PSE)) | pat_pteval;
28498 +#endif
28499 +    return __pgprot (prot);
28500 +}
28501 +#endif
28502 +
28503 +#define ELAN4_DRIVER_VERSION           0x103           /* 16 bit value */
28504 +
28505 +/*
28506 + * Function prototypes.
28507 + */
28508 +static int        elan4_attach_device (int instance, struct pci_dev *pdev);
28509 +static void       elan4_detach_device (ELAN4_DEV *dev);
28510 +
28511 +static int        elan4_open (struct inode *inode, struct file *file);
28512 +static int        elan4_release(struct inode *inode, struct file *file);
28513 +static int        elan4_ioctl (struct inode *inode, struct file *file, 
28514 +                               unsigned int cmd, unsigned long arg);
28515 +static int        elan4_mmap (struct file *file, struct vm_area_struct *vm_area);
28516 +
28517 +static irqreturn_t elan4_irq (int irq, void *arg, struct pt_regs *regs);
28518 +
28519 +static void        elan4_shutdown_devices(int panicing);
28520 +
28521 +static int      disabled;                                      /* bitmask of which devices not to start */
28522 +unsigned int   elan4_pll_cfg      = 0;
28523 +int            elan4_pll_div      = 31;                        /* RevC PCB */
28524 +int            elan4_mod45disable = 0;
28525 +static int      optimise_pci_bus   = 1;                                /* 0 => don't, 1 => if ok, 2 => always */
28526 +static int      default_features   = 0;                                /* default values for dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] */
28527 +int             assfail_mode       = 0;
28528 +
28529 +long long       sdram_cfg = SDRAM_STARTUP_VALUE;
28530 +static int      sdram_cfg_lo;
28531 +static int     sdram_cfg_hi;
28532 +int            sdram_bank_limit;
28533 +
28534 +MODULE_AUTHOR("Quadrics Ltd.");
28535 +MODULE_DESCRIPTION("Elan 4 Device Driver");
28536 +MODULE_LICENSE("GPL");
28537 +
28538 +module_param(elan4_debug, uint, 0);
28539 +module_param(elan4_debug_toconsole, uint, 0);
28540 +module_param(elan4_debug_tobuffer, uint, 0);
28541 +module_param(elan4_debug_mmu, uint, 0);
28542 +module_param(elan4_pll_cfg, uint, 0);
28543 +module_param(elan4_pll_div, uint, 0);
28544 +module_param(elan4_mod45disable, uint, 0);
28545 +module_param(optimise_pci_bus, uint, 0);
28546 +module_param(default_features, uint, 0);
28547 +module_param(assfail_mode, uint, 0);
28548 +
28549 +module_param(disabled, uint, 0);
28550 +module_param(sdram_cfg_lo, uint, 0);
28551 +module_param(sdram_cfg_hi, uint, 0);
28552 +module_param(sdram_bank_limit, uint, 0);
28553 +
28554 +module_param(elan4_hash_0_size_val, uint, 0);
28555 +module_param(elan4_hash_1_size_val, uint, 0);
28556 +module_param(elan4_ctxt_table_shift, uint, 0);
28557 +module_param(elan4_ln2_max_cqs, uint, 0);
28558 +module_param(elan4_dmaq_highpri_size, uint, 0);
28559 +module_param(elan4_threadq_highpri_size, uint, 0);
28560 +module_param(elan4_dmaq_lowpri_size, uint, 0);
28561 +module_param(elan4_threadq_lowpri_size, uint, 0);
28562 +module_param(elan4_interruptq_size, uint, 0);
28563 +
28564 +module_param(elan4_mainint_punt_loops, uint, 0);
28565 +module_param(elan4_mainint_resched_ticks, uint, 0);
28566 +module_param(elan4_linkport_lock, uint, 0);
28567 +module_param(elan4_eccerr_recheck, uint, 0);
28568 +
28569 +module_param(user_p2p_route_options, uint, 0);
28570 +module_param(user_bcast_route_options, uint, 0);
28571 +module_param(user_dproc_retry_count, uint, 0);
28572 +module_param(user_cproc_retry_count, uint, 0);
28573 +module_param(user_ioproc_enabled, uint, 0);
28574 +module_param(user_pagefault_enabled, uint, 0);
28575 +
28576 +/*
28577 + * Standard device entry points.
28578 + */
28579 +static struct file_operations elan4_fops = {
28580 +    ioctl:   elan4_ioctl,
28581 +    mmap:    elan4_mmap,
28582 +    open:    elan4_open,
28583 +    release: elan4_release,
28584 +};
28585 +
28586 +ELAN4_DEV *elan4_devices[ELAN4_MAX_CONTROLLER];
28587 +
28588 +#if defined(CONFIG_DEVFS_FS)
28589 +static devfs_handle_t devfs_handle;
28590 +#endif
28591 +
28592 +
28593 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
28594 +static int
28595 +elan4_ioctl32_cmds[] =
28596 +{      /* /dev/elan/control */
28597 +    ELAN4IO_DEVINFO,
28598 +    ELAN4IO_GET_POSITION,
28599 +    ELAN4IO_SET_POSITION,
28600 +    ELAN4IO_GET_PARAMS,
28601 +    ELAN4IO_SET_PARAMS,
28602 +
28603 +    /* /dev/elan4/user */
28604 +    ELAN4IO_POSITION,
28605 +    ELAN4IO_FREE,
28606 +    ELAN4IO_ATTACH,
28607 +    ELAN4IO_DETACH,
28608 +    ELAN4IO_BLOCK_INPUTTER,
28609 +
28610 +    ELAN4IO_ADD_P2PVP,
28611 +    ELAN4IO_ADD_BCASTVP,
28612 +    ELAN4IO_REMOVEVP,
28613 +    ELAN4IO_SET_ROUTE,
28614 +    ELAN4IO_RESET_ROUTE,
28615 +    ELAN4IO_GET_ROUTE,
28616 +    ELAN4IO_CHECK_ROUTE,
28617 +
28618 +    ELAN4IO_ALLOCCQ,
28619 +    ELAN4IO_FREECQ,
28620 +    ELAN4IO_SETPERM32,
28621 +    ELAN4IO_CLRPERM32,
28622 +    ELAN4IO_TRAPSIG,
28623 +    ELAN4IO_TRAPHANDLER32,
28624 +    ELAN4IO_REQUIRED_MAPPINGS,
28625 +       
28626 +    ELAN4IO_RESUME_EPROC_TRAP,
28627 +    ELAN4IO_RESUME_CPROC_TRAP,
28628 +    ELAN4IO_RESUME_DPROC_TRAP,
28629 +    ELAN4IO_RESUME_TPROC_TRAP,
28630 +    ELAN4IO_RESUME_IPROC_TRAP,
28631 +
28632 +    ELAN4IO_FLUSH_ICACHE,
28633 +
28634 +    ELAN4IO_STOP_CTXT,
28635 +
28636 +    ELAN4IO_ALLOC_INTCOOKIE,
28637 +    ELAN4IO_FREE_INTCOOKIE,
28638 +    ELAN4IO_ARM_INTCOOKIE,
28639 +    ELAN4IO_WAIT_INTCOOKIE,
28640 +
28641 +    ELAN4IO_ALLOC_TRAP_QUEUES,
28642 +    ELAN4IO_NETERR_MSG,
28643 +    ELAN4IO_NETERR_TIMER,
28644 +    ELAN4IO_NETERR_FIXUP,
28645 +
28646 +    ELAN4IO_DUMPCQ32,
28647 +};
28648 +
28649 +static int      elan4_ioctl32 (unsigned int fd, unsigned int cmd, 
28650 +                              unsigned long arg, struct file *file);
28651 +#endif
28652 +
28653 +/*
28654 + * Standard device entry points.
28655 + */
28656 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
28657 +
28658 +#include <linux/dump.h>
28659 +
28660 +static int
28661 +elan4_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
28662 +{
28663 +    if (event == DUMP_BEGIN)
28664 +       elan4_shutdown_devices (FALSE);
28665 +
28666 +    return (NOTIFY_DONE);
28667 +}
28668 +static struct notifier_block elan4_dump_notifier = 
28669 +{
28670 +    notifier_call:     elan4_dump_event,
28671 +    priority:          0,
28672 +};
28673 +
28674 +#endif
28675 +
28676 +static int
28677 +elan4_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
28678 +{
28679 +    if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
28680 +       elan4_shutdown_devices (0);
28681 +
28682 +    return (NOTIFY_DONE);
28683 +}
28684 +
28685 +static struct notifier_block elan4_reboot_notifier = 
28686 +{
28687 +    notifier_call:     elan4_reboot_event,
28688 +    priority:          0,
28689 +};
28690 +
28691 +#if !defined(NO_PANIC_NOTIFIER)
28692 +static int
28693 +elan4_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
28694 +{
28695 +    elan4_shutdown_devices (1);
28696 +
28697 +    return (NOTIFY_DONE);
28698 +}
28699 +
28700 +static struct notifier_block elan4_panic_notifier = 
28701 +{
28702 +    notifier_call:     elan4_panic_event,
28703 +    priority:          0,
28704 +};
28705 +#endif 
28706 +
28707 +static int __init
28708 +elan4_init (void)
28709 +{
28710 +    int             err;
28711 +    struct pci_dev *pdev;
28712 +    int                    count;
28713 +#if defined(__ia64)
28714 +    int             seenRevA = 0;
28715 +#endif
28716 +    
28717 +    if ((err = register_chrdev (ELAN4_MAJOR, ELAN4_NAME, &elan4_fops)) < 0)
28718 +       return (err);
28719 +
28720 +#if defined(CONFIG_DEVFS_FS)
28721 +    devfs_handle = devfs_mk_dir (NULL, "elan4", NULL);
28722 +#endif
28723 +
28724 +    intcookie_init();
28725 +    elan4_debug_init();
28726 +    elan4_procfs_init();
28727 +    
28728 +#ifdef CONFIG_MPSAS
28729 +    sas_init();
28730 +#endif
28731 +
28732 +    if (sdram_cfg_lo != 0 && sdram_cfg_hi != 0)
28733 +       sdram_cfg = (((unsigned long long) sdram_cfg_hi) << 32) | ((unsigned long long) sdram_cfg_lo);
28734 +
28735 +    for (count = 0, pdev = NULL; (pdev = pci_find_device(PCI_VENDOR_ID_QUADRICS, PCI_DEVICE_ID_ELAN4, pdev)) != NULL ; count++)
28736 +    {
28737 +#if defined(__ia64)
28738 +       unsigned char revid;
28739 +       
28740 +       pci_read_config_byte (pdev, PCI_REVISION_ID, &revid);
28741 +
28742 +       if (revid == PCI_REVISION_ID_ELAN4_REVA && seenRevA++ != 0 && pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL))
28743 +       {
28744 +           printk ("elan: only a single elan4a supported on rx2600\n");
28745 +           continue;
28746 +       }
28747 +#endif
28748 +
28749 +       if (count < ELAN4_MAX_CONTROLLER)
28750 +           elan4_attach_device (count, pdev);
28751 +    }
28752 +
28753 +    if (count >= ELAN4_MAX_CONTROLLER)
28754 +       printk ("elan: found %d elan4 devices - only support %d\n", count, ELAN4_MAX_CONTROLLER);
28755 +
28756 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
28757 +    lock_kernel();
28758 +    {
28759 +       extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
28760 +       register int i;
28761 +       for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++)
28762 +           register_ioctl32_conversion (elan4_ioctl32_cmds[i], elan4_ioctl32);
28763 +    }
28764 +    unlock_kernel();
28765 +#endif
28766 +
28767 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
28768 +    register_dump_notifier (&elan4_dump_notifier);
28769 +#endif
28770 +    register_reboot_notifier (&elan4_reboot_notifier);
28771 +
28772 +#if !defined(NO_PANIC_NOTIFIER)
28773 +    notifier_chain_register (&panic_notifier_list, &elan4_panic_notifier);
28774 +#endif
28775 +
28776 +    return (0);
28777 +}
28778 +
28779 +#ifdef MODULE
28780 +static void __exit
28781 +elan4_exit (void)
28782 +{
28783 +    int i;
28784 +
28785 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
28786 +    lock_kernel();
28787 +    {
28788 +       extern void unregister_ioctl32_conversion(unsigned int cmd);
28789 +
28790 +       for (i = 0; i < sizeof (elan4_ioctl32_cmds)/sizeof(elan4_ioctl32_cmds[0]); i++)
28791 +           unregister_ioctl32_conversion (elan4_ioctl32_cmds[i]);
28792 +    }
28793 +    unlock_kernel();
28794 +#endif
28795 +
28796 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
28797 +    unregister_dump_notifier (&elan4_dump_notifier);
28798 +#endif
28799 +    unregister_reboot_notifier (&elan4_reboot_notifier);
28800 +
28801 +#if !defined(NO_PANIC_NOTIFIER)
28802 +    notifier_chain_unregister (&panic_notifier_list, &elan4_panic_notifier);
28803 +#endif
28804 +
28805 +    for (i = 0; i < ELAN4_MAX_CONTROLLER; i++)
28806 +       if (elan4_devices[i] != NULL)
28807 +           elan4_detach_device (elan4_devices[i]);
28808 +    
28809 +    elan4_procfs_fini();
28810 +    elan4_debug_fini();
28811 +    intcookie_fini();
28812 +
28813 +#if defined(CONFIG_DEVFS_FS)
28814 +    devfs_unregister (devfs_handle);
28815 +#endif
28816 +
28817 +    unregister_chrdev(ELAN4_MAJOR, ELAN4_NAME);
28818 +}
28819 +
28820 +module_init (elan4_init);
28821 +module_exit (elan4_exit);
28822 +
28823 +#else
28824 +__initcall (elan4_init);
28825 +#endif
28826 +
28827 +/*
28828 + * Minor numbers encoded as :
28829 + *   [5:0]     device number
28830 + *   [15:6]    function number
28831 + */
28832 +#define ELAN4_DEVICE_MASK      0x3F
28833 +#define ELAN4_DEVICE(inode)    (MINOR((inode)->i_rdev) & ELAN4_DEVICE_MASK)
28834 +
28835 +#define ELAN4_MINOR_CONTROL    0
28836 +#define ELAN4_MINOR_MEM                1
28837 +#define ELAN4_MINOR_USER       2
28838 +
28839 +#define ELAN4_MINOR_SHIFT      6
28840 +#define ELAN4_MINOR(inode)     (MINOR((inode)->i_rdev) >> ELAN4_MINOR_SHIFT)
28841 +
28842 +/*
28843 + * Called by init_module() for each card discovered on PCI.
28844 + */
28845 +static int
28846 +elan4_attach_device (int instance, struct pci_dev *pdev)
28847 +{
28848 +    ELAN4_DEV *dev;
28849 +    int res;
28850 +
28851 +    KMEM_ALLOC (dev, ELAN4_DEV *, sizeof (ELAN4_DEV), 1);
28852 +    if ((dev == NULL))
28853 +       return (-ENOMEM);
28854 +    memset (dev, 0, sizeof (ELAN4_DEV));
28855 +
28856 +    /* setup os dependent section of ELAN4_DEV */
28857 +    dev->dev_instance   = instance;
28858 +    dev->dev_osdep.pdev = pdev;
28859 +
28860 +#if !defined(IOPROC_PATCH_APPLIED)
28861 +    printk ("elan%d: pinning down pages as no ioproc patch\n", dev->dev_instance);
28862 +
28863 +    default_features |= ELAN4_FEATURE_NO_IOPROC | ELAN4_FEATURE_PIN_DOWN;
28864 +
28865 +    /* Also change this flag so as to make the /proc entry consistent */
28866 +    user_ioproc_enabled = 0;
28867 +#endif
28868 +
28869 +    /* initialise the devinfo */
28870 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_VENDOR_ID,   &dev->dev_devinfo.dev_vendor_id);
28871 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_DEVICE_ID,   &dev->dev_devinfo.dev_device_id);
28872 +    pci_read_config_byte (dev->dev_osdep.pdev, PCI_REVISION_ID, &dev->dev_devinfo.dev_revision_id);
28873 +
28874 +    dev->dev_devinfo.dev_rail                                        = instance;
28875 +    dev->dev_devinfo.dev_driver_version                              = ELAN4_DRIVER_VERSION;
28876 +    dev->dev_devinfo.dev_num_down_links_value                        = 0;
28877 +    dev->dev_devinfo.dev_params_mask                                 = (1 << ELAN4_PARAM_DRIVER_FEATURES);
28878 +    dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES]  = default_features;
28879 +
28880 +    dev->dev_position.pos_mode = ELAN_POS_UNKNOWN;
28881 +
28882 +    dev->dev_regs_phys  = bar2phys (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS));
28883 +    dev->dev_sdram_phys = bar2phys (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
28884 +
28885 +    /* initialise the data structures and map the device */
28886 +    if ((res = elan4_initialise_device (dev)) != 0)
28887 +    {
28888 +       kfree (dev);
28889 +       return res;
28890 +    }
28891 +
28892 +#if defined(CONFIG_DEVFS_FS)
28893 +    {
28894 +       char name[16];
28895 +       
28896 +       sprintf (name, "control%d", dev->dev_instance);
28897 +       dev->dev_osdep.devfs_control = devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
28898 +                                                     dev->dev_instance | (ELAN4_MINOR_CONTROL << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR | S_IWUSR, 
28899 +                                                     &elan4_fops, NULL);
28900 +       sprintf (name, "sdram%d", dev->dev_instance);
28901 +       dev->dev_osdep.devfs_sdram =  devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
28902 +                                                    dev->dev_instance | (ELAN4_MINOR_MEM << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH,
28903 +                                                    &elan4_fops, NULL);
28904 +       sprintf (name, "user%d", dev->dev_instance);
28905 +       dev->dev_osdep.devfs_user =  devfs_register(devfs_handle, name, DEVFS_FL_NONE, ELAN4_MAJOR,
28906 +                                                   dev->dev_instance | (ELAN4_MINOR_USER << ELAN4_MINOR_SHIFT), S_IFCHR | S_IRUSR|S_IWUSR | S_IRGRP|S_IWGRP | S_IROTH|S_IWOTH,
28907 +                                                   &elan4_fops, NULL);
28908 +    }
28909 +#endif
28910 +
28911 +    /* add the procfs entry */
28912 +    elan4_procfs_device_init (dev);
28913 +
28914 +    /* allow the device to be referenced now */
28915 +    elan4_devices[instance] = dev;
28916 +
28917 +    if ((disabled & (1 << instance)) == 0)
28918 +    {
28919 +       if (elan4_start_device (dev) != 0)
28920 +       {
28921 +           printk ("elan%d: auto-start of device failed\n", dev->dev_instance);
28922 +
28923 +           elan4_detach_device (dev);
28924 +           return (-ENXIO);
28925 +       }
28926 +       
28927 +       dev->dev_state = ELAN4_STATE_STARTED;
28928 +    }
28929 +
28930 +#if defined (__sparc)
28931 +    printk ("elan%d: at pci %s (irq = %s)\n", instance, pdev->slot_name, __irq_itoa(pdev->irq));
28932 +#else
28933 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
28934 +    printk ("elan%d: at pci %s (irq = %d)\n", instance, pdev->slot_name, pdev->irq);
28935 +#else
28936 +    printk ("elan%d: at pci %s (irq = %d)\n", instance, pci_name (pdev), pdev->irq);
28937 +#endif
28938 +#endif
28939 +
28940 +    return (0);
28941 +}
28942 +
28943 +/*
28944 + * Called by cleanup_module() for each board found on PCI.
28945 + */
28946 +static void
28947 +elan4_detach_device (ELAN4_DEV *dev)
28948 +{
28949 +    /* stop the chip and free of resources */
28950 +    if (dev->dev_state == ELAN4_STATE_STARTED)
28951 +       elan4_stop_device (dev);
28952 +    
28953 +    elan4_devices[dev->dev_instance] = NULL;
28954 +
28955 +#if defined(CONFIG_DEVFS_FS)
28956 +    devfs_unregister (dev->dev_osdep.devfs_control);
28957 +    devfs_unregister (dev->dev_osdep.devfs_sdram);
28958 +    devfs_unregister (dev->dev_osdep.devfs_user);
28959 +#endif
28960 +
28961 +    /* remove the procfs entry */
28962 +    elan4_procfs_device_fini (dev);
28963 +
28964 +    /* unmap the device and finalise the data structures */
28965 +    elan4_finalise_device (dev);
28966 +    
28967 +    KMEM_FREE (dev, sizeof(*dev));
28968 +}
28969 +
28970 +/*
28971 + * Maintain reference counts on the device
28972 + */
28973 +ELAN4_DEV *
28974 +elan4_reference_device (int instance, int state)
28975 +{
28976 +    ELAN4_DEV *dev = elan4_devices[instance];
28977 +
28978 +    if (dev == NULL)
28979 +       return (NULL);
28980 +
28981 +    kmutex_lock (&dev->dev_lock);
28982 +
28983 +    if ((dev->dev_state & state) == 0)
28984 +    {
28985 +       kmutex_unlock (&dev->dev_lock);
28986 +       return (NULL);
28987 +    }
28988 +
28989 +    dev->dev_references++;
28990 +    kmutex_unlock (&dev->dev_lock);
28991 +
28992 +#ifdef MODULE
28993 +    MOD_INC_USE_COUNT;
28994 +#endif
28995 +
28996 +#ifdef CONFIG_MPSAS
28997 +    sas_set_position(dev);
28998 +#endif
28999 +
29000 +    return (dev);
29001 +}
29002 +
29003 +void
29004 +elan4_dereference_device (ELAN4_DEV *dev)
29005 +{
29006 +    kmutex_lock (&dev->dev_lock);
29007 +    dev->dev_references--;
29008 +    kmutex_unlock (&dev->dev_lock);
29009 +
29010 +#ifdef MODULE
29011 +    MOD_DEC_USE_COUNT;
29012 +#endif
29013 +}
29014 +
29015 +static void
29016 +elan4_shutdown_devices(int panicing)
29017 +{
29018 +    ELAN4_DEV *dev;
29019 +    unsigned long flags;
29020 +    register int i;
29021 +
29022 +    local_irq_save (flags);
29023 +    for (i = 0; i < ELAN4_MAX_CONTROLLER; i++)
29024 +    {
29025 +       if ((dev = elan4_devices[i]) != NULL)
29026 +       {
29027 +           printk(KERN_INFO "elan%d: forcing link into reset\n", dev->dev_instance);
29028 +
29029 +           /* set the inputters to discard everything */
29030 +           if (! panicing) spin_lock (&dev->dev_haltop_lock);
29031 +
29032 +           if (dev->dev_discard_lowpri_count++ == 0)
29033 +               elan4_set_schedstatus (dev, 0);
29034 +           if (dev->dev_discard_highpri_count++ == 0)
29035 +               elan4_set_schedstatus (dev, 0);
29036 +
29037 +           if (! panicing) spin_unlock (&dev->dev_haltop_lock);
29038 +
29039 +           /* ideally we'd like to halt all the outputters too,
29040 +            * however this will prevent the kernel comms flushing
29041 +            * to work correctly .....
29042 +            */
29043 +       }
29044 +    }
29045 +    local_irq_restore (flags);
29046 +}
29047 +
29048 +/*
29049 + * /dev/elan4/controlX - control device
29050 + *
29051 + */
29052 +static int
29053 +control_open (struct inode *inode, struct file *file)
29054 +{
29055 +    ELAN4_DEV       *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STOPPED | ELAN4_STATE_STARTED);
29056 +    CONTROL_PRIVATE *pr;
29057 +    
29058 +    if (dev == NULL)
29059 +       return (-ENXIO);
29060 +    
29061 +    KMEM_ALLOC (pr, CONTROL_PRIVATE *, sizeof (CONTROL_PRIVATE), 1);
29062 +    if ((pr == NULL))
29063 +    {
29064 +       elan4_dereference_device (dev);
29065 +       
29066 +       return (-ENOMEM);
29067 +    }
29068 +
29069 +    PRINTF (DBG_USER, DBG_FILE, "control_open: dev=%p pr=%p\n", dev, pr);
29070 +
29071 +    pr->pr_dev           = dev;
29072 +    pr->pr_boundary_scan = 0;
29073 +
29074 +    file->private_data = (void *) pr;
29075 +
29076 +    return (0);
29077 +}
29078 +
29079 +static int
29080 +control_release (struct inode *inode, struct file *file)
29081 +{
29082 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
29083 +    ELAN4_DEV       *dev = pr->pr_dev;
29084 +
29085 +    PRINTF (DBG_DEVICE, DBG_FILE, "control_release: pr=%p\n", pr);
29086 +
29087 +    //if (pr->pr_boundary_scan)
29088 +    //    elan4_clear_boundary_scan (dev, pr);
29089 +
29090 +    elan4_dereference_device (dev);
29091 +
29092 +    KMEM_FREE (pr, sizeof(*pr));
29093 +
29094 +    return (0);
29095 +}
29096 +
29097 +static int
29098 +control_ioctl (struct inode *inode, struct file *file, 
29099 +                    unsigned int cmd, unsigned long arg)
29100 +{
29101 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
29102 +
29103 +    PRINTF (DBG_DEVICE, DBG_FILE, "control_ioctl: cmd=%x arg=%lx\n", cmd, arg);
29104 +
29105 +    switch (cmd)
29106 +    {
29107 +    case ELAN4IO_DEVINFO:
29108 +       if (copy_to_user ((void *) arg, &pr->pr_dev->dev_devinfo, sizeof (ELAN_DEVINFO)))
29109 +           return (-EFAULT);
29110 +       return (0);
29111 +
29112 +    case ELAN4IO_GET_POSITION:
29113 +    {
29114 +       ELAN_POSITION pos;
29115 +
29116 +       elan4_get_position (pr->pr_dev, &pos);
29117 +
29118 +       if (copy_to_user ((void *) arg, &pos, sizeof (ELAN_POSITION)))
29119 +           return (-EFAULT);
29120 +
29121 +       return (0);
29122 +    }
29123 +
29124 +    case ELAN4IO_SET_POSITION:
29125 +    {
29126 +       ELAN_POSITION pos;
29127 +
29128 +       if (copy_from_user (&pos, (void *) arg, sizeof (ELAN_POSITION)))
29129 +           return (-EFAULT);
29130 +       
29131 +       return (elan4_set_position (pr->pr_dev, &pos));
29132 +    }
29133 +
29134 +    case ELAN4IO_OLD_GET_PARAMS:
29135 +    {
29136 +       ELAN_PARAMS params;
29137 +       unsigned short mask;
29138 +
29139 +       elan4_get_params (pr->pr_dev, &params, &mask);
29140 +
29141 +       if (copy_to_user ((void *) arg, &params, sizeof (ELAN_PARAMS)))
29142 +           return (-EFAULT);
29143 +
29144 +       return (0);
29145 +    }
29146 +
29147 +    case ELAN4IO_OLD_SET_PARAMS:
29148 +    {
29149 +       ELAN_PARAMS params;
29150 +
29151 +       if (copy_from_user (&params, (void *) arg, sizeof (ELAN_PARAMS)))
29152 +           return (-EFAULT);
29153 +       
29154 +       elan4_set_params (pr->pr_dev, &params, 3);
29155 +       
29156 +       return (0);
29157 +    }
29158 +
29159 +    case ELAN4IO_SET_PARAMS:
29160 +    {
29161 +       ELAN4IO_PARAMS_STRUCT args;
29162 +
29163 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PARAMS_STRUCT)))
29164 +           return (-EFAULT);
29165 +       
29166 +       elan4_set_params (pr->pr_dev, &args.p_params, args.p_mask);
29167 +       
29168 +       return (0);
29169 +    }
29170 +
29171 +    case ELAN4IO_GET_PARAMS:
29172 +    {
29173 +       ELAN4IO_PARAMS_STRUCT args;
29174 +
29175 +       elan4_get_params (pr->pr_dev, &args.p_params, &args.p_mask);
29176 +
29177 +       if (copy_to_user ((void *) arg, &args, sizeof (ELAN_PARAMS)))
29178 +           return (-EFAULT);
29179 +
29180 +       return (0);
29181 +    }
29182 +    }
29183 +
29184 +    return (-EINVAL);
29185 +}
29186 +
29187 +static int
29188 +control_mmap (struct file *file, struct vm_area_struct *vma)
29189 +{
29190 +    CONTROL_PRIVATE *pr  = (CONTROL_PRIVATE *) file->private_data;
29191 +    unsigned        bar = OFF_TO_BAR (vma->vm_pgoff << PAGE_SHIFT);
29192 +    unsigned long    off = OFF_TO_OFFSET (vma->vm_pgoff << PAGE_SHIFT);
29193 +    long            len = vma->vm_end - vma->vm_start;
29194 +
29195 +    PRINTF (DBG_USER, DBG_FILE, "control_mmap: pr=%p bar=%x off=%x\n", pr, bar, off);
29196 +
29197 +    /* check bar number and translate the standard pseudo bars */
29198 +    switch (bar)
29199 +    {
29200 +    case ELAN4_BAR_SDRAM:
29201 +    case ELAN4_BAR_REGISTERS:
29202 +       break;
29203 +
29204 +    default:
29205 +       return (-EINVAL);
29206 +    }
29207 +
29208 +    if (off < 0 || (off + len) > pci_resource_len (pr->pr_dev->dev_osdep.pdev, bar))
29209 +       return (-EINVAL);
29210 +
29211 +    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
29212 +
29213 +    if (__io_remap_page_range (vma->vm_start, pci_resource_start (pr->pr_dev->dev_osdep.pdev, bar) + off, len, vma->vm_page_prot))
29214 +       return (-EAGAIN);
29215 +
29216 +    return (0);
29217 +}
29218 +
29219 +/*
29220 + * /dev/elan4/sdramX - sdram access device
29221 + */
29222 +static void 
29223 +mem_freepage (MEM_PRIVATE *pr, MEM_PAGE *pg)
29224 +{
29225 +    PRINTF (DBG_USER, DBG_MEM, "mem_freepage: pr=%p pgoff=%lx pg=%p ref=%d\n", pr, pg->pg_pgoff, pg, pg->pg_ref);
29226 +
29227 +    elan4_sdram_free (pr->pr_dev, pg->pg_addr, SDRAM_PAGE_SIZE);
29228 +
29229 +    KMEM_FREE(pg, sizeof(*pg));
29230 +}
29231 +
29232 +static MEM_PAGE *
29233 +mem_getpage (MEM_PRIVATE *pr, unsigned long pgoff)
29234 +{
29235 +    int       hashval = MEM_HASH (pgoff);
29236 +    MEM_PAGE *npg = NULL;
29237 +    MEM_PAGE *pg;
29238 +
29239 +    ASSERT ((pgoff & SDRAM_PGOFF_OFFSET) == 0);
29240 +
29241 +    PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx\n", pr, pgoff);
29242 +    
29243 + again:
29244 +    spin_lock (&pr->pr_lock);
29245 +    for (pg = pr->pr_pages[hashval]; pg; pg = pg->pg_next)
29246 +       if (pg->pg_pgoff == pgoff)
29247 +           break;
29248 +    
29249 +    if (pg != NULL)
29250 +    {
29251 +       PRINTF (DBG_USER, DBG_MEM, "mem_getpage: pr=%p pgoff=%lx -> found %p addr=%x\n", pr, pgoff, pg, pg->pg_addr);
29252 +
29253 +       pg->pg_ref++;
29254 +       spin_unlock (&pr->pr_lock);
29255 +
29256 +       if (npg != NULL)                                        /* we'd raced and someone else had created */
29257 +           mem_freepage (pr, npg);                             /* this page - so free off our new one */
29258 +       return (pg);
29259 +    }
29260 +    
29261 +    if (npg != NULL)                                           /* didn't find the page, so insert the */
29262 +    {                                                          /* new one we've just created */
29263 +       npg->pg_next = pr->pr_pages[hashval];
29264 +       pr->pr_pages[hashval] = npg;
29265 +       
29266 +       spin_unlock (&pr->pr_lock);
29267 +       return (npg);
29268 +    }
29269 +    
29270 +    spin_unlock (&pr->pr_lock);                                        /* drop spinlock before creating a new page */
29271 +    
29272 +    KMEM_ALLOC (npg, MEM_PAGE *, sizeof (MEM_PAGE), 1);
29273 +    if ((npg == NULL))
29274 +       return (NULL);
29275 +    
29276 +    if ((npg->pg_addr = elan4_sdram_alloc (pr->pr_dev, SDRAM_PAGE_SIZE)) == 0)
29277 +    {
29278 +       KMEM_FREE(npg, sizeof(*npg));
29279 +       return (NULL);
29280 +    }
29281 +
29282 +#ifndef CONFIG_MPSAS
29283 +    /* zero the page before returning it to the user */
29284 +    elan4_sdram_zeroq_sdram (pr->pr_dev, npg->pg_addr, SDRAM_PAGE_SIZE);
29285 +#endif
29286 +    
29287 +    npg->pg_pgoff = pgoff;
29288 +    npg->pg_ref   = 1;
29289 +    
29290 +    /* created a new page - so have to rescan before inserting it */
29291 +    goto again;
29292 +}
29293 +
29294 +static void
29295 +mem_droppage (MEM_PRIVATE *pr, unsigned long pgoff, int dontfree)
29296 +{
29297 +    MEM_PAGE **ppg;
29298 +    MEM_PAGE  *pg;
29299 +
29300 +    spin_lock (&pr->pr_lock);
29301 +    for (ppg = &pr->pr_pages[MEM_HASH(pgoff)]; *ppg; ppg = &(*ppg)->pg_next)
29302 +       if ((*ppg)->pg_pgoff == pgoff)
29303 +           break;
29304 +
29305 +    pg = *ppg;
29306 +
29307 +    ASSERT (*ppg != NULL);
29308 +
29309 +    PRINTF (DBG_USER, DBG_MEM, "mem_droppage: pr=%p pgoff=%lx pg=%p ref=%d dontfree=%d\n", pr, pgoff, (*ppg), (*ppg)->pg_ref, dontfree);
29310 +
29311 +    if (--pg->pg_ref == 0 && !dontfree)
29312 +    {
29313 +       *ppg = pg->pg_next;
29314 +
29315 +       mem_freepage (pr, pg);
29316 +    }
29317 +
29318 +    spin_unlock (&pr->pr_lock);
29319 +}
29320 +
29321 +static int
29322 +mem_open (struct inode *inode, struct file *file)
29323 +{
29324 +    ELAN4_DEV   *dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED);
29325 +    MEM_PRIVATE *pr;
29326 +    register int i;
29327 +
29328 +    if (dev == NULL)
29329 +       return (-ENXIO);
29330 +
29331 +    KMEM_ALLOC (pr, MEM_PRIVATE *, sizeof (MEM_PRIVATE), 1);
29332 +    if ((pr == NULL))
29333 +    {
29334 +       elan4_dereference_device (dev);
29335 +       return (-ENOMEM);
29336 +    }
29337 +
29338 +    spin_lock_init (&pr->pr_lock);
29339 +    pr->pr_dev = dev;
29340 +    for (i = 0; i < MEM_HASH_SIZE; i++)
29341 +       pr->pr_pages[i] = NULL;
29342 +
29343 +    file->private_data = (void *) pr;
29344 +    
29345 +    return (0);
29346 +}
29347 +
29348 +static int
29349 +mem_release (struct inode *node, struct file *file)
29350 +{
29351 +    MEM_PRIVATE *pr = (MEM_PRIVATE *) file->private_data;
29352 +    MEM_PAGE    *pg, *next;
29353 +    int          i;
29354 +
29355 +    /* free off any pages that we'd allocated */
29356 +    spin_lock (&pr->pr_lock);
29357 +    for (i = 0; i < MEM_HASH_SIZE; i++)
29358 +    {
29359 +       for (pg = pr->pr_pages[i]; pg; pg = next)
29360 +       {
29361 +           next = pg->pg_next;
29362 +           mem_freepage (pr, pg);
29363 +       }
29364 +    }
29365 +    spin_unlock (&pr->pr_lock);
29366 +
29367 +    elan4_dereference_device (pr->pr_dev);
29368 +    KMEM_FREE(pr, sizeof(*pr));
29369 +
29370 +    return (0);
29371 +}
29372 +
29373 +static int
29374 +mem_ioctl (struct inode *inode, struct file *file, 
29375 +                 unsigned int cmd, unsigned long arg)
29376 +{
29377 +    return (-EINVAL);
29378 +}
29379 +
29380 +static void 
29381 +mem_vma_open (struct vm_area_struct *vma)
29382 +{
29383 +    MEM_PRIVATE   *pr = (MEM_PRIVATE *) vma->vm_private_data;
29384 +    unsigned long addr;
29385 +    unsigned long pgoff;
29386 +
29387 +    PRINTF (DBG_USER, DBG_MEM, "mem_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
29388 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
29389 +    
29390 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
29391 +       mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK);
29392 +}
29393 +
29394 +static void
29395 +mem_vma_close (struct vm_area_struct *vma)
29396 +{
29397 +    MEM_PRIVATE  *pr  = (MEM_PRIVATE *) vma->vm_private_data;
29398 +    unsigned long addr;
29399 +    unsigned long pgoff;
29400 +
29401 +    PRINTF (DBG_USER, DBG_MEM, "mem_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
29402 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
29403 +
29404 +    /* NOTE: the call to close may not have the same vm_start/vm_end values as 
29405 +     *       were passed into mmap()/open() - since if a partial unmap had occurred
29406 +     *       then the vma could have been shrunk or even split.
29407 +     *
29408 +     *       if the vma is split then a vma_open() will be called for the top
29409 +     *       portion - thus causing the reference counts to become incorrect.
29410 +     *
29411 +     * We drop the reference to any pages we're notified about - so they get freed
29412 +     * earlier than when the device is finally released.
29413 +     */
29414 +    for (pgoff = vma->vm_pgoff, addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
29415 +       mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0);
29416 +}
29417 +
29418 +struct vm_operations_struct mem_vm_ops = {
29419 +    open:              mem_vma_open,
29420 +    close:             mem_vma_close,
29421 +};
29422 +
29423 +static int
29424 +mem_mmap (struct file *file, struct vm_area_struct *vma)
29425 +{
29426 +    MEM_PRIVATE  *pr = (MEM_PRIVATE *) file->private_data;
29427 +    MEM_PAGE     *pg;
29428 +    unsigned long addr;
29429 +    unsigned long pgoff;
29430 +
29431 +    PRINTF (DBG_USER, DBG_MEM, "mem_mmap: vma=%p start=%lx end=%lx pgoff=%lx file=%p\n",
29432 +           vma, vma->vm_start, vma->vm_end, vma->vm_pgoff, file);
29433 +
29434 +    /* Don't allow these pages to be swapped out or dumped */
29435 +    vma->vm_flags |= (VM_RESERVED | VM_IO);
29436 +
29437 +    vma->vm_ops          = &mem_vm_ops;
29438 +    vma->vm_file         = file;
29439 +    vma->vm_private_data = (void *) pr;
29440 +
29441 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
29442 +    {
29443 +       if ((pg = mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK)) == NULL)
29444 +           goto failed;
29445 +
29446 +       PRINTF (DBG_USER, DBG_MEM, "mem_mmap: addr %lx -> pg=%p sdram=%x+%x bar=%lx\n",
29447 +               addr, pg, pg->pg_addr, (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE,
29448 +               pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
29449 +
29450 +       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
29451 +
29452 +       if (! (pr->pr_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE))
29453 +           vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
29454 +
29455 +       if (__io_remap_page_range (addr, pci_resource_start (pr->pr_dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) +
29456 +                                pg->pg_addr + (pgoff & SDRAM_PGOFF_OFFSET) * PAGE_SIZE, PAGE_SIZE, vma->vm_page_prot))
29457 +       {
29458 +           mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0); /* drop our reference to this page */
29459 +           goto failed;
29460 +       }
29461 +
29462 +#if defined(conditional_schedule)
29463 +       conditional_schedule();
29464 +#endif
29465 +    }
29466 +
29467 +    return (0);
29468 +
29469 + failed:
29470 +    /* free off any pages we've already allocated/referenced */
29471 +    while (pgoff-- > vma->vm_pgoff)
29472 +       mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0);
29473 +
29474 +    return (-ENOMEM);
29475 +}
29476 +
29477 +int
29478 +mem_pteload (struct vm_area_struct *vma, unsigned long maddr, USER_CTXT *uctx, E4_Addr eaddr, int perm)
29479 +{
29480 +    MEM_PRIVATE  *pr    = (MEM_PRIVATE *) vma->vm_private_data;
29481 +    ELAN4_DEV    *dev   = uctx->uctx_ctxt.ctxt_dev;
29482 +    unsigned long pgoff = vma->vm_pgoff + ((maddr - vma->vm_start) >> PAGE_SHIFT);
29483 +    sdramaddr_t   pgaddr;
29484 +    MEM_PAGE      *pg;
29485 +    register int  i, res;
29486 +
29487 +    if (pr->pr_dev != dev)
29488 +       return -EINVAL;
29489 +
29490 +    if ((pg = mem_getpage (pr, pgoff & SDRAM_PGOFF_MASK)) == NULL)
29491 +       return -ENOMEM;
29492 +    
29493 +    pgaddr = pg->pg_addr + ((pgoff & SDRAM_PGOFF_OFFSET) << PAGE_SHIFT);
29494 +
29495 +    if (! elan4mmu_sdram_aliascheck (&uctx->uctx_ctxt, eaddr, pgaddr))
29496 +       return -EINVAL;
29497 +           
29498 +    for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
29499 +    {
29500 +       E4_uint64 newpte = ((pgaddr + i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm);
29501 +       
29502 +       if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_SDRAM, newpte)) < 0)
29503 +           return res;
29504 +    }
29505 +    
29506 +    mem_droppage (pr, pgoff & SDRAM_PGOFF_MASK, 0);
29507 +
29508 +    return 0;
29509 +}
29510 +
29511 +
29512 +/*
29513 + * /dev/elan4/userX - control device
29514 + *
29515 + */
29516 +static inline void
29517 +user_private_free (USER_PRIVATE *pr)
29518 +{
29519 +    ELAN4_DEV *dev = pr->pr_uctx->uctx_ctxt.ctxt_dev;
29520 +
29521 +    ASSERT (atomic_read (&pr->pr_ref) == 0);
29522 +
29523 +    user_free (pr->pr_uctx);
29524 +    KMEM_FREE(pr, sizeof(*pr));
29525 +
29526 +    elan4_dereference_device (dev);
29527 +}
29528 +
29529 +#if defined(IOPROC_PATCH_APPLIED)
29530 +static void
29531 +user_ioproc_release (void *arg, struct mm_struct *mm)
29532 +{
29533 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29534 +
29535 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_release: ref=%d\n", atomic_read (&pr->pr_ref));
29536 +
29537 +    elan4mmu_invalidate_ctxt (&pr->pr_uctx->uctx_ctxt);
29538 +
29539 +    pr->pr_mm = NULL;
29540 +
29541 +    if (atomic_dec_and_test (&pr->pr_ref))
29542 +       user_private_free (pr);
29543 +}
29544 +
29545 +/*
29546 + * On 2.4 kernels we get passed a mm_struct, whereas on 2.6 kernels
29547 + * we get the vma which is more usefull
29548 + */
29549 +#if defined(IOPROC_MM_STRUCT_ARG)
29550 +static void
29551 +user_ioproc_sync_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
29552 +{
29553 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29554 +
29555 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
29556 +
29557 +    /* XXXX: this is intended to sync the modified bit from our page tables,
29558 +     *       into the main cpu's modified bits - however since we do not
29559 +     *       syncronize our modified bit on a ioproc_invalidate_page() call,
29560 +     *       then it could get lost if we modify the page after the last
29561 +     *       modification and writepage has occurred. Hence we invalidate
29562 +     *       all translations and allow it to refault.
29563 +     */
29564 +
29565 +    user_unload_main (pr->pr_uctx, start, end - start);
29566 +}
29567 +
29568 +static void
29569 +user_ioproc_invalidate_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
29570 +{
29571 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29572 +
29573 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
29574 +
29575 +    user_unload_main (pr->pr_uctx, start, end - start);
29576 +}
29577 +
29578 +static void
29579 +user_ioproc_update_range (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end)
29580 +{
29581 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29582 +    struct vm_area_struct *vma;
29583 +
29584 +    if (pr->pr_uctx->uctx_ctxt.ctxt_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_IOPROC_UPDATE)
29585 +       return;
29586 +
29587 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
29588 +
29589 +    vma = find_vma_intersection (mm, start, end);
29590 +
29591 +    user_update_main (pr->pr_uctx, mm, vma, start, end - start);
29592 +}
29593 +
29594 +static void
29595 +user_ioproc_change_protection (void *arg, struct mm_struct *mm, unsigned long start, unsigned long end, pgprot_t newprot)
29596 +{
29597 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29598 +
29599 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
29600 +
29601 +    user_unload_main (pr->pr_uctx, start, end - start);
29602 +}
29603 +
29604 +
29605 +#else
29606 +
29607 +static void
29608 +user_ioproc_sync_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
29609 +{
29610 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29611 +
29612 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_range: start=%lx end=%lx\n", start, end);
29613 +
29614 +    /* XXXX: this is intended to sync the modified bit from our page tables,
29615 +     *       into the main cpu's modified bits - however since we do not
29616 +     *       syncronize our modified bit on a ioproc_invalidate_page() call,
29617 +     *       then it could get lost if we modify the page after the last
29618 +     *       modification and writepage has occurred. Hence we invalidate
29619 +     *       all translations and allow it to refault.
29620 +     */
29621 +
29622 +    user_unload_main (pr->pr_uctx, start, end - start);
29623 +}
29624 +
29625 +static void
29626 +user_ioproc_invalidate_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
29627 +{
29628 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29629 +
29630 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_range: start=%lx end=%lx\n", start, end);
29631 +
29632 +    user_unload_main (pr->pr_uctx, start, end - start);
29633 +}
29634 +
29635 +static void
29636 +user_ioproc_update_range (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end)
29637 +{
29638 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29639 +
29640 +    if (pr->pr_uctx->uctx_ctxt.ctxt_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_IOPROC_UPDATE)
29641 +       return;
29642 +
29643 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_range: start=%lx end=%lx\n", start, end);
29644 +
29645 +    user_update_main (pr->pr_uctx, vma->vm_mm, vma, start, end - start);
29646 +}
29647 +
29648 +static void
29649 +user_ioproc_change_protection (void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
29650 +{
29651 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29652 +
29653 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_change_protection: start=%lx end=%lx\n", start, end);
29654 +
29655 +    user_unload_main (pr->pr_uctx, start, end - start);
29656 +}
29657 +#endif /* defined(IOPROC_NO_VMA_RANGE) */
29658 +
29659 +static void
29660 +user_ioproc_sync_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
29661 +{
29662 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29663 +
29664 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_sync_page: addr=%lx\n", addr);
29665 +
29666 +    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
29667 +}
29668 +
29669 +static void
29670 +user_ioproc_invalidate_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
29671 +{
29672 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29673 +
29674 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_invalidate_page: addr=%lx\n", addr);
29675 +
29676 +    user_unload_main (pr->pr_uctx, addr & PAGE_MASK, PAGE_SIZE);
29677 +}
29678 +
29679 +static void
29680 +user_ioproc_update_page (void *arg, struct vm_area_struct *vma, unsigned long addr)
29681 +{
29682 +    USER_PRIVATE *pr = (USER_PRIVATE *) arg;
29683 +
29684 +    if (pr->pr_uctx->uctx_ctxt.ctxt_dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_IOPROC_UPDATE)
29685 +       return;
29686 +
29687 +    PRINTF (pr->pr_uctx, DBG_IOPROC, "user_ioproc_update_page: addr=%lx\n", addr);
29688 +
29689 +    user_update_main (pr->pr_uctx, vma->vm_mm, vma, addr & PAGE_MASK, PAGE_SIZE);
29690 +}
29691 +#endif /* defined(IOPROC_PATCH_APPLIED) */
29692 +
29693 +static int
29694 +user_open (struct inode *inode, struct file *file)
29695 +{
29696 +    ELAN4_DEV    *dev;
29697 +    USER_PRIVATE *pr;
29698 +    USER_CTXT    *uctx;
29699 +    
29700 +    PRINTF (DBG_USER, DBG_FILE, "user_open: mm=%p users=%d count=%d\n", current->mm,
29701 +           atomic_read (&current->mm->mm_users), atomic_read (&current->mm->mm_count));
29702 +
29703 +    if ((dev = elan4_reference_device (ELAN4_DEVICE(inode), ELAN4_STATE_STARTED)) == NULL)
29704 +       return (-ENXIO);
29705 +    
29706 +    KMEM_ALLOC (pr, USER_PRIVATE *, sizeof (USER_PRIVATE), 1);
29707 +    if ((pr == NULL))
29708 +    {
29709 +       elan4_dereference_device (dev);
29710 +       return (-ENOMEM);
29711 +    }
29712 +
29713 +    uctx = user_alloc (dev);
29714 +
29715 +    if (IS_ERR(uctx))
29716 +    {
29717 +       elan4_dereference_device (dev);
29718 +       KMEM_FREE(pr, sizeof(*pr));
29719 +
29720 +       return PTR_ERR(uctx);
29721 +    }
29722 +
29723 +    /* initialise refcnt to 1 - one for "file" */
29724 +    atomic_set (&pr->pr_ref, 1);
29725 +
29726 +    pr->pr_uctx = uctx;
29727 +    pr->pr_mm   = current->mm;
29728 +
29729 +#if defined(IOPROC_PATCH_APPLIED)
29730 +    if (! (uctx->uctx_ctxt.ctxt_features & ELAN4_FEATURE_NO_IOPROC))
29731 +    {
29732 +       /* register a ioproc callback to notify us of translation changes */
29733 +       pr->pr_ioproc.arg               = (void *) pr;
29734 +       pr->pr_ioproc.release           = user_ioproc_release;
29735 +       pr->pr_ioproc.sync_range        = user_ioproc_sync_range;
29736 +       pr->pr_ioproc.invalidate_range  = user_ioproc_invalidate_range;
29737 +       pr->pr_ioproc.update_range      = user_ioproc_update_range;
29738 +       pr->pr_ioproc.change_protection = user_ioproc_change_protection;
29739 +       pr->pr_ioproc.sync_page         = user_ioproc_sync_page;
29740 +       pr->pr_ioproc.invalidate_page   = user_ioproc_invalidate_page;
29741 +       pr->pr_ioproc.update_page       = user_ioproc_update_page;
29742 +       
29743 +       /* add an extra reference for the ioproc ops */
29744 +       atomic_inc (&pr->pr_ref);
29745 +       
29746 +       spin_lock (&current->mm->page_table_lock);
29747 +       ioproc_register_ops (current->mm, &pr->pr_ioproc);
29748 +       spin_unlock (&current->mm->page_table_lock);
29749 +    }
29750 +#endif
29751 +
29752 +    file->private_data = (void *) pr;
29753 +
29754 +    return (0);
29755 +}
29756 +
29757 +static int
29758 +user_release (struct inode *inode, struct file *file)
29759 +{
29760 +    USER_PRIVATE *pr = (USER_PRIVATE *) file->private_data;
29761 +
29762 +    PRINTF (pr->pr_uctx, DBG_FILE, "user_release: ref=%d\n", atomic_read (&pr->pr_ref));
29763 +
29764 +    if (atomic_dec_and_test (&pr->pr_ref))
29765 +       user_private_free (pr);
29766 +
29767 +    return (0);
29768 +}
29769 +
29770 +static int
29771 +user_ioctl (struct inode *inode, struct file *file, 
29772 +           unsigned int cmd, unsigned long arg)
29773 +{
29774 +    USER_PRIVATE *pr   = (USER_PRIVATE *) file->private_data;
29775 +    USER_CTXT    *uctx = pr->pr_uctx;
29776 +    int           res  = 0;
29777 +
29778 +    PRINTF (uctx, DBG_FILE, "user_ioctl: cmd=%x arg=%lx\n", cmd, arg);
29779 +
29780 +    if (current->mm != pr->pr_mm)
29781 +       return (-EINVAL);
29782 +    
29783 +    switch (cmd)
29784 +    {
29785 +    case ELAN4IO_DEVINFO:
29786 +    {
29787 +       ELAN_DEVINFO devinfo = uctx->uctx_ctxt.ctxt_dev->dev_devinfo;
29788 +
29789 +       if ((devinfo.dev_params_mask & (1 << ELAN4_PARAM_DRIVER_FEATURES)) != 0)
29790 +           devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] = uctx->uctx_ctxt.ctxt_features;
29791 +
29792 +       if (copy_to_user ((void *) arg, &devinfo, sizeof (ELAN_DEVINFO)))
29793 +           return (-EFAULT);
29794 +       return (0);
29795 +    }
29796 +
29797 +    case ELAN4IO_POSITION:
29798 +    {
29799 +       ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
29800 +
29801 +       if (copy_to_user ((void *) arg, &dev->dev_position, sizeof (ELAN_POSITION)))
29802 +           return (-EFAULT);
29803 +       return (0);
29804 +    }
29805 +
29806 +    case ELAN4IO_FREE:
29807 +#if defined(IOPROC_PATCH_APPLIED)
29808 +       if (! (uctx->uctx_ctxt.ctxt_features & ELAN4_FEATURE_NO_IOPROC))
29809 +       {
29810 +           spin_lock (&current->mm->page_table_lock);
29811 +           if (pr->pr_mm != current->mm)
29812 +               spin_unlock (&current->mm->page_table_lock);
29813 +           else
29814 +           {
29815 +               ioproc_unregister_ops (current->mm, &pr->pr_ioproc);
29816 +               spin_unlock (&current->mm->page_table_lock);
29817 +               
29818 +               user_ioproc_release (pr, current->mm);
29819 +           }
29820 +       }
29821 +#endif
29822 +       return (0);
29823 +
29824 +    case ELAN4IO_ATTACH:
29825 +    {
29826 +       ELAN_CAPABILITY *cap;
29827 +
29828 +       KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
29829 +       if ((cap == NULL))
29830 +           return (-ENOMEM);
29831 +
29832 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
29833 +           res = -EFAULT;
29834 +       else if ((res = user_attach (uctx, cap)) == 0 && 
29835 +                copy_to_user ((void *) arg, cap, sizeof (ELAN_CAPABILITY)))
29836 +       {
29837 +           user_detach (uctx, cap);
29838 +           res = -EFAULT;
29839 +       }
29840 +
29841 +       KMEM_FREE(cap, sizeof(*cap));
29842 +       return (res);
29843 +    }
29844 +
29845 +    case ELAN4IO_DETACH:
29846 +    {
29847 +       ELAN_CAPABILITY *cap;
29848 +
29849 +       KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
29850 +       if ((cap == NULL))
29851 +           return (-ENOMEM);
29852 +
29853 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
29854 +           res = -EFAULT;
29855 +       else
29856 +           user_detach (uctx, cap);
29857 +
29858 +       KMEM_FREE(cap, sizeof(*cap));
29859 +       return (res);
29860 +    }
29861 +
29862 +    case ELAN4IO_BLOCK_INPUTTER:
29863 +       user_block_inputter (uctx, arg);
29864 +       return (0);
29865 +
29866 +    case ELAN4IO_ADD_P2PVP:
29867 +    {
29868 +       ELAN4IO_ADD_P2PVP_STRUCT *args;
29869 +       
29870 +       KMEM_ALLOC (args, ELAN4IO_ADD_P2PVP_STRUCT *, sizeof (ELAN4IO_ADD_P2PVP_STRUCT), 1);
29871 +       if ((args == NULL))
29872 +           return (-ENOMEM);
29873 +
29874 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_ADD_P2PVP_STRUCT)))
29875 +           res = -EFAULT;
29876 +       else 
29877 +           res = user_add_p2pvp (uctx, args->vp_process, &args->vp_capability);
29878 +       
29879 +       KMEM_FREE(args, sizeof(*args));
29880 +       return (res);
29881 +    }
29882 +
29883 +    case ELAN4IO_ADD_BCASTVP:
29884 +    {
29885 +       ELAN4IO_ADD_BCASTVP_STRUCT args;
29886 +
29887 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ADD_BCASTVP_STRUCT)))
29888 +           return (-EFAULT);
29889 +
29890 +       return (user_add_bcastvp (uctx, args.vp_process, args.vp_lowvp, args.vp_highvp));
29891 +    }
29892 +
29893 +    case ELAN4IO_REMOVEVP:
29894 +       return (user_removevp (uctx, arg));
29895 +
29896 +    case ELAN4IO_SET_ROUTE:
29897 +    {
29898 +       ELAN4IO_ROUTE_STRUCT args;
29899 +       
29900 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29901 +           return (-EFAULT);
29902 +
29903 +       return (user_set_route (uctx, args.rt_process, &args.rt_route));
29904 +    }
29905 +
29906 +    case ELAN4IO_RESET_ROUTE:
29907 +    {
29908 +       ELAN4IO_ROUTE_STRUCT args;
29909 +       
29910 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29911 +           return (-EFAULT);
29912 +
29913 +       return (user_reset_route (uctx, args.rt_process));
29914 +    }
29915 +
29916 +    case ELAN4IO_GET_ROUTE:
29917 +    {
29918 +       ELAN4IO_ROUTE_STRUCT args;
29919 +       
29920 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29921 +           return (-EFAULT);
29922 +
29923 +       if ((res = user_get_route (uctx, args.rt_process, &args.rt_route)) == 0 &&
29924 +           copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
29925 +           res = -EFAULT;
29926 +
29927 +       return (res);
29928 +    }
29929 +
29930 +    case ELAN4IO_CHECK_ROUTE:
29931 +    {
29932 +       ELAN4IO_ROUTE_STRUCT args;
29933 +       
29934 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ROUTE_STRUCT)))
29935 +           return (-EFAULT);
29936 +
29937 +       if ((res = user_check_route (uctx, args.rt_process, &args.rt_route, &args.rt_error)) == 0 &&
29938 +           copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ROUTE_STRUCT)))
29939 +           res = -EFAULT;
29940 +
29941 +       return (res);
29942 +    }
29943 +       
29944 +    case ELAN4IO_ALLOCCQ:
29945 +    {
29946 +       ELAN4IO_ALLOCCQ_STRUCT args;
29947 +       USER_CQ              *ucq;
29948 +
29949 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
29950 +           return (-EFAULT);
29951 +       
29952 +       ucq = user_alloccq (uctx, args.cq_size & CQ_SizeMask, args.cq_perm & CQ_PermissionMask,
29953 +                           (args.cq_type & ELAN4IO_CQ_TYPE_REORDER) ? UCQ_REORDER : 0);
29954 +       if (IS_ERR (ucq))
29955 +           return PTR_ERR (ucq);
29956 +       
29957 +       args.cq_indx = elan4_cq2idx (ucq->ucq_cq);
29958 +       
29959 +       if (copy_to_user ((void *) arg, &args, sizeof (ELAN4IO_ALLOCCQ_STRUCT)))
29960 +       {
29961 +           user_dropcq (uctx, ucq);
29962 +           return (-EFAULT);
29963 +       }
29964 +       
29965 +       /* don't drop the reference on the cq until the context is freed,
29966 +        * or the caller explicitly frees the cq */
29967 +       return (0);
29968 +    }
29969 +       
29970 +    case ELAN4IO_FREECQ:
29971 +    {
29972 +       USER_CQ *ucq;
29973 +       unsigned indx;
29974 +
29975 +       if (copy_from_user (&indx, (void *) arg, sizeof (unsigned)))
29976 +           return (-EFAULT);
29977 +
29978 +       if ((ucq = user_findcq (uctx, indx)) == NULL)           /* can't free unallocated cq */
29979 +           return (-EINVAL);
29980 +       
29981 +       user_dropcq (uctx, ucq);                                /* drop the reference we've just taken */
29982 +
29983 +       if ((ucq->ucq_flags & UCQ_SYSTEM))                      /* can't free device driver cq */
29984 +           return (-EINVAL);
29985 +
29986 +       user_dropcq (uctx, ucq);                                /* and the one held from the alloccq call */
29987 +
29988 +       return (0);
29989 +    }
29990 +
29991 +    case ELAN4IO_DUMPCQ:
29992 +    {
29993 +       ELAN4IO_DUMPCQ_STRUCT args;
29994 +       ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
29995 +       USER_CQ *ucq;
29996 +       void *buf;
29997 +       int i;
29998 +       
29999 +       if (copy_from_user (&args, (void *) arg, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
30000 +           return (-EFAULT);
30001 +
30002 +       if ((ucq = user_findcq (uctx, args.cq_indx)) == NULL)
30003 +           return (-EINVAL);
30004 +
30005 +       if (args.bufsize)
30006 +       {
30007 +           E4_uint32 usedBufSize = min(args.cq_size, args.bufsize);
30008 +
30009 +           KMEM_ALLOC (buf, void *, usedBufSize, 1);
30010 +
30011 +           if (buf == NULL)
30012 +               return (-ENOMEM);
30013 +
30014 +           for (i=0; i<usedBufSize; i+=sizeof(int))
30015 +               ((int *)buf)[i/sizeof(int)] = elan4_sdram_readl(dev, ucq->ucq_cq->cq_space + i);
30016 +
30017 +           if (copy_to_user((void *)args.buffer, buf, usedBufSize))
30018 +           {
30019 +               KMEM_FREE(buf, args.bufsize);
30020 +               return (-EFAULT);
30021 +           }
30022 +           KMEM_FREE(buf, usedBufSize);
30023 +           args.bufsize = usedBufSize;
30024 +       }
30025 +
30026 +       args.cq_size = CQ_Size(ucq->ucq_cq->cq_size);
30027 +       args.cq_space = ucq->ucq_cq->cq_space;
30028 +
30029 +
30030 +       if (copy_to_user((void *)arg, &args, sizeof(ELAN4IO_DUMPCQ_STRUCT)))
30031 +       {
30032 +           return (-EFAULT);
30033 +       }
30034 +       
30035 +       user_dropcq (uctx, ucq); /* drop the reference we've just taken */
30036 +
30037 +       return (0);
30038 +    }
30039 +
30040 +    case ELAN4IO_SETPERM:
30041 +    {
30042 +       ELAN4IO_PERM_STRUCT args;
30043 +       
30044 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
30045 +           return (-EFAULT);
30046 +
30047 +       return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
30048 +    }
30049 +
30050 +    case ELAN4IO_CLRPERM:
30051 +    {
30052 +       ELAN4IO_PERM_STRUCT args;
30053 +
30054 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT)))
30055 +           return (-EFAULT);
30056 +
30057 +       user_clrperm (uctx, args.ps_eaddr, args.ps_len);
30058 +       return (0);
30059 +    }
30060 +    
30061 +    case ELAN4IO_TRAPSIG:
30062 +    {
30063 +       ELAN4IO_TRAPSIG_STRUCT args;
30064 +
30065 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPSIG_STRUCT)))
30066 +           return (-EFAULT);
30067 +
30068 +       pr->pr_uctx->uctx_trap_pid   = current->pid;
30069 +       pr->pr_uctx->uctx_trap_signo = args.ts_signo;
30070 +       
30071 +       return (0);
30072 +    }
30073 +    
30074 +    case ELAN4IO_TRAPHANDLER:
30075 +    {
30076 +       ELAN4IO_TRAPHANDLER_STRUCT args;
30077 +
30078 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT)))
30079 +           return (-EFAULT);
30080 +
30081 +       return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)args.th_trapp, args.th_nticks));
30082 +    }
30083 +
30084 +    case ELAN4IO_REQUIRED_MAPPINGS:
30085 +    {
30086 +       ELAN4IO_REQUIRED_MAPPINGS_STRUCT args;
30087 +       
30088 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_REQUIRED_MAPPINGS_STRUCT)))
30089 +           return (-EFAULT);
30090 +
30091 +       pr->pr_uctx->uctx_upage_addr    = args.rm_upage_addr;
30092 +       pr->pr_uctx->uctx_trestart_addr = args.rm_trestart_addr;
30093 +
30094 +       return (0);
30095 +    }
30096 +
30097 +    case ELAN4IO_ALLOC_TRAP_QUEUES:
30098 +    {
30099 +       ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT args;
30100 +
30101 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT)))
30102 +           return (-EFAULT);
30103 +
30104 +       return (user_alloc_trap_queues (uctx, args.tq_ndproc_traps, args.tq_neproc_traps, 
30105 +                                       args.tq_ntproc_traps, args.tq_nthreads, args.tq_ndmas));
30106 +    }
30107 +
30108 +    case ELAN4IO_RESUME_EPROC_TRAP:
30109 +    {
30110 +       ELAN4IO_RESUME_EPROC_TRAP_STRUCT args;
30111 +       
30112 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_EPROC_TRAP_STRUCT)))
30113 +           return (-EFAULT);
30114 +
30115 +       return (user_resume_eproc_trap (pr->pr_uctx, args.rs_addr));
30116 +    }
30117 +
30118 +    case ELAN4IO_RESUME_CPROC_TRAP:
30119 +    {
30120 +       ELAN4IO_RESUME_CPROC_TRAP_STRUCT args;
30121 +       
30122 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_CPROC_TRAP_STRUCT)))
30123 +           return (-EFAULT);
30124 +
30125 +       return (user_resume_cproc_trap (pr->pr_uctx, args.rs_indx));
30126 +    }
30127 +
30128 +    case ELAN4IO_RESUME_DPROC_TRAP:
30129 +    {
30130 +       ELAN4IO_RESUME_DPROC_TRAP_STRUCT args;
30131 +       
30132 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_DPROC_TRAP_STRUCT)))
30133 +           return (-EFAULT);
30134 +
30135 +       return (user_resume_dproc_trap (pr->pr_uctx, &args.rs_desc));
30136 +    }
30137 +
30138 +    case ELAN4IO_RESUME_TPROC_TRAP:
30139 +    {
30140 +       ELAN4IO_RESUME_TPROC_TRAP_STRUCT args;
30141 +       
30142 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_TPROC_TRAP_STRUCT)))
30143 +           return (-EFAULT);
30144 +
30145 +       return (user_resume_tproc_trap (pr->pr_uctx, &args.rs_regs));
30146 +    }
30147 +
30148 +    case ELAN4IO_RESUME_IPROC_TRAP:
30149 +    {
30150 +       ELAN4IO_RESUME_IPROC_TRAP_STRUCT args;
30151 +       
30152 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_RESUME_IPROC_TRAP_STRUCT)))
30153 +           return (-EFAULT);
30154 +
30155 +       return (user_resume_iproc_trap (pr->pr_uctx, args.rs_channel, args.rs_trans, 
30156 +                                       &args.rs_header, &args.rs_data));
30157 +    }
30158 +
30159 +    case ELAN4IO_FLUSH_ICACHE:
30160 +       elan4_flush_icache (&uctx->uctx_ctxt);
30161 +       return (0);
30162 +
30163 +    case ELAN4IO_STOP_CTXT:
30164 +       if (arg)
30165 +           user_swapout (uctx, UCTX_USER_STOPPED);
30166 +       else
30167 +           user_swapin (uctx, UCTX_USER_STOPPED);
30168 +       return (0);
30169 +
30170 +    case ELAN4IO_ALLOC_INTCOOKIE_TABLE:
30171 +    {
30172 +       ELAN_CAPABILITY *cap;
30173 +       INTCOOKIE_TABLE *tbl;
30174 +
30175 +       KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
30176 +       if ((cap == NULL))
30177 +           return (-ENOMEM);
30178 +
30179 +       if (copy_from_user (cap, (void *) arg, sizeof (ELAN_CAPABILITY)))
30180 +           res = -EFAULT;
30181 +       else
30182 +       {
30183 +           tbl = intcookie_alloc_table(cap);
30184 +
30185 +           if (tbl == NULL)
30186 +               res = -ENOMEM;
30187 +           else
30188 +           {
30189 +               /* Install the intcookie table we've just created */
30190 +               spin_lock (&uctx->uctx_spinlock);
30191 +               if (uctx->uctx_intcookie_table != NULL)
30192 +                   res = -EBUSY;
30193 +               else
30194 +                   uctx->uctx_intcookie_table = tbl;
30195 +               spin_unlock (&uctx->uctx_spinlock);
30196 +               
30197 +               /* drop the table we created if there already was one */
30198 +               if (res != 0)
30199 +                   intcookie_free_table (tbl);
30200 +           }
30201 +       }
30202 +
30203 +       KMEM_FREE(cap, sizeof(*cap));
30204 +
30205 +       return (res);
30206 +    }
30207 +
30208 +    case ELAN4IO_FREE_INTCOOKIE_TABLE:
30209 +    {
30210 +       INTCOOKIE_TABLE *tbl;
30211 +
30212 +       spin_lock (&uctx->uctx_spinlock);
30213 +       tbl = uctx->uctx_intcookie_table;
30214 +       uctx->uctx_intcookie_table = NULL;
30215 +       spin_unlock (&uctx->uctx_spinlock);
30216 +
30217 +       if (tbl != NULL)
30218 +           intcookie_free_table (tbl);
30219 +
30220 +       return (tbl == NULL ? -EINVAL : 0);
30221 +    }
30222 +
30223 +    case ELAN4IO_ALLOC_INTCOOKIE:
30224 +    {
30225 +       /* For backwards compatibility with the old libs (pre 1.8.0)
30226 +        * we allocate an intcookie table on the first cookie
30227 +        * alloc if one hasn't be created already
30228 +        */
30229 +       if (uctx->uctx_intcookie_table == NULL)
30230 +       {
30231 +           ELAN_CAPABILITY *cap;
30232 +           INTCOOKIE_TABLE *tbl;
30233 +           
30234 +           KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), 1);
30235 +           if ((cap == NULL))
30236 +               return (-ENOMEM);
30237 +
30238 +           /* Create a dummy capability */
30239 +           elan_nullcap(cap);
30240 +
30241 +           /* Must be unique for each process on a node */
30242 +           cap->cap_mycontext = (int) ELAN4_TASK_HANDLE();
30243 +
30244 +           /* Create a new intcookie table */
30245 +           tbl = intcookie_alloc_table(cap);
30246 +
30247 +           /* Hang intcookie table off uctx */
30248 +           spin_lock (&uctx->uctx_spinlock);
30249 +           if (uctx->uctx_intcookie_table == NULL)
30250 +           {
30251 +               uctx->uctx_intcookie_table = tbl;
30252 +               spin_unlock (&uctx->uctx_spinlock);
30253 +           }
30254 +           else
30255 +           {
30256 +               spin_unlock (&uctx->uctx_spinlock);
30257 +               intcookie_free_table(tbl);
30258 +           }
30259 +
30260 +           KMEM_FREE(cap, sizeof(*cap));
30261 +       }
30262 +       
30263 +       return (intcookie_alloc (uctx->uctx_intcookie_table, arg));
30264 +    }
30265 +
30266 +    case ELAN4IO_FREE_INTCOOKIE:
30267 +       if (uctx->uctx_intcookie_table == NULL)
30268 +           return -EINVAL;
30269 +       else
30270 +           return (intcookie_free (uctx->uctx_intcookie_table, arg));
30271 +
30272 +    case ELAN4IO_ARM_INTCOOKIE:
30273 +       if (uctx->uctx_intcookie_table == NULL)
30274 +           return -EINVAL;
30275 +       else
30276 +           return (intcookie_arm (uctx->uctx_intcookie_table, arg));
30277 +
30278 +    case ELAN4IO_WAIT_INTCOOKIE:
30279 +       if (uctx->uctx_intcookie_table == NULL)
30280 +           return -EINVAL;
30281 +       else
30282 +           return (intcookie_wait (uctx->uctx_intcookie_table, arg));
30283 +
30284 +    case ELAN4IO_FIRE_INTCOOKIE:
30285 +    {
30286 +       ELAN4IO_FIRECAP_STRUCT *args;
30287 +
30288 +       KMEM_ALLOC (args, ELAN4IO_FIRECAP_STRUCT *, sizeof (ELAN4IO_FIRECAP_STRUCT), 1);
30289 +       if ((args == NULL))
30290 +           return (-ENOMEM);
30291 +
30292 +       if (copy_from_user (args, (void *) arg, sizeof (ELAN4IO_FIRECAP_STRUCT)))
30293 +           res = -EFAULT;
30294 +       else
30295 +           res = intcookie_fire_cap (&args->fc_capability, args->fc_cookie);
30296 +       
30297 +       KMEM_FREE(args, sizeof(*args));
30298 +
30299 +       return (res);
30300 +    }
30301 +
30302 +    case ELAN4IO_NETERR_MSG:
30303 +    {
30304 +       ELAN4IO_NETERR_MSG_STRUCT args;
30305 +       
30306 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_MSG_STRUCT)))
30307 +           return (-EFAULT);
30308 +
30309 +       return (user_send_neterr_msg (uctx, args.nm_vp, args.nm_nctx, args.nm_retries, &args.nm_msg));
30310 +    }
30311 +
30312 +    case ELAN4IO_NETERR_TIMER:
30313 +    {
30314 +       unsigned long ticks = ((unsigned long) arg * HZ) / 1000;
30315 +
30316 +       PRINTF (uctx, DBG_NETERR, "elan4_neterr_timer: arg %ld inc %ld\n", arg, ticks);
30317 +
30318 +       mod_timer (&uctx->uctx_neterr_timer, (jiffies + (ticks > 0 ? ticks : 1)));
30319 +       return 0;
30320 +    }
30321 +               
30322 +    case ELAN4IO_NETERR_FIXUP:
30323 +    {
30324 +       ELAN4IO_NETERR_FIXUP_STRUCT args;
30325 +
30326 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_NETERR_FIXUP_STRUCT)))
30327 +           return (-EFAULT);
30328 +
30329 +       if (args.nf_sten)
30330 +           return (user_neterr_sten (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
30331 +       else
30332 +           return (user_neterr_dma  (uctx, args.nf_vp, args.nf_cookie, args.nf_waitforeop));
30333 +    }
30334 +
30335 +    case ELAN4IO_LOAD_TRANSLATION:
30336 +    {
30337 +       ELAN4IO_TRANSLATION_STRUCT args;
30338 +       unsigned long              base, top;
30339 +
30340 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRANSLATION_STRUCT)))
30341 +           return (-EFAULT);
30342 +       
30343 +       top  = (args.tr_addr + args.tr_len - 1) | (PAGE_SIZE-1);
30344 +       base = args.tr_addr & PAGE_MASK;
30345 +       
30346 +       return user_load_range (uctx, base, top - base + 1, args.tr_access);
30347 +    }
30348 +    case ELAN4IO_UNLOAD_TRANSLATION:
30349 +    {
30350 +       ELAN4IO_TRANSLATION_STRUCT args;
30351 +       unsigned long              base, top;
30352 +       
30353 +       if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRANSLATION_STRUCT)))
30354 +           return (-EFAULT);
30355 +       
30356 +       top  = (args.tr_addr + args.tr_len - 1) | (PAGE_SIZE-1);
30357 +       base = args.tr_addr & PAGE_MASK;
30358 +
30359 +       elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, base, top - base + 1);
30360 +
30361 +       return 0;
30362 +    }
30363 +
30364 +    default:
30365 +       PRINTF (uctx, DBG_FILE, "user_ioctl: invalid ioctl %x\n", cmd);
30366 +       return (-EINVAL);
30367 +    }
30368 +}
30369 +
30370 +static void
30371 +user_vma_open (struct vm_area_struct *vma)
30372 +{
30373 +    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
30374 +    USER_CTXT    *uctx = pr->pr_uctx;
30375 +    unsigned long addr;
30376 +    unsigned long pgoff;
30377 +
30378 +    PRINTF (uctx, DBG_FILE, "user_vma_open: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
30379 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
30380 +
30381 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
30382 +       elan4_getcqa (&uctx->uctx_ctxt, pgoff);
30383 +}
30384 +
30385 +static void 
30386 +user_vma_close (struct vm_area_struct *vma)
30387 +{
30388 +    USER_PRIVATE *pr   = (USER_PRIVATE *) vma->vm_private_data;
30389 +    USER_CTXT    *uctx = pr->pr_uctx;
30390 +    unsigned long addr;
30391 +    unsigned long pgoff;
30392 +
30393 +    PRINTF (uctx, DBG_FILE, "user_vma_close: vm_mm=%p start=%lx end=%lx pgoff=%lx file=%p\n",
30394 +           vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_file);
30395 +
30396 +    /* NOTE: the same comments apply as mem_vma_close */
30397 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
30398 +       if (elan4_getcqa (&uctx->uctx_ctxt, pgoff) != NULL)
30399 +       {
30400 +           elan4_putcqa (&uctx->uctx_ctxt, pgoff);                     /* drop the reference we've just taken */
30401 +           elan4_putcqa (&uctx->uctx_ctxt, pgoff);                     /* and the one held by the mmap */
30402 +       }
30403 +}
30404 +
30405 +struct vm_operations_struct user_vm_ops = {
30406 +    open:              user_vma_open,
30407 +    close:             user_vma_close,
30408 +};
30409 +
30410 +static int
30411 +user_mmap (struct file *file, struct vm_area_struct *vma)
30412 +{
30413 +    USER_PRIVATE *pr    = (USER_PRIVATE *) file->private_data;
30414 +    USER_CTXT    *uctx  = pr->pr_uctx;
30415 +    ELAN4_DEV     *dev   = uctx->uctx_ctxt.ctxt_dev;
30416 +    ELAN4_CQA     *cqa;
30417 +    unsigned long addr;
30418 +    unsigned long pgoff;
30419 +    int           res;
30420 +    ioaddr_t      ioaddr;
30421 +    
30422 +    /* Don't allow these pages to be swapped out or dumped */
30423 +    vma->vm_flags |= (VM_RESERVED | VM_IO);
30424 +
30425 +    vma->vm_ops          = &user_vm_ops;
30426 +    vma->vm_file         = file;
30427 +    vma->vm_private_data = (void *) pr;
30428 +    
30429 +    for (addr = vma->vm_start, pgoff = vma->vm_pgoff; addr < vma->vm_end; addr += PAGE_SIZE, pgoff++)
30430 +    {
30431 +       switch (pgoff)
30432 +       {
30433 +       default:
30434 +           PRINTF (uctx, DBG_FILE, "user_mmap: command queue %ld mapping at %lx\n",  pgoff, addr);
30435 +           
30436 +           if ((cqa = elan4_getcqa (&uctx->uctx_ctxt, pgoff)) == NULL)
30437 +           {
30438 +               res = -EINVAL;
30439 +               goto failed;
30440 +           }
30441 +
30442 +           PRINTF (uctx, DBG_FILE, "user_mmap: cqa=%p idx=%d num=%d ref=%d\n", cqa, cqa->cqa_idx, cqa->cqa_cqnum, cqa->cqa_ref);
30443 +    
30444 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
30445 +
30446 +           if (! (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE) && (cqa->cqa_type & CQ_Reorder) != 0)
30447 +               vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
30448 +
30449 +           PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (%lx, %lx, %lx, %lx)\n",
30450 +                   addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
30451 +                   (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize, PAGE_SIZE,
30452 +                   vma->vm_page_prot);
30453 +
30454 +           if (__io_remap_page_range (addr, 
30455 +                                      pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
30456 +                                      (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize,
30457 +                                      PAGE_SIZE, vma->vm_page_prot))
30458 +           {
30459 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range failed\n");
30460 +
30461 +               elan4_putcqa (&uctx->uctx_ctxt, pgoff);
30462 +               res = -ENOMEM;
30463 +               goto failed;
30464 +           }
30465 +           break;
30466 +           
30467 +       case ELAN4_OFF_USER_REGS:
30468 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
30469 +
30470 +           switch (dev->dev_devinfo.dev_revision_id)
30471 +           {
30472 +           case PCI_REVISION_ID_ELAN4_REVA:
30473 +               ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVA_REG_OFFSET + offsetof(E4_Registers, uRegs);
30474 +               break;
30475 +               
30476 +           case PCI_REVISION_ID_ELAN4_REVB:
30477 +               ioaddr = pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + ELAN4_REVB_REG_OFFSET + offsetof(E4_Registers, uRegs);
30478 +               break;
30479 +
30480 +           default:
30481 +               res = -EINVAL;
30482 +               goto failed;
30483 +           }
30484 +
30485 +           PRINTF (uctx, DBG_FILE, "user_mmap: user_regs at %lx ioaddr %lx prot %lx\n",
30486 +                   addr, ioaddr, vma->vm_page_prot.pgprot);
30487 +
30488 +           if (__io_remap_page_range (addr,  (ioaddr & PAGEMASK), PAGE_SIZE, vma->vm_page_prot))
30489 +           {                     
30490 +               res = -EAGAIN;
30491 +               goto failed;
30492 +           }
30493 +
30494 +           break;
30495 +           
30496 +       case ELAN4_OFF_USER_PAGE:
30497 +           PRINTF (uctx, DBG_FILE, "user_mmap: shared user page - kaddr=%lx uaddr=%lx phys=%lx\n", 
30498 +                   uctx->uctx_upage, addr, kmem_to_phys (uctx->uctx_upage));
30499 +
30500 +           /* we do not want to have this area swapped out, lock it */
30501 +           vma->vm_flags |= VM_LOCKED;
30502 +           
30503 +           /* Mark the page as reserved or else the remap_page_range() doesn't remap it */
30504 +           SetPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)));
30505 +       
30506 +           if (__remap_page_range (addr, kmem_to_phys (uctx->uctx_upage), PAGE_SIZE, vma->vm_page_prot))
30507 +           {
30508 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (user_page) failed\n");
30509 +               res = -ENOMEM;
30510 +               goto failed;
30511 +           }
30512 +           break;
30513 +           
30514 +       case ELAN4_OFF_TPROC_TRAMPOLINE:
30515 +           vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
30516 +
30517 +           PRINTF (uctx, DBG_FILE, "user_mmap: tproc trampoline - kaddr=%lx uaddr=%lx phys=%lx\n", uctx->uctx_trampoline, addr, 
30518 +                   pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)));
30519 +
30520 +           if (__io_remap_page_range (addr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM) + 
30521 +                                      uctx->uctx_trampoline + (addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)),
30522 +                                      PAGE_SIZE, vma->vm_page_prot))
30523 +           {
30524 +               PRINTF (uctx, DBG_FILE, "user_mmap: remap_page_range (tproc_trampoline) failed\n");
30525 +               res = -ENOMEM;
30526 +               goto failed;
30527 +           }
30528 +           break;
30529 +           
30530 +       case ELAN4_OFF_DEVICE_STATS:
30531 +           printk ("user_mmap: device_stats\n");
30532 +           break;
30533 +       }
30534 +       
30535 +    }
30536 +
30537 +    return (0);
30538 +
30539 + failed:
30540 +    for (addr -= PAGE_SIZE, pgoff--; addr >= vma->vm_start; addr -= PAGE_SIZE, pgoff--)
30541 +       elan4_putcqa (&uctx->uctx_ctxt, pgoff);         /* drop the reference we've just taken */
30542 +    return (res);
30543 +}
30544 +
30545 +int
30546 +user_pteload (struct vm_area_struct *vma, unsigned long maddr, USER_CTXT *uctx, E4_Addr eaddr, int perm)
30547 +{
30548 +    USER_PRIVATE *pr    = (USER_PRIVATE *) vma->vm_private_data;
30549 +    ELAN4_DEV    *dev   = uctx->uctx_ctxt.ctxt_dev;
30550 +    unsigned long pgoff = vma->vm_pgoff + ((maddr - vma->vm_start) >> PAGE_SHIFT);
30551 +    register int i, res;
30552 +
30553 +    if (pr->pr_uctx != uctx)
30554 +       return -EINVAL;
30555 +    
30556 +    switch (pgoff)
30557 +    {
30558 +    default:
30559 +    {
30560 +       ELAN4_CQA    *cqa;
30561 +       unsigned long cqaddr;
30562 +
30563 +       if ((cqa = elan4_getcqa (&uctx->uctx_ctxt, pgoff)) == NULL)
30564 +           return -EINVAL;
30565 +
30566 +       cqaddr = (cqa->cqa_cqnum + dev->dev_cqoffset) * CQ_CommandMappingSize;
30567 +
30568 +       for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
30569 +       {
30570 +           E4_uint64 newpte = ((cqaddr + i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm) | PTE_CommandQueue;
30571 +
30572 +           if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_COMMAND, newpte)) < 0)
30573 +               return res;
30574 +       }
30575 +       elan4_putcqa (&uctx->uctx_ctxt, pgoff);
30576 +
30577 +       return 0;
30578 +    }
30579 +
30580 +    case ELAN4_OFF_USER_REGS:
30581 +    {
30582 +       u32 blow, bhigh;
30583 +       physaddr_t ioaddr;
30584 +
30585 +       /* compute a local pci address from our register BAR */
30586 +       pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_2, &blow);
30587 +       pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_3, &bhigh);
30588 +
30589 +       ioaddr = (((physaddr_t) bhigh) << 32) | (blow & PCI_BASE_ADDRESS_MEM_MASK);
30590 +
30591 +       switch (dev->dev_devinfo.dev_revision_id)
30592 +       {
30593 +       case PCI_REVISION_ID_ELAN4_REVA:
30594 +           ioaddr |= ELAN4_REVA_REG_OFFSET + offsetof(E4_Registers, uRegs);
30595 +           break;
30596 +           
30597 +       case PCI_REVISION_ID_ELAN4_REVB:
30598 +           ioaddr |= ELAN4_REVB_REG_OFFSET + offsetof(E4_Registers, uRegs);
30599 +           break;
30600 +           
30601 +       default:
30602 +           return -EINVAL;
30603 +       }
30604 +
30605 +       for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
30606 +       {
30607 +           E4_uint64 newpte = (((ioaddr & PAGE_MASK) | i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm) | PTE_PciNotLocal;
30608 +
30609 +           if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_REGS, newpte)) < 0)
30610 +               return res;
30611 +       }
30612 +
30613 +       return 0;
30614 +    }
30615 +
30616 +    case ELAN4_OFF_USER_PAGE:
30617 +       for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
30618 +           if ((res = elan4mmu_pteload_page (&uctx->uctx_ctxt, 0, eaddr, pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)), perm)) < 0)
30619 +               return res;
30620 +       return 0;
30621 +
30622 +    case ELAN4_OFF_TPROC_TRAMPOLINE:
30623 +    {
30624 +       sdramaddr_t trampoline = uctx->uctx_trampoline + (maddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT));
30625 +
30626 +       if (! elan4mmu_sdram_aliascheck (&uctx->uctx_ctxt, eaddr, trampoline))
30627 +           return -EINVAL;
30628 +           
30629 +       for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
30630 +       {
30631 +           E4_uint64 newpte = ((trampoline + i) >> PTE_PADDR_SHIFT) | PTE_SetPerm (perm);
30632 +
30633 +           if ((res = elan4mmu_pteload (&uctx->uctx_ctxt, 0, eaddr + i, HE_TYPE_SDRAM, newpte)) < 0)
30634 +               return res;
30635 +       }
30636 +       return 0;
30637 +    }
30638 +    }
30639 +}
30640 +
30641 +/* driver entry points */
30642 +static int
30643 +elan4_open (struct inode *inode, struct file *file)
30644 +{
30645 +    PRINTF (DBG_USER, DBG_FILE, "elan4_open: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
30646 +    
30647 +    switch (ELAN4_MINOR (inode))
30648 +    {
30649 +    case ELAN4_MINOR_CONTROL:
30650 +       return (control_open (inode, file));
30651 +    case ELAN4_MINOR_MEM:
30652 +       return (mem_open (inode, file));
30653 +    case ELAN4_MINOR_USER:
30654 +       return (user_open (inode, file));
30655 +    default:
30656 +       return (-ENXIO);
30657 +    }
30658 +}
30659 +
30660 +static int
30661 +elan4_release (struct inode *inode, struct file *file)
30662 +{
30663 +    PRINTF (DBG_USER, DBG_FILE, "elan4_release: device %d minor %d file=%p\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), file);
30664 +    
30665 +    switch (ELAN4_MINOR (inode))
30666 +    {
30667 +    case ELAN4_MINOR_CONTROL:
30668 +       return (control_release (inode, file));
30669 +    case ELAN4_MINOR_MEM:
30670 +       return (mem_release (inode, file));
30671 +    case ELAN4_MINOR_USER:
30672 +       return (user_release (inode, file));
30673 +    default:
30674 +       return (-ENXIO);
30675 +    }
30676 +}
30677 +
30678 +static int
30679 +elan4_ioctl (struct inode *inode, struct file *file, 
30680 +            unsigned int cmd, unsigned long arg)
30681 +{
30682 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
30683 +    
30684 +    switch (ELAN4_MINOR (inode))
30685 +    {
30686 +    case ELAN4_MINOR_CONTROL:
30687 +       return (control_ioctl (inode, file, cmd, arg));
30688 +    case ELAN4_MINOR_MEM:
30689 +       return (mem_ioctl (inode, file, cmd, arg));
30690 +    case ELAN4_MINOR_USER:
30691 +       return (user_ioctl (inode, file, cmd, arg));
30692 +    default:
30693 +       return (-ENXIO);
30694 +    }
30695 +}
30696 +
30697 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
30698 +static int
30699 +elan4_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
30700 +{
30701 +    struct inode *inode = file->f_dentry->d_inode;
30702 +    extern int sys_ioctl (unsigned int fd, unsigned int cmd, unsigned long arg);
30703 +
30704 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: device %d minor %d cmd %x\n", ELAN4_DEVICE(inode), ELAN4_MINOR(inode), cmd);
30705 +    
30706 +    if (ELAN4_MINOR (inode) == ELAN4_MINOR_USER)
30707 +    {
30708 +       USER_PRIVATE *pr    = (USER_PRIVATE *) file->private_data;
30709 +       USER_CTXT    *uctx  = pr->pr_uctx;
30710 +
30711 +       if (current->mm != pr->pr_mm)
30712 +           return -EINVAL;
30713 +       
30714 +       switch (cmd)
30715 +       {
30716 +       case ELAN4IO_SETPERM32:
30717 +       {
30718 +           ELAN4IO_PERM_STRUCT32 args;
30719 +           
30720 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
30721 +               return (-EFAULT);
30722 +           
30723 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: setperm maddr=%x eaddr=%llx len=%llx perm=%d\n",
30724 +                   args.ps_maddr, args.ps_eaddr,args.ps_len, args.ps_perm);
30725 +
30726 +           return (user_setperm (uctx, args.ps_maddr, args.ps_eaddr, args.ps_len, args.ps_perm));
30727 +       }
30728 +       
30729 +       case ELAN4IO_CLRPERM32:
30730 +       {
30731 +           ELAN4IO_PERM_STRUCT32 args;
30732 +           
30733 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_PERM_STRUCT32)))
30734 +               return (-EFAULT);
30735 +           
30736 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: clrperm eaddr=%llx len=%llx\n",
30737 +                   args.ps_eaddr, args.ps_len);
30738 +
30739 +           user_clrperm (uctx, args.ps_eaddr, args.ps_len);
30740 +           return (0);
30741 +       }
30742 +    
30743 +       case ELAN4IO_TRAPHANDLER32:
30744 +       {
30745 +           ELAN4IO_TRAPHANDLER_STRUCT32 args;
30746 +           
30747 +           if (copy_from_user (&args, (void *) arg, sizeof (ELAN4IO_TRAPHANDLER_STRUCT32)))
30748 +               return (-EFAULT);
30749 +           
30750 +           PRINTF (DBG_USER, DBG_FILE, "user_ioctl32: traphandler trapp=%x nticks=%d\n",
30751 +                   args.th_trapp, args.th_nticks);
30752 +
30753 +           return (user_trap_handler (pr->pr_uctx, (ELAN4_USER_TRAP *)(unsigned long)args.th_trapp, args.th_nticks));
30754 +       }
30755 +       }
30756 +    }
30757 +
30758 +    PRINTF (DBG_USER, DBG_FILE, "elan4_ioctl32: fd=%d cmd=%x arg=%lx file=%p\n", fd, cmd, arg, file);
30759 +    return (sys_ioctl (fd, cmd, arg));
30760 +}
30761 +#endif
30762 +
30763 +
30764 +
30765 +static int
30766 +elan4_mmap (struct file *file, struct vm_area_struct *vma)
30767 +{
30768 +    PRINTF (DBG_USER, DBG_FILE, "elan4_mmap: instance %d minor %d start=%lx end=%lx pgoff=%lx\n", 
30769 +           ELAN4_DEVICE (file->f_dentry->d_inode), ELAN4_MINOR (file->f_dentry->d_inode),
30770 +           vma->vm_start, vma->vm_end, vma->vm_pgoff);
30771 +
30772 +    switch (ELAN4_MINOR (file->f_dentry->d_inode))
30773 +    {
30774 +    case ELAN4_MINOR_CONTROL:
30775 +       return (control_mmap (file, vma));
30776 +    case ELAN4_MINOR_MEM:
30777 +       return (mem_mmap (file, vma));
30778 +    case ELAN4_MINOR_USER:
30779 +       return (user_mmap (file, vma));
30780 +    default:
30781 +       return (-ENXIO);
30782 +    }
30783 +}
30784 +
30785 +void
30786 +elan4_update_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *bridge)
30787 +{
30788 +    u16 cnf;
30789 +    
30790 +    pci_read_config_word (bridge, 0x40 /* CNF */, &cnf);
30791 +    
30792 +    /* We expect the CNF register to be configured as follows
30793 +     *
30794 +     * [8]   == 1      PMODE PCI Mode
30795 +     * [7:6] == 2/3    PFREQ PCI Frequency (100/133)
30796 +     * [5]   == 0      RSDIS Restreaming Disable
30797 +     * [4:3] == 0x     PP    Prefetch Policy
30798 +     * [2]   == 0       DTD   Delayed Transaction Depth
30799 +     * [1:0] == 10      MDT   MaximumDelayedTransactions
30800 +     */
30801 +    
30802 +    if ((cnf & (1 << 8)) == 0)
30803 +       printk ("elan%d: strangeness - elan reports PCI-X but P64H2 reports PCI mode !\n", dev->dev_instance);
30804 +    else if ((cnf & 0xb7) != 0x82 && (cnf & 0xb7) != 0x84 && optimise_pci_bus < 2)
30805 +       printk ("elan%d: P64H2 CNF is not configured as expected : RSDIS=%d PP=%d DTD=%d MDT=%d\n",
30806 +               dev->dev_instance, (cnf >> 5) & 1, (cnf >> 3) & 3, (cnf >> 2) & 1, cnf & 3);
30807 +    else
30808 +    {
30809 +       switch ((cnf >> 6) & 3)
30810 +       {
30811 +       case 2:                                         /* PCI-X 100 */
30812 +           pci_write_config_word (bridge, 0xfc /* PC100 */, 0x7777);
30813 +           
30814 +           printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 100\n", dev->dev_instance);
30815 +           
30816 +           break;
30817 +           
30818 +       case 3:                                         /* PCI-X 133 */
30819 +           pci_write_config_word (bridge, 0xfe /* PC133 */, 0x7777);
30820 +           
30821 +           printk ("elan%d: optimise P64H2 : setting MDT=0, DTD=1, PFC=777 for PCI-X 133\n", dev->dev_instance);
30822 +           break;
30823 +       }
30824 +       
30825 +       pci_write_config_word (bridge, 0x40 /* CNF */, (cnf & 0xfff8) | 0x4);   /* DTD=1 MDT=0 */
30826 +    }
30827 +}
30828 +
30829 +int
30830 +elan4_optimise_intel_p64h2 (ELAN4_DEV *dev, struct pci_dev *pdev)
30831 +{
30832 +    struct pci_bus   *bus      = pdev->bus;
30833 +    struct pci_dev   *bridge   = bus->self;
30834 +    unsigned int      devcount = 0;
30835 +    u8                revision;
30836 +    u32               ectrl;
30837 +    struct list_head *el;
30838 +    
30839 +    pci_read_config_dword (pdev, PCI_ELAN_CONTROL, &ectrl);
30840 +
30841 +    /* We can only run in PCI-Xmode with a B1 stepping P64H2 because of P64H2 Errata 3 */
30842 +    pci_read_config_byte (bridge, PCI_REVISION_ID, &revision);
30843 +    if (revision < 0x04)
30844 +    {
30845 +       if ((ectrl & ECTRL_INITIALISATION_MODE) != Pci2_2)
30846 +       {
30847 +           static const char *p64h2_stepping[4] = {"UNKNOWN", "UNKNOWN", "UNKNOWN", "B0"};
30848 +
30849 +           printk ("elan%d: unable to use device because of P64H2 Errata 3 on\n"
30850 +                   "       %s stepping part and running in a PCI-X slot\n", 
30851 +                   dev->dev_instance, p64h2_stepping[revision]);
30852 +           return -EINVAL;
30853 +       }
30854 +    }
30855 +    
30856 +    /* We can only alter the bus configuration registers if the Elan is the only device
30857 +     * on the bus ... */
30858 +    list_for_each (el, &bus->devices) {
30859 +       struct pci_dev *pcip = list_entry (el, struct pci_dev, bus_list);
30860 +
30861 +       if (pcip == pdev || (pcip->vendor == PCI_VENDOR_ID_INTEL && pcip->device == 0x1462 /* P64H2 HOTPLUG */))
30862 +           continue;
30863 +           
30864 +       devcount++;
30865 +    }
30866 +
30867 +    if (devcount > 0 || !list_empty (&bus->children))
30868 +    {
30869 +       printk ("elan%d: unable to optimise P64H2 settings as %s%s\n", dev->dev_instance,
30870 +               (devcount > 0) ? "more than one device on bus" :  "",
30871 +               ! list_empty (&bus->children) ? "has child buses" : "");
30872 +       return 0;
30873 +    }
30874 +
30875 +#ifdef __ia64
30876 +    if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz)
30877 +    {
30878 +       struct pci_dev *pcip;
30879 +       unsigned int sioh_good      = 0;
30880 +       unsigned int sioh_downgrade = 0;
30881 +       unsigned int snc_good       = 0;
30882 +       unsigned int snc_downgrade  = 0;
30883 +       
30884 +       /* Search for the associated SIOH and SNC on ia64,
30885 +        * if we have a C2 SIOH and a C0/C1 SNC, then we can
30886 +        * reconfigure the P64H2 as follows:
30887 +        *    CNF:MDT   = 0
30888 +        *    CNF:DTD   = 1
30889 +        *    CNF:PC133 = 7777
30890 +        *
30891 +        * if not, then issue a warning that down rev parts
30892 +        * affect bandwidth.
30893 +        */
30894 +       for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x500, pcip)); )
30895 +       {
30896 +           pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
30897 +           
30898 +           if (revision >= 0x21)
30899 +               snc_good++;
30900 +           else
30901 +           {
30902 +               printk ("elan%d: SNC revision %x (%s)\n", dev->dev_instance, revision,
30903 +                       revision == 0x00 ? "A0" : revision == 0x01 ? "A1" : 
30904 +                       revision == 0x02 ? "A2" : revision == 0x03 ? "A3" :
30905 +                       revision == 0x10 ? "B0" : revision == 0x20 ? "C0" : 
30906 +                       revision == 0x21 ? "C1" : "UNKNOWN");
30907 +           
30908 +               snc_downgrade++;
30909 +           }
30910 +       }
30911 +
30912 +       for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_INTEL, 0x510, pcip)) != NULL; )
30913 +       {
30914 +           pci_read_config_byte (pcip, PCI_REVISION_ID, &revision);
30915 +           
30916 +           
30917 +           if (revision >= 0x22)
30918 +               sioh_good++;
30919 +           else
30920 +           {
30921 +               printk ("elan%d: SIOH revision %x (%s)\n", dev->dev_instance, revision,
30922 +                       revision == 0x10 ? "B0" : revision == 0x20 ? "C0" : 
30923 +                       revision == 0x21 ? "C1" : revision == 0x22 ? "C2" : "UNKNOWN");
30924 +
30925 +               sioh_downgrade++;
30926 +           }
30927 +       }
30928 +
30929 +       if (optimise_pci_bus < 2 && (sioh_downgrade || snc_downgrade))
30930 +           printk ("elan%d: unable to optimise as SNC/SIOH below required C1/C2 steppings\n", dev->dev_instance);
30931 +       else if (optimise_pci_bus < 2 && (sioh_good == 0 || snc_good == 0))
30932 +           printk ("elan%d: unable to optimise as cannot determine SNC/SIOH revision\n", dev->dev_instance);
30933 +       else
30934 +           elan4_update_intel_p64h2 (dev, bridge);
30935 +    }
30936 +#endif
30937 +    
30938 +#ifdef __i386
30939 +    if ((ectrl & ECTRL_INITIALISATION_MODE) == PciX100to133MHz)
30940 +       elan4_update_intel_p64h2 (dev, bridge);
30941 +#endif     
30942 +    return 0;
30943 +}
30944 +
30945 +int
30946 +elan4_optimise_intel_pxh (ELAN4_DEV *dev, struct pci_dev *pdev)
30947 +{
30948 +    dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_64BIT_READ;
30949 +
30950 +    return 0;
30951 +}
30952 +
30953 +void
30954 +elan4_optimise_serverworks_ciobx2 (ELAN4_DEV *dev)
30955 +{
30956 +    struct pci_dev *pdev = dev->dev_osdep.pdev;
30957 +    struct pci_dev *pcip;
30958 +    unsigned char   bus;
30959 +    unsigned int    dor;
30960 +    
30961 +    /* Find the CIOBX2 for our bus number */
30962 +    for (pcip = NULL; (pcip = pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, pcip)) != NULL;)
30963 +    {
30964 +       pci_read_config_byte (pcip, 0x44 /* BUSNUM */, &bus);
30965 +       
30966 +       if (pdev->bus->number == bus)
30967 +       {
30968 +           printk ("elan%d: optimise CIOBX2 : setting DOR to disable read pipe lining\n", dev->dev_instance);
30969 +
30970 +           pci_read_config_dword (pcip, 0x78 /* DOR */, &dor);
30971 +           pci_write_config_dword (pcip, 0x78 /* DOR */, dor | (1 << 16));
30972 +
30973 +           printk ("elan%d: disabling write-combining on ServerWorks chipset\n", dev->dev_instance);
30974 +           dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_NO_WRITE_COMBINE;
30975 +       }
30976 +    }
30977 +}
30978 +
30979 +#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_PPC_PSERIES) || defined(__alpha)
30980 +int
30981 +elan4_optimise_pci_map (ELAN4_DEV *dev, unsigned int features)
30982 +{
30983 +    dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= features;
30984 +
30985 +    if (pci_set_dma_mask (dev->dev_osdep.pdev, (u64) 0XFFFFFFFFFFFFFFFFull) ||
30986 +       pci_set_consistent_dma_mask (dev->dev_osdep.pdev, (u64) 0XFFFFFFFFFFFFFFFFull))
30987 +    {
30988 +       printk (KERN_ERR "elan%d: unable to set DAC mode\n", dev->dev_instance);
30989 +       return -EINVAL;
30990 +    }
30991 +
30992 +    return 0;
30993 +}
30994 +#endif
30995 +
30996 +int
30997 +elan4_optimise_bus (ELAN4_DEV *dev)
30998 +{
30999 +    struct pci_dev *pdev = dev->dev_osdep.pdev;
31000 +
31001 +    if (pdev->bus && pdev->bus->self) 
31002 +    {
31003 +       struct pci_dev *bridge = pdev->bus->self;
31004 +       
31005 +       if (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x1460 /* Intel P64H2 */)
31006 +           return elan4_optimise_intel_p64h2 (dev, pdev);
31007 +
31008 +       /* See http://pciids.sourceforge.net/iii/?i=8086 */
31009 +
31010 +       if ((bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0329) /* Intel 6700PXH Fn 0 */ ||
31011 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032a) /* Intel 6700PXH Fn 2 */ ||
31012 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x032c) /* Intel 6702PXH */ ||
31013 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0320) /* Intel PXH-D */ ||
31014 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0330) /* Intel 80332 (A segment) */ ||
31015 +           (bridge->vendor == PCI_VENDOR_ID_INTEL && bridge->device == 0x0332) /* Intel 80332 (B segment) */
31016 +           )
31017 +           return elan4_optimise_intel_pxh (dev, pdev);
31018 +    }
31019 +
31020 +    if (pci_find_device (PCI_VENDOR_ID_HP, 0x122e, NULL) != NULL)              /* on HP ZX1 set the relaxed ordering  */
31021 +       dev->dev_pteval = PTE_RelaxedOrder;                                     /* bit to get better DMA bandwidth. */
31022 +
31023 +    if (pci_find_device (PCI_VENDOR_ID_SERVERWORKS, 0x0101, NULL) != NULL)     /* ServerWorks CIOBX2 */
31024 +       elan4_optimise_serverworks_ciobx2 (dev);
31025 +
31026 +#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_PPC_PSERIES)
31027 +    return elan4_optimise_pci_map (dev, ELAN4_FEATURE_NO_WRITE_COMBINE | ELAN4_FEATURE_PCI_MAP | ELAN4_FEATURE_NO_PREFETCH);
31028 +#endif
31029 +
31030 +#ifdef __alpha
31031 +    return elan4_optimise_pci_map (dev, ELAN4_FEATURE_PCI_MAP);
31032 +#endif
31033 +
31034 +#ifdef __sparc
31035 +    if (! (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP))
31036 +       dev->dev_pteval |= (0xfffe000000000000 >> PTE_PADDR_SHIFT);
31037 +#endif
31038 +
31039 +    return 0;
31040 +}
31041 +
31042 +int
31043 +elan4_pciinit (ELAN4_DEV *dev)
31044 +{
31045 +    int res;
31046 +    u32 value;
31047 +    u16 command;
31048 +    u8 cacheline;
31049 +    unsigned long flags;
31050 +
31051 +    if (optimise_pci_bus && (res = elan4_optimise_bus (dev)) <0)
31052 +       return (res);
31053 +
31054 +    if ((res = pci_enable_device (dev->dev_osdep.pdev)) < 0)
31055 +       return (res);
31056 +
31057 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
31058 +    if ((value & ECTRL_INITIALISATION_MODE) == Pci2_2)
31059 +       printk ("elan%d: is an elan4%c (PCI-2.2)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
31060 +    else
31061 +    {
31062 +       switch (value & ECTRL_INITIALISATION_MODE)
31063 +       {
31064 +       case PciX50To66MHz:
31065 +           printk ("elan%d: is an elan4%c (PCI-X 50-66)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
31066 +           break;
31067 +           
31068 +       case PciX66to100MHz:
31069 +           printk ("elan%d: is an elan4%c (PCI-X 66-100)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
31070 +           break;
31071 +           
31072 +       case PciX100to133MHz:
31073 +           printk ("elan%d: is an elan4%c (PCI-X 100-133)\n", dev->dev_instance, 'a' + dev->dev_devinfo.dev_revision_id);
31074 +           break;
31075 +           
31076 +       default:
31077 +           printk ("elan%d: Invalid PCI-X mode\n", dev->dev_instance);
31078 +           return (-EINVAL);
31079 +       }
31080 +    }
31081 +
31082 +    /* initialise the elan pll control register */
31083 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, &value);
31084 +
31085 +    if (elan4_pll_cfg)
31086 +    {
31087 +       printk ("elan%d: setting pll control to %08x\n", dev->dev_instance, elan4_pll_cfg);
31088 +
31089 +       pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, elan4_pll_cfg);
31090 +    }
31091 +    else
31092 +    {
31093 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
31094 +           pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
31095 +                                   (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_4_3);
31096 +       else
31097 +           pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
31098 +                                   (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | ECTRL_SYS_CLOCK_RATIO_6_5 | SysPll_FeedForwardISel0 | SysPll_FeedForwardISel1);
31099 +    }  
31100 +
31101 +    /* initialise the elan control register */
31102 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
31103 +
31104 +    value = ((15 << ECTRL_IPROC_HIGH_PRI_TIME_SHIFT) |
31105 +            (15 << ECTRL_OTHER_HIGH_PRI_TIME_SHIFT) |
31106 +            (value & ECTRL_28_NOT_30_BIT_LOCAL_BAR) |
31107 +            (dev->dev_topaddrmode ? ECTRL_ExtraMasterAddrBits : 0) |
31108 +            ECTRL_ENABLE_LATENCY_RESET | 
31109 +            ECTRL_ENABLE_WRITEBURSTS | 
31110 +            ECTRL_ENABLE_2_2READBURSTS);
31111 +
31112 +#ifdef LINUX_SPARC
31113 +    value &= ~(ECTRL_ENABLE_LATENCY_RESET | ECTRL_ENABLE_WRITEBURSTS);
31114 +#endif
31115 +
31116 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
31117 +
31118 +    switch (dev->dev_devinfo.dev_revision_id)
31119 +    {
31120 +    case PCI_REVISION_ID_ELAN4_REVA:
31121 +       /* Delay 10ms here if we've changed the sysclock ratio */
31122 +       /* to allow the PLL to stabilise before proceeding */
31123 +       udelay (10000);
31124 +       break;
31125 +       
31126 +    case PCI_REVISION_ID_ELAN4_REVB:
31127 +    {
31128 +       unsigned char val = read_i2c (dev, I2cLedsValue);
31129 +
31130 +       /* On RevB we have to explicitly reset the PLLs */
31131 +       pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
31132 +
31133 +       write_i2c (dev, I2cLedsValue, val | 0x80);
31134 +       udelay (1000);
31135 +
31136 +       /* Issue the PLL counter reset and immediately inhibit all pci interaction 
31137 +        * while the PLL is recovering. The write to the PCI_COMMAND register has 
31138 +        * to occur within 50uS of the write to the i2c registers */
31139 +       local_irq_save (flags);
31140 +       write_i2c (dev, I2cLedsValue, val & ~0x80);
31141 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, (1 << 10) /* PCI_COMMAND_DISABLE_INT */);
31142 +       local_irq_restore (flags);
31143 +
31144 +       /* Wait for the write to occur and for the PLL to regain lock */
31145 +       udelay (20000); udelay (20000);
31146 +
31147 +       /* Re-enable pci interaction and clear any spurious errors detected */
31148 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_STATUS, PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR);
31149 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command);
31150 +       break;
31151 +    }
31152 +    }
31153 +
31154 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
31155 +
31156 +    /* Enable master accesses */
31157 +    pci_set_master (dev->dev_osdep.pdev);
31158 +
31159 +    /* Verify that the memWrInvalidate bit is set */
31160 +    pci_read_config_word (dev->dev_osdep.pdev, PCI_COMMAND, &command);
31161 +    pci_read_config_byte (dev->dev_osdep.pdev, PCI_CACHE_LINE_SIZE, &cacheline);
31162 +
31163 +    if ((command & PCI_COMMAND_INVALIDATE) == 0)
31164 +    {
31165 +       printk ("elan%d: enable MemWrInvalidate (cacheline %d)\n",
31166 +               dev->dev_instance, cacheline * 4);
31167 +
31168 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_COMMAND, command | PCI_COMMAND_INVALIDATE);
31169 +    }
31170 +
31171 +    if (pci_request_regions(dev->dev_osdep.pdev, "elan4"))
31172 +       return -ENODEV;
31173 +
31174 +    /* add the interrupt handler */
31175 +    if (request_irq (dev->dev_osdep.pdev->irq, elan4_irq, SA_SHIRQ, "elan4", dev) != 0)
31176 +    {
31177 +       pci_release_regions (dev->dev_osdep.pdev);
31178 +       return -ENXIO;
31179 +    }
31180 +
31181 +    return (0);
31182 +}
31183 +
31184 +void
31185 +elan4_updatepll (ELAN4_DEV *dev, unsigned int val)
31186 +{
31187 +    u32 value;
31188 +
31189 +    if (elan4_pll_cfg == 0)
31190 +    {
31191 +       pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, &value);
31192 +
31193 +       pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_PLL_CONTROL, 
31194 +                               (value & ~ECTRL_SYS_CLOCK_RATIO_MASK) | val);
31195 +
31196 +       /* Delay 10ms here if we've changed the sysclock ratio */
31197 +       /* to allow the PLL to stabilise before proceeding */
31198 +       udelay (10000);
31199 +    }  
31200 +}
31201 +
31202 +void
31203 +elan4_pcifini (ELAN4_DEV *dev)
31204 +{
31205 +    u32 value;
31206 +
31207 +    /* release the interrupt handler */
31208 +    free_irq (dev->dev_osdep.pdev->irq, dev);
31209 +
31210 +    /* release the address space */
31211 +    pci_release_regions (dev->dev_osdep.pdev);
31212 +
31213 +    pci_read_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, &value);
31214 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value | ECTRL_SOFTWARE_INTERNAL_RESET);
31215 +    pci_write_config_dword (dev->dev_osdep.pdev, PCI_ELAN_CONTROL, value);
31216 +
31217 +    pci_disable_device (dev->dev_osdep.pdev);
31218 +}
31219 +
31220 +void
31221 +elan4_pcierror (ELAN4_DEV *dev)
31222 +{
31223 +    struct pci_dev *pci = dev->dev_osdep.pdev;
31224 +    u8  type;
31225 +    u16 status, cmd;
31226 +    u32 physlo, physhi, control;
31227 +    
31228 +    printk("elan%d: pci error has occurred\n", dev->dev_instance);
31229 +
31230 +    pci_read_config_word  (pci, PCI_STATUS,             &status);
31231 +    pci_read_config_word  (pci, PCI_COMMAND,             &cmd);
31232 +    pci_read_config_dword (pci, PCI_ELAN_CONTROL,       &control);
31233 +
31234 +    if (control & ECTRL_REC_SPLIT_COMP_MESSAGE)
31235 +    {
31236 +       u32 message, attr;
31237 +       
31238 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control & ~ECTRL_SELECT_SPLIT_MESS_ATTR);
31239 +       pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &message);
31240 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_SELECT_SPLIT_MESS_ATTR);
31241 +       pci_read_config_dword (pci, PCI_ELAN_SPLIT_MESSAGE_VALUE, &attr);
31242 +
31243 +       printk ("elan%d: pcierror - received split completion message - attr=%08x, message=%08x\n", 
31244 +               dev->dev_instance, attr, message);
31245 +
31246 +       pci_write_config_dword (pci, PCI_ELAN_CONTROL, control | ECTRL_REC_SPLIT_COMP_MESSAGE); /* clear the error */
31247 +    }
31248 +    else
31249 +    {
31250 +       pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_LO, &physlo);
31251 +       pci_read_config_dword (pci, PCI_ELAN_PARITY_ADDR_HI, &physhi);
31252 +       pci_read_config_byte  (pci, PCI_ELAN_PARITY_TYPE,    &type);
31253 +       
31254 +       printk ("elan%d: pcierror - status %x cmd %4x physaddr %08x%08x type %x\n", 
31255 +               dev->dev_instance, status, cmd, physhi, physlo, type);
31256 +       
31257 +       if (status & PCI_STATUS_PARITY)
31258 +           printk ("elan%d: parity error signalled (PERR)\n", dev->dev_instance);
31259 +       if (status & PCI_STATUS_DETECTED_PARITY)
31260 +           printk ("elan%d: detected parity error\n", dev->dev_instance);
31261 +       if (status & PCI_STATUS_REC_MASTER_ABORT)
31262 +           printk ("elan%d: received master abort\n", dev->dev_instance);
31263 +       if (status & PCI_STATUS_REC_TARGET_ABORT)
31264 +           printk ("elan%d: received target abort\n", dev->dev_instance);
31265 +       if (status & PCI_STATUS_SIG_SYSTEM_ERROR)
31266 +           printk ("elan%d: signalled SERR\n", dev->dev_instance);
31267 +       if (status & PCI_STATUS_SIG_TARGET_ABORT)
31268 +           printk ("elan%d: signalled target abort\n", dev->dev_instance);
31269 +
31270 +       pci_write_config_word (pci, PCI_STATUS, status);        /* clear the errors */
31271 +    }
31272 +
31273 +    DISABLE_INT_MASK (dev, INT_PciMemErr);
31274 +
31275 +#ifdef notdef
31276 +    panic ("elan%d: pcierror\n", dev->dev_instance);           /* better panic ! */
31277 +#endif
31278 +}
31279 +
31280 +static irqreturn_t
31281 +elan4_irq (int irq, void *arg, struct pt_regs *regs)
31282 +{
31283 +    if (elan4_1msi0 ((ELAN4_DEV *) arg))
31284 +           return IRQ_HANDLED;
31285 +    else
31286 +           return IRQ_NONE;
31287 +}
31288 +
31289 +ioaddr_t
31290 +elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handle)
31291 +{
31292 +    return (ioaddr_t) ioremap_nocache (pci_resource_start (dev->dev_osdep.pdev, bar) + off, size);
31293 +}
31294 +
31295 +void
31296 +elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handle)
31297 +{
31298 +    iounmap ((void *) ptr);
31299 +}
31300 +
31301 +unsigned long
31302 +elan4_resource_len (ELAN4_DEV *dev, unsigned bar)
31303 +{
31304 +    return (pci_resource_len (dev->dev_osdep.pdev, bar));
31305 +}
31306 +
31307 +void
31308 +elan4_configure_writecombining (ELAN4_DEV *dev)
31309 +{
31310 +#ifdef CONFIG_MTRR
31311 +    dev->dev_osdep.sdram_mtrr = dev->dev_osdep.regs_mtrr = -1;
31312 +#endif
31313 +
31314 +    if ((dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_NO_WRITE_COMBINE))
31315 +       return;
31316 +
31317 +#if (defined(__i386) || defined(__x86_64))
31318 +
31319 +#if defined (X86_FEATURE_PAT)
31320 +
31321 +#ifndef boot_cpu_has
31322 +#  define boot_cpu_has(bit)      test_bit(bit, boot_cpu_data.x86_capability)
31323 +#endif
31324 +
31325 +    /* Try to utilise PAT entries which already exist */
31326 +    if (boot_cpu_has (X86_FEATURE_PAT))
31327 +    {
31328 +       unsigned int val0, val1, i;
31329 +       int slot = -1;
31330 +
31331 +       /* Read the IA32CR_PAT MSR register and see if a slot is
31332 +        * set for write-combining.  Note we assume that all CPUs 
31333 +        * are configured the same like they're supposed to. */
31334 +       rdmsr (0x277, val0, val1);
31335 +       
31336 +       /* Check for PAT write combining entry (value 0x01) */
31337 +       for (i = 0; i < 4; i++, val0 >>= 8)
31338 +           if ((val0 & 0xff) == 0x01)
31339 +               slot = i;
31340 +       for (i = 4; i < 8; i++, val1 >>= 8)
31341 +           if ((val1 & 0xff) == 0x01)
31342 +               slot = i;
31343 +
31344 +       if (slot >= 0)
31345 +       {
31346 +           printk ("elan%d: using PAT for write combining (slot %d)\n", dev->dev_instance, slot);
31347 +
31348 +           pat_pteval = ((slot & 4) ? _PAGE_PSE : 0) | ((slot & 2) ? _PAGE_PCD : 0) | ((slot & 1) ? _PAGE_PWT : 0);
31349 +           return;
31350 +       }
31351 +    }
31352 +#endif
31353 +
31354 +#ifdef CONFIG_MTRR
31355 +    /* try and initialise the MTRR registers to enable write-combining */
31356 +    dev->dev_osdep.sdram_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), 
31357 +                                         pci_resource_len   (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM),
31358 +                                         MTRR_TYPE_WRCOMB, 1);
31359 +    if (dev->dev_osdep.sdram_mtrr < 0)
31360 +       printk ("elan%d: cannot configure MTRR for sdram\n", dev->dev_instance);
31361 +    
31362 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVB)
31363 +    {
31364 +       unsigned int cqreorder = dev->dev_cqcount >> 1;
31365 +       unsigned int cqcount   = dev->dev_cqcount  - cqreorder;
31366 +
31367 +       dev->dev_osdep.regs_mtrr = mtrr_add (pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
31368 +                                            (dev->dev_cqoffset + cqreorder) * CQ_CommandMappingSize,
31369 +                                            CQ_CommandMappingSize * cqcount,
31370 +                                            MTRR_TYPE_WRCOMB, 1);
31371 +       
31372 +       if (dev->dev_osdep.regs_mtrr < 0)
31373 +           printk ("elan%d: cannot configure MTRR for command ports\n", dev->dev_instance);
31374 +       else
31375 +       {
31376 +           dev->dev_cqreorder = cqreorder;
31377 +           return;
31378 +       }
31379 +    }
31380 +#endif
31381 +
31382 +    /* Set flag so that userspace knows write-combining is disabled */
31383 +    dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] |= ELAN4_FEATURE_NO_WRITE_COMBINE;
31384 +#endif
31385 +
31386 +}
31387 +
31388 +void
31389 +elan4_unconfigure_writecombining (ELAN4_DEV *dev)
31390 +{
31391 +#if defined (X86_FEATURE_PAT)
31392 +    if (pat_pteval != -1) return;
31393 +#endif
31394 +
31395 +#ifdef CONFIG_MTRR
31396 +    if (dev->dev_osdep.sdram_mtrr >=0 )
31397 +       mtrr_del (dev->dev_osdep.sdram_mtrr, pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM), 
31398 +                 pci_resource_len   (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM));
31399 +    
31400 +    if (dev->dev_cqreorder && dev->dev_osdep.regs_mtrr >= 0)
31401 +       mtrr_del (dev->dev_osdep.regs_mtrr, 
31402 +                 pci_resource_start (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS) + 
31403 +                 (dev->dev_cqoffset + dev->dev_cqreorder) * CQ_CommandMappingSize,
31404 +                 CQ_CommandMappingSize * (dev->dev_cqcount >> 1));
31405 +#endif
31406 +}
31407 +
31408 +EXPORT_SYMBOL(elan4_reference_device);
31409 +EXPORT_SYMBOL(elan4_dereference_device);
31410 +
31411 +/*
31412 + * Local variables:
31413 + * c-file-style: "stroustrup"
31414 + * End:
31415 + */
31416 diff -urN clean/drivers/net/qsnet/elan4/i2c.c linux-2.6.9/drivers/net/qsnet/elan4/i2c.c
31417 --- clean/drivers/net/qsnet/elan4/i2c.c 1969-12-31 19:00:00.000000000 -0500
31418 +++ linux-2.6.9/drivers/net/qsnet/elan4/i2c.c   2004-01-07 08:37:45.000000000 -0500
31419 @@ -0,0 +1,248 @@
31420 +/*
31421 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
31422 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
31423 + * 
31424 + *    For licensing information please see the supplied COPYING file
31425 + *
31426 + */
31427 +
31428 +#ident "@(#)$Id: i2c.c,v 1.4 2004/01/07 13:37:45 jon Exp $"
31429 +/*      $Source: /cvs/master/quadrics/elan4mod/i2c.c,v $*/
31430 +#include <qsnet/kernel.h>
31431 +
31432 +#include <elan4/sdram.h>
31433 +#include <elan4/debug.h>
31434 +#include <elan4/device.h>
31435 +#include <elan4/commands.h>
31436 +
31437 +#include <elan4/i2c.h>
31438 +#include <elan4/pci.h>
31439 +#include <elan4/ioctl.h>
31440 +#include <elan4/registers.h>
31441 +
31442 +#define I2C_POLL_LIMIT         8
31443 +
31444 +static int
31445 +i2c_poll_busy (ELAN4_DEV *dev)
31446 +{
31447 +    int t    = 100;
31448 +    int loop = 0;
31449 +    volatile unsigned char val;
31450 +
31451 +    /* wait for any led I2C operation to finish */
31452 +    while (((val = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cPortBusy) && loop++ < I2C_POLL_LIMIT)
31453 +    {
31454 +       DELAY (t);
31455 +       
31456 +       if (t < 500000)
31457 +           t <<= 1;
31458 +    }
31459 +    if (loop >= I2C_POLL_LIMIT)
31460 +    {
31461 +       printk ("elan%d: I2c has timed out waiting for I2cPortBusy to clear!\n", dev->dev_instance);
31462 +       printk ("elan%d: I2cPortControl=%x I2cLedBase=%x I2cStatus=%x\n",
31463 +               dev->dev_instance, val, read_i2c (dev, I2cLedBase), read_i2c (dev, I2cStatus));
31464 +    }
31465 +
31466 +    return val;
31467 +}
31468 +
31469 +static int
31470 +i2c_poll_stopped (ELAN4_DEV *dev)
31471 +{
31472 +    int t    = 100;
31473 +    int loop = 0;
31474 +    unsigned char val=0, newval;
31475 +
31476 +    /* wait for any led I2C operation to finish. Must see it stopped at least twice */
31477 +    while (!(((newval = read_i2c (dev, I2cPortControl)) & I2cCntl_I2cStopped) &&
31478 +             (val & I2cCntl_I2cStopped)) &&
31479 +             (loop++ < I2C_POLL_LIMIT))
31480 +    {
31481 +       DELAY (t);
31482 +       
31483 +       if (t < 500000)
31484 +           t <<= 1;
31485 +       val = newval;
31486 +    }
31487 +
31488 +    return val;
31489 +}
31490 +
31491 +int
31492 +i2c_disable_auto_led_update (ELAN4_DEV *dev)
31493 +{
31494 +    spin_lock (&dev->dev_i2c_lock);
31495 +
31496 +    if (dev->dev_i2c_led_disabled++ == 0)
31497 +    {
31498 +       write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) & ~I2cCntl_I2cUpdatingLedReg);
31499 +
31500 +       if (! (i2c_poll_stopped (dev) & I2cCntl_I2cStopped))
31501 +       {
31502 +           write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg);
31503 +           
31504 +           spin_unlock (&dev->dev_i2c_lock);
31505 +           
31506 +           return -EAGAIN;
31507 +       }
31508 +       
31509 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) & ~I2cCntl_SampleNewLedValues);
31510 +    }
31511 +
31512 +    spin_unlock (&dev->dev_i2c_lock);
31513 +
31514 +    return 0;
31515 +}
31516 +
31517 +void
31518 +i2c_enable_auto_led_update (ELAN4_DEV *dev)
31519 +{
31520 +    spin_lock (&dev->dev_i2c_lock);
31521 +    if (--dev->dev_i2c_led_disabled == 0)
31522 +    {
31523 +       write_i2c (dev, I2cLedBase, read_i2c (dev, I2cLedBase) | I2cCntl_I2cUpdatingLedReg);
31524 +       write_i2c (dev, I2cStatus, read_i2c (dev, I2cStatus) | I2cCntl_SampleNewLedValues);
31525 +    }
31526 +
31527 +    spin_unlock (&dev->dev_i2c_lock);
31528 +}
31529 +
31530 +int
31531 +i2c_write (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data)
31532 +{
31533 +    int i;
31534 +
31535 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
31536 +       return -EAGAIN;
31537 +    
31538 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
31539 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
31540 +    
31541 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
31542 +       return -ENXIO;
31543 +
31544 +    for (i = 0; i < count; i++)
31545 +    {
31546 +       write_i2c (dev, I2cWrData, data[i]);
31547 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | (i == (count-1) ? I2cCntl_I2cPortGenStopBit : 0));
31548 +    }
31549 +
31550 +    return 0;
31551 +}
31552 +
31553 +int
31554 +i2c_read (ELAN4_DEV *dev, unsigned int address, unsigned int count, unsigned char *data)
31555 +{
31556 +    int i;
31557 +
31558 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
31559 +       return -EAGAIN; /* not idle */ 
31560 +
31561 +    write_i2c (dev, I2cWrData,      I2C_READ_ADDR(address));
31562 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
31563 +
31564 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
31565 +       return -ENXIO;
31566 +    
31567 +    for (i = 0; i < count; i++)
31568 +    {
31569 +       write_i2c (dev, I2cWrData, 0xff);
31570 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortRead | ((i == count-1) ? I2cCntl_I2cPortGenStopBit : 0));
31571 +
31572 +       i2c_poll_busy (dev);
31573 +
31574 +       data[i] = read_i2c (dev, I2cRdData);
31575 +    }
31576 +
31577 +    return 0;
31578 +}
31579 +
31580 +int
31581 +i2c_writereg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data)
31582 +{
31583 +    int i;
31584 +
31585 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
31586 +       return -EAGAIN; /* not idle */ 
31587 +
31588 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
31589 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
31590 +
31591 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
31592 +       return -ENXIO;
31593 +    
31594 +    write_i2c (dev, I2cWrData,      reg);
31595 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
31596 +
31597 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
31598 +       return -ENXIO;
31599 +    
31600 +    for (i = 0; i < count; i++)
31601 +    {
31602 +       write_i2c (dev, I2cWrData, data[i]);
31603 +       write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | ((i == count-1) ? I2cCntl_I2cPortGenStopBit : 0));
31604 +
31605 +       if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
31606 +           printk (" i2c_writereg: off %d failed\n", i);
31607 +    }
31608 +
31609 +    return 0;
31610 +}
31611 +
31612 +int
31613 +i2c_readreg (ELAN4_DEV *dev, unsigned int address, unsigned int reg, unsigned int count, unsigned char *data)
31614 +{
31615 +    if (! (i2c_poll_busy (dev) & I2cCntl_I2cStopped))
31616 +       return -EAGAIN; /* not idle */ 
31617 +
31618 +    write_i2c (dev, I2cWrData,      I2C_WRITE_ADDR(address));
31619 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite);
31620 +
31621 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
31622 +       return -ENXIO;
31623 +    
31624 +    write_i2c (dev, I2cWrData,      reg);
31625 +    write_i2c (dev, I2cPortControl, I2cCntl_I2cPortWrite | I2cCntl_I2cPortGenStopBit);
31626 +
31627 +    if (i2c_poll_busy (dev) & I2cCntl_I2cPortAccFailed)
31628 +       return -ENXIO;
31629 +
31630 +    return i2c_read (dev, address, count, data);
31631 +}
31632 +
31633 +int
31634 +i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int len, unsigned char *data)
31635 +{
31636 +    unsigned int top = addr + len;
31637 +    int res;
31638 +
31639 +    if ((res = i2c_disable_auto_led_update (dev)) == 0)
31640 +    {
31641 +       /* read the rom in chunks that don't span the block boundary */
31642 +       while (addr < top)
31643 +       {
31644 +           unsigned int thisnob  = top - addr;
31645 +           unsigned int blocknob = I2C_24LC16B_BLOCKSIZE - I2C_24LC16B_BLOCKOFFSET(addr);
31646 +           
31647 +           if (thisnob > blocknob)
31648 +               thisnob = blocknob;
31649 +
31650 +           if ((res = i2c_readreg (dev, I2C_EEPROM_ADDR + I2C_24LC16B_BLOCKADDR(addr),
31651 +                                   I2C_24LC16B_BLOCKOFFSET(addr), thisnob, data)) < 0)
31652 +               break;
31653 +           
31654 +           addr += thisnob;
31655 +           data += thisnob;
31656 +       }
31657 +
31658 +       i2c_enable_auto_led_update (dev);
31659 +    }
31660 +    return res;
31661 +}
31662 +
31663 +/*
31664 + * Local variables:
31665 + * c-file-style: "stroustrup"
31666 + * End:
31667 + */
31668 diff -urN clean/drivers/net/qsnet/elan4/intcookie.c linux-2.6.9/drivers/net/qsnet/elan4/intcookie.c
31669 --- clean/drivers/net/qsnet/elan4/intcookie.c   1969-12-31 19:00:00.000000000 -0500
31670 +++ linux-2.6.9/drivers/net/qsnet/elan4/intcookie.c     2005-02-03 11:24:44.000000000 -0500
31671 @@ -0,0 +1,371 @@
31672 +/*
31673 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
31674 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
31675 + * 
31676 + *    For licensing information please see the supplied COPYING file
31677 + *
31678 + */
31679 +
31680 +#ident "@(#)$Id: intcookie.c,v 1.15 2005/02/03 16:24:44 addy Exp $"
31681 +/*      $Source: /cvs/master/quadrics/elan4mod/intcookie.c,v $*/
31682 +
31683 +#include <qsnet/kernel.h>
31684 +
31685 +#include <elan4/debug.h>
31686 +#include <elan4/types.h>
31687 +#include <elan/capability.h>
31688 +#include <elan4/intcookie.h>
31689 +
31690 +static INTCOOKIE_TABLE *intcookie_tables;
31691 +static spinlock_t      intcookie_table_lock;
31692 +
31693 +/*
31694 + * intcookie_drop_entry:
31695 + *   drop the reference to a cookie held 
31696 + *   by the cookie table
31697 + */
31698 +static void
31699 +intcookie_drop_entry (INTCOOKIE_ENTRY *ent)
31700 +{
31701 +    unsigned long flags;
31702 +
31703 +    spin_lock_irqsave (&ent->ent_lock, flags);
31704 +    if (--ent->ent_ref != 0)
31705 +    {
31706 +       ent->ent_fired = ent->ent_cookie;
31707 +       kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
31708 +
31709 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
31710 +    }
31711 +    else
31712 +    {
31713 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
31714 +
31715 +       spin_lock_destroy (&ent->ent_lock);
31716 +       kcondvar_destroy (&ent->ent_wait);
31717 +
31718 +       KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY));
31719 +    }
31720 +}
31721 +
31722 +void
31723 +intcookie_init()
31724 +{
31725 +    spin_lock_init (&intcookie_table_lock);
31726 +}
31727 +
31728 +void
31729 +intcookie_fini()
31730 +{
31731 +    spin_lock_destroy (&intcookie_table_lock);
31732 +}
31733 +
31734 +INTCOOKIE_TABLE *
31735 +intcookie_alloc_table (ELAN_CAPABILITY *cap)
31736 +{
31737 +    INTCOOKIE_TABLE *tbl, *ntbl;
31738 +    ELAN_CAPABILITY *ncap;
31739 +    
31740 +    KMEM_ZALLOC (ntbl, INTCOOKIE_TABLE *, sizeof (INTCOOKIE_TABLE), 1);
31741 +
31742 +    if (ntbl == NULL)
31743 +       return (NULL);
31744 +
31745 +    KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1);
31746 +
31747 +    if (ncap == NULL)
31748 +    {
31749 +       KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
31750 +       return (NULL);
31751 +    }
31752 +
31753 +    spin_lock (&intcookie_table_lock);
31754 +    
31755 +    for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next)
31756 +       if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext)
31757 +           break;
31758 +    
31759 +    if (tbl != NULL)
31760 +       tbl->tbl_ref++;
31761 +    else
31762 +    {
31763 +       spin_lock_init (&ntbl->tbl_lock);
31764 +
31765 +       ntbl->tbl_cap     = ncap;
31766 +       ntbl->tbl_ref     = 1;
31767 +       ntbl->tbl_entries = NULL;
31768 +       
31769 +       /* Save supplied cap */
31770 +       memcpy (ncap, cap, ELAN_CAP_SIZE(cap));
31771 +
31772 +       if ((ntbl->tbl_next = intcookie_tables) != NULL)
31773 +           intcookie_tables->tbl_prev = ntbl;
31774 +       intcookie_tables = ntbl;
31775 +       ntbl->tbl_prev = NULL;
31776 +    }
31777 +    spin_unlock (&intcookie_table_lock);
31778 +
31779 +    if (tbl == NULL)
31780 +       return (ntbl);
31781 +    else
31782 +    {
31783 +       KMEM_FREE (ntbl, sizeof (INTCOOKIE_TABLE));
31784 +       KMEM_FREE (ncap, ELAN_CAP_SIZE(cap));
31785 +       return (tbl);
31786 +    }    
31787 +}
31788 +
31789 +void
31790 +intcookie_free_table (INTCOOKIE_TABLE *tbl)
31791 +{
31792 +    INTCOOKIE_ENTRY *ent;
31793 +
31794 +    spin_lock (&intcookie_table_lock);
31795 +    if (tbl->tbl_ref > 1)
31796 +    {
31797 +       tbl->tbl_ref--;
31798 +       spin_unlock (&intcookie_table_lock);
31799 +       return;
31800 +    }
31801 +    
31802 +    if (tbl->tbl_prev)
31803 +       tbl->tbl_prev->tbl_next = tbl->tbl_next;
31804 +    else
31805 +       intcookie_tables = tbl->tbl_next;
31806 +    if (tbl->tbl_next)
31807 +       tbl->tbl_next->tbl_prev = tbl->tbl_prev;
31808 +    
31809 +    spin_unlock (&intcookie_table_lock);
31810 +    
31811 +    /* NOTE - table no longer visible to other threads
31812 +     *        no need to acquire tbl_lock */
31813 +    while ((ent = tbl->tbl_entries) != NULL)
31814 +    {
31815 +       if ((tbl->tbl_entries = ent->ent_next) != NULL)
31816 +           ent->ent_next->ent_prev = NULL;
31817 +       
31818 +       intcookie_drop_entry (ent);
31819 +    }
31820 +    spin_lock_destroy (&tbl->tbl_lock);
31821 +
31822 +    KMEM_FREE (tbl->tbl_cap, ELAN_CAP_SIZE(tbl->tbl_cap));
31823 +    KMEM_FREE (tbl, sizeof (INTCOOKIE_TABLE));
31824 +}
31825 +
31826 +int
31827 +intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
31828 +{
31829 +    INTCOOKIE_ENTRY *ent, *nent;
31830 +    unsigned long flags;
31831 +
31832 +    KMEM_ZALLOC (nent, INTCOOKIE_ENTRY *, sizeof (INTCOOKIE_ENTRY), 1);
31833 +
31834 +    if (nent == NULL)
31835 +       return (-ENOMEM);
31836 +    
31837 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
31838 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
31839 +       if (ent->ent_cookie == cookie)
31840 +           break;
31841 +
31842 +    if (ent == NULL)
31843 +    {
31844 +       kcondvar_init (&nent->ent_wait);
31845 +       spin_lock_init (&nent->ent_lock);
31846 +
31847 +       nent->ent_ref    = 1;
31848 +       nent->ent_cookie = cookie;
31849 +
31850 +       if ((nent->ent_next = tbl->tbl_entries) != NULL)
31851 +           tbl->tbl_entries->ent_prev = nent;
31852 +       tbl->tbl_entries = nent;
31853 +       nent->ent_prev = NULL;
31854 +    }
31855 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31856 +
31857 +    if (ent == NULL)
31858 +       return (0);
31859 +    else
31860 +    {
31861 +       KMEM_FREE (nent, sizeof (INTCOOKIE_ENTRY));
31862 +       return (-EINVAL);
31863 +    }
31864 +}
31865 +
31866 +int
31867 +intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
31868 +{
31869 +    INTCOOKIE_ENTRY *ent;
31870 +    unsigned long flags;
31871 +
31872 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
31873 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
31874 +       if (ent->ent_cookie == cookie)
31875 +           break;
31876 +    
31877 +    if (ent == NULL)
31878 +    {
31879 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31880 +       return (-EINVAL);
31881 +    }
31882 +
31883 +    if (ent->ent_prev == NULL)
31884 +       tbl->tbl_entries = ent->ent_next;
31885 +    else
31886 +       ent->ent_prev->ent_next = ent->ent_next;
31887 +
31888 +    if (ent->ent_next != NULL)
31889 +       ent->ent_next->ent_prev = ent->ent_prev;
31890 +    
31891 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31892 +
31893 +    intcookie_drop_entry (ent);
31894 +
31895 +    return (0);
31896 +}
31897 +
31898 +/*
31899 + * intcookie_fire_cookie:
31900 + *    fire the cookie - this is called from the event interrupt.
31901 + */
31902 +int
31903 +intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
31904 +{
31905 +    INTCOOKIE_ENTRY *ent;
31906 +    unsigned long flags;
31907 +
31908 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
31909 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
31910 +       if (ent->ent_cookie == cookie)
31911 +           break;
31912 +    
31913 +    if (ent == NULL)
31914 +    {
31915 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31916 +       return (-EINVAL);
31917 +    }
31918 +           
31919 +    spin_lock (&ent->ent_lock);
31920 +    ent->ent_fired = cookie;
31921 +    kcondvar_wakeupall (&ent->ent_wait, &ent->ent_lock);
31922 +    spin_unlock (&ent->ent_lock);
31923 +
31924 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31925 +
31926 +    return (0);
31927 +}    
31928 +
31929 +int
31930 +intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie)
31931 +{
31932 +    int res;
31933 +    INTCOOKIE_TABLE *tbl;
31934 +
31935 +    spin_lock (&intcookie_table_lock);
31936 +    
31937 +    for (tbl = intcookie_tables; tbl; tbl = tbl->tbl_next)
31938 +       if (ELAN_CAP_MATCH (tbl->tbl_cap, cap) && tbl->tbl_cap->cap_mycontext == cap->cap_mycontext)
31939 +           break;
31940 +    
31941 +    if (tbl != NULL)
31942 +       tbl->tbl_ref++;
31943 +
31944 +    spin_unlock (&intcookie_table_lock);
31945 +
31946 +    /* No matching table found */
31947 +    if (tbl == NULL)
31948 +       return (-EINVAL);
31949 +
31950 +    /* Fire the correct cookie */
31951 +    res = intcookie_fire (tbl, cookie);
31952 +
31953 +    /* Decrement reference count (and free if necessary) */
31954 +    intcookie_free_table (tbl);
31955 +
31956 +    return (res);
31957 +}
31958 +
31959 +/*
31960 + * intcookie_wait_cookie:
31961 + *    deschedule on a cookie if it has not already fired.
31962 + *    note - if the cookie is removed from the table, then
31963 + *           we free it off when we're woken up.
31964 + */
31965 +int
31966 +intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
31967 +{
31968 +    INTCOOKIE_ENTRY *ent;
31969 +    unsigned long flags;
31970 +    int res;
31971 +    
31972 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
31973 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
31974 +       if (ent->ent_cookie == cookie)
31975 +           break;
31976 +    
31977 +    if (ent == NULL)
31978 +    {
31979 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
31980 +       return (-EINVAL);
31981 +    }
31982 +
31983 +    spin_lock (&ent->ent_lock);
31984 +    spin_unlock (&tbl->tbl_lock);
31985 +
31986 +    if (ent->ent_fired != 0)
31987 +    {
31988 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
31989 +       return (0);
31990 +    }
31991 +
31992 +    ent->ent_ref++;
31993 +    kcondvar_waitsig (&ent->ent_wait, &ent->ent_lock, &flags);
31994 +    
31995 +    res = ent->ent_fired ? 0 : -EINTR;
31996 +
31997 +    if (--ent->ent_ref > 0)
31998 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
31999 +    else
32000 +    {
32001 +       spin_unlock_irqrestore (&ent->ent_lock, flags);
32002 +       
32003 +       spin_lock_destroy (&ent->ent_lock);
32004 +       kcondvar_destroy (&ent->ent_wait);
32005 +
32006 +       KMEM_FREE (ent, sizeof (INTCOOKIE_ENTRY));
32007 +    }
32008 +
32009 +    return (res);
32010 +}
32011 +
32012 +int
32013 +intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie)
32014 +{
32015 +    INTCOOKIE_ENTRY *ent;
32016 +    unsigned long flags;
32017 +
32018 +    spin_lock_irqsave (&tbl->tbl_lock, flags);
32019 +    for (ent = tbl->tbl_entries; ent; ent = ent->ent_next)
32020 +       if (ent->ent_cookie == cookie)
32021 +           break;
32022 +    
32023 +    if (ent == NULL)
32024 +    {
32025 +       spin_unlock_irqrestore (&tbl->tbl_lock, flags);
32026 +       return (-EINVAL);
32027 +    }
32028 +           
32029 +    spin_lock (&ent->ent_lock);
32030 +    ent->ent_fired = 0;
32031 +    spin_unlock (&ent->ent_lock);
32032 +
32033 +    spin_unlock_irqrestore (&tbl->tbl_lock, flags);
32034 +
32035 +    return (0);
32036 +}
32037 +
32038 +/*
32039 + * Local variables:
32040 + * c-file-style: "stroustrup"
32041 + * End:
32042 + */
32043 diff -urN clean/drivers/net/qsnet/elan4/Makefile linux-2.6.9/drivers/net/qsnet/elan4/Makefile
32044 --- clean/drivers/net/qsnet/elan4/Makefile      1969-12-31 19:00:00.000000000 -0500
32045 +++ linux-2.6.9/drivers/net/qsnet/elan4/Makefile        2005-10-10 17:47:30.000000000 -0400
32046 @@ -0,0 +1,15 @@
32047 +#
32048 +# Makefile for Quadrics QsNet
32049 +#
32050 +# Copyright (c) 2002-2004 Quadrics Ltd
32051 +#
32052 +# File: drivers/net/qsnet/elan4/Makefile
32053 +#
32054 +
32055 +
32056 +#
32057 +
32058 +obj-$(CONFIG_ELAN4)    += elan4.o
32059 +elan4-objs     := device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o
32060 +
32061 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
32062 diff -urN clean/drivers/net/qsnet/elan4/Makefile.conf linux-2.6.9/drivers/net/qsnet/elan4/Makefile.conf
32063 --- clean/drivers/net/qsnet/elan4/Makefile.conf 1969-12-31 19:00:00.000000000 -0500
32064 +++ linux-2.6.9/drivers/net/qsnet/elan4/Makefile.conf   2005-09-07 10:39:42.000000000 -0400
32065 @@ -0,0 +1,10 @@
32066 +# Flags for generating QsNet Linux Kernel Makefiles
32067 +MODNAME                =       elan4.o
32068 +MODULENAME     =       elan4
32069 +KOBJFILES      =       device.o i2c.o mmu.o sdram.o debug.o routetable.o trap.o user.o user_ddcq.o regions.o intcookie.o neterr.o device_Linux.o user_Linux.o procfs_Linux.o mmu_Linux.o
32070 +EXPORT_KOBJS   =       device.o device_Linux.o mmu.o mmu_Linux.o procfs_Linux.o routetable.o sdram.o trap.o
32071 +CONFIG_NAME    =       CONFIG_ELAN4
32072 +SGALFC         =       
32073 +# EXTRALINES START
32074 +
32075 +# EXTRALINES END
32076 diff -urN clean/drivers/net/qsnet/elan4/mmu.c linux-2.6.9/drivers/net/qsnet/elan4/mmu.c
32077 --- clean/drivers/net/qsnet/elan4/mmu.c 1969-12-31 19:00:00.000000000 -0500
32078 +++ linux-2.6.9/drivers/net/qsnet/elan4/mmu.c   2005-07-14 09:34:12.000000000 -0400
32079 @@ -0,0 +1,1552 @@
32080 +/*
32081 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
32082 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
32083 + * 
32084 + *    For licensing information please see the supplied COPYING file
32085 + *
32086 + */
32087 +
32088 +#ident "@(#)$Id: mmu.c,v 1.47.2.3 2005/07/14 13:34:12 david Exp $"
32089 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu.c,v $*/
32090 +
32091 +#include <qsnet/kernel.h>
32092 +#include <qsnet/kpte.h>
32093 +
32094 +#include <elan4/debug.h>
32095 +#include <elan4/device.h>
32096 +#include <linux/pagemap.h>
32097 +
32098 +int elan4_debug_mmu;
32099 +int elan4_mmuhash_chain_reduction     = 1;
32100 +int elan4_mmuhash_chain_end_reduce    = 0;
32101 +int elan4_mmuhash_chain_middle_reduce = 0;
32102 +int elan4_mmuhash_chain_middle_fail   = 0;
32103 +int elan4_mmuhash_shuffle_attempts    = 0;
32104 +int elan4_mmuhash_shuffle_done        = 0;
32105 +
32106 +/* Permission table - see ELAN4 MMU documentation */
32107 +u_char elan4_permtable[] =
32108 +{
32109 +   0x00, /* 0x000000 - Disable */
32110 +   0x00, /* 0x000000 - Unused  */
32111 +   0x01, /* 0x000001 - Local Data Read */
32112 +   0x03, /* 0x000011 - Local Data Write */
32113 +   0x11, /* 0x010001 - Local Read */
32114 +   0x10, /* 0x010000 - Local Execute */
32115 +   0x05, /* 0x000101 - Read Only */
32116 +   0x13, /* 0x010011 - Local Write */
32117 +   0x20, /* 0x100000 - Local Event Access */
32118 +   0x23, /* 0x100011 - Local Event Write Ac */
32119 +   0xa3, /* 1x100011 - Remote Ev Loc Write */
32120 +   0xaf, /* 1x101111 - Remote All */
32121 +   0x07, /* 0x000111 - Remote Read Only */
32122 +   0x0d, /* 0x001101 - Remote Write Only */
32123 +   0x0f, /* 0x001111 - Remote Read/Write */
32124 +   0xbf, /* 1x111111 - No Fault */
32125 +};
32126 +
32127 +u_char elan4_permreadonly[] = 
32128 +{
32129 +    PERM_Disabled,             /* PERM_Disabled */
32130 +    PERM_Disabled,             /* PERM_Unused */
32131 +    PERM_LocDataRead,          /* PERM_LocDataRead */
32132 +    PERM_LocDataRead,          /* PERM_LocDataWrite */
32133 +    PERM_LocRead,              /* PERM_LocRead */
32134 +    PERM_LocExecute,           /* PERM_LocExecute */
32135 +    PERM_ReadOnly,             /* PERM_ReadOnly */
32136 +    PERM_LocRead,              /* PERM_LocWrite */
32137 +    PERM_LocEventOnly,         /* PERM_LocEventOnly */
32138 +    PERM_LocDataRead,          /* PERM_LocEventWrite */
32139 +    PERM_LocDataRead,          /* PERM_RemoteEvent */
32140 +    PERM_ReadOnly,             /* PERM_RemoteAll */
32141 +    PERM_RemoteReadOnly,       /* PERM_RemoteReadOnly */
32142 +    PERM_ReadOnly,             /* PERM_RemoteWriteLocRead */
32143 +    PERM_ReadOnly,             /* PERM_DataReadWrite */
32144 +    PERM_ReadOnly,             /* PERM_NoFault */
32145 +};
32146 +
32147 +static void
32148 +elan4mmu_synctag (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx)
32149 +{
32150 +    E4_uint64 value = (he->he_tag[tagidx] & HE_TAG_VALID) ? he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK) : INVALID_CONTEXT;
32151 +    
32152 +    if (he->he_next)
32153 +       value |= ((tagidx == 0) ? 
32154 +                 ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) :
32155 +                 ((he->he_next->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK));
32156 +    else if (tagidx == 0)
32157 +       value |= TAG_CHAINPTR_30TO19_MASK;
32158 +    
32159 +    MPRINTF (DBG_DEVICE, 4, "elan4mmu_synctag: he=%p tagidx=%d he->he_tag=%llx -> value=%llx\n", he, tagidx, he->he_tag[tagidx], value);
32160 +
32161 +    elan4_sdram_writeq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx), value);
32162 +}
32163 +
32164 +static void
32165 +elan4mmu_chain_hents (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *phe, ELAN4_HASH_ENTRY *he)
32166 +{
32167 +    ASSERT ((elan4_sdram_readq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0)) & TAG_CHAINPTR_30TO19_MASK) == TAG_CHAINPTR_30TO19_MASK);
32168 +
32169 +    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(1),
32170 +                       ((phe->he_tag[1] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry << TAG_CHAINPTR_LOW_SHIFT) & TAG_CHAINPTR_18TO6_MASK)));
32171 +    elan4_sdram_writeq (dev, phe->he_entry + E4MMU_TAG_OFFSET(0),
32172 +                       ((phe->he_tag[0] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK)) | ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK)));
32173 +}
32174 +
32175 +static void
32176 +elan4mmu_writepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx, E4_uint64 value)
32177 +{
32178 +    /*
32179 +     * NOTE - we can only change a valid PTE if we're upgrading it's permissions,
32180 +     * any other changes should have invalidated it first. */
32181 +
32182 +    MPRINTF (DBG_DEVICE, 4, "elan4mmu_writepte: he=%p tagidx=%d pteidx=%x value=%llx\n", he, tagidx, pteidx, (unsigned long long) value);
32183 +
32184 +    if (pteidx == 3)
32185 +    {
32186 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx), (value >> 16) & 0xFFFF);
32187 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx), (value >> 32) & 0xFFFF);
32188 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), (value >> 0)  & 0xFFFF);
32189 +    }
32190 +    else
32191 +    {
32192 +       elan4_sdram_writew (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx), (value >> 32) & 0xFFFF);
32193 +       elan4_sdram_writel (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), value & 0xFFFFFFFF);
32194 +    }
32195 +}
32196 +
32197 +static void
32198 +elan4mmu_invalidatepte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
32199 +{
32200 +    if (pteidx == 3)
32201 +       elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx), PTE_SetPerm (PERM_Disabled));
32202 +    else
32203 +       elan4_sdram_writeb (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx), PTE_SetPerm (PERM_Disabled));
32204 +}
32205 +
32206 +static E4_uint64
32207 +elan4mmu_readpte (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int tagidx, int pteidx)
32208 +{
32209 +    if (pteidx == 3)
32210 +       return (((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD0_OFFSET(tagidx)) << 0)  |
32211 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD1_OFFSET(tagidx)) << 16) |
32212 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE3_WORD2_OFFSET(tagidx)) << 32));
32213 +    else
32214 +       return ((E4_uint64) elan4_sdram_readl (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, pteidx)) |
32215 +               ((E4_uint64) elan4_sdram_readw (dev, he->he_entry + E4MMU_PTE_HIGH_OFFSET(tagidx, pteidx)) << 32));
32216 +}
32217 +
32218 +
32219 +void
32220 +elan4mmu_flush_tlb (ELAN4_DEV *dev)
32221 +{
32222 +    PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
32223 +
32224 +    while (read_reg64 (dev, SysControlReg) & CONT_TLB_FLUSH)
32225 +       DELAY (1);
32226 +}
32227 +
32228 +/*
32229 + * elanmmu_flush_tlb_hash - this flushes the hash copy entries and the elan
32230 + * tlb.  However after the write to the hash copy entry if the elan was
32231 + * in the process of walking, then it could write the hash copy with a valid
32232 + * entry which we had just invalidated. However once we've seen the tlb flushed
32233 + * then if the walk engine had done a write - then we need to invaldate the
32234 + * hash copy entries again and reflush the tlb.
32235 + *
32236 + * If we're invalidating a lot of hash blocks, then the chances are that the
32237 + * walk engine will perform a write - so we flush the tlb first, then invalidate
32238 + * the hash copy entries, then flush the tlb again.
32239 + */
32240 +static void
32241 +elan4mmu_flush_tlb_hash (ELAN4_DEV *dev, int tbl, unsigned baseidx, unsigned topidx)
32242 +{
32243 +    int       notmany = (abs(topidx - baseidx) < 5) ? 1 : 0;
32244 +    int       hashidx;
32245 +    E4_uint32 reg;
32246 +
32247 +    if (notmany)
32248 +       PULSE_SYSCONTROL (dev, CONT_CLEAR_WALK_WROTE_TABLES);
32249 +    else
32250 +       elan4mmu_flush_tlb(dev);
32251 +
32252 +    do {
32253 +       for (hashidx = baseidx; hashidx <= topidx; hashidx++)
32254 +           if (dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_COPY)
32255 +           {
32256 +               ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[0] & HE_TAG_VALID) == 0);
32257 +               ASSERT ((dev->dev_mmuhash[tbl][hashidx].he_tag[1] & HE_TAG_VALID) == 0);
32258 +
32259 +               elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 0);
32260 +               elan4mmu_synctag (dev, &dev->dev_mmuhash[tbl][hashidx], 1);
32261 +           }
32262 +       
32263 +       PULSE_SYSCONTROL (dev, CONT_TLB_FLUSH);
32264 +       
32265 +       while ((reg = read_reg64 (dev, SysControlReg)) & CONT_TLB_FLUSH)
32266 +           DELAY (1);
32267 +       
32268 +    } while (notmany-- && (reg & CONT_CLEAR_WALK_WROTE_TABLES) != 0);
32269 +}
32270 +
32271 +void
32272 +elan4mmu_display_hent (ELAN4_DEV *dev, ELAN4_HASH_ENTRY *he, int hashidx)
32273 +{
32274 +    int tagidx;
32275 +
32276 +    elan4_debugf (DBG_DEVICE, DBG_MMU, "elan4mmu_display_hent: hashidx=%d he=%p entry at %lx\n", hashidx, he, he->he_entry);
32277 +    elan4_debugf (DBG_DEVICE, DBG_MMU, "                       next=%p prev=%p chain=%p,%p\n", he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1]);
32278 +    for (tagidx = 0; tagidx < 2; tagidx++)
32279 +    {
32280 +       E4_uint64 tag  = elan4_sdram_readq (dev, he->he_entry + E4MMU_TAG_OFFSET(tagidx));
32281 +       E4_uint64 pte0 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 0));
32282 +       E4_uint64 pte1 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 1));
32283 +       E4_uint64 pte2 = elan4_sdram_readq (dev, he->he_entry + E4MMU_PTE_LOW_OFFSET(tagidx, 2));
32284 +       E4_uint64 pte3 = ((pte0 >> 48) | (pte1 >> 32) | (pte2 >> 16));
32285 +       
32286 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Tag %d (%llx,%08x) context=%04x vaddr=%llx\n", tagidx, he->he_tag[tagidx], he->he_pte[tagidx], (int) (tag & TAG_CONTEXT_MASK), (tag & TAG_ADDRESS_MASK));
32287 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 0 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte0 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
32288 +                     (int) (pte0 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte0 & PTE_TYPE_MASK), (pte0 & PTE_MOD_MASK) ? " mod" : "", (pte0 & PTE_REF_MASK) ? " ref" : "");
32289 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 1 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte1 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
32290 +                     (int) (pte1 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte1 & PTE_TYPE_MASK), (pte1 & PTE_MOD_MASK) ? " mod" : "", (pte1 & PTE_REF_MASK) ? " ref" : "");
32291 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 2 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte2 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
32292 +                     (int) (pte2 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte2 & PTE_TYPE_MASK), (pte2 & PTE_MOD_MASK) ? " mod" : "", (pte2 & PTE_REF_MASK) ? " ref" : "");
32293 +       elan4_debugf (DBG_DEVICE, DBG_MMU, "                       Pte 3 - PPN=%llx PERM=%x TYPE=%x%s%s\n", (pte3 & PTE_PPN_MASK) >> PTE_PPN_SHIFT, 
32294 +                     (int) (pte3 & PTE_PERM_MASK) >> PTE_PERM_SHIFT, (int)(pte3 & PTE_TYPE_MASK), (pte3 & PTE_MOD_MASK) ? " mod" : "", (pte3 & PTE_REF_MASK) ? " ref" : "");
32295 +    }
32296 +}
32297 +
32298 +static __inline__ ELAN4_HASH_ENTRY *
32299 +he_ctxt_next (ELAN4_HASH_ENTRY *he, int ctxnum)
32300 +{
32301 +    return ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum) ? he->he_chain[0] : he->he_chain[1];
32302 +}
32303 +
32304 +static __inline__ ELAN4_HASH_ENTRY *
32305 +he_ctxt_unlink (ELAN4_CTXT *ctxt, int tbl, int hashidx, ELAN4_HASH_ENTRY *prevhe, ELAN4_HASH_ENTRY *he, ELAN4_HASH_ENTRY *next)
32306 +{
32307 +    /* Check whether either tag is in use by this context */
32308 +    if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
32309 +       return he;
32310 +
32311 +    if (prevhe == NULL)
32312 +       ctxt->ctxt_mmuhash[tbl][hashidx] = next;
32313 +    else
32314 +    {
32315 +       /* previous he, ensure that both chain pointers are changed if this ctxt is using both tags */
32316 +       ASSERT ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num || (prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num);
32317 +
32318 +       if ((prevhe->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
32319 +           prevhe->he_chain[0] = next;
32320 +       if ((prevhe->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
32321 +           prevhe->he_chain[1] = next;
32322 +    }
32323 +
32324 +    return prevhe;
32325 +}
32326 +
32327 +void
32328 +elan4mmu_display (ELAN4_CTXT *ctxt, int tbl, const char *tag)
32329 +{
32330 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
32331 +    ELAN4_HASH_ENTRY *he;
32332 +    int hashidx;
32333 +
32334 +    for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
32335 +       for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxt->ctxt_num))
32336 +       {
32337 +           elan4_debugf (DBG_DEVICE, DBG_MMU, "%s: hashidx=%d he=%p tags <%llx,%llx>\n", tag, hashidx, he,
32338 +                         (he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0,
32339 +                         (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ? E4MMU_TAG2VADDR (he->he_tag[1], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1) : 0);
32340 +           elan4mmu_display_hent (dev, he, hashidx);
32341 +       }
32342 +}
32343 +static ELAN4_HASH_ENTRY *
32344 +elan4mmu_find_next_free (ELAN4_HASH_ENTRY *he)
32345 +{
32346 +    /* the current one could be free */        
32347 +    /* return NULL if no free one    */
32348 +    while ( he ) 
32349 +    {
32350 +       if ( ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) || ((he->he_tag[1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)) 
32351 +           return he;
32352 +       he = he->he_next;
32353 +    }
32354 +    return (NULL);
32355 +}
32356 +static ELAN4_HASH_ENTRY *
32357 +elan4mmu_alloc_hent (ELAN4_DEV *dev, int tbl, int hashidx, E4_uint64 newtag, int *tagidx)
32358 +{
32359 +    ELAN4_HASH_ENTRY *he, *phe;
32360 +    unsigned long flags;
32361 +    int i;
32362 +
32363 +    spin_lock_irqsave (&dev->dev_mmulock, flags);
32364 +
32365 +    /* see if there are any partial free blocks */
32366 +    if ((he = elan4mmu_find_next_free (&dev->dev_mmuhash[tbl][hashidx])) != NULL)
32367 +    {
32368 +       *tagidx = ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) ? 0 : 1;
32369 +       
32370 +       MPRINTF (DBG_DEVICE, 3, "elan4mmu_alloc_hent: allocate he=%p idx=%d%s\n", he, *tagidx, (he == &dev->dev_mmuhash[tbl][hashidx]) ? " hash-block" : "");
32371 +       
32372 +       he->he_tag[*tagidx] = newtag | HE_TAG_VALID;
32373 +
32374 +       elan4mmu_synctag (dev, he, *tagidx);
32375 +               
32376 +       spin_unlock_irqrestore (&dev->dev_mmulock, flags);
32377 +       return (he);
32378 +    }
32379 +    
32380 +    if ((he = dev->dev_mmufreelist) != NULL)
32381 +       dev->dev_mmufreelist = he->he_next;
32382 +    else
32383 +    {
32384 +       ELAN4_HASH_CHUNK *hc;
32385 +       sdramaddr_t       entry;
32386 +
32387 +       KMEM_ALLOC (hc, ELAN4_HASH_CHUNK *, sizeof (ELAN4_HASH_CHUNK), 0);
32388 +       
32389 +       if (hc == NULL)
32390 +       {
32391 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
32392 +           return ((ELAN4_HASH_ENTRY *) NULL);
32393 +       }
32394 +       
32395 +       if ((entry = elan4_sdram_alloc (dev, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS)) == (sdramaddr_t) 0)
32396 +       {
32397 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
32398 +
32399 +           KMEM_FREE (hc, sizeof (ELAN4_HASH_CHUNK));
32400 +           return ((ELAN4_HASH_ENTRY *) NULL);
32401 +       }
32402 +
32403 +       list_add_tail (&hc->hc_link, &dev->dev_hc_list);
32404 +
32405 +       elan4_sdram_zeroq_sdram (dev, entry, sizeof (E4_HashTableEntry) * ELAN4_HENT_CHUNKS);
32406 +
32407 +       /* now initialise all chunks and chain all but the first onto the freelist */
32408 +       for (i = 0; i < ELAN4_HENT_CHUNKS; i++, entry += sizeof (E4_HashTableEntry))
32409 +       {
32410 +           hc->hc_hents[i].he_entry = entry;
32411 +
32412 +           if (i == 0)
32413 +               he = &hc->hc_hents[0];
32414 +           else
32415 +           {
32416 +               hc->hc_hents[i].he_next = dev->dev_mmufreelist;
32417 +               dev->dev_mmufreelist = &hc->hc_hents[i];
32418 +           }
32419 +       }
32420 +    }
32421 +
32422 +    /* Initialise hash entry, using slot 0 */
32423 +    *tagidx = 0;
32424 +
32425 +    he->he_next     = NULL;
32426 +    he->he_prev     = NULL;
32427 +    he->he_chain[0] = NULL;
32428 +    he->he_chain[1] = NULL;
32429 +    he->he_tag[0]   = newtag | HE_TAG_VALID;
32430 +    he->he_tag[1]   = E4MMU_TAG(0, INVALID_CONTEXT);
32431 +    he->he_pte[0]   = 0;
32432 +    he->he_pte[1]   = 0;
32433 +    
32434 +    elan4mmu_synctag (dev, he, 0);
32435 +    
32436 +    /* add to mmuhash lists */
32437 +    for (phe = &dev->dev_mmuhash[tbl][hashidx]; phe->he_next; phe = phe->he_next)
32438 +       ;
32439 +    phe->he_next = he;
32440 +    he->he_prev  = phe;
32441 +    he->he_next  = NULL;
32442 +    
32443 +    /* finally chain the hash block into the hash tables */
32444 +    elan4mmu_chain_hents (dev, phe, he);
32445 +    
32446 +    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
32447 +    return (he);
32448 +}
32449 +void
32450 +elan4mmu_set_shuffle(ELAN4_CTXT *ctxt, int tbl, int hashidx)
32451 +{
32452 +    int i;
32453 +    
32454 +    for(i=0; (i<ELAN4_CTXT_MAX_SHUFFLE) && (ctxt->shuffle[tbl][i]>=0) && (ctxt->shuffle[tbl][i]!=hashidx); i++)
32455 +       ;
32456 +    if (i<ELAN4_CTXT_MAX_SHUFFLE) {
32457 +       ctxt->shuffle_needed[tbl] = 1;
32458 +       ctxt->shuffle[tbl][i]     = hashidx;
32459 +    }
32460 +}
32461 +static int 
32462 +elan4mmm_try_to_free_hent(ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he)
32463 +{   
32464 +    ELAN4_HASH_ENTRY *prev;
32465 +    int               t;
32466 +    ELAN4_CTXT       *ctxt;
32467 +
32468 +    
32469 +    while (he) {
32470 +       if ( ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)
32471 +            && ((he->he_tag[1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)) {
32472 +           /* Both tags are now free */
32473 +           
32474 +           if (he != &dev->dev_mmuhash[tbl][hashidx]) {
32475 +               /* its not the hash entry block */
32476 +               
32477 +               if ( he->he_next == NULL ) {
32478 +                   /* its the end one so just remove it */
32479 +                   prev = he->he_prev;
32480 +                   
32481 +                   /* make the previous entry the end one and sync it */
32482 +                   prev->he_next = NULL;
32483 +                   elan4mmu_synctag (dev, prev, 0);
32484 +                   
32485 +                   /* make sure the elan had finished traversing the list */
32486 +                   elan4mmu_flush_tlb(dev);
32487 +                   
32488 +                   /* now we have a free he in our hands put it onto the free list */
32489 +                   he->he_next = dev->dev_mmufreelist;
32490 +                   dev->dev_mmufreelist = he;
32491 +                   
32492 +                   elan4_mmuhash_chain_end_reduce++;
32493 +                   
32494 +                   he = prev;
32495 +               } else {
32496 +                   /* can only remove if my he_entry high bits = next he_entry high bits. */
32497 +
32498 +                   if (((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) == 
32499 +                       ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))
32500 +                   {
32501 +                       prev = he->he_prev;
32502 +                       
32503 +                       /* make the previous entry jump over us and sync it */
32504 +                       prev->he_next = he->he_next;
32505 +                       elan4mmu_synctag (dev, prev, 1);
32506 +                       he->he_next->he_prev = prev;
32507 +                       
32508 +                       /* make sure the elan had finished traversing the list */
32509 +                       elan4mmu_flush_tlb(dev);
32510 +                       
32511 +                       /* now we have a free he in our hands put it onto the free list */
32512 +                       he->he_next = dev->dev_mmufreelist;
32513 +                       dev->dev_mmufreelist = he;
32514 +                       
32515 +                       elan4_mmuhash_chain_middle_reduce++;
32516 +                       
32517 +                       he = prev;      
32518 +                       
32519 +                   } else {
32520 +                       elan4_mmuhash_chain_middle_fail++;
32521 +                       /* at this point we wanted to remove an entry but we can't because this would mean
32522 +                          changing the high bits of the previous pointer.
32523 +                          
32524 +                          It is assumed that this is a fairly rare occurrence.
32525 +                          
32526 +                          The plan is to tell the ctxt's in the end entry (which can always be removed)
32527 +                          to shuffle down. They need to do this as it's guarded by a ctxt lock I don't have.
32528 +                          
32529 +                          Note the ctxt entry might not exist by the time they get round to shuffling,
32530 +                          and/or the empty node we want to shuffle to might have gone, so there is no
32531 +                          value in storing info about what you want to shuffle. 
32532 +                          
32533 +                          Just tell the ctxt to shuffle this hashidx. Rather than allocate a block 
32534 +                          of memory the size of the number of hashidx's to handle this we will use 
32535 +                          a short array, assuming it's rarely going to fill. If it does, all the ctxt's
32536 +                          hashidx's are shuffled (as it's really unlikely to happen).
32537 +                       */
32538 +                       
32539 +                       /* mark all up to the end as needing shuffle */
32540 +                       while (he->he_next) {
32541 +                           for(t=0;t<2;t++) {
32542 +                               if ((he->he_tag[t] & TAG_CONTEXT_MASK)!=INVALID_CONTEXT) {
32543 +                                   ctxt = elan4_localctxt (dev, (he->he_tag[t] & TAG_CONTEXT_MASK));
32544 +                                   if (ctxt) {
32545 +                                       ASSERT(ctxt->ctxt_ops);
32546 +                                       if (ctxt->ctxt_ops->op_need_shuffle)
32547 +                                           ctxt->ctxt_ops->op_need_shuffle (ctxt, tbl, hashidx);
32548 +                                   }
32549 +                               }
32550 +                           }
32551 +                           he = he->he_next;
32552 +                       }
32553 +                       
32554 +                       he = NULL;
32555 +                   }
32556 +               }
32557 +           } else he = NULL;
32558 +       } else he = NULL;
32559 +    }
32560 +    return (0);
32561 +}
32562 +static void
32563 +elan4mmu_free_hent_nolock (ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he, int tagidx)
32564 +{ 
32565 +    /* assumes some one has the mmulock before this is called */
32566 +    int pteidx;
32567 +
32568 +    /* Invalidate the tag, and zero all ptes */
32569 +    for (pteidx = 0; pteidx < 4; pteidx++)
32570 +       if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID)
32571 +           elan4mmu_writepte (dev, he, tagidx, pteidx, 0);
32572 +
32573 +    he->he_tag[tagidx] = E4MMU_TAG(0, INVALID_CONTEXT);
32574 +    he->he_pte[tagidx] = 0;
32575 +
32576 +    elan4mmu_synctag (dev, he, tagidx);
32577 +
32578 +    if ((he->he_tag[tagidx^1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) /* Both tags are now free */
32579 +    {
32580 +       if (he == &dev->dev_mmuhash[tbl][hashidx])              /* it's the hash block entry */
32581 +       {                                                       /* so as it's already on the freelist */
32582 +           he->he_chain[tagidx] = he->he_chain[tagidx^1];      /* just copy its chain pointers */
32583 +
32584 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free but hashblk\n", tbl, hashidx, tagidx, he);
32585 +       }
32586 +       else
32587 +       {
32588 +           MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => all free\n", tbl, hashidx, tagidx, he);
32589 +           
32590 +           /* remove it from the hash table, and place back on the anonymous freelist */
32591 +           he->he_chain[tagidx] = he->he_chain[tagidx^1];
32592 +
32593 +           if (elan4_mmuhash_chain_reduction) {
32594 +               elan4mmm_try_to_free_hent (dev, tbl, hashidx, he);
32595 +           }
32596 +       }
32597 +    }
32598 +    else
32599 +    {
32600 +       /* Other tag still in use */
32601 +       MPRINTF (DBG_DEVICE, 3, "elan4mmu_free_hent: tbl=%d hashidx=%x tagidx=%d he=%p => other tag in use\n", tbl, hashidx, tagidx, he);
32602 +    }
32603 +}
32604 +static void
32605 +elan4mmu_free_hent (ELAN4_DEV *dev, int tbl, int hashidx, ELAN4_HASH_ENTRY *he, int tagidx)
32606 +{
32607 +    unsigned long flags;
32608 +
32609 +    spin_lock_irqsave (&dev->dev_mmulock, flags);
32610 +    elan4mmu_free_hent_nolock (dev, tbl, hashidx, he, tagidx);
32611 +    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
32612 +}
32613 +void
32614 +print_dev(ELAN4_DEV *dev, int tbl, int index)
32615 +{
32616 +    ELAN4_HASH_ENTRY *he = &dev->dev_mmuhash[tbl][index];
32617 +    int count=0;
32618 +
32619 +    while (he) {
32620 +       qsnet_debugf(1,"(dev) he%s %p  entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx,0x%016llx) pte(0x%010x,0x%010x)%s\n",
32621 +              (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he, 
32622 +              he->he_entry, he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1],
32623 +              (long long)he->he_tag[0], (long long)he->he_tag[1], he->he_pte[0], he->he_pte[1],
32624 +              (he->he_next)? (( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) == 
32625 +                                ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
32626 +       he = he->he_next;
32627 +
32628 +       if (count++ > 1000) {
32629 +           qsnet_debugf(1,"List Failed\n");
32630 +           he = NULL;  
32631 +           elan4_mmuhash_chain_reduction = 0;
32632 +       }
32633 +    }
32634 +}
32635 +void
32636 +print_ctx(ELAN4_CTXT *ctxt,  int tbl, int index)
32637 +{
32638 +    ELAN4_HASH_ENTRY *he = ctxt->ctxt_mmuhash[tbl][index];
32639 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
32640 +    int count=0;
32641 +
32642 +    while (he) {    
32643 +       qsnet_debugf(1,"(%04d) he%s %p  entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx,0x%016llx) pte(0x%010x,0x%010x)%s\n",
32644 +              ctxt->ctxt_num, (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he,
32645 +              he->he_entry, he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1],
32646 +              (long long)he->he_tag[0], (long long)he->he_tag[1], he->he_pte[0], he->he_pte[1],  
32647 +              (he->he_next)?(( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) == 
32648 +                               ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
32649 +
32650 +       if (((he->he_tag[0] & TAG_CONTEXT_MASK) !=  ctxt->ctxt_num) && ((he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num)) {
32651 +           qsnet_debugf(1,"(%04d) neither tag is us so stopping 0x%llx 0x%llx \n", ctxt->ctxt_num, (long long)(he->he_tag[0] & TAG_CONTEXT_MASK), (long long)(he->he_tag[1] & TAG_CONTEXT_MASK));
32652 +           he = NULL;
32653 +       } else {
32654 +           he =  he_ctxt_next (he, ctxt->ctxt_num);
32655 +       }
32656 +       if (count++ > 1000) {
32657 +           qsnet_debugf(1,"List Failed\n"); 
32658 +           he = NULL; 
32659 +           elan4_mmuhash_chain_reduction = 0;
32660 +       }
32661 +    }
32662 +}
32663 +int
32664 +dev_count(ELAN4_DEV *dev, int tbl, int index, int ctxt_num)
32665 +{
32666 +    ELAN4_HASH_ENTRY *he = &dev->dev_mmuhash[tbl][index];
32667 +    int               count = 0;
32668 +    while (he) {
32669 +
32670 +       if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt_num) count++;
32671 +       if ((he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt_num) count++;
32672 +
32673 +       he = he->he_next;
32674 +    }
32675 +    return (count);
32676 +}
32677 +int
32678 +ctx_count(ELAN4_CTXT *ctxt,  int tbl, int index)
32679 +{
32680 +    ELAN4_HASH_ENTRY *he = ctxt->ctxt_mmuhash[tbl][index];
32681 +    int count = 0;
32682 +    while (he) {    
32683 +
32684 +       if ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num) count++;
32685 +       if ((he->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num) count++;
32686 +
32687 +       if (((he->he_tag[0] & TAG_CONTEXT_MASK) !=  ctxt->ctxt_num) && ((he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num)) {
32688 +           he = NULL;
32689 +       } else {
32690 +           he =  he_ctxt_next (he, ctxt->ctxt_num);
32691 +       }
32692 +    }
32693 +    return (count);
32694 +}
32695 +void 
32696 +elan4mmu_shuffle_up (ELAN4_CTXT *ctxt, int tbl, int hashidx)
32697 +{
32698 +    ELAN4_DEV        *dev       = ctxt->ctxt_dev;
32699 +    ELAN4_HASH_ENTRY *ctxt_prev = NULL;
32700 +    ELAN4_HASH_ENTRY *ctxt_he   = NULL;
32701 +    ELAN4_HASH_ENTRY *ctxt_next = NULL;
32702 +    ELAN4_HASH_ENTRY *hole;
32703 +    ELAN4_HASH_ENTRY *tmp;
32704 +    ELAN4_HASH_ENTRY *ctxt_remember;
32705 +    int               hole_tagidx;
32706 +    int               ctxt_tagidx;
32707 +    int               pteidx;
32708 +    E4_uint64         value;
32709 +
32710 +    elan4_mmuhash_shuffle_attempts++;
32711 +
32712 +    /* find the first hole */
32713 +    hole = elan4mmu_find_next_free ( &dev->dev_mmuhash[tbl][hashidx] ) ;
32714 +    if (hole == NULL) return;
32715 +
32716 +    /* find the last ctx */
32717 +    /* 1 move tmp to the end */
32718 +    for(ctxt_he = hole; (ctxt_he->he_next != NULL); ctxt_he = ctxt_he->he_next)
32719 +       ;
32720 +    /* 2 move tmp back looking for either hole or ctxt */
32721 +    while ((ctxt_he != hole)
32722 +          && ((ctxt_he->he_tag[0] & TAG_CONTEXT_MASK) != ctxt->ctxt_num ) 
32723 +          && ((ctxt_he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num ))
32724 +       ctxt_he = ctxt_he->he_prev;
32725 +
32726 +    /* if we found a hole then there is no useful swap to do */
32727 +    if (ctxt_he == hole) return;
32728 +
32729 +    while (ctxt_he != hole) {
32730 +       /***********/
32731 +       /* do swap */
32732 +       /***********/
32733 +       elan4_mmuhash_shuffle_done++;
32734 +
32735 +       /* now we can move this ctxt's entry in ctxt_he to hole */
32736 +       if ( (hole->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT ) hole_tagidx = 0;
32737 +       else                                                           hole_tagidx = 1;
32738 +
32739 +       if ( (ctxt_he->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num ) ctxt_tagidx = 0;
32740 +       else                                                             ctxt_tagidx = 1;
32741 +
32742 +       /* find us in list do this now before list is played with */
32743 +       ctxt_prev = NULL;
32744 +       tmp       = ctxt->ctxt_mmuhash[tbl][hashidx];
32745 +       ctxt_next = he_ctxt_next (tmp, ctxt->ctxt_num);
32746 +       while (tmp != ctxt_he) {
32747 +           ctxt_prev = tmp;
32748 +           tmp       = ctxt_next;
32749 +           ctxt_next = he_ctxt_next (tmp, ctxt->ctxt_num);
32750 +       }
32751 +
32752 +       /* copy over software chain and pte */
32753 +       hole->he_pte[hole_tagidx] = ctxt_he->he_pte[ctxt_tagidx];
32754 +
32755 +       /* copy over the valid elan pte's */
32756 +       /* not preserving the modified and reference bits */
32757 +       for (pteidx = 0; pteidx <= 3; pteidx++)
32758 +           if (HE_GET_PTE(hole, hole_tagidx, pteidx))
32759 +           {
32760 +               /* copy the pg_page and pg_dma_addr */
32761 +               hole->he_pg[hole_tagidx][pteidx] = ctxt_he->he_pg[ctxt_tagidx][pteidx];
32762 +
32763 +               value = elan4mmu_readpte (dev, ctxt_he, ctxt_tagidx, pteidx);
32764 +               elan4mmu_writepte (dev, hole, hole_tagidx, pteidx, value);
32765 +           }       
32766 +
32767 +       /* copy over tag and sync it*/
32768 +       hole->he_tag[hole_tagidx] = ctxt_he->he_tag[ctxt_tagidx];
32769 +       elan4mmu_synctag (dev, hole, hole_tagidx);
32770 +         
32771 +       /* before we remove it check if its going to get free'd */
32772 +       if ((ctxt_he->he_tag[ctxt_tagidx ^ 1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT) {
32773 +           /* this is ok as the existence of a hole guards against falling off the front of the list */
32774 +           ctxt_remember = ctxt_he->he_prev;
32775 +       } else ctxt_remember = ctxt_he;
32776 +
32777 +       /* invalidate pte and tag */
32778 +       ctxt_he->he_tag[ctxt_tagidx] = E4MMU_TAG(0, INVALID_CONTEXT);
32779 +       elan4mmu_synctag (dev, ctxt_he, ctxt_tagidx);
32780 +
32781 +       /* should ensure that any walk in progress has completed */
32782 +       elan4mmu_flush_tlb(dev);
32783 +
32784 +       for (pteidx = 0; pteidx <= 3; pteidx++)
32785 +           if (HE_GET_PTE(ctxt_he, ctxt_tagidx, pteidx))
32786 +               elan4mmu_invalidatepte (dev, ctxt_he, ctxt_tagidx, pteidx);
32787 +
32788 +       /* remove from the source end */
32789 +       elan4mmu_free_hent_nolock (dev, tbl, hashidx, ctxt_he, ctxt_tagidx);
32790 +
32791 +
32792 +       /* sort out the ctxt links */
32793 +       /* first the hole */
32794 +       if ((hole->he_tag[hole_tagidx^1]& TAG_CONTEXT_MASK) ==  ctxt->ctxt_num) {
32795 +           /* already in the list */
32796 +           hole->he_chain[hole_tagidx] = hole->he_chain[hole_tagidx^1];            
32797 +       } else {
32798 +           /* hole not in list */
32799 +           hole->he_chain[hole_tagidx] = ctxt->ctxt_mmuhash[tbl][hashidx];
32800 +           ctxt->ctxt_mmuhash[tbl][hashidx] = hole;
32801 +
32802 +           /* this is one i missed for a bit                                */
32803 +           /* if we put the hole onto the list it might become the previous */
32804 +           if (ctxt_prev == NULL) ctxt_prev = hole;
32805 +       }
32806 +
32807 +       /* second remove the old one */
32808 +       if ((ctxt_he->he_tag[ctxt_tagidx^1]& TAG_CONTEXT_MASK) ==  ctxt->ctxt_num) {
32809 +           /* Nothing to do as still in list as other tag is ours */ 
32810 +       } else {
32811 +           if (ctxt_prev == NULL) {
32812 +               ctxt->ctxt_mmuhash[tbl][hashidx] = ctxt_next;
32813 +           } else {
32814 +               if ((ctxt_prev->he_tag[0] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
32815 +                   ctxt_prev->he_chain[0] = ctxt_next;
32816 +               if ((ctxt_prev->he_tag[1] & TAG_CONTEXT_MASK) == ctxt->ctxt_num)
32817 +                   ctxt_prev->he_chain[1] = ctxt_next;
32818 +           }
32819 +       }
32820 +
32821 +       /***********/
32822 +       /* move on */
32823 +       /***********/
32824 +       ctxt_he = ctxt_remember;
32825 +
32826 +       /* the hole is still a valid place to start looking                        */
32827 +       /* can't use elan4mmu_find_next_free as we need to stop if we pass ctxt_he */
32828 +       if (hole == ctxt_he) return;
32829 +       while ( hole
32830 +               && ((hole->he_tag[0] & TAG_CONTEXT_MASK) != INVALID_CONTEXT)
32831 +               && ((hole->he_tag[1] & TAG_CONTEXT_MASK) != INVALID_CONTEXT)) 
32832 +       {
32833 +           hole = hole->he_next;
32834 +           if (hole == ctxt_he) return;
32835 +       }
32836 +       if (hole == NULL) return;
32837 +       
32838 +       /* start looking for the next ctxt */
32839 +       while ((ctxt_he != hole)
32840 +              && ((ctxt_he->he_tag[0] & TAG_CONTEXT_MASK) != ctxt->ctxt_num ) 
32841 +              && ((ctxt_he->he_tag[1] & TAG_CONTEXT_MASK) != ctxt->ctxt_num )) 
32842 +           ctxt_he = ctxt_he->he_prev; 
32843 +    }
32844 +
32845 +    /* if we found a hole then there is no useful swap to do */
32846 +    return;
32847 +}
32848 +void
32849 +elan4mmu_do_shuffle (ELAN4_CTXT *ctxt, int tbl)
32850 +{
32851 +    int               i;
32852 +    ELAN4_DEV        *dev;
32853 +    unsigned long     flags;
32854 +
32855 +    if (!ctxt) return;
32856 +    dev = ctxt->ctxt_dev; 
32857 +
32858 +    spin_lock (&ctxt->ctxt_mmulock);
32859 +
32860 +    for(i=0; i < ELAN4_CTXT_MAX_SHUFFLE ;i++) 
32861 +    {
32862 +       if (ctxt->shuffle[tbl][i] != -1) 
32863 +       {
32864 +           spin_lock_irqsave (&dev->dev_mmulock, flags);
32865 +
32866 +           elan4mmu_shuffle_up(ctxt, tbl, ctxt->shuffle[tbl][i]);
32867 +           ctxt->shuffle[tbl][i] = -1;
32868 +           
32869 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
32870 +       }
32871 +    }
32872 +
32873 +    ctxt->shuffle_needed[tbl] = 0;
32874 +    
32875 +    spin_unlock (&ctxt->ctxt_mmulock);
32876 +}
32877 +
32878 +ELAN4_HASH_ENTRY *
32879 +elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp)
32880 +{
32881 +    ELAN4_DEV        *dev     = ctxt->ctxt_dev;
32882 +    unsigned         ctxnum  = ctxt->ctxt_num;
32883 +    unsigned          hashidx = E4MMU_HASH_INDEX (ctxnum, vaddr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
32884 +    E4_uint64         newtag  = E4MMU_TAG(vaddr, ctxnum);
32885 +    ELAN4_HASH_ENTRY *he      = &dev->dev_mmuhash[tbl][hashidx];
32886 +    unsigned         tagidx;
32887 +
32888 +    MPRINTF (ctxt, 2, "elan4mmu_ptealloc: tbl=%d ctxnum=%d vaddr=%llx -> hashidx %d\n", tbl, ctxnum, vaddr, hashidx);
32889 +
32890 +    /* 1st) check whether we're reloading an existing entry */
32891 +    for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
32892 +    {
32893 +       ASSERT ((he->he_tag[0] & TAG_CONTEXT_MASK) == ctxnum || (he->he_tag[1] & TAG_CONTEXT_MASK) == ctxnum);
32894 +
32895 +       for (tagidx = 0; tagidx < 2; tagidx++)
32896 +       {
32897 +           if ((he->he_tag[tagidx] & (TAG_ADDRESS_MASK | TAG_CONTEXT_MASK | HE_TAG_VALID)) == (newtag | HE_TAG_VALID))
32898 +           {
32899 +               MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return old he %p tagidx %d\n", he, tagidx);
32900 +
32901 +               *tagidxp = tagidx;
32902 +               return he;
32903 +           }
32904 +       }
32905 +    }
32906 +
32907 +    if ((he = elan4mmu_alloc_hent (dev, tbl, hashidx, newtag, &tagidx)) == NULL)
32908 +       return NULL;
32909 +
32910 +    /* chain onto context hash */
32911 +    if ((he->he_tag[tagidx ^ 1] & TAG_CONTEXT_MASK) == ctxnum) /* already chained using other link */
32912 +    {                                                          /* so ensure both slots are chained the same */
32913 +       he->he_chain[tagidx] = he->he_chain[tagidx^1];
32914 +    }
32915 +    else
32916 +    {
32917 +       he->he_chain[tagidx] = ctxt->ctxt_mmuhash[tbl][hashidx];
32918 +       ctxt->ctxt_mmuhash[tbl][hashidx] = he;
32919 +    }
32920 +
32921 +    MPRINTF (ctxt, 2, "elan4mmu_ptealloc: return new he %p tagidx %d\n", he, tagidx);
32922 +
32923 +    *tagidxp = tagidx;
32924 +
32925 +    return he;
32926 +}
32927 +
32928 +int
32929 +elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, int type, E4_uint64 newpte)
32930 +{
32931 +    ELAN4_DEV        *dev     = ctxt->ctxt_dev;
32932 +    unsigned          pteidx  = E4MMU_SHIFT_ADDR(vaddr, dev->dev_pageshift[tbl]) & 3;
32933 +    unsigned         tagidx;
32934 +    ELAN4_HASH_ENTRY *he;
32935 +
32936 +    MPRINTF (ctxt, 0, "elan4mmu_pteload: ctx=%d tbl=%d pteidx=%d vaddr=%llx type=%d pte=%llx\n", 
32937 +           ctxt->ctxt_num, tbl, pteidx, (unsigned long long)vaddr, type, newpte);
32938 +
32939 +    spin_lock (&ctxt->ctxt_mmulock);
32940 +
32941 +    if ((he = elan4mmu_ptealloc (ctxt, tbl, vaddr, &tagidx)) == NULL)
32942 +    {
32943 +       spin_unlock (&ctxt->ctxt_mmulock);
32944 +       return -ENOMEM;
32945 +    }
32946 +
32947 +    MPRINTF (ctxt, 1, "elan4mmu_pteload: %s he=%p tagidx=%d pteidx=%d\n", HE_GET_PTE(he,0,pteidx) ? "reloading" : "loading", he, tagidx, pteidx);
32948 +
32949 +    if (HE_GET_PTE(he,tagidx,pteidx) != HE_TYPE_INVALID &&                                             /* invalid -> valid */
32950 +       (elan4mmu_readpte (dev, he, tagidx, pteidx) & PTE_PPN_MASK) != (newpte & PTE_PPN_MASK)) /* or same phys address */
32951 +    {
32952 +       spin_unlock (&ctxt->ctxt_mmulock);
32953 +       return -EINVAL;
32954 +    }
32955 +    
32956 +    elan4mmu_writepte (dev, he, tagidx, pteidx, newpte);
32957 +    
32958 +    HE_SET_PTE(he, tagidx, pteidx, type);
32959 +
32960 +    spin_unlock (&ctxt->ctxt_mmulock);
32961 +    return 0;
32962 +}
32963 +
32964 +int
32965 +elan4mmu_pteload_page (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, struct page *page, int perm)
32966 +{
32967 +    ELAN4_DEV        *dev    = ctxt->ctxt_dev;
32968 +    unsigned int      pteidx = E4MMU_SHIFT_ADDR(vaddr, dev->dev_pageshift[tbl]) & 3;
32969 +    unsigned int      tagidx;
32970 +    unsigned int      type;
32971 +    E4_uint64        newpte;
32972 +    int                      topaddr;
32973 +    ELAN4_HASH_ENTRY *he;
32974 +    ELAN4_PTE_PAGE   *pg;
32975 +
32976 +    MPRINTF (ctxt, 1, "elan4mmu_pteload_page: ctx=%d tbl=%d pteidx=%d vaddr=%llx page=%p\n",
32977 +            ctxt->ctxt_num, tbl, pteidx, (unsigned long long)vaddr, page);
32978 +
32979 +    spin_lock (&ctxt->ctxt_mmulock);
32980 +    if ((he = elan4mmu_ptealloc (ctxt, tbl, vaddr, &tagidx)) == NULL)
32981 +    {
32982 +       MPRINTF (ctxt, 1, "elan4mmu_pteload_page: ctx=%d failed ENOMEM\n", ctxt->ctxt_num);
32983 +       spin_unlock (&ctxt->ctxt_mmulock);
32984 +       return -ENOMEM;
32985 +    }
32986 +    
32987 +    pg = &he->he_pg[tagidx][pteidx];
32988 +    
32989 +    if (HE_GET_PTE(he,tagidx,pteidx) != HE_TYPE_INVALID && pg->pg_page != page)                        /* invalid -> valid, or same page*/
32990 +    {
32991 +       MPRINTF (ctxt, 1, "elan4mmu_pteload_page: ctx=%d failed: pg_page=%p page=%p PTE=%x EINVAL\n",
32992 +                ctxt->ctxt_num, pg->pg_page, page, HE_GET_PTE(he,tagidx,pteidx));
32993 +
32994 +       spin_unlock (&ctxt->ctxt_mmulock);
32995 +       return -EINVAL;
32996 +    }
32997 +
32998 +    if (HE_GET_PTE (he,tagidx,pteidx) == HE_TYPE_INVALID)
32999 +    {
33000 +       if ((ctxt->ctxt_features & ELAN4_FEATURE_PIN_DOWN) != 0)
33001 +           page_cache_get (page);
33002 +
33003 +       pg->pg_page = page;
33004 +       
33005 +       if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP)
33006 +       {
33007 +           struct scatterlist sg;
33008 +
33009 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
33010 +           sg.address = NULL;
33011 +#endif
33012 +           sg.page    = page;
33013 +           sg.offset  = 0;
33014 +           sg.length  = PAGE_SIZE;
33015 +
33016 +           if (pci_map_sg (dev->dev_osdep.pdev, &sg, 1, PCI_DMA_BIDIRECTIONAL) == 0 || sg.length == 0)
33017 +           {
33018 +               spin_unlock (&ctxt->ctxt_mmulock);
33019 +               return -EFAULT;
33020 +           }
33021 +
33022 +           pg->pg_dma_addr = sg.dma_address | (vaddr & (PAGE_SIZE-1));
33023 +
33024 +           MPRINTF (ctxt, 1, "elan4mmu_pteload_page: pci_map_sg -> %lx\n", pg->pg_dma_addr);
33025 +       }
33026 +       else
33027 +       {
33028 +           pg->pg_dma_addr = (page_to_pfn (page) << PAGE_SHIFT) | (vaddr & (PAGE_SIZE-1));
33029 +
33030 +           MPRINTF (ctxt, 1, "elan4mmu_pteload_page: directmap -> %lx\n", pg->pg_dma_addr);
33031 +       }
33032 +    }
33033 +
33034 +#if defined(__BIG_ENDIAN__)
33035 +    type = PTE_SetPerm (perm) | PTE_PciNotLocal | PTE_BigEndian;
33036 +#else
33037 +    type = PTE_SetPerm (perm) | PTE_PciNotLocal;
33038 +#endif
33039 +
33040 +    topaddr = elan4mmu_alloc_topaddr (dev, pg->pg_dma_addr, type);
33041 +
33042 +    if (dev->dev_topaddrmode)
33043 +       newpte = dev->dev_pteval | (pg->pg_dma_addr >> PTE_PADDR_SHIFT) | (type & ~0xc) | (topaddr << 2);
33044 +    else
33045 +       newpte = dev->dev_pteval | ((pg->pg_dma_addr >> PTE_PADDR_SHIFT) & ~PTE_TOPADDR_MASK) | (((E4_uint64) topaddr) << 45) | type;
33046 +
33047 +    ASSERT (HE_GET_PTE(he,tagidx,pteidx) == HE_TYPE_INVALID ||                                         /* invalid -> valid */
33048 +           (elan4mmu_readpte (dev, he, tagidx, pteidx) & PTE_PPN_MASK) == (newpte & PTE_PPN_MASK));    /* or same phys address */
33049 +    
33050 +    elan4mmu_writepte (dev, he, tagidx, pteidx, newpte);
33051 +    
33052 +    HE_SET_PTE (he, tagidx, pteidx, HE_TYPE_PAGE);
33053 +    
33054 +    spin_unlock (&ctxt->ctxt_mmulock);
33055 +
33056 +    return 0;
33057 +}
33058 +
33059 +void
33060 +elan4mmu_pteunload (ELAN4_CTXT *ctxt, ELAN4_HASH_ENTRY *he, unsigned int tagidx, unsigned int pteidx)
33061 +{
33062 +    ELAN4_DEV     *dev = ctxt->ctxt_dev;
33063 +    ELAN4_PTE_PAGE *pg = &he->he_pg[tagidx][pteidx];
33064 +
33065 +    switch (HE_GET_PTE(he,tagidx,pteidx))
33066 +    {
33067 +    case HE_TYPE_PAGE:
33068 +       MPRINTF (DBG_DEVICE, 1, "elan4mmu_pteunload: he=%p tagidx=%d pteidx=%d page=%p -> %lx\n",
33069 +                he, tagidx, pteidx, pg->pg_page, pg->pg_dma_addr);
33070 +
33071 +       if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP)
33072 +       {
33073 +           struct scatterlist sg;
33074 +
33075 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
33076 +           sg.address     = NULL;
33077 +#endif
33078 +           sg.page        = pg->pg_page;
33079 +           sg.offset      = 0;
33080 +           sg.length      = PAGE_SIZE;
33081 +           sg.dma_address = pg->pg_dma_addr;
33082 +           
33083 +           pci_unmap_sg (dev->dev_osdep.pdev, &sg, 1, PCI_DMA_BIDIRECTIONAL);
33084 +       }
33085 +
33086 +       if ((ctxt->ctxt_features & ELAN4_FEATURE_PIN_DOWN) != 0)
33087 +           page_cache_release (pg->pg_page);
33088 +       
33089 +       pg->pg_page     = NULL;
33090 +       pg->pg_dma_addr = 0;
33091 +       break;
33092 +    }
33093 +
33094 +    HE_SET_PTE(he, tagidx, pteidx, HE_TYPE_INVALID);
33095 +                                   
33096 +    elan4mmu_writepte (dev, he, tagidx, pteidx, 0);
33097 +}
33098 +
33099 +
33100 +void
33101 +elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len)
33102 +{
33103 +    ELAN4_DEV        *dev       = ctxt->ctxt_dev;
33104 +    unsigned          ctxnum    = ctxt->ctxt_num;
33105 +    unsigned long     tagspan   = (1 << (dev->dev_pageshift[tbl] + 2));
33106 +    E4_Addr           end       = start + len - 1;
33107 +    int                      needflush = 0;
33108 +    unsigned          baseidx, topidx;
33109 +    unsigned          hashidx, tagidx, pteidx;
33110 +    ELAN4_HASH_ENTRY *he, *prevhe, *next;
33111 +    
33112 +    MPRINTF (ctxt, 0, "elan4mmu_unload_range: tbl=%d start=%llx end=%llx len=%lx\n", tbl, start, end, len);
33113 +
33114 +    /* determine how much of the hash table we've got to scan */
33115 +    
33116 +    /* GNAT 6760: When we have a Main page size which maps onto multiple Elan pages
33117 +     * we need to do something a bit more clever here or else it takes ms per page invalidate
33118 +     * This change helps in the meantime
33119 +     */
33120 +    /* if (len <= (1 << dev->dev_pageshift[tbl])) */
33121 +    if (len <= PAGE_SIZE)
33122 +    {
33123 +       baseidx = E4MMU_HASH_INDEX (ctxnum, start, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
33124 +       topidx  = E4MMU_HASH_INDEX (ctxnum, end,   dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
33125 +
33126 +       if (baseidx != topidx)
33127 +       {
33128 +           /* GNAT 6760: Need to search whole of the hash table (slow!) */
33129 +           baseidx = 0;
33130 +           topidx  = dev->dev_hashsize[tbl] - 1;
33131 +       }
33132 +    }
33133 +    else
33134 +    {
33135 +       baseidx = 0;
33136 +       topidx  = dev->dev_hashsize[tbl] - 1;
33137 +    }
33138 +
33139 +    MPRINTF (ctxt, 1, "elan4mmu_unload_range: baseidx=%d topidx=%d\n", baseidx, topidx);
33140 +
33141 +    spin_lock (&ctxt->ctxt_mmulock);
33142 +
33143 +    /* 1st - invalidate the tag for all hash blocks which are completely invalidated,
33144 +     *       and remember the first/last hash blocks */
33145 +    for (hashidx = baseidx; hashidx <= topidx; hashidx++)
33146 +       for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
33147 +           for (tagidx = 0; tagidx < 2; tagidx++)
33148 +               if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
33149 +               {
33150 +                   E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
33151 +                   E4_Addr top  = base + (tagspan -1);
33152 +                   
33153 +                   if (start < top && end > base)
33154 +                   {
33155 +                       unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl];
33156 +                       unsigned tidx = (end   >= top)  ? 3 : (end   & (tagspan-1)) >> dev->dev_pageshift[tbl];
33157 +                       
33158 +                       MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx hashidx=%d bidx=%d tidx=%d\n", he, base, top, hashidx, bidx, tidx);
33159 +                       
33160 +                       for (pteidx = bidx; pteidx <= tidx; pteidx++)
33161 +                           if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID)
33162 +                           {
33163 +                               elan4mmu_invalidatepte (dev, he, tagidx, pteidx);
33164 +                               needflush = 1;
33165 +                           }
33166 +                   }
33167 +                   else if (base >= start && top <= end)               /* hash entry completely spanned */
33168 +                   {                                                   /* so invalidate the tag */
33169 +                       MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned\n", he, base, top);
33170 +
33171 +                       he->he_tag[tagidx] &= ~HE_TAG_VALID;
33172 +                       
33173 +                       elan4mmu_synctag (dev, he, tagidx);
33174 +                       needflush = 1;
33175 +                   }
33176 +               }
33177 +
33178 +    if (needflush)
33179 +    {
33180 +       /* 2nd invalidate the first/last hash blocks if they are partially invalidated
33181 +        * and flush the tlb/hash copy blocks */
33182 +       elan4mmu_flush_tlb_hash (dev, tbl, baseidx, topidx);
33183 +       
33184 +       /* 3rd free off the hash entries which are completely invalidated */
33185 +       for (hashidx = baseidx; hashidx <= topidx; hashidx++)
33186 +           for (prevhe = NULL, he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = next)
33187 +           {
33188 +               next = he_ctxt_next (he, ctxnum);
33189 +               
33190 +               for (tagidx = 0; tagidx < 2; tagidx++)
33191 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
33192 +                   {
33193 +                       E4_Addr base = E4MMU_TAG2VADDR (he->he_tag[tagidx], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
33194 +                       E4_Addr top  = base + (tagspan -1);
33195 +                       
33196 +                       if (start < top && end > base)
33197 +                       {
33198 +                           unsigned bidx = (start <= base) ? 0 : (start & (tagspan-1)) >> dev->dev_pageshift[tbl];
33199 +                           unsigned tidx = (end   >= top)  ? 3 : (end   & (tagspan-1)) >> dev->dev_pageshift[tbl];
33200 +                           
33201 +                           MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx bidx=%d tidx=%d\n", he, base, top, bidx, tidx);
33202 +                           
33203 +                           for (pteidx = bidx; pteidx <= tidx; pteidx++)
33204 +                               if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID)
33205 +                                   elan4mmu_pteunload (ctxt, he,tagidx, pteidx);
33206 +                       }
33207 +                       
33208 +                       if ((base >= start && top <= end) || he->he_pte[tagidx] == 0)   /* hash entry completely spanned or all pte's cleared */
33209 +                       {                                                                       /* so invalidate the pte's and free it */
33210 +                           
33211 +                           MPRINTF (ctxt, 1, "elan4mmu_unload_range: he=%p base=%llx top=%llx spanned or empty\n", he, base, top);
33212 +                           
33213 +                           elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx);
33214 +                       }
33215 +                   }
33216 +               
33217 +               prevhe = he_ctxt_unlink (ctxt, tbl, hashidx, prevhe, he, next);
33218 +           }
33219 +    }
33220 +    spin_unlock (&ctxt->ctxt_mmulock);
33221 +}
33222 +
33223 +void
33224 +elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt)
33225 +{
33226 +    ELAN4_DEV *dev    = ctxt->ctxt_dev;
33227 +    int        ctxnum = ctxt->ctxt_num;
33228 +    ELAN4_HASH_ENTRY *he;
33229 +    int tbl, hashidx, tagidx, pteidx;
33230 +
33231 +    MPRINTF (ctxt, 0, "elan4mmu_invalidate_ctxt: invalidating ctxnum=%d\n", ctxnum);
33232 +
33233 +    spin_lock (&ctxt->ctxt_mmulock);
33234 +
33235 +    /* 1st invalidate all tags belonging to me */
33236 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
33237 +       for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
33238 +           for (he = ctxt->ctxt_mmuhash[tbl][hashidx]; he != NULL; he = he_ctxt_next (he, ctxnum))
33239 +               for (tagidx = 0; tagidx < 2; tagidx++)
33240 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum) /* own tag block */
33241 +                   {
33242 +                       MPRINTF (ctxt, 1, "elan4mmu_invalidate_ctxt: he=%p addr=%llx hashidx=%d tagidx=%d\n", 
33243 +                                he, he->he_tag[tagidx] & TAG_ADDRESS_MASK, hashidx, tagidx);
33244 +
33245 +                       he->he_tag[tagidx] &= ~HE_TAG_VALID;
33246 +                       
33247 +                       elan4mmu_synctag (dev, he, tagidx);
33248 +                   }
33249 +
33250 +    /* 2nd flush the tlb & cached hash block */
33251 +    elan4mmu_flush_tlb (dev);
33252 +    
33253 +    /* 3rd invalidate all pte's and free off the hash entries */
33254 +    for (tbl = 0; tbl < NUM_HASH_TABLES; tbl++)
33255 +       for (hashidx = 0; hashidx < dev->dev_hashsize[tbl]; hashidx++)
33256 +           while ((he = ctxt->ctxt_mmuhash[tbl][hashidx]) != NULL)
33257 +           {
33258 +               ctxt->ctxt_mmuhash[tbl][hashidx] = he_ctxt_next (he, ctxnum);
33259 +
33260 +               for (tagidx = 0; tagidx < 2; tagidx++)
33261 +                   if ((he->he_tag[tagidx] & TAG_CONTEXT_MASK) == ctxnum)
33262 +                   {
33263 +                       for (pteidx = 0; pteidx < 4; pteidx++)
33264 +                           if (HE_GET_PTE(he, tagidx, pteidx) != HE_TYPE_INVALID)
33265 +                               elan4mmu_pteunload (ctxt, he, tagidx, pteidx);
33266 +
33267 +                       elan4mmu_free_hent (dev, tbl, hashidx, he, tagidx);
33268 +                   }
33269 +           }
33270 +    spin_unlock (&ctxt->ctxt_mmulock);
33271 +}
33272 +
33273 +ELAN4_HASH_CACHE *
33274 +elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep)
33275 +{
33276 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
33277 +    E4_Addr           end      = start + (npages << dev->dev_pageshift[tbl]) - 1;
33278 +    unsigned long     tagshift = dev->dev_pageshift[tbl] + 2;
33279 +    E4_Addr           tagspan  = 1 << tagshift;
33280 +    E4_Addr           base     = (start & ~(tagspan-1));
33281 +    E4_Addr           top      = (end   & ~(tagspan-1)) + (tagspan-1);
33282 +    unsigned int      nhes     = (top - base + 1) >> tagshift;
33283 +    ELAN4_HASH_CACHE *hc;
33284 +    unsigned int      tagidx,  pteidx;
33285 +    E4_Addr           addr;
33286 +    int                      i;
33287 +    
33288 +    MPRINTF (ctxt, 0, "elan4mmu_reserve: start=%llx npages=%d\n", start, npages);
33289 +    MPRINTF (ctxt, 0, "         pageshift=%d tagspan=%lx base=%llx top=%llx end=%llx nhes=%d\n",
33290 +            dev->dev_pageshift[tbl], tagspan, base, top, end, nhes);
33291 +
33292 +    KMEM_ALLOC (hc, ELAN4_HASH_CACHE *, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes]), cansleep);
33293 +
33294 +    if (hc == NULL)
33295 +       return NULL;
33296 +
33297 +    hc->hc_start = start;
33298 +    hc->hc_end   = end;
33299 +    hc->hc_tbl   = tbl;
33300 +
33301 +    spin_lock (&ctxt->ctxt_mmulock);
33302 +    for (addr = base, i = 0; i < nhes; addr += tagspan, i++)
33303 +    {
33304 +       unsigned bidx = (i == 0)        ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0;
33305 +       unsigned tidx = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3;
33306 +
33307 +       
33308 +       if ((hc->hc_hes[i] = elan4mmu_ptealloc (ctxt, tbl, addr & ~(tagspan-1), &tagidx)) == NULL)
33309 +           goto failed;
33310 +
33311 +
33312 +       MPRINTF (ctxt, 2, "elan4mmu_reserve: tbl=%d addr=%llx -> hashidx=%d tagidx=%d\n", tbl, addr & ~(tagspan-1), 
33313 +                E4MMU_HASH_INDEX (ctxt->ctxt_num, (addr & ~(tagspan-1)), dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1), tagidx);
33314 +                        
33315 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
33316 +       {
33317 +           ASSERT (HE_GET_PTE (hc->hc_hes[i], tagidx, pteidx) == HE_TYPE_INVALID);
33318 +
33319 +           MPRINTF (ctxt, 2, "elan4mmu_reserve: i=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n",
33320 +                    i, addr, hc->hc_hes[i], tagidx, pteidx);
33321 +
33322 +           HE_SET_PTE (hc->hc_hes[i], tagidx, pteidx, HE_TYPE_RESERVED);
33323 +       }
33324 +    }
33325 +    spin_unlock (&ctxt->ctxt_mmulock);
33326 +
33327 +    return hc;
33328 +
33329 + failed:
33330 +    for (i--, addr -= tagspan; i >= 0; i--, addr -= tagspan)
33331 +    {
33332 +       unsigned bidx    = (i == 0) ? (start & (tagspan-1)) >> dev->dev_pageshift[tbl] : 0;
33333 +       unsigned tidx    = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[tbl] : 3;
33334 +       unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1);
33335 +       unsigned tagidx  = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[tbl], dev->dev_hashsize[tbl]-1)) ? 0 : 1;
33336 +           
33337 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
33338 +           HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, HE_TYPE_INVALID);
33339 +
33340 +       if (hc->hc_hes[i]->he_pte[tagidx] == 0)
33341 +           elan4mmu_free_hent (dev, tbl, hashidx, hc->hc_hes[i], tagidx);
33342 +    }
33343 +    spin_unlock (&ctxt->ctxt_mmulock);
33344 +
33345 +    KMEM_FREE (hc, offsetof (ELAN4_HASH_CACHE, hc_hes[nhes]));
33346 +    
33347 +    return NULL;
33348 +}
33349 +
33350 +void
33351 +elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc)
33352 +{
33353 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
33354 +    E4_Addr          start    = hc->hc_start;
33355 +    E4_Addr           end      = hc->hc_end;
33356 +    unsigned long     tagshift = dev->dev_pageshift[hc->hc_tbl] + 2;
33357 +    E4_Addr           tagspan  = 1 << tagshift;
33358 +    E4_Addr           base     = (start & ~(tagspan-1));
33359 +    E4_Addr           top      = (end   & ~(tagspan-1)) + (tagspan-1);
33360 +    unsigned int      nhes     = (top - base + 1) >> tagshift;
33361 +    ELAN4_HASH_ENTRY *prevhe, *he, *next;
33362 +    E4_Addr           addr;
33363 +    unsigned int      pteidx;
33364 +    int                      i;
33365 +
33366 +    spin_lock (&ctxt->ctxt_mmulock);
33367 +
33368 +    MPRINTF (ctxt, 0, "elan4mmu_release: base=%llx top=%llx\n", base, top);
33369 +
33370 +    for (addr = base, i = 0; i < nhes; addr += tagspan, i++)
33371 +    {
33372 +       unsigned bidx    = (i == 0)        ? (start & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 0;
33373 +       unsigned tidx    = (i == (nhes-1)) ? (end   & (tagspan-1)) >> dev->dev_pageshift[hc->hc_tbl] : 3;
33374 +       unsigned hashidx = E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1);
33375 +       unsigned tagidx  = (addr == E4MMU_TAG2VADDR (hc->hc_hes[i]->he_tag[0], hashidx, dev->dev_pageshift[hc->hc_tbl], dev->dev_hashsize[hc->hc_tbl]-1)) ? 0 : 1;
33376 +           
33377 +       for (pteidx = bidx; pteidx <= tidx; pteidx++)
33378 +       {
33379 +           elan4mmu_invalidatepte (dev, hc->hc_hes[i], tagidx, pteidx);
33380 +
33381 +           HE_SET_PTE(hc->hc_hes[i], tagidx, pteidx, HE_TYPE_INVALID);
33382 +       }
33383 +
33384 +       MPRINTF (ctxt, 2, "elan4mmu_release: i=%d addr=%llx he=%p (hashidx=%d tagidx=%d pteidx=%d) pte=%x\n",
33385 +                i, addr, hc->hc_hes[i], hashidx, tagidx, pteidx, hc->hc_hes[i]->he_pte[tagidx]);
33386 +
33387 +       /* remove from context hash */
33388 +       /* need to move to the  hc->hc_hes[i] in the ctxt list and set prevhe, he, next */
33389 +       prevhe = NULL;
33390 +       he = ctxt->ctxt_mmuhash[hc->hc_tbl][hashidx];
33391 +       next = he_ctxt_next (he, ctxt->ctxt_num);
33392 +
33393 +       while(he != hc->hc_hes[i]) {
33394 +           prevhe = he;
33395 +           he = next;
33396 +           next = he_ctxt_next (he, ctxt->ctxt_num);
33397 +       }
33398 +
33399 +       if (he->he_pte[tagidx] == 0) 
33400 +           elan4mmu_free_hent (dev, hc->hc_tbl, hashidx, he, tagidx);
33401 +
33402 +       he_ctxt_unlink (ctxt, hc->hc_tbl, hashidx, prevhe, he, next);
33403 +    }
33404 +    spin_unlock (&ctxt->ctxt_mmulock);
33405 +}
33406 +
33407 +void
33408 +elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte)
33409 +{
33410 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
33411 +    unsigned int      tbl      = hc->hc_tbl;
33412 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
33413 +    E4_Addr           tagspan  = 1 << tagshift;
33414 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
33415 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
33416 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
33417 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
33418 +
33419 +    MPRINTF (ctxt, 2, "elan4mmu_set_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d) newpte=%llx\n", idx, addr, he, tagidx, pteidx, newpte);
33420 +
33421 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
33422 +
33423 +    elan4mmu_writepte (dev, he, tagidx, pteidx, newpte);
33424 +}
33425 +
33426 +E4_uint64
33427 +elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx)
33428 +{
33429 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
33430 +    unsigned int      tbl      = hc->hc_tbl;
33431 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
33432 +    E4_Addr           tagspan  = 1 << tagshift;
33433 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
33434 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
33435 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
33436 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
33437 +
33438 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
33439 +
33440 +    return elan4mmu_readpte (dev, he, tagidx, pteidx);
33441 +}
33442 +
33443 +void
33444 +elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx)
33445 +{
33446 +    ELAN4_DEV        *dev      = ctxt->ctxt_dev;
33447 +    unsigned int      tbl      = hc->hc_tbl;
33448 +    unsigned int      tagshift = dev->dev_pageshift[tbl] + 2;
33449 +    E4_Addr           tagspan  = 1 << tagshift;
33450 +    E4_Addr           addr     = hc->hc_start + (idx << dev->dev_pageshift[tbl]);
33451 +    ELAN4_HASH_ENTRY *he       = hc->hc_hes[(addr - (hc->hc_start & ~(tagspan-1))) >> tagshift];
33452 +    unsigned          pteidx   = E4MMU_SHIFT_ADDR(addr, dev->dev_pageshift[tbl]) & 3;
33453 +    unsigned          tagidx   = he->he_tag[0] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID) ? 0 : 1;
33454 +
33455 +    MPRINTF (ctxt, 2, "elan4mmu_clear_pte: idx=%d addr=%llx he=%p (tagidx=%d pteidx=%d)\n", idx, addr, he, tagidx, pteidx);
33456 +
33457 +    ASSERT (he->he_tag[tagidx] == (E4MMU_TAG (addr, ctxt->ctxt_num) | HE_TAG_VALID));
33458 +
33459 +    elan4mmu_invalidatepte (dev, he, tagidx, pteidx);
33460 +}
33461 +
33462 +int 
33463 +elan4mmu_display_mmuhash(ELAN4_DEV *dev, int tbl, int *index_ptr, char *page, int count)
33464 +{
33465 +    char             *p = page;
33466 +    unsigned long     flags;
33467 +    ELAN4_HASH_ENTRY *he;
33468 +    int               index = *index_ptr;
33469 +
33470 +    spin_lock_irqsave (&dev->dev_mmulock, flags);
33471 +
33472 +    he = &dev->dev_mmuhash[tbl][index];
33473 +
33474 +    /* move to the next entry that actually has contents in its chain */
33475 +    while ((he->he_next == NULL) && ((he->he_tag[0] & TAG_CONTEXT_MASK) == INVALID_CONTEXT)
33476 +          && ((he->he_tag[1] & TAG_CONTEXT_MASK) == INVALID_CONTEXT))
33477 +    {
33478 +       index++;
33479 +       if ( index >= dev->dev_hashsize[tbl] ) {
33480 +           /* didn't find anything and have looped */
33481 +           spin_unlock_irqrestore (&dev->dev_mmulock, flags);
33482 +           *index_ptr = dev->dev_hashsize[tbl];
33483 +           return (p - page);
33484 +       }
33485 +       he = &dev->dev_mmuhash[tbl][index];
33486 +    }
33487 +    *index_ptr = index; /* the actual one we will print */
33488 +
33489 +
33490 +    while (he) {
33491 +       if ( ((p - page)+175) > count ) {
33492 +           /* might not fit in */
33493 +           p += sprintf( p , "...\n");
33494 +           he = NULL;
33495 +       } else {
33496 +           int ctxt0_bit = 0;
33497 +           int ctxt1_bit = 0;
33498 +           ELAN4_CTXT *ctxt0;
33499 +           ELAN4_CTXT *ctxt1;
33500 +
33501 +           if ( (he->he_tag[0] &  TAG_CONTEXT_MASK) != INVALID_CONTEXT) {
33502 +               ctxt0 = elan4_localctxt (dev, (he->he_tag[0] &  TAG_CONTEXT_MASK));
33503 +               ctxt0_bit = ctxt0->shuffle_needed[0];
33504 +           }
33505 +           if ( (he->he_tag[1] &  TAG_CONTEXT_MASK) != INVALID_CONTEXT) {
33506 +               ctxt1 = elan4_localctxt (dev, (he->he_tag[1] &  TAG_CONTEXT_MASK));
33507 +               ctxt1_bit = ctxt1->shuffle_needed[0];
33508 +           }
33509 +
33510 +           p += sprintf(p ,"(%4d,%1d) he%s %p  entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx-%d,0x%016llx-%d) pte(0x%010x,0x%010x)%s\n",
33511 +                        index,tbl, (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he, 
33512 +                        he->he_entry, he->he_next,  he->he_prev, he->he_chain[0], he->he_chain[1],
33513 +                        (long long)he->he_tag[0], ctxt0_bit, (long long)he->he_tag[1], ctxt1_bit, he->he_pte[0], he->he_pte[1],
33514 +                        (he->he_next)? (( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) == 
33515 +                                          ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
33516 +           he = he->he_next;
33517 +       }
33518 +    }
33519 +
33520 +    spin_unlock_irqrestore (&dev->dev_mmulock, flags);
33521 +
33522 +    return (p - page);
33523 +}
33524 +
33525 +int 
33526 +elan4mmu_display_ctxt_mmuhash(ELAN4_CTXT *ctxt, int tbl, int *index_ptr, char *page, int count)
33527 +{
33528 +    ELAN4_HASH_ENTRY *he;
33529 +    char             *p      = page;
33530 +    ELAN4_DEV        *dev    = ctxt->ctxt_dev;
33531 +    int               index  = *index_ptr;
33532 +
33533 +    spin_lock (&ctxt->ctxt_mmulock);
33534 +
33535 +    he = ctxt->ctxt_mmuhash[tbl][index];
33536 +    while (! he ) { 
33537 +       index++;
33538 +       if ( index >= dev->dev_hashsize[tbl] ) {
33539 +           /* didn't find anything and have looped */
33540 +           spin_unlock (&ctxt->ctxt_mmulock);
33541 +           *index_ptr = dev->dev_hashsize[tbl];
33542 +           return (p - page);
33543 +       }
33544 +       he = ctxt->ctxt_mmuhash[tbl][index];    
33545 +    }
33546 +    *index_ptr = index; /* the actual one we will print */
33547 +
33548 +    while (he) {
33549 +       if ( ((p - page)+175) > count ) {
33550 +           /* might not fit in */
33551 +           p += sprintf( p , "...\n");
33552 +           he = NULL;
33553 +       } else {
33554 +           p  += sprintf(p ,"(%4d,%1d) he%s %p  entry 0x%010lx he_(%p,%p) chain(%p,%p) tag(0x%016llx,0x%016llx) pte(0x%010x,0x%010x)%s\n",
33555 +                         index,tbl, (he==&dev->dev_mmuhash[tbl][index])?"*":" ", he,
33556 +                         he->he_entry, he->he_next, he->he_prev, he->he_chain[0], he->he_chain[1],
33557 +                         (long long)he->he_tag[0], (long long)he->he_tag[1], he->he_pte[0], he->he_pte[1],  
33558 +                         (he->he_next)?(( ((he->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK) == 
33559 +                                          ((he->he_next->he_entry >> TAG_CHAINPTR_HIGH_SHIFT) & TAG_CHAINPTR_30TO19_MASK))?" ":"*"):" ");
33560 +
33561 +           he =  he_ctxt_next (he, ctxt->ctxt_num);
33562 +       }
33563 +    }
33564 +
33565 +    spin_unlock (&ctxt->ctxt_mmulock);
33566 +
33567 +    return (p - page);
33568 +}
33569 +
33570 +int 
33571 +elan4mmu_display_bucket_mmuhash(ELAN4_DEV *dev, int tbl, int *buckets, int nBuckets, char *page, int c)
33572 +{   
33573 +    ELAN4_HASH_ENTRY *he;
33574 +    unsigned long     flags;
33575 +    char *p = page;
33576 +    int   b;
33577 +    int   index;
33578 +    int   count;
33579 +    int   totals[nBuckets];
33580 +
33581 +    for (b=0;b<nBuckets;b++) 
33582 +       totals[b]=0;
33583 +
33584 +    for (index=0; index < dev->dev_hashsize[tbl]; index++) {
33585 +
33586 +       /* how long is this chain */
33587 +       spin_lock_irqsave (&dev->dev_mmulock, flags);
33588 +
33589 +       he = &dev->dev_mmuhash[tbl][index];
33590 +       count = 0;
33591 +       while (he) {
33592 +           count++;
33593 +           ASSERT(count < 1000000); /* seems we have a loop */
33594 +           he = he->he_next;
33595 +       }
33596 +       spin_unlock_irqrestore (&dev->dev_mmulock, flags);
33597 +
33598 +       /* bucket the length */
33599 +       for(b=0;b<nBuckets;b++) 
33600 +           if ( count <= buckets[b] ) {
33601 +               totals[b]++;
33602 +               b = nBuckets;
33603 +           }
33604 +    }
33605 +
33606 +    for (b=0; b < nBuckets; b++) 
33607 +       p += sprintf(p, " %5d ", buckets[b]);
33608 +    p += sprintf(p, "\n");
33609 +    for (b=0; b < nBuckets; b++) 
33610 +       p += sprintf(p, " %5d ", totals[b]);
33611 +    p += sprintf(p, "\n");
33612 +
33613 +    return (p - page);
33614 +}
33615 +
33616 +EXPORT_SYMBOL(elan4mmu_flush_tlb);
33617 +EXPORT_SYMBOL(elan4mmu_pteload);
33618 +EXPORT_SYMBOL(elan4mmu_unload_range);
33619 +EXPORT_SYMBOL(elan4mmu_reserve);
33620 +EXPORT_SYMBOL(elan4mmu_release);
33621 +EXPORT_SYMBOL(elan4mmu_set_pte);
33622 +EXPORT_SYMBOL(elan4mmu_get_pte);
33623 +EXPORT_SYMBOL(elan4mmu_clear_pte);
33624 +EXPORT_SYMBOL(elan4mmu_do_shuffle);
33625 +EXPORT_SYMBOL(elan4mmu_set_shuffle);
33626 +
33627 +/*
33628 + * Local variables:
33629 + * c-file-style: "stroustrup"
33630 + * End:
33631 + */
33632 diff -urN clean/drivers/net/qsnet/elan4/mmu_Linux.c linux-2.6.9/drivers/net/qsnet/elan4/mmu_Linux.c
33633 --- clean/drivers/net/qsnet/elan4/mmu_Linux.c   1969-12-31 19:00:00.000000000 -0500
33634 +++ linux-2.6.9/drivers/net/qsnet/elan4/mmu_Linux.c     2005-07-14 09:34:12.000000000 -0400
33635 @@ -0,0 +1,262 @@
33636 +/*
33637 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
33638 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
33639 + *
33640 + *    For licensing information please see the supplied COPYING file
33641 + *
33642 + */
33643 +
33644 +#ident "@(#)$Id: mmu_Linux.c,v 1.17.2.1 2005/07/14 13:34:12 david Exp $"
33645 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu_Linux.c,v $*/
33646 +
33647 +#include <qsnet/kernel.h>
33648 +
33649 +#include <elan4/debug.h>
33650 +#include <elan4/device.h>
33651 +
33652 +#include <linux/pci.h>
33653 +#include <linux/version.h>
33654 +
33655 +int
33656 +elan4mmu_sdram_aliascheck (ELAN4_CTXT *ctxt, E4_Addr addr, sdramaddr_t phys)
33657 +{
33658 +    ELAN4_DEV *dev = ctxt->ctxt_dev;
33659 +
33660 +    /*
33661 +     * On MPSAS we don't allocate a large enough context table, so 
33662 +     * if we see an address/context pair which would "alias" because
33663 +     * they differ in unchecked hash bits to a previous pteload, 
33664 +     * then we kill the application.
33665 +     */
33666 +    unsigned hashval = (E4MMU_SHIFT_ADDR(addr, (dev->dev_pageshift[0]) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(ctxt->ctxt_num));
33667 +    
33668 +    if (dev->dev_rsvd_hashval[0] == 0xFFFFFFFF)
33669 +       dev->dev_rsvd_hashval[0] = hashval & dev->dev_rsvd_hashmask[0];
33670 +    
33671 +    if ((hashval & dev->dev_rsvd_hashmask[0]) != dev->dev_rsvd_hashval[0])
33672 +    {
33673 +       printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx ctxnum=%x -> [%x] overlaps %x - %x [hashidx=%x]\n", (unsigned long long) addr, 
33674 +               ctxt->ctxt_num, hashval, hashval & dev->dev_rsvd_hashmask[0], dev->dev_rsvd_hashval[0],
33675 +               E4MMU_HASH_INDEX (ctxt->ctxt_num, addr, dev->dev_pageshift[0], dev->dev_hashsize[0]-1));
33676 +       
33677 +       return 0;
33678 +    }
33679 +
33680 +    if (((addr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (phys & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT))))
33681 +    {
33682 +       printk ("elan4mmu_sdram_aliascheck: vaddr=%016llx incorrectly alias sdram at %lx\n", (unsigned long long) addr, phys);
33683 +       return 0;
33684 +    }
33685 +
33686 +    return 1;
33687 +}
33688 +
33689 +int
33690 +elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type)
33691 +{
33692 +#if defined(__i386) && !defined(CONFIG_X86_PAE)
33693 +    if (dev->dev_topaddrvalid == 0)
33694 +    {
33695 +       dev->dev_topaddrvalid = 1;
33696 +
33697 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(0), 0);
33698 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(1), 0);
33699 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(2), 0);
33700 +       pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(3), 0);
33701 +    }
33702 +    return (0);
33703 +#else
33704 +    register int i;
33705 +    E4_uint16 match;
33706 +
33707 +    if (dev->dev_topaddrmode)                                  /* ExtraMasterAddrBits=1 => match {paddr[63:50],type[3:2]} */
33708 +       match = ((paddr >> 48) & ~3) | ((type >> 2) & 3);
33709 +    else                                                       /* ExtraMasterAddrBits=0 => match {paddr[63:48]} */
33710 +       match = (paddr >> 48);
33711 +    
33712 +    MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: mode=%d paddr=%lx type=%x match=%x [%x %x.%x.%x.%x]\n",
33713 +            dev->dev_topaddrmode, paddr, type, match, dev->dev_topaddrvalid,
33714 +            dev->dev_topaddr[0], dev->dev_topaddr[1], dev->dev_topaddr[2], dev->dev_topaddr[3]);
33715 +    
33716 +    for (i = 0; i < 4; i++)
33717 +       if ((dev->dev_topaddrvalid & (1 << i)) && dev->dev_topaddr[i] == match)
33718 +           return (i);
33719 +    
33720 +    for (i = 0; i < 4; i++)
33721 +    {
33722 +       if ((dev->dev_topaddrvalid & (1 << i)) == 0)
33723 +       {
33724 +           MPRINTF (DBG_DEVICE, 2, "elan4mmu_alloc_topaddr: allocate slot %d for %x\n", i, match);
33725 +
33726 +           dev->dev_topaddrvalid |= (1 << i);
33727 +           dev->dev_topaddr[i] = match;
33728 +
33729 +           pci_write_config_word (dev->dev_osdep.pdev, PCI_ELAN_TOPPHYSADDR(i), match);
33730 +           return (i);
33731 +       }
33732 +    }
33733 +
33734 +    panic ("elan4mmu_alloc_topaddr: all topaddrs in use\n");
33735 +    return (0);
33736 +#endif
33737 +}
33738 +
33739 +/*
33740 + * Convert a physical address into a pte.  This should generate a "local" pte for 
33741 + * physical addresses which are elan4 sdram or elan4 command queues.  For elan4
33742 + * registers and other addresses on the same bus, this should be the local pci 
33743 + * bus address.  All other addresses should access the physical address via the
33744 + * PCI bridge.
33745 + */
33746 +
33747 +int
33748 +elan4mmu_categorise_paddr (ELAN4_DEV *dev, physaddr_t *physp)
33749 +{
33750 +    physaddr_t sdram_base = dev->dev_sdram_phys;
33751 +    physaddr_t sdram_top  = dev->dev_sdram_phys + pci_resource_len (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM);
33752 +    physaddr_t regs_base  = dev->dev_regs_phys;
33753 +    physaddr_t regs_top   = dev->dev_regs_phys + pci_resource_len (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS);
33754 +    physaddr_t phys       = *physp;
33755 +    int        iscommand;
33756 +
33757 +    if (phys >= sdram_base && phys <= sdram_top)
33758 +    {
33759 +       (*physp) = (phys ^ sdram_base);
33760 +       return HE_TYPE_SDRAM;
33761 +    }
33762 +    
33763 +    if (phys >= regs_base && phys < regs_top)
33764 +    {
33765 +       if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
33766 +           iscommand = (phys < (regs_base + ELAN4_REVA_REG_OFFSET));
33767 +       else
33768 +           iscommand = (phys < (regs_base + ELAN4_REVB_I2C_OFFSET));
33769 +       
33770 +       if (iscommand)
33771 +       {
33772 +           (*physp) = phys ^ regs_base;
33773 +
33774 +           return HE_TYPE_COMMAND;
33775 +       }
33776 +       else
33777 +       {
33778 +           u32 blow, bhigh;
33779 +
33780 +           /* compute a local pci address from our register BAR */
33781 +           pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_2, &blow);
33782 +           pci_read_config_dword (dev->dev_osdep.pdev, PCI_BASE_ADDRESS_3, &bhigh);
33783 +
33784 +           (*physp) = (((physaddr_t) bhigh) << 32) | (blow & PCI_BASE_ADDRESS_MEM_MASK) | (phys ^ regs_base);
33785 +
33786 +           return HE_TYPE_REGS;
33787 +       }
33788 +    }
33789 +
33790 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
33791 +    if (VALID_PAGE (virt_to_page (phys_to_virt (phys))))
33792 +#else
33793 +    if (virt_addr_valid (phys_to_virt (phys)))
33794 +#endif
33795 +       return HE_TYPE_PAGE;
33796 +    
33797 +    return HE_TYPE_OTHER;
33798 +}
33799 +
33800 +E4_uint64
33801 +elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t phys, unsigned perm)
33802 +{
33803 +    unsigned int type = 0;
33804 +    E4_uint64    pte;
33805 +
33806 +    switch (elan4mmu_categorise_paddr (dev, &phys))
33807 +    {
33808 +    case HE_TYPE_SDRAM:
33809 +       type = PTE_SetPerm (perm);
33810 +       break;
33811 +       
33812 +    case HE_TYPE_COMMAND:
33813 +       type = PTE_SetPerm (perm) | PTE_CommandQueue;
33814 +       break;
33815 +       
33816 +    case HE_TYPE_REGS:
33817 +       type = PTE_SetPerm (perm) | PTE_PciNotLocal;
33818 +       break;
33819 +
33820 +    case HE_TYPE_PAGE:
33821 +       if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP)
33822 +       {
33823 +           struct scatterlist list;
33824 +           
33825 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
33826 +           list.address = NULL;
33827 +#endif
33828 +           list.page    = virt_to_page (phys_to_virt (phys));;
33829 +           list.offset  = (phys & (PAGE_SIZE-1));
33830 +           list.length  = (1 << dev->dev_pageshift[0]);
33831 +           
33832 +           if (pci_map_sg (dev->dev_osdep.pdev, &list, 1, PCI_DMA_BIDIRECTIONAL) == 0)
33833 +           {
33834 +               printk ("elan4mmu_phys2pte: pci_map_sg failed\n");
33835 +               return -EFAULT;
33836 +           }
33837 +
33838 +           type = PTE_SetPerm (perm) | PTE_PciNotLocal | dev->dev_pteval;
33839 +           phys = list.dma_address;
33840 +           break;
33841 +       }
33842 +       /* DROPTHROUGH */
33843 +       
33844 +    case HE_TYPE_OTHER:
33845 +       if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_PCI_MAP)
33846 +           return -EFAULT;
33847 +
33848 +       type = PTE_SetPerm (perm) | PTE_PciNotLocal | dev->dev_pteval;
33849 +       break;
33850 +    }
33851 +
33852 +    if ((type & PTE_PciNotLocal) == 0)
33853 +       pte = (phys >> PTE_PADDR_SHIFT) | type;
33854 +    else
33855 +    {
33856 +       unsigned topaddr = elan4mmu_alloc_topaddr (dev, phys, type);
33857 +       
33858 +       if (dev->dev_topaddrmode)
33859 +           pte = (phys >> PTE_PADDR_SHIFT) | (type & ~0xc) | (topaddr << 2);
33860 +       else
33861 +           pte = ((phys >> PTE_PADDR_SHIFT) & ~PTE_TOPADDR_MASK) | (((E4_uint64) topaddr) << 45) | type;
33862 +    }
33863 +
33864 +    return pte;
33865 +}
33866 +
33867 +physaddr_t
33868 +elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte)
33869 +{
33870 +    physaddr_t sdram_base = dev->dev_sdram_phys;
33871 +    physaddr_t regs_base  = dev->dev_regs_phys;
33872 +    physaddr_t phys;
33873 +    
33874 +    if (pte & PTE_PciNotLocal)
33875 +    {
33876 +       if (dev->dev_topaddrmode)
33877 +           phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 2) & 3] & 0xfffc) << 48) | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT);
33878 +       else
33879 +           phys = ((physaddr_t)(dev->dev_topaddr[(pte >> 45) & 3] & 0xffff) << 48)| ((pte & PTE_PPN_MASK & ~PTE_TOPADDR_MASK) << PTE_PADDR_SHIFT);
33880 +
33881 +#if defined(__alpha)
33882 +           phys ^= alpha_mv.pci_dac_offset;
33883 +#elif defined(__sparc)
33884 +           phys ^= 0xfffe000000000000;
33885 +#endif
33886 +       return phys;
33887 +    }
33888 +    
33889 +    if (pte & PTE_CommandQueue)
33890 +       return (regs_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT));
33891 +    
33892 +    /* sdram */
33893 +    return (sdram_base | ((pte & PTE_PPN_MASK) << PTE_PADDR_SHIFT));
33894 +}
33895 +
33896 +EXPORT_SYMBOL(elan4mmu_phys2pte);
33897 +EXPORT_SYMBOL(elan4mmu_pte2phys);
33898 diff -urN clean/drivers/net/qsnet/elan4/neterr.c linux-2.6.9/drivers/net/qsnet/elan4/neterr.c
33899 --- clean/drivers/net/qsnet/elan4/neterr.c      1969-12-31 19:00:00.000000000 -0500
33900 +++ linux-2.6.9/drivers/net/qsnet/elan4/neterr.c        2005-07-20 07:35:36.000000000 -0400
33901 @@ -0,0 +1,270 @@
33902 +/*
33903 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
33904 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
33905 + * 
33906 + *    For licensing information please see the supplied COPYING file
33907 + *
33908 + */
33909 +
33910 +#ident "@(#)$Id: neterr.c,v 1.8.2.1 2005/07/20 11:35:36 mike Exp $"
33911 +/*      $Source: /cvs/master/quadrics/elan4mod/neterr.c,v $*/
33912 +
33913 +#include <qsnet/kernel.h>
33914 +
33915 +#include <elan4/sdram.h>
33916 +#include <elan4/debug.h>
33917 +#include <elan4/device.h>
33918 +#include <elan4/commands.h>
33919 +#include <elan4/trtype.h>
33920 +#include <elan4/neterr.h>
33921 +
33922 +typedef struct neterr_inputq
33923 +{
33924 +    E4_InputQueue      inputq;                                 /* input queue */
33925 +    E4_Event32         qevent;                                 /* input queue event */
33926 +    E4_uint64          sent;                                   /* # messages sent (cq flow control)*/
33927 +} NETERR_INPUTQ;
33928 +
33929 +#define NETERR_NSLOTS  64                                      /* single page of queue space (4Kb) */
33930 +
33931 +#define NETERR_RETRIES 16
33932 +#define NETERR_CQ_SIZE CQ_Size8K
33933 +#define NETERR_CQ_MSGS (CQ_Size(NETERR_CQ_SIZE) / (21*8))
33934 +#define NETERR_VP_COUNT        64                                      /* this *must* be > NETERR_CQ_MSGS */
33935 +#define NETERR_VP_BASE 1                                       /* use vp 1 upwards */
33936 +
33937 +void
33938 +elan4_neterr_interrupt (ELAN4_DEV *dev, void *arg)
33939 +{
33940 +    E4_Addr          qfptr  = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr));
33941 +    E4_Addr          qbptr  = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr));
33942 +    E4_Addr          qfirst = DEVICE_NETERR_SLOTS_ADDR;
33943 +    E4_Addr          qlast  = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE;
33944 +    ELAN4_CQ        *cq     = dev->dev_neterr_intcq;
33945 +    int              count  = 0;
33946 +    ELAN4_CTXT      *ctxt;
33947 +    ELAN4_NETERR_MSG msg;
33948 +
33949 +    while (qfptr != qbptr)
33950 +    {
33951 +       elan4_sdram_copyq_from_sdram (dev, dev->dev_neterr_slots + (qfptr - qfirst), &msg, ELAN4_NETERR_MSG_SIZE);
33952 +
33953 +       ctxt = elan4_networkctxt (dev, msg.msg_context);
33954 +
33955 +       if (ctxt != NULL && ctxt->ctxt_ops->op_neterrmsg)
33956 +           ctxt->ctxt_ops->op_neterrmsg (ctxt, &msg);
33957 +       else
33958 +           PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_interrupt: no process - sender %d.%d\n", msg.msg_sender.loc_node, msg.msg_sender.loc_context);
33959 +
33960 +       count++;
33961 +
33962 +       /* move on the from pointer */
33963 +       qfptr = (qfptr == qlast) ? qfirst : qfptr + ELAN4_NETERR_MSG_SIZE;
33964 +
33965 +       elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfptr);
33966 +    }
33967 +
33968 +    if (count == 0)
33969 +    {
33970 +       printk ("elan4_neterr_interrupt: spurious\n");
33971 +       return;
33972 +    }
33973 +
33974 +    /* Issue the waitevent to the interrupt queue */
33975 +    writeq (WAIT_EVENT_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)),   (void *)cq->cq_mapping);
33976 +    writeq (  E4_EVENT_INIT_VALUE (-32 * count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),       (void *)cq->cq_mapping);
33977 +    writeq (  DEVICE_NETERR_INTCQ_ADDR,                                                                (void *)cq->cq_mapping);
33978 +    writeq (INTERRUPT_CMD | (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT),            (void *)cq->cq_mapping);
33979 +
33980 +    pioflush_reg (dev);
33981 +}
33982 +
33983 +int
33984 +elan4_neterr_init (ELAN4_DEV *dev)
33985 +{
33986 +    unsigned int intqaddr;
33987 +    E4_Addr     qfirst, qlast;
33988 +    
33989 +    if ((dev->dev_neterr_inputq = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0)
33990 +       return 0;
33991 +
33992 +    if ((dev->dev_neterr_slots = elan4_sdram_alloc (dev, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE))) == 0)
33993 +       return 0;
33994 +
33995 +    if ((dev->dev_neterr_msgcq = elan4_alloccq (&dev->dev_ctxt, NETERR_CQ_SIZE, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority)) == NULL)
33996 +       return 0;
33997 +
33998 +    if ((dev->dev_neterr_intcq = elan4_alloccq (&dev->dev_ctxt, CQ_Size1K, CQ_WaitEventEnableBit | CQ_InterruptEnableBit, CQ_Priority)) == NULL)
33999 +       return 0;
34000 +
34001 +    intqaddr = (dev->dev_cqoffset + elan4_cq2num (dev->dev_neterr_intcq)) * CQ_CommandMappingSize;
34002 +    qfirst   = DEVICE_NETERR_SLOTS_ADDR;
34003 +    qlast    = qfirst + (NETERR_NSLOTS-1) * ELAN4_NETERR_MSG_SIZE;
34004 +
34005 +    spin_lock_init (&dev->dev_neterr_lock);
34006 +
34007 +    /* Register an interrupt operation */
34008 +    dev->dev_neterr_intop.op_function = elan4_neterr_interrupt;
34009 +    dev->dev_neterr_intop.op_arg      = NULL;
34010 +
34011 +    elan4_register_intop (dev, &dev->dev_neterr_intop);
34012 +
34013 +    /* Initialise the inputq descriptor and event */
34014 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_fptr), qfirst);
34015 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_bptr), qfirst);
34016 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_control), E4_InputQueueControl (qfirst, qlast, ELAN4_NETERR_MSG_SIZE));
34017 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, inputq.q_event), DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent));
34018 +    
34019 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_CountAndType), E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
34020 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WritePtr), DEVICE_NETERR_INTCQ_ADDR);
34021 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, qevent.ev_WriteValue), (dev->dev_neterr_intop.op_cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD);
34022 +
34023 +    elan4_sdram_writeq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent), 0);
34024 +
34025 +    /* Map them all into the device context */
34026 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, HE_TYPE_SDRAM, (dev->dev_neterr_inputq >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_RemoteAll));
34027 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR,  HE_TYPE_SDRAM, (intqaddr >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_LocDataWrite) | PTE_CommandQueue);
34028 +    elan4mmu_pteload (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR,  HE_TYPE_SDRAM, (dev->dev_neterr_slots >> PTE_PADDR_SHIFT) | PTE_SetPerm(PERM_DataReadWrite));
34029 +
34030 +    /* finally attach to the neterr context */
34031 +    if (elan4_attach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM) != 0)
34032 +       panic ("elan4_neterr_init: failed to attach to neterr context\n");
34033 +
34034 +    /* and drop the context filter */
34035 +    elan4_set_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM, E4_FILTER_HIGH_PRI);
34036 +
34037 +    return 1;
34038 +}
34039 +
34040 +void
34041 +elan4_neterr_destroy (ELAN4_DEV *dev)
34042 +{
34043 +    if (dev->dev_neterr_intcq)
34044 +    {
34045 +       elan4_detach_filter (&dev->dev_ctxt, ELAN4_NETERR_CONTEXT_NUM);
34046 +       
34047 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_SLOTS_ADDR,  1 << dev->dev_pageshift[0]);
34048 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INTCQ_ADDR,  1 << dev->dev_pageshift[0]);
34049 +       elan4mmu_unload_range (&dev->dev_ctxt, 0, DEVICE_NETERR_INPUTQ_ADDR, 1 << dev->dev_pageshift[0]);
34050 +
34051 +       spin_lock_destroy (&dev->dev_neterr_lock);
34052 +    }
34053 +
34054 +    if (dev->dev_neterr_intcq)
34055 +       elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_intcq);
34056 +    dev->dev_neterr_intcq = NULL;
34057 +
34058 +    if (dev->dev_neterr_msgcq)
34059 +       elan4_freecq (&dev->dev_ctxt, dev->dev_neterr_msgcq);
34060 +    dev->dev_neterr_msgcq = NULL;
34061 +
34062 +    if (dev->dev_neterr_slots)
34063 +       elan4_sdram_free (dev, dev->dev_neterr_slots, roundup (NETERR_NSLOTS * ELAN4_NETERR_MSG_SIZE, SDRAM_PAGE_SIZE));
34064 +    dev->dev_neterr_slots = 0;
34065 +    
34066 +    if (dev->dev_neterr_inputq)
34067 +       elan4_sdram_free (dev, dev->dev_neterr_inputq, SDRAM_PAGE_SIZE);
34068 +    dev->dev_neterr_inputq = 0;
34069 +}
34070 +
34071 +int
34072 +elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg)
34073 +{
34074 +    ELAN4_CQ  *cq = dev->dev_neterr_msgcq;
34075 +    E4_uint64  sent;
34076 +    E4_VirtualProcessEntry route;
34077 +    unsigned int vp;
34078 +    unsigned long flags;
34079 +
34080 +    spin_lock_irqsave (&dev->dev_neterr_lock, flags);
34081 +
34082 +    sent = elan4_sdram_readq (dev, dev->dev_neterr_inputq + offsetof (NETERR_INPUTQ, sent));
34083 +
34084 +    PRINTF (DBG_DEVICE, DBG_NETERR, "elan4_neterr_sendmsg: nodeid=%d retries=%d cookie=%llx sender=%d,%d%s\n", 
34085 +           nodeid, retries, msg->msg_cookies[0], msg->msg_sender.loc_node, msg->msg_sender.loc_context,
34086 +           (dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS ? " - no cq space" : "");
34087 +
34088 +    if ((dev->dev_neterr_queued - sent) >= NETERR_CQ_MSGS)
34089 +    {
34090 +       spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
34091 +       return 0;
34092 +    }
34093 +
34094 +    vp = NETERR_VP_BASE + (dev->dev_neterr_queued % NETERR_VP_COUNT);
34095 +
34096 +    if (elan4_generate_route (&dev->dev_position, &route, ELAN4_NETERR_CONTEXT_NUM, nodeid, nodeid, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI) < 0)
34097 +    {
34098 +       spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
34099 +       return 0;
34100 +    }
34101 +
34102 +    elan4_write_route (dev, dev->dev_routetable, vp, &route);
34103 +
34104 +    writeq ((GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(retries)),                             (void *)cq->cq_mapping);
34105 +    writeq (NOP_CMD,                                                                           (void *)cq->cq_mapping);
34106 +    
34107 +    writeq (OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp),             (void *)cq->cq_mapping);
34108 +    writeq (SEND_TRANS_CMD | (TR_INPUT_Q_GETINDEX << 16),                                      (void *)cq->cq_mapping);
34109 +    writeq (  DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq),                    (void *)cq->cq_mapping);
34110 +
34111 +    writeq (SEND_TRANS_CMD | (TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD) << 16),                 (void *)cq->cq_mapping);
34112 +    writeq (  0 /* address */,                                                                 (void *)cq->cq_mapping);
34113 +    writeq (  ((E4_uint64 *) msg)[0],                                                          (void *)cq->cq_mapping);
34114 +    writeq (  ((E4_uint64 *) msg)[1],                                                          (void *)cq->cq_mapping);
34115 +    writeq (  ((E4_uint64 *) msg)[2],                                                          (void *)cq->cq_mapping);
34116 +    writeq (  ((E4_uint64 *) msg)[3],                                                          (void *)cq->cq_mapping);
34117 +    writeq (  ((E4_uint64 *) msg)[4],                                                          (void *)cq->cq_mapping);
34118 +    writeq (  ((E4_uint64 *) msg)[5],                                                          (void *)cq->cq_mapping);
34119 +    writeq (  ((E4_uint64 *) msg)[6],                                                          (void *)cq->cq_mapping);
34120 +    writeq (  ((E4_uint64 *) msg)[7],                                                          (void *)cq->cq_mapping);
34121 +
34122 +    writeq (SEND_TRANS_CMD | (TR_INPUT_Q_COMMIT << 16),                                                (void *)cq->cq_mapping);
34123 +    writeq (  DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq),                    (void *)cq->cq_mapping);
34124 +    writeq (  0 /* cookie */,                                                                  (void *)cq->cq_mapping);
34125 +    
34126 +    writeq (GUARD_CMD | GUARD_CHANNEL(0) | GUARD_RESET(NETERR_RETRIES),                                (void *)cq->cq_mapping);
34127 +    writeq (WRITE_DWORD_CMD | (DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, sent)),    (void *)cq->cq_mapping);
34128 +    writeq (  ++dev->dev_neterr_queued,                                                                (void *)cq->cq_mapping);
34129 +
34130 +    pioflush_reg (dev);
34131 +
34132 +    spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
34133 +
34134 +    return 1;
34135 +}
34136 +
34137 +int
34138 +elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap)
34139 +{
34140 +    E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans];
34141 +    unsigned long flags;
34142 +
34143 +    switch (IPROC_TrapValue (hdrp->IProcStatusCntxAndTrType))
34144 +    {
34145 +    case InputEopErrorOnWaitForEop:
34146 +    case InputEopErrorTrap:
34147 +    case InputCrcErrorAfterPAckOk:
34148 +       return 1;
34149 +
34150 +    case InputEventEngineTrapped:
34151 +       printk ("elan%d: device_iproc_trap: InputEventEngineTrapped - Trans=%x TrAddr=%llx\n", 
34152 +               dev->dev_instance, (int)IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType), (long long) hdrp->TrAddr);
34153 +
34154 +       if ((IPROC_TransactionType (hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) &&
34155 +           hdrp->TrAddr == DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, inputq))
34156 +       {
34157 +           spin_lock_irqsave (&dev->dev_neterr_lock, flags);
34158 +           writeq ((DEVICE_NETERR_INPUTQ_ADDR + offsetof (NETERR_INPUTQ, qevent)) | SET_EVENT_CMD, (void *)(dev->dev_neterr_msgcq->cq_mapping));
34159 +           spin_unlock_irqrestore (&dev->dev_neterr_lock, flags);
34160 +           return 1;
34161 +       }
34162 +       
34163 +    default:
34164 +       return 0;
34165 +    }
34166 +}
34167 +/*
34168 + * Local variables:
34169 + * c-file-style: "stroustrup"
34170 + * End:
34171 + */
34172 diff -urN clean/drivers/net/qsnet/elan4/procfs_Linux.c linux-2.6.9/drivers/net/qsnet/elan4/procfs_Linux.c
34173 --- clean/drivers/net/qsnet/elan4/procfs_Linux.c        1969-12-31 19:00:00.000000000 -0500
34174 +++ linux-2.6.9/drivers/net/qsnet/elan4/procfs_Linux.c  2005-09-07 10:35:03.000000000 -0400
34175 @@ -0,0 +1,1426 @@
34176 +/*
34177 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
34178 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
34179 + *
34180 + *    For licensing information please see the supplied COPYING file
34181 + *
34182 + */
34183 +
34184 +#ident "@(#)$Id: procfs_Linux.c,v 1.43.2.6 2005/09/07 14:35:03 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
34185 +/*      $Source: /cvs/master/quadrics/elan4mod/procfs_Linux.c,v $*/
34186 +
34187 +#include <qsnet/kernel.h>
34188 +
34189 +#include <qsnet/module.h>
34190 +#include <linux/proc_fs.h>
34191 +#include <linux/ctype.h>
34192 +
34193 +#include <qsnet/procfs_linux.h>
34194 +
34195 +#include <elan4/i2c.h>
34196 +#include <elan4/debug.h>
34197 +#include <elan4/device.h>
34198 +#include <elan4/user.h>
34199 +#include <elan4/mmu.h>
34200 +/*
34201 + *
34202 + * procfs format for elan4:
34203 + *
34204 + * /proc/qsnet/elan4/config
34205 + *    elan4_debug
34206 + *    elan4_debug_toconsole
34207 + *    elan4_debug_tobuffer
34208 + *    elan4_debug_display_ctxt
34209 + *    elan4_debug_ignore_ctxt
34210 + *    elan4_debug_ignore_type
34211 + *    elan4_debug_mmu
34212 + *    elan4_mainint_punt_loops
34213 + *    user_p2p_route_options
34214 + *    user_bcast_route_options
34215 + *
34216 + * /proc/qsnet/elan4/deviceN
34217 + *    stats
34218 + *    position
34219 + *    vpd
34220 + */
34221 +
34222 +struct proc_dir_entry *elan4_procfs_root;
34223 +struct proc_dir_entry *elan4_config_root;
34224 +
34225 +/* borrowed from fs/proc/proc_misc - helper for proc_read_int */
34226 +static int 
34227 +proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len)
34228 +{
34229 +    if (len <= off+count) *eof = 1;
34230 +    *start = page + off;
34231 +    len -= off;
34232 +    if (len>count) len = count;
34233 +    if (len<0) len = 0;
34234 +    return len;
34235 +}
34236 +
34237 +static int
34238 +proc_read_devinfo (char *page, char **start, off_t off,
34239 +                   int count, int *eof, void *data)
34240 +{
34241 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34242 +    int        len = 0;
34243 +
34244 +    if (! dev)
34245 +       len = sprintf (page, "<unknown>\n");
34246 +    else
34247 +    {
34248 +       len += sprintf (page + len, "dev_vendor_id            0x%x\n", dev->dev_devinfo.dev_vendor_id);
34249 +       len += sprintf (page + len, "dev_device_id            0x%x\n", dev->dev_devinfo.dev_device_id);
34250 +       len += sprintf (page + len, "dev_revision_id          0x%x\n", dev->dev_devinfo.dev_revision_id);
34251 +       len += sprintf (page + len, "dev_instance             0x%x\n", dev->dev_devinfo.dev_instance);
34252 +       len += sprintf (page + len, "dev_rail                 0x%x\n", dev->dev_devinfo.dev_rail);
34253 +       len += sprintf (page + len, "dev_driver_version       0x%x\n", dev->dev_devinfo.dev_driver_version);
34254 +       len += sprintf (page + len, "dev_params_mask          0x%x\n", dev->dev_devinfo.dev_params_mask);
34255 +       len += sprintf (page + len, "dev_params:                  \n");
34256 +       len += sprintf (page + len, " 0  - PciCmdQPadFlag     0x%x\n", dev->dev_devinfo.dev_params.values[0]);
34257 +       len += sprintf (page + len, " 1  - EventCopyWinPt     0x%x\n", dev->dev_devinfo.dev_params.values[1]);
34258 +       len += sprintf (page + len, " 2  - PciWriteCombining  0x%x\n", dev->dev_devinfo.dev_params.values[2]);
34259 +       len += sprintf (page + len, " 3  -                    0x%x\n", dev->dev_devinfo.dev_params.values[3]);
34260 +       len += sprintf (page + len, " 4  -                    0x%x\n", dev->dev_devinfo.dev_params.values[4]);
34261 +       len += sprintf (page + len, " 5  -                    0x%x\n", dev->dev_devinfo.dev_params.values[5]);
34262 +       len += sprintf (page + len, " 6  -                    0x%x\n", dev->dev_devinfo.dev_params.values[6]);
34263 +       len += sprintf (page + len, " 7  -                    0x%x\n", dev->dev_devinfo.dev_params.values[7]);
34264 +       len += sprintf (page + len, " 8  -                    0x%x\n", dev->dev_devinfo.dev_params.values[8]);
34265 +       len += sprintf (page + len, " 9  -                    0x%x\n", dev->dev_devinfo.dev_params.values[9]);
34266 +       len += sprintf (page + len, " 10 -                    0x%x\n", dev->dev_devinfo.dev_params.values[10]);
34267 +       len += sprintf (page + len, " 11 - features           0x%x\n", dev->dev_devinfo.dev_params.values[11]);
34268 +       len += sprintf (page + len, "dev_num_down_links_value 0x%x\n", dev->dev_devinfo.dev_num_down_links_value);
34269 +    }
34270 +
34271 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
34272 +}
34273 +
34274 +static int
34275 +proc_read_position (char *page, char **start, off_t off,
34276 +                   int count, int *eof, void *data)
34277 +{
34278 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34279 +    int        len;
34280 +
34281 +    if (dev->dev_position.pos_mode == ELAN_POS_UNKNOWN)
34282 +       len = sprintf (page, "<unknown>\n");
34283 +    else
34284 +       len = sprintf (page, 
34285 +                      "NodeId                 %d\n"
34286 +                      "NumLevels              %d\n"
34287 +                      "NumNodes               %d\n",
34288 +                      dev->dev_position.pos_nodeid, 
34289 +                      dev->dev_position.pos_levels, 
34290 +                      dev->dev_position.pos_nodes);
34291 +
34292 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
34293 +}
34294 +
34295 +static int
34296 +proc_write_position (struct file *file, const char *buf, unsigned long count, void *data)
34297 +{
34298 +    ELAN4_DEV *dev     = (ELAN4_DEV *) data;
34299 +    unsigned  nodeid   = ELAN_INVALID_NODE;
34300 +    unsigned  numnodes = 0;
34301 +    char     *page, *p;
34302 +    int       res;
34303 +    ELAN_POSITION pos;
34304 +
34305 +    if (count == 0)
34306 +       return (0);
34307 +
34308 +    if (count >= PAGE_SIZE)
34309 +       return (-EINVAL);
34310 +
34311 +    if ((page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
34312 +       return (-ENOMEM);
34313 +
34314 +    MOD_INC_USE_COUNT;
34315 +
34316 +    if (copy_from_user (page, buf, count))
34317 +       res = -EFAULT;
34318 +    else
34319 +    {
34320 +       page[count] = '\0';
34321 +       
34322 +       if (page[count-1] == '\n')
34323 +           page[count-1] = '\0';
34324 +
34325 +       if (! strcmp (page, "<unknown>"))
34326 +       {
34327 +           pos.pos_mode      = ELAN_POS_UNKNOWN;
34328 +           pos.pos_nodeid    = ELAN_INVALID_NODE;
34329 +           pos.pos_nodes     = 0;
34330 +           pos.pos_levels    = 0;
34331 +       }
34332 +       else
34333 +       {
34334 +           for (p = page; *p; )
34335 +           {
34336 +               while (isspace (*p))
34337 +                   p++;
34338 +               
34339 +               if (! strncmp (p, "NodeId=", strlen("NodeId=")))
34340 +                   nodeid   = simple_strtoul (p + strlen ("NodeId="), NULL, 0);
34341 +               if (! strncmp (p, "NumNodes=", strlen ("NumNodes=")))
34342 +                   numnodes = simple_strtoul (p + strlen ("NumNodes="), NULL, 0);
34343 +               
34344 +               while (*p && !isspace(*p))
34345 +                   p++;
34346 +           }
34347 +
34348 +           if (elan4_compute_position (&pos, nodeid, numnodes, dev->dev_devinfo.dev_num_down_links_value) != 0)
34349 +               printk ("elan%d: invalid values for NodeId=%d NumNodes=%d\n", dev->dev_instance, nodeid, numnodes);
34350 +           else
34351 +           {
34352 +               printk ("elan%d: setting NodeId=%d NumNodes=%d NumLevels=%d\n", dev->dev_instance, pos.pos_nodeid,
34353 +                       pos.pos_nodes, pos.pos_levels);
34354 +
34355 +               if (elan4_set_position (dev, &pos) < 0)
34356 +                   printk ("elan%d: failed to set device position\n", dev->dev_instance);
34357 +           }
34358 +       }
34359 +    }
34360 +
34361 +    MOD_DEC_USE_COUNT;
34362 +    free_page ((unsigned long) page);
34363 +
34364 +    return (count);
34365 +}
34366 +
34367 +static int
34368 +proc_read_temp (char *page, char **start, off_t off,
34369 +               int count, int *eof, void *data)
34370 +{
34371 +    ELAN4_DEV    *dev = (ELAN4_DEV *) data;
34372 +    unsigned char values[2];
34373 +    int           len;
34374 +
34375 +    if (i2c_disable_auto_led_update (dev) < 0)
34376 +       len = sprintf (page, "<unknown>");
34377 +    else
34378 +    {
34379 +       if (i2c_read (dev, I2C_TEMP_ADDR, 2, values) < 0)
34380 +           len = sprintf (page, "<not-present>");
34381 +       else
34382 +           len = sprintf (page, "%s%d%s\n", (values[0] & 0x80) ? "-" : "",
34383 +                          (values[0] & 0x80) ? -((signed char)values[0]) - 1 : values[0],
34384 +                          (values[1] & 0x80) ? ".5" : ".0");
34385 +
34386 +       i2c_enable_auto_led_update (dev);
34387 +    }
34388 +
34389 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
34390 +}
34391 +
34392 +static int
34393 +proc_read_eccerr (char *page, char **start, off_t off,
34394 +                 int count, int *eof, void *data)
34395 +{
34396 +    ELAN4_DEV    *dev = (ELAN4_DEV *) data;
34397 +    char          errstr[200];
34398 +    register int  i, len = 0;
34399 +
34400 +    *page = '\0';
34401 +
34402 +    for (i = 0; i < sizeof (dev->dev_sdramerrs)/sizeof(dev->dev_sdramerrs[0]); i++)
34403 +       if (dev->dev_sdramerrs[i].ErrorCount != 0)
34404 +           len += sprintf (page + len, "%s occurred %0d times\n",
34405 +                           elan4_sdramerr2str (dev, dev->dev_sdramerrs[i].EccStatus, dev->dev_sdramerrs[i].ConfigReg, errstr),
34406 +                           dev->dev_sdramerrs[i].ErrorCount);
34407 +
34408 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
34409 +}
34410 +
34411 +static int
34412 +proc_read_vpd (char *page, char **start, off_t off,
34413 +              int count, int *eof, void *data)
34414 +{
34415 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34416 +    int        len;
34417 +
34418 +    if ( elan4_read_vpd (dev, NULL, page) )
34419 +       len = sprintf (page, "no vpd tags found\n");
34420 +    else
34421 +       len = strlen(page)+1;
34422 +
34423 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
34424 +}
34425 +
34426 +static int
34427 +proc_read_linkportkey (char *page, char **start, off_t off,
34428 +                      int count, int *eof, void *data)
34429 +{
34430 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34431 +    int        len;
34432 +
34433 +    len = sprintf (page, "%llx\n", read_reg64 (dev, LinkPortLock));
34434 +
34435 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
34436 +}
34437 +
34438 +static int
34439 +proc_write_linkportkey (struct file *file, const char *buf, unsigned long count, void *data)
34440 +{
34441 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34442 +    int               res = 0;
34443 +    char       tmpbuf[30];
34444 +
34445 +    if (count > sizeof (tmpbuf) - 1)
34446 +       return -EINVAL;
34447 +
34448 +    MOD_INC_USE_COUNT;
34449 +
34450 +    if (copy_from_user (tmpbuf, buf, count))
34451 +       res = -EFAULT;
34452 +    else
34453 +    {
34454 +       tmpbuf[count] = '\0';
34455 +
34456 +       write_reg64 (dev, LinkPortLock, simple_strtoull (tmpbuf, NULL, 16));
34457 +    }
34458 +
34459 +    MOD_DEC_USE_COUNT;
34460 +
34461 +    return (count);
34462 +}
34463 +
34464 +static int
34465 +proc_read_stats_translations (char *page, char **start, off_t off, int count, int *eof, void *data)
34466 +{      
34467 +    ELAN4_TRANS_STATS *pr  = (ELAN4_TRANS_STATS *)data;
34468 +    int                tbl = pr->tbl;
34469 +    ELAN4_DEV         *dev = list_entry(pr, ELAN4_DEV, trans_stats[tbl] );
34470 +    char              *p   = page;
34471 +
34472 +    if (off) return (0);
34473 +
34474 +    p += elan4mmu_display_bucket_mmuhash(dev, tbl, pr->buckets, ELAN4_TRANS_STATS_NUM_BUCKETS , p, count);
34475 +
34476 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34477 +}
34478 +
34479 +static int
34480 +proc_write_stats_translations (struct file *file, const char *buf, unsigned long count, void *data)
34481 +{
34482 +    ELAN4_TRANS_STATS *pr  = (ELAN4_TRANS_STATS *)data;
34483 +    int                b0, b1, b2, b3, b4, b5, b6;
34484 +
34485 +    int               res = 0;
34486 +    char       tmpbuf[30];
34487 +
34488 +    if (count > sizeof (tmpbuf) - 1)
34489 +       return -EINVAL;
34490 +
34491 +    MOD_INC_USE_COUNT;
34492 +
34493 +    if (copy_from_user (tmpbuf, buf, count))
34494 +       res = -EFAULT;
34495 +    else
34496 +    {
34497 +       tmpbuf[count] = '\0';
34498 +       sscanf(tmpbuf,"%d %d %d %d %d %d %d\n", &b0, &b1, &b2, &b3, &b4, &b5, &b6);
34499 +
34500 +       pr->buckets[0] = b0;
34501 +       pr->buckets[1] = b1;
34502 +       pr->buckets[2] = b2;
34503 +       pr->buckets[3] = b3;
34504 +       pr->buckets[4] = b4;
34505 +       pr->buckets[5] = b5;
34506 +       pr->buckets[6] = b6;
34507 +       pr->buckets[6] = 99999999;
34508 +
34509 +       b0 = 1;
34510 +       for(b1=0 ; b1 < ELAN4_TRANS_STATS_NUM_BUCKETS; b1++) {
34511 +           if ( pr->buckets[b1] < b0) 
34512 +               pr->buckets[b1] = 99999999;
34513 +           b0 =  pr->buckets[b1];
34514 +       }
34515 +    }
34516 +
34517 +    MOD_DEC_USE_COUNT;
34518 +
34519 +    return (count);
34520 +}
34521 +static int 
34522 +elan4_read_mmuhash_reduction_func (char *page, char **start, off_t off, int count, int *eof, void *data)
34523 +{      
34524 +    char              *p   = page;
34525 +
34526 +    if (off) return (0);
34527 +
34528 +    p += sprintf(p ,"elan4mmu hash reduction : %s\nend reductions    %d\nmiddle reductions %d\nmiddle failed     %d\n", 
34529 +                elan4_mmuhash_chain_reduction?"On":"Off",
34530 +                elan4_mmuhash_chain_end_reduce,
34531 +                elan4_mmuhash_chain_middle_reduce,
34532 +                elan4_mmuhash_chain_middle_fail);
34533 +    p += sprintf(p ,"shuffle attempts  %d\nshuffle done      %d\n", 
34534 +                elan4_mmuhash_shuffle_attempts,
34535 +                elan4_mmuhash_shuffle_done
34536 +                );
34537 +
34538 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34539 +}
34540 +
34541 +static int
34542 +elan4_write_mmuhash_reduction_func (struct file *file, const char *buf, unsigned long count, void *data)
34543 +{
34544 +    int               res = 0;
34545 +    char       tmpbuf[30];
34546 +
34547 +    if (count > sizeof (tmpbuf) - 1)
34548 +       return -EINVAL;
34549 +
34550 +    MOD_INC_USE_COUNT;
34551 +
34552 +    if (copy_from_user (tmpbuf, buf, count))
34553 +       res = -EFAULT;
34554 +    else
34555 +    {
34556 +       if (tmpbuf[0] == '0') elan4_mmuhash_chain_reduction = 0;
34557 +       if (tmpbuf[0] == '1') elan4_mmuhash_chain_reduction = 1;
34558 +
34559 +       tmpbuf[count] = '\0';
34560 +    }
34561 +
34562 +    MOD_DEC_USE_COUNT;
34563 +
34564 +    return (count);
34565 +}
34566 +
34567 +typedef struct elan4_trans_private
34568 +{
34569 +    ELAN4_DEV              *pr_dev;
34570 +    ELAN4_CTXT             *pr_ctxt;
34571 +    int                     pr_index;
34572 +    int                     pr_tbl;
34573 +    char                  *pr_page;
34574 +    unsigned               pr_off;
34575 +    unsigned               pr_len;
34576 +
34577 +    int                     pr_changed;
34578 +} ELAN4_TRANS_PRIVATE;
34579 +
34580 +static int
34581 +elan4_ctxt_trans_open (struct inode *inode, struct file *file)
34582 +{
34583 +    ELAN4_CTXT_TRANS_INDEX *trans  = (ELAN4_CTXT_TRANS_INDEX *)( PDE(inode)->data );
34584 +    ELAN4_TRANS_PRIVATE    *pr;
34585 +
34586 +    if ((pr = kmalloc (sizeof (ELAN4_TRANS_PRIVATE), GFP_KERNEL)) == NULL)
34587 +       return (-ENOMEM);
34588 +    
34589 +    pr->pr_tbl   = trans->tbl;
34590 +    pr->pr_ctxt  = list_entry(trans, ELAN4_CTXT, trans_index[trans->tbl] );
34591 +
34592 +    pr->pr_index = 0;
34593 +
34594 +    pr->pr_len     = 0;
34595 +    pr->pr_off     = 0;
34596 +    pr->pr_changed = 1;
34597 +    pr->pr_page    = NULL;
34598 +
34599 +    file->private_data = (void *) pr;
34600 +
34601 +    MOD_INC_USE_COUNT;
34602 +    return (0);
34603 +}
34604 +
34605 +static ssize_t
34606 +elan4_ctxt_trans_read (struct file *file, char *buf, size_t count, loff_t *ppos)
34607 +{
34608 +    ELAN4_TRANS_PRIVATE *pr   = (ELAN4_TRANS_PRIVATE *) file->private_data;
34609 +    ELAN4_CTXT          *ctxt = pr->pr_ctxt;
34610 +    ELAN4_DEV           *dev  = ctxt->ctxt_dev;
34611 +    int                  error;
34612 +
34613 +    if ( pr->pr_index >= dev->dev_hashsize[pr->pr_tbl] ) 
34614 +       return (0);
34615 +
34616 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
34617 +       return (error);
34618 +
34619 +    if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
34620 +       return (-ENOMEM);
34621 +
34622 +    if (pr->pr_off >= pr->pr_len)
34623 +    {
34624 +       if (elan4mmu_display_ctxt_mmuhash(ctxt, pr->pr_tbl, &pr->pr_index, pr->pr_page, count)) 
34625 +           pr->pr_len     = strlen (pr->pr_page);
34626 +       else
34627 +           pr->pr_len     = 0;
34628 +
34629 +       pr->pr_off     = 0;
34630 +       pr->pr_changed = 0;
34631 +       pr->pr_index++; 
34632 +    }
34633 +
34634 +    if (count >= (pr->pr_len - pr->pr_off))
34635 +       count = pr->pr_len - pr->pr_off;
34636 +
34637 +    copy_to_user (buf, pr->pr_page + pr->pr_off, count);
34638 +
34639 +    pr->pr_off += count;
34640 +    *ppos      += count;
34641 +
34642 +    if (pr->pr_off >= pr->pr_len)
34643 +    {
34644 +       free_page ((unsigned long) pr->pr_page);
34645 +       pr->pr_page = NULL;
34646 +    }
34647 +
34648 +    return (count);
34649 +}
34650 +
34651 +static int
34652 +elan4_ctxt_trans_release (struct inode *inode, struct file *file)
34653 +{
34654 +    ELAN4_TRANS_PRIVATE *pr = (ELAN4_TRANS_PRIVATE *) file->private_data;
34655 +
34656 +    if (pr->pr_page)
34657 +       free_page ((unsigned long) pr->pr_page);
34658 +    kfree (pr);
34659 +    
34660 +    MOD_DEC_USE_COUNT;
34661 +    return (0);
34662 +}
34663 +
34664 +static struct file_operations qsnet_ctxt_trans_fops = 
34665 +{
34666 +       open:    elan4_ctxt_trans_open,
34667 +       release: elan4_ctxt_trans_release,
34668 +       read:    elan4_ctxt_trans_read,
34669 +};
34670 +
34671 +void
34672 +proc_insertctxt(ELAN4_DEV *dev, ELAN4_CTXT *ctxt)
34673 +{ 
34674 +    struct proc_dir_entry *p;
34675 +    char name[32];
34676 +    int  t;
34677 +
34678 +    /* GNAT 7565: Need to hold kernel lock when adding/removing
34679 +     * procfs entries outside the module init/fini paths
34680 +     */
34681 +    lock_kernel();
34682 +
34683 +    if (dev->dev_osdep.ctxtdir) 
34684 +    {
34685 +       sprintf(name,"%d", ctxt->ctxt_num);
34686 +       if ((ctxt->procdir = proc_mkdir (name, dev->dev_osdep.ctxtdir)) != NULL) 
34687 +       {
34688 +           for (t = 0; t < NUM_HASH_TABLES; t++) 
34689 +           {
34690 +               sprintf(name , "translations_%d", t);
34691 +               
34692 +               ctxt->trans_index[t].tbl   = t;
34693 +               ctxt->trans_index[t].index = 0;
34694 +               
34695 +               if ((p = create_proc_entry (name, 0, ctxt->procdir)) != NULL)
34696 +               {
34697 +                   p->proc_fops = &qsnet_ctxt_trans_fops;
34698 +                   p->data      = & ctxt->trans_index[t];
34699 +                   p->owner     = THIS_MODULE;
34700 +               }
34701 +           }
34702 +       }
34703 +    }
34704 +
34705 +    unlock_kernel();
34706 +}
34707 +
34708 +void 
34709 +proc_removectxt(ELAN4_DEV *dev, ELAN4_CTXT *ctxt)
34710 +{
34711 +    int  t;
34712 +    char name[32];
34713 +
34714 +    /* GNAT 7565: Need to hold kernel lock when adding/removing
34715 +     * procfs entries outside the module init/fini paths
34716 +     */
34717 +    lock_kernel();
34718 +
34719 +    if (dev->dev_osdep.ctxtdir && ctxt->procdir != NULL) 
34720 +    {
34721 +       for (t = 0; t < NUM_HASH_TABLES; t++) 
34722 +       {
34723 +           sprintf(name , "translations_%d", t);
34724 +           remove_proc_entry (name, ctxt->procdir);
34725 +       }
34726 +       
34727 +       sprintf(name,"%d", ctxt->ctxt_num);
34728 +       remove_proc_entry (name, dev->dev_osdep.ctxtdir);
34729 +    }
34730 +
34731 +    unlock_kernel();
34732 +}
34733 +
34734 +static struct device_info 
34735 +{
34736 +    char *name;
34737 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
34738 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
34739 +    unsigned minrev;
34740 +} device_info[] = {
34741 +    {"devinfo",     proc_read_devinfo,     NULL,                   0},
34742 +    {"position",    proc_read_position,    proc_write_position,    0},
34743 +    {"temp",        proc_read_temp,        NULL,                   1},
34744 +    {"eccerr",      proc_read_eccerr,      NULL,                   0},
34745 +    {"vpd",         proc_read_vpd,         NULL,                   0},
34746 +    {"linkportkey", proc_read_linkportkey, proc_write_linkportkey, 0},
34747 +};
34748 +
34749 +static int
34750 +proc_read_link_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34751 +{
34752 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34753 +    char      *p   = page;
34754 +
34755 +    p += sprintf (p, "%20s %ld\n", "link_errors", dev->dev_stats.s_link_errors);
34756 +    p += sprintf (p, "%20s %ld\n", "lock_errors", dev->dev_stats.s_lock_errors);
34757 +    p += sprintf (p, "%20s %ld\n", "deskew_errors", dev->dev_stats.s_deskew_errors);
34758 +    p += sprintf (p, "%20s %ld\n", "phase_errors", dev->dev_stats.s_phase_errors);
34759 +
34760 +    p += sprintf (p, "%20s %ld\n", "data_errors", dev->dev_stats.s_data_errors);
34761 +    p += sprintf (p, "%20s %ld\n", "fifo_overflow0", dev->dev_stats.s_fifo_overflow0);
34762 +    p += sprintf (p, "%20s %ld\n", "fifo_overflow1", dev->dev_stats.s_fifo_overflow1);
34763 +    p += sprintf (p, "%20s %ld\n", "mod45changed", dev->dev_stats.s_mod45changed);
34764 +    p += sprintf (p, "%20s %ld\n", "pack_not_seen", dev->dev_stats.s_pack_not_seen);
34765 +
34766 +    p += sprintf (p, "%20s %ld\n", "linkport_keyfail", dev->dev_stats.s_linkport_keyfail);
34767 +    p += sprintf (p, "%20s %ld\n", "eop_reset", dev->dev_stats.s_eop_reset);
34768 +    p += sprintf (p, "%20s %ld\n", "bad_length", dev->dev_stats.s_bad_length);
34769 +    p += sprintf (p, "%20s %ld\n", "crc_error", dev->dev_stats.s_crc_error);
34770 +    p += sprintf (p, "%20s %ld\n", "crc_bad", dev->dev_stats.s_crc_bad);
34771 +
34772 +    p += sprintf (p, "%20s %ld\n", "cproc_timeout", dev->dev_stats.s_cproc_timeout);
34773 +    p += sprintf (p, "%20s %ld\n", "dproc_timeout", dev->dev_stats.s_dproc_timeout);
34774 +
34775 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34776 +}
34777 +
34778 +static char *
34779 +proc_sprintf_bucket_stat (char *p, char *name, unsigned long *stats, int *buckets)
34780 +{
34781 +    int i;
34782 +
34783 +    p += sprintf (p, "%20s ", name);
34784 +
34785 +    for (i = 0; i < ELAN4_DEV_STATS_BUCKETS-1; i++)
34786 +       p += sprintf (p, "%ld(<=%d) ", stats[i], buckets[i]);
34787 +    p += sprintf (p, "%ld(>%d)\n", stats[i], buckets[i-1]);
34788 +
34789 +    return p;
34790 +}
34791 +
34792 +static int
34793 +proc_read_intr_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34794 +{
34795 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34796 +    char      *p   = page;
34797 +
34798 +    p += sprintf (p, "%20s %ld\n", "interrupts",       dev->dev_stats.s_interrupts);
34799 +    p += sprintf (p, "%20s %ld\n", "haltints",         dev->dev_stats.s_haltints);
34800 +
34801 +    p += sprintf (p, "%20s %ld\n", "mainint_punts",    dev->dev_stats.s_mainint_punts);
34802 +    p += sprintf (p, "%20s %ld\n", "mainint_rescheds", dev->dev_stats.s_mainint_rescheds);
34803 +
34804 +    p  = proc_sprintf_bucket_stat (p, "mainints", dev->dev_stats.s_mainints, MainIntBuckets);
34805 +
34806 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34807 +}
34808 +
34809 +static int
34810 +proc_read_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34811 +{
34812 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34813 +    char      *p   = page;
34814 +
34815 +    p += sprintf (p, "%20s %ld\n", "cproc_traps", dev->dev_stats.s_cproc_traps);
34816 +    p += sprintf (p, "%20s %ld\n", "dproc_traps", dev->dev_stats.s_dproc_traps);
34817 +    p += sprintf (p, "%20s %ld\n", "eproc_traps", dev->dev_stats.s_eproc_traps);
34818 +    p += sprintf (p, "%20s %ld\n", "iproc_traps", dev->dev_stats.s_iproc_traps);
34819 +    p += sprintf (p, "%20s %ld\n", "tproc_traps", dev->dev_stats.s_tproc_traps);
34820 +
34821 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34822 +}
34823 +
34824 +static int
34825 +proc_read_cproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34826 +{
34827 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34828 +    char      *p   = page;
34829 +    int        i;
34830 +    extern char *const CProcTrapNames[];
34831 +
34832 +    for (i = 0; i < sizeof (dev->dev_stats.s_cproc_trap_types)/sizeof(dev->dev_stats.s_cproc_trap_types[0]); i++)
34833 +       p += sprintf (p, "%-40s %ld\n", CProcTrapNames[i], dev->dev_stats.s_cproc_trap_types[i]);
34834 +
34835 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34836 +}
34837 +
34838 +static int
34839 +proc_read_dproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34840 +{
34841 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34842 +    char      *p   = page;
34843 +    int        i;
34844 +    extern char *const DProcTrapNames[];
34845 +
34846 +    for (i = 0; i < sizeof (dev->dev_stats.s_dproc_trap_types)/sizeof(dev->dev_stats.s_dproc_trap_types[0]); i++)
34847 +       p += sprintf (p, "%-40s %ld\n", DProcTrapNames[i], dev->dev_stats.s_dproc_trap_types[i]);
34848 +
34849 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34850 +}
34851 +
34852 +static int
34853 +proc_read_eproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34854 +{
34855 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34856 +    char      *p   = page;
34857 +    int        i;
34858 +    extern char *const EProcTrapNames[];
34859 +
34860 +    for (i = 0; i < sizeof (dev->dev_stats.s_eproc_trap_types)/sizeof(dev->dev_stats.s_eproc_trap_types[0]); i++)
34861 +       p += sprintf (p, "%-40s %ld\n", EProcTrapNames[i], dev->dev_stats.s_eproc_trap_types[i]);
34862 +
34863 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34864 +}
34865 +
34866 +static int
34867 +proc_read_iproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34868 +{
34869 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34870 +    char      *p   = page;
34871 +    int        i;
34872 +    extern char *const IProcTrapNames[];
34873 +
34874 +    for (i = 0; i < sizeof (dev->dev_stats.s_iproc_trap_types)/sizeof(dev->dev_stats.s_iproc_trap_types[0]); i++)
34875 +       p += sprintf (p, "%-40s %ld\n", IProcTrapNames[i], dev->dev_stats.s_iproc_trap_types[i]);
34876 +
34877 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34878 +}
34879 +
34880 +static int
34881 +proc_read_tproc_trap_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34882 +{
34883 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34884 +    char      *p   = page;
34885 +    int        i;
34886 +    extern char *const TProcTrapNames[];
34887 +
34888 +    for (i = 0; i < sizeof (dev->dev_stats.s_tproc_trap_types)/sizeof(dev->dev_stats.s_tproc_trap_types[0]); i++)
34889 +       p += sprintf (p, "%-40s %ld\n", TProcTrapNames[i], dev->dev_stats.s_tproc_trap_types[i]);
34890 +
34891 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34892 +}
34893 +
34894 +static int
34895 +proc_read_sdram_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34896 +{
34897 +    ELAN4_DEV *dev = (ELAN4_DEV *) data;
34898 +    char      *p   = page;
34899 +
34900 +    p += sprintf (p, "%20s %ld\n",  "correctable_errors", dev->dev_stats.s_correctable_errors);
34901 +    p += sprintf (p, "%20s %ld\n",  "multiple_errors",    dev->dev_stats.s_multiple_errors);
34902 +    p += sprintf (p, "%20s %ldK\n", "sdram_bytes_free",   dev->dev_stats.s_sdram_bytes_free/1024);
34903 +
34904 +    return (proc_calc_metrics (page, start, off, count, eof, p - page));
34905 +}
34906 +
34907 +void
34908 +elan4_ringbuf_store (ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev)
34909 +{
34910 +    int newend;
34911 +    
34912 +    ASSERT (kmutex_is_locked (&dev->dev_lock));
34913 +
34914 +    memcpy(&ringbuf->routes[ringbuf->end], route, sizeof(E4_VirtualProcessEntry));
34915 +    newend = ringbuf->end + 1;
34916 +    if (newend >= DEV_STASH_ROUTE_COUNT)
34917 +        newend -= DEV_STASH_ROUTE_COUNT;
34918 +    if (newend == ringbuf->start)
34919 +        ringbuf->start += 1;
34920 +    if (ringbuf->start >= DEV_STASH_ROUTE_COUNT)
34921 +        ringbuf->start -= DEV_STASH_ROUTE_COUNT;
34922 +    ringbuf->end = newend;
34923 +}
34924 +       
34925 +static int
34926 +proc_read_dproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
34927 +{
34928 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
34929 +       char      *p   = page;
34930 +       unsigned int *dproc_timeout;
34931 +
34932 +       dproc_timeout = dev->dev_dproc_timeout;
34933 +
34934 +       if (!dproc_timeout) 
34935 +               p += sprintf (p, "No stats available\n");
34936 +       else
34937 +       {
34938 +               int i;
34939 +
34940 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
34941 +                       if (dproc_timeout[i] != 0) 
34942 +                               p += sprintf (p, "Node %d: %u errors\n", i, dproc_timeout[i]);
34943 +       }
34944 +
34945 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
34946 +}
34947 +
static int
proc_read_dproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
{
	/*
	 * Dump the ring buffer of routes stashed on DMA processor timeouts,
	 * oldest first, one "Route <hi> <lo>-><string>" line per entry.
	 * The ring is read under dev_lock so it cannot change mid-dump.
	 */
	ELAN4_DEV *dev = (ELAN4_DEV *) data;
	char      *p   = page;
	ELAN4_ROUTE_RINGBUF *ringbuf;
	char      routestr[33];	/* elan4_route2str output; zeroed below */

	ringbuf = &dev->dev_dproc_timeout_routes;

	/* NOTE(review): ringbuf is the address of an embedded struct member,
	 * so this can never be NULL and the branch below is unreachable. */
	if (!ringbuf)
		p += sprintf (p, "No stats available\n");
	else
	{
		int start;	/* local ring index copies; this shadows the 'start'
				 * parameter only inside these braces */
		int end;
		int i;

		memset(&routestr, 0, 33);

		kmutex_lock(&dev->dev_lock);

		start = ringbuf->start;
		end = ringbuf->end;

		/* if the ring has wrapped (end behind start), first print from
		 * start up to the end of the array... */
		if (end < start)
			end = DEV_STASH_ROUTE_COUNT;

		for (i=start; i<end; i++)
		{
			elan4_route2str (&ringbuf->routes[i], routestr);
			p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
		}

		/* ...then the wrapped tail from slot 0 up to the real end index */
		if (ringbuf->end < start)
		{
			start = 0;
			end = ringbuf->end;
			for (i=start; i<end; i++)
			{
				elan4_route2str (&ringbuf->routes[i], routestr);
				p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
			}
		}

		kmutex_unlock(&dev->dev_lock);
	}

	/* 'start' here is the function parameter again (shadow ended above) */
	return (proc_calc_metrics (page, start, off, count, eof, p - page));
}
34998 +
34999 +
35000 +static int
35001 +proc_read_cproc_timeout_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
35002 +{
35003 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
35004 +       char      *p   = page;
35005 +       unsigned int *cproc_timeout;
35006 +
35007 +       cproc_timeout = dev->dev_cproc_timeout;
35008 +
35009 +       if (!cproc_timeout) 
35010 +               p += sprintf (p, "No stats available\n");
35011 +       else
35012 +       {
35013 +               int i;
35014 +
35015 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
35016 +                       if (cproc_timeout[i] != 0) 
35017 +                               p += sprintf (p, "Node %d: %u errors\n", i, cproc_timeout[i]);
35018 +       }
35019 +
35020 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
35021 +}
35022 +
static int
proc_read_cproc_timeout_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
{
	/*
	 * Dump the ring buffer of routes stashed on command processor timeouts,
	 * oldest first, one "Route <hi> <lo>-><string>" line per entry.
	 * The ring is read under dev_lock so it cannot change mid-dump.
	 */
	ELAN4_DEV *dev = (ELAN4_DEV *) data;
	char      *p   = page;
	ELAN4_ROUTE_RINGBUF *ringbuf;
	char      routestr[33];	/* elan4_route2str output; zeroed below */

	ringbuf = &dev->dev_cproc_timeout_routes;

	/* NOTE(review): ringbuf is the address of an embedded struct member,
	 * so this can never be NULL and the branch below is unreachable. */
	if (!ringbuf)
		p += sprintf (p, "No stats available\n");
	else
	{
		int start;	/* local ring index copies; this shadows the 'start'
				 * parameter only inside these braces */
		int end;
		int i;

		memset(&routestr, 0, 33);

		kmutex_lock(&dev->dev_lock);

		start = ringbuf->start;
		end = ringbuf->end;

		/* if the ring has wrapped (end behind start), first print from
		 * start up to the end of the array... */
		if (end < start)
			end = DEV_STASH_ROUTE_COUNT;

		for (i=start; i<end; i++)
		{
			elan4_route2str (&ringbuf->routes[i], routestr);
			p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
		}

		/* ...then the wrapped tail from slot 0 up to the real end index */
		if (ringbuf->end < start)
		{
			start = 0;
			end = ringbuf->end;
			for (i=start; i<end; i++)
			{
				elan4_route2str (&ringbuf->routes[i], routestr);
				p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
			}
		}

		kmutex_unlock(&dev->dev_lock);
	}

	/* 'start' here is the function parameter again (shadow ended above) */
	return (proc_calc_metrics (page, start, off, count, eof, p - page));
}
35073 +
35074 +static int
35075 +proc_read_traperr_stats (char *page, char **start, off_t off, int count, int *eof, void *data)
35076 +{
35077 +       ELAN4_DEV *dev = (ELAN4_DEV *) data;
35078 +       char      *p   = page;
35079 +       unsigned int *ack_errors;
35080 +
35081 +       ack_errors = dev->dev_ack_errors;
35082 +
35083 +       if (!ack_errors) 
35084 +               p += sprintf (p, "No stats available\n");
35085 +       else
35086 +       {
35087 +               int i;
35088 +
35089 +               for (i=0; i<dev->dev_position.pos_nodes; i++) 
35090 +                       if (ack_errors[i] != 0) 
35091 +                               p += sprintf (p, "Node %d: %u errors\n", i, ack_errors[i]);
35092 +       }
35093 +
35094 +       return (proc_calc_metrics (page, start, off, count, eof, p - page));
35095 +}
35096 +
static int
proc_read_ackerror_routes (char *page, char **start, off_t off, int count, int *eof, void *data)
{
	/*
	 * Dump the ring buffer of routes stashed on ack errors, oldest first,
	 * one "Route <hi> <lo>-><string>" line per entry.  The ring is read
	 * under dev_lock so it cannot change mid-dump.
	 */
	ELAN4_DEV *dev = (ELAN4_DEV *) data;
	char      *p   = page;
	ELAN4_ROUTE_RINGBUF *ringbuf;
	char      routestr[33];	/* elan4_route2str output; zeroed below */

	ringbuf = &dev->dev_ack_error_routes;

	/* NOTE(review): ringbuf is the address of an embedded struct member,
	 * so this can never be NULL and the branch below is unreachable. */
	if (!ringbuf)
		p += sprintf (p, "No stats available\n");
	else
	{
		int start;	/* local ring index copies; this shadows the 'start'
				 * parameter only inside these braces */
		int end;
		int i;

		memset(&routestr, 0, 33);

		kmutex_lock(&dev->dev_lock);

		start = ringbuf->start;
		end = ringbuf->end;

		/* if the ring has wrapped (end behind start), first print from
		 * start up to the end of the array... */
		if (end < start)
			end = DEV_STASH_ROUTE_COUNT;

		for (i=start; i<end; i++)
		{
			elan4_route2str (&ringbuf->routes[i], routestr);
			p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
		}

		/* ...then the wrapped tail from slot 0 up to the real end index */
		if (ringbuf->end < start)
		{
			start = 0;
			end = ringbuf->end;
			for (i=start; i<end; i++)
			{
				elan4_route2str (&ringbuf->routes[i], routestr);
				p += sprintf (p, "Route %llx %llx->%s\n", (long long)ringbuf->routes[i].Values[0], (long long)ringbuf->routes[i].Values[1], routestr);
			}
		}

		kmutex_unlock(&dev->dev_lock);
	}

	/* 'start' here is the function parameter again (shadow ended above) */
	return (proc_calc_metrics (page, start, off, count, eof, p - page));
}
35147 +
/*
 * Table of the read-only files created under /proc .../deviceN/stats.
 * All entries are read-only here: write_func is NULL for every row.
 */
static struct stats_info 
{
    char *name;
    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); 
} stats_info[] = {
    {"link",     proc_read_link_stats, NULL},
    {"intr",     proc_read_intr_stats, NULL},
    {"trap",     proc_read_trap_stats, NULL},
    {"cproc",    proc_read_cproc_trap_stats, NULL},
    {"dproc",    proc_read_dproc_trap_stats, NULL},
    {"eproc",    proc_read_eproc_trap_stats, NULL},
    {"iproc",    proc_read_iproc_trap_stats, NULL},
    {"tproc",    proc_read_tproc_trap_stats, NULL},
    {"sdram",    proc_read_sdram_stats, NULL},
    {"trapdmaerr", proc_read_traperr_stats, NULL},
    {"dproctimeout", proc_read_dproc_timeout_stats, NULL},
    {"cproctimeout", proc_read_cproc_timeout_stats, NULL},
    {"dproctimeoutroutes", proc_read_dproc_timeout_routes, NULL},
    {"cproctimeoutroutes", proc_read_cproc_timeout_routes, NULL},
    {"ackerrroutes", proc_read_ackerror_routes, NULL},
};
35170 +
static int
proc_read_sysconfig (char *page, char **start, off_t off, int count, int *eof, void *data)
{
    /*
     * Render the device syscontrol register as one line of space-separated
     * flag names plus the TABLEn_{MASK,PAGE}_SIZE fields.  Single-shot read:
     * all output is produced at off == 0; later offsets return EOF.
     */
    ELAN4_DEV *dev        = (ELAN4_DEV *) data;
    E4_uint32  syscontrol = dev->dev_syscontrol;
    int	       len	  = 0;

   *eof = 1;
   if (off != 0)
      return (0);

    /* each flag prints its name, preceded by a space except for the first */
    if (syscontrol & CONT_EN_ALL_SETS)
	len += sprintf (page + len, "%sEN_ALL_SETS", len == 0 ? "" : " ");
    if (syscontrol & CONT_MMU_ENABLE)
	len += sprintf (page + len, "%sMMU_ENABLE", len == 0 ? "" : " ");
    if (syscontrol & CONT_CACHE_HASH_TABLE)
	len += sprintf (page + len, "%sCACHE_HASH_TABLE", len == 0 ? "" : " ");
    if (syscontrol & CONT_CACHE_CHAINS)
	len += sprintf (page + len, "%sCACHE_CHAINS", len == 0 ? "" : " ");
    if (syscontrol & CONT_CACHE_ROOT_CNTX)
	len += sprintf (page + len, "%sCACHE_ROOT_CNTX", len == 0 ? "" : " ");
    if (syscontrol & CONT_CACHE_STEN_ROUTES)
	len += sprintf (page + len, "%sCACHE_STEN_ROUTES", len == 0 ? "" : " ");
    if (syscontrol & CONT_CACHE_DMA_ROUTES)
	len += sprintf (page + len, "%sCACHE_DMA_ROUTES", len == 0 ? "" : " ");
    if (syscontrol & CONT_INHIBIT_MAX_CHAIN_ITEMS)
	len += sprintf (page + len, "%sINHIBIT_MAX_CHAIN_ITEMS", len == 0 ? "" : " ");

    /* multi-bit fields are extracted by shift+mask and printed as key=value */
    len += sprintf (page + len, "%sTABLE0_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_MASK_SIZE_SHIFT) & PAGE_MASK_MASK);
    len += sprintf (page + len, "%sTABLE0_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE0_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK);
    len += sprintf (page + len, "%sTABLE1_MASK_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_MASK_SIZE_SHIFT) & PAGE_MASK_MASK);
    len += sprintf (page + len, "%sTABLE1_PAGE_SIZE=%d", len == 0 ? "" : " ", (syscontrol >> CONT_TABLE1_PAGE_SIZE_SHIFT) & PAGE_SIZE_MASK);

    if (syscontrol & CONT_2K_NOT_1K_DMA_PACKETS)
	len += sprintf (page + len, "%s2K_NOT_1K_DMA_PACKETS", len == 0 ? "" : " ");
    if (syscontrol & CONT_ALIGN_ALL_DMA_PACKETS)
	len += sprintf (page + len, "%sALIGN_ALL_DMA_PACKETS", len == 0 ? "" : " ");
    if (syscontrol & CONT_DIRECT_MAP_PCI_WRITES)
	len += sprintf (page + len, "%sDIRECT_MAP_PCI_WRITES", len == 0 ? "" : " ");

    len += sprintf (page + len, "\n");

   *start = page;
   return (len);
}
35216 +
35217 +static int
35218 +proc_write_sysconfig (struct file *file, const char *ubuffer, unsigned long count, void *data)
35219 +{
35220 +    ELAN4_DEV *dev       = (ELAN4_DEV *) data;
35221 +    unsigned long page   = __get_free_page (GFP_KERNEL);
35222 +    char         *buffer = (char *)page;
35223 +    int            add   = 0;
35224 +    int            sub   = 0;
35225 +    
35226 +    count = MIN (count, PAGE_SIZE - 1);
35227 +    if (copy_from_user (buffer, ubuffer, count))
35228 +    {
35229 +       free_page (page);
35230 +       return (-EFAULT);
35231 +    }
35232 +   
35233 +    buffer[count] = 0;                         /* terminate string */
35234 +
35235 +    while (*buffer != 0)
35236 +    {
35237 +       char *ptr;
35238 +       char *end;
35239 +       int   ch;
35240 +       int   val;
35241 +       int   op;
35242 +
35243 +       ch = *buffer;
35244 +       if (ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n')
35245 +       {
35246 +           buffer++;
35247 +           continue;
35248 +       }
35249 +      
35250 +       op = *buffer;
35251 +       if (op == '+' || op == '-')
35252 +           buffer++;
35253 +
35254 +       for (end = buffer; *end != 0; end++)
35255 +           if (*end == ' ' || *end == '\t' ||
35256 +               *end == '\r' || *end == '\n')
35257 +               break;
35258 +      
35259 +       if (end == buffer)
35260 +           break;
35261 +      
35262 +       ch = *end;
35263 +       *end = 0;
35264 +
35265 +       for (ptr = buffer; *ptr != 0; ptr++)
35266 +           if ('a' <= *ptr && *ptr <= 'z')
35267 +               *ptr = *ptr + 'A' - 'a';
35268 +      
35269 +       if (!strcmp (buffer, "EN_ALL_SETS"))
35270 +           val = CONT_EN_ALL_SETS;
35271 +       if (!strcmp (buffer, "CACHE_HASH_TABLE"))
35272 +           val = CONT_CACHE_HASH_TABLE;
35273 +       else if (!strcmp (buffer, "CACHE_CHAINS"))
35274 +           val = CONT_CACHE_CHAINS;
35275 +       else if (!strcmp (buffer, "CACHE_ROOT_CNTX"))
35276 +           val = CONT_CACHE_ROOT_CNTX;
35277 +       else if (!strcmp (buffer, "CACHE_STEN_ROUTES"))
35278 +           val = CONT_CACHE_STEN_ROUTES;
35279 +       else if (!strcmp (buffer, "CACHE_DMA_ROUTES"))
35280 +           val = CONT_CACHE_DMA_ROUTES;
35281 +       else if (!strcmp (buffer, "2K_NOT_1K_DMA_PACKETS"))
35282 +           val = CONT_2K_NOT_1K_DMA_PACKETS;
35283 +       else if (!strcmp (buffer, "ALIGN_ALL_DMA_PACKETS"))
35284 +           val = CONT_ALIGN_ALL_DMA_PACKETS;
35285 +       else
35286 +           val = 0;
35287 +
35288 +       if (op == '+')
35289 +           add |= val;
35290 +       else if (op == '-')
35291 +           sub |= val;
35292 +
35293 +       *end = ch;
35294 +       buffer = end;
35295 +    }
35296 +
35297 +    if ((add | sub) & CONT_EN_ALL_SETS)
35298 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
35299 +
35300 +    CHANGE_SYSCONTROL (dev, add, sub);
35301 +   
35302 +    if ((add | sub) & CONT_EN_ALL_SETS)
35303 +       elan4_sdram_flushcache (dev, 0, E4_CacheSize);
35304 +
35305 +    free_page (page);
35306 +    return (count);
35307 +}
35308 +
/*
 * Table of the files created under /proc .../deviceN/config.
 * "sysconfig" is both readable and writable.
 */
static struct config_info 
{
    char *name;
    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data); 
} config_info[] = {
    {"sysconfig",  proc_read_sysconfig, proc_write_sysconfig},
};
35317 +
static int
elan4_trans_open (struct inode *inode, struct file *file)
{
    /*
     * Open handler for the per-device "translations_N" proc files: allocate
     * the per-open cursor state that elan4_trans_read uses to walk MMU hash
     * table N one index at a time.
     */
    ELAN4_TRANS_INDEX   *trans  = (ELAN4_TRANS_INDEX *)( PDE(inode)->data );
    ELAN4_TRANS_PRIVATE *pr;

    if ((pr = kmalloc (sizeof (ELAN4_TRANS_PRIVATE), GFP_KERNEL)) == NULL)
	return (-ENOMEM);
    
    /* recover the owning device from the embedded trans_index[] member
     * (container_of-style arithmetic via list_entry) */
    pr->pr_tbl   = trans->tbl;
    pr->pr_dev   = list_entry(trans, ELAN4_DEV, trans_index[trans->tbl] );
    pr->pr_index = 0;

    /* no output buffered yet; elan4_trans_read allocates pr_page on demand */
    pr->pr_len     = 0;
    pr->pr_off     = 0;
    pr->pr_changed = 1;
    pr->pr_page    = NULL;

    file->private_data = (void *) pr;

    MOD_INC_USE_COUNT;
    return (0);
}
35341 +
35342 +static ssize_t
35343 +elan4_trans_read (struct file *file, char *buf, size_t count, loff_t *ppos)
35344 +{
35345 +    ELAN4_TRANS_PRIVATE *pr = (ELAN4_TRANS_PRIVATE *) file->private_data;
35346 +    ELAN4_DEV           *dev = pr->pr_dev;
35347 +    int                  error;
35348 +
35349 +    if ( pr->pr_index >= dev->dev_hashsize[pr->pr_tbl] ) 
35350 +       return (0);
35351 +
35352 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
35353 +       return (error);
35354 +
35355 +    if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
35356 +       return (-ENOMEM);
35357 +
35358 +    if (pr->pr_off >= pr->pr_len)
35359 +    {
35360 +       if (elan4mmu_display_mmuhash(dev, pr->pr_tbl, &pr->pr_index, pr->pr_page, count)) 
35361 +           pr->pr_len     = strlen (pr->pr_page);
35362 +       else
35363 +           pr->pr_len     = 0; 
35364 +
35365 +       pr->pr_off     = 0;
35366 +       pr->pr_changed = 0;
35367 +       pr->pr_index++; 
35368 +    }
35369 +
35370 +    if (count >= (pr->pr_len - pr->pr_off))
35371 +       count = pr->pr_len - pr->pr_off;
35372 +
35373 +    copy_to_user (buf, pr->pr_page + pr->pr_off, count);
35374 +
35375 +    pr->pr_off += count;
35376 +    *ppos      += count;
35377 +
35378 +    if (pr->pr_off >= pr->pr_len)
35379 +    {
35380 +       free_page ((unsigned long) pr->pr_page);
35381 +       pr->pr_page = NULL;
35382 +    }
35383 +
35384 +    return (count);
35385 +}
35386 +
35387 +static int
35388 +elan4_trans_release (struct inode *inode, struct file *file)
35389 +{
35390 +    ELAN4_TRANS_PRIVATE *pr = (ELAN4_TRANS_PRIVATE *) file->private_data;
35391 +
35392 +    if (pr->pr_page)
35393 +       free_page ((unsigned long) pr->pr_page);
35394 +    kfree (pr);
35395 +    
35396 +    MOD_DEC_USE_COUNT;
35397 +    return (0);
35398 +}
35399 +
/* file_operations for the "translations_N" proc files (old GCC-style
 * designated initializers, consistent with the rest of this driver) */
static struct file_operations qsnet_trans_fops = 
{
	open:    elan4_trans_open,
	release: elan4_trans_release,
	read:    elan4_trans_read,
};
35406 +
void
elan4_procfs_device_init (ELAN4_DEV *dev)
{
    /*
     * Create the /proc .../elan4/deviceN tree for a newly attached device:
     * the device_info entries, per-hash-table "translations_N" dump files,
     * the "config" directory, the "stats" directory (including per-table
     * translation statistics), and an empty "ctxt" directory.
     * Mirrored by elan4_procfs_device_fini.
     */
    struct proc_dir_entry *p;
    char name[NAME_MAX];
    int i;

    sprintf (name, "device%d", dev->dev_instance);
    dev->dev_osdep.procdir  = proc_mkdir (name, elan4_procfs_root);

    for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++)
    {
	/* skip entries not supported by this silicon revision */
	if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev)
	    continue;

	if ((p = create_proc_entry (device_info[i].name, 0, dev->dev_osdep.procdir)) != NULL)
	{
	    p->read_proc  = device_info[i].read_func;
	    p->write_proc = device_info[i].write_func;
	    p->data       = dev;
	    p->owner      = THIS_MODULE;
	}
    }

    /* one raw translation dump file per MMU hash table, served via
     * qsnet_trans_fops rather than read_proc */
    for(i = 0; i < NUM_HASH_TABLES; i++) {
	sprintf (name, "translations_%d",i);

	dev->trans_index[i].tbl   = i;

	if ((p = create_proc_entry (name, 0, dev->dev_osdep.procdir)) != NULL)
	{
	    p->proc_fops = &qsnet_trans_fops;
	    p->data      = & dev->trans_index[i];
	    p->owner     = THIS_MODULE;
	}
    }

    dev->dev_osdep.configdir = proc_mkdir ("config", dev->dev_osdep.procdir);
    for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++)
    {
	if ((p = create_proc_entry (config_info[i].name, 0, dev->dev_osdep.configdir)) != NULL)
	{
	    p->read_proc  = config_info[i].read_func;
	    p->write_proc = config_info[i].write_func;
	    p->data       = dev;
	    p->owner      = THIS_MODULE;
	}
    }

    dev->dev_osdep.statsdir = proc_mkdir ("stats", dev->dev_osdep.procdir);
    for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++)
    {
	if ((p = create_proc_entry (stats_info[i].name, 0, dev->dev_osdep.statsdir)) != NULL)
	{
	    p->read_proc  = stats_info[i].read_func;
	    p->write_proc = stats_info[i].write_func;
	    p->data       = dev;
	    p->owner      = THIS_MODULE;
	}
    }
    /* per-hash-table translation statistics, with fixed histogram buckets */
    for(i = 0; i < NUM_HASH_TABLES; i++) {
	sprintf (name, "translations_%d",i);

	dev->trans_stats[i].tbl   = i;
	dev->trans_stats[i].buckets[0] = 1;
	dev->trans_stats[i].buckets[1] = 5;
	dev->trans_stats[i].buckets[2] = 10;
	dev->trans_stats[i].buckets[3] = 50;
	dev->trans_stats[i].buckets[4] = 100;
	dev->trans_stats[i].buckets[5] = 200;
	dev->trans_stats[i].buckets[6] = 99999999;	/* catch-all final bucket */

	if ((p = create_proc_entry (name, 0, dev->dev_osdep.statsdir)) != NULL)
	{
	    p->read_proc  = proc_read_stats_translations;
	    p->write_proc = proc_write_stats_translations;
	    p->data       = & dev->trans_stats[i];
	    p->owner      = THIS_MODULE;
	}
    }

    dev->dev_osdep.ctxtdir = proc_mkdir ("ctxt", dev->dev_osdep.procdir);
}
35490 +
void
elan4_procfs_device_fini (ELAN4_DEV *dev)
{
    /*
     * Tear down the /proc .../elan4/deviceN tree created by
     * elan4_procfs_device_init, removing entries before their parent
     * directories.
     */
    char name[NAME_MAX];
    int i;

    if (dev->dev_osdep.ctxtdir)
	remove_proc_entry ("ctxt", dev->dev_osdep.procdir);

    /* stats directory: named stats files plus per-table translation stats */
    for (i = 0; i < sizeof (stats_info)/sizeof (stats_info[0]); i++)
	remove_proc_entry (stats_info[i].name, dev->dev_osdep.statsdir);

    for (i = 0; i < NUM_HASH_TABLES; i++) {
	sprintf(name , "translations_%d", i);
	remove_proc_entry (name, dev->dev_osdep.statsdir);
    }
    remove_proc_entry ("stats", dev->dev_osdep.procdir);

    for (i = 0; i < sizeof (config_info)/sizeof (config_info[0]); i++)
	remove_proc_entry (config_info[i].name, dev->dev_osdep.configdir);
    remove_proc_entry ("config", dev->dev_osdep.procdir);

    for (i = 0; i < sizeof (device_info)/sizeof (device_info[0]); i++)
    {
	/* entries skipped at init time for this revision were never created */
	if (dev->dev_devinfo.dev_revision_id < device_info[i].minrev)
	    continue;
	
	remove_proc_entry (device_info[i].name, dev->dev_osdep.procdir);
    }
    
    /* top-level per-table translation dump files */
    for (i = 0; i < NUM_HASH_TABLES; i++) {
	sprintf(name , "translations_%d", i);
	remove_proc_entry (name, dev->dev_osdep.procdir);
    }

    sprintf (name, "device%d", dev->dev_instance);
    remove_proc_entry (name, elan4_procfs_root);
}
35529 +
void
elan4_procfs_init(void)
{
    /*
     * Module-load time setup: create the global /proc .../elan4 tree and its
     * "config" directory of module tunables.  Mirrored by elan4_procfs_fini.
     */
    struct proc_dir_entry *p;
    
    elan4_procfs_root = proc_mkdir("elan4", qsnet_procfs_root);
    elan4_config_root = proc_mkdir("config", elan4_procfs_root);

    /* simple integer/hex tunables registered via the qsnet helpers */
    qsnet_proc_register_hex (elan4_config_root, "elan4_debug",              &elan4_debug,              0);
    qsnet_proc_register_hex (elan4_config_root, "elan4_debug_toconsole",    &elan4_debug_toconsole,    0);
    qsnet_proc_register_hex (elan4_config_root, "elan4_debug_tobuffer",     &elan4_debug_tobuffer,     0);
    qsnet_proc_register_int (elan4_config_root, "elan4_debug_mmu",          &elan4_debug_mmu,          0);
    qsnet_proc_register_int (elan4_config_root, "elan4_mainint_punt_loops", &elan4_mainint_punt_loops, 0);
    qsnet_proc_register_hex (elan4_config_root, "user_p2p_route_options",   &user_p2p_route_options,   0);
    qsnet_proc_register_hex (elan4_config_root, "user_bcast_route_options", &user_bcast_route_options, 0);
    qsnet_proc_register_int (elan4_config_root, "user_dproc_retry_count",   &user_dproc_retry_count,   0);
    qsnet_proc_register_int (elan4_config_root, "user_cproc_retry_count",   &user_cproc_retry_count,   0);
    qsnet_proc_register_int (elan4_config_root, "user_pagefault_enabled",   &user_pagefault_enabled,   0);
    qsnet_proc_register_int (elan4_config_root, "num_fault_save",           &num_fault_save,           0);
    qsnet_proc_register_int (elan4_config_root, "min_fault_pages",          &min_fault_pages,          0);
    qsnet_proc_register_int (elan4_config_root, "max_fault_pages",          &max_fault_pages,          0);
    qsnet_proc_register_int (elan4_config_root, "assfail_mode",             &assfail_mode,             0);
   
    /* mmuhash_reduction has its own read/write handlers */
    if ((p = create_proc_entry ("mmuhash_reduction", 0, elan4_config_root)) != NULL)
    {
	p->read_proc  = elan4_read_mmuhash_reduction_func;
	p->write_proc = elan4_write_mmuhash_reduction_func;
	p->data       = NULL;
	p->owner      = THIS_MODULE;
    } 

#if defined(IOPROC_PATCH_APPLIED)
    /* only present when the kernel carries the ioproc patch */
    qsnet_proc_register_int (elan4_config_root, "user_ioproc_enabled",      &user_ioproc_enabled,       0);
#endif
}
35565 +
void
elan4_procfs_fini(void)
{
    /*
     * Module-unload teardown: remove every entry registered in
     * elan4_procfs_init (in roughly reverse order), then the directories.
     */
#if defined(IOPROC_PATCH_APPLIED)
    remove_proc_entry ("user_ioproc_enabled",      elan4_config_root);
#endif

    remove_proc_entry ("mmuhash_reduction",        elan4_config_root);

    remove_proc_entry ("assfail_mode",             elan4_config_root);
    remove_proc_entry ("max_fault_pages",          elan4_config_root);
    remove_proc_entry ("min_fault_pages",          elan4_config_root);
    remove_proc_entry ("num_fault_save",           elan4_config_root);
    remove_proc_entry ("user_pagefault_enabled",   elan4_config_root);
    remove_proc_entry ("user_cproc_retry_count",   elan4_config_root);
    remove_proc_entry ("user_dproc_retry_count",   elan4_config_root);
    remove_proc_entry ("user_bcast_route_options", elan4_config_root);
    remove_proc_entry ("user_p2p_route_options",   elan4_config_root);
    remove_proc_entry ("elan4_mainint_punt_loops", elan4_config_root);
    remove_proc_entry ("elan4_debug_mmu",          elan4_config_root);
    remove_proc_entry ("elan4_debug_tobuffer",     elan4_config_root);
    remove_proc_entry ("elan4_debug_toconsole",    elan4_config_root);
    remove_proc_entry ("elan4_debug",              elan4_config_root);

    remove_proc_entry ("config", elan4_procfs_root);
    remove_proc_entry ("elan4", qsnet_procfs_root);
}
35593 +
/* exported so other modules can attach their own entries under the
 * elan4 proc tree */
EXPORT_SYMBOL(elan4_procfs_root);
EXPORT_SYMBOL(elan4_config_root);
35596 +
35597 +/*
35598 + * Local variables:
35599 + * c-file-style: "stroustrup"
35600 + * End:
35601 + */
35602 diff -urN clean/drivers/net/qsnet/elan4/quadrics_version.h linux-2.6.9/drivers/net/qsnet/elan4/quadrics_version.h
35603 --- clean/drivers/net/qsnet/elan4/quadrics_version.h    1969-12-31 19:00:00.000000000 -0500
35604 +++ linux-2.6.9/drivers/net/qsnet/elan4/quadrics_version.h      2005-09-07 10:39:49.000000000 -0400
35605 @@ -0,0 +1 @@
35606 +#define QUADRICS_VERSION "5.11.3qsnet"
35607 diff -urN clean/drivers/net/qsnet/elan4/regions.c linux-2.6.9/drivers/net/qsnet/elan4/regions.c
35608 --- clean/drivers/net/qsnet/elan4/regions.c     1969-12-31 19:00:00.000000000 -0500
35609 +++ linux-2.6.9/drivers/net/qsnet/elan4/regions.c       2004-10-21 11:31:12.000000000 -0400
35610 @@ -0,0 +1,609 @@
35611 +/*
35612 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
35613 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
35614 + * 
35615 + *    For licensing information please see the supplied COPYING file
35616 + *
35617 + */
35618 +
35619 +#ident "@(#)$Id: regions.c,v 1.22 2004/10/21 15:31:12 david Exp $"
35620 +/*      $Source: /cvs/master/quadrics/elan4mod/regions.c,v $*/
35621 +
35622 +#include <qsnet/kernel.h>
35623 +
35624 +#include <elan4/debug.h>
35625 +#include <elan4/device.h>
35626 +#include <elan4/user.h>
35627 +
35628 +/*================================================================================*/
35629 +/* elan address region management */
35630 +USER_RGN *
35631 +user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail)
35632 +{
35633 +    USER_RGN *rgn;
35634 +    USER_RGN *hirgn;
35635 +    USER_RGN *lorgn;
35636 +    E4_Addr   base;
35637 +    E4_Addr   lastaddr;
35638 +    int              forward;
35639 +    
35640 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex));
35641 +
35642 +    if (uctx->uctx_ergns == NULL)
35643 +       return (NULL);
35644 +
35645 +    rgn = uctx->uctx_ergnlast;
35646 +    if (rgn == NULL)
35647 +       rgn = uctx->uctx_ergns;
35648 +
35649 +    forward = 0;
35650 +    if ((base = rgn->rgn_ebase) < addr)
35651 +    {
35652 +       if (addr <= (base + rgn->rgn_len - 1))
35653 +           return (rgn);                                       /* ergnlast contained addr */
35654 +       
35655 +       hirgn = uctx->uctx_etail;
35656 +       
35657 +       if ((lastaddr = (hirgn->rgn_ebase + hirgn->rgn_len - 1)) < addr)
35658 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
35659 +       
35660 +       if ((addr - base) > (lastaddr - addr))
35661 +           rgn = hirgn;
35662 +       else
35663 +       {
35664 +           rgn = rgn->rgn_enext;
35665 +           forward++;
35666 +       }
35667 +    }
35668 +    else
35669 +    {
35670 +       lorgn = uctx->uctx_ergns;
35671 +
35672 +       if (lorgn->rgn_ebase > addr)
35673 +           return (lorgn);                                     /* lowest region is higher than addr */
35674 +       if ((addr - lorgn->rgn_ebase) < (base - addr))
35675 +       {
35676 +           rgn = lorgn;                                        /* search forward from head */
35677 +           forward++;
35678 +       }
35679 +    }
35680 +    if (forward)
35681 +    {
35682 +       while ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr)
35683 +           rgn = rgn->rgn_enext;
35684 +
35685 +       if (rgn->rgn_ebase <= addr)
35686 +           uctx->uctx_ergnlast = rgn;
35687 +       return (rgn);
35688 +    }
35689 +    else
35690 +    {
35691 +       while (rgn->rgn_ebase > addr)
35692 +           rgn = rgn->rgn_eprev;
35693 +
35694 +       if ((rgn->rgn_ebase + rgn->rgn_len - 1) < addr)
35695 +           return (rgn->rgn_enext);
35696 +       else
35697 +       {
35698 +           uctx->uctx_ergnlast = rgn;
35699 +           return (rgn);
35700 +       }
35701 +    }
35702 +}
35703 +
35704 +static int
35705 +user_addrgn_elan (USER_CTXT *uctx, USER_RGN  *nrgn)
35706 +{
35707 +    USER_RGN *rgn   = user_findrgn_elan (uctx, nrgn->rgn_ebase, 1);
35708 +    E4_Addr   nbase = nrgn->rgn_ebase;
35709 +    E4_Addr   ntop  = nbase + nrgn->rgn_len - 1;
35710 +    E4_Addr   base;
35711 +
35712 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
35713 +
35714 +    if (rgn == NULL)
35715 +    {
35716 +       uctx->uctx_ergns = uctx->uctx_etail = nrgn;
35717 +       nrgn->rgn_enext = nrgn->rgn_eprev = NULL;
35718 +    }
35719 +    else
35720 +    {
35721 +       base = rgn->rgn_ebase;
35722 +       
35723 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
35724 +       {                                                       /* so insert after region (and hence at end */
35725 +           nrgn->rgn_eprev = rgn;                              /* of list */
35726 +           nrgn->rgn_enext = NULL;
35727 +           rgn->rgn_enext = uctx->uctx_etail = nrgn;
35728 +       }
35729 +       else
35730 +       {
35731 +           if (nbase >= base || ntop >= base)                  /* overlapping region */
35732 +               return (-1);
35733 +           
35734 +           nrgn->rgn_enext = rgn;                              /* insert before region */
35735 +           nrgn->rgn_eprev = rgn->rgn_eprev;
35736 +           rgn->rgn_eprev  = nrgn;
35737 +           if (uctx->uctx_ergns == rgn)
35738 +               uctx->uctx_ergns = nrgn;
35739 +           else
35740 +               nrgn->rgn_eprev->rgn_enext = nrgn;
35741 +       }
35742 +    }
35743 +    uctx->uctx_ergnlast = nrgn;
35744 +    
35745 +    return (0);
35746 +}
35747 +
35748 +static USER_RGN *
35749 +user_removergn_elan (USER_CTXT *uctx, USER_RGN  *rgn)
35750 +{
35751 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
35752 +
35753 +    uctx->uctx_ergnlast = rgn->rgn_enext;
35754 +    if (rgn == uctx->uctx_etail)
35755 +       uctx->uctx_etail = rgn->rgn_eprev;
35756 +    else
35757 +       rgn->rgn_enext->rgn_eprev = rgn->rgn_eprev;
35758 +    
35759 +    if (rgn == uctx->uctx_ergns)
35760 +       uctx->uctx_ergns = rgn->rgn_enext;
35761 +    else
35762 +       rgn->rgn_eprev->rgn_enext = rgn->rgn_enext;
35763 +
35764 +    return (rgn);
35765 +}
35766 +
35767 +USER_RGN *
35768 +user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr)
35769 +{
35770 +    USER_RGN *rgn = user_findrgn_elan (uctx, addr, 0);
35771 +
35772 +    if (rgn != NULL && rgn->rgn_ebase <= addr && addr <= (rgn->rgn_ebase + rgn->rgn_len - 1))
35773 +       return (rgn);
35774 +
35775 +    return (NULL);
35776 +}
35777 +
35778 +/* main address region management */
35779 +USER_RGN *
35780 +user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail)
35781 +{
35782 +    USER_RGN  *rgn;
35783 +    USER_RGN  *hirgn;
35784 +    USER_RGN  *lorgn;
35785 +    virtaddr_t lastaddr;
35786 +    virtaddr_t base;
35787 +    int               forward;
35788 +    
35789 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) || kmutex_is_locked (&uctx->uctx_rgnmutex));
35790 +
35791 +    if (uctx->uctx_mrgns == NULL)
35792 +       return (NULL);
35793 +    
35794 +    rgn = uctx->uctx_mrgnlast;
35795 +    if (rgn == NULL)
35796 +       rgn = uctx->uctx_mrgns;
35797 +    
35798 +    forward = 0;
35799 +    if ((base = rgn->rgn_mbase) < addr)
35800 +    {
35801 +       if (addr <= (base + rgn->rgn_len - 1))
35802 +           return (rgn);                                       /* mrgnlast contained addr */
35803 +       
35804 +       hirgn = uctx->uctx_mtail;
35805 +       if ((lastaddr = hirgn->rgn_mbase + hirgn->rgn_len - 1) < addr)
35806 +           return (tail ? hirgn : NULL);                       /* addr is out of range */
35807 +       
35808 +       if ((addr - base) > (lastaddr - addr))
35809 +           rgn = hirgn;
35810 +       else
35811 +       {
35812 +           rgn = rgn->rgn_mnext;
35813 +           forward++;
35814 +       }
35815 +    }
35816 +    else
35817 +    {
35818 +       lorgn = uctx->uctx_mrgns;
35819 +       if (lorgn->rgn_mbase > addr)
35820 +           return (lorgn);                                     /* lowest regions is higher than addr */
35821 +       if ((addr - lorgn->rgn_mbase) < (base - addr))
35822 +       {
35823 +           rgn = lorgn;                                        /* search forward from head */
35824 +           forward++;
35825 +       }
35826 +    }
35827 +    if (forward)
35828 +    {
35829 +       while ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
35830 +           rgn = rgn->rgn_mnext;
35831 +
35832 +       if (rgn->rgn_mbase <= addr)
35833 +           uctx->uctx_mrgnlast = rgn;
35834 +       return (rgn);
35835 +    }
35836 +    else
35837 +    {
35838 +       while (rgn->rgn_mbase > addr)
35839 +           rgn = rgn->rgn_mprev;
35840 +
35841 +       if ((rgn->rgn_mbase + rgn->rgn_len - 1) < addr)
35842 +           return (rgn->rgn_mnext);
35843 +       else
35844 +       {
35845 +           uctx->uctx_mrgnlast = rgn;
35846 +           return (rgn);
35847 +       }
35848 +    }
35849 +}
35850 +
35851 +static int
35852 +user_addrgn_main (USER_CTXT *uctx, USER_RGN *nrgn)
35853 +{
35854 +    USER_RGN  *rgn   = user_findrgn_main (uctx, nrgn->rgn_mbase, 1);
35855 +    virtaddr_t nbase = nrgn->rgn_mbase;
35856 +    virtaddr_t ntop  = nbase + nrgn->rgn_len - 1;
35857 +    virtaddr_t base;
35858 +
35859 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
35860 +
35861 +    if (rgn == NULL)
35862 +    {
35863 +       uctx->uctx_mrgns = uctx->uctx_mtail = nrgn;
35864 +       nrgn->rgn_mnext = nrgn->rgn_mprev = NULL;
35865 +    }
35866 +    else
35867 +    {
35868 +       base = rgn->rgn_mbase;
35869 +
35870 +       if ((base + rgn->rgn_len - 1) < nbase)                  /* top of region below requested address */
35871 +       {                                                       /* so insert after region (and hence at end */
35872 +           nrgn->rgn_mprev = rgn;                              /* of list */
35873 +           nrgn->rgn_mnext = NULL;
35874 +           rgn->rgn_mnext = uctx->uctx_mtail = nrgn;
35875 +       }
35876 +       else
35877 +       {
35878 +           if (nbase >= base || ntop >= base)                  /* overlapping region */
35879 +               return (-1);
35880 +
35881 +           nrgn->rgn_mnext = rgn;                              /* insert before region */
35882 +           nrgn->rgn_mprev = rgn->rgn_mprev;
35883 +           rgn->rgn_mprev  = nrgn;
35884 +           if (uctx->uctx_mrgns == rgn)
35885 +               uctx->uctx_mrgns = nrgn;
35886 +           else
35887 +               nrgn->rgn_mprev->rgn_mnext = nrgn;
35888 +       }
35889 +    }
35890 +    uctx->uctx_mrgnlast = nrgn;
35891 +    
35892 +    return (0);
35893 +}
35894 +
35895 +static USER_RGN *
35896 +user_removergn_main (USER_CTXT *uctx, USER_RGN *rgn)
35897 +{
35898 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_rgnlock) && kmutex_is_locked (&uctx->uctx_rgnmutex));
35899 +
35900 +    uctx->uctx_mrgnlast = rgn->rgn_mnext;
35901 +    if (rgn == uctx->uctx_mtail)
35902 +       uctx->uctx_mtail = rgn->rgn_mprev;
35903 +    else
35904 +       rgn->rgn_mnext->rgn_mprev = rgn->rgn_mprev;
35905 +    
35906 +    if (rgn == uctx->uctx_mrgns)
35907 +       uctx->uctx_mrgns = rgn->rgn_mnext;
35908 +    else
35909 +       rgn->rgn_mprev->rgn_mnext = rgn->rgn_mnext;
35910 +
35911 +    return (rgn);
35912 +}
35913 +
35914 +/* Remove whole region from both lists */
35915 +static void
35916 +user_removergn (USER_CTXT *uctx, USER_RGN *rgn)
35917 +{
35918 +    spin_lock (&uctx->uctx_rgnlock);
35919 +
35920 +    elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, rgn->rgn_len);
35921 +           
35922 +    user_removergn_elan (uctx, rgn);
35923 +    user_removergn_main (uctx, rgn);
35924 +    
35925 +    spin_unlock (&uctx->uctx_rgnlock);
35926 +    
35927 +    KMEM_FREE (rgn, sizeof (USER_RGN));
35928 +}
35929 +
35930 +/* Remove all allocated regions */
35931 +void
35932 +user_freergns (USER_CTXT *uctx)
35933 +{
35934 +    kmutex_lock (&uctx->uctx_rgnmutex);
35935 +
35936 +    while (uctx->uctx_mrgns)
35937 +       user_removergn(uctx, uctx->uctx_mrgns);
35938 +
35939 +    kmutex_unlock (&uctx->uctx_rgnmutex);
35940 +
35941 +    ASSERT (uctx->uctx_ergns == NULL);
35942 +}
35943 +
35944 +USER_RGN *
35945 +user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr)
35946 +{
35947 +    USER_RGN *rgn = user_findrgn_main (uctx, addr, 0);
35948 +    
35949 +    if (rgn != NULL && rgn->rgn_mbase <= addr && addr <= (rgn->rgn_mbase + rgn->rgn_len - 1))
35950 +       return (rgn);
35951 +    return (NULL);
35952 +}
35953 +
35954 +int
35955 +user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm)
35956 +{
35957 +    USER_RGN *nrgn;
35958 +
35959 +    PRINTF4 (uctx, DBG_PERM, "user_setperm: user %lx elan %llx len %lx perm %x\n", maddr, (long long) eaddr, len, perm);
35960 +
35961 +    if ((maddr & PAGEOFFSET) || (eaddr & PAGEOFFSET) || (len & PAGEOFFSET))
35962 +    {
35963 +        PRINTF0 (uctx, DBG_PERM, "user_setperm:  alignment failure\n");
35964 +       return (-EINVAL);
35965 +    }
35966 +    
35967 +    if ((maddr + len - 1) <= maddr || (eaddr + len - 1) <= eaddr) 
35968 +    {
35969 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  range failure\n");
35970 +       return (-EINVAL);
35971 +    }
35972 +
35973 +    KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1);
35974 +    
35975 +    if (nrgn == NULL)
35976 +       return (-ENOMEM);
35977 +
35978 +    nrgn->rgn_mbase = maddr;
35979 +    nrgn->rgn_ebase = eaddr;
35980 +    nrgn->rgn_len   = len;
35981 +    nrgn->rgn_perm  = perm;
35982 +
35983 +    kmutex_lock (&uctx->uctx_rgnmutex);
35984 +    spin_lock (&uctx->uctx_rgnlock);
35985 +
35986 +    if (user_addrgn_elan (uctx, nrgn) < 0)
35987 +    {
35988 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  elan address exists\n");
35989 +       spin_unlock (&uctx->uctx_rgnlock);
35990 +       kmutex_unlock (&uctx->uctx_rgnmutex);
35991 +
35992 +       KMEM_FREE (nrgn, sizeof (USER_RGN));
35993 +       return (-EINVAL);
35994 +    }
35995 +    
35996 +    if (user_addrgn_main (uctx, nrgn) < 0)
35997 +    {
35998 +       PRINTF0 (uctx, DBG_PERM, "user_setperm:  main address exists\n");
35999 +       user_removergn_elan (uctx, nrgn);
36000 +       
36001 +       spin_unlock (&uctx->uctx_rgnlock);
36002 +       kmutex_unlock (&uctx->uctx_rgnmutex);
36003 +
36004 +       KMEM_FREE (nrgn, sizeof (USER_RGN));
36005 +       return (-EINVAL);
36006 +    }
36007 +    spin_unlock (&uctx->uctx_rgnlock);
36008 +
36009 +    if ((perm & PERM_Preload))
36010 +       user_preload_main (uctx, maddr, len);
36011 +
36012 +    kmutex_unlock (&uctx->uctx_rgnmutex);
36013 +
36014 +    return (0);
36015 +}
36016 +
36017 +void
36018 +user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len)
36019 +{
36020 +    E4_Addr       raddr;
36021 +    E4_Addr       rtop;
36022 +    USER_RGN     *nrgn;
36023 +    USER_RGN     *rgn;
36024 +    USER_RGN     *rgn_next;
36025 +    unsigned long ssize;
36026 +    int                  res;
36027 +
36028 +    PRINTF2 (uctx, DBG_PERM, "user_clrperm: elan %llx len %lx\n", addr, len);
36029 +
36030 +    raddr = (addr & PAGEMASK);
36031 +    rtop = ((addr + len - 1) & PAGEMASK) + (PAGESIZE-1);
36032 +
36033 +    kmutex_lock (&uctx->uctx_rgnmutex);
36034 +    
36035 +    for (rgn = user_findrgn_elan (uctx, addr, 0); rgn != NULL; rgn = rgn_next)
36036 +    {
36037 +       if (rtop < rgn->rgn_ebase)                              /* rtop was in a gap */
36038 +           break;
36039 +       
36040 +       rgn_next = rgn->rgn_enext;                              /* Save next region pointer */
36041 +       
36042 +       PRINTF (uctx, DBG_PERM, "              elan %llx->%llx main %p->%p\n", 
36043 +               rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1,
36044 +               rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1);
36045 +
36046 +       if (raddr <= rgn->rgn_ebase && rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
36047 +       {
36048 +           /* whole region is cleared */
36049 +
36050 +           PRINTF (uctx, DBG_PERM, "              whole region\n");
36051 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1);
36052 +           user_removergn (uctx, rgn);
36053 +       }
36054 +       else if (raddr <= rgn->rgn_ebase)
36055 +       {
36056 +           /* clearing at beginning, so shrink size and increment base ptrs */
36057 +           ssize = rtop - rgn->rgn_ebase + 1;
36058 +           
36059 +           PRINTF (uctx, DBG_PERM, "              clear at beginning %x\n", ssize);
36060 +
36061 +           spin_lock (&uctx->uctx_rgnlock);
36062 +
36063 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", rgn->rgn_ebase, rgn->rgn_ebase + ssize-1);
36064 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, rgn->rgn_ebase, ssize);
36065 +
36066 +           rgn->rgn_mbase += ssize;
36067 +           rgn->rgn_ebase += ssize;
36068 +           rgn->rgn_len   -= ssize;
36069 +           
36070 +           spin_unlock(&uctx->uctx_rgnlock);
36071 +       }
36072 +       else if (rtop >= (rgn->rgn_ebase + rgn->rgn_len - 1))
36073 +       {
36074 +           /* clearing at end, so just shrink length of region */
36075 +           ssize = (rgn->rgn_ebase + rgn->rgn_len - 1) - raddr + 1;
36076 +
36077 +           PRINTF (uctx, DBG_PERM, "              clear at end %x\n", ssize);
36078 +
36079 +           spin_lock (&uctx->uctx_rgnlock);
36080 +
36081 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", raddr, raddr+ssize-1);
36082 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, ssize);
36083 +
36084 +           rgn->rgn_len -= ssize;
36085 +
36086 +           spin_unlock(&uctx->uctx_rgnlock);
36087 +       }
36088 +       else
36089 +       {
36090 +           /* the section to go is in the middle,  so need to  */
36091 +           /* split it into two regions */
36092 +           KMEM_ALLOC (nrgn, USER_RGN *, sizeof (USER_RGN), 1);
36093 +
36094 +           spin_lock (&uctx->uctx_rgnlock);
36095 +
36096 +           PRINTF (uctx, DBG_PERM, "              unload elan %llx->%llx\n", raddr, rtop);
36097 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* XXXX tbl */, raddr, rtop - raddr + 1);
36098 +
36099 +           nrgn->rgn_mbase = rgn->rgn_mbase + (rtop - rgn->rgn_ebase + 1);
36100 +           nrgn->rgn_ebase = rtop + 1;
36101 +           nrgn->rgn_len   = (rgn->rgn_ebase + rgn->rgn_len - 1) - rtop;
36102 +           nrgn->rgn_perm  = rgn->rgn_perm;
36103 +
36104 +           PRINTF (uctx, DBG_PERM, "              new elan %llx->%llx main %p->%p\n", 
36105 +                   nrgn->rgn_ebase, nrgn->rgn_ebase + nrgn->rgn_len-1,
36106 +                   nrgn->rgn_mbase, nrgn->rgn_mbase + nrgn->rgn_len-1);
36107 +
36108 +           rgn->rgn_len = (raddr - rgn->rgn_ebase);            /* shrink original region */
36109 +
36110 +           PRINTF (uctx, DBG_PERM, "              old elan %llx->%llx main %p->%p\n", 
36111 +                   rgn->rgn_ebase, rgn->rgn_ebase + rgn->rgn_len-1,
36112 +                   rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len-1);
36113 +
36114 +           res = user_addrgn_elan (uctx, nrgn);                /* insert new region */
36115 +           ASSERT (res == 0);                                  /* which cannot fail */
36116 +
36117 +           res = user_addrgn_main (uctx, nrgn);        
36118 +           ASSERT (res == 0);
36119 +
36120 +           spin_unlock(&uctx->uctx_rgnlock);
36121 +       }
36122 +    }
36123 +    kmutex_unlock (&uctx->uctx_rgnmutex);
36124 +}
36125 +
36126 +int
36127 +user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access)
36128 +{
36129 +    USER_RGN *rgn;
36130 +
36131 +    PRINTF3 (uctx, DBG_PERM, "user_checkperm: elan %lx len %lx access %x\n", raddr, rsize, access);
36132 +    
36133 +    if ((raddr + rsize - 1) < raddr)
36134 +       return (-ENOMEM);
36135 +    
36136 +    kmutex_lock (&uctx->uctx_rgnmutex);
36137 +    if ((rgn = user_rgnat_elan (uctx, raddr)) == (USER_RGN *) NULL)
36138 +    {
36139 +       kmutex_unlock (&uctx->uctx_rgnmutex);
36140 +       return (-ENOMEM);
36141 +    }
36142 +    else
36143 +    {
36144 +       register int ssize;
36145 +       
36146 +       for (; rsize != 0; rsize -= ssize, raddr += ssize)
36147 +       {
36148 +           if (raddr > (rgn->rgn_ebase + rgn->rgn_len - 1))
36149 +           {
36150 +               rgn  = rgn->rgn_enext;
36151 +               
36152 +               if (rgn == NULL || raddr != rgn->rgn_ebase)
36153 +               {
36154 +                   kmutex_unlock (&uctx->uctx_rgnmutex);
36155 +                   return (-ENOMEM);
36156 +               }
36157 +           }
36158 +           if ((raddr + rsize - 1) > (rgn->rgn_ebase + rgn->rgn_len - 1))
36159 +               ssize = ((rgn->rgn_ebase + rgn->rgn_len - 1) - raddr) + 1;
36160 +           else
36161 +               ssize = rsize;
36162 +           
36163 +           PRINTF4 (uctx, DBG_PERM, "user_checkperm : rgn %lx -> %lx perm %x access %x\n",
36164 +                    rgn->rgn_ebase, rgn->rgn_ebase + (E4_Addr)rgn->rgn_len, rgn->rgn_perm, access);
36165 +
36166 +           if (ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, access))
36167 +           {
36168 +               kmutex_unlock (&uctx->uctx_rgnmutex);
36169 +               return (-EACCES);
36170 +           }
36171 +       }
36172 +    }
36173 +    
36174 +    kmutex_unlock (&uctx->uctx_rgnmutex);
36175 +    
36176 +    return (0);
36177 +}
36178 +
36179 +virtaddr_t
36180 +user_elan2main (USER_CTXT *uctx, E4_Addr addr)
36181 +{
36182 +    USER_RGN  *rgn;
36183 +    virtaddr_t raddr;
36184 +    
36185 +    spin_lock (&uctx->uctx_rgnlock);
36186 +    
36187 +    if ((rgn = user_rgnat_elan (uctx, addr)) == (USER_RGN *) NULL)
36188 +       raddr = (virtaddr_t) 0;
36189 +    else
36190 +       raddr = rgn->rgn_mbase + (addr - rgn->rgn_ebase);
36191 +
36192 +    spin_unlock (&uctx->uctx_rgnlock);
36193 +
36194 +    return (raddr);
36195 +}
36196 +
36197 +E4_Addr
36198 +user_main2elan (USER_CTXT *uctx, virtaddr_t addr)
36199 +{
36200 +    USER_RGN *rgn;
36201 +    E4_Addr   raddr;
36202 +
36203 +    spin_lock (&uctx->uctx_rgnlock);
36204 +    
36205 +    if ((rgn = user_rgnat_main (uctx, addr)) == (USER_RGN *) NULL)
36206 +       raddr = (virtaddr_t) 0;
36207 +    else
36208 +       raddr = rgn->rgn_ebase + (addr - rgn->rgn_mbase);
36209 +    
36210 +    spin_unlock (&uctx->uctx_rgnlock);
36211 +
36212 +    return (raddr);
36213 +}
36214 +
36215 +/*
36216 + * Local variables:
36217 + * c-file-style: "stroustrup"
36218 + * End:
36219 + */
36220 diff -urN clean/drivers/net/qsnet/elan4/routetable.c linux-2.6.9/drivers/net/qsnet/elan4/routetable.c
36221 --- clean/drivers/net/qsnet/elan4/routetable.c  1969-12-31 19:00:00.000000000 -0500
36222 +++ linux-2.6.9/drivers/net/qsnet/elan4/routetable.c    2005-04-15 08:38:22.000000000 -0400
36223 @@ -0,0 +1,254 @@
36224 +/*
36225 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
36226 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
36227 + * 
36228 + *    For licensing information please see the supplied COPYING file
36229 + *
36230 + */
36231 +
36232 +#ident "@(#)$Id: routetable.c,v 1.17 2005/04/15 12:38:22 mike Exp $"
36233 +/*      $Source: /cvs/master/quadrics/elan4mod/routetable.c,v $*/
36234 +
36235 +#include <qsnet/kernel.h>
36236 +
36237 +#include <elan4/sdram.h>
36238 +#include <elan4/debug.h>
36239 +#include <elan4/device.h>
36240 +
36241 +ELAN4_ROUTE_TABLE *
36242 +elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size)
36243 +{
36244 +    ELAN4_ROUTE_TABLE *tbl;
36245 +
36246 +    KMEM_ZALLOC (tbl, ELAN4_ROUTE_TABLE *, sizeof (ELAN4_ROUTE_TABLE), 1);
36247 +
36248 +    if (tbl == (ELAN4_ROUTE_TABLE *) NULL)
36249 +       return (NULL);
36250 +    
36251 +    tbl->tbl_size    = (size & E4_VPT_SIZE_MASK);
36252 +    tbl->tbl_entries = elan4_sdram_alloc (dev, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
36253 +
36254 +    if (tbl->tbl_entries == 0)
36255 +    {
36256 +       KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE));
36257 +       return ((ELAN4_ROUTE_TABLE *) NULL);
36258 +    }
36259 +
36260 +    spin_lock_init (&tbl->tbl_lock);
36261 +
36262 +    /* zero the route table */
36263 +    elan4_sdram_zeroq_sdram (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
36264 +
36265 +    return (tbl);
36266 +}
36267 +
36268 +void
36269 +elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl)
36270 +{
36271 +    elan4_sdram_free (dev, tbl->tbl_entries, (E4_VPT_MIN_ENTRIES << tbl->tbl_size) * sizeof (E4_VirtualProcessEntry));
36272 +    
36273 +    spin_lock_destroy (&tbl->tbl_lock);
36274 +
36275 +    KMEM_FREE (tbl, sizeof (ELAN4_ROUTE_TABLE));
36276 +}
36277 +
36278 +void
36279 +elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry)
36280 +{
36281 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
36282 +    
36283 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), entry->Values[1]);
36284 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), entry->Values[0]);
36285 +    pioflush_sdram (dev);
36286 +}
36287 +
36288 +void
36289 +elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry)
36290 +{
36291 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
36292 +    
36293 +    entry->Values[0] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]));
36294 +    entry->Values[1] = elan4_sdram_readq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]));
36295 +}
36296 +
36297 +void
36298 +elan4_invalidate_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp)
36299 +{
36300 +    ASSERT (vp < (E4_VPT_MIN_ENTRIES << tbl->tbl_size));
36301 +
36302 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[0]), 0);
36303 +    elan4_sdram_writeq (dev, tbl->tbl_entries + (vp * sizeof (E4_VirtualProcessEntry)) + offsetof (E4_VirtualProcessEntry, Values[1]), 0);
36304 +    pioflush_sdram (dev);
36305 +}
36306 +
36307 +static void
36308 +pack_them_routes (E4_VirtualProcessEntry *entry, E4_uint16 first, E4_uint8 *packed, unsigned ctx)
36309 +{
36310 +    E4_uint64 value0 = first;
36311 +    E4_uint64 value1 = ROUTE_CTXT_VALUE(ctx);
36312 +    E4_uint32 ThirdRouteBCastVal;
36313 +    register int i;
36314 +
36315 +    for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
36316 +    {
36317 +       value0 |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
36318 +       value1 |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
36319 +    }
36320 +
36321 +    /* DMA fix for large broadcast route values that fall into the double issue of route value 3 bug. */
36322 +    /* NOTE - this is only required when the link is running in Mod45 mode, it could be automatically
36323 +     *        disabled when Mod44 is detected */
36324 +    
36325 +    /* First seach for the alignment type. The bug is only sensitive to an odd bcast aligment on the 3rd word. */
36326 +    for (i=4;i<16;i++)
36327 +       if (((value0 >> (i*4)) & 0xc) == 4)
36328 +           i++;
36329 +    
36330 +    if (i == 17)
36331 +    {
36332 +       ThirdRouteBCastVal = value1 & 0xcccccccc;
36333 +       if      (((value1 & 0xfffff0000000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x04444444))
36334 +           value1 |= 0x140000000ULL;
36335 +       else if (((value1 & 0xfffffff00000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00044444))
36336 +           value1 |= 0x1400000ULL;
36337 +       else if (((value1 & 0xfffffffff000ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000444))
36338 +           value1 |= 0x14000ULL;
36339 +       else if (((value1 & 0xfffffffffff0ULL) == 0ULL) && (ThirdRouteBCastVal == 0x00000004))
36340 +           value1 |= 0x140ULL;
36341 +    }
36342 +    
36343 +    entry->Values[0] = value0;
36344 +    entry->Values[1] = value1;
36345 +}
36346 +
36347 +int
36348 +elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctx, unsigned lowid, unsigned highid, unsigned options)
36349 +{
36350 +    unsigned int broadcast    = (lowid != highid);
36351 +    unsigned int noadaptive   = 0;
36352 +    int          padbcast     = 0;
36353 +    E4_uint16    first;
36354 +    int                 rb;
36355 +    E4_uint8     packed[ROUTE_NUM_PACKED];
36356 +    int                 level, llink, hlink;
36357 +
36358 +    /* sanity check on lowid highid */
36359 +    if (highid < lowid)           return (-EINVAL);
36360 +    if (lowid  < 0)               return (-EINVAL);
36361 +    if (highid >= pos->pos_nodes) return (-EINVAL);
36362 +
36363 + regenerate_routes:
36364 +    first = 0;
36365 +    rb    = 0;
36366 +
36367 +    switch (pos->pos_mode)
36368 +    {
36369 +    case ELAN_POS_MODE_LOOPBACK:
36370 +       if (lowid != highid || lowid != pos->pos_nodeid)
36371 +           return (-EINVAL);
36372 +       
36373 +       route->Values[0] = FIRST_MYLINK;
36374 +       route->Values[1] = ROUTE_CTXT_VALUE (ctx);
36375 +       return (0);
36376 +
36377 +    case ELAN_POS_MODE_BACKTOBACK:
36378 +       if (lowid != highid || lowid == pos->pos_nodeid)
36379 +           return (-EINVAL);
36380 +       
36381 +       route->Values[0] = FIRST_MYLINK;
36382 +       route->Values[1] = ROUTE_CTXT_VALUE (ctx);
36383 +       return (0);
36384 +
36385 +    case ELAN_POS_MODE_SWITCHED:
36386 +    {
36387 +       unsigned char *arityp  = &pos->pos_arity[pos->pos_levels - 1];
36388 +       unsigned int   spanned = *arityp;
36389 +       unsigned int   broadcasting = 0;
36390 +       
36391 +       bzero (packed, sizeof (packed));
36392 +
36393 +       /* XXXX compute noadaptive ? */
36394 +
36395 +       for (level = 0; 
36396 +            level < pos->pos_levels && ! ((pos->pos_nodeid / spanned) == (lowid / spanned) &&
36397 +                                          (pos->pos_nodeid / spanned) ==  (highid / spanned)); 
36398 +            level++, spanned *= *(--arityp))
36399 +       {
36400 +           if (first == 0)
36401 +               first = (broadcast || noadaptive) ? FIRST_BCAST_TREE : FIRST_ADAPTIVE;
36402 +           else if (broadcast && padbcast)
36403 +           {
36404 +               padbcast = 0;
36405 +               packed[rb++] = PACKED_BCAST0(4, 4);
36406 +               packed[rb++] = PACKED_BCAST1(4, 4);
36407 +           }
36408 +           else
36409 +               packed[rb++] = (broadcast || noadaptive) ? PACKED_BCAST_TREE : PACKED_ADAPTIVE;    
36410 +       }
36411 +
36412 +       while (level >= 0)
36413 +       {
36414 +           spanned /= *arityp;
36415 +           
36416 +           llink = (lowid  / spanned) % *arityp;
36417 +           hlink = (highid / spanned) % *arityp;
36418 +           
36419 +           if (llink != hlink || broadcasting)
36420 +           {
36421 +               broadcasting = 1;
36422 +               
36423 +               if (first == 0)
36424 +                   first = FIRST_BCAST (hlink, llink);
36425 +               else
36426 +               {
36427 +                   packed[rb++] = PACKED_BCAST0(hlink, llink);
36428 +                   
36429 +                   if ((rb % 4) == 0 && PACKED_BCAST1(hlink, llink) == 0)
36430 +                   {
36431 +                       padbcast = 1;
36432 +                       goto regenerate_routes;
36433 +                   }
36434 +                   
36435 +                   packed[rb++] = PACKED_BCAST1(hlink, llink);
36436 +               }
36437 +           }
36438 +           else
36439 +           {
36440 +               if (first == 0)
36441 +                   first = FIRST_ROUTE(llink);
36442 +               else
36443 +                   packed[rb++] = PACKED_ROUTE(llink);
36444 +           }
36445 +           
36446 +           level--;
36447 +           arityp++;
36448 +       }
36449 +
36450 +       pack_them_routes (route, first | (options & FIRST_OPTIONS_MASK), packed, ctx);
36451 +       return (0);
36452 +    }
36453 +    }
36454 +
36455 +    return (-EINVAL);
36456 +}
36457 +
36458 +int
36459 +elan4_check_route (ELAN_POSITION *position, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags)
36460 +{
36461 +    /* XXXX - TBD */
36462 +    return (0);
36463 +}
36464 +
36465 +EXPORT_SYMBOL(elan4_alloc_routetable);
36466 +EXPORT_SYMBOL(elan4_free_routetable);
36467 +EXPORT_SYMBOL(elan4_write_route);
36468 +EXPORT_SYMBOL(elan4_read_route);
36469 +EXPORT_SYMBOL(elan4_invalidate_route);
36470 +EXPORT_SYMBOL(elan4_generate_route);
36471 +EXPORT_SYMBOL(elan4_check_route);
36472 +
36473 +/*
36474 + * Local variables:
36475 + * c-file-style: "stroustrup"
36476 + * End:
36477 + */
36478 diff -urN clean/drivers/net/qsnet/elan4/sdram.c linux-2.6.9/drivers/net/qsnet/elan4/sdram.c
36479 --- clean/drivers/net/qsnet/elan4/sdram.c       1969-12-31 19:00:00.000000000 -0500
36480 +++ linux-2.6.9/drivers/net/qsnet/elan4/sdram.c 2005-07-20 07:35:36.000000000 -0400
36481 @@ -0,0 +1,1039 @@
36482 +/*
36483 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
36484 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
36485 + * 
36486 + *    For licensing information please see the supplied COPYING file
36487 + *
36488 + */
36489 +
36490 +#ident "@(#)$Id: sdram.c,v 1.34.2.1 2005/07/20 11:35:36 mike Exp $"
36491 +/*      $Source: /cvs/master/quadrics/elan4mod/sdram.c,v $*/
36492 +
36493 +#include <qsnet/kernel.h>
36494 +
36495 +#include <elan4/debug.h>
36496 +#include <elan4/device.h>
36497 +
36498 +EXPORT_SYMBOL_GPL(elan4_sdram_readb);
36499 +EXPORT_SYMBOL_GPL(elan4_sdram_readw);
36500 +EXPORT_SYMBOL_GPL(elan4_sdram_readl);
36501 +EXPORT_SYMBOL_GPL(elan4_sdram_readq);
36502 +EXPORT_SYMBOL_GPL(elan4_sdram_writeb);
36503 +EXPORT_SYMBOL_GPL(elan4_sdram_writew);
36504 +EXPORT_SYMBOL_GPL(elan4_sdram_writel);
36505 +EXPORT_SYMBOL_GPL(elan4_sdram_writeq);
36506 +EXPORT_SYMBOL_GPL(elan4_sdram_zerob_sdram);
36507 +EXPORT_SYMBOL_GPL(elan4_sdram_zerow_sdram);
36508 +EXPORT_SYMBOL_GPL(elan4_sdram_zerol_sdram);
36509 +EXPORT_SYMBOL_GPL(elan4_sdram_zeroq_sdram);
36510 +EXPORT_SYMBOL_GPL(elan4_sdram_copyb_from_sdram);
36511 +EXPORT_SYMBOL_GPL(elan4_sdram_copyw_from_sdram);
36512 +EXPORT_SYMBOL_GPL(elan4_sdram_copyl_from_sdram);
36513 +EXPORT_SYMBOL_GPL(elan4_sdram_copyq_from_sdram);
36514 +EXPORT_SYMBOL_GPL(elan4_sdram_copyb_to_sdram);
36515 +EXPORT_SYMBOL_GPL(elan4_sdram_copyw_to_sdram);
36516 +EXPORT_SYMBOL_GPL(elan4_sdram_copyl_to_sdram);
36517 +EXPORT_SYMBOL_GPL(elan4_sdram_copyq_to_sdram);
36518 +EXPORT_SYMBOL_GPL(elan4_sdram_alloc);
36519 +EXPORT_SYMBOL_GPL(elan4_sdram_free);
36520 +EXPORT_SYMBOL_GPL(elan4_sdram_flushcache);
36521 +
36522 +#define SDRAM_MIN_BANK_SIZE            ((1 << 15) * 8)         /* 256 Kbytes */
36523 +
36524 +static inline ELAN4_SDRAM_BANK *
36525 +sdramaddr_to_bank (ELAN4_DEV *dev, sdramaddr_t saddr)
36526 +{
36527 +    register int i;
36528 +    
36529 +    for (i = 0; i < dev->dev_sdram_numbanks; i++)
36530 +    {
36531 +       ELAN4_SDRAM_BANK *bank = &dev->dev_sdram_banks[i];
36532 +
36533 +       if (saddr >= bank->b_base && saddr < (bank->b_base + bank->b_size))
36534 +           return (bank);
36535 +    }
36536 +    printk ("sdramaddr_to_bank: sdram address %lx not in a sdram bank\n", saddr);
36537 +    BUG();
36538 +
36539 +    return (NULL);     /* NOTREACHED */
36540 +}
36541 +
36542 +static inline int
36543 +sdramaddr_to_bankoffset (ELAN4_DEV *dev, sdramaddr_t saddr)
36544 +{
36545 +    return (saddr & (sdramaddr_to_bank (dev, saddr)->b_size-1));
36546 +}
36547 +
36548 +static inline int
36549 +sdramaddr_to_bit(ELAN4_DEV *dev, int indx, sdramaddr_t saddr)
36550 +{
36551 +    return (sdramaddr_to_bankoffset(dev, saddr) >> (SDRAM_MIN_BLOCK_SHIFT+(indx)));
36552 +}
36553 +
36554 +static inline ioaddr_t
36555 +sdramaddr_to_ioaddr (ELAN4_DEV *dev, sdramaddr_t saddr)
36556 +{
36557 +    ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, saddr);
36558 +
36559 +    return (bank->b_ioaddr + (saddr - bank->b_base));
36560 +}
36561 +
36562 +unsigned char
36563 +elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t off)
36564 +{
36565 +    return (__elan4_readb (dev, sdramaddr_to_ioaddr(dev, off)));
36566 +}
36567 +
36568 +unsigned short
36569 +elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t off)
36570 +{
36571 +    return (__elan4_readw (dev, sdramaddr_to_ioaddr(dev, off)));
36572 +}
36573 +
36574 +unsigned int
36575 +elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t off)
36576 +{
36577 +    return (__elan4_readl (dev, sdramaddr_to_ioaddr(dev, off)));
36578 +}
36579 +
36580 +unsigned long long
36581 +elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t off)
36582 +{
36583 +    return (__elan4_readq (dev, sdramaddr_to_ioaddr(dev, off)));
36584 +}
36585 +
36586 +void
36587 +elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t off, unsigned char val)
36588 +{
36589 +    writeb (val, (void *) sdramaddr_to_ioaddr(dev, off));
36590 +
36591 +    mb();
36592 +}
36593 +
36594 +void
36595 +elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t off, unsigned short val)
36596 +{
36597 +    writew (val, (void *) sdramaddr_to_ioaddr(dev, off));
36598 +
36599 +    mb();
36600 +}
36601 +
36602 +void
36603 +elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t off, unsigned int val)
36604 +{
36605 +    writel (val, (void *) (sdramaddr_to_ioaddr(dev, off)));
36606 +
36607 +    mb();
36608 +}
36609 +
36610 +void
36611 +elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t off, unsigned long long val)
36612 +{
36613 +    writeq (val, (void *) (sdramaddr_to_ioaddr(dev, off)));
36614 +
36615 +    mb();
36616 +}
36617 +
36618 +void
36619 +elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
36620 +{
36621 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36622 +    ioaddr_t lim  = dest + nbytes;
36623 +
36624 +    for (; dest < lim; dest += sizeof (u8))
36625 +       writeb (0, (void *) dest);
36626 +}
36627 +
36628 +void
36629 +elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
36630 +{
36631 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36632 +    ioaddr_t lim  = dest + nbytes;
36633 +
36634 +    for (; dest < lim; dest += sizeof (u16))
36635 +       writew (0, (void *) dest);
36636 +}
36637 +
36638 +void
36639 +elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
36640 +{
36641 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36642 +    ioaddr_t lim  = dest + nbytes;
36643 +
36644 +    for (; dest < lim; dest += sizeof (u32))
36645 +       writel (0, (void *) dest);
36646 +}
36647 +
36648 +void
36649 +elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t to, int nbytes)
36650 +{
36651 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36652 +    ioaddr_t lim  = dest + nbytes;
36653 +
36654 +#ifdef CONFIG_MPSAS
36655 +    if (sas_memset_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, 0, nbytes) == 0)
36656 +       return;
36657 +#endif
36658 +
36659 +    for (; dest < lim; dest += sizeof (u64))
36660 +       writeq (0, (void *) dest);
36661 +}
36662 +
36663 +void
36664 +elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
36665 +{
36666 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
36667 +    u8      *dest = (u8 *) to;
36668 +    ioaddr_t lim  = src + nbytes;
36669 +
36670 +    for (; src < lim; src += sizeof (u8))
36671 +       *dest++ = __elan4_readb (dev, src);
36672 +}
36673 +
36674 +void
36675 +elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
36676 +{
36677 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
36678 +    u16     *dest = (u16 *) to;
36679 +    ioaddr_t lim  = src + nbytes;
36680 +
36681 +    for (; src < lim; src += sizeof (u16))
36682 +       *dest++ = __elan4_readw (dev, src);
36683 +}
36684 +
36685 +void
36686 +elan4_sdram_copyl_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
36687 +{
36688 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
36689 +    u32     *dest = (u32 *) to;
36690 +    ioaddr_t lim  = src + nbytes;
36691 +
36692 +    for (; src < lim; src += sizeof (u32))
36693 +       *dest++ = __elan4_readl (dev, src);
36694 +}
36695 +
36696 +void
36697 +elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes)
36698 +{
36699 +    ioaddr_t src  = sdramaddr_to_ioaddr (dev, from);
36700 +    u64     *dest = (u64 *) to;
36701 +    ioaddr_t lim  = src + nbytes;
36702 +
36703 +#ifdef CONFIG_MPSAS
36704 +    if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, from, (unsigned long) to, nbytes) == 0)
36705 +       return;
36706 +#endif
36707 +
36708 +    for (; src < lim; src += sizeof (u64))
36709 +       *dest++ = __elan4_readq (dev, src);
36710 +}
36711 +
36712 +void
36713 +elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
36714 +{
36715 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36716 +    u8      *src  = (u8 *) from;
36717 +    ioaddr_t lim  = dest + nbytes;
36718 +
36719 +    for (; dest < lim; dest += sizeof (u8))
36720 +       writeb (*src++, (void *) (dest));
36721 +
36722 +    mb();
36723 +}
36724 +
36725 +void
36726 +elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
36727 +{
36728 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36729 +    u16     *src  = (u16 *) from;
36730 +    ioaddr_t lim  = dest + nbytes;
36731 +
36732 +    for (; dest < lim; dest += sizeof (u16))
36733 +       writew (*src++, (void *) (dest));
36734 +
36735 +    mb();
36736 +}
36737 +
36738 +void
36739 +elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
36740 +{
36741 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36742 +    u32     *src  = (u32 *) from;
36743 +    ioaddr_t lim  = dest + nbytes;
36744 +
36745 +    for (; dest < lim; dest += sizeof (u32))
36746 +       writel (*src++, (void *) (dest));
36747 +
36748 +    mb();
36749 +}
36750 +
36751 +void
36752 +elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes)
36753 +{
36754 +    ioaddr_t dest = sdramaddr_to_ioaddr (dev, to);
36755 +    u64     *src  = (u64 *) from;
36756 +    ioaddr_t lim  = dest + nbytes;
36757 +
36758 +#ifdef CONFIG_MPSAS
36759 +    if (sas_copyto_dev (dev->dev_osdep.pdev, ELAN4_BAR_SDRAM, to, (unsigned long) from, nbytes) == 0)
36760 +       return;
36761 +#endif
36762 +
36763 +    for (; dest < lim; dest += sizeof (u64))
36764 +       writeq (*src++, (void *) (dest));
36765 +
36766 +    mb();
36767 +}
36768 +
36769 +/* sdram buddy allocator */
36770 +typedef struct sdramblock
36771 +{
36772 +    sdramaddr_t        next;
36773 +    sdramaddr_t prev;
36774 +} sdramblock_t;
36775 +
36776 +static inline sdramaddr_t
36777 +read_next (ELAN4_DEV *dev, sdramaddr_t block)
36778 +{
36779 +    return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next)));
36780 +}
36781 +
36782 +static inline sdramaddr_t
36783 +read_prev (ELAN4_DEV *dev, sdramaddr_t block)
36784 +{
36785 +    return __elan4_readl (dev, sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev)));
36786 +}
36787 +
36788 +static inline void
36789 +write_next (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val)
36790 +{
36791 +    writel (val, (void *) (sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, next))));
36792 +}
36793 +
36794 +static inline void
36795 +write_prev (ELAN4_DEV *dev, sdramaddr_t block, sdramaddr_t val)
36796 +{
36797 +    writel (val, (void *) (sdramaddr_to_ioaddr (dev, block + offsetof (sdramblock_t, prev))));
36798 +}
36799 +
36800 +static inline void
36801 +freelist_insert (ELAN4_DEV *dev, int idx, sdramaddr_t block)
36802 +{
36803 +    sdramaddr_t next = dev->dev_sdram_freelists[(idx)];
36804 +
36805 +    /*
36806 +     * block->prev = NULL;
36807 +     * block->next = next;
36808 +     * if (next != NULL)
36809 +     *    next->prev = block;
36810 +     * freelist = block;
36811 +     */
36812 +    write_prev (dev, block, (sdramaddr_t) 0);
36813 +    write_next (dev, block, next);
36814 +    if (next != (sdramaddr_t) 0)
36815 +       write_prev (dev, next, block);
36816 +    dev->dev_sdram_freelists[idx] = block;
36817 +
36818 +    dev->dev_sdram_freecounts[idx]++;
36819 +    dev->dev_stats.s_sdram_bytes_free += (SDRAM_MIN_BLOCK_SIZE << idx);
36820 +
36821 +    mb();
36822 +}
36823 +
36824 +static inline void
36825 +freelist_remove (ELAN4_DEV *dev,int idx, sdramaddr_t block)
36826 +{
36827 +    /*
36828 +     * if (block->prev)
36829 +     *     block->prev->next = block->next;
36830 +     * else
36831 +     *     dev->dev_sdram_freelists[idx] = block->next;
36832 +     * if (block->next)
36833 +     *     block->next->prev = block->prev;
36834 +     */
36835 +    sdramaddr_t blocknext = read_next (dev, block);
36836 +    sdramaddr_t blockprev = read_prev (dev, block);
36837 +
36838 +    if (blockprev)
36839 +       write_next (dev, blockprev, blocknext);
36840 +    else
36841 +       dev->dev_sdram_freelists[idx] = blocknext;
36842 +    if (blocknext)
36843 +       write_prev (dev, blocknext, blockprev);
36844 +
36845 +    dev->dev_sdram_freecounts[idx]--;
36846 +    dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx);
36847 +
36848 +    mb();
36849 +}
36850 +
36851 +static inline void
36852 +freelist_removehead(ELAN4_DEV *dev, int idx, sdramaddr_t block)
36853 +{
36854 +    sdramaddr_t blocknext = read_next (dev, block);
36855 +
36856 +    if ((dev->dev_sdram_freelists[idx] = blocknext) != 0)
36857 +       write_prev (dev, blocknext, 0);
36858 +
36859 +    dev->dev_sdram_freecounts[idx]--;
36860 +    dev->dev_stats.s_sdram_bytes_free -= (SDRAM_MIN_BLOCK_SIZE << idx);
36861 +
36862 +    mb();
36863 +}
36864 +
36865 +#ifdef DEBUG
36866 +static int
36867 +display_blocks (ELAN4_DEV *dev, int indx, char *string)
36868 +{
36869 +    sdramaddr_t block;
36870 +    int nbytes = 0;
36871 +
36872 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "%s - indx %d\n", string, indx);
36873 +    for (block = dev->dev_sdram_freelists[indx]; block != (sdramaddr_t) 0; block = read_next (dev, block))
36874 +    {
36875 +       PRINTF (DBG_DEVICE, DBG_SDRAM, "  %x\n", block);
36876 +       nbytes += (SDRAM_MIN_BLOCK_SIZE << indx);
36877 +    }
36878 +
36879 +    return (nbytes);
36880 +}
36881 +
36882 +void
36883 +elan4_sdram_display (ELAN4_DEV *dev, char *string)
36884 +{
36885 +    int indx;
36886 +    int nbytes = 0;
36887 +    
36888 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_display: dev=%p\n", dev);
36889 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
36890 +       if (dev->dev_sdram_freelists[indx] != (sdramaddr_t) 0)
36891 +           nbytes += display_blocks (dev, indx, string);
36892 +    PRINTF (DBG_DEVICE, DBG_SDRAM, "\n%d bytes free - %d pages free\n", nbytes, nbytes/SDRAM_PAGE_SIZE);
36893 +}
36894 +
36895 +void
36896 +elan4_sdram_verify (ELAN4_DEV *dev)
36897 +{
36898 +    int indx, size, nbits, i, b;
36899 +    sdramaddr_t block;
36900 +
36901 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
36902 +    {
36903 +       unsigned count = 0;
36904 +
36905 +       for (block = dev->dev_sdram_freelists[indx]; block; block = read_next (dev, block), count++)
36906 +       {
36907 +           ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block);
36908 +           unsigned         off  = sdramaddr_to_bankoffset (dev, block);
36909 +           int              bit  = sdramaddr_to_bit (dev, indx, block);
36910 +
36911 +           if ((block & (size-1)) != 0)
36912 +               printk ("elan4_sdram_verify: block=%lx indx=%x - not aligned\n", block, indx);
36913 +           
36914 +           if (bank == NULL || off > bank->b_size)
36915 +               printk ("elan4_sdram_verify: block=%lx indx=%x - outside bank\n", block, indx);
36916 +           else if (BT_TEST (bank->b_bitmaps[indx], bit) == 0)
36917 +               printk ("elan4_sdram_verify: block=%lx indx=%x - bit not set\n", block, indx);
36918 +           else
36919 +           {
36920 +               for (i = indx-1, nbits = 2; i >= 0; i--, nbits <<= 1)
36921 +               {
36922 +                   bit = sdramaddr_to_bit (dev, i, block);
36923 +
36924 +                   for (b = 0; b < nbits; b++)
36925 +                       if (BT_TEST(bank->b_bitmaps[i], bit + b))
36926 +                           printk ("elan4_sdram_verify: block=%lx indx=%x - also free i=%d bit=%x\n", block, indx, i, bit+b);
36927 +               }
36928 +           }
36929 +       }
36930 +
36931 +       if (dev->dev_sdram_freecounts[indx] != count)
36932 +           printk ("elan4_sdram_verify: indx=%x expected %d got %d\n", indx, dev->dev_sdram_freecounts[indx], count);
36933 +    }
36934 +}
36935 +
36936 +#endif
36937 +
36938 +static void
36939 +free_block (ELAN4_DEV *dev, sdramaddr_t block, int indx)
36940 +{
36941 +    ELAN4_SDRAM_BANK *bank = sdramaddr_to_bank (dev, block);
36942 +    unsigned         bit  = sdramaddr_to_bit (dev, indx, block);
36943 +    unsigned         size = SDRAM_MIN_BLOCK_SIZE << indx;
36944 +
36945 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: block=%x indx=%d bit=%x\n", block, indx, bit);
36946 +
36947 +    ASSERT ((block & (size-1)) == 0);
36948 +    ASSERT (BT_TEST (bank->b_bitmaps[indx], bit) == 0);
36949 +
36950 +    while (BT_TEST (bank->b_bitmaps[indx], bit ^ 1))
36951 +    {
36952 +       sdramaddr_t buddy = block ^ size;
36953 +       
36954 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: merge block=%x buddy=%x indx=%d\n", block, buddy, indx);
36955 +       
36956 +       BT_CLEAR (bank->b_bitmaps[indx], bit ^ 1);
36957 +       
36958 +       freelist_remove (dev, indx, buddy);
36959 +       
36960 +       block = (block < buddy) ? block : buddy;
36961 +       indx++;
36962 +       size <<= 1;
36963 +       bit >>= 1;
36964 +    }
36965 +    
36966 +    PRINTF3 (DBG_DEVICE, DBG_SDRAM, "free_block: free block=%x indx=%d bit=%x\n", block, indx, bit);
36967 +    
36968 +    freelist_insert (dev, indx, block);
36969 +    
36970 +    BT_SET (bank->b_bitmaps[indx], bit);
36971 +}
36972 +
36973 +void
36974 +elan4_sdram_init (ELAN4_DEV *dev)
36975 +{
36976 +    int indx;
36977 +
36978 +    spin_lock_init (&dev->dev_sdram_lock);
36979 +
36980 +    for (indx = 0; indx < SDRAM_NUM_FREE_LISTS; indx++)
36981 +    {
36982 +       dev->dev_sdram_freelists[indx]  = (sdramaddr_t) 0;
36983 +       dev->dev_sdram_freecounts[indx] = 0;
36984 +    }
36985 +}
36986 +
36987 +void
36988 +elan4_sdram_fini (ELAN4_DEV *dev)
36989 +{
36990 +    spin_lock_destroy (&dev->dev_sdram_lock);
36991 +}
36992 +
36993 +#ifdef CONFIG_MPSAS
36994 +/* size of Elan SDRAM in simulation */
36995 +#define SDRAM_used_addr_bits           (16)
36996 +#define SDRAM_SIMULATION_BANK_SIZE     ((1 << SDRAM_used_addr_bits) * 8)       /* 128 kbytes */
36997 +
36998 +static int
36999 +elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
37000 +{
37001 +    printk ("elan%d: memory bank %d is %d Kb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (SDRAM_SIMULATION_BANK_SIZE / 1024));
37002 +
37003 +    bank->b_size = SDRAM_SIMULATION_BANK_SIZE;
37004 +
37005 +    return 1;
37006 +}
37007 +
37008 +#else
37009 +
37010 +static void
37011 +initialise_cache_tags (ELAN4_DEV *dev, unsigned addr)
37012 +{
37013 +    register int set, line;
37014 +
37015 +    mb();
37016 +
37017 +    /* Initialise the whole cache to hold sdram at "addr" as direct mapped */
37018 +
37019 +    for (set = 0; set < E4_NumCacheSets; set++)
37020 +       for (line = 0; line < E4_NumCacheLines; line++)
37021 +           write_tag (dev, Tags[set][line], addr | (set << 13) | (1 << 11));
37022 +
37023 +    read_tag (dev, Tags[set][line]);   /* read it back to guarantee the memory system is quiet again */
37024 +    mb();
37025 +}
37026 +
37027 +static __inline__ int
37028 +sdram_GreyToBinary(int GreyVal, int NoOfBits)
37029 +{
37030 +    int Bit;
37031 +    int BinaryVal=0;
37032 +    for (Bit=(1 << (NoOfBits-1)); Bit != 0; Bit >>= 1)
37033 +       BinaryVal ^= (GreyVal & Bit) ^ ((BinaryVal >> 1) & Bit);
37034 +    return (BinaryVal);
37035 +}
37036 +
37037 +static __inline__ int
37038 +sdram_BinaryToGrey(int BinaryVal)
37039 +{
37040 +    return (BinaryVal ^ (BinaryVal >> 1));
37041 +}
37042 +
37043 +void
37044 +elan4_sdram_setup_delay_lines (ELAN4_DEV *dev, int factor)
37045 +{
37046 +    /* This is used to fix the SDRAM delay line values */
37047 +    int i, AutoGenDelayValue=0;
37048 +    int NewDelayValue;
37049 +
37050 +    if (dev->dev_sdram_cfg & SDRAM_FIXED_DELAY_ENABLE)   /* already setup. */
37051 +       return;
37052 +
37053 +    /* now get an average of 10 dll values */
37054 +    for (i=0;i<10;i++)
37055 +       AutoGenDelayValue += sdram_GreyToBinary(SDRAM_GET_DLL_DELAY(read_reg64 (dev, SDRamConfigReg)),
37056 +                                              SDRAM_FIXED_DLL_DELAY_BITS);
37057 +
37058 +    NewDelayValue = factor + (AutoGenDelayValue / 10); /* Mean of 10 values */
37059 +
37060 +    dev->dev_sdram_cfg = (dev->dev_sdram_cfg & ~(SDRAM_FIXED_DLL_DELAY_MASK << SDRAM_FIXED_DLL_DELAY_SHIFT)) |
37061 +                         SDRAM_FIXED_DELAY_ENABLE | SDRAM_FIXED_DLL_DELAY(sdram_BinaryToGrey(NewDelayValue));
37062 +
37063 +    write_reg64 (dev, SDRamConfigReg, dev->dev_sdram_cfg);     /* Put back the new value */
37064 +
37065 +    pioflush_reg (dev);
37066 +}
37067 +
37068 +static int
37069 +elan4_sdram_probe_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
37070 +{
37071 +    unsigned long      mappedsize = bank->b_size;
37072 +    ioaddr_t           ioaddr;
37073 +    unsigned long long value, size;
37074 +    register int       i;
37075 +    extern int         sdram_bank_limit;
37076 +
37077 +    if (mappedsize > SDRAM_MAX_BLOCK_SIZE)
37078 +       mappedsize = SDRAM_MAX_BLOCK_SIZE;
37079 +
37080 +    while ((ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, mappedsize, &bank->b_handle)) == 0)
37081 +    {
37082 +       if (mappedsize <= (64*1024*1024))                       /* boards normally populated with 64mb, so whinge if we can't see this much */
37083 +           printk ("elan%d: could not map bank %d size %dMb\n", dev->dev_instance, (int)(bank - dev->dev_sdram_banks), (int)mappedsize/(1024*1024));
37084 +
37085 +       if ((mappedsize >>= 1) < (1024*1024))
37086 +           return 0;
37087 +    }
37088 +
37089 +    /* first probe to see if the memory bank is present */
37090 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
37091 +       initialise_cache_tags (dev, E4_CacheSize);
37092 +
37093 +    for (i = 0; i < 64; i++)
37094 +    {
37095 +       unsigned long long pattern = (1ull << i);
37096 +
37097 +       writeq (pattern, (void *)ioaddr);                                       /* write pattern at base  */
37098 +
37099 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
37100 +           initialise_cache_tags (dev, 0);
37101 +
37102 +       writeq (~pattern, (void *)(ioaddr + E4_CacheSize));                     /* write ~pattern at cachesize */
37103 +
37104 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
37105 +          initialise_cache_tags (dev, E4_CacheSize);
37106 +       
37107 +       writeq (~pattern, (void *)(ioaddr + 2*E4_CacheSize));                   /* write ~pattern at 2*cachesize */
37108 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
37109 +           initialise_cache_tags (dev, 2*E4_CacheSize);
37110 +       
37111 +       value = __elan4_readq (dev, ioaddr);                            /* read pattern back at 0 */
37112 +       
37113 +       if (value != pattern)
37114 +       {
37115 +           printk ("elan%d: sdram bank %d not present\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
37116 +           elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
37117 +           return 0;
37118 +       }
37119 +    }
37120 +    
37121 +    /* sdram bank is present, so work out its size.  We store the maximum size at the base
37122 +     * and then store the address at each address on every power of two address until
37123 +     * we reach the minimum mappable size (PAGESIZE), we then read back the value at the
37124 +     * base to determine the bank size */
37125 +    writeq (mappedsize, (void *)(ioaddr));
37126 +    if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
37127 +        initialise_cache_tags (dev, 0);
37128 +
37129 +    for (size = mappedsize >> 1; size > PAGE_SIZE; size >>= 1)
37130 +    {
37131 +       writeq (size, (void *)(ioaddr + (long)size));
37132 +        if (dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA)
37133 +           initialise_cache_tags (dev, size);
37134 +    }
37135 +
37136 +    if ((size = __elan4_readq (dev, ioaddr)) < SDRAM_MIN_BANK_SIZE)
37137 +    {
37138 +       printk ("elan%d: memory bank %d dubious\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
37139 +       elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
37140 +       return 0;
37141 +    }
37142 +
37143 +    if (sdram_bank_limit == 0 || size <= (sdram_bank_limit * 1024 * 1024))
37144 +       printk ("elan%d: memory bank %d is %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024)));
37145 +    else
37146 +    {
37147 +       size = (sdram_bank_limit * 1024 * 1024);
37148 +       printk ("elan%d: limit bank %d to %d Mb\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks), (int) (size / (1024*1024)));
37149 +    }
37150 +
37151 +    bank->b_size = size;
37152 +
37153 +    elan4_unmap_device (dev, ioaddr, mappedsize, &bank->b_handle);
37154 +    return 1;
37155 +}
37156 +#endif
37157 +
37158 +int
37159 +elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
37160 +{
37161 +    int indx, size;
37162 +
37163 +    bank->b_ioaddr = 0;
37164 +
37165 +    if (! elan4_sdram_probe_bank (dev, bank))
37166 +       return 0;
37167 +
37168 +    if ((bank->b_ioaddr = elan4_map_device (dev, ELAN4_BAR_SDRAM, bank->b_base, bank->b_size, &bank->b_handle)) == (ioaddr_t) 0)
37169 +    {
37170 +       printk ("elan%d: could not map sdrambank %d\n", dev->dev_instance, (int) (bank - dev->dev_sdram_banks));
37171 +       return 0;
37172 +    }
37173 +
37174 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1) /* allocate the buddy allocator bitmaps */
37175 +       KMEM_ZALLOC (bank->b_bitmaps[indx], bitmap_t *, sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size), 1);
37176 +    
37177 +    return 1;
37178 +}
37179 +
37180 +void
37181 +elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
37182 +{
37183 +    int indx, size;
37184 +
37185 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size <= bank->b_size; indx++, size <<= 1)
37186 +       KMEM_FREE (bank->b_bitmaps[indx], sizeof (bitmap_t) * BT_BITOUL(bank->b_size/size));
37187 +    
37188 +    elan4_unmap_device (dev, bank->b_ioaddr, bank->b_size, &bank->b_handle);
37189 +}
37190 +
37191 +void
37192 +elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank)
37193 +{
37194 +    sdramaddr_t base = bank->b_base;
37195 +    sdramaddr_t top  = bank->b_base + bank->b_size;
37196 +    register int indx;
37197 +    register unsigned long size;
37198 +
37199 +    /* align to the minimum block size */
37200 +    base = (base + SDRAM_MIN_BLOCK_SIZE - 1) & ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
37201 +    top &= ~((sdramaddr_t) SDRAM_MIN_BLOCK_SIZE-1);
37202 +
37203 +    /* don't allow 0 as a valid "base" */
37204 +    if (base == 0)
37205 +       base = SDRAM_MIN_BLOCK_SIZE;
37206 +
37207 +    /* carve the bottom to the biggest boundary */
37208 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
37209 +    {
37210 +       if ((base & size) == 0)
37211 +           continue;
37212 +
37213 +       if ((base + size) > top)
37214 +           break;
37215 +
37216 +       free_block (dev, base, indx);
37217 +       
37218 +       base += size;
37219 +    }
37220 +
37221 +    /* carve the top down to the biggest boundary */
37222 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; indx < SDRAM_NUM_FREE_LISTS; indx++, size <<= 1)
37223 +    {
37224 +       if ((top & size) == 0)
37225 +           continue;
37226 +
37227 +       if ((top - size) < base)
37228 +           break;
37229 +
37230 +       free_block (dev, (top - size), indx);
37231 +       
37232 +       top -= size;
37233 +    }
37234 +
37235 +    /* now free up the space in between */
37236 +    while (base < top)
37237 +    {
37238 +       free_block (dev, base, (SDRAM_NUM_FREE_LISTS-1));
37239 +
37240 +       base += SDRAM_MAX_BLOCK_SIZE;
37241 +    }
37242 +}
37243 +
37244 +sdramaddr_t
37245 +elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes)
37246 +{
37247 +    sdramaddr_t block;
37248 +    register int i, indx;
37249 +    unsigned long size;
37250 +    unsigned long flags;
37251 +
37252 +    spin_lock_irqsave (&dev->dev_sdram_lock, flags);
37253 +
37254 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
37255 +       ;
37256 +
37257 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: nbytes=%d indx=%d\n", nbytes, indx);
37258 +
37259 +    /* need to split a bigger block up */
37260 +    for (i = indx; i < SDRAM_NUM_FREE_LISTS; i++, size <<= 1)
37261 +       if (dev->dev_sdram_freelists[i])
37262 +           break;
37263 +    
37264 +    if (i == SDRAM_NUM_FREE_LISTS)
37265 +    {
37266 +       spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
37267 +       printk ("elan4_sdram_alloc: %d bytes failed\n", nbytes);
37268 +       return ((sdramaddr_t) 0);
37269 +    }
37270 +    
37271 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_alloc: use block=%x indx=%d\n", dev->dev_sdram_freelists[i], i);
37272 +
37273 +    /* remove the block from the free list */
37274 +    freelist_removehead (dev, i, (block = dev->dev_sdram_freelists[i]));
37275 +
37276 +    /* clear the appropriate bit in the bitmap */
37277 +    BT_CLEAR (sdramaddr_to_bank (dev, block)->b_bitmaps[i], sdramaddr_to_bit (dev,i, block));
37278 +
37279 +    /* and split it up as required */
37280 +    while (i-- > indx)
37281 +       free_block (dev, block + (size >>= 1), i);
37282 +
37283 +    spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
37284 +
37285 +    ASSERT ((block & ((SDRAM_MIN_BLOCK_SIZE << (indx))-1)) == 0);
37286 +
37287 +#ifdef CONFIG_MPSAS
37288 +    elan4_sdram_zeroq_sdram (dev, block, sizeof (sdramblock_t));
37289 +#endif
37290 +
37291 +    return ((sdramaddr_t) block);
37292 +}
37293 +
37294 +void
37295 +elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t block, int nbytes)
37296 +{
37297 +    register int indx;
37298 +    unsigned long size;
37299 +    unsigned long flags;
37300 +
37301 +    spin_lock_irqsave (&dev->dev_sdram_lock, flags);
37302 +
37303 +    for (indx = 0, size = SDRAM_MIN_BLOCK_SIZE; size < nbytes; indx++, size <<= 1)
37304 +       ;
37305 +
37306 +    PRINTF2 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_free: indx=%d block=%x\n", indx, block);
37307 +
37308 +    free_block (dev, block, indx);
37309 +
37310 +    spin_unlock_irqrestore (&dev->dev_sdram_lock, flags);
37311 +}
37312 +
37313 +void
37314 +elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t addr, int len)
37315 +{
37316 +    int set, off;
37317 +
37318 +    SET_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
37319 +
37320 +    /*
37321 +     * if flushing more than a single set (8K), then you have to flush the whole cache.
37322 +     *   NOTE - in the real world we will probably want to generate a burst across
37323 +     *          the pci bus.
37324 +     */
37325 +    if (len >= E4_CacheSetSize)
37326 +    {
37327 +       PRINTF3 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => whole cache\n", addr, len, addr + len);
37328 +
37329 +#ifdef CONFIG_MPSAS
37330 +       elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space, E4_CacheSize);
37331 +#else
37332 +       for (set = 0; set < E4_NumCacheSets; set++)
37333 +           for (off = 0; off < E4_CacheSetSize; off += E4_CacheLineSize)
37334 +               elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
37335 +#endif
37336 +    }
37337 +    else
37338 +    {
37339 +       unsigned base    = addr & ~(E4_CACHELINE_SIZE-1);
37340 +       unsigned top     = (addr + len + (E4_CACHELINE_SIZE-1)) & ~(E4_CACHELINE_SIZE-1);
37341 +       unsigned baseoff = base & (E4_CacheSetSize-1);
37342 +       unsigned topoff  = top  & (E4_CacheSetSize-1);
37343 +
37344 +       if ((base ^ top) & E4_CacheSetSize)                     /* wraps */
37345 +       {
37346 +           PRINTF7 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => split cache (%x,%x %x,%x)\n", 
37347 +                    addr, len, addr + len, 0, topoff, baseoff, E4_CacheSetSize);
37348 +
37349 +#ifdef CONFIG_MPSAS
37350 +           for (set = 0; set < E4_NumCacheSets; set++)
37351 +           {
37352 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize), topoff);
37353 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, E4_CacheSetSize - baseoff);
37354 +           }
37355 +#else
37356 +           for (set = 0; set < E4_NumCacheSets; set++)
37357 +           {
37358 +               for (off = 0; off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE)
37359 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
37360 +               
37361 +               for (off = (base & (E4_CacheSetSize-1)); off < E4_CacheSetSize; off += E4_CACHELINE_SIZE)
37362 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
37363 +           }
37364 +#endif
37365 +       }
37366 +       else
37367 +       {
37368 +           PRINTF5 (DBG_DEVICE, DBG_SDRAM, "elan4_sdram_flushcache: addr=%x len=%x (%x) => part cache (%x,%x)\n", 
37369 +                    addr, len, addr + len, baseoff, topoff);
37370 +
37371 +#ifdef CONFIG_MPSAS
37372 +           for (set = 0; set < E4_NumCacheSets; set++)
37373 +               elan4_sdram_zeroq_sdram (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + baseoff, topoff - baseoff);
37374 +#else
37375 +           for (set = 0; set < E4_NumCacheSets; set++)
37376 +               for (off = (base & (E4_CacheSetSize-1)); off < (top & (E4_CacheSetSize-1)); off += E4_CACHELINE_SIZE)
37377 +                   elan4_sdram_writeq (dev, dev->dev_cacheflush_space + (set * E4_CacheSetSize) + off, 0);
37378 +#endif
37379 +       }
37380 +    }
37381 +    pioflush_sdram (dev);
37382 +    
37383 +    CLEAR_SYSCONTROL (dev, dev_direct_map_pci_writes, CONT_DIRECT_MAP_PCI_WRITES);
37384 +}
37385 +
37386 +static char *
37387 +get_correctableErr_bitpos(uint SyndromeBits)
37388 +{
37389 +    switch (SyndromeBits)
37390 +    {
37391 +    case 0x00: return ("NoErr");
37392 +    case 0x31: return ("00"); 
37393 +    case 0x32: return ("01"); 
37394 +    case 0xc4: return ("02"); 
37395 +    case 0xc8: return ("03"); 
37396 +    case 0x26: return ("04"); 
37397 +    case 0x91: return ("05"); 
37398 +    case 0x89: return ("06"); 
37399 +    case 0x64: return ("07"); 
37400 +    case 0xc1: return ("08"); 
37401 +    case 0xf2: return ("09"); 
37402 +    case 0x34: return ("10"); 
37403 +    case 0xf8: return ("11"); 
37404 +    case 0xf1: return ("12"); 
37405 +    case 0xc2: return ("13"); 
37406 +    case 0xf4: return ("14"); 
37407 +    case 0x38: return ("15"); 
37408 +    case 0xd6: return ("16"); 
37409 +    case 0xa1: return ("17"); 
37410 +    case 0x79: return ("18"); 
37411 +    case 0xa4: return ("19"); 
37412 +    case 0xd9: return ("20"); 
37413 +    case 0xa2: return ("21"); 
37414 +    case 0x76: return ("22"); 
37415 +    case 0xa8: return ("23"); 
37416 +    case 0xe6: return ("24"); 
37417 +    case 0x51: return ("25"); 
37418 +    case 0xb9: return ("26"); 
37419 +    case 0x54: return ("27"); 
37420 +    case 0xe9: return ("28"); 
37421 +    case 0x52: return ("29"); 
37422 +    case 0xb6: return ("30"); 
37423 +    case 0x58: return ("31"); 
37424 +    case 0x13: return ("32"); 
37425 +    case 0x23: return ("33"); 
37426 +    case 0x4c: return ("34"); 
37427 +    case 0x8c: return ("35"); 
37428 +    case 0x62: return ("36"); 
37429 +    case 0x19: return ("37"); 
37430 +    case 0x98: return ("38"); 
37431 +    case 0x46: return ("39"); 
37432 +    case 0x1c: return ("40"); 
37433 +    case 0x2f: return ("41"); 
37434 +    case 0x43: return ("42"); 
37435 +    case 0x8f: return ("43"); 
37436 +    case 0x1f: return ("44"); 
37437 +    case 0x2c: return ("45"); 
37438 +    case 0x4f: return ("46"); 
37439 +    case 0x83: return ("47"); 
37440 +    case 0x6d: return ("48"); 
37441 +    case 0x1a: return ("49"); 
37442 +    case 0x97: return ("50"); 
37443 +    case 0x4a: return ("51"); 
37444 +    case 0x9d: return ("52"); 
37445 +    case 0x2a: return ("53"); 
37446 +    case 0x67: return ("54"); 
37447 +    case 0x8a: return ("55"); 
37448 +    case 0x6e: return ("56"); 
37449 +    case 0x15: return ("57"); 
37450 +    case 0x9b: return ("58"); 
37451 +    case 0x45: return ("59"); 
37452 +    case 0x9e: return ("60"); 
37453 +    case 0x25: return ("61"); 
37454 +    case 0x6b: return ("62"); 
37455 +    case 0x85: return ("63"); 
37456 +    case 0x01: return ("C0"); 
37457 +    case 0x02: return ("C1"); 
37458 +    case 0x04: return ("C2"); 
37459 +    case 0x08: return ("C3"); 
37460 +    case 0x10: return ("C4"); 
37461 +    case 0x20: return ("C5"); 
37462 +    case 0x40: return ("C6"); 
37463 +    case 0x80: return ("C7"); 
37464 +
37465 +    case 0x07: case 0x0b: case 0x0d: case 0x0e: case 0x3d: case 0x3e: case 0x70: case 0x7c: // T  
37466 +    case 0xb0: case 0xbc: case 0xc7: case 0xcb: case 0xd0: case 0xd3: case 0xe0: case 0xe3: // T  
37467 +       return ("triple");
37468 +
37469 +    case 0x0f: case 0x55: case 0x5a: case 0xa5: case 0xaa: case 0xf0: case 0xff: // Q  
37470 +       return ("quadruple");
37471 +
37472 +    case 0x16: case 0x29: case 0x37: case 0x3b: case 0x49: case 0x57: case 0x5b: case 0x5d: case 0x5e: case 0x61: // M  
37473 +    case 0x68: case 0x73: case 0x75: case 0x7a: case 0x7f: case 0x86: case 0x92: case 0x94: case 0xa7: case 0xab: // M  
37474 +    case 0xad: case 0xae: case 0xb3: case 0xb5: case 0xba: case 0xbf: case 0xcd: case 0xce: case 0xd5: case 0xda: // M  
37475 +    case 0xdc: case 0xdf: case 0xe5: case 0xea: case 0xec: case 0xef: case 0xf7: case 0xfb: case 0xfd: case 0xfe: // M  
37476 +       return ("multiple");
37477 +
37478 +    default:   // all other cases
37479 +       return ("double");
37480 +    }
37481 +}
37482 +
37483 +char *
37484 +elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, E4_uint64 ConfigReg, char *str)
37485 +{
37486 +    E4_uint64 StartupSyndrome    = dev->dev_sdram_initial_ecc_val;
37487 +    int       RisingDQSsyndrome  = ((ECC_RisingDQSSyndrome(status) == ECC_RisingDQSSyndrome(StartupSyndrome)) ?
37488 +                                   0 : ECC_RisingDQSSyndrome(status));
37489 +    int              FallingDQSsyndrome = ((ECC_FallingDQSSyndrome(status) == ECC_FallingDQSSyndrome(StartupSyndrome)) ?
37490 +                                   0 : ECC_FallingDQSSyndrome(status));
37491 +    E4_uint64 Addr = ECC_Addr(status);
37492 +    int       Bank = (Addr >> 6) & 3;
37493 +    int       Cas  = ((Addr >> 3) & 7) | ((Addr >> (8 - 3)) & 0xf8) | ((Addr >> (25 - 8)) & 0x100) |
37494 +                    ((Addr >> (27 - 9)) & 0x200) | ((Addr >> (29 - 10)) & 0xc00);
37495 +    int       Ras  = ((Addr >> 13) & 0xfff) | ((Addr >> (26 - 12)) & 0x1000) | ((Addr >> (28 - 13)) & 0x2000) |
37496 +                    ((Addr >> (30 - 14)) & 0x4000);
37497 +
37498 +    sprintf (str, "Addr=%07llx Bank=%x Ras=%x Cas=%x Falling DQS=%s Rising DQS=%s Syndrome=%x%s%s%s%s Type=%s SDRamDelay=%s,%0d",              /* 41 + 16 + 8 + 15 + 24 + 13 + 22 + 10 + 10 == 151 */
37499 +            (long long)Addr, Bank, Ras, Cas,
37500 +            get_correctableErr_bitpos(FallingDQSsyndrome),
37501 +            get_correctableErr_bitpos(RisingDQSsyndrome),
37502 +            (int)ECC_Syndrome(status),
37503 +            ECC_UncorrectableErr(status)   ? " Uncorrectable" : "",
37504 +            ECC_MultUncorrectErrs(status)  ? " Multiple-Uncorrectable" : "",
37505 +            ECC_CorrectableErr(status)     ? " Correctable" : "",
37506 +            ECC_MultCorrectErrs(status)    ? " Multiple-Correctable" : "",
37507 +            (status & 0x0010000000000000ull)  ? "W" :
37508 +            (status & 0x0020000000000000ull)  ? "R" :
37509 +            (status & 0x0030000000000000ull)  ? "C" : "-",
37510 +            (ConfigReg & SDRAM_FIXED_DELAY_ENABLE)  ? "F" : "A",
37511 +            sdram_GreyToBinary(SDRAM_GET_DLL_DELAY(ConfigReg), SDRAM_FIXED_DLL_DELAY_BITS));
37512 +
37513 +    return str;
37514 +}
37515 +
37516 +/*
37517 + * Local variables:
37518 + * c-file-style: "stroustrup"
37519 + * End:
37520 + */
37521 diff -urN clean/drivers/net/qsnet/elan4/trap.c linux-2.6.9/drivers/net/qsnet/elan4/trap.c
37522 --- clean/drivers/net/qsnet/elan4/trap.c        1969-12-31 19:00:00.000000000 -0500
37523 +++ linux-2.6.9/drivers/net/qsnet/elan4/trap.c  2005-07-20 07:35:36.000000000 -0400
37524 @@ -0,0 +1,781 @@
37525 +/*
37526 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
37527 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
37528 + * 
37529 + *    For licensing information please see the supplied COPYING file
37530 + *
37531 + */
37532 +
37533 +#ident "@(#)$Id: trap.c,v 1.23.2.1 2005/07/20 11:35:36 mike Exp $"
37534 +/*      $Source: /cvs/master/quadrics/elan4mod/trap.c,v $*/
37535 +
37536 +#include <qsnet/kernel.h>
37537 +
37538 +#include <elan4/debug.h>
37539 +#include <elan4/device.h>
37540 +
37541 +#include <elan4/trtype.h>
37542 +#include <elan4/commands.h>
37543 +
37544 +char * const PermTypes[16] = 
37545 +{
37546 +    "Disabled",       "Unused",          "LocalDataRead", "LocalDataWrite",
37547 +    "LocalRead",      "LocalExecute",    "ReadOnly",      "LocalWrite",
37548 +    "LocalEventOnly", "LocalEventWrite", "RemoteEvent",   "RemoteAll",
37549 +    "RemoteReadOnly", "RemoteWriteOnly", "DataReadWrite", "NoFault",
37550 +};
37551 +
37552 +char * const AccTypes[] =
37553 +{
37554 +    "LocalDataRead ", "LocalDataWrite", "RemoteRead    ", "RemoteWrite   ",
37555 +    "Execute       ", "LocalEvent    ", "Unused        ", "RemoteEvent   "
37556 +};
37557 +char * const DataTypes[] = {"Byte ", "HWord", "Word ", "DWord"};
37558 +char * const PhysTypes[] = {"Special Read", "Special Write", "Physical Read", "Physical Write"};
37559 +    
37560 +char * const EProcTrapNames[] = {
37561 +    "EventProcNoFault",
37562 +    "EventProcAddressAlignment",
37563 +    "EventProcMemoryFault",
37564 +    "EventProcCountWrapError",
37565 +};
37566 +
37567 +char * const CProcTrapNames[] = {
37568 +    "CommandProcNoFault",
37569 +    "CommandProcInserterError",
37570 +    "CommandProcPermissionTrap",
37571 +    "CommandProcSendTransInvalid",
37572 +    "CommandProcSendTransExpected",
37573 +    "CommandProcDmaQueueOverflow",
37574 +    "CommandProcInterruptQueueOverflow",
37575 +    "CommandProcMemoryFault",
37576 +    "CommandProcRouteFetchFault",
37577 +    "CommandProcFailCountZero",
37578 +    "CommandProcAddressAlignment",
37579 +    "CommandProcWaitTrap",
37580 +    "CommandProcMultipleGuards",
37581 +    "CommandProcOpenOnGuardedChan",
37582 +    "CommandProcThreadQueueOverflow",
37583 +    "CommandProcBadData",
37584 +};
37585 +
37586 +char *const CProcInsertError[] = {
37587 +    "No Error",
37588 +    "Overflowed",
37589 +    "Invalid Write Size",
37590 +    "Invalid Write Order",
37591 +};
37592 +
37593 +char * const DProcTrapNames[] = {
37594 +    "DmaProcNoFault",
37595 +    "DmaProcRouteFetchFault",
37596 +    "DmaProcFailCountError",
37597 +    "DmaProcPacketAckError",
37598 +    "DmaProcRunQueueReadFault",
37599 +    "DmaProcQueueOverFlow",
37600 +    "DmaProcPrefetcherFault",  /* addy: Added new trap type for Prefetcher faults */
37601 +};
37602 +
37603 +char *const IProcTrapNames[] = {
37604 +    "InputNoFault",
37605 +    "InputAddressAlignment",
37606 +    "InputMemoryFault",
37607 +    "InputInvalidTransType",
37608 +    "InputDmaQueueOverflow",
37609 +    "InputEventEngineTrapped",
37610 +    "InputCrcErrorAfterPAckOk",
37611 +    "InputEopErrorOnWaitForEop",
37612 +    "InputEopErrorTrap",
37613 +    "InputDiscardAfterAckOk",
37614 +};
37615 +
37616 +char *const TProcTrapNames[] = {
37617 +    "HaltThread",
37618 +    "TrapForTooManyInstructions",
37619 +    "InstAccessException",
37620 +    "Unimplemented",
37621 +    "DataAccessException",
37622 +    "DataAlignmentError",
37623 +    "TrapForUsingBadData",
37624 +};
37625 +
37626 +#define declare_spaces(space, str)             char space[64]; do { int i; for (i = 0; i < strlen(str); i++) space[i] = ' '; space[i] = '\0'; } while (0)
37627 +#define declare_prefix(space, spaces, str)     char space[64]; do { strcpy (space, spaces); strcat (space, str); } while (0)
37628 +
37629 +void
37630 +elan4_display_farea (void *type, int mode, char *str, E4_FaultSave *farea)
37631 +{
37632 +    E4_uint32 FSR = FaultSaveFSR(farea->FSRAndFaultContext);
37633 +
37634 +    declare_spaces(spaces, str);
37635 +    
37636 +    elan4_debugf (type, mode, "%s Fault occurred at %016llx for context %4x\n", str,
37637 +                 farea->FaultAddress, FaultSaveContext(farea->FSRAndFaultContext));
37638 +    
37639 +    if (FSR & AT_VirtualWriteAccBit)                           /* Virtual write access */
37640 +       elan4_debugf (type, mode, "%s FSR=%x: Virtual Write. DWSize=0x%x EndP=0x%x Access=%s DT=%s\n",
37641 +                     spaces, FSR, FSR & AT_VirtualWriteSizeMask,
37642 +                     (FSR >> AT_VirtualWriteEndPtrShift) & AT_VirtualWriteEndPtrMask,
37643 +                     AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask],
37644 +                     DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]);
37645 +    else if (FSR & AT_VirtualReadAccBit)                       /* Virtual read access */
37646 +       elan4_debugf (type, mode, "%s FSR=%x: Virtual Read. DWSize=0x%x Access=%s DT=%s\n",
37647 +                     spaces, FSR, FSR & AT_VirtualReadSizeMask,
37648 +                     AccTypes[(FSR >> AT_PermBitsShift) & AT_PermBitsMask],
37649 +                     DataTypes[(FSR >> AT_BlkDataTyShift) & AT_BlkDataTyMask]);
37650 +    else
37651 +       elan4_debugf (type, mode, "%s FSR=%x: %s. Size=0x%x\n", spaces,
37652 +                     FSR, PhysTypes[(FSR >> AT_SelBitsShift) & AT_SelBitsMask],
37653 +                     FSR & AT_OtherSizeMask);
37654 +    elan4_debugf (type, mode, "%s FSR: %s %s%s %sWalking\n", spaces,
37655 +                 (FSR & AT_NonAlloc) ? "NonAlloc" : "Alloc",
37656 +                 (FSR & AT_DmaData) ? "Dma " : "",
37657 +                 (FSR & FSR_WalkForThread) ? "ThreadAcc" : "UnitsAcc",
37658 +                 (FSR & FSR_Walking) ? "" : "Not");
37659 +    PRINTF (type, mode, "%s FSR: %s%sHashTable=%s\n", spaces,
37660 +           (FSR & FSR_NoTranslationsFound) ? "NoTranslationsFound " : "",
37661 +           (FSR & FSR_WalkingProtectionFault) ? "WalkingProtectionFault " : "",
37662 +           (FSR & FSR_HashTable1) ? "1" : "0");
37663 +    if (FSR & (FSR_RouteVProcErr | FSR_FaultForBadData))
37664 +       elan4_debugf (type, mode, "%s FSR: %s%s\n", spaces,
37665 +                     (FSR & FSR_RouteVProcErr) ? "RouteVProcErr " : "",
37666 +                     (FSR & FSR_FaultForBadData) ? "FaultForBadData " : "");
37667 +}
37668 +
37669 +void
37670 +elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap)
37671 +{
37672 +    declare_spaces (spaces, str);
37673 +
37674 +    elan4_debugf (type, mode, "%s Status=%016llx %s EventAddr=%016llx CountAndType=%016llx\n", str,
37675 +                 trap->tr_status, EProcTrapNames[EPROC_TrapType(trap->tr_status)],
37676 +                 trap->tr_eventaddr, trap->tr_event.ev_CountAndType);
37677 +    elan4_debugf (type, mode, "%s Param=%016llx.%016llx\n", spaces,
37678 +                 trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
37679 +
37680 +    elan4_display_farea (type, mode, strcat (spaces, EPROC_Port0Fault(trap->tr_status) ? " EPROC0" : " EPROC1"), &trap->tr_faultarea);
37681 +}
37682 +
37683 +void
37684 +elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap)
37685 +{
37686 +    declare_spaces(spaces, str);
37687 +
37688 +    elan4_debugf (type, mode, "%s Status=%llx %s Command=%llx\n", str, trap->tr_status, 
37689 +                 CProcTrapNames[CPROC_TrapType(trap->tr_status)], trap->tr_command);
37690 +    elan4_debugf (type, mode, "%s Desc=%016llx %016llx %016llx %016llx\n", str,
37691 +                 trap->tr_qdesc.CQ_QueuePtrs, trap->tr_qdesc.CQ_HoldingValue,
37692 +                 trap->tr_qdesc.CQ_AckBuffers, trap->tr_qdesc.CQ_Control);
37693 +
37694 +    switch (CPROC_TrapType (trap->tr_status))
37695 +    {
37696 +    case CommandProcInserterError:
37697 +       elan4_debugf (type, mode, "%s   %s\n", str, CProcInsertError[CQ_RevB_ErrorType(trap->tr_qdesc.CQ_QueuePtrs)]);
37698 +       break;
37699 +
37700 +    case CommandProcWaitTrap:
37701 +       elan4_display_eproc_trap (type, mode, spaces, &trap->tr_eventtrap);
37702 +       break;
37703 +
37704 +    default:
37705 +       elan4_display_farea (type, mode, spaces, &trap->tr_faultarea);
37706 +       break;
37707 +    }
37708 +}
37709 +
37710 +void
37711 +elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap)
37712 +{
37713 +    declare_spaces (spaces, str);
37714 +
37715 +    elan4_debugf (type, mode, "%s status %llx - %s\n", str,
37716 +                 trap->tr_status, DProcTrapNames[DPROC_TrapType(trap->tr_status)]);
37717 +
37718 +    elan4_debugf (type, mode, "%s DESC %016llx %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_typeSize, 
37719 +                 trap->tr_desc.dma_cookie, trap->tr_desc.dma_vproc, trap->tr_desc.dma_srcAddr);
37720 +    elan4_debugf (type, mode, "%s      %016llx %016llx %016llx\n", spaces, trap->tr_desc.dma_dstAddr, 
37721 +                 trap->tr_desc.dma_srcEvent, trap->tr_desc.dma_dstEvent);
37722 +
37723 +    if (DPROC_PrefetcherFault (trap->tr_status))
37724 +       elan4_display_farea (type, mode, spaces, &trap->tr_prefetchFault);
37725 +}
37726 +
37727 +void
37728 +elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap)
37729 +{
37730 +    register int i;
37731 +    declare_spaces (spaces, str);
37732 +
37733 +    elan4_debugf (type, mode, "%s PC=%016llx nPC=%016llx State=%016llx Status=%016llx -%s%s%s%s\n", str,
37734 +                 trap->tr_pc, trap->tr_npc, trap->tr_state, trap->tr_status, 
37735 +                 (trap->tr_state & TS_TrapForTooManyInstructions) ? " TrapForTooManyInstructions" : "",
37736 +                 (trap->tr_state & TS_Unimplemented)              ? " Unimplemented"              : "",
37737 +                 (trap->tr_state & TS_DataAlignmentError)         ? " DataAlignmentError"         : "",
37738 +                 (trap->tr_state & TS_InstAccessException)        ? " InstAccessException"        : "",
37739 +                 (trap->tr_state & TS_DataAccessException)        ? " DataAccessException"        : "");
37740 +    
37741 +    for (i = 0; i < 64; i += 4)
37742 +       elan4_debugf (type, mode, "%s r%d - %016llx %016llx %016llx %016llx\n", spaces, i,
37743 +                     trap->tr_regs[i], trap->tr_regs[i+1], trap->tr_regs[i+2], trap->tr_regs[i+3]);
37744 +    
37745 +    if (trap->tr_state & TS_InstAccessException)
37746 +    {
37747 +       declare_prefix (prefix, spaces, "Inst");
37748 +
37749 +       elan4_display_farea (type, mode, prefix, &trap->tr_instFault);
37750 +    }
37751 +
37752 +    if (trap->tr_state & TS_DataAccessException)
37753 +    {
37754 +       declare_prefix (prefix, spaces, "Data");
37755 +       elan4_display_farea (type, mode, prefix, &trap->tr_dataFault);
37756 +    }
37757 +}
37758 +
37759 +void
37760 +elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap)
37761 +{
37762 +    register int i;
37763 +    declare_spaces (spaces, str);
37764 +
37765 +    for (i = 0; i < trap->tr_numTransactions; i++)
37766 +    {
37767 +       E4_IprocTrapHeader *hdrp    = &trap->tr_transactions[i];
37768 +       E4_uint64           status  = hdrp->IProcStatusCntxAndTrType;
37769 +       E4_Addr             addr    = hdrp->TrAddr;
37770 +       char               *typeString;
37771 +       char                buffer[256];
37772 +       char               *ptr = buffer;
37773 +       
37774 +       if (IPROC_EOPTrap(status))
37775 +       {
37776 +           switch (IPROC_EOPType(status))
37777 +           {
37778 +           case EOP_GOOD:        typeString = "EopGood";   break;
37779 +           case EOP_BADACK:      typeString = "EopBadAck"; break;
37780 +           case EOP_ERROR_RESET: typeString = "EopReset";  break;
37781 +           default:              typeString = "EopBad";    break;
37782 +           }
37783 +           
37784 +           ptr += sprintf (ptr, "%15s Cntx=%-6d", typeString, IPROC_NetworkContext(status));
37785 +       }
37786 +       else
37787 +       {
37788 +           if (IPROC_BadLength(status))
37789 +               typeString = "BadLength";
37790 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_DISCARD)
37791 +               typeString = "DiscardCrc";
37792 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_ERROR)
37793 +               typeString = "ErrorCrc Remote Network error";
37794 +           else if (IPROC_TransCRCStatus(status) == CRC_STATUS_BAD)
37795 +               typeString = "BadCrc Cable error into this node.";
37796 +           else
37797 +           {
37798 +               if ((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK)
37799 +                   typeString = "WriteBlock";
37800 +               else
37801 +               {
37802 +                   switch (IPROC_TransactionType(status) & TR_OPCODE_MASK)
37803 +                   {
37804 +                   case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK: typeString = "SetEvent";        break;
37805 +                   case TR_REMOTEDMA & TR_OPCODE_MASK:         typeString = "RemoteDma";       break;
37806 +                   case TR_SENDDISCARD & TR_OPCODE_MASK:       typeString = "SendDiscard";     break;
37807 +                   case TR_GTE & TR_OPCODE_MASK:               typeString = "GTE";             break;
37808 +                   case TR_LT & TR_OPCODE_MASK:                typeString = "LT";              break;
37809 +                   case TR_EQ & TR_OPCODE_MASK:                typeString = "EQ";              break;
37810 +                   case TR_NEQ & TR_OPCODE_MASK:               typeString = "NEQ";             break;
37811 +                   case TR_IDENTIFY & TR_OPCODE_MASK:          typeString = "Identify";        break;
37812 +                   case TR_ADDWORD & TR_OPCODE_MASK:           typeString = "AddWord";         break;
37813 +                   case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:    typeString = "InputQCommit";    break;
37814 +                   case TR_TESTANDWRITE & TR_OPCODE_MASK:      typeString = "TestAndWrite";    break;
37815 +                   case TR_INPUT_Q_GETINDEX & TR_OPCODE_MASK:  typeString = "InputQGetIndex";  break;
37816 +                   case TR_TRACEROUTE_TRANS & TR_OPCODE_MASK:  typeString = "TraceRoute";      break;
37817 +                   default:                                    typeString = "Unknown";         break;
37818 +                   }
37819 +               }
37820 +           }
37821 +
37822 +           ptr += sprintf (ptr, "%15s Cntx=%-6d Addr=%016llx", typeString, IPROC_NetworkContext(status), (unsigned long long) addr);
37823 +       }
37824 +       
37825 +       
37826 +       if (IPROC_TrapValue(status) != InputNoFault)
37827 +       {
37828 +           ptr += sprintf (ptr, " TrType=%2d ChanTrapped=%x GoodAck=%x BadAck=%x InputterChan=%d", IPROC_TrapValue(status),
37829 +                           IPROC_ChannelTrapped(status), IPROC_GoodAckSent(status), IPROC_BadAckSent(status),
37830 +                           IPROC_InputterChan(status));
37831 +           if (IPROC_EOPTrap(status))
37832 +               ptr += sprintf (ptr, " EOPType=%d", IPROC_EOPType(status));
37833 +           else
37834 +               ptr += sprintf (ptr, " %s%s%s%s", 
37835 +                               IPROC_FirstTrans(status) ? " FirstTrans" : "",
37836 +                               IPROC_LastTrans(status) ? " LastTrans" : "",
37837 +                               (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP) ? " WaitForEop" : "",
37838 +                               (IPROC_GoodAckSent(status) &  (1 << IPROC_Channel(status))) ? " AckSent" : "");
37839 +       }
37840 +       
37841 +       elan4_debugf (type, mode, "%s %s\n", str, buffer);
37842 +
37843 +       str = spaces;
37844 +    }
37845 +
37846 +    elan4_display_farea (type, mode, spaces, &trap->tr_faultarea);
37847 +}
37848 +
37849 +#define elan4_sdram_copy_faultarea(dev, unit, farea) \
37850 +    elan4_sdram_copyq_from_sdram ((dev), (dev)->dev_faultarea + (unit) * sizeof (E4_FaultSave), (E4_uint64 *) farea, sizeof (E4_FaultSave));
37851 +
37852 +void
37853 +elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent)
37854 +{
37855 +    /* only one of the memory ports can fault at a time */
37856 +    ASSERT (EPROC_TrapType(status) != EventProcMemoryFault || (EPROC_Port0Fault(status) ^ EPROC_Port1Fault(status)) == 1);
37857 +
37858 +    trap->tr_status = status;
37859 +    
37860 +    if (EPROC_Port0Fault(status))
37861 +       elan4_sdram_copy_faultarea (dev, CUN_EventProc0, &trap->tr_faultarea);
37862 +    if (EPROC_Port1Fault(status))
37863 +       elan4_sdram_copy_faultarea (dev, CUN_EventProc1, &trap->tr_faultarea);
37864 +
37865 +    if (iswaitevent)
37866 +    {
37867 +       /*
37868 +        * for waitevents the Event address is always taken from the command processor
37869 +        * 
37870 +        * if we trapped during the copy then we take the "Event" from the event processor
37871 +        * since we need to complete the copy.  Otherwise we'll be reissuing the original
37872 +        * command again
37873 +        */
37874 +       E4_uint32 fsr = FaultSaveFSR(trap->tr_faultarea.FSRAndFaultContext);
37875 +
37876 +       trap->tr_eventaddr = read_reg64 (dev, CommandHold) ^ WAIT_EVENT_CMD;
37877 +
37878 +       if (EPROC_TrapType(trap->tr_status) == EventProcMemoryFault && 
37879 +           (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite))
37880 +       {
37881 +           trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType);
37882 +           trap->tr_event.ev_Params[0]    = read_reg64 (dev, EventParameters[0]);
37883 +           trap->tr_event.ev_Params[1]    = read_reg64 (dev, EventParameters[1]);
37884 +       }
37885 +       else
37886 +       {
37887 +           trap->tr_event.ev_Params[0]    = read_reg64 (dev, CommandCopy[5]);
37888 +           trap->tr_event.ev_CountAndType = read_reg64 (dev, CommandCopy[4]);
37889 +           trap->tr_event.ev_Params[1]    = read_reg64 (dev, CommandCopy[6]);
37890 +
37891 +       }
37892 +    }
37893 +    else
37894 +    {
37895 +       trap->tr_eventaddr             = read_reg64 (dev, EventAddress);
37896 +       trap->tr_event.ev_CountAndType = read_reg64 (dev, EventCountAndType);
37897 +       trap->tr_event.ev_Params[0]    = read_reg64 (dev, EventParameters[0]);
37898 +       trap->tr_event.ev_Params[1]    = read_reg64 (dev, EventParameters[1]);
37899 +    }
37900 +
37901 +    BumpDevStat (dev, s_eproc_trap_types[EPROC_TrapType(status)]);
37902 +}
37903 +
37904 +int 
37905 +cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq, int chan)
37906 +{
37907 +       /* cq = ucq->ucq_cq */
37908 +       if ((cq->cq_perm & CQ_STENEnableBit) != 0)
37909 +       {
37910 +            sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc));
37911 +           E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
37912 +           sdramaddr_t   insertPtr    = (queuePtrs & CQ_PtrMask);
37913 +           sdramaddr_t   commandPtr   = CQ_CompletedPtr (queuePtrs);
37914 +           unsigned int  cqSize       = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
37915 +
37916 +           if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue))
37917 +           {
37918 +               E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue));
37919 +
37920 +               for (; (oooMask & 1) != 0; oooMask >>= 1)
37921 +                   insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1));
37922 +           }
37923 +
37924 +           while (commandPtr != insertPtr)
37925 +           {
37926 +               E4_uint64    command = elan4_sdram_readq (dev, commandPtr);
37927 +               unsigned int cmdSize;
37928 +
37929 +                switch (__categorise_command (command, &cmdSize))
37930 +               {
37931 +               case 0:
37932 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
37933 +                   break;
37934 +
37935 +               case 1: /* open */
37936 +                   if (((chan << 4) == (command & (1<<4))))
37937 +                       /* Matches supplied channel */
37938 +                       return (command >> 32);
37939 +                   else
37940 +                       (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
37941 +                   break;
37942 +
37943 +               case 2:
37944 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
37945 +               case 3:
37946 +                   printk ("cproc_open_extract_vp: invalid command %llx\n", (long long)command);
37947 +                   return -1;
37948 +               }
37949 +           } /* while */
37950 +       }
37951 +
37952 +       return -1;
37953 +}
37954 +
37955 +void
37956 +elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum)
37957 +{
37958 +    /* extract the state from the device */
37959 +    elan4_sdram_copy_faultarea (dev, CUN_CommandProc, &trap->tr_faultarea);
37960 +
37961 +    trap->tr_status  = status;
37962 +    trap->tr_command = read_reg64 (dev, CommandHold);
37963 +    
37964 +    elan4_sdram_copyq_from_sdram (dev, dev->dev_cqaddr + (cqnum * sizeof (E4_CommandQueueDesc)), &trap->tr_qdesc, sizeof (E4_CommandQueueDesc));
37965 +
37966 +    if (CPROC_TrapType (status) == CommandProcWaitTrap)
37967 +       elan4_extract_eproc_trap (dev, read_reg64 (dev, EProcStatus), &trap->tr_eventtrap, 1);
37968 +
37969 +    BumpDevStat (dev, s_cproc_trap_types[CPROC_TrapType(status)]);
37970 +
37971 +    if (PackValue(trap->tr_qdesc.CQ_AckBuffers, 0) == PackTimeout || PackValue(trap->tr_qdesc.CQ_AckBuffers, 1) == PackTimeout)
37972 +       BumpDevStat (dev, s_cproc_timeout);
37973 +}
37974 +
37975 +void
37976 +elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit)
37977 +{
37978 +    trap->tr_status = status;
37979 +    
37980 +    if (unit == 0)
37981 +    {
37982 +       trap->tr_desc.dma_typeSize   = read_reg64 (dev, Dma0Desc.dma_typeSize);
37983 +       trap->tr_desc.dma_cookie     = read_reg64 (dev, Dma0Desc.dma_cookie);
37984 +       trap->tr_desc.dma_vproc      = read_reg64 (dev, Dma0Desc.dma_vproc);
37985 +       trap->tr_desc.dma_srcAddr    = read_reg64 (dev, Dma0Desc.dma_srcAddr);
37986 +       trap->tr_desc.dma_dstAddr    = read_reg64 (dev, Dma0Desc.dma_dstAddr);
37987 +       trap->tr_desc.dma_srcEvent   = read_reg64 (dev, Dma0Desc.dma_srcEvent);
37988 +       trap->tr_desc.dma_dstEvent   = read_reg64 (dev, Dma0Desc.dma_dstEvent);
37989 +       
37990 +       elan4_sdram_copy_faultarea (dev, CUN_DProcPA0, &trap->tr_packAssemFault);
37991 +    }
37992 +    else
37993 +    {
37994 +       trap->tr_desc.dma_typeSize   = read_reg64 (dev, Dma1Desc.dma_typeSize);
37995 +       trap->tr_desc.dma_cookie     = read_reg64 (dev, Dma1Desc.dma_cookie);
37996 +       trap->tr_desc.dma_vproc      = read_reg64 (dev, Dma1Desc.dma_vproc);
37997 +       trap->tr_desc.dma_srcAddr    = read_reg64 (dev, Dma1Desc.dma_srcAddr);
37998 +       trap->tr_desc.dma_dstAddr    = read_reg64 (dev, Dma1Desc.dma_dstAddr);
37999 +       trap->tr_desc.dma_srcEvent   = read_reg64 (dev, Dma1Desc.dma_srcEvent);
38000 +       trap->tr_desc.dma_dstEvent   = read_reg64 (dev, Dma1Desc.dma_dstEvent);
38001 +       
38002 +       elan4_sdram_copy_faultarea (dev, CUN_DProcPA1, &trap->tr_packAssemFault);
38003 +    }
38004 +    
38005 +    if (DPROC_PrefetcherFault (trap->tr_status))
38006 +    {
38007 +       elan4_sdram_copy_faultarea (dev, (CUN_DProcData0 | DPROC_FaultUnitNo(trap->tr_status)), &trap->tr_prefetchFault);
38008 +       /* addy: Added new trap type for Prefetcher faults */
38009 +       BumpDevStat (dev, s_dproc_trap_types[6]);
38010 +    }
38011 +    else if (DPROC_PacketTimeout (trap->tr_status))
38012 +       BumpDevStat (dev, s_dproc_timeout);
38013 +    else
38014 +       BumpDevStat (dev, s_dproc_trap_types[DPROC_TrapType(status)]);
38015 +}    
38016 +
38017 +void
38018 +elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap)
38019 +{
38020 +    int i;
38021 +
38022 +    trap->tr_status = status;
38023 +    trap->tr_state  = read_reg64 (dev, Thread_Trap_State);
38024 +    trap->tr_pc     = read_reg64 (dev, PC_W);
38025 +    trap->tr_npc    = read_reg64 (dev, nPC_W);
38026 +    trap->tr_dirty  = read_reg64 (dev, DirtyBits);
38027 +    trap->tr_bad    = read_reg64 (dev, BadBits);
38028 +
38029 +#ifdef CONFIG_MPSAS
38030 +    if (sas_copyfrom_dev (dev->dev_osdep.pdev, ELAN4_BAR_REGISTERS, 
38031 +                         ((dev->dev_devinfo.dev_revision_id == PCI_REVISION_ID_ELAN4_REVA) ? ELAN4_REVA_REG_OFFSET : ELAN4_REVB_REG_OFFSET) +
38032 +                         offsetof (E4_Registers, Regs.TProcRegs), (unsigned long) &trap->tr_regs, 64*sizeof (E4_uint64)) < 0)
38033 +    {
38034 +       for (i = 0; i < 64; i++)
38035 +           if (trap->tr_dirty & ((E4_uint64) 1 << i))
38036 +               trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
38037 +    }
38038 +
38039 +    for (i = 0; i < 64; i++)
38040 +       if (! (trap->tr_dirty & ((E4_uint64) 1 << i)))
38041 +           trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
38042 +#else
38043 +    for (i = 0; i < 64; i++)
38044 +    {
38045 +       if (trap->tr_dirty & ((E4_uint64) 1 << i))
38046 +           trap->tr_regs[i] = read_reg64 (dev, TProcRegs[i]);
38047 +       else
38048 +           trap->tr_regs[i] = 0xdeadbabedeadbabeULL;
38049 +    }
38050 +#endif
38051 +    
38052 +    if (trap->tr_state & TS_DataAccessException)
38053 +       elan4_sdram_copy_faultarea (dev, CUN_TProcData0 | TS_DataPortNo (trap->tr_state), &trap->tr_dataFault);
38054 +
38055 +    if (trap->tr_state & TS_InstAccessException)
38056 +       elan4_sdram_copy_faultarea (dev, CUN_TProcInst, &trap->tr_instFault);
38057 +
38058 +    for (i = 0; i < 7; i++)
38059 +       if (trap->tr_state & (1 << i))
38060 +           BumpDevStat (dev, s_tproc_trap_types[i]);
38061 +}
38062 +
38063 +void
38064 +elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit)
38065 +{
38066 +    sdramaddr_t hdroff  = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrHeader[0][unit]);
38067 +    sdramaddr_t dataoff = dev->dev_inputtraparea + offsetof (E4_IprocTrapState, TrData[0][unit]);
38068 +    register int i, j;
38069 +    int                  CurrUnitNo    = (unit >= 2) ? CUN_IProcHighPri : CUN_IProcLowPri;
38070 +    sdramaddr_t CurrFaultArea = dev->dev_faultarea + (CurrUnitNo * sizeof (E4_FaultSave));
38071 +
38072 +    /* Finally copy the fault area */
38073 +    elan4_sdram_copy_faultarea (dev, CurrUnitNo, &trap->tr_faultarea);
38074 +
38075 +    /*
38076 +     * Clear out the fault save area after reading to allow a fault on the write of the back pointer of
38077 +     * an InputQCommit to be observed if a simultaneous event proc trap occurs.
38078 +     */
38079 +    elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FSRAndFaultContext), 0x0ULL);
38080 +    elan4_sdram_writeq (dev, CurrFaultArea + offsetof(E4_FaultSave, FaultAddress), 0x0ULL);
38081 +
38082 +    /* copy the transaction headers */
38083 +    trap->tr_transactions[0].IProcStatusCntxAndTrType = status;
38084 +    trap->tr_transactions[0].TrAddr                   = elan4_sdram_readq (dev, hdroff + offsetof (E4_IprocTrapHeader, TrAddr));
38085 +    
38086 +    for (i = 0; !IPROC_EOPTrap(trap->tr_transactions[i].IProcStatusCntxAndTrType);)
38087 +    {
38088 +       if (IPROC_BadLength (trap->tr_transactions[i].IProcStatusCntxAndTrType))
38089 +           BumpDevStat (dev, s_bad_length);
38090 +       else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_BAD)
38091 +           BumpDevStat (dev, s_crc_bad);
38092 +       else if (IPROC_TransCRCStatus (trap->tr_transactions[i].IProcStatusCntxAndTrType) == CRC_STATUS_ERROR)
38093 +           BumpDevStat (dev, s_crc_error);
38094 +
38095 +       BumpDevStat (dev, s_iproc_trap_types[IPROC_TrapValue (trap->tr_transactions[i].IProcStatusCntxAndTrType)]);
38096 +
38097 +       hdroff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapHeader);
38098 +
38099 +       if (++i == MAX_TRAPPED_TRANS)
38100 +           break;
38101 +
38102 +       elan4_sdram_copyq_from_sdram (dev, hdroff, &trap->tr_transactions[i], sizeof (E4_IprocTrapHeader));
38103 +    }
38104 +    
38105 +    if (IPROC_EOPType (trap->tr_transactions[i].IProcStatusCntxAndTrType) == EOP_ERROR_RESET)
38106 +       BumpDevStat (dev, s_eop_reset);
38107 +
38108 +    /* Remember the number of transactions we've copied */
38109 +    trap->tr_numTransactions = i + 1;
38110 +    
38111 +    /* Copy all the data blocks in one go */
38112 +    for (i = 0; i < MIN (trap->tr_numTransactions, MAX_TRAPPED_TRANS); i++, dataoff += NO_OF_INPUT_CHANNELS*sizeof (E4_IprocTrapData))
38113 +    {
38114 +       if (IPROC_BadLength(status) || IPROC_TransCRCStatus (status) != CRC_STATUS_GOOD)
38115 +           elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, TRANS_DATA_DWORDS*sizeof(E4_uint64));
38116 +       else
38117 +       {
38118 +           int trtype  = IPROC_TransactionType(trap->tr_transactions[i].IProcStatusCntxAndTrType);
38119 +           int ndwords = (trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT;
38120 +
38121 +           elan4_sdram_copyq_from_sdram (dev, dataoff, trap->tr_dataBuffers[i].Data, ndwords*sizeof(E4_uint64));
38122 +
38123 +           for (j = ndwords; j < TRANS_DATA_DWORDS; j++)
38124 +               trap->tr_dataBuffers[i].Data[j] = 0xbeec0f212345678ull;
38125 +       }
38126 +    }
38127 +    
38128 +}
38129 +
38130 +void
38131 +elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap)
38132 +{
38133 +    int i;
38134 +
38135 +    trap->tr_flags          = 0;
38136 +    trap->tr_trappedTrans    = TR_TRANS_INVALID;
38137 +    trap->tr_waitForEopTrans = TR_TRANS_INVALID;
38138 +    trap->tr_identifyTrans   = TR_TRANS_INVALID;
38139 +
38140 +    if (trap->tr_numTransactions > MAX_TRAPPED_TRANS)
38141 +       trap->tr_flags = TR_FLAG_TOOMANY_TRANS;
38142 +
38143 +    /*
38144 +     * Now scan all the transactions received 
38145 +     */
38146 +    for (i = 0; i < MIN(trap->tr_numTransactions, MAX_TRAPPED_TRANS) ; i++)
38147 +    {
38148 +       E4_IprocTrapHeader *hdrp   = &trap->tr_transactions[i];
38149 +       E4_uint64           status = hdrp->IProcStatusCntxAndTrType;
38150 +
38151 +       if (trap->tr_identifyTrans == TR_TRANS_INVALID)
38152 +       {
38153 +           switch (IPROC_TransactionType (status) & (TR_OPCODE_MASK | TR_SIZE_MASK))
38154 +           {
38155 +           case TR_IDENTIFY          & (TR_OPCODE_MASK | TR_SIZE_MASK):
38156 +           case TR_REMOTEDMA         & (TR_OPCODE_MASK | TR_SIZE_MASK):
38157 +           case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK):
38158 +           case TR_INPUT_Q_COMMIT    & (TR_OPCODE_MASK | TR_SIZE_MASK):
38159 +           case TR_ADDWORD           & (TR_OPCODE_MASK | TR_SIZE_MASK):
38160 +           case TR_TESTANDWRITE      & (TR_OPCODE_MASK | TR_SIZE_MASK):
38161 +               trap->tr_identifyTrans = i;
38162 +               break;
38163 +           }
38164 +       }
38165 +
38166 +       if (IPROC_TrapValue(status) == InputNoFault)            /* We're looking at transactions stored before the trap */
38167 +           continue;                                           /* these should only be identifies */
38168 +       
38169 +       if (trap->tr_trappedTrans == TR_TRANS_INVALID)          /* Remember the transaction which caused the */
38170 +           trap->tr_trappedTrans = i;                          /* trap */
38171 +
38172 +       if (IPROC_GoodAckSent (status) & (1 << IPROC_InputterChan (status)))
38173 +           trap->tr_flags |= TR_FLAG_ACK_SENT;
38174 +           
38175 +       if (IPROC_EOPTrap(status))                              /* Check for EOP */
38176 +       {
38177 +           ASSERT (i == trap->tr_numTransactions - 1);
38178 +
38179 +           switch (IPROC_EOPType(status))
38180 +           {
38181 +           case EOP_GOOD:
38182 +               /* if we get an EOP_GOOD then the outputer should have received a PAckOk. */  
38183 +               /* unless it was a flood, in which case someone must have sent an ack */
38184 +               /* but not necessarily us */
38185 +               break;
38186 +
38187 +           case EOP_BADACK:
38188 +               /* if we get an EOP_BADACK then the outputer did not receive a PAckOk even if
38189 +                * we sent a PAckOk. We flag this to ignore the AckSent. */
38190 +               trap->tr_flags |= TR_FLAG_EOP_BAD;
38191 +               break;
38192 +
38193 +           case EOP_ERROR_RESET:
38194 +               /* if we get an EOP_ERROR_RESET then the outputer may or may not have got a PAckOk. */
38195 +               trap->tr_flags |= TR_FLAG_EOP_ERROR;
38196 +               break;
38197 +
38198 +           default:
38199 +               printk ("elan4_inspect_iproc_trap: unknown eop type %d", IPROC_EOPType(status));
38200 +               BUG();
38201 +               /* NOTREACHED */
38202 +           }
38203 +           continue;
38204 +       }
38205 +       else
38206 +       {
38207 +           if (IPROC_BadLength(status) || (IPROC_TransCRCStatus (status) == CRC_STATUS_ERROR ||
38208 +                                           IPROC_TransCRCStatus (status) == CRC_STATUS_BAD))
38209 +           {
38210 +               {
38211 +                   register int j;
38212 +                   if (IPROC_BadLength(status))
38213 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped on bad length data. status=%016llx Address=%016llx\n",
38214 +                                status, hdrp->TrAddr);
38215 +                   else
38216 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: Trapped with bad CRC. status=%016llx Address=%016llx\n",
38217 +                                status, hdrp->TrAddr);
38218 +                   for (j = 0; j < TRANS_DATA_DWORDS; j++)
38219 +                       PRINTF2 (DBG_DEVICE, DBG_INTR, "LinkError: DataBuffers[%d] : %016llx\n", j, trap->tr_dataBuffers[i].Data[j]);
38220 +               }
38221 +
38222 +               trap->tr_flags |= TR_FLAG_BAD_TRANS;
38223 +               continue;
38224 +           }
38225 +           
38226 +           if (IPROC_TransCRCStatus (status) == CRC_STATUS_DISCARD)
38227 +               continue;
38228 +
38229 +           if ((((IPROC_TransactionType(status) & TR_BLOCK_OPCODE_MASK) == TR_WRITEBLOCK) ||
38230 +                (IPROC_TransactionType(status) == TR_TRACEROUTE_TRANS)) &&
38231 +               (trap->tr_flags & TR_FLAG_ACK_SENT) && trap->tr_identifyTrans == TR_TRANS_INVALID)
38232 +           {
38233 +               /* 
38234 +                * Writeblock after the ack is sent without an identify transaction - this is 
38235 +                * considered to be a DMA packet and requires the next packet to be nacked - since 
38236 +                * the DMA processor will send this in a deterministic time and there's an upper 
38237 +                * limit on the network latency (the output timeout) we just need to hold the context 
38238 +                * filter up for a while.
38239 +                */
38240 +               trap->tr_flags |= TR_FLAG_DMA_PACKET;
38241 +           }
38242 +           
38243 +           if (IPROC_LastTrans(status) && (IPROC_TransactionType(status) & TR_WAIT_FOR_EOP))
38244 +           {
38245 +               /*
38246 +                * WaitForEop transactions - if we have to do network error fixup
38247 +                * then we may need to execute/ignore this transaction dependent
38248 +                * on whether the source will be resending it.
38249 +                */
38250 +               trap->tr_waitForEopTrans = i;
38251 +           }
38252 +
38253 +           /*
38254 +            * This is a special case caused by a minor input processor bug.
38255 +            * If simultaneous InputMemoryFault and InputEventEngineTrapped occur then the chip will probably return
38256 +            * InputEventEngineTrapped even though the write of the back pointer has not occurred and must be done by
38257 +            * the trap handler.
38258 +            * In this case the fault address will equal q->q_bptr. If there has been only EventEngineTrap then the
38259 +            * fault address should be zero as the trap handler now always zeros this after every input trap.
38260 +            */
38261 +           if ((IPROC_TransactionType (status) & TR_OPCODE_MASK) == (TR_INPUT_Q_COMMIT & TR_OPCODE_MASK) &&
38262 +               trap->tr_faultarea.FaultAddress == hdrp->TrAddr + offsetof(E4_InputQueue, q_bptr) &&
38263 +               IPROC_TrapValue(status) == InputEventEngineTrapped)
38264 +           {
38265 +               hdrp->IProcStatusCntxAndTrType = (status & 0xFFFFFFF0FFFFFFFFull) | ((E4_uint64) InputMemoryFault << 32);
38266 +           }
38267 +       }
38268 +
38269 +       PRINTF (DBG_DEVICE, DBG_INTR, "inspect[%d] status=%llx TrapValue=%d -> flags %x\n", i, status, IPROC_TrapValue(status), trap->tr_flags);
38270 +    }
38271 +}
38272 +
38273 +E4_uint64
38274 +elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq)
38275 +{
38276 +    sdramaddr_t cqdesc     = dev->dev_cqaddr + elan4_cq2num(cq) * sizeof (E4_CommandQueueDesc);
38277 +    E4_uint64   cqcontrol  = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
38278 +    E4_uint32   extractOff = CQ_ExtractPtr (cqcontrol) & (CQ_Size(cq->cq_size)-1);
38279 +    
38280 +    if (extractOff == 0)
38281 +       extractOff = CQ_Size(cq->cq_size) - sizeof (E4_uint64);
38282 +    else
38283 +       extractOff -= sizeof (E4_uint64);
38284 +
38285 +    return (elan4_sdram_readq (dev, cq->cq_space + extractOff));
38286 +}
38287 +
38288 +EXPORT_SYMBOL(elan4_extract_eproc_trap);
38289 +EXPORT_SYMBOL(elan4_display_eproc_trap);
38290 +EXPORT_SYMBOL(elan4_extract_cproc_trap);
38291 +EXPORT_SYMBOL(elan4_display_cproc_trap);
38292 +EXPORT_SYMBOL(elan4_extract_dproc_trap);
38293 +EXPORT_SYMBOL(elan4_display_dproc_trap);
38294 +EXPORT_SYMBOL(elan4_extract_tproc_trap);
38295 +EXPORT_SYMBOL(elan4_display_tproc_trap);
38296 +EXPORT_SYMBOL(elan4_extract_iproc_trap);
38297 +EXPORT_SYMBOL(elan4_inspect_iproc_trap);
38298 +EXPORT_SYMBOL(elan4_display_iproc_trap);
38299 +
38300 +
38301 +/*
38302 + * Local variables:
38303 + * c-file-style: "stroustrup"
38304 + * End:
38305 + */
38306 diff -urN clean/drivers/net/qsnet/elan4/user.c linux-2.6.9/drivers/net/qsnet/elan4/user.c
38307 --- clean/drivers/net/qsnet/elan4/user.c        1969-12-31 19:00:00.000000000 -0500
38308 +++ linux-2.6.9/drivers/net/qsnet/elan4/user.c  2005-07-19 09:45:36.000000000 -0400
38309 @@ -0,0 +1,3443 @@
38310 +/*
38311 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
38312 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
38313 + * 
38314 + *    For licensing information please see the supplied COPYING file
38315 + *
38316 + */
38317 +
38318 +#ident "@(#)$Id: user.c,v 1.89.2.2 2005/07/19 13:45:36 daniel Exp $"
38319 +/*      $Source: /cvs/master/quadrics/elan4mod/user.c,v $*/
38320 +
38321 +#include <qsnet/kernel.h>
38322 +#include <qsnet/kpte.h>
38323 +
38324 +#include <elan/elanmod.h>
38325 +#include <elan4/debug.h>
38326 +#include <elan4/device.h>
38327 +#include <elan4/user.h>
38328 +
38329 +#include <elan4/trtype.h>
38330 +#include <elan4/commands.h>
38331 +
38332 +#include <stdarg.h>
38333 +
38334 +/* allow this code to compile against an Eagle elanmod */
38335 +#ifdef __ELANMOD_DEVICE_H
38336 +#define elan_attach_cap(cap,rnum,args,func)    elanmod_attach_cap(cap,args,func)
38337 +#define elan_detach_cap(cap,rnum)              elanmod_detach_cap(cap)
38338 +#endif
38339 +
38340 +#define NETERR_MSGS    16
38341 +
38342 +int user_p2p_route_options   = FIRST_TIMEOUT(3);
38343 +int user_bcast_route_options = FIRST_TIMEOUT(3);
38344 +int user_dproc_retry_count   = 15;
38345 +int user_cproc_retry_count   = 2;
38346 +int user_ioproc_enabled      = 1;
38347 +int user_pagefault_enabled   = 1;
38348 +
38349 +int num_fault_save           = 30;
38350 +int min_fault_pages          = 1;
38351 +int max_fault_pages          = 128;
38352 +
38353 +static int
38354 +user_validate_cap (USER_CTXT *uctx, ELAN_CAPABILITY *cap, unsigned use)
38355 +{
38356 +    /* Don't allow a user process to attach to system context */
38357 +    if (ELAN4_SYSTEM_CONTEXT (cap->cap_lowcontext) || ELAN4_SYSTEM_CONTEXT (cap->cap_highcontext))
38358 +    {
38359 +       PRINTF3 (DBG_DEVICE, DBG_VP,"user_validate_cap: lctx %x hctx %x high %x\n", cap->cap_lowcontext, cap->cap_highcontext, ELAN4_KCOMM_BASE_CONTEXT_NUM);
38360 +       PRINTF0 (DBG_DEVICE, DBG_VP,"user_validate_cap: user process cant attach to system cap\n");
38361 +       return (EINVAL);
38362 +    }
38363 +    
38364 +    return elanmod_classify_cap(&uctx->uctx_position, cap, use);
38365 +}
38366 +
38367 +static __inline__ void
38368 +__user_signal_trap (USER_CTXT *uctx)
38369 +{
38370 +    switch (uctx->uctx_trap_state)
38371 +    {
38372 +    case UCTX_TRAP_IDLE:
38373 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: deliver signal %d to pid %d\n", uctx->uctx_trap_signo, uctx->uctx_trap_pid);
38374 +
38375 +       if (uctx->uctx_trap_signo)
38376 +           kill_proc (uctx->uctx_trap_pid, uctx->uctx_trap_signo, 1);
38377 +       break;
38378 +
38379 +    case UCTX_TRAP_SLEEPING:
38380 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: wakeup sleeping trap handler\n");
38381 +
38382 +       kcondvar_wakeupone (&uctx->uctx_wait, &uctx->uctx_spinlock);
38383 +       break;
38384 +    }
38385 +    uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
38386 +}
38387 +
38388 +static void
38389 +user_signal_timer (unsigned long arg)
38390 +{
38391 +    USER_CTXT    *uctx = (USER_CTXT *) arg;
38392 +    unsigned long flags;
38393 +
38394 +    PRINTF (uctx, DBG_TRAP, "user_signal_timer: state=%d pid=%d signal=%d (now %d start %d)\n",
38395 +           uctx->uctx_trap_state, uctx->uctx_trap_pid, uctx->uctx_trap_signo, jiffies,
38396 +           uctx->uctx_int_start);
38397 +
38398 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38399 +    __user_signal_trap (uctx);
38400 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38401 +}
38402 +
38403 +#define MAX_INTS_PER_TICK      50
38404 +#define MIN_INTS_PER_TICK      20
38405 +
38406 +static void
38407 +user_shuffle_signal_trap (USER_CTXT *uctx)
38408 +{
38409 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
38410 +
38411 +    PRINTF (uctx, DBG_TRAP, "user_shuffle_signal_trap: signal=%d%s\n", 
38412 +           uctx->uctx_trap_signo, timer_pending(&uctx->uctx_shuffle_timer) ? " (timer-pending)" : "");
38413 +
38414 +    if (timer_pending (&uctx->uctx_shuffle_timer))
38415 +       return;
38416 +
38417 +    uctx->uctx_shuffle_timer.expires =  jiffies + (HZ*2);
38418 +
38419 +    add_timer (&uctx->uctx_shuffle_timer);
38420 +}
38421 +
38422 +static void
38423 +user_signal_trap (USER_CTXT *uctx)
38424 +{
38425 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
38426 +
38427 +    PRINTF (uctx, DBG_TRAP, "user_signal_trap: state=%d pid=%d signal=%d%s\n", uctx->uctx_trap_state,
38428 +           uctx->uctx_trap_pid, uctx->uctx_trap_signo, timer_pending(&uctx->uctx_int_timer) ? " (timer-pending)" : "");
38429 +
38430 +    uctx->uctx_int_count++;
38431 +
38432 +    if (timer_pending (&uctx->uctx_int_timer))
38433 +       return;
38434 +
38435 +    if (uctx->uctx_int_count > ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK))
38436 +    {
38437 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: deferring signal for %d ticks (count %d ticks %d -> %d)\n", 
38438 +               uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start),
38439 +               ((int)(jiffies - uctx->uctx_int_start) * MAX_INTS_PER_TICK));
38440 +
38441 +       /* We're interrupting too fast, so defer this signal */
38442 +       uctx->uctx_int_timer.expires = jiffies + (++uctx->uctx_int_delay);
38443 +
38444 +       add_timer (&uctx->uctx_int_timer);
38445 +    }
38446 +    else
38447 +    {
38448 +       __user_signal_trap (uctx);
38449 +
38450 +       PRINTF (uctx, DBG_TRAP, "user_signal_trap: check signal for %d ticks (count %d ticks %d -> %d)\n", 
38451 +               uctx->uctx_int_delay + 1, uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start),
38452 +               (int)(jiffies - uctx->uctx_int_start) * MIN_INTS_PER_TICK);
38453 +           
38454 +       if (uctx->uctx_int_count < ((int) (jiffies - uctx->uctx_int_start)) * MIN_INTS_PER_TICK)
38455 +       {
38456 +           PRINTF (uctx, DBG_TRAP, "user_signal_trap: reset interrupt throttle (count %d ticks %d)\n", 
38457 +                   uctx->uctx_int_count, (int) (jiffies - uctx->uctx_int_start));
38458 +
38459 +           uctx->uctx_int_start = jiffies;
38460 +           uctx->uctx_int_count = 0;
38461 +           uctx->uctx_int_delay = 0;
38462 +       }
38463 +    }
38464 +}
38465 +
38466 +static void
38467 +user_neterr_timer (unsigned long arg)
38468 +{
38469 +    USER_CTXT *uctx = (USER_CTXT *) arg;
38470 +    unsigned long flags;
38471 +    
38472 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38473 +
38474 +    uctx->uctx_status |= UCTX_NETERR_TIMER;
38475 +    
38476 +    user_signal_trap (uctx);
38477 +
38478 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38479 +}
38480 +
38481 +static void
38482 +user_flush_dma_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull)
38483 +{
38484 +    E4_uint64          qptrs = read_reg64 (dev, DProcLowPriPtrs);
38485 +    E4_uint32          qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
38486 +    E4_uint32          qfptr = E4_QueueFrontPointer (qptrs);
38487 +    E4_uint32          qbptr = E4_QueueBackPointer (qptrs);
38488 +    E4_DProcQueueEntry qentry;
38489 +
38490 +    while ((qfptr != qbptr) || qfull)
38491 +    {
38492 +       E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize));
38493 +
38494 +       if (DMA_Context (typeSize) == uctx->uctx_ctxt.ctxt_num)
38495 +       {
38496 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry));
38497 +
38498 +           PRINTF4 (uctx, DBG_SWAP, "user_flush_dma_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Desc.dma_typeSize, 
38499 +                    qentry.Desc.dma_cookie, qentry.Desc.dma_vproc, qentry.Desc.dma_srcAddr);
38500 +           PRINTF3 (uctx, DBG_SWAP, "                         %016llx %016llx %016llx\n", qentry.Desc.dma_dstAddr, 
38501 +                    qentry.Desc.dma_srcEvent, qentry.Desc.dma_dstEvent);
38502 +
38503 +           if (RING_QUEUE_REALLY_FULL (uctx->uctx_dmaQ))
38504 +           {
38505 +               PRINTF (uctx, DBG_SWAP, "user_flush_dma_runqueue: queue overflow\n");
38506 +               uctx->uctx_status |= UCTX_DPROC_QUEUE_OVERFLOW;
38507 +           }
38508 +           else
38509 +           {
38510 +               *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = qentry.Desc;
38511 +               (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
38512 +           }
38513 +           
38514 +           qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
38515 +           qentry.Desc.dma_cookie   = 0;
38516 +           qentry.Desc.dma_vproc    = 0;
38517 +           qentry.Desc.dma_srcAddr  = 0;
38518 +           qentry.Desc.dma_dstAddr  = 0;
38519 +           qentry.Desc.dma_srcEvent = 0;
38520 +           qentry.Desc.dma_dstEvent = 0;
38521 +
38522 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
38523 +       }
38524 +
38525 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
38526 +       qfull = 0;
38527 +    }
38528 +}
38529 +
38530 +static void
38531 +user_flush_thread_runqueue (ELAN4_DEV *dev, USER_CTXT *uctx, int qfull)
38532 +{
38533 +    E4_uint64          qptrs = read_reg64 (dev, TProcLowPriPtrs);
38534 +    E4_uint32          qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
38535 +    E4_uint32          qfptr = E4_QueueFrontPointer (qptrs);
38536 +    E4_uint32          qbptr = E4_QueueBackPointer (qptrs);
38537 +    E4_TProcQueueEntry qentry;
38538 +
38539 +    while ((qfptr != qbptr) || qfull)
38540 +    {
38541 +       E4_uint64 context = elan4_sdram_readq (dev, qfptr + offsetof (E4_TProcQueueEntry, Context));
38542 +
38543 +       if (TPROC_Context (context) == uctx->uctx_ctxt.ctxt_num)
38544 +       {
38545 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_TProcQueueEntry));
38546 +
38547 +           PRINTF (uctx, DBG_SWAP, "user_flush_thread_runqueue: %016llx %016llx %016llx %016llx\n", qentry.Regs.Registers[0],
38548 +                   qentry.Regs.Registers[1], qentry.Regs.Registers[2], qentry.Regs.Registers[3]);
38549 +           PRINTF (uctx, DBG_SWAP, "                            %016llx %016llx %016llx\n", 
38550 +                   qentry.Regs.Registers[4], qentry.Regs.Registers[5], qentry.Regs.Registers[6]);
38551 +
38552 +           if (RING_QUEUE_REALLY_FULL (uctx->uctx_threadQ))
38553 +               uctx->uctx_status |= UCTX_TPROC_QUEUE_OVERFLOW;
38554 +           else
38555 +           {
38556 +               *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = qentry.Regs;
38557 +               (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
38558 +           }
38559 +           
38560 +           /* change the thread to execute the suspend sequence */
38561 +           qentry.Regs.Registers[0] = dev->dev_tproc_suspend;
38562 +           qentry.Regs.Registers[1] = dev->dev_tproc_space;
38563 +           qentry.Context           = dev->dev_ctxt.ctxt_num;
38564 +
38565 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_TProcQueueEntry));
38566 +       }
38567 +       
38568 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_TProcQueueEntry)) & (qsize-1));
38569 +       qfull = 0;
38570 +    }
38571 +}
38572 +
38573 +static void
38574 +user_flush_dmas (ELAN4_DEV *dev, void *arg, int qfull)
38575 +{
38576 +    USER_CTXT        *uctx = (USER_CTXT *) arg;
38577 +    unsigned long     flags;
38578 +    
38579 +    ASSERT ((read_reg32 (dev, InterruptReg) & INT_DProcHalted) != 0);
38580 +
38581 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38582 +
38583 +    if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
38584 +    {
38585 +       PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: status %x - no more reasons\n", uctx->uctx_status);
38586 +
38587 +       uctx->uctx_status &= ~UCTX_STOPPING;
38588 +
38589 +       user_signal_trap (uctx);
38590 +    }
38591 +    else
38592 +    {
38593 +       user_flush_dma_runqueue (dev, uctx, qfull);
38594 +
38595 +       uctx->uctx_status = (uctx->uctx_status | UCTX_STOPPED) & ~UCTX_STOPPING;
38596 +    
38597 +       PRINTF1 (uctx, DBG_SWAP, "user_flush_dmas: statux %x - stopped\n", uctx->uctx_status);
38598 +
38599 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
38600 +    }
38601 +
38602 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38603 +}
38604 +
38605 +static void
38606 +user_flush (ELAN4_DEV *dev, void *arg)
38607 +{
38608 +    USER_CTXT        *uctx = (USER_CTXT *) arg;
38609 +    struct list_head *entry;
38610 +    unsigned long     flags;
38611 +
38612 +    ASSERT ((read_reg32 (dev, InterruptReg) & (INT_Halted|INT_Discarding)) == (INT_Halted|INT_Discarding));
38613 +
38614 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38615 +
38616 +    if ((uctx->uctx_status & (UCTX_SWAPPED_REASONS|UCTX_STOPPED_REASONS)) == 0)
38617 +    {
38618 +       PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - no more reasons\n", uctx->uctx_status);
38619 +
38620 +       uctx->uctx_status &= ~UCTX_STOPPING;
38621 +
38622 +       user_signal_trap (uctx);
38623 +    }
38624 +    else
38625 +    {
38626 +       PRINTF1 (uctx, DBG_SWAP, "user_flush: status %x - flushing context\n", uctx->uctx_status);
38627 +
38628 +       list_for_each (entry, &uctx->uctx_cqlist) {
38629 +           USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
38630 +
38631 +           if (ucq->ucq_state == UCQ_RUNNING)
38632 +           {
38633 +               /* NOTE: since the inserter can still be running we modify the permissions
38634 +                *       to zero then when the extractor starts up again it will trap */
38635 +               PRINTF1 (uctx, DBG_SWAP, "user_flush: stopping cq indx=%d\n", elan4_cq2idx(ucq->ucq_cq));
38636 +
38637 +               elan4_updatecq (dev, ucq->ucq_cq, 0, 0);
38638 +           }
38639 +       }
38640 +       
38641 +       user_flush_thread_runqueue (dev, uctx, TPROC_LowRunQueueFull(read_reg64 (dev, TProcStatus)));
38642 +
38643 +       /* since we can't determine whether the dma run queue is full or empty, we use a dma
38644 +        * halt operation to do the flushing - as the reason for halting the dma processor 
38645 +        * will be released when we return, we keep it halted until the flush has completed */
38646 +       elan4_queue_dma_flushop (dev, &uctx->uctx_dma_flushop, 0);
38647 +
38648 +       if (uctx->uctx_status & UCTX_EXITING)
38649 +           elan4_flush_icache_halted (&uctx->uctx_ctxt);
38650 +    }
38651 +
38652 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38653 +}
38654 +
38655 +static void
38656 +user_set_filter (USER_CTXT *uctx, E4_uint32 state)
38657 +{
38658 +    struct list_head *entry;
38659 +
38660 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
38661 +
38662 +    list_for_each (entry, &uctx->uctx_cent_list) {
38663 +       USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link);
38664 +
38665 +       elan4_set_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext, state);
38666 +    }
38667 +}
38668 +
38669 +static void
38670 +user_start_nacking (USER_CTXT *uctx, unsigned reason)
38671 +{
38672 +    PRINTF2 (uctx, DBG_SWAP, "user_start_nacking: status %x reason %x\n", uctx->uctx_status, reason);
38673 +
38674 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
38675 +
38676 +    if (UCTX_NACKING(uctx))
38677 +       uctx->uctx_status |= reason;
38678 +    else
38679 +    {
38680 +       uctx->uctx_status |= reason;
38681 +
38682 +       user_set_filter (uctx, E4_FILTER_STATS | E4_FILTER_DISCARD_ALL);
38683 +    }
38684 +}
38685 +
38686 +static void
38687 +user_stop_nacking (USER_CTXT *uctx, unsigned reason)
38688 +{
38689 +    PRINTF2 (uctx, DBG_SWAP, "user_stop_nacking: status %x reason %x\n", uctx->uctx_status, reason);
38690 +    
38691 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
38692 +    
38693 +    uctx->uctx_status &= ~reason;
38694 +    
38695 +    if (! UCTX_NACKING (uctx))
38696 +       user_set_filter (uctx, E4_FILTER_STATS);
38697 +}
38698 +
38699 +static void
38700 +user_start_stopping (USER_CTXT *uctx, unsigned reason)
38701 +{
38702 +    ELAN4_DEV *dev =uctx->uctx_ctxt.ctxt_dev;
38703 +
38704 +    PRINTF2 (uctx, DBG_SWAP, "user_start_stopping: status %x reason %x\n", uctx->uctx_status, reason);
38705 +
38706 +    ASSERT (! (uctx->uctx_status & UCTX_STOPPED));
38707 +
38708 +    user_start_nacking (uctx, reason);
38709 +    
38710 +    if ((uctx->uctx_status & UCTX_STOPPING) != 0)
38711 +       return;
38712 +    
38713 +    uctx->uctx_status |= UCTX_STOPPING;
38714 +
38715 +    /* queue the halt operation to  remove all threads/dmas/cqs from the run queues */
38716 +    /*    and also flush through the context filter change */
38717 +    elan4_queue_haltop (dev, &uctx->uctx_haltop);
38718 +}
38719 +
38720 +static void
38721 +user_stop_stopping (USER_CTXT *uctx, unsigned reason)
38722 +{
38723 +    PRINTF2 (uctx, DBG_SWAP, "user_stop_stopping: status %x reason %x\n", uctx->uctx_status, reason);
38724 +    
38725 +    user_stop_nacking (uctx, reason);
38726 +
38727 +    if (UCTX_RUNNABLE (uctx))
38728 +    {
38729 +       uctx->uctx_status &= ~UCTX_STOPPED;
38730 +
38731 +       PRINTF1 (uctx, DBG_SWAP, "user_stop_stopping: no more reasons => %x\n", uctx->uctx_status);
38732 +
38733 +       user_signal_trap (uctx);
38734 +    }
38735 +}
38736 +
38737 +void
38738 +user_swapout (USER_CTXT *uctx, unsigned reason)
38739 +{
38740 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
38741 +    unsigned long flags;
38742 +    
38743 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38744 +    
38745 +    PRINTF2 (uctx, DBG_SWAP, "user_swapout: status %x reason %x\n", uctx->uctx_status, reason);
38746 +    
38747 +    user_start_nacking (uctx, reason);
38748 +    
38749 +    while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING) &&                /* wait for someone else to finish */
38750 +          uctx->uctx_trap_count > 0)                                           /* and for trap handlers to notice */
38751 +    {                                                                          /* and exit */
38752 +       PRINTF1 (uctx, DBG_SWAP, "user_swapout: waiting for %d trap handlers to exit/previous swapout\n", uctx->uctx_trap_count);
38753 +
38754 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
38755 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);
38756 +    }
38757 +
38758 +    if (uctx->uctx_status & UCTX_SWAPPED)                                      /* already swapped out */
38759 +    {
38760 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38761 +       return;
38762 +    }
38763 +    
38764 +    uctx->uctx_status |= (UCTX_SWAPPING|UCTX_STOPPING);                                /* mark the context as swapping & stopping */
38765 +    
38766 +    /* queue the halt operation to  remove all threads/dmas/cqs from the run queues */
38767 +    /*    and also flush through the context filter change */
38768 +    elan4_queue_haltop (dev, &uctx->uctx_haltop);
38769 +    
38770 +    while (! (uctx->uctx_status & UCTX_STOPPED))
38771 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);
38772 +
38773 +    /* all state has been removed from the elan - we can now "tidy" it up */
38774 +
38775 +    PRINTF0 (uctx, DBG_SWAP, "user_swapout: swapped out\n");
38776 +    
38777 +    uctx->uctx_status = (uctx->uctx_status & ~UCTX_SWAPPING) | UCTX_SWAPPED;
38778 +    
38779 +    kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
38780 +
38781 +    PRINTF1 (uctx, DBG_SWAP, "user_swapout: all done - status %x\n", uctx->uctx_status);
38782 +
38783 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38784 +}
38785 +
38786 +void
38787 +user_swapin (USER_CTXT *uctx, unsigned reason)
38788 +{
38789 +    unsigned long flags;
38790 +
38791 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38792 +
38793 +    ASSERT (uctx->uctx_status & UCTX_SWAPPED_REASONS);
38794 +
38795 +    PRINTF2 (uctx, DBG_SWAP, "user_swapin: status %x reason %x\n", uctx->uctx_status, reason);
38796 +
38797 +    while (uctx->uctx_status & (UCTX_SWAPPING|UCTX_STOPPING))                  /* wait until other threads have */
38798 +       kcondvar_wait (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags);         /* completed their swap operation */
38799 +
38800 +    ASSERT (uctx->uctx_status & (UCTX_SWAPPED | UCTX_STOPPED));
38801 +
38802 +    user_stop_nacking (uctx, reason);
38803 +
38804 +    if (! (uctx->uctx_status & UCTX_SWAPPED_REASONS))
38805 +    {
38806 +       uctx->uctx_status &= ~UCTX_SWAPPED;
38807 +
38808 +       /* no longer swapped out - wakeup anyone sleeping waiting for swapin */
38809 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
38810 +
38811 +       if (! (uctx->uctx_status & UCTX_STOPPED_REASONS))
38812 +       {
38813 +           uctx->uctx_status &= ~UCTX_STOPPED;
38814 +           user_signal_trap (uctx);
38815 +       }
38816 +    }
38817 +
38818 +    PRINTF1 (uctx, DBG_SWAP, "user_swapin: all done - status %x\n", uctx->uctx_status);
38819 +
38820 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38821 +}
38822 +
38823 +void
38824 +user_destroy_callback (void *arg, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map)
38825 +{
38826 +    USER_CTXT *uctx = (USER_CTXT *) arg;
38827 +
38828 +    PRINTF (uctx, DBG_VP, "user_destroy_callback: %s\n", map == NULL ? "cap destoyed" : "map destroyed");
38829 +}
38830 +
38831 +int
38832 +user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap)
38833 +{
38834 +    ELAN4_DEV       *dev = uctx->uctx_ctxt.ctxt_dev;
38835 +    USER_CTXT_ENTRY *cent;
38836 +    unsigned long flags;
38837 +    int ctype, res;
38838 +    
38839 +    if ((ctype = user_validate_cap (uctx, cap, ELAN_USER_ATTACH)) < 0)
38840 +       return ctype;
38841 +
38842 +    if ((ctype == ELAN_CAP_RMS) && (res = elan_attach_cap (cap, dev->dev_devinfo.dev_rail, uctx, user_destroy_callback)) != 0)
38843 +    {
38844 +       /* NOTE: elan_attach_cap returns +ve errnos */
38845 +       return -res;
38846 +    }
38847 +
38848 +    KMEM_ALLOC (cent, USER_CTXT_ENTRY *, sizeof (USER_CTXT_ENTRY), 1);
38849 +    if (cent == NULL)
38850 +    {
38851 +       if (ctype == ELAN_CAP_RMS)
38852 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
38853 +
38854 +       return -ENOMEM;
38855 +    }
38856 +
38857 +    KMEM_ALLOC (cent->cent_cap, ELAN_CAPABILITY *, ELAN_CAP_SIZE(cap), 1);
38858 +    if (cent->cent_cap == NULL)
38859 +    {
38860 +       if (ctype == ELAN_CAP_RMS)
38861 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
38862 +
38863 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
38864 +       return -ENOMEM;
38865 +    }
38866 +
38867 +    memcpy (cent->cent_cap, cap, ELAN_CAP_SIZE(cap));
38868 +
38869 +    if ((res = elan4_attach_filter (&uctx->uctx_ctxt, cap->cap_mycontext)) != 0)
38870 +    {
38871 +       if (ctype == ELAN_CAP_RMS)
38872 +           elan_detach_cap (cap, dev->dev_devinfo.dev_rail);
38873 +       
38874 +       KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cap));
38875 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
38876 +
38877 +       return res;
38878 +    }
38879 +
38880 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38881 +
38882 +    list_add_tail (&cent->cent_link, &uctx->uctx_cent_list);
38883 +
38884 +    if (! UCTX_NACKING (uctx))
38885 +       user_set_filter (uctx, E4_FILTER_STATS);
38886 +
38887 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38888 +
38889 +    return (0);
38890 +    
38891 +}
38892 +
38893 +void
38894 +user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap)
38895 +{
38896 +    ELAN4_DEV         *dev = uctx->uctx_ctxt.ctxt_dev;
38897 +    struct list_head  *entry;
38898 +    struct list_head  *next;
38899 +    struct list_head   list;
38900 +    unsigned long      flags;
38901 +
38902 +    INIT_LIST_HEAD (&list);
38903 +
38904 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38905 +    
38906 +    PRINTF (uctx, DBG_NETWORK_CTX, cap ? "user_detach: network context %d\n" : "user_detach: all network contexts\n", cap ? cap->cap_mycontext : 0);
38907 +
38908 +    list_for_each_safe (entry, next, &uctx->uctx_cent_list) {
38909 +       USER_CTXT_ENTRY *cent = list_entry (entry, USER_CTXT_ENTRY, cent_link);
38910 +
38911 +       if (cap == NULL || ELAN_CAP_MATCH (cap, cent->cent_cap))
38912 +       {
38913 +           PRINTF1 (uctx, DBG_NETWORK_CTX, "user_detach: detach from network context %d\n", cent->cent_cap->cap_mycontext);
38914 +           
38915 +           elan4_detach_filter (&uctx->uctx_ctxt, cent->cent_cap->cap_mycontext);
38916 +
38917 +           list_del (&cent->cent_link);
38918 +           list_add_tail (&cent->cent_link, &list);
38919 +       }
38920 +    }
38921 +
38922 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38923 +
38924 +    while (! list_empty (&list))
38925 +    {
38926 +       USER_CTXT_ENTRY *cent = list_entry (list.next, USER_CTXT_ENTRY, cent_link);
38927 +
38928 +       list_del (&cent->cent_link);
38929 +
38930 +       if (user_validate_cap (uctx, cent->cent_cap, ELAN_USER_DETACH) == ELAN_CAP_RMS)
38931 +           elan_detach_cap (cent->cent_cap, dev->dev_devinfo.dev_rail); 
38932 +       
38933 +       KMEM_FREE (cent->cent_cap, ELAN_CAP_SIZE (cent->cent_cap));
38934 +       KMEM_FREE (cent, sizeof (USER_CTXT_ENTRY));
38935 +    }
38936 +}
38937 +
38938 +void
38939 +user_block_inputter (USER_CTXT *uctx, unsigned blocked)
38940 +{
38941 +    unsigned long flags;
38942 +    int isblocked;
38943 +
38944 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
38945 +    
38946 +    isblocked = (uctx->uctx_status & UCTX_USER_FILTERING);
38947 +
38948 +    if (blocked && !isblocked)
38949 +       user_start_nacking (uctx, UCTX_USER_FILTERING);
38950 +
38951 +    if (!blocked && isblocked)
38952 +       user_stop_nacking (uctx, UCTX_USER_FILTERING);
38953 +
38954 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
38955 +}
38956 +
38957 +static USER_VPSEG *
38958 +user_install_vpseg (USER_CTXT *uctx, unsigned process, unsigned entries)
38959 +{
38960 +    struct list_head *entry;
38961 +    USER_VPSEG       *seg;
38962 +
38963 +    if ((process + entries) > (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size))
38964 +       return (NULL);
38965 +
38966 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
38967 +
38968 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
38969 +       seg = list_entry (entry, USER_VPSEG, vps_link);
38970 +
38971 +       if (process <= (seg->vps_process + seg->vps_entries-1) && 
38972 +           (process + entries - 1) >= seg->vps_process)
38973 +           return ((USER_VPSEG *) NULL);
38974 +    }
38975 +
38976 +    KMEM_ZALLOC (seg, USER_VPSEG *, sizeof (USER_VPSEG), 1);
38977 +    
38978 +    if (seg == (USER_VPSEG *) NULL)
38979 +       return ((USER_VPSEG *) NULL);
38980 +
38981 +    seg->vps_process = process;
38982 +    seg->vps_entries = entries;
38983 +
38984 +    list_add_tail (&seg->vps_link, &uctx->uctx_vpseg_list);
38985 +
38986 +    return (seg);
38987 +}
38988 +
38989 +static void
38990 +user_remove_vpseg (USER_CTXT *uctx, USER_VPSEG *seg)
38991 +{
38992 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
38993 +
38994 +    list_del (&seg->vps_link);
38995 +    
38996 +    switch (seg->vps_type)
38997 +    {
38998 +    case USER_VPSEG_P2P:
38999 +       /* These pointers (union) are only valid for P2P segs */
39000 +       if (seg->vps_p2p_routes)
39001 +           KMEM_FREE (seg->vps_p2p_routes, sizeof (E4_VirtualProcessEntry) * seg->vps_entries);
39002 +       
39003 +       if (seg->vps_p2p_cap)
39004 +           KMEM_FREE (seg->vps_p2p_cap, ELAN_CAP_SIZE(seg->vps_p2p_cap));
39005 +
39006 +       break;
39007 +       
39008 +    case USER_VPSEG_BCAST:
39009 +       ;
39010 +    }
39011 +
39012 +    KMEM_FREE (seg, sizeof (USER_VPSEG));
39013 +}
39014 +
39015 +static USER_VPSEG *
39016 +user_find_vpseg (USER_CTXT *uctx, unsigned low, unsigned high)
39017 +{
39018 +    struct list_head *entry;
39019 +
39020 +    ASSERT (kmutex_is_locked (&uctx->uctx_vpseg_lock));
39021 +
39022 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
39023 +       USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link);
39024 +
39025 +       if (seg->vps_process <= low && (seg->vps_process + seg->vps_entries) > high)
39026 +           return (seg);
39027 +    }
39028 +
39029 +    return ((USER_VPSEG *) NULL);
39030 +}
39031 +
39032 +static ELAN_LOCATION 
39033 +user_process2location (USER_CTXT *uctx, USER_VPSEG *seg, unsigned process)
39034 +{
39035 +    ELAN_LOCATION location;
39036 +    int           nnodes, nctxs;
39037 +    int           nodeOff, ctxOff, vpOff;
39038 +
39039 +    location.loc_node    = ELAN_INVALID_NODE;
39040 +    location.loc_context = -1;
39041 +
39042 +    if (seg == NULL)
39043 +       seg = user_find_vpseg (uctx, process, process);
39044 +
39045 +    if (seg == NULL || (seg->vps_type != USER_VPSEG_P2P))
39046 +       return (location);
39047 +
39048 +    nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap);
39049 +    nctxs  = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap);
39050 +
39051 +    switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK)
39052 +    {
39053 +    case ELAN_CAP_TYPE_BLOCK:
39054 +       for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
39055 +       {
39056 +           for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
39057 +           {
39058 +               if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
39059 +               {
39060 +                   if (vpOff++ == (process - seg->vps_process))
39061 +                   { 
39062 +                       location.loc_node    = seg->vps_p2p_cap->cap_lownode + nodeOff;
39063 +                       location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff;
39064 +                       goto found;
39065 +                   }
39066 +               }
39067 +           }
39068 +       }
39069 +       break;
39070 +       
39071 +    case ELAN_CAP_TYPE_CYCLIC:
39072 +       for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
39073 +       {
39074 +           for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
39075 +           {
39076 +               if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
39077 +               {                                   
39078 +                   if (vpOff++ ==  (process - seg->vps_process))
39079 +                   { 
39080 +                       location.loc_node    = seg->vps_p2p_cap->cap_lownode + nodeOff;
39081 +                       location.loc_context = seg->vps_p2p_cap->cap_lowcontext + ctxOff;
39082 +                       goto found;
39083 +                   }
39084 +               }
39085 +           }
39086 +       }
39087 +       break;  
39088 +    }
39089 +       
39090 + found:
39091 +    return (location);
39092 +}
39093 +
39094 +static unsigned 
39095 +user_location2process (USER_CTXT *uctx, ELAN_LOCATION location)
39096 +{
39097 +    unsigned int      process = ELAN_INVALID_PROCESS;
39098 +    struct list_head *entry;
39099 +    int               nnodes, nctxs;
39100 +    int               nodeOff, ctxOff, vpOff;
39101 +
39102 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39103 +    list_for_each (entry, &uctx->uctx_vpseg_list) {
39104 +       USER_VPSEG *seg = list_entry (entry, USER_VPSEG, vps_link);
39105 +
39106 +       if (seg->vps_type != USER_VPSEG_P2P)
39107 +           continue;
39108 +
39109 +       if (location.loc_node >= seg->vps_p2p_cap->cap_lownode && location.loc_node <= seg->vps_p2p_cap->cap_highnode &&
39110 +           location.loc_context >= seg->vps_p2p_cap->cap_lowcontext && location.loc_context <= seg->vps_p2p_cap->cap_highcontext)
39111 +       {
39112 +           nnodes = ELAN_CAP_NUM_NODES (seg->vps_p2p_cap);
39113 +           nctxs  = ELAN_CAP_NUM_CONTEXTS (seg->vps_p2p_cap);
39114 +
39115 +           switch (seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_MASK)
39116 +           {
39117 +           case ELAN_CAP_TYPE_BLOCK:
39118 +               for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
39119 +               {
39120 +                   for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
39121 +                   {
39122 +                       if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
39123 +                       {
39124 +                           if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff &&
39125 +                               location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff)
39126 +                           {
39127 +                               process = seg->vps_process + vpOff;
39128 +                               goto found;
39129 +                           }
39130 +                           vpOff++;
39131 +                       }
39132 +                   }
39133 +               }
39134 +               break;
39135 +       
39136 +           case ELAN_CAP_TYPE_CYCLIC:
39137 +               for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
39138 +               {
39139 +                   for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
39140 +                   {
39141 +                       if ((seg->vps_p2p_cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (seg->vps_p2p_cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
39142 +                       {
39143 +                           if (location.loc_node == seg->vps_p2p_cap->cap_lownode + nodeOff &&
39144 +                               location.loc_context == seg->vps_p2p_cap->cap_lowcontext + ctxOff)
39145 +                           {
39146 +                               process = seg->vps_process + vpOff;
39147 +                               goto found;
39148 +                           }
39149 +                           vpOff++;
39150 +                       }
39151 +                   }
39152 +               }
39153 +               break;
39154 +           }
39155 +       }
39156 +    }
39157 + found:
39158 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39159 +
39160 +    return (process);
39161 +}
39162 +
39163 +static void
39164 +user_loadroute_vpseg (USER_CTXT *uctx, USER_VPSEG *seg, ELAN_POSITION *pos)
39165 +{
39166 +    ELAN4_DEV             *dev    = uctx->uctx_ctxt.ctxt_dev;
39167 +    ELAN_CAPABILITY       *cap    = seg->vps_p2p_cap;
39168 +    unsigned               nnodes = ELAN_CAP_NUM_NODES (cap);
39169 +    unsigned               nctxs  = ELAN_CAP_NUM_CONTEXTS (cap);
39170 +    E4_VirtualProcessEntry route;
39171 +    unsigned              nodeOff;
39172 +    unsigned              ctxOff;
39173 +    unsigned              vpOff;
39174 +
39175 +    switch (cap->cap_type & ELAN_CAP_TYPE_MASK)
39176 +    {
39177 +    case ELAN_CAP_TYPE_BLOCK:
39178 +       for (nodeOff = 0, vpOff = 0; nodeOff < nnodes; nodeOff++)
39179 +       {
39180 +           for (ctxOff = 0; ctxOff < nctxs; ctxOff++)
39181 +           {
39182 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, ctxOff + (nodeOff * nctxs)))
39183 +               {
39184 +                   if (seg->vps_p2p_routes != NULL)
39185 +                       route = seg->vps_p2p_routes[vpOff];
39186 +                   else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff,
39187 +                                                  cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0)
39188 +                   {
39189 +                       vpOff++;
39190 +                       continue;
39191 +                   }
39192 +
39193 +                   PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n",
39194 +                            seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff,
39195 +                            route.Values[0], route.Values[1]);
39196 +                   
39197 +                   elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route);
39198 +                                             
39199 +                   vpOff++;
39200 +               }
39201 +           }
39202 +       }
39203 +       break;
39204 +
39205 +    case ELAN_CAP_TYPE_CYCLIC:
39206 +       for (ctxOff = 0, vpOff = 0; ctxOff < nctxs; ctxOff++)
39207 +       {
39208 +           for (nodeOff = 0; nodeOff < nnodes; nodeOff++)
39209 +           {
39210 +               if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) || BT_TEST (cap->cap_bitmap, nodeOff + (ctxOff * nnodes)))
39211 +               {
39212 +                   if (seg->vps_p2p_routes != NULL)
39213 +                       route = seg->vps_p2p_routes[vpOff];
39214 +                   else if (elan4_generate_route (&uctx->uctx_position, &route, cap->cap_lowcontext + ctxOff,
39215 +                                                  cap->cap_lownode + nodeOff, cap->cap_lownode + nodeOff, user_p2p_route_options) < 0)
39216 +                   {
39217 +                       vpOff++;
39218 +                       continue;
39219 +                   }
39220 +
39221 +                   PRINTF5 (uctx, DBG_VP, "user_loadroute_vpseg: virtual process %d -> node %d context %d [%016llx.%016llx]\n",
39222 +                            seg->vps_process + vpOff, cap->cap_lownode + nodeOff, cap->cap_lowcontext + ctxOff,
39223 +                            route.Values[0], route.Values[1]);
39224 +                   
39225 +                   elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process + vpOff, &route);
39226 +                                             
39227 +                   vpOff++;
39228 +               }
39229 +           }
39230 +       }
39231 +       break;
39232 +    }
39233 +}
39234 +
39235 +static int
39236 +user_loadroute_bcast (USER_CTXT *uctx, USER_VPSEG *seg)
39237 +{
39238 +    ELAN4_DEV             *dev = uctx->uctx_ctxt.ctxt_dev;
39239 +    ELAN_POSITION         *pos = &uctx->uctx_position;
39240 +    E4_VirtualProcessEntry route;
39241 +    USER_VPSEG            *aseg;
39242 +    int                    res;
39243 +    ELAN_LOCATION          low;
39244 +    ELAN_LOCATION          high;
39245 +
39246 +    if ((aseg = user_find_vpseg (uctx, seg->vps_bcast_lowvp, seg->vps_bcast_highvp)) == NULL || aseg->vps_type != USER_VPSEG_P2P)
39247 +       return (-EINVAL);
39248 +    
39249 +#ifdef use_elanmod
39250 +    if ((res = user_validate_cap (dev, aseg->vps_p2p_cap, ELAN_USER_BROADCAST)) < 0)
39251 +       return (res);
39252 +#endif
39253 +    
39254 +    low  = user_process2location (uctx, aseg, seg->vps_bcast_lowvp);
39255 +    high = user_process2location (uctx, aseg, seg->vps_bcast_highvp);
39256 +
39257 +    if (low.loc_context != high.loc_context)
39258 +       return (-EINVAL);
39259 +
39260 +    /* NOTE: if loopback can only broadcast to ourself - 
39261 +     *       if back-to-back can only broadcast to other node */
39262 +    if ((pos->pos_mode == ELAN_POS_MODE_LOOPBACK   && low.loc_node != high.loc_node && low.loc_node != pos->pos_nodeid) ||
39263 +       (pos->pos_mode == ELAN_POS_MODE_BACKTOBACK && low.loc_node != high.loc_node && low.loc_node == pos->pos_nodeid))
39264 +    {
39265 +       return (-EINVAL);
39266 +    }
39267 +    
39268 +    if ((res = elan4_generate_route (pos, &route, low.loc_context, low.loc_node, high.loc_node, user_bcast_route_options)) < 0)
39269 +       return (res);
39270 +
39271 +    PRINTF (uctx, DBG_VP, "user_loadroute_bcast: virtual process %d -> nodes %d.%d context %d [%016llx.%016llx]\n",
39272 +           seg->vps_process, low.loc_node, high.loc_node, low.loc_context, route.Values[0], route.Values[1]);
39273 +    
39274 +    elan4_write_route (dev, uctx->uctx_routetable, seg->vps_process, &route);
39275 +    return (0);
39276 +}
39277 +
39278 +int
39279 +user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap)
39280 +{
39281 +    USER_VPSEG      *seg;
39282 +    ELAN_CAPABILITY *ncap;
39283 +    unsigned         entries;
39284 +
39285 +    if ((cap->cap_type & ELAN_CAP_TYPE_NO_BITMAP) == 0)
39286 +       entries = bt_nbits (cap->cap_bitmap , ELAN_CAP_BITMAPSIZE(cap));
39287 +    else
39288 +       entries = ELAN_CAP_BITMAPSIZE(cap);
39289 +    
39290 +    if ((process + entries) > (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size))
39291 +       return (-EINVAL);
39292 +
39293 +    KMEM_ALLOC (ncap, ELAN_CAPABILITY *, ELAN_CAP_SIZE (cap), 1);
39294 +
39295 +    if (ncap == NULL)
39296 +       return (-ENOMEM);
39297 +    
39298 +    memcpy (ncap, cap, ELAN_CAP_SIZE (cap));
39299 +
39300 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39301 +
39302 +    if ((seg = user_install_vpseg (uctx, process, entries)) == NULL)
39303 +    {
39304 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39305 +       return (-EINVAL);
39306 +    }
39307 +    
39308 +    seg->vps_type       = USER_VPSEG_P2P;
39309 +    seg->vps_p2p_cap    = ncap;
39310 +    seg->vps_p2p_routes = NULL;
39311 +
39312 +    user_loadroute_vpseg (uctx, seg, &uctx->uctx_position);
39313 +    
39314 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39315 +
39316 +    return (0);
39317 +}
39318 +
39319 +int
39320 +user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp)
39321 +{
39322 +    USER_VPSEG *seg;
39323 +    int         res;
39324 +
39325 +    if (lowvp > highvp || process >= (E4_VPT_MIN_ENTRIES << uctx->uctx_routetable->tbl_size))
39326 +       return (-EINVAL);
39327 +
39328 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39329 +
39330 +    if ((seg = user_install_vpseg (uctx, process, 1)) == NULL)
39331 +    {
39332 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39333 +       return (-EINVAL);
39334 +    }
39335 +
39336 +    seg->vps_type         = USER_VPSEG_BCAST;
39337 +    seg->vps_bcast_lowvp  = lowvp;
39338 +    seg->vps_bcast_highvp = highvp;
39339 +
39340 +    if ((res = user_loadroute_bcast (uctx, seg)) < 0)
39341 +       user_remove_vpseg (uctx, seg);
39342 +
39343 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39344 +    return (res);
39345 +}
39346 +
39347 +int
39348 +user_removevp (USER_CTXT *uctx, unsigned process)
39349 +{
39350 +    USER_VPSEG *seg;
39351 +
39352 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39353 +    
39354 +    if (process == ELAN_INVALID_PROCESS)
39355 +       seg = list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link);
39356 +    else
39357 +       seg = user_find_vpseg (uctx, process, process);
39358 +
39359 +    if (seg == NULL)
39360 +    {
39361 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39362 +       return (-EINVAL);
39363 +    }
39364 +
39365 +    do {
39366 +       ELAN4_DEV    *dev = uctx->uctx_ctxt.ctxt_dev;
39367 +       int i;
39368 +
39369 +       for (i = 0; i < seg->vps_entries; i++)
39370 +           elan4_invalidate_route (dev, uctx->uctx_routetable, seg->vps_process + i);
39371 +
39372 +       user_remove_vpseg (uctx, seg);
39373 +
39374 +    } while (process == ELAN_INVALID_PROCESS && (seg = list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link)) != NULL);
39375 +
39376 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39377 +
39378 +    return (0);
39379 +}
39380 +
39381 +int
39382 +user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route)
39383 +{
39384 +    ELAN4_DEV    *dev = uctx->uctx_ctxt.ctxt_dev;
39385 +    USER_VPSEG   *seg;
39386 +    ELAN_LOCATION location;
39387 +
39388 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39389 +
39390 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
39391 +    {
39392 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39393 +       return (-EINVAL);
39394 +    }
39395 +
39396 +    /* check that the route supplied is valid and goes to the correct place */
39397 +    location = user_process2location (uctx, seg, process);
39398 +
39399 +    if (elan4_check_route (&uctx->uctx_position, location, route, 0) != 0)
39400 +    {
39401 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39402 +       return (-EINVAL);
39403 +    }
39404 +
39405 +    if (seg->vps_p2p_routes == NULL)
39406 +       KMEM_ZALLOC (seg->vps_p2p_routes, E4_VirtualProcessEntry *, sizeof (E4_VirtualProcessEntry) * seg->vps_entries, 1);
39407 +    
39408 +    if (seg->vps_p2p_routes == NULL)
39409 +    {
39410 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39411 +       return (-ENOMEM);
39412 +    }
39413 +    
39414 +    seg->vps_p2p_routes[process - seg->vps_process].Values[0] = route->Values[0];
39415 +    seg->vps_p2p_routes[process - seg->vps_process].Values[1] = ROUTE_CTXT_VALUE(location.loc_context) | (route->Values[1] & ~ROUTE_CTXT_MASK);
39416 +    
39417 +    PRINTF (uctx, DBG_ROUTE, "user_set_route: vp=%d -> %016llx%016llx\n", process, 
39418 +           seg->vps_p2p_routes[process - seg->vps_process].Values[1], seg->vps_p2p_routes[process - seg->vps_process].Values[0]);
39419 +
39420 +    elan4_write_route (dev, uctx->uctx_routetable, process, &seg->vps_p2p_routes[process - seg->vps_process]);
39421 +
39422 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39423 +
39424 +    return (0);
39425 +}
39426 +
39427 +int
39428 +user_reset_route (USER_CTXT *uctx, unsigned process)
39429 +{
39430 +    ELAN4_DEV             *dev = uctx->uctx_ctxt.ctxt_dev;
39431 +    E4_VirtualProcessEntry route;
39432 +    ELAN_LOCATION          location;
39433 +    USER_VPSEG            *seg;
39434 +
39435 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39436 +
39437 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
39438 +    {
39439 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39440 +       return (-EINVAL);
39441 +    }
39442 +
39443 +    if (seg->vps_p2p_routes != NULL)
39444 +    {
39445 +       seg->vps_p2p_routes[process - seg->vps_process].Values[0] = 0;
39446 +       seg->vps_p2p_routes[process - seg->vps_process].Values[1] = 0;
39447 +    }
39448 +    
39449 +    /* generate the default route to this location */
39450 +    location = user_process2location (uctx, seg, process);
39451 +
39452 +    PRINTF (uctx, DBG_ROUTE, "user_reset_route: vp=%d\n", process);
39453 +
39454 +    if (elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, 0) < 0)
39455 +       elan4_invalidate_route (dev, uctx->uctx_routetable, process);
39456 +    else
39457 +       elan4_write_route (dev, uctx->uctx_routetable, process, &route);
39458 +
39459 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39460 +
39461 +    return (0);
39462 +}
39463 +
39464 +int
39465 +user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route)
39466 +{
39467 +    ELAN4_DEV  *dev = uctx->uctx_ctxt.ctxt_dev;
39468 +    USER_VPSEG   *seg;
39469 +    
39470 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39471 +
39472 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
39473 +    {
39474 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39475 +       return (-EINVAL);
39476 +    }
39477 +
39478 +    elan4_read_route (dev, uctx->uctx_routetable, process, route);
39479 +
39480 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39481 +    return (0);
39482 +}
39483 +
39484 +int
39485 +user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error)
39486 +{
39487 +    ELAN4_DEV  *dev = uctx->uctx_ctxt.ctxt_dev;
39488 +    USER_VPSEG *seg;
39489 +    
39490 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39491 +
39492 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL || seg->vps_type != USER_VPSEG_P2P)
39493 +    {
39494 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39495 +       return (-EINVAL);
39496 +    }
39497 +
39498 +    elan4_read_route (dev, uctx->uctx_routetable, process, route);
39499 +
39500 +    *error = elan4_check_route (&uctx->uctx_position, user_process2location (uctx, seg, process), route, 0);
39501 +
39502 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39503 +    return (0);
39504 +}
39505 +
39506 +int
39507 +user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg)
39508 +{
39509 +    USER_VPSEG   *seg;
39510 +    ELAN_LOCATION location;
39511 +    unsigned long flags;
39512 +    int                  res, found = 0;
39513 +    struct list_head *el;
39514 +
39515 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39516 +    /* determine the location of the virtual process */
39517 +    if ((seg = user_find_vpseg (uctx, vp, vp)) == NULL)
39518 +    {
39519 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d has no vpseg\n", vp);
39520 +
39521 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39522 +       return -EINVAL;
39523 +    }
39524 +
39525 +    switch (seg->vps_type)
39526 +    {
39527 +    case USER_VPSEG_P2P:
39528 +       location = user_process2location (uctx, seg, vp);
39529 +       break;
39530 +
39531 +    case USER_VPSEG_BCAST:
39532 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: vp=%d is a bcast vp\n", vp);
39533 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39534 +       return -EINVAL;
39535 +    }
39536 +
39537 +    /*  check that we're attached to the network context */
39538 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39539 +    list_for_each (el , &uctx->uctx_cent_list) {
39540 +       USER_CTXT_ENTRY *cent = list_entry (el, USER_CTXT_ENTRY, cent_link);
39541 +       
39542 +       if (cent->cent_cap->cap_mycontext == nctx)
39543 +           found++;
39544 +    }
39545 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39546 +    
39547 +    if (! found)
39548 +    {
39549 +       PRINTF (uctx, DBG_NETERR, "user_send_neterr_msg: nctx=%d not attached\n", nctx);
39550 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39551 +
39552 +       return -EINVAL;
39553 +    }
39554 +
39555 +    /* Update the fields which the user might have "faked" */
39556 +    msg->msg_context            = location.loc_context;
39557 +    msg->msg_sender.loc_node    = uctx->uctx_position.pos_nodeid;
39558 +    msg->msg_sender.loc_context = nctx;
39559 +
39560 +    res = elan4_neterr_sendmsg (uctx->uctx_ctxt.ctxt_dev, location.loc_node, retries, msg);
39561 +
39562 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39563 +
39564 +    return (res);
39565 +}
39566 +
39567 +
39568 +static int
39569 +user_resolvevp (USER_CTXT *uctx, unsigned process)
39570 +{
39571 +    int                    res = 0;
39572 +    USER_VPSEG            *seg;
39573 +    ELAN_LOCATION          location;
39574 +    E4_VirtualProcessEntry route;
39575 +
39576 +    PRINTF1 (uctx, DBG_VP, "user_resolvevp: process=%d\n", process);
39577 +
39578 +    kmutex_lock (&uctx->uctx_vpseg_lock);
39579 +
39580 +    if ((seg = user_find_vpseg (uctx, process, process)) == NULL)
39581 +    {
39582 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
39583 +       return (-EINVAL);
39584 +    }
39585 +
39586 +    switch (seg->vps_type)
39587 +    {
39588 +    case USER_VPSEG_P2P:
39589 +#ifdef use_elanmod
39590 +       if ((res = user_validate_cap (uctx, seg->vps_p2p_cap, ELAN_USER_P2P)) != 0)
39591 +           break;
39592 +#endif
39593 +
39594 +       location = user_process2location (uctx, seg, process);
39595 +
39596 +       PRINTF (uctx, DBG_VP, "user_resolvevp: vp=%d -> node=%d ctx=%d\n", process, location.loc_node, location.loc_context);
39597 +       
39598 +       if (seg->vps_p2p_routes != NULL && seg->vps_p2p_routes[process - seg->vps_process].Values[0] != 0)
39599 +           route = seg->vps_p2p_routes[process - seg->vps_process];
39600 +       else if ((res = elan4_generate_route (&uctx->uctx_position, &route, location.loc_context, location.loc_node, location.loc_node, user_p2p_route_options)) < 0)
39601 +           break;;
39602 +       
39603 +       elan4_write_route (uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, process, &route);
39604 +       break;
39605 +
39606 +    case USER_VPSEG_BCAST:
39607 +       res = user_loadroute_bcast (uctx, seg);
39608 +       break;
39609 +       
39610 +    default:
39611 +       res = -EINVAL;
39612 +       break;
39613 +    }
39614 +
39615 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
39616 +    return (res);
39617 +}
39618 +
39619 +static void
39620 +user_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
39621 +{
39622 +    USER_CTXT    *uctx = (USER_CTXT *) ctxt;
39623 +    unsigned long flags;
39624 +
39625 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39626 +
39627 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_eprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
39628 +    {
39629 +       PRINTF (uctx, DBG_EPROC, "user_eproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
39630 +
39631 +       uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR;
39632 +    }
39633 +    else
39634 +    {
39635 +       elan4_extract_eproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps), 0);
39636 +       
39637 +       DBGCMD (ctxt, DBG_EPROC, elan4_display_eproc_trap (ctxt, DBG_EPROC, "user_eproc_trap", RING_QUEUE_BACK(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps)));
39638 +       
39639 +       if (RING_QUEUE_ADD (uctx->uctx_eprocTrapQ))
39640 +           user_start_stopping (uctx, UCTX_EPROC_QUEUE_FULL);
39641 +    }
39642 +
39643 +    user_signal_trap (uctx);
39644 +
39645 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39646 +}
39647 +
39648 +static void
39649 +user_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
39650 +{
39651 +    USER_CTXT        *uctx = (USER_CTXT *) ctxt;
39652 +    USER_CQ          *ucq  = NULL;
39653 +    struct list_head *entry;
39654 +    unsigned long     flags;
39655 +
39656 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39657 +    
39658 +    list_for_each (entry, &uctx->uctx_cqlist) {
39659 +       ucq = list_entry (entry, USER_CQ, ucq_link);
39660 +
39661 +       if (elan4_cq2num(ucq->ucq_cq) == cqnum)
39662 +           break;
39663 +    }
39664 +
39665 +    ASSERT (ucq != NULL);
39666 +
39667 +    if (ucq->ucq_state != UCQ_RUNNING && CPROC_TrapType (status) == CommandProcInserterError)
39668 +    {
39669 +       PRINTF (ctxt, DBG_TRAP, "user_cproc_trap CommandProcInserterError\n");
39670 +       ucq->ucq_errored++;
39671 +    }
39672 +    else
39673 +    {
39674 +       ASSERT (ucq->ucq_state == UCQ_RUNNING);
39675 +
39676 +       elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &ucq->ucq_trap, cqnum);
39677 +
39678 +       DBGCMD (ctxt, DBG_CPROC, elan4_display_cproc_trap (ctxt, DBG_CPROC, "user_cproc_trap", &ucq->ucq_trap));
39679 +
39680 +       ucq->ucq_state = UCQ_TRAPPED;
39681 +       
39682 +    }
39683 +
39684 +    user_signal_trap (uctx);
39685 +       
39686 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39687 +}
39688 +
39689 +static void
39690 +user_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
39691 +{
39692 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
39693 +    unsigned long flags;
39694 +
39695 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39696 +
39697 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_dprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
39698 +    {
39699 +       PRINTF (uctx, DBG_DPROC, "user_dproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
39700 +
39701 +       uctx->uctx_status |= UCTX_DPROC_QUEUE_ERROR;
39702 +    }
39703 +    else
39704 +    {
39705 +       ELAN4_DPROC_TRAP *trap = RING_QUEUE_BACK (uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps);
39706 +       
39707 +       elan4_extract_dproc_trap (ctxt->ctxt_dev, status, trap, unit);
39708 +
39709 +       DBGCMD (ctxt, DBG_DPROC, elan4_display_dproc_trap (ctxt, DBG_DPROC, "user_dproc_trap", trap));
39710 +
39711 +       if (!DPROC_PrefetcherFault (status) && DPROC_TrapType(status) == DmaProcFailCountError && !RING_QUEUE_FULL (uctx->uctx_dmaQ))
39712 +       {
39713 +           trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count);
39714 +
39715 +           *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = trap->tr_desc;
39716 +    
39717 +           (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
39718 +       }
39719 +       else
39720 +       {
39721 +           if (RING_QUEUE_ADD (uctx->uctx_dprocTrapQ))
39722 +               user_start_stopping (uctx, UCTX_DPROC_QUEUE_FULL);
39723 +       }
39724 +    }
39725 +
39726 +    user_signal_trap (uctx);
39727 +
39728 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39729 +}
39730 +
39731 +static void
39732 +user_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
39733 +{
39734 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
39735 +    unsigned long flags;
39736 +
39737 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39738 +
39739 +    if (RING_QUEUE_REALLY_FULL (uctx->uctx_tprocTrapQ) || (uctx->uctx_status & UCTX_STOPPED))
39740 +    {
39741 +       PRINTF (uctx, DBG_TPROC, "user_tproc_trap: %s\n", (uctx->uctx_status & UCTX_STOPPED) ? "context stopped" : "trap queue overflow");
39742 +
39743 +       uctx->uctx_status |= UCTX_TPROC_QUEUE_ERROR;
39744 +    }
39745 +    else
39746 +    {
39747 +       elan4_extract_tproc_trap (ctxt->ctxt_dev, status, RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps));
39748 +       
39749 +       DBGCMD (ctxt, DBG_TPROC, elan4_display_tproc_trap (ctxt, DBG_TPROC, "user_tproc_trap", RING_QUEUE_BACK (uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps)));
39750 +       
39751 +       if (RING_QUEUE_ADD (uctx->uctx_tprocTrapQ))
39752 +           user_start_stopping (uctx, UCTX_TPROC_QUEUE_FULL);
39753 +    }
39754 +    user_signal_trap (uctx);
39755 +
39756 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39757 +}
39758 +
39759 +static void
39760 +user_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
39761 +{
39762 +    USER_CTXT       *uctx  = (USER_CTXT *) ctxt;
39763 +    USER_IPROC_TRAP *utrap = &uctx->uctx_iprocTrap[unit & 1];
39764 +    unsigned long    flags;
39765 +
39766 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39767 +
39768 +    ASSERT (utrap->ut_state == UTS_IPROC_RUNNING);
39769 +
39770 +    elan4_extract_iproc_trap (ctxt->ctxt_dev, status, &utrap->ut_trap, unit);
39771 +    DBGCMD (ctxt, DBG_IPROC, elan4_display_iproc_trap (ctxt, DBG_IPROC, "user_iproc_trap", &utrap->ut_trap));
39772 +
39773 +    utrap->ut_state = UTS_IPROC_TRAPPED;
39774 +
39775 +    user_start_nacking (uctx, unit ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
39776 +
39777 +    user_signal_trap (uctx);
39778 +
39779 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39780 +}
39781 +
39782 +static void
39783 +user_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
39784 +{
39785 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
39786 +    unsigned long flags;
39787 +
39788 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39789 +
39790 +    PRINTF1 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx\n", cookie);
39791 +
39792 +    switch (cookie)
39793 +    {
39794 +    case ELAN4_INT_COOKIE_DDCQ:
39795 +       uctx->uctx_ddcq_intr--;
39796 +
39797 +       user_signal_trap (uctx);
39798 +       break;
39799 +
39800 +    default:
39801 +       if (uctx->uctx_intcookie_table == NULL || intcookie_fire (uctx->uctx_intcookie_table, cookie) != 0)
39802 +       {
39803 +           PRINTF2 (uctx, DBG_TRAP, "user_interrupt: cookie=%llx %s\n", cookie, uctx->uctx_intcookie_table ? "not found" : "no table");
39804 +           uctx->uctx_status |= UCTX_EPROC_QUEUE_ERROR;
39805 +           user_signal_trap (uctx);
39806 +       }
39807 +       break;
39808 +    }
39809 +
39810 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39811 +}
39812 +static void
39813 +user_needs_shuffle (ELAN4_CTXT *ctxt, int tbl, int hashidx)
39814 +{
39815 +    USER_CTXT     *uctx = (USER_CTXT *) ctxt;
39816 +    unsigned long  flags;
39817 +
39818 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39819 +
39820 +    elan4mmu_set_shuffle(ctxt, tbl, hashidx);
39821 +
39822 +    if (ctxt->shuffle_needed[tbl]) 
39823 +       user_shuffle_signal_trap (uctx);
39824 +
39825 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39826 +
39827 +}
39828 +static void
39829 +user_neterrmsg (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg)
39830 +{
39831 +    USER_CTXT *uctx = (USER_CTXT *) ctxt;
39832 +    unsigned long flags;
39833 +
39834 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39835 +    
39836 +    if (! RING_QUEUE_FULL (uctx->uctx_msgQ))
39837 +    {
39838 +       memcpy (RING_QUEUE_BACK (uctx->uctx_msgQ, uctx->uctx_msgs), msg, sizeof (ELAN4_NETERR_MSG));
39839 +
39840 +       (void) RING_QUEUE_ADD (uctx->uctx_msgQ);
39841 +    
39842 +       user_signal_trap (uctx);
39843 +    }
39844 +    
39845 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39846 +}
39847 +
39848 +ELAN4_TRAP_OPS user_trap_ops = 
39849 +{
39850 +    user_eproc_trap,
39851 +    user_cproc_trap,
39852 +    user_dproc_trap,
39853 +    user_tproc_trap,
39854 +    user_iproc_trap,
39855 +    user_interrupt,
39856 +    user_neterrmsg,
39857 +    user_needs_shuffle,
39858 +};
39859 +
39860 +static int
39861 +deliver_trap (ELAN4_USER_TRAP *utrapp, int type, unsigned proc, void *trap, ...)
39862 +{
39863 +    register int i, len;
39864 +    va_list ap;
39865 +
39866 +    PRINTF (NULL, DBG_TRAP, "deliver_trap: type=%d proc=%d\n", type, proc);
39867 +
39868 +    switch (proc)
39869 +    {
39870 +    case UTS_CPROC:      len = sizeof (ELAN4_CPROC_TRAP); break;
39871 +    case UTS_DPROC:      len = sizeof (ELAN4_DPROC_TRAP); break;
39872 +    case UTS_EPROC:      len = sizeof (ELAN4_EPROC_TRAP); break;
39873 +    case UTS_IPROC:      len = sizeof (ELAN4_IPROC_TRAP); break;
39874 +    case UTS_TPROC:      len = sizeof (ELAN4_TPROC_TRAP); break;
39875 +    case UTS_NETERR_MSG: len = sizeof (ELAN4_NETERR_MSG); break;
39876 +    default:             len = 0; break;
39877 +    }
39878 +
39879 +    if (put_user (type, &utrapp->ut_type) || put_user (proc, &utrapp->ut_proc) || copy_to_user (&utrapp->ut_trap, trap, len))
39880 +       return (UTS_EFAULT);
39881 +
39882 +    va_start (ap, trap);
39883 +    for (i = 0; i < sizeof (utrapp->ut_args)/sizeof (utrapp->ut_args[0]); i++)
39884 +       if (put_user (va_arg (ap, unsigned long), &utrapp->ut_args[i]))
39885 +           return (UTS_EFAULT);
39886 +    va_end (ap);
39887 +
39888 +    return (type);
39889 +}
39890 +
39891 +static int
39892 +user_pagefault (USER_CTXT *uctx, E4_FaultSave *farea)
39893 +{
39894 +    E4_Addr      addr = farea->FaultAddress;
39895 +    E4_uint32    fsr  = FaultSaveFSR(farea->FSRAndFaultContext);
39896 +    FAULT_SAVE  *entry;
39897 +    FAULT_SAVE **predp;
39898 +    int count;
39899 +
39900 +    PRINTF2 (uctx, DBG_FAULT, "user_pagefault: addr=%llx fsr %x\n", (unsigned long long) addr, fsr);
39901 +    
39902 +    if ((fsr & FSR_FaultForBadData) != 0)                      /* Memory ECC error during walk */
39903 +    {
39904 +       PRINTF0 (uctx, DBG_FAULT, "user_pagefault: ECC error during walk\n");
39905 +       return (-EFAULT);
39906 +    }
39907 +    
39908 +    if ((fsr & FSR_FaultForMaxChainCount) != 0)                        /* Have walked a chain of 1024 items */
39909 +    {
39910 +       PRINTF0 (uctx, DBG_FAULT, "user_pagefault: pte chain too long\n");
39911 +       return (-EFAULT);
39912 +    }
39913 +    
39914 +    if (! user_pagefault_enabled)
39915 +       return (-EFAULT);
39916 +
39917 +    if (uctx->uctx_num_fault_save)
39918 +    {
39919 +        spin_lock (&uctx->uctx_fault_lock);
39920 +        for( predp = &uctx->uctx_fault_list; (entry = *predp)->next != NULL; predp = &entry->next)
39921 +        {
39922 +           if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1)))
39923 +               break;
39924 +        }
39925 +
39926 +        *predp = entry->next;
39927 +        entry->next = uctx->uctx_fault_list;
39928 +        uctx->uctx_fault_list = entry;
39929 +
39930 +        if (entry->addr == (addr & ~((E4_Addr) PAGE_SIZE-1)))
39931 +        {
39932 +           if ((entry->count <<= 1) > max_fault_pages)
39933 +               entry->count = max_fault_pages;
39934 +        }
39935 +        else
39936 +           entry->count = min_fault_pages;
39937 +
39938 +        entry->addr = (addr & ~((E4_Addr) PAGE_SIZE-1))+(entry->count * PAGE_SIZE);
39939 +        count = entry->count;
39940 +        spin_unlock (&uctx->uctx_fault_lock);
39941 +
39942 +        if (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), count * PAGESIZE, fsr) == 0)
39943 +           return 0;
39944 +
39945 +       /* else pre-faulting has failed, try just this page */
39946 +    }
39947 +
39948 +    return (user_load_range (uctx, addr & ~((E4_Addr) PAGE_SIZE-1), PAGE_SIZE, fsr));
39949 +
39950 +}
39951 +
39952 +static int
39953 +queue_dma_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_DMA *dma)
39954 +{
39955 +    unsigned long flags;
39956 +
39957 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39958 +
39959 +    if (RING_QUEUE_FULL (uctx->uctx_dmaQ))
39960 +    {
39961 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39962 +       
39963 +       PRINTF (uctx, DBG_DPROC, "queue_dma_for_retry: overflow\n");
39964 +       
39965 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_DPROC_QUEUE_OVERFLOW));
39966 +    }
39967 +
39968 +    *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma;
39969 +    
39970 +    (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
39971 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39972 +
39973 +    return (UTS_FINISHED);
39974 +}
39975 +
39976 +static int
39977 +queue_thread_for_retry (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, E4_ThreadRegs *regs)
39978 +{
39979 +    unsigned long flags;
39980 +    
39981 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
39982 +
39983 +    if (RING_QUEUE_FULL (uctx->uctx_threadQ))
39984 +    {
39985 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39986 +
39987 +       PRINTF (uctx, DBG_TPROC, "queue_thread_for_retry: overflow\n");
39988 +
39989 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, UCTX_TPROC_QUEUE_OVERFLOW));
39990 +    }
39991 +
39992 +    *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs;
39993 +    (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
39994 +    
39995 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
39996 +
39997 +    return (UTS_FINISHED);
39998 +}
39999 +
40000 +static int
40001 +fixup_eproc_trap (USER_CTXT *uctx, ELAN4_EPROC_TRAP *trap, int waitevent)
40002 +{
40003 +    E4_FaultSave *farea = &trap->tr_faultarea;
40004 +    E4_uint32     fsr   = FaultSaveFSR(farea->FSRAndFaultContext);
40005 +    E4_uint64     CountAndType;
40006 +    E4_uint64     CopySource;
40007 +    E4_uint64     CopyDest;
40008 +
40009 +    /*
40010 +     * Event processor can trap as follows :
40011 +     *   1) Event location read                (faddr == event location & Event Permission)
40012 +     *   2) Event location write       (faddr == event location & Event Permission)
40013 +     *   3) Copy Source read           Read Access
40014 +     *   4) Copy/Write dest write      other
40015 +     *
40016 +     *  NOTE - it is possible to see both 3) and 4) together - but only with physical errors.
40017 +     */
40018 +    if (AT_Perm(fsr) == AT_PermLocalDataRead || AT_Perm(fsr) == AT_PermLocalDataWrite)
40019 +    {
40020 +       /* 
40021 +        * We complete the copy/write by issuing a waitevent 0 of the approriate type.
40022 +        *   - NB mask off bottom bits of EventAddr in case of partial setevent
40023 +        */
40024 +       E4_uint64 EventAddr = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1);
40025 +
40026 +       if (! user_ddcq_check (uctx, 4))
40027 +           return (0);
40028 +       
40029 +       if ((trap->tr_event.ev_CountAndType & E4_EVENT_COPY_TYPE_MASK) == E4_EVENT_WRITE)
40030 +       {
40031 +           /* case 4) faulted on write word to destination */
40032 +
40033 +           CountAndType = trap->tr_event.ev_CountAndType & E4_EVENT_TYPE_MASK;
40034 +           
40035 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: write Event=%llx CountAndType=%llx\n", EventAddr, CountAndType);
40036 +           PRINTF (uctx, DBG_TRAP, "                  WritePtr=%llx WriteValue=%llx\n", 
40037 +                   trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue);
40038 +
40039 +           user_ddcq_waitevent (uctx, EventAddr, CountAndType, trap->tr_event.ev_WritePtr, trap->tr_event.ev_WriteValue);
40040 +       }
40041 +       else
40042 +       {
40043 +           /* case 3) or case 4) faulted on read/write of copy */
40044 +           if (AT_Perm (fsr) == AT_PermLocalDataRead)
40045 +           {
40046 +               CountAndType = (trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | EPROC_CopySize(trap->tr_status);
40047 +               CopySource   = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE;
40048 +               CopyDest     = trap->tr_event.ev_CopyDest;
40049 +           }
40050 +           else
40051 +           {
40052 +               CountAndType = ((trap->tr_event.ev_CountAndType & E4_EVENT_DATA_TYPE_MASK) | 
40053 +                               ((EPROC_CopySize(trap->tr_status) + EVENT_COPY_NDWORDS) & E4_EVENT_COPY_SIZE_MASK));
40054 +               CopySource   = trap->tr_event.ev_CopySource - EVENT_COPY_BLOCK_SIZE;
40055 +               CopyDest     = trap->tr_event.ev_CopyDest - EVENT_COPY_BLOCK_SIZE;
40056 +           }
40057 +           
40058 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: copy Event=%llx CountAndType=%llx\n", EventAddr, CountAndType);
40059 +           PRINTF (uctx, DBG_TRAP, "                  CopySource=%llx CopyDest=%llx\n", CopySource, CopyDest);
40060 +
40061 +           user_ddcq_waitevent (uctx, EventAddr, CountAndType, CopySource, CopyDest);
40062 +       }
40063 +    }
40064 +    else
40065 +    {
40066 +       E4_uint64 EventAddr  = trap->tr_eventaddr & ~((E4_uint64) E4_EVENT_ALIGN-1);
40067 +       E4_uint32 EventCount = trap->tr_eventaddr & (E4_EVENT_ALIGN-1);
40068 +
40069 +       /* case 1) or 2) - just reissue the event */
40070 +       if (! waitevent)
40071 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: setevent EventAddr=%llx EventCount=%x\n", EventAddr, EventCount);
40072 +       else
40073 +       {
40074 +           PRINTF (uctx, DBG_TRAP, "fixup_eproc_trap: waitevent Event=%llx CountAndType=%llx\n", EventAddr, trap->tr_event.ev_CountAndType);
40075 +           PRINTF (uctx, DBG_TRAP, "                  Param[0]=%llx Param[1]=%llx\n",
40076 +                    trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
40077 +       }
40078 +
40079 +       if (! user_ddcq_check (uctx, waitevent ? 4 : 2))
40080 +           return (0);
40081 +       
40082 +       if (waitevent)
40083 +           user_ddcq_waitevent (uctx, EventAddr, trap->tr_event.ev_CountAndType, 
40084 +                                 trap->tr_event.ev_Params[0], trap->tr_event.ev_Params[1]);
40085 +       else
40086 +           user_ddcq_seteventn (uctx, EventAddr, EventCount);
40087 +    }
40088 +
40089 +    return (1);
40090 +}
40091 +
40092 +
40093 +static int
40094 +resolve_eproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_EPROC_TRAP *trap)
40095 +{
40096 +    switch (EPROC_TrapType (trap->tr_status))
40097 +    {
40098 +    case EventProcNoFault:
40099 +       PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcNoFault\n");
40100 +
40101 +       return (UTS_FINISHED);
40102 +       
40103 +    case EventProcAddressAlignment:
40104 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_EPROC, trap));
40105 +
40106 +    case EventProcMemoryFault:
40107 +       PRINTF (uctx, DBG_TRAP, "resolve_eproc_trap: EventProcMemoryFault @ %llx\n", trap->tr_faultarea.FaultAddress);
40108 +
40109 +       if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
40110 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_EPROC, trap));
40111 +
40112 +       return (UTS_FINISHED);
40113 +       
40114 +    case EventProcCountWrapError:
40115 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_EPROC, trap));
40116 +
40117 +    default:
40118 +       printk ("resolve_eproc_trap: bad trap type %d\n", EPROC_TrapType (trap->tr_status));
40119 +       BUG();
40120 +    }
40121 +
40122 +    return (UTS_FINISHED);
40123 +}
40124 +
40125 +static int
40126 +resolve_cproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, USER_CQ *ucq)
40127 +{
40128 +    ELAN4_DEV        *dev    = uctx->uctx_ctxt.ctxt_dev;
40129 +    ELAN4_CPROC_TRAP *trap   = &ucq->ucq_trap;
40130 +    E4_uint64         command;
40131 +    int               res;
40132 +    int               chan;
40133 +
40134 +    ELAN_LOCATION location;
40135 +    int vp, node;
40136 +
40137 +    PRINTF2 (uctx, DBG_CPROC, "resolve_cproc_trap: cq %p is trapped - Status %lx\n", ucq, trap->tr_status);
40138 +    
40139 +    switch (CPROC_TrapType (trap->tr_status))
40140 +    {
40141 +    case CommandProcDmaQueueOverflow:
40142 +       PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcDmaQueueOverflow\n");
40143 +       /*
40144 +        * XXXX: should wait for the queue to become emptier if we're 
40145 +        *       responsible for it being very full
40146 +        */
40147 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
40148 +       break;
40149 +
40150 +    case CommandProcInterruptQueueOverflow:
40151 +       PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcInterruptQueueOverflow\n");
40152 +       /*
40153 +        * XXXX: should wait for the queue to become emptier if we're
40154 +        *       responsible for it being very full
40155 +        */
40156 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
40157 +       break;
40158 +       
40159 +    case CommandProcWaitTrap:
40160 +       PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcWaitTrap\n");
40161 +       
40162 +       if ((res = resolve_eproc_trap (uctx, utrapp, &trap->tr_eventtrap)) != UTS_FINISHED)
40163 +       {
40164 +           ucq->ucq_state = UCQ_STOPPED;
40165 +
40166 +           return (res);
40167 +       }
40168 +       
40169 +       if (fixup_eproc_trap (uctx, &trap->tr_eventtrap, 1) == 0)
40170 +           return UTS_RESCHEDULE;
40171 +
40172 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
40173 +       break;
40174 +       
40175 +    case CommandProcMemoryFault:
40176 +       PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcMemoryFault at %llx\n", trap->tr_faultarea.FaultAddress);
40177 +       if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
40178 +       {
40179 +           ucq->ucq_state = UCQ_STOPPED;
40180 +
40181 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
40182 +       }
40183 +       
40184 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
40185 +       break;
40186 +       
40187 +    case CommandProcRouteFetchFault:
40188 +       command = elan4_trapped_open_command (dev, ucq->ucq_cq);
40189 +       
40190 +       PRINTF1 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcRouteFetchFault to vp %d\n", (int) (command >> 32));
40191 +       
40192 +       if (user_resolvevp (uctx, (unsigned) (command >> 32)) != 0)
40193 +       {
40194 +           ucq->ucq_state = UCQ_STOPPED;
40195 +
40196 +           return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq), (long) (command >> 32)));
40197 +       }
40198 +
40199 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
40200 +       break;
40201 +       
40202 +    case CommandProcFailCountZero:
40203 +       PRINTF0 (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcFailCountZero - reset failcount\n");
40204 +       
40205 +       /* Update CPROC timeout route statistics */
40206 +       for (chan = 0; chan <= 1; chan++)
40207 +       {
40208 +           /* Was there a timeout on this channel ? */
40209 +           if (PackValue(trap->tr_qdesc.CQ_AckBuffers, chan) == PackTimeout)
40210 +           {
40211 +               /* Find the last open command for that channel to extract the relevant vp */
40212 +               if ((vp = cproc_open_extract_vp(uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq, chan)) != -1)
40213 +               {
40214 +                   E4_VirtualProcessEntry route;
40215 +                   
40216 +                   kmutex_lock (&uctx->uctx_vpseg_lock);
40217 +                   location = user_process2location(uctx, NULL, vp);
40218 +                   elan4_read_route (uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, vp, &route);
40219 +                   kmutex_unlock (&uctx->uctx_vpseg_lock);
40220 +                   node = location.loc_node;
40221 +                   
40222 +                   kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
40223 +                   
40224 +                   if ((node >= 0) && (node < uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes))
40225 +                   {
40226 +                       uctx->uctx_ctxt.ctxt_dev->dev_cproc_timeout[node]++;
40227 +                       
40228 +                       elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_cproc_timeout_routes,
40229 +                                           &route, uctx->uctx_ctxt.ctxt_dev);
40230 +                   }
40231 +                   
40232 +                   kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
40233 +               }
40234 +           }
40235 +       }
40236 +           
40237 +       /* NOTE - we must not modify the ChannelNotCompleted bits - so modify */
40238 +       /*        the restart count with a part-word store */
40239 +       elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, user_cproc_retry_count);
40240 +
40241 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
40242 +       break;
40243 +
40244 +    case CommandProcAddressAlignment:
40245 +       ucq->ucq_state = UCQ_STOPPED;
40246 +
40247 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
40248 +
40249 +    case CommandProcPermissionTrap:
40250 +    {
40251 +       sdramaddr_t cqdesc = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc));
40252 +       E4_uint64   control = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_Control));
40253 +
40254 +       PRINTF (uctx, DBG_CPROC, "resolve_cproc_trap: CommandProcPermissionTrap - %s\n",
40255 +               (control & CQ_PermissionMask) != ucq->ucq_cq->cq_perm ? "resume from stop" : "permission denied");
40256 +       
40257 +       if ((control & CQ_PermissionMask) == ucq->ucq_cq->cq_perm)
40258 +           return (deliver_trap (utrapp, UTS_PERMISSION_DENIED, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
40259 +
40260 +       elan4_updatecq (dev, ucq->ucq_cq, ucq->ucq_cq->cq_perm, 0);
40261 +
40262 +       ucq->ucq_state = UCQ_NEEDS_RESTART;
40263 +       break;
40264 +    }
40265 +    
40266 +    case CommandProcBadData:
40267 +       ucq->ucq_state = UCQ_STOPPED;
40268 +
40269 +       return (deliver_trap (utrapp, UTS_INVALID_COMMAND, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
40270 +
40271 +    default:
40272 +       ucq->ucq_state = UCQ_STOPPED;
40273 +
40274 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_CPROC, trap, elan4_cq2idx(ucq->ucq_cq)));
40275 +    }
40276 +
40277 +    return (UTS_FINISHED);
40278 +}
40279 +
40280 +static int
40281 +resolve_dproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_DPROC_TRAP *trap)
40282 +{
40283 +    ELAN_LOCATION location;
40284 +    int node;
40285 +    E4_VirtualProcessEntry route;
40286 +
40287 +    if (DPROC_PrefetcherFault (trap->tr_status))
40288 +    {
40289 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: PrefetcherFault at %llx\n", trap->tr_prefetchFault.FaultAddress);
40290 +
40291 +       if (user_pagefault (uctx, &trap->tr_prefetchFault) != 0)
40292 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_DPROC, trap));
40293 +       
40294 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc));
40295 +    }
40296 +    
40297 +    switch (DPROC_TrapType (trap->tr_status))
40298 +    {
40299 +    case DmaProcRouteFetchFault:
40300 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcRouteFetchFault vp %d\n", trap->tr_desc.dma_vproc);
40301 +
40302 +       if (user_resolvevp (uctx, trap->tr_desc.dma_vproc) != 0)
40303 +           return (deliver_trap (utrapp, UTS_INVALID_VPROC, UTS_DPROC, trap, trap->tr_desc.dma_vproc));
40304 +       
40305 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* immediate */));
40306 +       
40307 +    case DmaProcFailCountError:
40308 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcFailCountError - vp %d cookie %llx\n",
40309 +               trap->tr_desc.dma_vproc, trap->tr_desc.dma_cookie);
40310 +
40311 +       trap->tr_desc.dma_typeSize |= DMA_FailCount (user_dproc_retry_count);
40312 +
40313 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));
40314 +
40315 +    case DmaProcPacketAckError:
40316 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcPacketAckError - %d%s\n", DPROC_PacketAckValue (trap->tr_status), 
40317 +               DPROC_PacketTimeout (trap->tr_status) ? " timeout" : "");
40318 +
40319 +       kmutex_lock (&uctx->uctx_vpseg_lock);
40320 +       location = user_process2location(uctx, NULL, trap->tr_desc.dma_vproc);
40321 +       elan4_read_route(uctx->uctx_ctxt.ctxt_dev, uctx->uctx_routetable, trap->tr_desc.dma_vproc, &route);
40322 +       kmutex_unlock (&uctx->uctx_vpseg_lock);
40323 +       node = location.loc_node;
40324 +
40325 +       /* Update dproc route timeout statistics */
40326 +       if ((node >= 0) && (node < uctx->uctx_ctxt.ctxt_dev->dev_position.pos_nodes))
40327 +       {
40328 +           kmutex_lock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
40329 +           
40330 +           if ((route.Values[0] != 0) || (route.Values[1] != 0))
40331 +           {
40332 +               if (DPROC_PacketTimeout (trap->tr_status))
40333 +               {
40334 +                   uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout[node]++;
40335 +                   elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_dproc_timeout_routes,
40336 +                                       &route, uctx->uctx_ctxt.ctxt_dev);
40337 +               }
40338 +               else
40339 +               {
40340 +                   uctx->uctx_ctxt.ctxt_dev->dev_ack_errors[node]++;
40341 +                   elan4_ringbuf_store(&uctx->uctx_ctxt.ctxt_dev->dev_ack_error_routes,
40342 +                                       &route, uctx->uctx_ctxt.ctxt_dev);
40343 +               }
40344 +           }
40345 +           
40346 +           kmutex_unlock(&uctx->uctx_ctxt.ctxt_dev->dev_lock);
40347 +       }
40348 +
40349 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));
40350 +
40351 +    case DmaProcQueueOverflow:
40352 +       PRINTF (uctx, DBG_DPROC, "resolve_dproc_trap: DmaProcQueueOverflow\n");
40353 +       return (queue_dma_for_retry (uctx, utrapp, &trap->tr_desc /* XXXX - backoff for some time later */));
40354 +       
40355 +    case DmaProcRunQueueReadFault:
40356 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_DPROC, trap));
40357 +       
40358 +    default:
40359 +       printk ("resolve_dproc_trap: unknown trap type : %d\n", DPROC_TrapType(trap->tr_status));
40360 +       BUG();
40361 +    }
40362 +    return UTS_FINISHED;
40363 +}
40364 +
40365 +int
40366 +resolve_tproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, ELAN4_TPROC_TRAP *trap)
40367 +{
40368 +    PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trap state = %lx\n", trap->tr_state);
40369 +
40370 +    if (trap->tr_state & TS_TrapForTooManyInstructions)
40371 +       return (deliver_trap (utrapp, UTS_BAD_TRAP, UTS_TPROC, trap));
40372 +    
40373 +    if (trap->tr_state & TS_Unimplemented)
40374 +       return (deliver_trap (utrapp, UTS_UNIMP_INSTR, UTS_TPROC, trap));
40375 +    
40376 +    if (trap->tr_state & TS_DataAlignmentError)
40377 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_TPROC, trap));
40378 +    
40379 +    if ((trap->tr_state & TS_InstAccessException) && user_pagefault (uctx, &trap->tr_instFault) != 0)
40380 +       return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap));
40381 +    
40382 +    if ((trap->tr_state & TS_DataAccessException) && user_pagefault (uctx, &trap->tr_dataFault) != 0)
40383 +       return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_TPROC, trap));
40384 +    
40385 +    /* If we're restarting from trap - then just need to re-issue it */
40386 +    if (trap->tr_pc == uctx->uctx_trestart_addr || (trap->tr_state & TS_TrappedFlag))
40387 +    {
40388 +       PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: trapped in trap code PC=%llx SP=%llx\n", trap->tr_pc, trap->tr_regs[1]);
40389 +
40390 +       trap->tr_regs[0] = uctx->uctx_trestart_addr;
40391 +    }
40392 +    else
40393 +    {
40394 +       E4_uint64 *sp = (E4_uint64 *) user_elan2main (uctx, trap->tr_regs[1]);
40395 +       int        i, reload;
40396 +
40397 +       /* need to store the register on the stack see */
40398 +       /*  lib_tproc_trampoline_elan4_thread.S for stack layout */
40399 +#define TS_STACK_OFF(REG)      ((((REG)&7)) - (((REG)>>3)*8) - 8)
40400 +       for (reload = 0, i = 0; i < 64; i++)
40401 +       {
40402 +           if (trap->tr_dirty & ((E4_uint64) 1 << i))
40403 +           {
40404 +               PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: %%r%d  [%016llx] -> %p\n", i, trap->tr_regs[i], &sp[TS_STACK_OFF(i)]);
40405 +
40406 +               sulonglong ((u64 *) &sp[TS_STACK_OFF(i)], trap->tr_regs[i]);
40407 +               
40408 +               reload |= (1 << (i >> 3));
40409 +           }
40410 +       }
40411 +#undef TS_STACK_OFF
40412 +
40413 +       PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: pc %llx npc %llx\n", trap->tr_pc, trap->tr_npc);
40414 +       PRINTF (uctx, DBG_TPROC, "resolve_tproc_trap: CC %x reload %x\n", (int) (trap->tr_state >> TS_XCCshift), reload);
40415 +
40416 +       trap->tr_regs[0] = uctx->uctx_trestart_addr;
40417 +       trap->tr_regs[2] = trap->tr_pc;
40418 +       trap->tr_regs[3] = trap->tr_npc;
40419 +       trap->tr_regs[4] = (trap->tr_state >> TS_XCCshift) & TS_XCCmask;
40420 +       trap->tr_regs[5] = reload;
40421 +    }
40422 +
40423 +    return (queue_thread_for_retry (uctx, utrapp, (E4_ThreadRegs *) trap->tr_regs));
40424 +}
40425 +
40426 +static int
40427 +resolve_iproc_trap (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int channel)
40428 +{
40429 +    USER_IPROC_TRAP  *utrap = &uctx->uctx_iprocTrap[channel];
40430 +    ELAN4_IPROC_TRAP *trap  = &utrap->ut_trap;
40431 +    unsigned long     flags;
40432 +
40433 +    elan4_inspect_iproc_trap (trap);
40434 +
40435 +    if (trap->tr_flags & TR_FLAG_TOOMANY_TRANS)
40436 +       return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));
40437 +
40438 +    ASSERT (trap->tr_trappedTrans >= 0 && trap->tr_trappedTrans < trap->tr_numTransactions);
40439 +
40440 +    switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType))
40441 +    {
40442 +    case InputMemoryFault:
40443 +       if (user_pagefault (uctx, &trap->tr_faultarea) != 0)
40444 +       {
40445 +           utrap->ut_state = UTS_IPROC_STOPPED;
40446 +           
40447 +           return (deliver_trap (utrapp, UTS_INVALID_ADDR, UTS_IPROC, trap, channel));
40448 +       }
40449 +       break;
40450 +
40451 +    case InputDmaQueueOverflow:
40452 +    case InputEventEngineTrapped:
40453 +       /* nothing to do for these 2 - restarting will simulate the transactions */
40454 +       break;
40455 +
40456 +    case InputEopErrorOnWaitForEop:
40457 +    case InputEopErrorTrap:
40458 +       break;
40459 +
40460 +    case InputCrcErrorAfterPAckOk:
40461 +       PRINTF (DBG_DEVICE, DBG_IPROC, "InputCrcErrorAfterPAckOk: flags %x\n", trap->tr_flags);
40462 +
40463 +       ASSERT ((trap->tr_flags & TR_FLAG_ACK_SENT) && ((trap->tr_flags & (TR_FLAG_DMA_PACKET|TR_FLAG_BAD_TRANS)) ||
40464 +                                                       ((trap->tr_flags & TR_FLAG_EOP_ERROR) && trap->tr_identifyTrans == TR_TRANS_INVALID)));
40465 +       break;
40466 +
40467 +    case InputDiscardAfterAckOk:
40468 +       return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));
40469 +
40470 +    case InputAddressAlignment:
40471 +       return (deliver_trap (utrapp, UTS_ALIGNMENT_ERROR, UTS_IPROC, trap, channel));
40472 +
40473 +    case InputInvalidTransType:
40474 +       return (deliver_trap (utrapp, UTS_INVALID_TRANS, UTS_IPROC, trap, channel));
40475 +
40476 +    default:
40477 +       printk ("resolve_iproc_trap: unknown trap type %d\n", IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType));
40478 +       BUG();
40479 +       /* NOTREACHED */
40480 +    }
40481 +
40482 +    if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD))
40483 +    {
40484 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40485 +
40486 +       utrap->ut_state = UTS_IPROC_RUNNING;
40487 +
40488 +       user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
40489 +       
40490 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40491 +    }
40492 +    else if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID)))
40493 +    {
40494 +       /* 
40495 +        * TR_FLAG_DMA_PACKET   means a DMA packet has faulted.
40496 +        *
40497 +        * TR_FLAG_BAD_TRANS    means we have a transaction with a bad CRC after the transaction
40498 +        *                      which sent the ack - this implies it's an overlapped ack DMA packet
40499 +        *
40500 +        * TR_FLAG_EOP_ERROR    means we've received an EOP reset - if we hadn't seen an identify
40501 +        *                      transaction then it's a DMA packet.
40502 +        *
40503 +        * To ensure that the DMA processor works correctly the next packet must be NACKed to 
40504 +        * cause it to resend this one.
40505 +        */
40506 +       PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: %s during DMA packet\n",
40507 +               (trap->tr_flags & TR_FLAG_BAD_TRANS) ? "BadTransaction" : (trap->tr_flags & TR_FLAG_EOP_ERROR) ? "EopError" : "trap");
40508 +
40509 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40510 +
40511 +       if (trap->tr_flags & TR_FLAG_DMA_PACKET)
40512 +       {
40513 +           if (! (trap->tr_flags & TR_FLAG_BAD_TRANS))
40514 +               utrap->ut_state = UTS_IPROC_EXECUTE_PACKET;
40515 +           else
40516 +           {
40517 +               kcondvar_t waithere;
40518 +
40519 +               /* We must ensure that the next packet is always nacked, so
40520 +                * we wait here for an output timeout before dropping the 
40521 +                * context filter - we just pause here for 4 mS */
40522 +               kcondvar_init (&waithere);
40523 +               kcondvar_timedwait (&waithere, &uctx->uctx_spinlock, &flags, lbolt + (HZ/250) + 1);;
40524 +               kcondvar_destroy (&waithere);
40525 +
40526 +               utrap->ut_state = UTS_IPROC_RUNNING;
40527 +               
40528 +               user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
40529 +           }
40530 +       }
40531 +       else
40532 +       {
40533 +           utrap->ut_state = UTS_IPROC_RUNNING;
40534 +
40535 +           user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
40536 +       }
40537 +
40538 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40539 +    }
40540 +    else if (trap->tr_flags & TR_FLAG_EOP_ERROR)
40541 +    {
40542 +       PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: EopError with identify\n");
40543 +
40544 +       utrap->ut_state = UTS_IPROC_NETWORK_ERROR;
40545 +    }
40546 +    else
40547 +    {
40548 +       PRINTF (uctx, DBG_IPROC, "resolve_iproc_trap: execute packet\n");
40549 +
40550 +       utrap->ut_state = UTS_IPROC_EXECUTE_PACKET;
40551 +    }
40552 +
40553 +    return UTS_FINISHED;
40554 +}
40555 +
40556 +
40557 +static int
40558 +resolve_cproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40559 +{
40560 +    struct list_head *entry;
40561 +    int res = UTS_FINISHED;
40562 +
40563 +    kmutex_lock (&uctx->uctx_cqlock);
40564 +    list_for_each (entry, &uctx->uctx_cqlist) {
40565 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
40566 +
40567 +       if (ucq->ucq_state == UCQ_TRAPPED)
40568 +       {
40569 +           res = resolve_cproc_trap (uctx, utrapp, ucq);
40570 +
40571 +           if (res != UTS_FINISHED)
40572 +               break;
40573 +       }
40574 +
40575 +       if (ucq->ucq_errored)
40576 +       {
40577 +           ucq->ucq_errored = 0;
40578 +           res = deliver_trap (utrapp, UTS_CPROC_ERROR, UTS_CPROC, &ucq->ucq_trap, elan4_cq2idx(ucq->ucq_cq));
40579 +           break;
40580 +       }
40581 +    }
40582 +    kmutex_unlock (&uctx->uctx_cqlock);
40583 +
40584 +    return (res);
40585 +}
40586 +
40587 +static int
40588 +resolve_eproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40589 +{
40590 +    unsigned long flags;
40591 +    int res;
40592 +
40593 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40594 +    while (! RING_QUEUE_EMPTY (uctx->uctx_eprocTrapQ))
40595 +    {
40596 +       ELAN4_EPROC_TRAP trap = *RING_QUEUE_FRONT (uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps);
40597 +
40598 +       (void) RING_QUEUE_REMOVE (uctx->uctx_eprocTrapQ);
40599 +
40600 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40601 +
40602 +       if ((res = resolve_eproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
40603 +           return (res);
40604 +
40605 +       if (fixup_eproc_trap (uctx, &trap, 0) == 0)
40606 +       {
40607 +           PRINTF (uctx, DBG_EPROC, "resolve_eproc_trap: could not fixup eproc trap - requeue it\n");
40608 +
40609 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40610 +           if (RING_QUEUE_REALLY_FULL(uctx->uctx_eprocTrapQ))
40611 +           {
40612 +               PRINTF (uctx, DBG_EPROC, "resolve_eproc_trap: queue overflow\n");
40613 +               uctx->uctx_status |= UCTX_EPROC_QUEUE_OVERFLOW;
40614 +           }
40615 +           else
40616 +           {
40617 +               /* Requeue at front to preserve setevent ordering */
40618 +               /* GNAT 7504: Must move fptr before writing over it */
40619 +               (void) RING_QUEUE_ADD_FRONT(uctx->uctx_eprocTrapQ);
40620 +               
40621 +               *RING_QUEUE_FRONT(uctx->uctx_eprocTrapQ, uctx->uctx_eprocTraps) = trap;
40622 +           }
40623 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40624 +
40625 +           return UTS_RESCHEDULE;
40626 +       }
40627 +       
40628 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40629 +    }
40630 +
40631 +    if (uctx->uctx_status & UCTX_EPROC_QUEUE_FULL)
40632 +       user_stop_stopping (uctx, UCTX_EPROC_QUEUE_FULL);
40633 +
40634 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40635 +    return (UTS_FINISHED);
40636 +}
40637 +           
40638 +static int
40639 +resolve_dproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40640 +{
40641 +    unsigned long flags;
40642 +    int res;
40643 +    
40644 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40645 +    while (! RING_QUEUE_EMPTY (uctx->uctx_dprocTrapQ))
40646 +    {
40647 +       ELAN4_DPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_dprocTrapQ, uctx->uctx_dprocTraps);
40648 +       
40649 +       (void) RING_QUEUE_REMOVE (uctx->uctx_dprocTrapQ);
40650 +
40651 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40652 +
40653 +       if ((res = resolve_dproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
40654 +           return (res);
40655 +       
40656 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40657 +    }
40658 +
40659 +    if (uctx->uctx_status & UCTX_DPROC_QUEUE_FULL)
40660 +       user_stop_stopping (uctx, UCTX_DPROC_QUEUE_FULL);
40661 +    
40662 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40663 +    return (UTS_FINISHED);
40664 +}
40665 +
40666 +static int
40667 +resolve_tproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40668 +{
40669 +    unsigned long flags;
40670 +    int res;
40671 +    
40672 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40673 +    while (! RING_QUEUE_EMPTY (uctx->uctx_tprocTrapQ))
40674 +    {
40675 +       ELAN4_TPROC_TRAP trap = *RING_QUEUE_FRONT(uctx->uctx_tprocTrapQ, uctx->uctx_tprocTraps);
40676 +       
40677 +       (void) RING_QUEUE_REMOVE (uctx->uctx_tprocTrapQ);
40678 +
40679 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40680 +
40681 +       if ((res = resolve_tproc_trap (uctx, utrapp, &trap)) != UTS_FINISHED)
40682 +           return (res);
40683 +       
40684 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40685 +    }
40686 +
40687 +    if (uctx->uctx_status & UCTX_TPROC_QUEUE_FULL)
40688 +       user_stop_stopping (uctx, UCTX_TPROC_QUEUE_FULL);
40689 +
40690 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40691 +    return (UTS_FINISHED);
40692 +}
40693 +
40694 +static int
40695 +resolve_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40696 +{
40697 +    unsigned long flags;
40698 +    int i, res;
40699 +
40700 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40701 +    for (i = 0; i < 2; i++)
40702 +       if (uctx->uctx_iprocTrap[i].ut_state == UTS_IPROC_TRAPPED)
40703 +       {
40704 +           uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_RESOLVING;
40705 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
40706 +           
40707 +           if ((res = resolve_iproc_trap (uctx, utrapp, i)) != UTS_FINISHED)
40708 +               return (res);
40709 +           
40710 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40711 +       }
40712 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40713 +    
40714 +    return (UTS_FINISHED);
40715 +}
40716 +
40717 +static int
40718 +resolve_all_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40719 +{
40720 +    int res;
40721 +
40722 +    if ((res = resolve_iproc_traps (uctx, utrapp)) != UTS_FINISHED ||
40723 +       (res = resolve_cproc_traps (uctx, utrapp)) != UTS_FINISHED ||
40724 +       (res = resolve_eproc_traps (uctx, utrapp)) != UTS_FINISHED ||
40725 +       (res = resolve_dproc_traps (uctx, utrapp)) != UTS_FINISHED ||
40726 +       (res = resolve_tproc_traps (uctx, utrapp)) != UTS_FINISHED)
40727 +       return (res);
40728 +
40729 +    if (uctx->uctx_status & UCTX_OVERFLOW_REASONS)
40730 +    {
40731 +       PRINTF (uctx, DBG_TRAP, "resolve_all_traps: overflow reasons %x\n", uctx->uctx_status);
40732 +
40733 +       return (deliver_trap (utrapp, UTS_QUEUE_OVERFLOW, UTS_NOPROC, NULL, uctx->uctx_status));
40734 +    }
40735 +
40736 +    if (uctx->uctx_status & UCTX_ERROR_REASONS)
40737 +       return (deliver_trap (utrapp, UTS_QUEUE_ERROR, UTS_NOPROC, NULL, uctx->uctx_status));
40738 +
40739 +    return (UTS_FINISHED);
40740 +}
40741 +
40742 +static int
40743 +execute_iproc_traps (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40744 +{
40745 +    unsigned long flags;
40746 +    int i;
40747 +
40748 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40749 +    for (i = 0; i < 2; i++)
40750 +       switch (uctx->uctx_iprocTrap[i].ut_state)
40751 +       {
40752 +       case UTS_IPROC_EXECUTE_PACKET:
40753 +           uctx->uctx_iprocTrap[i].ut_state = UTS_IPROC_EXECUTING;
40754 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
40755 +           
40756 +           return (deliver_trap (utrapp, UTS_EXECUTE_PACKET, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i));
40757 +
40758 +       case UTS_IPROC_NETWORK_ERROR:
40759 +           spin_unlock_irqrestore(&uctx->uctx_spinlock, flags);
40760 +           
40761 +           return (deliver_trap (utrapp, UTS_NETWORK_ERROR_TRAP, UTS_IPROC, &uctx->uctx_iprocTrap[i].ut_trap, i));
40762 +       }
40763 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40764 +    
40765 +    return (UTS_FINISHED);
40766 +}
40767 +
40768 +static int
40769 +progress_neterr (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp)
40770 +{
40771 +    unsigned long flags;
40772 +
40773 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40774 +    if (! RING_QUEUE_EMPTY (uctx->uctx_msgQ))
40775 +    {
40776 +       ELAN4_NETERR_MSG msg = *RING_QUEUE_FRONT (uctx->uctx_msgQ, uctx->uctx_msgs);
40777 +       
40778 +       (void) RING_QUEUE_REMOVE (uctx->uctx_msgQ);
40779 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40780 +       
40781 +       return deliver_trap (utrapp, UTS_NETWORK_ERROR_MSG, UTS_NETERR_MSG, &msg, user_location2process (uctx, msg.msg_sender));
40782 +    }
40783 +    
40784 +    if (uctx->uctx_status & UCTX_NETERR_TIMER)
40785 +    {
40786 +       uctx->uctx_status &= ~UCTX_NETERR_TIMER;
40787 +
40788 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40789 +       
40790 +       return deliver_trap (utrapp, UTS_NETWORK_ERROR_TIMER, UTS_NOPROC, NULL);
40791 +    }
40792 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40793 +    
40794 +    return (UTS_FINISHED);
40795 +}
40796 +
40797 +static void
40798 +restart_command_queues (USER_CTXT *uctx)
40799 +{
40800 +    struct list_head *entry;
40801 +
40802 +    ASSERT (SPINLOCK_HELD (&uctx->uctx_spinlock));
40803 +    
40804 +    list_for_each (entry, &uctx->uctx_cqlist) {
40805 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
40806 +       
40807 +       if (ucq->ucq_state == UCQ_NEEDS_RESTART)
40808 +       {
40809 +           ucq->ucq_state = UCQ_RUNNING;
40810 +           
40811 +           elan4_restartcq (uctx->uctx_ctxt.ctxt_dev, ucq->ucq_cq);
40812 +       }
40813 +    }
40814 +}
40815 +
40816 +static int
40817 +restart_dmas (USER_CTXT *uctx)
40818 +{
40819 +    PRINTF (uctx, DBG_TRAP, "restart_dmas: back=%d front=%d\n", uctx->uctx_dmaQ.q_back, uctx->uctx_dmaQ.q_front);
40820 +
40821 +    while (! RING_QUEUE_EMPTY (uctx->uctx_dmaQ))
40822 +    {
40823 +       if (! user_ddcq_check (uctx, 7))
40824 +           return (0);
40825 +
40826 +       user_ddcq_run_dma (uctx, RING_QUEUE_FRONT(uctx->uctx_dmaQ, uctx->uctx_dmas));
40827 +       
40828 +       (void) RING_QUEUE_REMOVE (uctx->uctx_dmaQ);
40829 +    }
40830 +
40831 +    return (1);
40832 +}
40833 +
40834 +static int
40835 +restart_threads (USER_CTXT *uctx)
40836 +{
40837 +    PRINTF (uctx, DBG_TRAP, "restart_threads: back=%d front=%d\n", uctx->uctx_threadQ.q_back, uctx->uctx_threadQ.q_front);
40838 +
40839 +    while (! RING_QUEUE_EMPTY (uctx->uctx_threadQ))
40840 +    {
40841 +       if (! user_ddcq_check (uctx, 7))
40842 +           return (0);
40843 +
40844 +       user_ddcq_run_thread (uctx, RING_QUEUE_FRONT(uctx->uctx_threadQ, uctx->uctx_threads));
40845 +       
40846 +       (void) RING_QUEUE_REMOVE (uctx->uctx_threadQ);
40847 +    }
40848 +
40849 +    return (1);
40850 +}
40851 +
40852 +int
40853 +user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr)
40854 +{
40855 +    PRINTF2 (uctx, DBG_RESUME, "user_resume_eproc_trap: addr=%llx -> %s\n", addr, user_ddcq_check(uctx, 2) ? "success" : "EAGAIN");
40856 +
40857 +    if (! user_ddcq_check (uctx, 2))
40858 +       return (-EAGAIN);
40859 +
40860 +    user_ddcq_setevent (uctx, addr);
40861 +
40862 +    return (0);
40863 +}
40864 +
40865 +int
40866 +user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx)
40867 +{
40868 +    struct list_head *entry;
40869 +    unsigned long flags;
40870 +
40871 +    PRINTF1 (uctx, DBG_RESUME, "user_resume_cproc_trap: indx=%d\n", indx);
40872 +
40873 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40874 +
40875 +    list_for_each (entry, &uctx->uctx_cqlist) {
40876 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
40877 +       
40878 +       if (elan4_cq2idx(ucq->ucq_cq) == indx && ucq->ucq_state == UCQ_STOPPED && !(ucq->ucq_flags & UCQ_SYSTEM))
40879 +       {
40880 +           ucq->ucq_state = UCQ_NEEDS_RESTART;
40881 +           
40882 +           user_signal_trap (uctx);
40883 +
40884 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40885 +           return (0);
40886 +       }
40887 +    }
40888 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40889 +
40890 +    return (-EINVAL);
40891 +}
40892 +
40893 +int
40894 +user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma)
40895 +{
40896 +    unsigned long flags;
40897 +    int res = 0;
40898 +
40899 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40900 +    if (RING_QUEUE_FULL (uctx->uctx_dmaQ))
40901 +       res = -ENOMEM;
40902 +    else
40903 +    {
40904 +       *RING_QUEUE_BACK (uctx->uctx_dmaQ, uctx->uctx_dmas) = *dma;
40905 +       (void) RING_QUEUE_ADD (uctx->uctx_dmaQ);
40906 +
40907 +       user_signal_trap (uctx);
40908 +    }
40909 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40910 +    return (res);
40911 +}
40912 +
40913 +int
40914 +user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs)
40915 +{
40916 +    unsigned long flags;
40917 +    int res = 0;
40918 +
40919 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40920 +    if (RING_QUEUE_FULL (uctx->uctx_threadQ))
40921 +       res = -ENOMEM;
40922 +    else
40923 +    {
40924 +       *RING_QUEUE_BACK (uctx->uctx_threadQ, uctx->uctx_threads) = *regs;
40925 +       (void) RING_QUEUE_ADD (uctx->uctx_threadQ);
40926 +
40927 +       user_signal_trap (uctx);
40928 +    }
40929 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40930 +    return (res);
40931 +}
40932 +
40933 +int
40934 +user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans,
40935 +                       E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap)
40936 +{
40937 +    unsigned long flags;
40938 +    int res = 0;
40939 +
40940 +    if (channel >= 2)
40941 +       return (-EINVAL);
40942 +
40943 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
40944 +    if (uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_STOPPED &&
40945 +       uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_EXECUTING &&
40946 +       uctx->uctx_iprocTrap[channel].ut_state != UTS_IPROC_NETWORK_ERROR)
40947 +       res = -EINVAL;
40948 +    else
40949 +    {
40950 +       ELAN4_IPROC_TRAP *trap = &uctx->uctx_iprocTrap[channel].ut_trap;
40951 +
40952 +       if (trans < trap->tr_numTransactions)
40953 +       {
40954 +           PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> execute\n", trans);
40955 +
40956 +           uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_EXECUTE_PACKET;
40957 +           trap->tr_trappedTrans                  = trans;
40958 +           trap->tr_transactions[trans]           = *hdrp;
40959 +           trap->tr_dataBuffers[trans]            = *datap;
40960 +       }
40961 +       else
40962 +       {
40963 +           PRINTF1 (uctx, DBG_RESUME, "user_resume_iproc_trap: trans=%d -> running\n", trans);
40964 +
40965 +           uctx->uctx_iprocTrap[channel].ut_state = UTS_IPROC_RUNNING;
40966 +       
40967 +           user_stop_nacking (uctx, channel ? UCTX_IPROC_CH0_TRAPPED : UCTX_IPROC_CH1_TRAPPED);
40968 +       }
40969 +    }
40970 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
40971 +    
40972 +    return (res);
40973 +}
40974 +
40975 +int
40976 +__categorise_command (E4_uint64 command, int *cmdSize)
40977 +{
40978 +    switch (command & 0x3)
40979 +    {
40980 +    case RUN_THREAD_CMD: *cmdSize = 7; break;
40981 +       
40982 +    default:
40983 +       switch (command & 0x7)
40984 +       {
40985 +       case WRITE_DWORD_CMD: *cmdSize = 2; break;
40986 +       case ADD_DWORD_CMD:   *cmdSize = 2; break;
40987 +           
40988 +       default:
40989 +           switch (command & 0xF)
40990 +           {
40991 +           case OPEN_STEN_PKT_CMD:
40992 +               *cmdSize = 1;
40993 +               return 1;
40994 +               
40995 +           case COPY64_CMD:    *cmdSize = 2; break;
40996 +           case GUARD_CMD:     *cmdSize = 1; break;
40997 +           case INTERRUPT_CMD: *cmdSize = 1; break;
40998 +           case RUN_DMA_CMD:   *cmdSize = 7; break;
40999 +               
41000 +           default:
41001 +               switch (command & 0x1f)
41002 +               {
41003 +               case SEND_TRANS_CMD:
41004 +                   *cmdSize = 2 + (((command >> 16) & TR_SIZE_MASK) >> TR_SIZE_SHIFT);
41005 +                   return 2;
41006 +                   
41007 +               case SET_EVENT_CMD:    *cmdSize = 1; break;
41008 +               case SET_EVENTN_CMD:   *cmdSize = 2; break;
41009 +               case WAIT_EVENT_CMD:   *cmdSize = 4; break;
41010 +
41011 +               default:
41012 +                   switch (command & 0x3f)
41013 +                   {
41014 +                   case NOP_CMD:            *cmdSize = 1; break;
41015 +                   case MAKE_EXT_CLEAN_CMD: *cmdSize = 1; break;
41016 +                   default:
41017 +                       return 3;
41018 +                   }
41019 +                   break;
41020 +               }
41021 +           }
41022 +       }
41023 +    }
41024 +
41025 +    return 0;
41026 +}
41027 +
41028 +int
41029 +__whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize)
41030 +{
41031 +    /* Move onto next command */
41032 +    while (cmdSize-- && (*commandPtr) != insertPtr)
41033 +       *commandPtr = ((*commandPtr) & ~(cqSize-1)) | (((*commandPtr) + sizeof (E4_uint64)) & (cqSize-1));
41034 +
41035 +    return cmdSize == -1;
41036 +}
41037 +
41038 +int
41039 +user_neterr_sten (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop)
41040 +{
41041 +    ELAN4_DEV        *dev   = uctx->uctx_ctxt.ctxt_dev;
41042 +    int                      found = 0;
41043 +    struct list_head *el;
41044 +
41045 +    user_swapout (uctx, UCTX_NETERR_FIXUP);
41046 +
41047 +    kmutex_lock (&uctx->uctx_cqlock);
41048 +    list_for_each (el, &uctx->uctx_cqlist) {
41049 +       USER_CQ *ucq = list_entry (el, USER_CQ, ucq_link);
41050 +       
41051 +       if ((ucq->ucq_cq->cq_perm & CQ_STENEnableBit) != 0)
41052 +       {
41053 +           sdramaddr_t   cqdesc       = dev->dev_cqaddr + (elan4_cq2num(ucq->ucq_cq) * sizeof (E4_CommandQueueDesc));
41054 +           E4_uint64     queuePtrs    = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_QueuePtrs));
41055 +           sdramaddr_t   insertPtr    = (queuePtrs & CQ_PtrMask);
41056 +           sdramaddr_t   commandPtr   = CQ_CompletedPtr (queuePtrs);
41057 +           unsigned int  cqSize       = CQ_Size ((queuePtrs >> CQ_SizeShift) & CQ_SizeMask);
41058 +           E4_uint64     openCommand  = 0;
41059 +
41060 +           if (dev->dev_devinfo.dev_revision_id != PCI_REVISION_ID_ELAN4_REVA && (queuePtrs & CQ_RevB_ReorderingQueue))
41061 +           {
41062 +               E4_uint32 oooMask = elan4_sdram_readl (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_HoldingValue));
41063 +               
41064 +               for (; (oooMask & 1) != 0; oooMask >>= 1)
41065 +                   insertPtr = (insertPtr & ~(cqSize-1)) | ((insertPtr + sizeof (E4_uint64)) & (cqSize-1));
41066 +           }
41067 +
41068 +           while (commandPtr != insertPtr)
41069 +           {
41070 +               E4_uint64    command = elan4_sdram_readq (dev, commandPtr);
41071 +               sdramaddr_t  identifyPtr;
41072 +               unsigned int cmdSize;
41073 +               
41074 +               switch (__categorise_command (command, &cmdSize))
41075 +               {
41076 +               case 0:
41077 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
41078 +                   break;
41079 +                   
41080 +               case 1:
41081 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d OPEN %llx\n", elan4_cq2num (ucq->ucq_cq), command);
41082 +                   
41083 +                   if ((command >> 32) == vp)
41084 +                       openCommand = command;
41085 +
41086 +                   (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
41087 +                   break;
41088 +                   
41089 +               case 2:
41090 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cq=%d SENDTRANS %llx\n", elan4_cq2num (ucq->ucq_cq), command);
41091 +                   
41092 +                   if (openCommand == 0)
41093 +                       (void) __whole_command (&commandPtr, insertPtr, cqSize, cmdSize);
41094 +                   else
41095 +                   {
41096 +                       switch ((command >> 16) & (TR_OPCODE_MASK | TR_SIZE_MASK))
41097 +                       {
41098 +                       case TR_IDENTIFY  & (TR_OPCODE_MASK | TR_SIZE_MASK):
41099 +                       case TR_REMOTEDMA & (TR_OPCODE_MASK | TR_SIZE_MASK):
41100 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_IDENTIFY/TR_REMOTEDMA\n");
41101 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + sizeof (E4_uint64)) & (cqSize-1));
41102 +                           break;
41103 +                           
41104 +                       case TR_SETEVENT_IDENTIFY & (TR_OPCODE_MASK | TR_SIZE_MASK):
41105 +                       case TR_INPUT_Q_COMMIT    & (TR_OPCODE_MASK | TR_SIZE_MASK):
41106 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_SETEVENT_IDENTIFY/TR_INPUT_Q_COMMIT\n");
41107 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 2*sizeof (E4_uint64)) & (cqSize-1));
41108 +                           break;
41109 +                           
41110 +                       case TR_ADDWORD & (TR_OPCODE_MASK | TR_SIZE_MASK):
41111 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_ADDWORD\n");
41112 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 3*sizeof (E4_uint64)) & (cqSize-1));
41113 +                           break;
41114 +                           
41115 +                       case TR_TESTANDWRITE & (TR_OPCODE_MASK | TR_SIZE_MASK):
41116 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: TR_TESTANDWRITE\n");
41117 +                           identifyPtr = (commandPtr & ~(cqSize-1)) | ((commandPtr + 4*sizeof (E4_uint64)) & (cqSize-1));
41118 +                           break;
41119 +                           
41120 +                       default:
41121 +                           identifyPtr = 0;
41122 +                       }
41123 +                       
41124 +                       if (! __whole_command (&commandPtr, insertPtr, cqSize, cmdSize))
41125 +                       {
41126 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: not whole command\n");
41127 +                           openCommand = 0;
41128 +                       }
41129 +
41130 +                       else if (identifyPtr)
41131 +                       {
41132 +                           E4_uint64 tcookie = elan4_sdram_readq (dev, identifyPtr);
41133 +                           
41134 +                           PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie=%llx [%llx]\n", tcookie, cookie);
41135 +                           
41136 +                           if (tcookie == cookie)
41137 +                           {
41138 +                               unsigned int vchan = (openCommand >> 4) & 0x1f;
41139 +                               
41140 +                               PRINTF (uctx, DBG_NETERR, "user_neterr_sten: cookie matches - vchan=%d\n", vchan);
41141 +                               
41142 +                               if (! waitforeop)
41143 +                               {
41144 +                                   /* Alter the CQ_AckBuffer for this channel to indicate an
41145 +                                    * ack was received */
41146 +                                   E4_uint64 value  = elan4_sdram_readq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers));
41147 +                                   E4_uint64 nvalue = ((value & ~((E4_uint64)0xf << ((vchan & 0xf) << 2))) |
41148 +                                                       ((E4_uint64) PackOk << ((vchan & 0xf) << 2)));
41149 +                                   
41150 +                                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: CQ_AckBuffers %llx -> %llx\n", value, nvalue);
41151 +                                   
41152 +                                   elan4_sdram_writeq (dev, cqdesc + offsetof (E4_CommandQueueDesc, CQ_AckBuffers), nvalue);
41153 +                                   pioflush_sdram (dev);
41154 +                               }
41155 +                               
41156 +                               found++;
41157 +                           }
41158 +                           openCommand = 0;
41159 +                       }
41160 +                       
41161 +                       if ((command >> 16) & TR_LAST_AND_SEND_ACK)
41162 +                           openCommand = 0;
41163 +                   }
41164 +                   break;
41165 +                   
41166 +               case 3:
41167 +                   PRINTF (uctx, DBG_NETERR, "user_neterr_sten: invalid command %llx\n", command);
41168 +                   kmutex_unlock (&uctx->uctx_cqlock);
41169 +                   return -EINVAL;
41170 +               }
41171 +               
41172 +           }
41173 +       }
41174 +    }
41175 +    kmutex_unlock (&uctx->uctx_cqlock);
41176 +
41177 +    user_swapin (uctx, UCTX_NETERR_FIXUP);
41178 +
41179 +    return found;
41180 +}
41181 +
41182 +int
41183 +user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop)
41184 +{
41185 +    unsigned long flags;
41186 +    int found = 0;
41187 +    int idx;
41188 +
41189 +    user_swapout (uctx, UCTX_NETERR_FIXUP);
41190 +
41191 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41192 +    RING_QUEUE_ITERATE (uctx->uctx_dmaQ, idx) {
41193 +       E4_DMA *dma = &uctx->uctx_dmas[idx];
41194 +
41195 +       if (dma->dma_vproc == vp && dma->dma_cookie == cookie)
41196 +       {
41197 +           PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness");
41198 +
41199 +           if (! waitforeop) 
41200 +           {
41201 +               dma->dma_dstEvent = 0;
41202 +               dma->dma_typeSize = DMA_ShMemWrite | DMA_Context (dma->dma_typeSize);
41203 +           }
41204 +           found++;
41205 +       }
41206 +    }
41207 +
41208 +    RING_QUEUE_ITERATE (uctx->uctx_dprocTrapQ, idx) {
41209 +       ELAN4_DPROC_TRAP *trap = &uctx->uctx_dprocTraps[idx];
41210 +
41211 +       if (trap->tr_desc.dma_vproc == vp && trap->tr_desc.dma_cookie == cookie)
41212 +       {
41213 +           PRINTF (uctx, DBG_NETERR, "user_neterr_dma: dmaTrapQ matches %s\n", waitforeop ? "waitforeop" : "remove remoteness");
41214 +
41215 +           if (! waitforeop) 
41216 +           {
41217 +               trap->tr_desc.dma_dstEvent = 0;
41218 +               trap->tr_desc.dma_typeSize = DMA_ShMemWrite | DMA_Context (trap->tr_desc.dma_typeSize);
41219 +           }
41220 +           found++;
41221 +       }
41222 +    }
41223 +
41224 +    /* The device driver command queue should be empty at this point ! */
41225 +    if (user_ddcq_flush (uctx) == 0)
41226 +       found = -EAGAIN;
41227 +
41228 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41229 +
41230 +    /* The device driver command queue should be empty at this point ! */
41231 +    if (user_ddcq_flush (uctx) == 0)
41232 +       found = -EAGAIN;
41233 +    
41234 +    user_swapin (uctx, UCTX_NETERR_FIXUP);
41235 +
41236 +    return found;
41237 +}
41238 +
41239 +int
41240 +user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks)
41241 +{
41242 +    unsigned long entered = jiffies;
41243 +    unsigned int  need_reenter = 0;
41244 +    unsigned long flags;
41245 +    int           res;
41246 +    int           tbl;
41247 +
41248 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41249 +
41250 +    PRINTF1 (uctx, DBG_TRAP, "user_trap_handler: entered state=%d\n", uctx->uctx_trap_state);
41251 +
41252 +    uctx->uctx_trap_count++;
41253 +    
41254 +    for (;;)
41255 +    {
41256 +       if (uctx->uctx_status & UCTX_SWAPPED_REASONS)
41257 +       {
41258 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting on swapped reasons\n");
41259 +           
41260 +           res = UTS_FINISHED;
41261 +           goto no_more_to_do;
41262 +       }
41263 +
41264 +       if ((long) (jiffies - entered) > HZ)
41265 +       {
41266 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting for reschedule\n");
41267 +           res = UTS_RESCHEDULE;
41268 +           goto no_more_to_do;
41269 +       }
41270 +       
41271 +       switch (uctx->uctx_trap_state)
41272 +       {
41273 +       case UCTX_TRAP_ACTIVE:
41274 +           uctx->uctx_trap_state = UCTX_TRAP_SLEEPING;
41275 +           
41276 +           if (nticks == 0 || need_reenter || kcondvar_timedwaitsig (&uctx->uctx_wait, &uctx->uctx_spinlock, &flags, lbolt + nticks) != CV_RET_NORMAL)
41277 +           {
41278 +               PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: exiting by kcondvar_timedwaitsig\n");
41279 +
41280 +               res = UTS_FINISHED;
41281 +               goto no_more_to_do;
41282 +           }
41283 +
41284 +           /* Have slept above, so resample entered */
41285 +           entered = jiffies;
41286 +           
41287 +           uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
41288 +           continue;
41289 +
41290 +       case UCTX_TRAP_IDLE:
41291 +       case UCTX_TRAP_SIGNALLED:
41292 +           uctx->uctx_trap_state = UCTX_TRAP_ACTIVE;
41293 +           break;
41294 +       }
41295 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41296 +
41297 +       PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: resolve traps - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
41298 +
41299 +       switch ((res = resolve_all_traps (uctx, utrapp)))
41300 +       {
41301 +       case UTS_FINISHED:
41302 +           break;
41303 +           
41304 +       case UTS_RESCHEDULE:
41305 +           need_reenter++;
41306 +           break;
41307 +
41308 +       default:
41309 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41310 +           goto no_more_to_do;
41311 +       }
41312 +
41313 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41314 +       if (! user_ddcq_flush (uctx))
41315 +       {
41316 +           PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq not flushed - re-enter\n");
41317 +           need_reenter++;
41318 +           continue;
41319 +       }
41320 +       spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41321 +
41322 +       for(tbl=0; tbl < NUM_HASH_TABLES; tbl++)
41323 +           if (uctx->uctx_ctxt.shuffle_needed[tbl]) 
41324 +               elan4mmu_do_shuffle (&uctx->uctx_ctxt, tbl);
41325 +
41326 +       if ((res = progress_neterr (uctx, utrapp)) != UTS_FINISHED)
41327 +       {
41328 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41329 +           goto no_more_to_do;
41330 +       }
41331 +
41332 +       if ((res = execute_iproc_traps (uctx, utrapp)) != UTS_FINISHED)
41333 +       {
41334 +           spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41335 +           goto no_more_to_do;
41336 +       }
41337 +
41338 +       PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: restart items - state=%d status=%x\n", uctx->uctx_trap_state, uctx->uctx_status);
41339 +
41340 +       spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41341 +       if (UCTX_RUNNABLE (uctx))
41342 +       {
41343 +           restart_command_queues (uctx);
41344 +
41345 +           if (! restart_threads (uctx) || ! restart_dmas (uctx))
41346 +           {
41347 +               PRINTF0 (uctx, DBG_TRAP, "user_trap_handler: ddcq full - re-enter\n");
41348 +               need_reenter++;
41349 +           }
41350 +       }
41351 +    }
41352 + no_more_to_do:
41353 +    uctx->uctx_trap_state = UCTX_TRAP_IDLE;
41354 +
41355 +    /*
41356 +     * Always ensure that the command queue is flushed with a flow control
41357 +     * write, so that on the next trap we (hopefully) find it empty and so
41358 +     * can immediately restart the context.   Also if we need to be re-enter
41359 +     * the trap handler and don't have an interrupt outstanding, then issue
41360 +     * one now.
41361 +     */
41362 +    user_ddcq_flush (uctx);
41363 +    if (need_reenter && uctx->uctx_ddcq_intr == 0)
41364 +    {
41365 +       uctx->uctx_ddcq_intr++;
41366 +       user_ddcq_intr (uctx);
41367 +    }
41368 +
41369 +    if (--uctx->uctx_trap_count == 0 && (uctx->uctx_status & UCTX_SWAPPING))
41370 +       kcondvar_wakeupall (&uctx->uctx_wait, &uctx->uctx_spinlock);
41371 +
41372 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41373 +
41374 +    /* Should update the user trap area in this case as deliver_trap()
41375 +     * has not been called 
41376 +     */
41377 +    if (res == UTS_RESCHEDULE)
41378 +       put_user (res, &utrapp->ut_type);
41379 +
41380 +    PRINTF2 (uctx, DBG_TRAP, "user_trap_handler: finished state=%d res=%d\n", uctx->uctx_trap_state, res);
41381 +
41382 +    return (res == UTS_EFAULT ? -EFAULT : 0);
41383 +}
41384 +
41385 +USER_CQ *
41386 +user_alloccq (USER_CTXT *uctx, unsigned cqsize, unsigned perm, unsigned uflags)
41387 +{
41388 +    USER_CQ      *ucq;
41389 +    unsigned long flags;
41390 +
41391 +    KMEM_ZALLOC (ucq, USER_CQ *, sizeof (USER_CQ), 1);
41392 +
41393 +    if (ucq == (USER_CQ *) NULL)
41394 +       return ERR_PTR(-ENOMEM);
41395 +    
41396 +    /* NOTE - do not allow the user to create high-priority queues as we only flush through the low-priority run queues */
41397 +    if ((ucq->ucq_cq = elan4_alloccq (&uctx->uctx_ctxt, cqsize, perm, (uflags & UCQ_REORDER) ? CQ_Reorder : 0)) == NULL)
41398 +    {
41399 +       KMEM_FREE (ucq, sizeof (USER_CQ));
41400 +       
41401 +       PRINTF2 (uctx, DBG_CQ, "user_alloccq: failed elan4_allocq cqsize %d uflags %x\n", cqsize, uflags);
41402 +
41403 +       return ERR_PTR(-ENOMEM);
41404 +    }
41405 +    
41406 +    atomic_set (&ucq->ucq_ref, 1);
41407 +
41408 +    ucq->ucq_state = UCQ_RUNNING;
41409 +    ucq->ucq_flags = uflags;
41410 +    
41411 +    PRINTF3 (uctx, DBG_CQ, "user_alloccq: ucq=%p idx=%d cqnum=%d\n", ucq, elan4_cq2idx (ucq->ucq_cq), elan4_cq2num(ucq->ucq_cq));
41412 +
41413 +    /* chain it onto the context */
41414 +    kmutex_lock (&uctx->uctx_cqlock);
41415 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41416 +    list_add (&ucq->ucq_link, &uctx->uctx_cqlist);
41417 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41418 +    kmutex_unlock (&uctx->uctx_cqlock);
41419 +
41420 +    return (ucq);
41421 +}
41422 +
41423 +USER_CQ *
41424 +user_findcq (USER_CTXT *uctx, unsigned idx)
41425 +{
41426 +    struct list_head *entry;
41427 +    unsigned long flags;
41428 +
41429 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41430 +    list_for_each (entry, &uctx->uctx_cqlist) {
41431 +       USER_CQ *ucq = list_entry (entry, USER_CQ, ucq_link);
41432 +
41433 +       if (elan4_cq2idx(ucq->ucq_cq) == idx)
41434 +       {
41435 +           atomic_inc (&ucq->ucq_ref);
41436 +           spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41437 +           return (ucq);
41438 +       }
41439 +    }
41440 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41441 +
41442 +    return (NULL);
41443 +}
41444 +
41445 +void
41446 +user_dropcq (USER_CTXT *uctx, USER_CQ *ucq)
41447 +{
41448 +    unsigned long flags;
41449 +
41450 +    PRINTF2 (uctx, DBG_CQ, "user_dropcq: ucq=%p ref=%d\n", ucq, atomic_read (&ucq->ucq_ref));
41451 +
41452 +    kmutex_lock (&uctx->uctx_cqlock);
41453 +    if (! atomic_dec_and_test (&ucq->ucq_ref))
41454 +    {
41455 +       kmutex_unlock (&uctx->uctx_cqlock);
41456 +       return;
41457 +    }
41458 +
41459 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41460 +    list_del (&ucq->ucq_link);
41461 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41462 +
41463 +    kmutex_unlock (&uctx->uctx_cqlock);
41464 +
41465 +    elan4_freecq (&uctx->uctx_ctxt, ucq->ucq_cq);
41466 +
41467 +    KMEM_FREE (ucq, sizeof (USER_CQ));
41468 +}
41469 +
41470 +int
41471 +user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, 
41472 +                       unsigned ntproc_traps, unsigned nthreads, unsigned ndmas)
41473 +{
41474 +    ELAN4_DPROC_TRAP *dprocs;
41475 +    ELAN4_EPROC_TRAP *eprocs;
41476 +    ELAN4_TPROC_TRAP *tprocs;
41477 +    E4_DMA           *dmas;
41478 +    E4_ThreadRegs    *threads;
41479 +    ELAN4_NETERR_MSG *msgs;
41480 +    unsigned long     flags;
41481 +
41482 +    int nmsgs = NETERR_MSGS;
41483 +
41484 +    /* bounds check the values that have been passed in */
41485 +    if (ndproc_traps < 2 || ndproc_traps > 10000 ||
41486 +       ntproc_traps < 1 || ntproc_traps > 100   ||
41487 +       neproc_traps < 6 || neproc_traps > 10000 ||
41488 +       nthreads     < 2 || nthreads     > 10000 ||
41489 +       ndmas        < 2 || ndmas        > 10000)
41490 +       return -EINVAL;
41491 +
41492 +    if (uctx->uctx_dmas != NULL)
41493 +       return -EBUSY;
41494 +
41495 +    KMEM_ZALLOC (dprocs, ELAN4_DPROC_TRAP *, ndproc_traps * sizeof (ELAN4_DPROC_TRAP), 1);
41496 +    KMEM_ZALLOC (eprocs, ELAN4_EPROC_TRAP *, neproc_traps * sizeof (ELAN4_EPROC_TRAP), 1);
41497 +    KMEM_ZALLOC (tprocs, ELAN4_TPROC_TRAP *, ntproc_traps * sizeof (ELAN4_TPROC_TRAP), 1);
41498 +    KMEM_ZALLOC (threads, E4_ThreadRegs *, nthreads * sizeof (E4_ThreadRegs), 1);
41499 +    KMEM_ZALLOC (dmas, E4_DMA *, ndmas * sizeof (E4_DMA), 1);
41500 +    KMEM_ZALLOC (msgs, ELAN4_NETERR_MSG *, nmsgs * sizeof (ELAN4_NETERR_MSG), 1);
41501 +
41502 +    if (dprocs == NULL || eprocs == NULL || tprocs == NULL || dmas == NULL || threads == NULL || msgs == NULL)
41503 +    {
41504 +       if (dprocs != NULL) KMEM_FREE (dprocs, ndproc_traps * sizeof (ELAN4_DPROC_TRAP));
41505 +       if (eprocs != NULL) KMEM_FREE (eprocs, neproc_traps * sizeof (ELAN4_EPROC_TRAP));
41506 +       if (tprocs != NULL) KMEM_FREE (tprocs, ntproc_traps * sizeof (ELAN4_TPROC_TRAP));
41507 +       if (threads != NULL) KMEM_FREE (threads, nthreads * sizeof (E4_ThreadRegs));
41508 +       if (dmas != NULL) KMEM_FREE (dmas, ndmas * sizeof (E4_DMA));
41509 +       if (msgs != NULL) KMEM_FREE (msgs, nmsgs * sizeof (ELAN4_NETERR_MSG));
41510 +       
41511 +       return -ENOMEM;
41512 +    }
41513 +    
41514 +    spin_lock_irqsave (&uctx->uctx_spinlock, flags);
41515 +
41516 +    uctx->uctx_dprocTraps = dprocs;
41517 +    uctx->uctx_eprocTraps = eprocs;
41518 +    uctx->uctx_tprocTraps = tprocs;
41519 +    uctx->uctx_threads    = threads;
41520 +    uctx->uctx_dmas       = dmas;
41521 +    uctx->uctx_msgs       = msgs;
41522 +
41523 +    RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, ndproc_traps, 1 /* 1 for 2nd dma */);
41524 +    RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, ntproc_traps, 0);
41525 +    RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, neproc_traps, 5 /* 1 for command, 2 for dma, 2 for inputter */);
41526 +    RING_QUEUE_INIT (uctx->uctx_threadQ,    nthreads,     1);
41527 +    RING_QUEUE_INIT (uctx->uctx_dmaQ,       ndmas,        1);
41528 +    RING_QUEUE_INIT (uctx->uctx_msgQ,       nmsgs,        0);
41529 +
41530 +    spin_unlock_irqrestore (&uctx->uctx_spinlock, flags);
41531 +    
41532 +    return 0;
41533 +}
41534 +
41535 +USER_CTXT *
41536 +user_alloc (ELAN4_DEV *dev)
41537 +{
41538 +    USER_CTXT *uctx;
41539 +    int res;
41540 +    int i;
41541 +
41542 +    /* Allocate and initialise the context private data */
41543 +    KMEM_ZALLOC (uctx, USER_CTXT *, sizeof  (USER_CTXT), 1);
41544 +
41545 +    if (uctx == NULL)
41546 +       return ERR_PTR(-ENOMEM);
41547 +
41548 +    if (elan4_get_position (dev, &uctx->uctx_position) == ELAN_POS_UNKNOWN)
41549 +    {
41550 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
41551 +       return ERR_PTR(-EAGAIN);
41552 +    }
41553 +    
41554 +    if ((res = elan4_insertctxt (dev, &uctx->uctx_ctxt, &user_trap_ops)) != 0)
41555 +    {
41556 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
41557 +       return ERR_PTR(res);
41558 +    }
41559 +
41560 +    if (! user_ioproc_enabled)
41561 +       uctx->uctx_ctxt.ctxt_features |= ELAN4_FEATURE_NO_IOPROC | ELAN4_FEATURE_PIN_DOWN;
41562 +    if (! user_pagefault_enabled)
41563 +       uctx->uctx_ctxt.ctxt_features |= ELAN4_FEATURE_NO_PAGEFAULT;
41564 +
41565 +    KMEM_GETPAGES (uctx->uctx_upage, ELAN4_USER_PAGE *, btopr (sizeof (ELAN4_USER_PAGE)), 1);
41566 +    if (uctx->uctx_upage == NULL)
41567 +    {
41568 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
41569 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
41570 +       return ERR_PTR(-ENOMEM);
41571 +    }
41572 +    
41573 +    if ((uctx->uctx_trampoline = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE)) == 0)
41574 +    {
41575 +       KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
41576 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
41577 +
41578 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
41579 +       return ERR_PTR(-ENOMEM);
41580 +    }
41581 +    
41582 +    if ((uctx->uctx_routetable = elan4_alloc_routetable (dev, 4 /* 512 << 4 == 8192 entries */)) == NULL)
41583 +    {
41584 +       elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE);
41585 +       KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
41586 +       elan4_removectxt (dev, &uctx->uctx_ctxt);
41587 +
41588 +       KMEM_FREE (uctx, sizeof (USER_CTXT));
41589 +       return ERR_PTR(-ENOMEM);
41590 +    }
41591 +
41592 +    elan4_set_routetable (&uctx->uctx_ctxt, uctx->uctx_routetable);
41593 +
41594 +    /* initialise the trap and swap queues to be really full */
41595 +    RING_QUEUE_INIT (uctx->uctx_dprocTrapQ, 0, 1);
41596 +    RING_QUEUE_INIT (uctx->uctx_tprocTrapQ, 0, 1);
41597 +    RING_QUEUE_INIT (uctx->uctx_eprocTrapQ, 0, 1);
41598 +    RING_QUEUE_INIT (uctx->uctx_threadQ, 0, 1);
41599 +    RING_QUEUE_INIT (uctx->uctx_dmaQ, 0, 1);
41600 +
41601 +    INIT_LIST_HEAD (&uctx->uctx_cent_list);
41602 +    INIT_LIST_HEAD (&uctx->uctx_vpseg_list);
41603 +    INIT_LIST_HEAD (&uctx->uctx_cqlist);
41604 +
41605 +    uctx->uctx_haltop.op_function = user_flush;
41606 +    uctx->uctx_haltop.op_arg      = uctx;
41607 +    uctx->uctx_haltop.op_mask     = INT_Halted|INT_Discarding;
41608 +
41609 +    uctx->uctx_dma_flushop.op_function = user_flush_dmas;
41610 +    uctx->uctx_dma_flushop.op_arg      = uctx;
41611 +
41612 +    kmutex_init (&uctx->uctx_vpseg_lock);
41613 +    kmutex_init (&uctx->uctx_cqlock);
41614 +    kmutex_init (&uctx->uctx_rgnmutex);
41615 +
41616 +    spin_lock_init (&uctx->uctx_spinlock);
41617 +    spin_lock_init (&uctx->uctx_rgnlock);
41618 +    spin_lock_init (&uctx->uctx_fault_lock);
41619 +
41620 +    kcondvar_init (&uctx->uctx_wait);
41621 +
41622 +    if ((uctx->uctx_ddcq = user_alloccq (uctx, CQ_Size1K, CQ_EnableAllBits, UCQ_SYSTEM)) == NULL)
41623 +    {
41624 +       user_free (uctx);
41625 +       return ERR_PTR(-ENOMEM);
41626 +    }
41627 +
41628 +    uctx->uctx_trap_count = 0;
41629 +    uctx->uctx_trap_state = UCTX_TRAP_IDLE;
41630 +    uctx->uctx_status     = 0 /* UCTX_DETACHED | UCTX_SWAPPED | UCTX_STOPPED */;
41631 +
41632 +    init_timer (&uctx->uctx_int_timer);
41633 +
41634 +    uctx->uctx_int_timer.function = user_signal_timer;
41635 +    uctx->uctx_int_timer.data     = (unsigned long) uctx;
41636 +    uctx->uctx_int_start          = jiffies;
41637 +    uctx->uctx_int_count          = 0;
41638 +    uctx->uctx_int_delay          = 0;
41639 +
41640 +    init_timer (&uctx->uctx_shuffle_timer);
41641 +
41642 +    uctx->uctx_shuffle_timer.function = user_signal_timer;
41643 +    uctx->uctx_shuffle_timer.data     = (unsigned long) uctx;
41644 +
41645 +
41646 +    init_timer (&uctx->uctx_neterr_timer);
41647 +    uctx->uctx_neterr_timer.function = user_neterr_timer;
41648 +    uctx->uctx_neterr_timer.data     = (unsigned long) uctx;
41649 +
41650 +    uctx->uctx_upage->upage_ddcq_completed = 0;
41651 +    uctx->uctx_ddcq_completed              = 0;
41652 +    uctx->uctx_ddcq_insertcnt              = 0;
41653 +
41654 +    uctx->uctx_num_fault_save = num_fault_save;
41655 +    if (uctx->uctx_num_fault_save) 
41656 +    {  
41657 +       KMEM_ZALLOC (uctx->uctx_faults, FAULT_SAVE *, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save), 1);
41658 +        if ( uctx->uctx_faults == NULL) 
41659 +       {
41660 +           user_free (uctx);
41661 +           return ERR_PTR(-ENOMEM);
41662 +        }
41663 +    
41664 +        for (i = 0; i < uctx->uctx_num_fault_save; i++)
41665 +           uctx->uctx_faults[i].next = (i == (uctx->uctx_num_fault_save-1) ? NULL : &uctx->uctx_faults[i+1]);
41666 +
41667 +    }
41668 +    uctx->uctx_fault_list = uctx->uctx_faults;
41669 +
41670 +    return (uctx);
41671 +}
41672 +
41673 +void
41674 +user_free (USER_CTXT *uctx)
41675 +{
41676 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
41677 +
41678 +    user_swapout (uctx, UCTX_EXITING);
41679 +
41680 +    /* Detach from all input contexts */
41681 +    user_detach (uctx, NULL);
41682 +
41683 +    /* since we're single threaded here - (called from close()) */
41684 +    /* we don't need to hold the lock to drop the command queues */
41685 +    /* since they cannot be mapped into user space */
41686 +    while (! list_empty (&uctx->uctx_cqlist))
41687 +       user_dropcq (uctx, list_entry (uctx->uctx_cqlist.next, USER_CQ, ucq_link));
41688 +
41689 +    /* Free off all of vpseg_list */
41690 +    kmutex_lock (&uctx->uctx_vpseg_lock);
41691 +    while (! list_empty (&uctx->uctx_vpseg_list))
41692 +       user_remove_vpseg(uctx, list_entry (uctx->uctx_vpseg_list.next, USER_VPSEG, vps_link));
41693 +    kmutex_unlock (&uctx->uctx_vpseg_lock);
41694 +    
41695 +    if (timer_pending (&uctx->uctx_int_timer))
41696 +       del_timer_sync (&uctx->uctx_int_timer);
41697 +    
41698 +    if (timer_pending (&uctx->uctx_shuffle_timer))
41699 +       del_timer_sync (&uctx->uctx_shuffle_timer);
41700 +
41701 +    if (timer_pending (&uctx->uctx_neterr_timer))
41702 +       del_timer_sync (&uctx->uctx_neterr_timer);
41703 +
41704 +    if (uctx->uctx_dprocTraps)
41705 +       KMEM_FREE (uctx->uctx_dprocTraps, uctx->uctx_dprocTrapQ.q_size * sizeof (ELAN4_DPROC_TRAP));
41706 +    if (uctx->uctx_tprocTraps)
41707 +       KMEM_FREE (uctx->uctx_tprocTraps, uctx->uctx_tprocTrapQ.q_size * sizeof (ELAN4_TPROC_TRAP));
41708 +    if (uctx->uctx_eprocTraps)
41709 +       KMEM_FREE (uctx->uctx_eprocTraps, uctx->uctx_eprocTrapQ.q_size * sizeof (ELAN4_EPROC_TRAP));
41710 +    if (uctx->uctx_dmas)
41711 +       KMEM_FREE (uctx->uctx_dmas, uctx->uctx_dmaQ.q_size * sizeof (E4_DMA));
41712 +    if (uctx->uctx_msgs)
41713 +       KMEM_FREE (uctx->uctx_msgs, NETERR_MSGS * sizeof (ELAN4_NETERR_MSG));
41714 +    if (uctx->uctx_threads)
41715 +       KMEM_FREE (uctx->uctx_threads, uctx->uctx_threadQ.q_size * sizeof (E4_ThreadRegs));
41716 +    if (uctx->uctx_faults)
41717 +       KMEM_FREE (uctx->uctx_faults, (sizeof(FAULT_SAVE) * uctx->uctx_num_fault_save));
41718 +
41719 +    if (uctx->uctx_intcookie_table)
41720 +       intcookie_free_table (uctx->uctx_intcookie_table);
41721 +
41722 +    elan4_set_routetable (&uctx->uctx_ctxt, NULL);
41723 +    elan4_free_routetable (dev, uctx->uctx_routetable);
41724 +
41725 +    /* Free off all USER_RGNs */
41726 +    user_freergns(uctx);
41727 +
41728 +    elan4_sdram_free (dev, uctx->uctx_trampoline, SDRAM_PAGE_SIZE);
41729 +
41730 +    /* Clear the PG_Reserved bit before free to avoid a memory leak */
41731 +    ClearPageReserved(pte_page(*find_pte_kernel((unsigned long) uctx->uctx_upage)));
41732 +    KMEM_FREEPAGES (uctx->uctx_upage, btopr (sizeof (ELAN4_USER_PAGE)));
41733 +
41734 +    elan4_removectxt (dev, &uctx->uctx_ctxt);
41735 +
41736 +    kcondvar_destroy (&uctx->uctx_wait);
41737 +
41738 +    spin_lock_destroy (&uctx->uctx_rgnlock);
41739 +    spin_lock_destroy (&uctx->uctx_spinlock);
41740 +
41741 +    kmutex_destroy (&uctx->uctx_rgnmutex);
41742 +    kmutex_destroy (&uctx->uctx_cqlock);
41743 +    kmutex_destroy (&uctx->uctx_vpseg_lock);
41744 +
41745 +    KMEM_FREE (uctx, sizeof (USER_CTXT));
41746 +}
41747 +
41748 +/*
41749 + * Local variables:
41750 + * c-file-style: "stroustrup"
41751 + * End:
41752 + */
41753 diff -urN clean/drivers/net/qsnet/elan4/user_ddcq.c linux-2.6.9/drivers/net/qsnet/elan4/user_ddcq.c
41754 --- clean/drivers/net/qsnet/elan4/user_ddcq.c   1969-12-31 19:00:00.000000000 -0500
41755 +++ linux-2.6.9/drivers/net/qsnet/elan4/user_ddcq.c     2005-07-20 07:35:36.000000000 -0400
41756 @@ -0,0 +1,230 @@
41757 +/*
41758 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
41759 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
41760 + * 
41761 + *    For licensing information please see the supplied COPYING file
41762 + *
41763 + */
41764 +
41765 +#ident "@(#)$Id: user_ddcq.c,v 1.16.2.1 2005/07/20 11:35:36 mike Exp $"
41766 +/*      $Source: /cvs/master/quadrics/elan4mod/user_ddcq.c,v $*/
41767 +
41768 +#include <qsnet/kernel.h>
41769 +
41770 +#include <elan4/debug.h>
41771 +#include <elan4/device.h>
41772 +#include <elan4/user.h>
41773 +#include <elan4/commands.h>
41774 +
41775 +#if PAGE_SIZE < CQ_CommandMappingSize
41776 +#  define ELAN4_COMMAND_QUEUE_MAPPING  PAGE_SIZE
41777 +#else
41778 +#  define ELAN4_COMMAND_QUEUE_MAPPING  CQ_CommandMappingSize
41779 +#endif
41780 +
41781 +/* The user device driver command queue is used for re-issuing 
41782 + * trapped items.  It is allocated as a 1K command queue, and
41783 + * we insert command flow writes event 256 bytes (32 dwords).
41784 + */
41785 +#define USER_CTRLFLOW_COUNT    32
41786 +
41787 +/* Flow control of the device driver command queue is handled by periodically 
41788 + * inserting dword writes into the command stream.   When you need to know 
41789 + * that the queue has been flushed, then you insert an extra contorl flow
41790 + * write into the command queue.   Should the queue not be flushed, but the
41791 + * trap handler be returning to user space, then it will also insert and 
41792 + * extra interrupt command to ensure that it is re-entered after the queue
41793 + * has been flushed.
41794 + *
41795 + * Note - we account the space for the interrupt command on each control 
41796 + * flow write so that we do not overflow the queue even if we end up 
41797 + * inserting an interrupt for every command flow write.  In general only
41798 + * a single interrupt should get inserted....
41799 + */
41800 +
41801 +#define user_ddcq_command_write(value,off) do { \
41802 +    PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_write: cmdptr=%x off=%d value=%llx\n", cmdptr, off, value);\
41803 +    writeq(value, (void *)(cmdptr + (off << 3))); \
41804 +} while (0)
41805 +
41806 +#define user_ddcq_command_space(uctx)  \
41807 +    ((CQ_Size (uctx->uctx_ddcq->ucq_cq->cq_size)>>3) - ((uctx)->uctx_ddcq_insertcnt - (uctx)->uctx_upage->upage_ddcq_completed))
41808 +
41809 +#define user_ddcq_command_flow_write(uctx) do { \
41810 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
41811 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
41812 +\
41813 +    (uctx)->uctx_ddcq_completed = ((uctx)->uctx_ddcq_insertcnt += 3);\
41814 +\
41815 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_write: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
41816 +           (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
41817 +    user_ddcq_command_write (GUARD_CMD       | GUARD_ALL_CHANNELS,      0);\
41818 +    user_ddcq_command_write (WRITE_DWORD_CMD | (uctx)->uctx_upage_addr, 1);\
41819 +    user_ddcq_command_write ((uctx)->uctx_ddcq_completed,               2);\
41820 +} while (0)
41821 +
41822 +#define user_ddcq_command_flow_intr(uctx) do { \
41823 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
41824 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
41825 +\
41826 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_command_flow_intr: completed=%llx [%llx] addr=%llx\n", (uctx)->uctx_ddcq_completed, \
41827 +           (uctx)->uctx_upage->upage_ddcq_completed, (uctx)->uctx_upage_addr); \
41828 +    user_ddcq_command_write (INTERRUPT_CMD   | ELAN4_INT_COOKIE_DDCQ,   3);\
41829 +} while (0)
41830 +
41831 +#define user_ddcq_command_prologue(uctx, count) do { \
41832 +   E4_uint64 iptr   = (uctx)->uctx_ddcq_insertcnt; \
41833 +   ioaddr_t  cmdptr = (uctx)->uctx_ddcq->ucq_cq->cq_mapping + ((iptr<<3) & ((ELAN4_COMMAND_QUEUE_MAPPING >> 1)-1));\
41834 +   PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_prologue: iptr=%llx cmdptr=%x\n", iptr, cmdptr);
41835 +
41836 +#define user_ddcq_command_epilogue(uctx, count, extra) \
41837 +   (uctx)->uctx_ddcq_insertcnt = iptr + (count);\
41838 +\
41839 +   PRINTF(uctx, DBG_DDCQ, "user_ddcq_command_epilogue: iptr=%llx + %x + %x - completed %llx\n", iptr, count, extra, (uctx)->uctx_ddcq_completed);\
41840 +   if (((iptr) + (count) + (extra)) > ((uctx)->uctx_ddcq_completed + USER_CTRLFLOW_COUNT))\
41841 +       user_ddcq_command_flow_write(uctx); \
41842 +} while (0)
41843 +
41844 +int
41845 +user_ddcq_check (USER_CTXT *uctx, unsigned num)
41846 +{
41847 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: insert=%llx completed=%llx [%llx] space=%d num=%d\n", 
41848 +           uctx->uctx_ddcq_insertcnt, uctx->uctx_ddcq_completed,
41849 +           uctx->uctx_upage->upage_ddcq_completed,
41850 +           user_ddcq_command_space (uctx),
41851 +           num
41852 +       );
41853 +
41854 +    /* Ensure that there is enough space for the command we want to issue,
41855 +     * PLUS the guard/writeword for the control flow flush.
41856 +     * PLUS the interrupt command for rescheduling */
41857 +    if (user_ddcq_command_space (uctx) > (num + 4))
41858 +    {
41859 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: loads of space\n");
41860 +
41861 +       return (1);
41862 +    }
41863 +    
41864 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_check: not enough space - reschedule\n");
41865 +
41866 +    uctx->uctx_trap_state = UCTX_TRAP_SIGNALLED;
41867 +    return (0);
41868 +}
41869 +
41870 +int
41871 +user_ddcq_flush (USER_CTXT *uctx)
41872 +{
41873 +    ELAN4_DEV *dev = uctx->uctx_ctxt.ctxt_dev;
41874 +    USER_CQ   *ucq = uctx->uctx_ddcq;
41875 +
41876 +    switch (ucq->ucq_state)
41877 +    {
41878 +    case UCQ_TRAPPED:
41879 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: command queue is trapped\n");
41880 +       return (0);
41881 +       
41882 +    case UCQ_NEEDS_RESTART:
41883 +       PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: restarting command queue\n");
41884 +
41885 +       if (UCTX_RUNNABLE (uctx))
41886 +       {
41887 +           ucq->ucq_state = UCQ_RUNNING;
41888 +           elan4_restartcq (dev, ucq->ucq_cq);
41889 +       }
41890 +       break;
41891 +    }
41892 +
41893 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_flush: insertcnt=%llx completed=%llx [%llx]\n", 
41894 +           uctx->uctx_ddcq_insertcnt, uctx->uctx_ddcq_completed, uctx->uctx_upage->upage_ddcq_completed);
41895 +
41896 +    if (uctx->uctx_ddcq_completed != uctx->uctx_ddcq_insertcnt)
41897 +       user_ddcq_command_flow_write (uctx);
41898 +
41899 +    return (uctx->uctx_ddcq_completed == uctx->uctx_upage->upage_ddcq_completed);
41900 +}
41901 +
41902 +void
41903 +user_ddcq_intr (USER_CTXT *uctx)
41904 +{
41905 +    user_ddcq_command_flow_intr (uctx);
41906 +}
41907 +
41908 +void
41909 +user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma)
41910 +{
41911 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_dma: cookie=%llx vproc=%llx\n",  dma->dma_cookie, dma->dma_vproc);
41912 +
41913 +    user_ddcq_command_prologue(uctx, 7) {
41914 +
41915 +       user_ddcq_command_write ((dma->dma_typeSize & ~DMA_ContextMask) | RUN_DMA_CMD, 0);
41916 +       user_ddcq_command_write (dma->dma_cookie,                                      1);
41917 +       user_ddcq_command_write (dma->dma_vproc,                                       2);
41918 +       user_ddcq_command_write (dma->dma_srcAddr,                                     3);
41919 +       user_ddcq_command_write (dma->dma_dstAddr,                                     4);
41920 +       user_ddcq_command_write (dma->dma_srcEvent,                                    5);
41921 +       user_ddcq_command_write (dma->dma_dstEvent,                                    6);
41922 +
41923 +    } user_ddcq_command_epilogue (uctx, 7, 0);
41924 +}
41925 +
41926 +void
41927 +user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs)
41928 +{
41929 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_run_thread: PC=%llx SP=%llx\n", regs->Registers[0], regs->Registers[1]);
41930 +
41931 +    user_ddcq_command_prologue(uctx, 7) {
41932 +
41933 +       user_ddcq_command_write (regs->Registers[0] | RUN_THREAD_CMD, 0);
41934 +       user_ddcq_command_write (regs->Registers[1],                  1);
41935 +       user_ddcq_command_write (regs->Registers[2],                  2);
41936 +       user_ddcq_command_write (regs->Registers[3],                  3);
41937 +       user_ddcq_command_write (regs->Registers[4],                  4);
41938 +       user_ddcq_command_write (regs->Registers[5],                  5);
41939 +       user_ddcq_command_write (regs->Registers[6],                  6);
41940 +       
41941 +    } user_ddcq_command_epilogue (uctx, 7, 0);
41942 +}
41943 +
41944 +void
41945 +user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr)
41946 +{
41947 +    user_ddcq_command_prologue (uctx, 1) {
41948 +
41949 +       user_ddcq_command_write (SET_EVENT_CMD | addr, 0);
41950 +    
41951 +    } user_ddcq_command_epilogue (uctx, 1, 0);
41952 +}
41953 +
41954 +void
41955 +user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count)
41956 +{
41957 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_seteventn: addr=%llx count=%lx\n", addr, count);
41958 +
41959 +    user_ddcq_command_prologue (uctx, 2) {
41960 +
41961 +       user_ddcq_command_write (SET_EVENTN_CMD, 0);
41962 +       user_ddcq_command_write (addr | count,   1);
41963 +
41964 +    } user_ddcq_command_epilogue (uctx, 2, 0);
41965 +}
41966 +
41967 +void
41968 +user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1)
41969 +{
41970 +    PRINTF (uctx, DBG_DDCQ, "user_ddcq_waitevent: addr=%llx CountAndType=%llx Param=%llx,%llx\n", addr, CountAndType, Param0, Param1);
41971 +
41972 +    user_ddcq_command_prologue (uctx, 4) {
41973 +
41974 +       user_ddcq_command_write (WAIT_EVENT_CMD | addr, 0);
41975 +       user_ddcq_command_write (CountAndType,          1);
41976 +       user_ddcq_command_write (Param0,                2);
41977 +       user_ddcq_command_write (Param1,                3);
41978 +
41979 +    } user_ddcq_command_epilogue (uctx, 4, 0);
41980 +}
41981 +
41982 +/*
41983 + * Local variables:
41984 + * c-file-style: "stroustrup"
41985 + * End:
41986 + */
41987 diff -urN clean/drivers/net/qsnet/elan4/user_Linux.c linux-2.6.9/drivers/net/qsnet/elan4/user_Linux.c
41988 --- clean/drivers/net/qsnet/elan4/user_Linux.c  1969-12-31 19:00:00.000000000 -0500
41989 +++ linux-2.6.9/drivers/net/qsnet/elan4/user_Linux.c    2005-06-09 10:46:55.000000000 -0400
41990 @@ -0,0 +1,349 @@
41991 +/*
41992 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
41993 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
41994 + * 
41995 + *    For licensing information please see the supplied COPYING file
41996 + *
41997 + */
41998 +
41999 +#ident "@(#)$Id: user_Linux.c,v 1.35.2.1 2005/06/09 14:46:55 addy Exp $"
42000 +/*      $Source: /cvs/master/quadrics/elan4mod/user_Linux.c,v $*/
42001 +
42002 +#include <qsnet/kernel.h>
42003 +#include <qsnet/kpte.h>
42004 +
42005 +#include <linux/pci.h>
42006 +#include <linux/pagemap.h>
42007 +
42008 +#ifdef CONFIG_HUGETLB_PAGE
42009 +#include <linux/hugetlb.h>
42010 +#endif
42011 +
42012 +#include <elan4/debug.h>
42013 +#include <elan4/device.h>
42014 +#include <elan4/user.h>
42015 +
42016 +extern struct vm_operations_struct mem_vm_ops;
42017 +extern struct vm_operations_struct user_vm_ops;
42018 +extern int mem_pteload (struct vm_area_struct *vma, unsigned long pgoff, ELAN4_CTXT *ctxt, E4_Addr eaddr, int perm);
42019 +extern int user_pteload (struct vm_area_struct *vma, unsigned long pgoff, ELAN4_CTXT *ctxt, E4_Addr eaddr, int perm);
42020 +
42021 +static inline int
42022 +user_load_page (USER_CTXT *uctx, struct vm_area_struct *vma, unsigned long maddr, E4_Addr eaddr, int perm, int writeable)
42023 +{
42024 +    ELAN4_DEV   *dev  = uctx->uctx_ctxt.ctxt_dev;
42025 +    struct page *page = NULL;
42026 +    int i, res = 0;
42027 +    
42028 +    if (get_user_pages (current, current->mm, maddr, 1, writeable, 0, &page, NULL) == 1)
42029 +    {
42030 +       /* NOTE - the page can't be paged out since we've pinned it down.
42031 +        *        it also can't be munmap'd since we hold the mmap_sem */
42032 +       
42033 +       PRINTF (uctx, DBG_FAULT, "user_load_page: %lx %s page %p\n", maddr, writeable ? "writeable" : "readonly",  page);
42034 +       
42035 +       for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[0]))
42036 +           if ((res = elan4mmu_pteload_page (&uctx->uctx_ctxt, 0, eaddr + i, page, perm)) < 0)
42037 +               break;
42038 +       
42039 +       page_cache_release (page);
42040 +    }
42041 +    else
42042 +    {
42043 +       if (vma && vma->vm_ops == &mem_vm_ops)
42044 +           res = mem_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm);
42045 +       else if (vma && vma->vm_ops == &user_vm_ops)
42046 +           res = user_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm);
42047 +       else
42048 +           res = -EINVAL;
42049 +    }
42050 +
42051 +    return res;
42052 +}
42053 +
42054 +int
42055 +user_load_range (USER_CTXT *uctx, E4_Addr eaddr, unsigned long nbytes, E4_uint32 fsr)
42056 +{
42057 +    struct mm_struct      *mm        = current->mm;
42058 +    int                    writeable = (AT_Perm(fsr) == AT_PermLocalDataWrite ||
42059 +                                       AT_Perm(fsr) == AT_PermRemoteWrite    ||
42060 +                                       AT_Perm(fsr) == AT_PermLocalEvent     ||
42061 +                                       AT_Perm(fsr) == AT_PermRemoteEvent);
42062 +    struct vm_area_struct *vma;
42063 +    int                    perm;
42064 +    unsigned long          len;
42065 +    unsigned long          maddr;
42066 +    int                           res = 0;
42067 +
42068 +    kmutex_lock (&uctx->uctx_rgnmutex);
42069 +
42070 +    while (nbytes > 0)
42071 +    {
42072 +       USER_RGN *rgn = user_rgnat_elan (uctx, eaddr);
42073 +
42074 +       if (rgn == NULL || ELAN4_INCOMPAT_ACCESS (rgn->rgn_perm, AT_Perm (fsr)))
42075 +       {
42076 +           PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx -> %s\n", eaddr, rgn == NULL ? "no mapping" : "no permission");
42077 +
42078 +           kmutex_unlock (&uctx->uctx_rgnmutex);
42079 +           return (rgn == NULL ? -EFAULT : -EPERM);
42080 +       }
42081 +
42082 +       if (writeable)
42083 +           perm = rgn->rgn_perm;
42084 +       else if (AT_Perm(fsr) == AT_PermExecute)
42085 +           perm = PERM_LocRead | (rgn->rgn_perm & ~PERM_Mask);
42086 +       else
42087 +           perm = ELAN4_PERM_READONLY (rgn->rgn_perm & PERM_Mask) | (rgn->rgn_perm & ~PERM_Mask);
42088 +
42089 +       PRINTF (uctx, DBG_FAULT, "user_load_range: rgn=%p [%llx.%lx.%x]\n", rgn, rgn->rgn_ebase, rgn->rgn_mbase, rgn->rgn_len);
42090 +
42091 +       len = ((rgn->rgn_ebase + rgn->rgn_len) - eaddr);
42092 +       if (len > nbytes)
42093 +           len = nbytes;
42094 +       nbytes -= len;
42095 +       
42096 +       maddr = rgn->rgn_mbase + (eaddr - rgn->rgn_ebase);
42097 +
42098 +       PRINTF (uctx, DBG_FAULT, "user_load_range: eaddr=%llx->%llx -> %lx->%lx len=%x perm=%x\n", eaddr, 
42099 +               eaddr + len, maddr, maddr + len, len, perm);
42100 +
42101 +       down_read (&mm->mmap_sem);
42102 +       while (len > 0)
42103 +       {
42104 +#if defined(conditional_schedule)
42105 +           conditional_schedule();
42106 +#endif
42107 +           if ((vma = find_vma_intersection (mm, maddr, maddr + PAGE_SIZE)) == NULL ||
42108 +               (writeable && !(vma->vm_flags & VM_WRITE)))
42109 +           {
42110 +               PRINTF (DBG_USER, DBG_FAULT, "user_load_range: %s %lx\n", vma ? "not writeble at" : "no vma for", maddr);
42111 +               up_read (&mm->mmap_sem);
42112 +               kmutex_unlock (&uctx->uctx_rgnmutex);
42113 +               return (-EFAULT);
42114 +           }
42115 +
42116 +           if ((res = user_load_page (uctx, vma, maddr, eaddr, perm, writeable)) < 0)
42117 +           {
42118 +               PRINTF (DBG_USER, DBG_FAULT, "user_load_range: failed to load page res=%d\n", res);
42119 +               up_read (&mm->mmap_sem);
42120 +               kmutex_unlock (&uctx->uctx_rgnmutex);
42121 +               return res;
42122 +           }
42123 +           
42124 +           eaddr += PAGE_SIZE;
42125 +           maddr += PAGE_SIZE;
42126 +           len   -= PAGE_SIZE;
42127 +       }
42128 +       up_read (&mm->mmap_sem);
42129 +    }
42130 +    kmutex_unlock (&uctx->uctx_rgnmutex);
42131 +
42132 +    PRINTF (uctx, DBG_FAULT, "user_load_range: alldone\n");
42133 +
42134 +    return (0);
42135 +}
42136 +
42137 +void
42138 +user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len)
42139 +{
42140 +    virtaddr_t             lim = addr + len - 1;
42141 +    struct vm_area_struct *vma;
42142 +
42143 +    down_read (&current->mm->mmap_sem);
42144 +
42145 +    if ((vma = find_vma (current->mm, addr)) != NULL)
42146 +    {
42147 +       do {
42148 +           unsigned long start = vma->vm_start;
42149 +           unsigned long end   = vma->vm_end;
42150 +
42151 +           if ((start-1) >= lim)
42152 +               break;
42153 +
42154 +           if (start < addr) start = addr;
42155 +           if ((end-1) > lim) end = lim+1;
42156 +               
42157 +           if (vma->vm_flags & VM_IO)
42158 +               continue;
42159 +
42160 +           user_unload_main (uctx, start, end - start);
42161 +
42162 +           if (get_user_pages (current, current->mm, start, (end - start)/PAGE_SIZE, 
42163 +                               (vma->vm_flags & VM_WRITE) != 0, 0, NULL, NULL) > 0)
42164 +               user_update_main (uctx, vma->vm_mm, vma, start, end - start);
42165 +           
42166 +           else if (vma->vm_ops == &mem_vm_ops)
42167 +               user_update_main (uctx, vma->vm_mm, vma, start, end - start);
42168 +           else if (vma->vm_ops == &user_vm_ops)
42169 +               user_update_main (uctx, vma->vm_mm, vma, start, end - start);
42170 +
42171 +       } while ((vma = find_vma (current->mm, vma->vm_end)) != NULL);
42172 +    }
42173 +    up_read (&current->mm->mmap_sem);
42174 +}
42175 +
42176 +static void
42177 +user_update_range (USER_CTXT *uctx, int tbl, struct mm_struct *mm, struct vm_area_struct *vma, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, int perm)
42178 +{
42179 +    ELAN4_DEV    *dev    = uctx->uctx_ctxt.ctxt_dev;
42180 +    int           roperm = ELAN4_PERM_READONLY(perm & PERM_Mask) | (perm & ~PERM_Mask);
42181 +    int           i, write;
42182 +    pte_t        *ptep;
42183 +    struct page  *page;
42184 +       
42185 +    if (vma && vma->vm_ops == &mem_vm_ops)
42186 +    {
42187 +       mem_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm);
42188 +       return;
42189 +    }
42190 +
42191 +    if (vma && vma->vm_ops == &user_vm_ops)
42192 +    {
42193 +       user_pteload (vma, maddr, &uctx->uctx_ctxt, eaddr, perm);
42194 +       return;
42195 +    }
42196 +    
42197 +#ifdef CONFIG_HUGETLB_PAGE
42198 +    /* If the kernel has hugetlb support compiled in, then 
42199 +     * we can't walk the pte's unless we know for sure that
42200 +     * they're normal ptes. */
42201 +
42202 +    if (vma == NULL || is_vm_hugetlb_page (vma))
42203 +       return;
42204 +#endif
42205 +
42206 +    while (len > 0)
42207 +    {
42208 +       if ((ptep = find_pte_map (mm, maddr)) != NULL)
42209 +       {
42210 +           write = (pte_write(*ptep) && pte_dirty(*ptep));
42211 +
42212 +#if LINUX_VERSION_CODE < KERNEL_VERSION (2, 5, 0)
42213 +           page = pte_page (*ptep);
42214 +           if (! VALID_PAGE (page))
42215 +               page = NULL;
42216 +#else
42217 +           {
42218 +               unsigned long pfn;
42219 +
42220 +               pfn  = pte_pfn (*ptep);
42221 +               page = pfn_valid (pfn) ? pfn_to_page (pfn) : NULL;
42222 +           }
42223 +#endif
42224 +           pte_unmap (ptep);
42225 +
42226 +           PRINTF (uctx, DBG_IOPROC, "user_update_range: maddr=%lx eaddr=%llx -> page %p %lx %s\n", maddr, eaddr, page, page_to_pfn (page), write ? "writeable" : "read-only");
42227 +
42228 +           if (page != NULL)
42229 +               for (i = 0; i < PAGE_SIZE; i += (1 << dev->dev_pageshift[tbl]))
42230 +                   elan4mmu_pteload_page (&uctx->uctx_ctxt, tbl, eaddr + i, page, write ? perm : roperm);
42231 +       }
42232 +
42233 +       eaddr += PAGE_SIZE;
42234 +       maddr += PAGE_SIZE;
42235 +       len   -= PAGE_SIZE;
42236 +    }
42237 +}
42238 +
42239 +void
42240 +user_update_main (USER_CTXT *uctx, struct mm_struct *mm, struct vm_area_struct *vma, virtaddr_t start, unsigned long len)
42241 +{
42242 +    USER_RGN     *rgn;
42243 +    unsigned long ssize;
42244 +    virtaddr_t    end = start + len - 1;
42245 +
42246 +    spin_lock (&uctx->uctx_rgnlock);
42247 +
42248 +    PRINTF (uctx, DBG_IOPROC, "user_update_main: start=%lx end=%lx\n", start, end);
42249 +
42250 +    for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext)
42251 +    {
42252 +       if (end < rgn->rgn_mbase)
42253 +           break;
42254 +       
42255 +       if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1)) 
42256 +       {
42257 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1);
42258 +
42259 +           user_update_range (uctx, 0  /* tbl */, mm, vma, rgn->rgn_mbase, rgn->rgn_ebase, rgn->rgn_len, rgn->rgn_perm);
42260 +       }
42261 +       else if (start <= rgn->rgn_mbase)
42262 +       {
42263 +           ssize = end - rgn->rgn_mbase + 1;
42264 +
42265 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + ssize);
42266 +
42267 +           user_update_range (uctx, 0 /* tbl */, mm, vma, rgn->rgn_mbase, rgn->rgn_ebase, ssize, rgn->rgn_perm);
42268 +       }
42269 +       else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
42270 +       {
42271 +           ssize = (rgn->rgn_mbase + rgn->rgn_len) - start;
42272 +
42273 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: end   %lx -> %lx\n", start, start + ssize);
42274 +
42275 +           user_update_range (uctx, 0 /* tbl */, mm, vma, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize, rgn->rgn_perm);
42276 +       }
42277 +       else
42278 +       {
42279 +           PRINTF (uctx, DBG_IOPROC, "user_update_main: middle %lx -> %lx\n", start, end);
42280 +
42281 +           user_update_range (uctx, 0 /* tbl */, mm, vma, start, rgn->rgn_ebase + (start - rgn->rgn_mbase), len, rgn->rgn_perm);  
42282 +       }
42283 +    }
42284 +    spin_unlock (&uctx->uctx_rgnlock);
42285 +}
42286 +
42287 +void
42288 +user_unload_main (USER_CTXT *uctx, virtaddr_t start, unsigned long len)
42289 +{
42290 +    USER_RGN     *rgn;
42291 +    unsigned long ssize;
42292 +    virtaddr_t    end = start + len - 1;
42293 +
42294 +    spin_lock (&uctx->uctx_rgnlock);
42295 +
42296 +    PRINTF (uctx, DBG_IOPROC, "user_unload_main: start=%lx end=%lx\n", start, end);
42297 +
42298 +    for (rgn = user_findrgn_main (uctx, start, 0); rgn != NULL; rgn = rgn->rgn_mnext)
42299 +    {
42300 +       if (end < rgn->rgn_mbase)
42301 +           break;
42302 +       
42303 +       if (start <= rgn->rgn_mbase && end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
42304 +       {
42305 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: whole %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + rgn->rgn_len - 1);
42306 +
42307 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, rgn->rgn_len);
42308 +       }
42309 +       else if (start <= rgn->rgn_mbase)
42310 +       {
42311 +           ssize = end - rgn->rgn_mbase + 1;
42312 +
42313 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: start %lx -> %lx\n", rgn->rgn_mbase, rgn->rgn_mbase + ssize);
42314 +
42315 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase, ssize);
42316 +       }
42317 +       else if (end >= (rgn->rgn_mbase + rgn->rgn_len - 1))
42318 +       {
42319 +           ssize = (rgn->rgn_mbase + rgn->rgn_len) - start;
42320 +           
42321 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: end   %lx -> %lx\n", start, start + ssize);
42322 +           
42323 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), ssize);
42324 +       }
42325 +       else
42326 +       {
42327 +
42328 +           PRINTF (uctx, DBG_IOPROC, "user_unload_main: middle %lx -> %lx\n", start, end);
42329 +
42330 +           elan4mmu_unload_range (&uctx->uctx_ctxt, 0 /* tbl */, rgn->rgn_ebase + (start - rgn->rgn_mbase), len);
42331 +       }
42332 +    }
42333 +    spin_unlock (&uctx->uctx_rgnlock);
42334 +}
42335 +/*
42336 + * Local variables:
42337 + * c-file-style: "stroustrup"
42338 + * End:
42339 + */
42340 diff -urN clean/drivers/net/qsnet/ep/asm_elan4_thread.S linux-2.6.9/drivers/net/qsnet/ep/asm_elan4_thread.S
42341 --- clean/drivers/net/qsnet/ep/asm_elan4_thread.S       1969-12-31 19:00:00.000000000 -0500
42342 +++ linux-2.6.9/drivers/net/qsnet/ep/asm_elan4_thread.S 2003-09-23 09:55:11.000000000 -0400
42343 @@ -0,0 +1,78 @@
42344 +/*
42345 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
42346 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
42347 + *
42348 + *    For licensing information please see the supplied COPYING file
42349 + *
42350 + */
42351 +
42352 +#ident "@(#)$Id: asm_elan4_thread.S,v 1.1 2003/09/23 13:55:11 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
42353 +/*      $Source: /cvs/master/quadrics/epmod/asm_elan4_thread.S,v $*/
42354 +
42355 +#include <elan4/events.h>
42356 +#include <elan4/commands.h>
42357 +
42358 +/*
42359 + * c_reschedule (E4_uint64 *commandport)
42360 + */            
42361 +       .global c_reschedule
42362 +c_reschedule:
42363 +       add             %sp, -128, %sp
42364 +       st64            %r16, [%sp]                     // preserve call preserved registers
42365 +       st64            %r24, [%sp + 64]                // - see CALL_USED_REGISTERS.
42366 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
42367 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
42368 +       nop                                             // BUG FIX: E4 RevA
42369 +       nop                                             // BUG FIX: E4 RevA
42370 +       
42371 +       mov             %r7, %r18                       // (%r2) return pc
42372 +1:     call            2f
42373 +        mov            %sp, %r17                       // (%r1) SP
42374 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
42375 +       mov             NOP_CMD, %r23                   // "nop" command
42376 +       st64suspend     %r16, [%r8]
42377 +3:     ld64            [%sp], %r16
42378 +       ld64            [%sp + 64], %r24                // restore call preserved register
42379 +       jmpl            %r2+8, %r0                      // and return
42380 +        add            %sp, 128, %sp
42381 +       
42382 +
42383 +/*
42384 + * c_waitevent (E4_uint64 *commandport, E4_Event *event, E4_uint64 count)
42385 + */
42386 +       .global c_waitevent
42387 +c_waitevent:
42388 +       add             %sp, -192, %sp
42389 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
42390 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
42391 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
42392 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
42393 +       nop                                             // BUG FIX: E4 RevA
42394 +       nop                                             // BUG FIX: E4 RevA
42395 +
42396 +       mov             %r7, %r18                       // (%r2) return pc
42397 +1:     call            2f
42398 +        mov            %sp, %r17                       // (%r1) SP
42399 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
42400 +       st32            %r16, [%sp]                     // event source block
42401 +       mov             MAKE_EXT_CLEAN_CMD, %r23        // "flush command queue desc" command
42402 +       st8             %r23, [%sp+56]                  // event source block
42403 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
42404 +       mov             %r23,%r23                       // BUG FIX: E4 RevA
42405 +       nop                                             // BUG FIX: E4 RevA
42406 +       nop                                             // BUG FIX: E4 RevA
42407 +       
42408 +
42409 +       or              %r9, WAIT_EVENT_CMD, %r16
42410 +       sll8            %r10, 32, %r17
42411 +       or              %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17
42412 +       mov             %sp, %r18
42413 +       mov             %r8, %r19
42414 +       
42415 +       st32suspend     %r16, [%r8]
42416 +       
42417 +3:     ld64            [%sp + 64], %r16                // restore call preserved register
42418 +       ld64            [%sp + 128], %r24
42419 +       jmpl            %r2+8, %r0                      // and return
42420 +        add            %sp, 192, %sp
42421 +
42422 diff -urN clean/drivers/net/qsnet/ep/assym_elan4.h linux-2.6.9/drivers/net/qsnet/ep/assym_elan4.h
42423 --- clean/drivers/net/qsnet/ep/assym_elan4.h    1969-12-31 19:00:00.000000000 -0500
42424 +++ linux-2.6.9/drivers/net/qsnet/ep/assym_elan4.h      2005-09-07 10:39:44.000000000 -0400
42425 @@ -0,0 +1,20 @@
42426 +/*
42427 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
42428 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
42429 + *
42430 + *    For licensing information please see the supplied COPYING file
42431 + *
42432 + */
42433 +
42434 +#ident "@(#)$Id: genassym_elan4.c,v 1.3 2004/04/25 11:26:07 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
42435 +/*      $Source: /cvs/master/quadrics/epmod/genassym_elan4.c,v $*/
42436 +
42437 +/* Generated by genassym_elan4 - do not modify */
42438 +
42439 +#define EP4_RCVR_THREAD_STALL  0
42440 +#define EP4_RCVR_PENDING_TAILP 128
42441 +#define EP4_RCVR_PENDING_HEAD  136
42442 +#define EP4_RCVR_DEBUG         176
42443 +#define EP4_RXD_NEXT           664
42444 +#define EP4_RXD_QUEUED         728
42445 +#define EP4_RXD_DEBUG          944
42446 diff -urN clean/drivers/net/qsnet/ep/cm.c linux-2.6.9/drivers/net/qsnet/ep/cm.c
42447 --- clean/drivers/net/qsnet/ep/cm.c     1969-12-31 19:00:00.000000000 -0500
42448 +++ linux-2.6.9/drivers/net/qsnet/ep/cm.c       2005-05-24 05:19:12.000000000 -0400
42449 @@ -0,0 +1,2835 @@
42450 +/*
42451 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
42452 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
42453 + *
42454 + *    For licensing information please see the supplied COPYING file
42455 + *
42456 + */
42457 +
42458 +#ident "@(#)$Id: cm.c,v 1.90 2005/05/24 09:19:12 david Exp $"
42459 +/*      $Source: /cvs/master/quadrics/epmod/cm.c,v $ */
42460 +
42461 +#include <qsnet/kernel.h>
42462 +
42463 +#include <elan/kcomm.h>
42464 +
42465 +#include "kcomm_vp.h"
42466 +#include "debug.h"
42467 +#include "cm.h"
42468 +#include <elan/epsvc.h>
42469 +
42470 +#include <qsnet/procfs_linux.h>
42471 +
42472 +#if defined(LINUX)
42473 +#include "conf_linux.h"
42474 +#endif
42475 +
42476 +int BranchingRatios[CM_MAX_LEVELS];
42477 +
42478 +int MachineId      = -1;
42479 +int BrokenLevel    = -1;                       /* Simulates Broken Network */
42480 +int RejoinCheck    = 1;
42481 +int RejoinPanic    = 0;
42482 +
42483 +static int
42484 +SegmentNo (CM_RAIL *cmRail, u_int nodeid, u_int lvl)
42485 +{
42486 +    int i;
42487 +
42488 +    ASSERT (lvl < cmRail->NumLevels);
42489 +    
42490 +    for (i = 0; i < lvl; i++)
42491 +       nodeid /= cmRail->Levels[i].NumSegs;
42492 +    
42493 +    return (nodeid % cmRail->Levels[lvl].NumSegs);
42494 +}
42495 +
42496 +static int
42497 +ClusterIds (CM_RAIL *cmRail, int clvl, int *clmin, int *clmax)
42498 +{
42499 +    int clid  = cmRail->Rail->Position.pos_nodeid - cmRail->Levels[clvl].MinNodeId;
42500 +
42501 +    if (clvl == 0)
42502 +       *clmin = *clmax = clid;
42503 +    else
42504 +    {
42505 +       *clmin = cmRail->Levels[clvl - 1].MinNodeId - cmRail->Levels[clvl].MinNodeId;
42506 +       *clmax = *clmin + cmRail->Levels[clvl - 1].NumNodes - 1;
42507 +    }
42508 +    return (clid);
42509 +}
42510 +
42511 +static void
42512 +__Schedule_Timer (CM_RAIL *cmRail, long tick)
42513 +{
42514 +    if (! timer_pending (&cmRail->HeartbeatTimer) || AFTER (cmRail->NextRunTime, tick))
42515 +    {
42516 +       cmRail->NextRunTime = tick;
42517 +
42518 +       mod_timer (&cmRail->HeartbeatTimer, tick);
42519 +    }
42520 +}
42521 +
42522 +static void
42523 +__Schedule_Discovery (CM_RAIL *cmRail)         /* we urgently need to schedule discovery */
42524 +{
42525 +    __Schedule_Timer (cmRail, cmRail->NextDiscoverTime = lbolt);
42526 +}
42527 +
42528 +static int
42529 +MsgBusy (CM_RAIL *cmRail, int msgNumber)       /* is output buffer 'msgNumber' still in flight? NACK/finished both free it */
42530 +{
42531 +    switch (ep_outputq_state (cmRail->Rail, cmRail->MsgQueue, msgNumber))
42532 +    {
42533 +    case EP_OUTPUTQ_BUSY:                      /* still busy */
42534 +       return 1;
42535 +       
42536 +    case EP_OUTPUTQ_FAILED:                    /* NACKed */
42537 +    {
42538 +#if defined(DEBUG_PRINTF)
42539 +       CM_MSG  *msg  = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
42540 +       uint8_t  type  = msg->Hdr.Type;
42541 +       uint16_t nmaps = msg->Hdr.NumMaps;
42542 +       int16_t  off   = msg->Payload.Statemaps[CM_MSG_MAP(0)].offset;
42543 +       
42544 +       CPRINTF4 (((type == CM_MSG_TYPE_DISCOVER_LEADER) || (type == CM_MSG_TYPE_DISCOVER_SUBORDINATE))  ? 6 : 3, /* we expect broadcasts to be NACKed */
42545 +                 "%s: msg %d type %d  failed%s\n", cmRail->Rail->Name, msgNumber, type, 
42546 +                 (type != CM_MSG_TYPE_HEARTBEAT) ? "" : nmaps == 0 ? ": null heartbeat" :
42547 +                 off == STATEMAP_RESET ? ": heartbeat with R statemaps" : ": heartbeat with statemaps");
42548 +#endif
42549 +       return 0;                               /* a NACK still frees the buffer for reuse */
42550 +    }
42551 +    
42552 +    case EP_OUTPUTQ_FINISHED:
42553 +       return 0;
42554 +
42555 +    default:
42556 +       panic ("MsgBusy - bad return code from ep_outputq_state\n");
42557 +       /* NOTREACHED */
42558 +    }
42559 +    return 0;
42560 +}
42561 +
42562 +static void
42563 +LaunchMessage (CM_RAIL *cmRail, int msgNumber, int vp, int qnum, int retries, int type, int lvl, int nmaps)    /* fill in the header of buffer 'msgNumber' and send it to virtual process 'vp' */
42564 +{
42565 +    CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, msgNumber);
42566 +    CM_HDR *hdr = &msg->Hdr;
42567 +
42568 +    ASSERT (nmaps >= 0 && nmaps <= CM_MSG_MAXMAPS);
42569 +    ASSERT (SPINLOCK_HELD (&cmRail->Lock));
42570 +
42571 +    hdr->Version   = CM_MSG_VERSION;
42572 +    hdr->ParamHash = cmRail->ParamHash;
42573 +    hdr->Timestamp = cmRail->Timestamp;
42574 +    hdr->Checksum  = 0;                        /* must be zero while the checksum is computed (see SumCheck) */
42575 +    hdr->NodeId    = cmRail->Rail->Position.pos_nodeid;
42576 +    hdr->MachineId = MachineId;
42577 +    hdr->NumMaps   = nmaps;
42578 +    hdr->Level     = lvl;
42579 +    hdr->Type      = type;
42580 +    hdr->Checksum  = CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps));
42581 +
42582 +    if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf)))                       /* Simulate broken network? */
42583 +       return;
42584 +
42585 +    if (ep_outputq_send (cmRail->Rail, cmRail->MsgQueue, msgNumber,    /* BUGFIX: stray ';' after the if() made IncrStat unconditional */
42586 +                        CM_MSG_SIZE(nmaps), vp, qnum, retries))
42587 +       IncrStat (cmRail, LaunchMessageFail);
42588 +}
42589 +
42590 +static int
42591 +SendMessage (CM_RAIL *cmRail, int nodeId, int lvl, int type)   /* send from a "spare" buffer; returns 0 if all spares are busy */
42592 +{
42593 +    int        msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
42594 +    int n         = CM_NUM_SPARE_MSG_BUFFERS;
42595 +    int retries;
42596 +
42597 +    ASSERT (type == CM_MSG_TYPE_IMCOMING ||    /* other types must use SendToSgmt */
42598 +           type == CM_MSG_TYPE_REJOIN);
42599 +   
42600 +    while (n-- > 0 && MsgBusy (cmRail, msgNumber))     /* search for idle "spare" buffer */
42601 +    {
42602 +       if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS)
42603 +           cmRail->NextSpareMsg = 0;
42604 +      
42605 +       msgNumber = CM_NUM_NODE_MSG_BUFFERS + cmRail->NextSpareMsg;
42606 +    }
42607 +
42608 +    if (n < 0)                                 /* all busy: BUGFIX was 'n == 0', but exhaustion leaves n == -1 and n == 0 can mean an idle buffer found on the last probe */
42609 +    {
42610 +       CPRINTF3 (3, "%s: all spare message buffers busy: trying to send type %d to %d\n",
42611 +                 cmRail->Rail->Name, type, nodeId);
42612 +       return (0);
42613 +    }
42614 +
42615 +    /* NB IMCOMING may be echoed by MANY nodes, so we don't (and mustn't) have any retries */
42616 +    retries = (type == CM_MSG_TYPE_IMCOMING) ? 0 : CM_P2P_DMA_RETRIES;
42617 +   
42618 +    LaunchMessage (cmRail, msgNumber, EP_VP_NODE (nodeId), EP_SYSTEMQ_INTR, /* eager receive */
42619 +                  retries, type, lvl, 0);
42620 +   
42621 +    if (++(cmRail->NextSpareMsg) == CM_NUM_SPARE_MSG_BUFFERS) /* check this one last next time */
42622 +       cmRail->NextSpareMsg = 0;
42623 +
42624 +    return (1);
42625 +}
42626 +
42627 +static int
42628 +SendToSgmt (CM_RAIL *cmRail, CM_SGMT *sgmt, int type)  /* send a tree-protocol message to segment 'sgmt'; returns 0 if its buffer is still busy */
42629 +{    
42630 +    bitmap_t         seg;
42631 +    int              offset;
42632 +    int              nmaps;
42633 +    int              sidx;
42634 +    int              clvl;
42635 +    
42636 +    ASSERT (sgmt->Level <= cmRail->TopLevel);
42637 +
42638 +    if (MsgBusy (cmRail, sgmt->MsgNumber))             /* previous message still busy */
42639 +    {
42640 +       CPRINTF3 (3, "%s: node message buffer busy: trying to send type %d to %d\n",
42641 +                 cmRail->Rail->Name, type, sgmt->NodeId);
42642 +      
42643 +       return (0);
42644 +    }
42645 +
42646 +    switch (type)
42647 +    {
42648 +    case CM_MSG_TYPE_RESOLVE_LEADER:
42649 +    case CM_MSG_TYPE_DISCOVER_LEADER:
42650 +       ASSERT (sgmt->State == CM_SGMT_ABSENT);
42651 +       ASSERT (sgmt->Level == ((cmRail->Role == CM_ROLE_LEADER_CANDIDATE) ? cmRail->TopLevel : cmRail->TopLevel - 1));
42652 +       ASSERT (sgmt->Level < cmRail->NumLevels);
42653 +       ASSERT (sgmt->Sgmt == cmRail->Levels[sgmt->Level].MySgmt);
42654 +      
42655 +       /* broadcast to me and all my peers at this level (== my segment in the level above) */
42656 +       sidx = (sgmt->Level == cmRail->NumLevels - 1) ? 0 : cmRail->Levels[sgmt->Level + 1].MySgmt;
42657 +
42658 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level + 1, sidx), 
42659 +                      EP_SYSTEMQ_INTR, 0,              /* eager rx; no retries */
42660 +                      type, sgmt->Level, 0);
42661 +       return (1);
42662 +      
42663 +    case CM_MSG_TYPE_DISCOVER_SUBORDINATE:
42664 +       ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt);
42665 +       ASSERT (sgmt->State == CM_SGMT_WAITING);
42666 +       ASSERT (sgmt->Level > 0);                       /* broadcasting just to subtree */
42667 +      
42668 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_BCAST (sgmt->Level, sgmt->Sgmt), 
42669 +                      EP_SYSTEMQ_INTR, 0,              /* eager rx; no retries */
42670 +                      CM_MSG_TYPE_DISCOVER_SUBORDINATE, sgmt->Level, 0);
42671 +       return (1);
42672 +      
42673 +    case CM_MSG_TYPE_NOTIFY:
42674 +       ASSERT (sgmt->State == CM_SGMT_PRESENT);
42675 +      
42676 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId),
42677 +                      EP_SYSTEMQ_INTR, CM_P2P_DMA_RETRIES, /* eager rx; lots of retries */
42678 +                      CM_MSG_TYPE_NOTIFY, sgmt->Level, 0);
42679 +       return (1);
42680 +      
42681 +    case CM_MSG_TYPE_HEARTBEAT:
42682 +    {
42683 +       CM_MSG *msg = ep_outputq_msg (cmRail->Rail, cmRail->MsgQueue, sgmt->MsgNumber);
42684 +       CM_HDR *hdr = &msg->Hdr;
42685 +
42686 +       ASSERT (sgmt->State == CM_SGMT_PRESENT);
42687 +       
42688 +       hdr->AckSeq = sgmt->AckSeq;
42689 +   
42690 +       if (!sgmt->MsgAcked)                    /* Current message not acknowledged */
42691 +       {
42692 +           /* must have been something significant to require an ack */
42693 +           ASSERT (sgmt->SendMaps);
42694 +           ASSERT (sgmt->NumMaps > 0);
42695 +           
42696 +           CPRINTF3 (3, "%s: retrying heartbeat to %d (%d entries)\n", cmRail->Rail->Name, sgmt->NodeId, sgmt->NumMaps);
42697 +
42698 +           IncrStat (cmRail, RetryHeartbeat);
42699 +
42700 +           nmaps = sgmt->NumMaps;              /* resend the same statemap payload */
42701 +       }
42702 +       else
42703 +       {
42704 +           nmaps = 0;
42705 +      
42706 +           if (sgmt->SendMaps)                 /* can send maps */
42707 +           {
42708 +               for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
42709 +               {
42710 +                   if (!sgmt->Maps[clvl].OutputMapValid)
42711 +                       continue;
42712 +                   
42713 +                   while ((offset = statemap_findchange (sgmt->Maps[clvl].OutputMap, &seg, 1)) >= 0)
42714 +                   {
42715 +                       CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)];
42716 +
42717 +                       sgmt->Maps[clvl].SentChanges = 1;
42718 +                       
42719 +                       map->level  = clvl;
42720 +                       map->offset = offset;
42721 +                       map->seg[0] = seg & 0xffff;     /* split the bitmap word into 16-bit chunks */
42722 +                       map->seg[1] = (seg >> 16) & 0xffff;
42723 +#if (BT_ULSHIFT == 6)
42724 +                       map->seg[2] = (seg >> 32) & 0xffff;
42725 +                       map->seg[3] = (seg >> 48) & 0xffff;
42726 +#elif (BT_ULSHIFT != 5)
42727 +#error "Bad value for BT_ULSHIFT"
42728 +#endif
42729 +                       if (++nmaps == CM_MSG_MAXMAPS)
42730 +                           goto msg_full;
42731 +                   }
42732 +
42733 +                   if (sgmt->Maps[clvl].SentChanges)   /* terminate this level's run of changes */
42734 +                   {
42735 +                       CM_STATEMAP_ENTRY *map = &msg->Payload.Statemaps[CM_MSG_MAP(nmaps)];
42736 +
42737 +                       sgmt->Maps[clvl].SentChanges = 0;
42738 +
42739 +                       map->level  = clvl;
42740 +                       map->offset = STATEMAP_NOMORECHANGES;
42741 +                       
42742 +                       if (++nmaps == CM_MSG_MAXMAPS)
42743 +                           goto msg_full;
42744 +                   }
42745 +               }
42746 +           }
42747 +           
42748 +           ASSERT (nmaps < CM_MSG_MAXMAPS);
42749 +
42750 +       msg_full:
42751 +           sgmt->NumMaps = nmaps;              /* remember how many in case we retry */
42752 +
42753 +           if (nmaps == 0)                     /* no changes to send */
42754 +               hdr->Seq = sgmt->MsgSeq;        /* this one can be dropped */
42755 +           else
42756 +           {
42757 +               hdr->Seq = ++(sgmt->MsgSeq);    /* on to next message number */
42758 +               sgmt->MsgAcked = 0;             /* need this one to be acked before I can send another */
42759 +
42760 +               IncrStat (cmRail, MapChangesSent);
42761 +           }
42762 +       }
42763 +
42764 +       LaunchMessage (cmRail, sgmt->MsgNumber, EP_VP_NODE (sgmt->NodeId), 
42765 +                      EP_SYSTEMQ_POLLED,  CM_P2P_DMA_RETRIES, /* polled receive, lots of retries */
42766 +                      CM_MSG_TYPE_HEARTBEAT, sgmt->Level, nmaps);
42767 +
42768 +       IncrStat (cmRail, HeartbeatsSent);
42769 +
42770 +       return (1);
42771 +    }
42772 +
42773 +    default:                                   /* other types must use SendMessage */
42774 +       printk ("SendToSgmt: invalid type %d\n", type);
42775 +       ASSERT (0);
42776 +
42777 +       return (1);
42778 +    }
42779 +}
42780 +
42781 +static char *
42782 +GlobalStatusString (statemap_t *map, int idx)  /* decode node 'idx's CM_GSTATUS_BITS-wide status field into a 4-char flag string */
42783 +{
42784 +    char *strings[] = {"....", "S...", "C...", "R...", 
42785 +                      ".s..", "Ss..", "Cs..", "Rs..", 
42786 +                      "..r.", "S.r.", "C.r.", "R.r.", 
42787 +                      ".sr.", "Ssr.", "Csr.", "Rsr.", 
42788 +                      "...R", "S..R", "C..R", "R..R", 
42789 +                      ".s.R", "Ss.R", "Cs.R", "Rs.R", 
42790 +                      "..rR", "S.rR", "C.rR", "R.rR", 
42791 +                      ".srR", "SsrR", "CsrR", "RsrR"};       /* one entry per 5-bit status value (32 entries) */
42792 +    
42793 +    return (strings[statemap_getbits (map, idx * CM_GSTATUS_BITS, CM_GSTATUS_BITS)]);
42794 +}
42795 +
42796 +static char *
42797 +MapString (char *name, statemap_t *map, int nnodes, char *trailer)     /* format 'map' into a static buffer; NOT reentrant. NOTE(review): name/trailer lengths are not checked against the buffer size */
42798 +{
42799 +    static char *space;                        /* allocated once on first use, never freed */
42800 +    int          i;
42801 +
42802 +    if (space == NULL)
42803 +       KMEM_ALLOC (space, char *, EP_MAX_NODES*(CM_GSTATUS_BITS+1), 0);
42804 +
42805 +    if (space == NULL)
42806 +       return ("<cannot allocate memory>");
42807 +    else
42808 +    {
42809 +       char *ptr = space;
42810 +
42811 +       sprintf (space, "%s ", name); ptr += strlen (ptr);
42812 +       for (i = 0; i < nnodes; i++, ptr += strlen (ptr))
42813 +           sprintf (ptr, "%s%s", i == 0 ? "" : ",", GlobalStatusString (map, i));
42814 +       sprintf (ptr, " %s", trailer);
42815 +       return (space);
42816 +    }
42817 +}
42818 +
42819 +void
42820 +DisplayMap (DisplayInfo *di, CM_RAIL *cmRail, char *name, statemap_t *map, int nnodes, char *trailer)  /* print 'map' via di->func, 32 node entries per output line */
42821 +{
42822 +    char  linebuf[256];
42823 +    char *ptr = linebuf;
42824 +    int   i;
42825 +
42826 +#define NODES_PER_LINE 32
42827 +    for (i = 0; i < nnodes; i++)
42828 +    {
42829 +       if (ptr == linebuf)                     /* starting a fresh line: prefix with the first node index */
42830 +       {
42831 +           sprintf (ptr, "%4d", i);
42832 +           ptr += strlen (ptr);
42833 +       }
42834 +       
42835 +       sprintf (ptr, ",%s", GlobalStatusString (map, i));
42836 +       ptr += strlen (ptr);
42837 +
42838 +       if ((i % NODES_PER_LINE) == (NODES_PER_LINE-1) || (i == (nnodes-1)))    /* line full, or last node */
42839 +       {
42840 +           (di->func)(di->arg, "%s: %s %s %s\n", cmRail->Rail->Name, name, linebuf, trailer);
42841 +           ptr = linebuf;
42842 +       }
42843 +    }
42844 +#undef NODES_PER_LINE
42845 +}
42846 +
42847 +void
42848 +DisplayNodeMaps (DisplayInfo *di, CM_RAIL *cmRail)     /* dump every statemap (per-level subordinate, local, subtree, global) for each cluster level */
42849 +{
42850 +    int   lvl;
42851 +    int   clvl;
42852 +    char  mapname[128];
42853 +    
42854 +    (di->func)(di->arg, "%s: Node %d maps...\n", cmRail->Rail->Name, cmRail->Rail->Position.pos_nodeid);
42855 +
42856 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
42857 +    {
42858 +       int nnodes = cmRail->Levels[clvl].NumNodes;
42859 +
42860 +       (di->func)(di->arg, "%s: Cluster level %d: Connected %ld - %s%s\n", 
42861 +                  cmRail->Rail->Name, clvl, cmRail->Levels[clvl].Connected,
42862 +                  cmRail->Levels[clvl].Online     ? "Online" : "Offline",
42863 +                  cmRail->Levels[clvl].Restarting ? ", Restarting" : "");
42864 +
42865 +       for (lvl = 0; lvl < cmRail->TopLevel && lvl <= clvl; lvl++)     /* subordinate maps exist only up to my TopLevel */
42866 +       {
42867 +           CM_LEVEL *level = &cmRail->Levels[lvl];
42868 +
42869 +           sprintf (mapname, "%10s%2d", "Level", lvl);
42870 +           DisplayMap (di, cmRail, mapname, level->SubordinateMap[clvl], nnodes,
42871 +                       level->SubordinateMapValid[clvl] ? "" : "(invalid)");
42872 +       }
42873 +
42874 +       sprintf (mapname, "%12s", "Local");
42875 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LocalMap, nnodes, "");
42876 +
42877 +       sprintf (mapname, "%12s", "Subtree");
42878 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].SubTreeMap, nnodes, 
42879 +                   cmRail->Levels[clvl].SubTreeMapValid ? "" : "(invalid)");
42880 +
42881 +       sprintf (mapname, "%12s", "Global");
42882 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].GlobalMap, nnodes, 
42883 +                   cmRail->Levels[clvl].GlobalMapValid ? "" : "(invalid)");
42884 +
42885 +       sprintf (mapname, "%12s", "LastGlobal");
42886 +       DisplayMap (di, cmRail, mapname, cmRail->Levels[clvl].LastGlobalMap, nnodes, "");
42887 +    }
42888 +}
42889 +
42890 +void
42891 +DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail)    /* dump the state of every segment at every level up to my TopLevel */
42892 +{
42893 +    int   lvl;
42894 +    int   sidx;
42895 +    
42896 +    (di->func)(di->arg, "%s: Node %d segments...\n", cmRail->Rail->Name, cmRail->NodeId);
42897 +    
42898 +    for (lvl = 0; lvl <= cmRail->TopLevel && lvl < cmRail->NumLevels; lvl++)
42899 +    {
42900 +       (di->func)(di->arg, "   level %d: ", lvl);
42901 +       
42902 +       for (sidx = 0; sidx < ((lvl == cmRail->TopLevel) ? 1 : cmRail->Levels[lvl].NumSegs); sidx++)    /* at TopLevel only the leader segment exists */
42903 +       {
42904 +           CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx];
42905 +               
42906 +           if (sgmt->State == CM_SGMT_PRESENT)
42907 +               (di->func)(di->arg, "[%d, in: %d out: %d %s%s]", 
42908 +                       sgmt->NodeId,
42909 +                       sgmt->AckSeq,
42910 +                       sgmt->MsgSeq,
42911 +                       sgmt->MsgAcked ? "A" : "-",
42912 +                       sgmt->SendMaps ? "!" : "-");
42913 +           else
42914 +               (di->func)(di->arg, "[%s]", (sgmt->State == CM_SGMT_ABSENT ? "absent" :
42915 +                                sgmt->State == CM_SGMT_WAITING ? "waiting" :
42916 +                                sgmt->State == CM_SGMT_COMING ? "coming" : "UNKNOWN"));
42917 +       }
42918 +       (di->func)(di->arg, "\n");
42919 +    }
42920 +}
42921 +
42922 +
42923 +static void
42924 +StartConnecting (CM_RAIL *cmRail, CM_SGMT *sgmt, int NodeId, int Timestamp)    /* (re)establish segment 'sgmt' as a connection to 'NodeId' and reset its statemap exchange state */
42925 +{
42926 +    int clvl;
42927 +
42928 +    CPRINTF4 (2, "%s: lvl %d subtree %d node %d -> connecting\n", cmRail->Rail->Name, sgmt->Level, sgmt->Sgmt, NodeId);
42929 +
42930 +    /* Only reconnect the same guy if he was reborn */
42931 +    ASSERT (sgmt->State != CM_SGMT_PRESENT ||
42932 +           (sgmt->NodeId == NodeId && sgmt->Timestamp != Timestamp));
42933 +   
42934 +    /* After we've connected to a new peer, we wait to receive
42935 +     * STATEMAP_RESET before we accumulate changes and we wait for a
42936 +     * complete map to be received before we propagate changes to other
42937 +     * nodes.
42938 +     *
42939 +     * If I'm the subordinate, I can start sending maps right away, since
42940 +     * the leader is ready for them already.  If I'm the leader, I hold off
42941 +     * sending maps until I've seen the subordinate's first heartbeat,
42942 +     * because the subordinate might miss my NOTIFY message, still think
42943 +     * she's a leader candidate and ignore my heartbeats.
42944 +     */
42945 +    sgmt->SendMaps = (sgmt->Level == cmRail->TopLevel); /* I can send maps to my leader (she NOTIFIED me) */
42946 +
42947 +    for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
42948 +    {
42949 +       statemap_reset (sgmt->Maps[clvl].CurrentInputMap);
42950 +       statemap_reset (sgmt->Maps[clvl].InputMap);
42951 +       statemap_reset (sgmt->Maps[clvl].OutputMap);
42952 +       
42953 +       sgmt->Maps[clvl].InputMapValid = 0;
42954 +       sgmt->Maps[clvl].OutputMapValid = 0;
42955 +       sgmt->Maps[clvl].SentChanges = 0;
42956 +
42957 +       if (sgmt->Level == cmRail->TopLevel)    /* connection to leader */
42958 +       {
42959 +           ASSERT (sgmt->Sgmt == 0);
42960 +           ASSERT (cmRail->Role == CM_ROLE_SUBORDINATE);
42961 +
42962 +           if (cmRail->Levels[clvl].SubTreeMapValid) /* already got a subtree map to send up */
42963 +           {
42964 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap);
42965 +               sgmt->Maps[clvl].OutputMapValid = 1;
42966 +
42967 +               statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
42968 +           }
42969 +       }
42970 +       else                                    /* connection to subordinate */
42971 +       {
42972 +           ASSERT (sgmt->Sgmt != cmRail->Levels[sgmt->Level].MySgmt);
42973 +
42974 +           if (cmRail->Levels[clvl].GlobalMapValid) /* already got a global map to broadcast */
42975 +           {
42976 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap);
42977 +               sgmt->Maps[clvl].OutputMapValid = 1;
42978 +           }
42979 +       }
42980 +    }
42981 +    
42982 +    /* Initialise sequence counters */
42983 +    sgmt->MsgSeq = sgmt->AckSeq = 0;
42984 +    sgmt->MsgAcked = 1;                        /* ready to send a new sequenced message */
42985 +   
42986 +    sgmt->State      = CM_SGMT_PRESENT;
42987 +    sgmt->NodeId     = NodeId;
42988 +    sgmt->UpdateTick = lbolt;
42989 +    sgmt->Timestamp  = Timestamp;
42990 +}
42991 +
42992 +static void
42993 +StartSubTreeDiscovery (CM_RAIL *cmRail, CM_SGMT *sgmt) /* mark 'sgmt' WAITING for a subordinate and kick off discovery for non-leaf levels */
42994 +{
42995 +    sgmt->State = CM_SGMT_WAITING;
42996 +    sgmt->UpdateTick = lbolt;
42997 +    sgmt->WaitingTick = lbolt;
42998 +
42999 +    if (sgmt->Level > 0)                       /* level 0 segments are leaves - nothing to discover below them */
43000 +       __Schedule_Discovery (cmRail);
43001 +}
43002 +
43003 +void
43004 +StartSubordinateDiscovery (CM_RAIL *cmRail)    /* begin discovery of subordinates in every segment of the level directly below me */
43005 +{
43006 +    int       i;
43007 +    int       lvl = cmRail->TopLevel - 1;
43008 +    CM_LEVEL *level = &cmRail->Levels[lvl];
43009 +
43010 +    ASSERT (lvl >= 0 && lvl < cmRail->NumLevels);
43011 +
43012 +    for (i = 0; i < level->NumSegs; i++)
43013 +    {
43014 +        CM_SGMT *sgmt = &level->Sgmts[i];
43015 +       
43016 +       if (i != level->MySgmt)         /* No-one should connect here */
43017 +           StartSubTreeDiscovery (cmRail, sgmt);
43018 +    }
43019 +}
43020 +
43021 +void
43022 +StartLeaderDiscovery (CM_RAIL *cmRail) /* become a leader candidate: invalidate maps above TopLevel and start discovering a leader */
43023 +{
43024 +    int       i;
43025 +    int       clvl;
43026 +    CM_LEVEL *level = &cmRail->Levels[cmRail->TopLevel];
43027 +
43028 +    ASSERT (cmRail->TopLevel < cmRail->NumLevels);
43029 +
43030 +    for (clvl = cmRail->TopLevel; clvl < cmRail->NumLevels; clvl++)    /* maps at and above TopLevel are now stale */
43031 +    {
43032 +        cmRail->Levels[clvl].GlobalMapValid = 0;
43033 +       cmRail->Levels[clvl].SubTreeMapValid = 0;
43034 +        level->SubordinateMapValid[clvl] = 0;
43035 +    }
43036 +
43037 +    for (i = 0; i < level->NumSegs; i++)
43038 +    {
43039 +        CM_SGMT *sgmt = &level->Sgmts[i];
43040 +       
43041 +       sgmt->State = CM_SGMT_ABSENT;
43042 +    }
43043 +
43044 +    cmRail->DiscoverStartTick = lbolt;
43045 +    cmRail->Role = CM_ROLE_LEADER_CANDIDATE;
43046 +   
43047 +    __Schedule_Discovery (cmRail);
43048 +}
43049 +
43050 +static void
43051 +RaiseTopLevel (CM_RAIL *cmRail)        /* promote myself one level; become whole-machine leader or resume leader discovery */
43052 +{
43053 +    ASSERT (cmRail->NumLevels != 0);
43054 +    ASSERT (cmRail->TopLevel < cmRail->NumLevels);
43055 +
43056 +    CPRINTF2 (2, "%s: RaiseTopLevel %d\n", cmRail->Rail->Name, cmRail->TopLevel + 1);
43057 +
43058 +    if (++cmRail->TopLevel == cmRail->NumLevels)       /* whole machine leader? */
43059 +       cmRail->Role = CM_ROLE_LEADER;
43060 +    else
43061 +       StartLeaderDiscovery (cmRail);          /* look for my leader */
43062 +
43063 +    StartSubordinateDiscovery (cmRail);                /* and any direct subordinates */
43064 +}
43065 +
43066 +static void
43067 +LowerTopLevel (CM_RAIL *cmRail, int lvl)       /* demote myself to level 'lvl' and restart leader discovery from there */
43068 +{
43069 +    ASSERT (cmRail->NumLevels != 0);
43070 +    ASSERT (lvl < cmRail->NumLevels);
43071 +
43072 +    CPRINTF2 (2, "%s: LowerTopLevel %d\n", cmRail->Rail->Name, lvl);
43073 +
43074 +    if (lvl == 0)
43075 +       cmRail->Timestamp = lbolt;              /* presumably a fresh incarnation stamp when restarting from the bottom - peers compare Timestamp */
43076 +
43077 +    cmRail->TopLevel = lvl;
43078 +
43079 +    StartLeaderDiscovery (cmRail);             /* look for my leader */
43080 +}
43081 +
43082 +static int
43083 +IShouldLead (CM_RAIL *cmRail, CM_MSG *msg)     /* leadership arbitration: the lower node id wins */
43084 +{
43085 +    /* NB, this function MUST be consistently calculated on any nodes, just
43086 +     * from the info supplied in the message.  Otherwise leadership
43087 +     * arbitration during concurrent discovery will fail.
43088 +     */
43089 +    return (cmRail->NodeId < msg->Hdr.NodeId);
43090 +}
43091 +
43092 +static int
43093 +SumCheck (CM_MSG *msg) /* validate header sanity and checksum of a received message; returns 1 if OK */
43094 +{
43095 +    CM_HDR   *hdr   = &msg->Hdr;
43096 +    uint16_t  sum   = hdr->Checksum;
43097 +    uint16_t  nmaps = hdr->NumMaps;
43098 +
43099 +    if (nmaps > CM_MSG_MAXMAPS) {
43100 +       printk ("SumCheck: nmaps %d > CM_MSG_MAXMAPS\n", nmaps);
43101 +       return 0;
43102 +    }
43103 +    
43104 +    if ((hdr->Type != CM_MSG_TYPE_HEARTBEAT) && nmaps != 0) {  /* only heartbeats carry statemaps */
43105 +       printk ("SumCheck: type(%d) not HEARTBEAT and nmaps(%d) != 0\n", hdr->Type, nmaps);
43106 +       return 0;
43107 +    }
43108 +
43109 +    hdr->Checksum = 0;                 /* sender computed the sum with this field zero; NB not restored afterwards */
43110 +    
43111 +    if (CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)) != sum) {
43112 +       printk ("SumCheck: checksum failed %x %x\n", CheckSum ((char *)msg + CM_MSG_BASE(nmaps), CM_MSG_SIZE(nmaps)), sum);
43113 +
43114 +       return 0;
43115 +    }
43116 +       
43117 +    return 1;
43118 +}
43119 +
43120 +static void
43121 +ProcessMessage (EP_RAIL *rail, void *arg, void *msgbuf)
43122 +{
43123 +    CM_RAIL       *cmRail = (CM_RAIL *) arg;
43124 +    CM_MSG         *msg    = (CM_MSG *) msgbuf;
43125 +    CM_HDR         *hdr    = &msg->Hdr;
43126 +    int             lvl;
43127 +    int             sidx;
43128 +    CM_LEVEL       *level;
43129 +    CM_SGMT        *sgmt;
43130 +    bitmap_t        seg;
43131 +    int             i;
43132 +    int            delay;
43133 +    static long    tlast;
43134 +    static int     count;
43135 +
43136 +    /* Poll the message Version field until the message has completely
43137 +     * arrived in main memory. */
43138 +    for (delay = 1; hdr->Version == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
43139 +       DELAY (delay);
43140 +
43141 +    /* Display a message every 60 seconds if we see an "old" format message */
43142 +    if (hdr->Version == EP_SYSTEMQ_UNRECEIVED && (((lbolt - tlast) > 60*HZ) ? (count = 0) : ++count) < 1)
43143 +    {
43144 +       printk ("%s: received old protocol message (type %d from node %d)\n", cmRail->Rail->Name, 
43145 +               ((uint8_t *) msg)[20], ((uint16_t *) msg)[4]);
43146 +
43147 +       tlast = lbolt;
43148 +       goto finished;
43149 +    }
43150 +
43151 +    if (hdr->Version != CM_MSG_VERSION || hdr->ParamHash != cmRail->ParamHash || hdr->MachineId != MachineId)
43152 +    {
43153 +       CPRINTF8 (1, "%s: invalid message : Version %08x (%08x) ParamHash %08x (%08x) MachineId %04x (%04x) Nodeid %d\n", cmRail->Rail->Name,
43154 +                 hdr->Version, CM_MSG_VERSION, hdr->ParamHash, cmRail->ParamHash, hdr->MachineId, MachineId, hdr->NodeId);
43155 +       goto finished;
43156 +    }
43157 +
43158 +    if (!SumCheck (msg))
43159 +    {
43160 +       printk ("%s: checksum failed on msg from %d?\n", cmRail->Rail->Name, hdr->NodeId);
43161 +       goto finished;
43162 +    }
43163 +    
43164 +    if (hdr->NodeId == cmRail->NodeId)         /* ignore my own broadcast */       
43165 +    {
43166 +       CPRINTF3 (6, "%s: node %d type %d: ignored (MESSAGE FROM ME)\n", 
43167 +                 cmRail->Rail->Name, hdr->NodeId, hdr->Type);
43168 +
43169 +       if (hdr->Type != CM_MSG_TYPE_DISCOVER_LEADER && hdr->Type != CM_MSG_TYPE_RESOLVE_LEADER)
43170 +           printk ("%s: node %d type %d: ignored (MESSAGE FROM ME)\n", 
43171 +                   cmRail->Rail->Name, hdr->NodeId, hdr->Type);
43172 +       goto finished;
43173 +    }
43174 +
43175 +    lvl = hdr->Level;
43176 +    level = &cmRail->Levels[lvl];
43177 +
43178 +    if (BrokenLevel != -1 && (lvl >= ((BrokenLevel >> (cmRail->Rail->Number*4)) & 0xf)))                       /* Simulate broken network? */
43179 +       goto finished;
43180 +    
43181 +    if (lvl >= cmRail->NumLevels ||            /* from outer space  */
43182 +       hdr->NodeId < level->MinNodeId ||       /* from outside this level's subtree */
43183 +       hdr->NodeId >= level->MinNodeId + level->NumNodes)
43184 +    {
43185 +       printk ("%s: lvl %d node %d type %d: ignored (%s)\n", 
43186 +               cmRail->Rail->Name, lvl, hdr->NodeId, hdr->Type, 
43187 +               lvl >= cmRail->NumLevels ? "level too big for machine" : "outside subtree");
43188 +       goto finished;
43189 +    }
43190 +
43191 +    sidx = SegmentNo (cmRail, hdr->NodeId, lvl);
43192 +    sgmt = &level->Sgmts[sidx];
43193 +    
43194 +    switch (hdr->Type)
43195 +    {
43196 +    case CM_MSG_TYPE_RESOLVE_LEADER:
43197 +       if (lvl >= cmRail->TopLevel)
43198 +       {
43199 +           CPRINTF4 (6, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: ignored (above my level)\n", 
43200 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43201 +           break;
43202 +       }
43203 +
43204 +       /* someone else thinks they lead at the same level as me */
43205 +       CPRINTF4 (1, "%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", 
43206 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43207 +       
43208 +       printk ("%s: lvl %d sidx %d node %d RESOLVE_LEADER: !REJOIN (putsch)\n", 
43209 +               cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43210 +       
43211 +
43212 +       SendMessage (cmRail, hdr->NodeId, lvl, CM_MSG_TYPE_REJOIN);
43213 +       break;
43214 +       
43215 +    case CM_MSG_TYPE_DISCOVER_LEADER:
43216 +       if (lvl > cmRail->TopLevel)
43217 +       {
43218 +           CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (above my level)\n", 
43219 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43220 +           break;
43221 +       }
43222 +
43223 +       if (sidx == level->MySgmt)              /* someone I led thinks they lead some of my subtrees */
43224 +       {
43225 +           CPRINTF4 (1, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", 
43226 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43227 +
43228 +           printk ("%s: lvl %d sidx %d node %d DISCOVER_LEADER: !REJOIN (putsch)\n", 
43229 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43230 +
43231 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
43232 +           break;
43233 +       }       
43234 +
43235 +       if (lvl < cmRail->TopLevel)                     /* I'm the leader of this level */
43236 +       {
43237 +           if (sgmt->State == CM_SGMT_PRESENT &&       /* someone thinks someone I lead is dead */
43238 +               sgmt->NodeId != hdr->NodeId)
43239 +           {
43240 +               /* My subordinate's death could be noticed by one of her peers
43241 +                * before I do.  If she _is_ dead, I'll notice before long and
43242 +                * NOTIFY this discover.  If this discover completes before I
43243 +                * detect my subordinate's death, the discovering node will
43244 +                * try to take over from me, and then I'll RESET her.
43245 +                */
43246 +               CPRINTF4 (6, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: ignored (got established subordinate)\n", 
43247 +                         cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43248 +               return;
43249 +           }
43250 +
43251 +           if (sgmt->State != CM_SGMT_PRESENT || /* New connection */
43252 +               sgmt->Timestamp != hdr->Timestamp) /* new incarnation */
43253 +               StartConnecting (cmRail, sgmt, hdr->NodeId, hdr->Timestamp);
43254 +         
43255 +           CPRINTF4 (2, "%s: lvl %d sidx %d node %d DISCOVER_LEADER: !NOTIFY)\n", 
43256 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43257 +         
43258 +           SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_NOTIFY);
43259 +           break;
43260 +       }
43261 +
43262 +       ASSERT (lvl == cmRail->TopLevel);
43263 +
43264 +       if (cmRail->Role == CM_ROLE_SUBORDINATE)
43265 +       {
43266 +           /* I think my leader is alive, in which case she'll NOTIFY this
43267 +            * DISCOVER.  If she's dead, I'll start to become a leader
43268 +            * candidate and handle this appropriately.
43269 +            */
43270 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: ignored (I'm a subordinate)\n", 
43271 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
43272 +           break;
43273 +       }
43274 +       
43275 +       ASSERT (cmRail->Role == CM_ROLE_LEADER_CANDIDATE);
43276 +
43277 +       /* A peer at this level is bidding for leadership along with me */
43278 +       if (IShouldLead (cmRail, msg))
43279 +       {
43280 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: but I should lead\n", 
43281 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
43282 +
43283 +           /* So there _is_ someone there; She'll be seeing my DISCOVER
43284 +            * messages and extending her discovery period, so that when I
43285 +            * become leader, I'll NOTIFY her.  In the meantime I'll flag her
43286 +            * activity, so she remains WAITING.
43287 +            */
43288 +           sgmt->UpdateTick = lbolt;
43289 +           break;
43290 +       }
43291 +       
43292 +       /* Defer to sender... */
43293 +       CPRINTF3 (6, "%s: lvl %d node %d DISCOVER: delaying me becoming leader\n", 
43294 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
43295 +       
43296 +       StartLeaderDiscovery (cmRail);
43297 +       break;
43298 +
43299 +    case CM_MSG_TYPE_DISCOVER_SUBORDINATE:
43300 +       if (lvl <= cmRail->TopLevel)
43301 +       {
43302 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (from my subtree)\n", 
43303 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
43304 +           break;
43305 +       }
43306 +       
43307 +       if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE)
43308 +       {
43309 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (I'm not looking for a leader)\n", 
43310 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
43311 +           break;
43312 +       }
43313 +       
43314 +       if (hdr->Level > cmRail->BroadcastLevel && AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT))
43315 +       {
43316 +           CPRINTF3 (6, "%s: lvl %d node %d DISCOVER_SUBORDINATE: ignored (broadcast level too low)\n",
43317 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
43318 +           break;
43319 +       }
43320 +
43321 +       CPRINTF3 (2, "%s: lvl %d node %d DISCOVER_SUBORDINATE: !IMCOMING\n", 
43322 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
43323 +       
43324 +       SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_IMCOMING);
43325 +       break;
43326 +
43327 +    case CM_MSG_TYPE_IMCOMING:
43328 +       if (lvl > cmRail->TopLevel ||           /* from peer or node above me */
43329 +           sgmt->State == CM_SGMT_PRESENT ||   /* already got a subtree */
43330 +           sgmt->State == CM_SGMT_ABSENT)      /* already written off this subtree */
43331 +       {
43332 +           CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: ignored\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43333 +           break;
43334 +       }
43335 +
43336 +       CPRINTF4 (2, "%s: lvl %d sidx %d node %d IMCOMING: waiting...\n", cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43337 +
43338 +       sgmt->State = CM_SGMT_COMING;
43339 +       sgmt->UpdateTick = lbolt;
43340 +       break;
43341 +       
43342 +    case CM_MSG_TYPE_NOTIFY:
43343 +       if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE || /* I'm not looking for a leader */
43344 +           lvl != cmRail->TopLevel)            /* at this level */
43345 +       {
43346 +           /* If this person really should be my leader, my existing leader
43347 +            * will time out, and I'll discover this one. */
43348 +           CPRINTF4 (2, "%s: lvl %d node %d NOTIFY: ignored (%s)\n", 
43349 +                     cmRail->Rail->Name, lvl, hdr->NodeId,
43350 +                     lvl < cmRail->TopLevel ? "already leader" : 
43351 +                     lvl > cmRail->TopLevel ? "lvl too high" : "already subordinate");
43352 +           break;
43353 +       }
43354 +
43355 +       CPRINTF3 (2, "%s: lvl %d node %d NOTIFY: becoming subordinate\n", 
43356 +                 cmRail->Rail->Name, lvl, hdr->NodeId);
43357 +       
43358 +       cmRail->Role = CM_ROLE_SUBORDINATE;             /* Now I've found my level */
43359 +       StartConnecting (cmRail, &level->Sgmts[0], hdr->NodeId, hdr->Timestamp);
43360 +       break;
43361 +
43362 +    case CM_MSG_TYPE_HEARTBEAT:
43363 +       if (lvl > cmRail->TopLevel)
43364 +       {
43365 +           CPRINTF3 (2, "%s: lvl %d node %d H/BEAT: ignored (lvl too high)\n", 
43366 +                     cmRail->Rail->Name, lvl, hdr->NodeId);
43367 +           break;
43368 +       }
43369 +
43370 +       if (lvl == cmRail->TopLevel)                    /* heartbeat from my leader */
43371 +       {
43372 +           if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE) /* but I've not got one */
43373 +           {
43374 +               /* I'm probably a new incarnation of myself; I'll keep doing
43375 +                * discovery until my previous existence's leader NOTIFY's me.
43376 +                * If I was this node's leader, she'll time me out (I'm not
43377 +                * sending heartbeats to her) and we'll fight it out for
43378 +                * leadership. */
43379 +               CPRINTF3 (2, "%s: lvl %d node %d H/BEAT ignored (no leader)\n", 
43380 +                         cmRail->Rail->Name, lvl, hdr->NodeId);
43381 +               break;
43382 +           }
43383 +           sidx = 0;
43384 +           sgmt = &level->Sgmts[0];
43385 +       }
43386 +      
43387 +       if (sgmt->State != CM_SGMT_PRESENT ||   /* not fully connected with this guy */
43388 +           sgmt->NodeId != hdr->NodeId ||      /* someone else impersonating my peer */
43389 +           sgmt->Timestamp != hdr->Timestamp)  /* new incarnation of my peer */
43390 +       {
43391 +           CPRINTF4 (1, "%s: lvl %d sidx %d node %d H/BEAT: !REJOIN\n", 
43392 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId);
43393 +         
43394 +           printk ("%s: lvl %d sidx %d node %d H/BEAT: !REJOIN %s\n",
43395 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId,
43396 +                   sgmt->State != CM_SGMT_PRESENT ? "not present" :
43397 +                   sgmt->NodeId != hdr->NodeId ? "someone else" : "new incarnation");
43398 +           
43399 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
43400 +           break;
43401 +       }
43402 +
43403 +       if (!((hdr->Seq == sgmt->AckSeq) ||     /* NOT duplicate message or */
43404 +             (hdr->Seq == (CM_SEQ)(sgmt->AckSeq + 1))) || /* expected message */
43405 +           !((hdr->AckSeq == sgmt->MsgSeq) ||  /* NOT expected ack or */
43406 +             (hdr->AckSeq == (CM_SEQ)(sgmt->MsgSeq - 1)))) /* duplicate ack */
43407 +       {
43408 +           CPRINTF9 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", 
43409 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, 
43410 +                     (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq);
43411 +        
43412 +           printk ("%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (out-of-seq) M(%d,a%d) S%d,A%d\n", 
43413 +                   cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, 
43414 +                   (int)hdr->Seq, (int)hdr->AckSeq, (int)sgmt->MsgSeq, (int)sgmt->AckSeq);
43415 +        
43416 +           SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
43417 +           break;
43418 +       }
43419 +
43420 +       IncrStat (cmRail, HeartbeatsRcvd);
43421 +
43422 +       sgmt->UpdateTick = lbolt;
43423 +       sgmt->SendMaps = 1;
43424 +
43425 +       if (sgmt->MsgSeq == hdr->AckSeq)                /* acking current message */
43426 +           sgmt->MsgAcked = 1;                 /* can send the next one */
43427 +
43428 +       if (hdr->Seq == sgmt->AckSeq)           /* discard duplicate (or NULL heartbeat) */
43429 +       {
43430 +           CPRINTF6 (6, "%s: lvl %d sidx %d node %d type %d: %s H/BEAT\n", 
43431 +                     cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type,
43432 +                     hdr->NumMaps == 0 ? "null" : "duplicate");
43433 +           break;
43434 +       }
43435 +
43436 +       CPRINTF7 (6, "%s: lvl %d sidx %d node %d type %d: seq %d maps %d H/BEAT\n", 
43437 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, hdr->Seq, hdr->NumMaps);
43438 +
43439 +       sgmt->AckSeq = hdr->Seq;                        /* ready to receive next one */
43440 +       
43441 +       for (i = 0; i < hdr->NumMaps; i++)
43442 +       {
43443 +           CM_STATEMAP_ENTRY *map  = &msg->Payload.Statemaps[CM_MSG_MAP(i)];
43444 +           int                clvl = map->level;
43445 +           
43446 +           if (clvl < 0)                       /* end of message */
43447 +               break;
43448 +
43449 +           if (clvl < sgmt->Level)             /* bad level */
43450 +           {
43451 +               CPRINTF6 (1, "%s: lvl %d sidx %d node %d type %d: H/BEAT !REJOIN (bad clevel %d)\n", 
43452 +                         cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type, clvl);
43453 +
43454 +               SendMessage (cmRail, hdr->NodeId, hdr->Level, CM_MSG_TYPE_REJOIN);
43455 +               goto finished;
43456 +           }
43457 +
43458 +           if (map->offset == STATEMAP_NOMORECHANGES) /* end of atomic changes */
43459 +           {
43460 +               if (!sgmt->Maps[clvl].InputMapValid || /* not set InputMap yet */
43461 +                   statemap_changed (sgmt->Maps[clvl].CurrentInputMap)) /* previously applied changes */
43462 +               {
43463 +                   CPRINTF3 (4, "%s: received new clvl %d map from %d\n", cmRail->Rail->Name, clvl, sgmt->NodeId);
43464 +
43465 +                   statemap_setmap (sgmt->Maps[clvl].InputMap, sgmt->Maps[clvl].CurrentInputMap);
43466 +                   sgmt->Maps[clvl].InputMapValid = 1;
43467 +
43468 +                   statemap_clearchanges (sgmt->Maps[clvl].CurrentInputMap);
43469 +               }
43470 +               continue;
43471 +           }
43472 +           
43473 +           seg = ((bitmap_t)map->seg[0])
43474 +               | (((bitmap_t)map->seg[1]) << 16)
43475 +#if (BT_ULSHIFT == 6)
43476 +               | (((bitmap_t)map->seg[2]) << 32)
43477 +               | (((bitmap_t)map->seg[3]) << 48)
43478 +#elif (BT_ULSHIFT != 5)
43479 +#error "Bad value for BT_ULSHIFT"
43480 +#endif
43481 +               ;
43482 +           statemap_setseg (sgmt->Maps[clvl].CurrentInputMap, map->offset, seg);
43483 +       }
43484 +       break;
43485 +
43486 +    case CM_MSG_TYPE_REJOIN:
43487 +       CPRINTF5 (1, "%s: lvl %d sidx %d node %d type %d: REJOIN\n",
43488 +                 cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type);
43489 +       printk ("%s: lvl %d sidx %d node %d type %d: REJOIN\n", 
43490 +               cmRail->Rail->Name, lvl, sidx, hdr->NodeId, hdr->Type);
43491 +
43492 +       LowerTopLevel (cmRail, 0);
43493 +
43494 +       IncrStat (cmRail, RejoinRequest);
43495 +       break;
43496 +
43497 +    default:
43498 +       printk ("%s: lvl=%d unknown message type %d\n", cmRail->Rail->Name, lvl, hdr->Type);
43499 +       break;
43500 +    }
43501 + finished:
43502 +    hdr->Version = EP_SYSTEMQ_UNRECEIVED;
43503 +}
43504 +
43505 +static void
43506 +PollInputQueues (CM_RAIL *cmRail)
43507 +{
43508 +    ep_poll_inputq (cmRail->Rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail);
43509 +    ep_poll_inputq (cmRail->Rail, cmRail->PolledQueue, 0, ProcessMessage, cmRail);
43510 +}
43511 +
43512 +static void
43513 +IntrQueueCallback (EP_RAIL *rail, void *arg)
43514 +{
43515 +    CM_RAIL *cmRail = (CM_RAIL *) arg;
43516 +    unsigned long flags;
43517 +
43518 +    /* If the lock is held, then don't bother spinning for it, 
43519 +     * since the messages will be received at this, or the
43520 +     * next heartbeat */
43521 +    local_irq_save (flags);
43522 +    if (spin_trylock (&cmRail->Lock))
43523 +    {
43524 +       if (AFTER (lbolt, cmRail->NextRunTime + MSEC2TICKS(CM_TIMER_SCHEDULE_TIMEOUT)))
43525 +           printk ("%s: heartbeat timer stuck - scheduled\n", cmRail->Rail->Name);
43526 +       else
43527 +           ep_poll_inputq (rail, cmRail->IntrQueue, 0, ProcessMessage, cmRail);
43528 +       spin_unlock (&cmRail->Lock);
43529 +    }
43530 +    local_irq_restore (flags);
43531 +}
43532 +
43533 +char *
43534 +sprintClPeers (char *str, CM_RAIL *cmRail, int clvl)
43535 +{
43536 +   int clLo     = cmRail->Levels[clvl].MinNodeId;
43537 +   int clHi     = clLo + cmRail->Levels[clvl].NumNodes - 1;
43538 +   int subClLo  = (clvl == 0) ? cmRail->NodeId : cmRail->Levels[clvl - 1].MinNodeId;
43539 +   int subClHi  = subClLo + ((clvl == 0) ? 0 : cmRail->Levels[clvl - 1].NumNodes - 1);
43540 +   
43541 +   if (subClHi == clHi)
43542 +      sprintf (str, "[%d-%d]", clLo, subClLo - 1);
43543 +   else if (subClLo == clLo)
43544 +      sprintf (str, "[%d-%d]", subClHi + 1, clHi);
43545 +   else
43546 +      sprintf (str, "[%d-%d][%d-%d]", clLo, subClLo - 1, subClHi + 1, clHi);
43547 +
43548 +   return (str);
43549 +}
43550 +
43551 +static void
43552 +RestartComms (CM_RAIL *cmRail, int clvl)
43553 +{
43554 +    int             base;
43555 +    int             nodeId;
43556 +    int             lstat;
43557 +    int             numClNodes;
43558 +    int             subClMin;
43559 +    int             subClMax;
43560 +    int             myClId;
43561 +    int             thisClId;
43562 +    
43563 +    myClId     = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
43564 +    base       = myClId * CM_GSTATUS_BITS;
43565 +    numClNodes = cmRail->Levels[clvl].NumNodes;
43566 +
43567 +    statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
43568 +                     CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
43569 +    cmRail->Levels[clvl].Restarting = 1;
43570 +
43571 +    if (cmRail->Levels[clvl].Online)
43572 +    {
43573 +       cmRail->Levels[clvl].Online = 0;
43574 +       
43575 +       for (thisClId = 0; thisClId < numClNodes; thisClId++)
43576 +       {
43577 +           if (thisClId == subClMin)   /* skip sub-cluster; it's just someone in this cluster */
43578 +           {                           /* that wants me to restart */
43579 +               thisClId = subClMax;
43580 +               continue;
43581 +           }
43582 +           
43583 +           nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
43584 +           base   = thisClId * CM_GSTATUS_BITS;
43585 +           lstat  = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
43586 +           
43587 +           if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN)
43588 +           {
43589 +               switch (ep_disconnect_node (cmRail->Rail, nodeId))
43590 +               {
43591 +               case EP_NODE_CONNECTING:
43592 +                   /* gstat must == RUNNING */
43593 +                   cmRail->Levels[clvl].Connected--;
43594 +                   break;
43595 +               case EP_NODE_DISCONNECTED:
43596 +                   /* CLOSING || STARTING || (lstat & RESTART) */
43597 +                   break;
43598 +               }
43599 +           }
43600 +       }
43601 +    }
43602 +}
43603 +
43604 +static void
43605 +UpdateGlobalStatus (CM_RAIL *cmRail)
43606 +{
43607 +    char            clNodeStr[32];                             /* [%d-%d][%d-%d] */
43608 +    int             nodeId;
43609 +    int             offset;
43610 +    int             base;
43611 +    bitmap_t        gstat;
43612 +    bitmap_t        lgstat;
43613 +    bitmap_t        lstat;
43614 +    int             clvl;
43615 +    int             numClNodes;
43616 +    int             subClMin;
43617 +    int             subClMax;
43618 +    int             myClId;
43619 +    int             thisClId;
43620 +    int             lastClId;
43621 +
43622 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
43623 +    {
43624 +       if (!cmRail->Levels[clvl].GlobalMapValid || /* not got the global map yet */
43625 +           !statemap_changed (cmRail->Levels[clvl].GlobalMap)) /* no changes to respond to */
43626 +       {
43627 +           CPRINTF2 (6, "%s: Got invalid or unchanged clvl %d global map\n", cmRail->Rail->Name, clvl);
43628 +           continue;
43629 +       }
43630 +       
43631 +       CPRINTF2 (5, "%s: Got valid changed clvl %d global map\n", cmRail->Rail->Name, clvl);
43632 +       
43633 +       lastClId = -1;
43634 +       myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
43635 +       numClNodes = cmRail->Levels[clvl].NumNodes;
43636 +       
43637 +       while ((offset = statemap_findchange (cmRail->Levels[clvl].GlobalMap, &gstat, 1)) >= 0)
43638 +       {
43639 +           /*
43640 +            * Check every node that this segment covers - however
43641 +            * if the last node we checked in the previous segment
43642 +            * is also the first node in this segment, then skip
43643 +            * it.
43644 +            */
43645 +           if ((thisClId = (offset/CM_GSTATUS_BITS)) == lastClId)
43646 +               thisClId++;
43647 +           lastClId = (offset + BT_NBIPUL - 1)/CM_GSTATUS_BITS;
43648 +           
43649 +           /* check each node that might have changed */
43650 +           for ( ; thisClId <= lastClId && thisClId < numClNodes; thisClId++)
43651 +           {
43652 +               base = thisClId * CM_GSTATUS_BITS;
43653 +               nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
43654 +
43655 +               if (thisClId >= subClMin && thisClId <= subClMax) /* skip sub-cluster */
43656 +                   continue;
43657 +
43658 +               /* This isn't me; I need to sense what this node is driving
43659 +                * (just the starting and running bits) and respond
43660 +                * appropriately...
43661 +                */
43662 +               lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
43663 +               gstat  = statemap_getbits (cmRail->Levels[clvl].GlobalMap,     base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
43664 +
43665 +               if (lgstat == gstat)            /* no change in peer state */
43666 +                   continue;
43667 +
43668 +               CPRINTF5 (3, "%s: Node %d: lgstat %s, gstat %s, lstat %s\n", cmRail->Rail->Name, nodeId,
43669 +                         GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
43670 +                         GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
43671 +                         GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
43672 +
43673 +               /* What I'm currently driving as my acknowledgement */
43674 +               lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
43675 +
43676 +               switch (gstat)
43677 +               {
43678 +               case CM_GSTATUS_STARTING:
43679 +                   if ((lgstat == CM_GSTATUS_ABSENT || lgstat == CM_GSTATUS_CLOSING) && lstat == CM_GSTATUS_MAY_START)
43680 +                   {
43681 +                       CPRINTF2 (1, "%s: ===================node %d STARTING\n", cmRail->Rail->Name, nodeId);
43682 +                       
43683 +                       ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED);
43684 +
43685 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
43686 +                       continue;
43687 +                   }
43688 +                   break;
43689 +                   
43690 +               case CM_GSTATUS_RUNNING:
43691 +                   if ((lgstat == CM_GSTATUS_ABSENT   && lstat == CM_GSTATUS_MAY_START) ||
43692 +                       (lgstat == CM_GSTATUS_STARTING && lstat == CM_GSTATUS_MAY_RUN))
43693 +                   {
43694 +                       CPRINTF3 (1, "%s: ===================node %d%s RUNNING\n", cmRail->Rail->Name, nodeId,
43695 +                                 lgstat == CM_GSTATUS_ABSENT ? " Already" : "");
43696 +
43697 +                       ASSERT (cmRail->Rail->Nodes[nodeId].State == EP_NODE_DISCONNECTED);
43698 +
43699 +                       if (cmRail->Levels[clvl].Online)
43700 +                       {
43701 +                           ep_connect_node (cmRail->Rail, nodeId);
43702 +
43703 +                           cmRail->Levels[clvl].Connected++;
43704 +                       }
43705 +
43706 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
43707 +                       continue;
43708 +                   }
43709 +                   break;
43710 +                   
43711 +               case CM_GSTATUS_CLOSING:
43712 +                   CPRINTF4 (1, "%s: ===================node %d CLOSING%s%s\n", cmRail->Rail->Name, nodeId,
43713 +                             (lstat & CM_GSTATUS_RESTART) ? " for Restart" : "",
43714 +                             cmRail->Levels[clvl].Online ? "" : " (offline)");
43715 +
43716 +                   if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN)
43717 +                   {
43718 +                       switch (ep_disconnect_node (cmRail->Rail, nodeId))
43719 +                       {
43720 +                       case EP_NODE_CONNECTING:
43721 +                           cmRail->Levels[clvl].Connected--;
43722 +                           /* DROPTHROUGH */
43723 +                       case EP_NODE_DISCONNECTED:
43724 +                           lstat = CM_GSTATUS_MAY_START;
43725 +                           break;
43726 +                       }
43727 +                   }
43728 +
43729 +                   if ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_START) /* clear restart if we've disconnected */
43730 +                       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
43731 +                   continue;
43732 +                   
43733 +               default:
43734 +                   break;
43735 +               }
43736 +
43737 +               /* "unexpected" state change forces me to ask her to restart */
43738 +               if (! (lstat & CM_GSTATUS_RESTART))             /* not requesting restart already */
43739 +               {
43740 +                   CPRINTF5 (1, "%s: ===================node %d %s, old %s new %s\n", cmRail->Rail->Name, nodeId,
43741 +                             (gstat == CM_GSTATUS_ABSENT)  ? "ABSENT" : "REQUEST RESTART", 
43742 +                             GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
43743 +                             GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId));
43744 +
43745 +                   /* request restart */
43746 +                   if (cmRail->Levels[clvl].Online && lstat == CM_GSTATUS_MAY_RUN)
43747 +                   {
43748 +                       switch (ep_disconnect_node (cmRail->Rail, nodeId))
43749 +                       {
43750 +                       case EP_NODE_CONNECTING:
43751 +                           cmRail->Levels[clvl].Connected--;
43752 +                           /* DROPTHROUGH */
43753 +                       case EP_NODE_DISCONNECTED:
43754 +                           lstat = CM_GSTATUS_MAY_START;
43755 +                           break;
43756 +                       }
43757 +                   }
43758 +
43759 +                   statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
43760 +                   continue;
43761 +               }
43762 +
43763 +               continue;
43764 +           }
43765 +       }
43766 +    
43767 +       /* Now check myself - see what everyone else thinks I'm doing */
43768 +       base   = myClId * CM_GSTATUS_BITS;
43769 +       lstat  = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
43770 +       gstat  = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS);
43771 +       lgstat = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS);
43772 +
43773 +       if (lgstat == gstat)                    /* my state in this cluster hasn't changed */
43774 +       {
43775 +           CPRINTF3 (6, "%s: my clvl %d global status unchanged from %s\n", cmRail->Rail->Name,
43776 +                     clvl, GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId));
43777 +           goto all_done;
43778 +       }
43779 +
43780 +       if ((gstat & CM_GSTATUS_RESTART) != 0)  /* someone wants me to restart */
43781 +       {
43782 +           if ((lstat & CM_GSTATUS_STATUS_MASK) == CM_GSTATUS_CLOSING) /* I'm already restarting */
43783 +               goto all_done;
43784 +           
43785 +           CPRINTF2 (1, "%s: ===================RESTART REQUEST from %s\n", cmRail->Rail->Name,
43786 +                     sprintClPeers (clNodeStr, cmRail, clvl));
43787 +           
43788 +           printk ("%s: Restart Request from %s\n", cmRail->Rail->Name,
43789 +                   sprintClPeers (clNodeStr, cmRail, clvl));
43790 +           
43791 +           RestartComms (cmRail, clvl);
43792 +           goto all_done;
43793 +       }
43794 +       
43795 +       CPRINTF6 (5, "%s: clvl %d: lgstat %s gstat %s, lstat %s%s\n", cmRail->Rail->Name, clvl,
43796 +                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, myClId),
43797 +                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, myClId),
43798 +                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, myClId),
43799 +                 (gstat != lstat) ? " (IGNORED)" : "");
43800 +                       
43801 +       if (gstat != lstat)                     /* not everyone agrees with me */
43802 +           goto all_done;
43803 +
43804 +       switch (lstat)
43805 +       {
43806 +       default:
43807 +           ASSERT (0);                         /* I never drive this */
43808 +           
43809 +       case CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START: /* I can restart now (have seen restart go away) */
43810 +           ASSERT (!cmRail->Levels[clvl].Online);
43811 +           
43812 +           CPRINTF2 (1,"%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name,
43813 +                     sprintClPeers (clNodeStr, cmRail, clvl));
43814 +           printk ("%s: ===================NODES %s AGREE I MAY START\n", cmRail->Rail->Name,
43815 +                   sprintClPeers (clNodeStr, cmRail, clvl));
43816 +           
43817 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
43818 +                             CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
43819 +           goto all_done;
43820 +           
43821 +       case CM_GSTATUS_STARTING | CM_GSTATUS_MAY_RUN:
43822 +           ASSERT (!cmRail->Levels[clvl].Online);
43823 +           
43824 +           CPRINTF2 (1, "%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name,
43825 +                     sprintClPeers (clNodeStr, cmRail, clvl));
43826 +           printk ("%s: ===================NODES %s AGREE I MAY RUN\n", cmRail->Rail->Name,
43827 +                   sprintClPeers (clNodeStr, cmRail, clvl));
43828 +           
43829 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, 
43830 +                             CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
43831 +           goto all_done;
43832 +           
43833 +       case CM_GSTATUS_RUNNING | CM_GSTATUS_MAY_RUN:
43834 +           if (! cmRail->Levels[clvl].Online)
43835 +           {
43836 +               CPRINTF2 (1, "%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name,
43837 +                         sprintClPeers (clNodeStr, cmRail, clvl));
43838 +               printk ("%s: ===================NODES %s AGREE I'M RUNNING\n", cmRail->Rail->Name,
43839 +                       sprintClPeers (clNodeStr, cmRail, clvl));
43840 +               
43841 +               cmRail->Levels[clvl].Online = 1;
43842 +               
43843 +               for (thisClId = 0; thisClId < numClNodes; thisClId++)
43844 +               {
43845 +                   if (thisClId == subClMin)   /* skip sub-cluster */
43846 +                   {
43847 +                       thisClId = subClMax;
43848 +                       continue;
43849 +                   }
43850 +                   
43851 +                   nodeId = cmRail->Levels[clvl].MinNodeId + thisClId;
43852 +                   
43853 +                   base  = thisClId * CM_GSTATUS_BITS;
43854 +                   lstat = statemap_getbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_BITS);
43855 +                   gstat = statemap_getbits (cmRail->Levels[clvl].GlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
43856 +                   
43857 +                   /* Only connect to her if I see her as running and I'm not requesting her 
43858 +                    * to restart - this means that I was offline when I saw her transition
43859 +                    * to running and haven't seen her in a "bad" state since. */
43860 +                   if (gstat == CM_GSTATUS_RUNNING && ! (lstat & CM_GSTATUS_RESTART))
43861 +                   {
43862 +                       CPRINTF5 (1, "%s: node %d lgstat %s gstat %s, lstat %s -> CONNECT\n", cmRail->Rail->Name, nodeId,
43863 +                                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
43864 +                                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
43865 +                                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
43866 +                       
43867 +                       if (lstat == CM_GSTATUS_MAY_START)
43868 +                           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_RUN, CM_GSTATUS_BITS);
43869 +
43870 +                       ep_connect_node (cmRail->Rail, nodeId);
43871 +
43872 +                       cmRail->Levels[clvl].Connected++;
43873 +                   }
43874 +               }
43875 +           }
43876 +           goto all_done;
43877 +       }
43878 +
43879 +    all_done:
43880 +       statemap_setmap (cmRail->Levels[clvl].LastGlobalMap, cmRail->Levels[clvl].GlobalMap);
43881 +    }
43882 +}
43883 +
43884 +static void
43885 +ReduceGlobalMap (CM_RAIL *cmRail, int clvl)
43886 +{
43887 +    int       lvl;
43888 +    int       sidx;
43889 +    int       recompute;
43890 +    CM_LEVEL *level;
43891 +    int       cTopLevel;
43892 +    int       cRole;
43893 +
43894 +    if (clvl < cmRail->TopLevel)
43895 +    {
43896 +       cTopLevel = clvl + 1;
43897 +       cRole = CM_ROLE_LEADER;
43898 +    }
43899 +    else
43900 +    {
43901 +       cTopLevel = cmRail->TopLevel;
43902 +       cRole = cmRail->Role;
43903 +    }
43904 +    
43905 +    /* Update cmRail->Levels[*].SubordinateMap[clvl] for all subordinate levels */
43906 +    for (lvl = 0; lvl < cTopLevel; lvl++)
43907 +    {
43908 +       level = &cmRail->Levels[lvl];
43909 +
43910 +       /* We need to recompute this level's statemap if...
43911 +        * . Previous level's statemap has changes to propagate OR
43912 +        * . This level's statemap has not been computed yet OR
43913 +        * . A subordinate at this level has sent me a change.
43914 +        * Note that we can only do this if all subordinates from this
43915 +        * level down are present with valid statemaps, or absent (i.e. not
43916 +        * timing out).
43917 +        */
43918 +
43919 +       ASSERT (lvl == 0 || cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]);
43920 +
43921 +       recompute = !level->SubordinateMapValid[clvl] ||
43922 +                   (lvl > 0 && statemap_changed (cmRail->Levels[lvl - 1].SubordinateMap[clvl]));
43923 +         
43924 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
43925 +       {
43926 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
43927 +
43928 +           if (!(sgmt->State == CM_SGMT_ABSENT || /* absent nodes contribute zeroes */
43929 +                 (sgmt->State == CM_SGMT_PRESENT && /* present nodes MUST have received a map to contribute */
43930 +                  sgmt->Maps[clvl].InputMapValid)))
43931 +           {
43932 +               CPRINTF5 (5, "%s: waiting for clvl %d lvl %d seg %d node %d\n", cmRail->Rail->Name,
43933 +                         clvl, lvl, sidx, sgmt->NodeId);
43934 +
43935 +               /* Gotta wait for this guy, so we can't compute this level,
43936 +                * or any higher levels. */
43937 +               return;
43938 +           }
43939 +
43940 +           if (statemap_changed (sgmt->Maps[clvl].InputMap))
43941 +           {
43942 +               ASSERT (sgmt->Maps[clvl].InputMapValid);
43943 +
43944 +               recompute = 1;
43945 +
43946 +               CPRINTF7 (5, "%s: %s clvl %d map from @ %d %d (%d) - %s\n",
43947 +                         cmRail->Rail->Name, sgmt->State == CM_SGMT_ABSENT ? "newly absent" : "got new",
43948 +                         clvl, lvl, sidx, sgmt->NodeId, 
43949 +                         MapString ("Input", sgmt->Maps[clvl].InputMap, cmRail->Levels[clvl].NumNodes, ""));
43950 +           }
43951 +       }
43952 +
43953 +       if (recompute)
43954 +       {
43955 +           if (lvl == 0)
43956 +               statemap_reset (cmRail->Levels[clvl].TmpMap);
43957 +           else
43958 +           {
43959 +               ASSERT (cmRail->Levels[lvl - 1].SubordinateMapValid[clvl]);
43960 +               
43961 +               statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[lvl - 1].SubordinateMap[clvl]);
43962 +               statemap_clearchanges (cmRail->Levels[lvl - 1].SubordinateMap[clvl]);
43963 +           }
43964 +        
43965 +           for (sidx = 0; sidx < level->NumSegs; sidx++)
43966 +           {
43967 +               CM_SGMT *sgmt = &level->Sgmts[sidx];
43968 +               
43969 +               if (sgmt->State != CM_SGMT_ABSENT)      /* absent nodes contribute zeroes */
43970 +               {
43971 +                   ASSERT (sgmt->State == CM_SGMT_PRESENT);
43972 +                   ASSERT (sgmt->Maps[clvl].InputMapValid);
43973 +                   statemap_ormap (cmRail->Levels[clvl].TmpMap, sgmt->Maps[clvl].InputMap);
43974 +               }
43975 +               statemap_clearchanges (sgmt->Maps[clvl].InputMap);
43976 +           }
43977 +
43978 +           statemap_setmap (level->SubordinateMap[clvl], cmRail->Levels[clvl].TmpMap);
43979 +           level->SubordinateMapValid[clvl] = 1;
43980 +
43981 +           CPRINTF4 (5, "%s: recompute clvl %d level %d statemap - %s\n", cmRail->Rail->Name, clvl, lvl,
43982 +                     MapString ("level", level->SubordinateMap[clvl], cmRail->Levels[clvl].NumNodes, ""));
43983 +       }
43984 +    }
43985 +
43986 +    if (cRole == CM_ROLE_LEADER_CANDIDATE)     /* don't know this cluster's leader yet */
43987 +       return;
43988 +
43989 +    ASSERT (cTopLevel == 0 || cmRail->Levels[cTopLevel - 1].SubordinateMapValid[clvl]);
43990 +
43991 +    /* Update SubTreeMap */
43992 +    
43993 +    if (!cmRail->Levels[clvl].SubTreeMapValid || 
43994 +       statemap_changed (cmRail->Levels[clvl].LocalMap) ||
43995 +       (cTopLevel > 0 && statemap_changed (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl])))
43996 +    {
43997 +       statemap_copy (cmRail->Levels[clvl].TmpMap, cmRail->Levels[clvl].LocalMap);
43998 +       statemap_clearchanges (cmRail->Levels[clvl].LocalMap);
43999 +
44000 +       if (cTopLevel > 0)
44001 +       {
44002 +           statemap_ormap (cmRail->Levels[clvl].TmpMap, cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]);
44003 +           statemap_clearchanges (cmRail->Levels[cTopLevel - 1].SubordinateMap[clvl]);
44004 +       }
44005 +
44006 +       statemap_setmap (cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].TmpMap);
44007 +       cmRail->Levels[clvl].SubTreeMapValid = 1;
44008 +
44009 +       CPRINTF3 (5, "%s: recompute clvl %d subtree map - %s\n", cmRail->Rail->Name, clvl,
44010 +                 MapString ("subtree", cmRail->Levels[clvl].SubTreeMap, cmRail->Levels[clvl].NumNodes, ""));
44011 +    }
44012 +
44013 +    if (cRole == CM_ROLE_SUBORDINATE)          /* got a leader (Not me) */
44014 +    {                                          /* => send SubTreeMap to her */
44015 +       CM_SGMT *leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0];
44016 +
44017 +       ASSERT (leader->State == CM_SGMT_PRESENT);
44018 +       ASSERT (cmRail->Levels[clvl].SubTreeMapValid);
44019 +
44020 +       if (!leader->Maps[clvl].OutputMapValid ||
44021 +           statemap_changed (cmRail->Levels[clvl].SubTreeMap))
44022 +       {
44023 +           statemap_setmap (leader->Maps[clvl].OutputMap, cmRail->Levels[clvl].SubTreeMap);
44024 +           leader->Maps[clvl].OutputMapValid = 1;
44025 +
44026 +           statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
44027 +
44028 +           CPRINTF3 (5, "%s: sending clvl %d subtree map to leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId);
44029 +       }
44030 +    }
44031 +}
44032 +
44033 +void
44034 +BroadcastGlobalMap (CM_RAIL *cmRail, int clvl)
44035 +{
44036 +    int       lvl;
44037 +    int       sidx;
44038 +    CM_LEVEL *level;
44039 +    CM_SGMT  *leader;
44040 +    int       cTopLevel;
44041 +    int       cRole;
44042 +
44043 +    if (clvl < cmRail->TopLevel)
44044 +    {
44045 +       cTopLevel = clvl + 1;
44046 +       cRole = CM_ROLE_LEADER;
44047 +    }
44048 +    else
44049 +    {
44050 +       cTopLevel = cmRail->TopLevel;
44051 +       cRole = cmRail->Role;
44052 +    }
44053 +
44054 +    switch (cRole)
44055 +    {
44056 +    default:
44057 +       ASSERT (0);
44058 +       
44059 +    case CM_ROLE_LEADER_CANDIDATE:             /* don't know this cluster's leader yet */
44060 +       return;
44061 +
44062 +    case CM_ROLE_LEADER:                       /* cluster leader: */
44063 +       ASSERT (clvl < cmRail->TopLevel);               /* set GlobalMap from SubTreeMap */
44064 +       
44065 +       if (!cmRail->Levels[clvl].SubTreeMapValid)      /* can't set global map */
44066 +           return;
44067 +
44068 +       if (cmRail->Levels[clvl].GlobalMapValid &&      /* already set global map */
44069 +           !statemap_changed (cmRail->Levels[clvl].SubTreeMap)) /* no changes to propagate */
44070 +           return;
44071 +       
44072 +       statemap_setmap (cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].SubTreeMap);
44073 +       cmRail->Levels[clvl].GlobalMapValid = 1;
44074 +       statemap_clearchanges (cmRail->Levels[clvl].SubTreeMap);
44075 +
44076 +       CPRINTF2 (5, "%s: whole cluster %d leader setting global map\n", cmRail->Rail->Name, clvl);
44077 +
44078 +       UpdateGlobalStatus (cmRail);
44079 +       break;
44080 +       
44081 +    case CM_ROLE_SUBORDINATE:                  /* cluster subordinate: */
44082 +       ASSERT (clvl >= cmRail->TopLevel);              /* receive GlobalMap from leader */
44083 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
44084 +       
44085 +       leader = &cmRail->Levels[cmRail->TopLevel].Sgmts[0];
44086 +       ASSERT (leader->State == CM_SGMT_PRESENT);
44087 +
44088 +       if (!leader->Maps[clvl].InputMapValid)  /* can't set global map */
44089 +           return;
44090 +       
44091 +       if (cmRail->Levels[clvl].GlobalMapValid &&      /* already set global map */
44092 +           !statemap_changed (leader->Maps[clvl].InputMap)) /* no changes to propagate */
44093 +           return;
44094 +
44095 +       statemap_setmap (cmRail->Levels[clvl].GlobalMap, leader->Maps[clvl].InputMap);
44096 +       cmRail->Levels[clvl].GlobalMapValid = 1;
44097 +       statemap_clearchanges (leader->Maps[clvl].InputMap);
44098 +
44099 +       CPRINTF3 (5, "%s: getting clvl %d global map from leader (%d)\n", cmRail->Rail->Name, clvl, leader->NodeId);
44100 +
44101 +       UpdateGlobalStatus (cmRail);
44102 +       break;
44103 +    }
44104 +
44105 +    CPRINTF3 (5, "%s: clvl %d %s\n", cmRail->Rail->Name, clvl,
44106 +             MapString ("global", cmRail->Levels[clvl].GlobalMap, cmRail->Levels[clvl].NumNodes, ""));
44107 +    
44108 +    /* Broadcast global map to all subordinates */
44109 +    for (lvl = 0; lvl < cTopLevel; lvl++)
44110 +    {
44111 +       level = &cmRail->Levels[lvl];
44112 +       
44113 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
44114 +       {
44115 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
44116 +           
44117 +           if (sgmt->State == CM_SGMT_PRESENT)
44118 +           {
44119 +               statemap_setmap (sgmt->Maps[clvl].OutputMap, cmRail->Levels[clvl].GlobalMap);
44120 +               sgmt->Maps[clvl].OutputMapValid = 1;
44121 +               
44122 +               CPRINTF5 (5, "%s: sending clvl %d global map to subordinate %d %d (%d)\n", 
44123 +                         cmRail->Rail->Name, clvl, lvl, sidx, sgmt->NodeId);
44124 +           }
44125 +       }
44126 +    }
44127 +}
44128 +
44129 +static void
44130 +CheckPeerPulse (CM_RAIL *cmRail, CM_SGMT *sgmt)
44131 +{
44132 +    int clvl, sendRejoin;
44133 +    
44134 +    switch (sgmt->State)
44135 +    {
44136 +    case CM_SGMT_ABSENT:
44137 +       break;
44138 +
44139 +    case CM_SGMT_WAITING:                      /* waiting for a subtree */
44140 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT)))
44141 +           break;
44142 +      
44143 +       CPRINTF3 (2, "%s: lvl %d subtree %d contains no live nodes\n", cmRail->Rail->Name, 
44144 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
44145 +
44146 +       sgmt->State = CM_SGMT_ABSENT;
44147 +       for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
44148 +       {
44149 +           statemap_zero (sgmt->Maps[clvl].InputMap);          /* need to start propagating zeros (flags change) */
44150 +           sgmt->Maps[clvl].InputMapValid = 1;         /* and must indicate that the map is now valid */
44151 +       }
44152 +       break;
44153 +
44154 +    case CM_SGMT_COMING:                               /* lost/waiting subtree sent me IMCOMING */
44155 +       ASSERT (sgmt->Level > 0);                       /* we only do subtree discovery below our own level */
44156 +
44157 +       if (AFTER (lbolt, sgmt->WaitingTick + MSEC2TICKS(CM_WAITING_TIMEOUT)))
44158 +       {
44159 +           CPRINTF3 (1, "%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name,
44160 +                     sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
44161 +           printk ("%s: lvl %d subtree %d waiting too long\n", cmRail->Rail->Name,
44162 +                   sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
44163 +
44164 +           sgmt->State = CM_SGMT_ABSENT;
44165 +           for (clvl = sgmt->Level; clvl < cmRail->NumLevels; clvl++)
44166 +           {
44167 +               statemap_zero (sgmt->Maps[clvl].InputMap);              /* need to start propagating zeros (flags change) */
44168 +               sgmt->Maps[clvl].InputMapValid = 1;             /* and must indicate that the map is now valid */
44169 +           }
44170 +           break;
44171 +       }
44172 +
44173 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_DISCOVER_TIMEOUT)))
44174 +           break;
44175 +
44176 +       CPRINTF3 (2, "%s: lvl %d subtree %d hasn't connected yet\n", cmRail->Rail->Name,
44177 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]));
44178 +
44179 +       sgmt->State = CM_SGMT_WAITING;
44180 +       sgmt->UpdateTick = lbolt;
44181 +
44182 +       if (sgmt->Level > 0)
44183 +           __Schedule_Discovery (cmRail);
44184 +       break;
44185 +      
44186 +    case CM_SGMT_PRESENT:
44187 +       if (!AFTER (lbolt, sgmt->UpdateTick + MSEC2TICKS(CM_HEARTBEAT_TIMEOUT)))
44188 +           break;
44189 +
44190 +       if (sgmt->Level == cmRail->TopLevel)            /* leader died */
44191 +       {
44192 +           sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0);
44193 +
44194 +           CPRINTF4 (1, "%s: leader (%d) node %d JUST DIED%s\n", 
44195 +                     cmRail->Rail->Name, sgmt->Level, sgmt->NodeId,
44196 +                     sendRejoin ? ": !REJOIN" : "");
44197 +           
44198 +           printk ("%s: lvl %d leader (%d) JUST DIED%s\n", 
44199 +                   cmRail->Rail->Name, sgmt->Level, sgmt->NodeId,
44200 +                   sendRejoin ? ": !REJOIN" : "");
44201 +       
44202 +           if (sendRejoin)
44203 +           {
44204 +               /* she's not sent us any heartbeats even though she responded to a discover
44205 +                * so tell her to rejoin the tree at the bottom, this will mean that she 
44206 +                * has to run the heartbeat timer before being able to rejoin the tree. */
44207 +               SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN);
44208 +           }
44209 +
44210 +           StartLeaderDiscovery (cmRail);
44211 +           break;
44212 +       }
44213 +
44214 +       sendRejoin = (sgmt->State == CM_SGMT_PRESENT && sgmt->AckSeq == 0);
44215 +
44216 +       CPRINTF5 (2, "%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, 
44217 +                 sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId,
44218 +                 sendRejoin ? ": !REJOIN" : "");
44219 +       printk ("%s: lvl %d subordinate %d (%d) JUST DIED%s\n", cmRail->Rail->Name, 
44220 +               sgmt->Level, (int) (sgmt - &cmRail->Levels[sgmt->Level].Sgmts[0]), sgmt->NodeId,
44221 +               sendRejoin ? ": !REJOIN" : "");
44222 +
44223 +       if (sendRejoin)
44224 +       {
44225 +           /* she's not sent us any heartbeats even though she responded to a discover
44226 +            * so tell her to rejoin the tree at the bottom, this will mean that she 
44227 +            * has to run the heartbeat timer before being able to rejoin the tree. */
44228 +           SendMessage (cmRail, sgmt->NodeId, sgmt->Level, CM_MSG_TYPE_REJOIN);
44229 +       }
44230 +
44231 +       StartSubTreeDiscovery (cmRail, sgmt);
44232 +       break;
44233 +        
44234 +    default:
44235 +       ASSERT (0);
44236 +    }
44237 +}
44238 +
44239 +static void
44240 +CheckPeerPulses (CM_RAIL *cmRail)
44241 +{
44242 +    int lvl;
44243 +    int sidx;
44244 +   
44245 +    /* check children are alive */
44246 +    for (lvl = 0; lvl < cmRail->TopLevel; lvl++)
44247 +       for (sidx = 0; sidx < cmRail->Levels[lvl].NumSegs; sidx++)
44248 +           CheckPeerPulse (cmRail, &cmRail->Levels[lvl].Sgmts[sidx]);
44249 +
44250 +    /* check leader is alive */
44251 +    if (cmRail->Role == CM_ROLE_SUBORDINATE)
44252 +    {
44253 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
44254 +       ASSERT (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT);
44255 +      
44256 +       CheckPeerPulse (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0]);
44257 +    }
44258 +}
44259 +
44260 +static void
44261 +SendHeartbeats (CM_RAIL *cmRail)
44262 +{
44263 +    int lvl;
44264 +
44265 +    /* Send heartbeats to my children */
44266 +    for (lvl = 0; lvl < cmRail->TopLevel; lvl++)
44267 +    {
44268 +       CM_LEVEL *level = &cmRail->Levels[lvl];
44269 +       int       sidx;
44270 +       
44271 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
44272 +       {
44273 +           CM_SGMT *sgmt = &cmRail->Levels[lvl].Sgmts[sidx];
44274 +
44275 +           if (sgmt->State == CM_SGMT_PRESENT)
44276 +               SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_HEARTBEAT);
44277 +       }
44278 +    }
44279 +
44280 +    /* Send heartbeat to my leader */
44281 +    if (cmRail->Role == CM_ROLE_SUBORDINATE)
44282 +    {
44283 +       ASSERT (cmRail->TopLevel < cmRail->NumLevels);
44284 +       SendToSgmt (cmRail, &cmRail->Levels[cmRail->TopLevel].Sgmts[0], CM_MSG_TYPE_HEARTBEAT);
44285 +    }
44286 +}
44287 +
44288 +static int
44289 +BroadcastDiscover (CM_RAIL *cmRail)
44290 +{
44291 +    int       sidx;
44292 +    int              lvl;
44293 +    int       msgType;
44294 +    CM_LEVEL *level;
44295 +    int       urgent;
44296 +
44297 +    ASSERT (cmRail->TopLevel <= cmRail->NumLevels);
44298 +    ASSERT ((cmRail->Role == CM_ROLE_LEADER) ? (cmRail->TopLevel == cmRail->NumLevels) :
44299 +           (cmRail->Role == CM_ROLE_SUBORDINATE) ? (cmRail->Levels[cmRail->TopLevel].Sgmts[0].State == CM_SGMT_PRESENT) :
44300 +           (cmRail->Role == CM_ROLE_LEADER_CANDIDATE));
44301 +
44302 +    if (cmRail->Role != CM_ROLE_LEADER_CANDIDATE)      /* got a leader/lead whole machine */
44303 +    {
44304 +       urgent = 0;                             /* non-urgent leader discovery */
44305 +       lvl = cmRail->TopLevel - 1;             /* on nodes I lead (resolves leader conflicts) */
44306 +       msgType = CM_MSG_TYPE_RESOLVE_LEADER;
44307 +    }
44308 +    else
44309 +    {
44310 +       urgent = 1;                             /* urgent leader discovery */
44311 +       lvl = cmRail->TopLevel;                 /* on nodes I'd like to lead */
44312 +       msgType = CM_MSG_TYPE_DISCOVER_LEADER;
44313 +    }
44314 +
44315 +    if (lvl >= 0)
44316 +    {
44317 +       if (lvl > cmRail->BroadcastLevel)
44318 +       {
44319 +           /* Unable to broadcast at this level in the spanning tree, so we 
44320 +            * just continue doing discovery until we are able to broadcast */
44321 +           CPRINTF4 (6, "%s: broadcast level %d too low to discover %d at level %d\n",
44322 +                     cmRail->Rail->Name, cmRail->BroadcastLevel, msgType, lvl);
44323 +
44324 +           cmRail->DiscoverStartTick = lbolt;
44325 +       }
44326 +       else
44327 +       {
44328 +           level = &cmRail->Levels[lvl];
44329 +           SendToSgmt (cmRail, &level->Sgmts[level->MySgmt], msgType);
44330 +       }
44331 +    }
44332 +    
44333 +    while (lvl > 0)
44334 +    {
44335 +       level = &cmRail->Levels[lvl];
44336 +      
44337 +       for (sidx = 0; sidx < level->NumSegs; sidx++)
44338 +       {
44339 +           CM_SGMT *sgmt = &level->Sgmts[sidx];
44340 +        
44341 +           if (sgmt->State == CM_SGMT_WAITING)
44342 +           {
44343 +               ASSERT (sidx != level->MySgmt);
44344 +               /* Do subordinate discovery.  Existing subordinates will
44345 +                * ignore it, but leader candidates will send IMCOMING.
44346 +                * This is always urgent since we'll assume a subtree is
44347 +                * absent if I don't get IMCOMING within the timeout.
44348 +                */
44349 +               SendToSgmt (cmRail, sgmt, CM_MSG_TYPE_DISCOVER_SUBORDINATE);
44350 +               urgent = 1;
44351 +           }
44352 +       }
44353 +       lvl--;
44354 +    }
44355 +   
44356 +    return (urgent);
44357 +}
44358 +
44359 +static void
44360 +CheckBroadcast (CM_RAIL *cmRail)
44361 +{
44362 +    int  clvl;
44363 +
44364 +    for (clvl = cmRail->NumLevels-1; clvl >= 0 && cmRail->Rail->SwitchBroadcastLevel < cmRail->Levels[clvl].SwitchLevel; clvl--)
44365 +       ;
44366 +
44367 +    if (cmRail->OfflineReasons || cmRail->Rail->System->Shutdown)
44368 +       clvl = -1;
44369 +
44370 +    /* if the level at which we can broadcast drops, then we must rejoin the
44371 +     * spanning tree at the highest level for which broadcast is good. */
44372 +    if (cmRail->BroadcastLevel > clvl && clvl < (int)(cmRail->Role == CM_ROLE_LEADER ? cmRail->TopLevel - 1 : cmRail->TopLevel))
44373 +    {
44374 +       printk ("%s: REJOINING at level %d because %s\n", cmRail->Rail->Name, clvl+1, 
44375 +               (cmRail->OfflineReasons & CM_OFFLINE_MANAGER) ? "of manager thread" :
44376 +               (cmRail->OfflineReasons & CM_OFFLINE_PROCFS)  ? "force offline"  : 
44377 +               cmRail->Rail->System->Shutdown ? "system shutdown" : "broadcast level changed");
44378 +       LowerTopLevel (cmRail, clvl+1);
44379 +    }
44380 +    
44381 +    if (cmRail->BroadcastLevel != clvl)
44382 +    {
44383 +       cmRail->BroadcastLevel     = clvl;
44384 +       cmRail->BroadcastLevelTick = lbolt;
44385 +    }
44386 +
44387 +    /* schedule the update thread, to withdraw from comms with 
44388 +     * nodes "outside" of the valid broadcastable range. */
44389 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
44390 +    {
44391 +       if (cmRail->BroadcastLevel < clvl)
44392 +       {
44393 +           if (AFTER (lbolt, cmRail->BroadcastLevelTick + EP_WITHDRAW_TIMEOUT) && 
44394 +               !(cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST))
44395 +           {
44396 +               printk ("%s: Withdraw at Level %d\n", cmRail->Rail->Name, clvl);
44397 +               cmRail->Levels[clvl].OfflineReasons |= CM_OFFLINE_BROADCAST;
44398 +           }
44399 +       }
44400 +       else
44401 +       {
44402 +           if (cmRail->Levels[clvl].OfflineReasons & CM_OFFLINE_BROADCAST)
44403 +           {
44404 +               printk ("%s: Rejoin at Level %d\n", cmRail->Rail->Name, clvl);
44405 +               cmRail->Levels[clvl].OfflineReasons &= ~CM_OFFLINE_BROADCAST;
44406 +           }
44407 +       }
44408 +    }
44409 +       
44410 +}
44411 +
44412 +static void
44413 +CheckManager (CM_RAIL *cmRail)
44414 +{
44415 +    long time,  state = ep_kthread_state (&cmRail->Rail->System->ManagerThread, &time);
44416 +
44417 +    if (state == KT_STATE_RUNNING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_RUNNING_TIMEOUT)))
44418 +       state = KT_STATE_SLEEPING;
44419 +    if (state != KT_STATE_SLEEPING && BEFORE (lbolt, time + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT)))
44420 +       state = KT_STATE_SLEEPING;
44421 +
44422 +    if ((cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state == KT_STATE_SLEEPING)
44423 +    {
44424 +       printk ("%s: manager thread unstuck\n", cmRail->Rail->Name);
44425 +
44426 +       cmRail->OfflineReasons &= ~CM_OFFLINE_MANAGER;
44427 +    }
44428 +
44429 +    if (!(cmRail->OfflineReasons & CM_OFFLINE_MANAGER) && state != KT_STATE_SLEEPING)
44430 +    {
44431 +       printk ("%s: manager thread stuck - %s\n", cmRail->Rail->Name,
44432 +               state == KT_STATE_SCHEDULED ? "scheduled" : 
44433 +               state == KT_STATE_RUNNING ? "running" : 
44434 +               state == KT_STATE_STALLED ? "stalled" : "unknown");
44435 +
44436 +       cmRail->OfflineReasons |= CM_OFFLINE_MANAGER;
44437 +    }
44438 +}
44439 +
44440 +static void
44441 +CheckOfflineReasons (CM_RAIL *cmRail, int clvl)
44442 +{
44443 +    int subClMin, subClMax, myClId;
44444 +    char clNodeStr[32];                                /* [%d-%d][%d-%d] */
44445 +
44446 +    if (cmRail->Levels[clvl].OfflineReasons)
44447 +    {
44448 +       if (cmRail->Levels[clvl].Online)
44449 +       {
44450 +           printk ("%s: Withdraw from %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl));
44451 +           
44452 +           RestartComms (cmRail, clvl);
44453 +       }
44454 +    }
44455 +    else
44456 +    {
44457 +       if (cmRail->Levels[clvl].Restarting && cmRail->Levels[clvl].Connected == 0)
44458 +       {
44459 +           printk ("%s: Rejoin with %s\n", cmRail->Rail->Name, sprintClPeers (clNodeStr, cmRail, clvl));
44460 +
44461 +           myClId = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
44462 +           
44463 +           ASSERT (statemap_getbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS, CM_GSTATUS_BITS) == 
44464 +                   (CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START | CM_GSTATUS_RESTART));
44465 +    
44466 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, myClId * CM_GSTATUS_BITS,
44467 +                             CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
44468 +
44469 +           cmRail->Levels[clvl].Restarting = 0;
44470 +       }
44471 +    }
44472 +}
44473 +
44474 +void
44475 +DoHeartbeatWork (CM_RAIL *cmRail)
44476 +{
44477 +    long now = lbolt;
44478 +    int  clvl;
44479 +
44480 +    if ((RejoinCheck || RejoinPanic) &&
44481 +       AFTER (now, cmRail->NextRunTime + MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT))) /* If I've been unresponsive for too long */
44482 +    {
44483 +       /* I'd better reconnect to the network because I've not been playing the game */
44484 +       CPRINTF4 (1, "%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now,  cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT));
44485 +       printk ("%s: REJOINING because I was too slow (heartbeat) [%ld,%ld,(%ld)]\n", cmRail->Rail->Name, now,  cmRail->NextRunTime, (long int)MSEC2TICKS (CM_TIMER_SCHEDULE_TIMEOUT));
44486 +       
44487 +       LowerTopLevel (cmRail, 0);
44488 +       
44489 +       IncrStat (cmRail, RejoinTooSlow);
44490 +       
44491 +       if (RejoinPanic)
44492 +           panic ("ep: REJOINING because I was too slow (heartbeat)\n");
44493 +    }
44494 +    
44495 +    PollInputQueues (cmRail);
44496 +    
44497 +    if (! BEFORE (now, cmRail->NextDiscoverTime))
44498 +    {
44499 +       if (BroadcastDiscover (cmRail))         /* urgent discovery required? */
44500 +           cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_URGENT_DISCOVER_INTERVAL);
44501 +       else
44502 +           cmRail->NextDiscoverTime = now + MSEC2TICKS (CM_PERIODIC_DISCOVER_INTERVAL);
44503 +       
44504 +       if (cmRail->Role == CM_ROLE_LEADER_CANDIDATE && AFTER (now, cmRail->DiscoverStartTick + MSEC2TICKS (CM_DISCOVER_TIMEOUT)))
44505 +           RaiseTopLevel (cmRail);
44506 +    }
44507 +    
44508 +    if (! BEFORE (now, cmRail->NextHeartbeatTime))
44509 +    {
44510 +       CheckPosition (cmRail->Rail);
44511 +       CheckPeerPulses (cmRail);
44512 +       CheckBroadcast (cmRail);
44513 +       CheckManager (cmRail);
44514 +       
44515 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
44516 +       {
44517 +           CheckOfflineReasons (cmRail, clvl);
44518 +           ReduceGlobalMap (cmRail, clvl);
44519 +           BroadcastGlobalMap (cmRail, clvl);
44520 +       }
44521 +       
44522 +       SendHeartbeats (cmRail);
44523 +       
44524 +       /* Compute the next heartbeat time, but "drift" it towards the last
44525 +        * periodic discovery time we saw from the whole machine leader */
44526 +       cmRail->NextHeartbeatTime = now + MSEC2TICKS (CM_HEARTBEAT_INTERVAL);
44527 +    }
44528 +
44529 +    if (AFTER (cmRail->NextHeartbeatTime, cmRail->NextDiscoverTime))
44530 +       cmRail->NextRunTime = cmRail->NextDiscoverTime;
44531 +    else 
44532 +       cmRail->NextRunTime = cmRail->NextHeartbeatTime;
44533 +}
44534 +
44535 +#define CM_SVC_INDICATOR_OFFSET(CMRAIL,CLVL,IND,NODEID)     ( ( CMRAIL->Levels[CLVL].NumNodes * CM_GSTATUS_BITS ) \
44536 +                                                               + ( CMRAIL->Levels[CLVL].NumNodes * IND ) \
44537 +                                                               + ( NODEID - CMRAIL->Levels[CLVL].MinNodeId ) )
44538 +int
44539 +cm_svc_indicator_set (EP_RAIL *rail, int svc_indicator)
44540 +{
44541 +    CM_RAIL *cmRail = rail->ClusterRail;
44542 +    unsigned long flags;
44543 +    int           clvl;
44544 +
44545 +    EPRINTF2 (DBG_SVC,"cm_svc_indicator_set: rail %p ind %d\n", rail, svc_indicator);
44546 +
44547 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
44548 +    {
44549 +       EPRINTF1 (DBG_SVC,"cm_svc_indicator_set: service indicator %d not registered\n", svc_indicator);
44550 +       return (-1);
44551 +    }
44552 +
44553 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
44554 +       return (-2);
44555 +    
44556 +    spin_lock_irqsave (&cmRail->Lock, flags);
44557 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)  {
44558 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 1, 1); 
44559 +       EPRINTF3 (DBG_SVC,"cm_svc_indicator_set: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId));
44560 +    }
44561 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44562 +
44563 +    return (0);
44564 +}
44565 +
44566 +int
44567 +cm_svc_indicator_clear (EP_RAIL *rail, int svc_indicator)
44568 +{
44569 +    CM_RAIL *cmRail = rail->ClusterRail;
44570 +    unsigned long flags;
44571 +    int           clvl;
44572 +
44573 +    EPRINTF2 (DBG_SVC, "cm_svc_indicator_clear: rail %p ind %d\n", rail, svc_indicator);
44574 +
44575 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
44576 +    {
44577 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_clear: service indicator %d not registered\n", svc_indicator);
44578 +       return (-1);
44579 +    }
44580 +
44581 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
44582 +       return (-2);
44583 +
44584 +    spin_lock_irqsave (&cmRail->Lock, flags);
44585 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)  {
44586 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId), 0, 1); 
44587 +       EPRINTF3 (DBG_SVC, "cm_svc_indicator_clear: clvl %d nodeId %d offset %d\n", clvl, cmRail->NodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, cmRail->NodeId));
44588 +    }
44589 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44590 +
44591 +    return (0);
44592 +}
44593 +
44594 +int
44595 +cm_svc_indicator_is_set (EP_RAIL *rail, int svc_indicator, int nodeId)
44596 +{
44597 +    CM_RAIL *cmRail = rail->ClusterRail;
44598 +    unsigned long flags;
44599 +    int           clvl;
44600 +    bitmap_t      bits;
44601 +
44602 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: rail %p ind %d nodeId %d (me=%d)\n", rail, svc_indicator, nodeId, cmRail->NodeId);
44603 +
44604 +    if (svc_indicator < 0 || svc_indicator > EP_SVC_NUM_INDICATORS)
44605 +    {
44606 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: service indicator %d not registered\n", svc_indicator);
44607 +       return (0);
44608 +    }
44609 +
44610 +    if (rail->State == EP_RAIL_STATE_UNINITIALISED) 
44611 +       return (0);
44612 +    
44613 +    spin_lock_irqsave (&cmRail->Lock, flags);
44614 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
44615 +       if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
44616 +           break;
44617 +
44618 +    if ( clvl == cmRail->NumLevels) { 
44619 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_is_set: node out of range %d \n", nodeId); 
44620 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
44621 +       return (0);
44622 +    }
44623 +
44624 +    if ( cmRail->NodeId == nodeId ) 
44625 +       bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
44626 +    else
44627 +       bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
44628 +
44629 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_is_set: clvl %d nodeId %d offset %d %x\n", clvl, nodeId, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), bits);
44630 +
44631 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44632 +
44633 +    return  ( (bits == 0) ? (0) : (1) );
44634 +}
44635 +
44636 +int
44637 +cm_svc_indicator_bitmap (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes)
44638 +{
44639 +    /* or in the bit map */  
44640 +    CM_RAIL      *cmRail = rail->ClusterRail;
44641 +    int           nodeId, clvl;
44642 +    bitmap_t      bits;
44643 +    unsigned long flags;
44644 +    int           clip_out_low, clip_out_high;
44645 +    int           curr_low, curr_high;
44646 +    int           check_low, check_high;
44647 +
44648 +    EPRINTF4 (DBG_SVC, "cm_svc_indicator_bitmap: rail %p ind %d low %d high %d\n", rail, svc_indicator, low, (low + nnodes));
44649 +
44650 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
44651 +    {
44652 +       EPRINTF1 (DBG_SVC, "cm_svc_indicator_bitmap: service indicator %d not registered\n", svc_indicator);
44653 +       return (-1);
44654 +    }
44655 +
44656 +    if (rail->State != EP_RAIL_STATE_RUNNING) 
44657 +       return (-2);
44658 +
44659 +    spin_lock_irqsave (&cmRail->Lock, flags);
44660 +    
44661 +    clip_out_low = clip_out_high = -1; /* all in */
44662 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++) {
44663 +
44664 +       /* curr_high/low is the range of the current lvl */
44665 +       curr_low  = cmRail->Levels[clvl].MinNodeId;
44666 +       curr_high = cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes;
44667 +
44668 +       /* find out how much of low high is in this range and only check that part */
44669 +       check_low  = ( low  < curr_low)  ? curr_low  : low; 
44670 +       check_high = ( (low + nnodes) > curr_high) ? curr_high : (low + nnodes);
44671 +
44672 +       EPRINTF6 (DBG_SVC, "cm_svc_indicator_bitmap: curr(%d,%d) check(%d,%d) clip(%d,%d)\n", curr_low, curr_high, check_low, check_high, clip_out_low, clip_out_high);
44673 +
44674 +       for(nodeId = check_low; nodeId < check_high; nodeId++) {
44675 +
44676 +           if (  (clip_out_low <= nodeId) && (nodeId <= clip_out_high))
44677 +               nodeId = clip_out_high; /* step over the cliped out section */
44678 +           else {
44679 +
44680 +               if ( cmRail->NodeId == nodeId ) 
44681 +                   bits = statemap_getbits (cmRail->Levels[clvl].LocalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
44682 +               else
44683 +                   bits = statemap_getbits (cmRail->Levels[clvl].GlobalMap, CM_SVC_INDICATOR_OFFSET (cmRail, clvl, svc_indicator, nodeId), 1);
44684 +               
44685 +               if ( bits ) {
44686 +                   EPRINTF2 (DBG_SVC, "cm_svc_indicator_bitmap: its set nodeId %d (clvl %d)\n", nodeId, clvl);
44687 +                   BT_SET ( bitmap , nodeId - low );
44688 +               }
44689 +           }
44690 +       }
44691 +
44692 +       /* widen the clip out range */
44693 +       clip_out_low  = curr_low;
44694 +       clip_out_high = curr_high -1; 
44695 +    }
44696 +
44697 +    spin_unlock_irqrestore (&cmRail->Lock, flags);      
44698 +
44699 +    return (0);
44700 +}
44701 +
44702 +
44703 +static void
44704 +cm_heartbeat_timer (unsigned long arg)
44705 +{
44706 +    CM_RAIL *cmRail = (CM_RAIL *) arg;
44707 +    unsigned long flags;
44708 +
44709 +    spin_lock_irqsave (&cmRail->Lock, flags);
44710 +
44711 +    ASSERT (cmRail->Rail->State == EP_RAIL_STATE_RUNNING);
44712 +
44713 +    DoHeartbeatWork (cmRail);
44714 +    
44715 +    __Schedule_Timer (cmRail, cmRail->NextRunTime);
44716 +
44717 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44718 +}
44719 +
44720 +void
44721 +DisplayRailDo (DisplayInfo *di, EP_RAIL *rail)
44722 +{
44723 +    CM_RAIL *cmRail = rail->ClusterRail;
44724 +    unsigned long flags;
44725 +    int  i, j;
44726 +
44727 +    if (rail->State != EP_RAIL_STATE_RUNNING)
44728 +       return;
44729 +
44730 +    spin_lock_irqsave (&cmRail->Lock, flags);
44731 +
44732 +    (di->func)(di->arg, "NodeId=%d NodeLevel=%d NumLevels=%d NumNodes=%d\n", 
44733 +           cmRail->NodeId, cmRail->TopLevel, cmRail->NumLevels, cmRail->Rail->Position.pos_nodes);
44734 +    
44735 +    (di->func)(di->arg, "[");
44736 +
44737 +    for (i = 0; i < cmRail->NumLevels; i++)
44738 +    {
44739 +       if (i > 0)
44740 +           (di->func)(di->arg, ",");
44741 +       
44742 +       if (i < cmRail->TopLevel)
44743 +       {
44744 +           (di->func)(di->arg, "L ");
44745 +         
44746 +           for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
44747 +               switch (cmRail->Levels[i].Sgmts[j].State)
44748 +               {
44749 +               case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[j].NodeId); break;
44750 +               case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break;
44751 +               case CM_SGMT_COMING:  (di->func)(di->arg, "c%4s", ""); break;
44752 +               case CM_SGMT_ABSENT:  (di->func)(di->arg, ".%4s", ""); break;
44753 +               default:              (di->func)(di->arg, "?%4s", ""); break;
44754 +               }
44755 +       }
44756 +       else
44757 +           switch (cmRail->Role)
44758 +           {
44759 +           case CM_ROLE_LEADER_CANDIDATE:      
44760 +               (di->func)(di->arg,"l "); 
44761 +               for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
44762 +                   (di->func)(di->arg,"     ");
44763 +               break;
44764 +         
44765 +           case CM_ROLE_SUBORDINATE:       
44766 +               switch (cmRail->Levels[i].Sgmts[0].State)
44767 +               {
44768 +               case CM_SGMT_PRESENT: (di->func)(di->arg, "p%-4d", cmRail->Levels[i].Sgmts[0].NodeId); break;
44769 +               case CM_SGMT_WAITING: (di->func)(di->arg, "w%4s", ""); break;
44770 +               case CM_SGMT_COMING:  (di->func)(di->arg, "c%4s", ""); break;
44771 +               case CM_SGMT_ABSENT:  (di->func)(di->arg, ".%4s", ""); break;
44772 +               default:              (di->func)(di->arg, "?%4s", ""); break;
44773 +               }
44774 +               for (j = 1; j < cmRail->Levels[i].NumSegs; j++)
44775 +                   (di->func)(di->arg, "     ");
44776 +               break;
44777 +         
44778 +           default:
44779 +               (di->func)(di->arg, "####");
44780 +               break;
44781 +           }
44782 +    }
44783 +    (di->func)(di->arg, "]\n");
44784 +
44785 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44786 +}
44787 +
44788 +void
44789 +DisplayRail (EP_RAIL *rail) 
44790 +{
44791 +    if (rail->State == EP_RAIL_STATE_RUNNING)
44792 +       DisplayRailDo (&di_ep_debug, rail);
44793 +}
44794 +
44795 +void
44796 +DisplayStatus (EP_RAIL *rail)
44797 +{
44798 +    if (rail->State == EP_RAIL_STATE_RUNNING)
44799 +    {
44800 +       CM_RAIL *cmRail = rail->ClusterRail;
44801 +       unsigned long flags;
44802 +       
44803 +       spin_lock_irqsave (&cmRail->Lock, flags);
44804 +       
44805 +       DisplayNodeMaps (&di_ep_debug, cmRail);
44806 +       
44807 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
44808 +    }
44809 +}
44810 +
44811 +void
44812 +DisplaySegs (EP_RAIL *rail)
44813 +{
44814 +    if (rail->State == EP_RAIL_STATE_RUNNING)
44815 +    {
44816 +       CM_RAIL *cmRail = rail->ClusterRail;
44817 +       unsigned long flags;
44818 +       
44819 +       spin_lock_irqsave (&cmRail->Lock, flags);
44820 +       
44821 +       DisplayNodeSgmts (&di_ep_debug, cmRail);
44822 +       
44823 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
44824 +    }
44825 +}
44826 +
44827 +static void
44828 +LoadBroadcastRoute (CM_RAIL *cmRail, int lvl, int sidx)
44829 +{
44830 +    EP_RAIL *rail  = cmRail->Rail;
44831 +    int      nsegs = cmRail->Levels[0].NumSegs;
44832 +    int      vp    = EP_VP_BCAST(lvl, sidx);
44833 +    int      nodes = 1;
44834 +    int      baseNode;
44835 +    int      i;
44836 +
44837 +    ASSERT (lvl > 0 && lvl <= cmRail->NumLevels);
44838 +    ASSERT (sidx == 0 || lvl < cmRail->NumLevels);
44839 +
44840 +    ASSERT (vp >= EP_VP_BCAST_BASE && vp < EP_VP_BCAST_BASE + EP_VP_BCAST_COUNT);
44841 +
44842 +    for (i = 1; i <= lvl; i++)
44843 +    {
44844 +       nodes *= nsegs;
44845 +       nsegs = (i == cmRail->NumLevels) ? 1 : cmRail->Levels[i].NumSegs;
44846 +    }
44847 +
44848 +    baseNode = ((cmRail->NodeId / (nodes * nsegs)) * nsegs + sidx) * nodes;
44849 +
44850 +    CPRINTF5 (2, "%s: broadcast vp lvl %d sidx %d [%d,%d]\n", 
44851 +             cmRail->Rail->Name, lvl, sidx, baseNode, baseNode + nodes - 1);
44852 +    
44853 +    rail->Operations.LoadSystemRoute (rail, vp, baseNode, baseNode + nodes - 1);
44854 +}
44855 +
44856 +static void
44857 +LoadRouteTable (CM_RAIL *cmRail)
44858 +{
44859 +    EP_RAIL *rail = cmRail->Rail;
44860 +    int      i, j;
44861 +   
44862 +   if (cmRail->NumNodes > EP_MAX_NODES)
44863 +   {
44864 +       printk ("More nodes (%d) than point-to-point virtual process table entries (%d)\n", cmRail->NumNodes, EP_MAX_NODES);
44865 +       panic ("LoadRouteTable\n");
44866 +   }
44867 +
44868 +   for (i = 0; i < cmRail->NumNodes; i++)
44869 +       rail->Operations.LoadSystemRoute (rail, EP_VP_NODE(i), i, i);
44870 +
44871 +   /* Generate broadcast routes for subtrees */
44872 +   for (i = 1; i < cmRail->NumLevels; i++)
44873 +      for (j = 0; j < cmRail->Levels[i].NumSegs; j++)
44874 +         LoadBroadcastRoute (cmRail, i, j);
44875 +
44876 +   /* Generate broadcast route for whole machine */
44877 +   LoadBroadcastRoute (cmRail, cmRail->NumLevels, 0);
44878 +
44879 +   /* Finally invalidate all the data routes */
44880 +   for (i = 0; i < cmRail->NumNodes; i++)
44881 +       rail->Operations.UnloadNodeRoute (cmRail->Rail, i);
44882 +}
44883 +
44884 +void
44885 +cm_node_disconnected (EP_RAIL *rail, unsigned nodeId)
44886 +{
44887 +    CM_RAIL *cmRail = rail->ClusterRail;
44888 +    int      base, lstat, lgstat;
44889 +    int             clvl, subClMin, subClMax;
44890 +    int      thisClId, myClId;
44891 +    unsigned long flags;
44892 +
44893 +    ASSERT (nodeId != cmRail->NodeId);
44894 +
44895 +    spin_lock_irqsave (&cmRail->Lock, flags);
44896 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
44897 +       if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
44898 +           break;
44899 +
44900 +    myClId   = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
44901 +    thisClId = nodeId - cmRail->Levels[clvl].MinNodeId;
44902 +    base     = thisClId * CM_GSTATUS_BITS;
44903 +    lstat    = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
44904 +    lgstat   = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap, base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
44905 +
44906 +    ASSERT ((lstat & CM_GSTATUS_ACK_MASK) == CM_GSTATUS_MAY_RUN);
44907 +
44908 +    CPRINTF7 (2, "%s: cm_node_disconnected: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s -> %sMAY_START\n",
44909 +             cmRail->Rail->Name, nodeId, clvl,
44910 +             GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
44911 +             GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
44912 +             GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId),
44913 +             ((lgstat != CM_GSTATUS_CLOSING) && (lstat & CM_GSTATUS_RESTART)) ? "RESTART|" : "");
44914 +    
44915 +    switch (lgstat)
44916 +    {
44917 +    case CM_GSTATUS_CLOSING:
44918 +       /* delayed ack of closing - set MAY_START and clear RESTART */
44919 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, base, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
44920 +       break;
44921 +    case CM_GSTATUS_STARTING:
44922 +    case CM_GSTATUS_RUNNING:
44923 +       IASSERT (! cmRail->Levels[clvl].Online || lstat & CM_GSTATUS_RESTART);
44924 +       break;
44925 +    case CM_GSTATUS_ABSENT:
44926 +       IASSERT (lstat & CM_GSTATUS_RESTART);
44927 +    }
44928 +
44929 +    cmRail->Levels[clvl].Connected--;
44930 +
44931 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44932 +}
44933 +
44934 +void
44935 +cm_restart_node (EP_RAIL *rail, unsigned nodeId)
44936 +{
44937 +    CM_RAIL *cmRail = rail->ClusterRail;
44938 +    int      base, lstat, lgstat;
44939 +    int             clvl, subClMin, subClMax;
44940 +    int      thisClId, myClId;
44941 +    unsigned long flags;
44942 +
44943 +    spin_lock_irqsave (&cmRail->Lock, flags);
44944 +    if (nodeId == rail->Position.pos_nodeid)
44945 +    {
44946 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
44947 +           RestartComms (cmRail, clvl);
44948 +    }
44949 +    else
44950 +    {
44951 +       for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
44952 +           if (nodeId >= cmRail->Levels[clvl].MinNodeId && nodeId < (cmRail->Levels[clvl].MinNodeId + cmRail->Levels[clvl].NumNodes))
44953 +               break;
44954 +       
44955 +       myClId   = ClusterIds (cmRail, clvl, &subClMin, &subClMax);
44956 +       thisClId = nodeId - cmRail->Levels[clvl].MinNodeId;
44957 +       base     = thisClId * CM_GSTATUS_BITS;
44958 +       lstat    = statemap_getbits (cmRail->Levels[clvl].LocalMap,  base, CM_GSTATUS_BITS);
44959 +       lgstat   = statemap_getbits (cmRail->Levels[clvl].LastGlobalMap,  base, CM_GSTATUS_BITS) & CM_GSTATUS_STATUS_MASK;
44960 +
44961 +       CPRINTF6 (2, "%s: cm_restart_node: Node %d: clvl %d, lgstat %s, gstat %s, lstat %s\n",
44962 +                 cmRail->Rail->Name, nodeId, clvl,
44963 +                 GlobalStatusString (cmRail->Levels[clvl].LastGlobalMap, thisClId),
44964 +                 GlobalStatusString (cmRail->Levels[clvl].GlobalMap, thisClId),
44965 +                 GlobalStatusString (cmRail->Levels[clvl].LocalMap, thisClId));
44966 +       
44967 +       if (lgstat != CM_GSTATUS_CLOSING)
44968 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, base, lstat | CM_GSTATUS_RESTART, CM_GSTATUS_BITS);
44969 +    }
44970 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44971 +}
44972 +
44973 +void
44974 +cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason)
44975 +{
44976 +    CM_RAIL *cmRail = rail->ClusterRail;
44977 +    unsigned long flags;
44978 +
44979 +    spin_lock_irqsave (&cmRail->Lock, flags);
44980 +    if (offline)
44981 +       cmRail->OfflineReasons |= reason;
44982 +    else
44983 +       cmRail->OfflineReasons &= ~reason;
44984 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
44985 +}
44986 +
44987 +static void
44988 +cm_remove_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail)
44989 +{
44990 +    CM_SUBSYS  *sys    = (CM_SUBSYS *) subsys;
44991 +    CM_RAIL    *cmRail = sys->Rails[rail->Number];
44992 +    int i, lvl, clvl;
44993 +
44994 +    cm_procfs_rail_fini (cmRail);
44995 +
44996 +    sys->Rails[rail->Number] = NULL;
44997 +    rail->ClusterRail        = NULL;
44998 +
44999 +    del_timer_sync (&cmRail->HeartbeatTimer);
45000 +
45001 +    cmRail->NextRunTime      = 0;
45002 +    cmRail->NextDiscoverTime = 0;
45003 +    cmRail->NextHeartbeatTime = 0;
45004 +    
45005 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
45006 +    {
45007 +       for (lvl = 0; lvl <= clvl; lvl++)
45008 +       {
45009 +           CM_LEVEL *level = &cmRail->Levels[lvl];
45010 +           
45011 +           statemap_destroy (level->SubordinateMap[clvl]);
45012 +           
45013 +           for (i = 0; i < level->NumSegs; i++)
45014 +           {
45015 +               statemap_destroy (level->Sgmts[i].Maps[clvl].CurrentInputMap);
45016 +               statemap_destroy (level->Sgmts[i].Maps[clvl].InputMap);
45017 +               statemap_destroy (level->Sgmts[i].Maps[clvl].OutputMap);
45018 +           }
45019 +       }
45020 +       
45021 +       cmRail->Levels[clvl].Online = 0;
45022 +       
45023 +       statemap_destroy (cmRail->Levels[clvl].TmpMap);
45024 +       statemap_destroy (cmRail->Levels[clvl].GlobalMap);
45025 +       statemap_destroy (cmRail->Levels[clvl].LastGlobalMap);
45026 +       statemap_destroy (cmRail->Levels[clvl].SubTreeMap);
45027 +       statemap_destroy (cmRail->Levels[clvl].LocalMap);
45028 +    }
45029 +    
45030 +    spin_lock_destroy (&cmRail->Lock);
45031 +
45032 +    ep_free_inputq (cmRail->Rail, cmRail->PolledQueue);
45033 +    ep_free_inputq (cmRail->Rail, cmRail->IntrQueue);
45034 +    ep_free_outputq (cmRail->Rail, cmRail->MsgQueue);
45035 +
45036 +    KMEM_FREE (cmRail, sizeof (CM_RAIL));
45037 +}
45038 +
45039 +static int
45040 +cm_add_rail (EP_SUBSYS *subsys, EP_SYS *epsys, EP_RAIL *rail)
45041 +{
45042 +    CM_SUBSYS     *sys = (CM_SUBSYS *) subsys;
45043 +    ELAN_POSITION *pos = &rail->Position;
45044 +    CM_RAIL       *cmRail;
45045 +    int lvl, n, nn, clvl, span, i;
45046 +    unsigned long flags;
45047 +
45048 +    KMEM_ZALLOC (cmRail, CM_RAIL *, sizeof (CM_RAIL), 1);
45049 +
45050 +    if (cmRail == NULL)
45051 +       return (ENOMEM);
45052 +    
45053 +    cmRail->Rail     = rail;
45054 +    cmRail->NodeId   = pos->pos_nodeid;
45055 +    cmRail->NumNodes = pos->pos_nodes;
45056 +
45057 +    spin_lock_init (&cmRail->Lock);
45058 +
45059 +    if ((cmRail->IntrQueue   = ep_alloc_inputq (rail, EP_SYSTEMQ_INTR,   sizeof (CM_MSG), CM_INPUTQ_ENTRIES, IntrQueueCallback, cmRail)) == NULL ||
45060 +       (cmRail->PolledQueue = ep_alloc_inputq (rail, EP_SYSTEMQ_POLLED, sizeof (CM_MSG), CM_INPUTQ_ENTRIES, NULL, 0)) == NULL ||
45061 +       (cmRail->MsgQueue    = ep_alloc_outputq (rail, sizeof (CM_MSG), CM_NUM_MSG_BUFFERS)) == NULL)
45062 +    {
45063 +       goto failed;
45064 +    }
45065 +
45066 +    /* point to first "spare" message buffer */
45067 +    cmRail->NextSpareMsg = 0;
45068 +
45069 +    /* Compute the branching ratios from the switcy arity */
45070 +    for (lvl = 0; lvl < CM_MAX_LEVELS; lvl++)
45071 +       BranchingRatios[lvl] = (lvl < pos->pos_levels) ? pos->pos_arity[pos->pos_levels - lvl - 1] : 4;
45072 +    
45073 +    /* now determine the number of levels of hierachy we have */
45074 +    /* and how many nodes per level there are */
45075 +    for (lvl = 0, nn = 1, n = pos->pos_nodes; 
45076 +        n > 1; 
45077 +        nn *= BranchingRatios[lvl], n = n / BranchingRatios[lvl], lvl++)
45078 +    {
45079 +       int       nSegs = (n > BranchingRatios[lvl]) ? BranchingRatios[lvl] : n;
45080 +       int       nNodes = nn * nSegs;
45081 +       CM_LEVEL *level = &cmRail->Levels[lvl];
45082 +
45083 +       for (clvl = 0, span = pos->pos_arity[pos->pos_levels - clvl - 1]; 
45084 +            span < nNodes && clvl < pos->pos_levels - 1;
45085 +            clvl++, span *= pos->pos_arity[pos->pos_levels - clvl - 1])
45086 +           ;
45087 +       
45088 +       level->SwitchLevel = clvl;
45089 +       level->MinNodeId = (pos->pos_nodeid / nNodes) * nNodes;
45090 +       level->NumNodes = nNodes;
45091 +       level->NumSegs = nSegs;
45092 +    }
45093 +    
45094 +    cmRail->NumLevels      = lvl;
45095 +    cmRail->BroadcastLevel = lvl-1;
45096 +
45097 +    CPRINTF4 (2, "%s: NodeId=%d NumNodes=%d NumLevels=%d\n", 
45098 +             rail->Name, pos->pos_nodeid, pos->pos_nodes, cmRail->NumLevels);
45099 +
45100 +    LoadRouteTable (cmRail);
45101 +    
45102 +    /* Init SGMT constants */
45103 +    for (lvl = 0; lvl < cmRail->NumLevels; lvl++)
45104 +    {
45105 +       CM_LEVEL *level = &cmRail->Levels[lvl];
45106 +
45107 +       level->MySgmt = SegmentNo (cmRail, cmRail->NodeId, lvl);
45108 +       
45109 +       for (i = 0; i < CM_SGMTS_PER_LEVEL; i++)
45110 +       {
45111 +           CM_SGMT *sgmt = &level->Sgmts[i];
45112 +         
45113 +           sgmt->MsgNumber = lvl * CM_SGMTS_PER_LEVEL + i;
45114 +           sgmt->Level = lvl;
45115 +           sgmt->Sgmt = i;
45116 +       }
45117 +    }
45118 +
45119 +    /* Init maps for each cluster level */
45120 +    for (clvl = 0; clvl < cmRail->NumLevels; clvl++)
45121 +    {
45122 +       int nNodes = cmRail->Levels[clvl].NumNodes;
45123 +       int mapBits = (nNodes * CM_GSTATUS_BITS) + (nNodes * EP_SVC_NUM_INDICATORS);
45124 +       int clmin;
45125 +       int clmax;
45126 +       int clid = ClusterIds (cmRail, clvl, &clmin, &clmax);
45127 +
45128 +       for (lvl = 0; lvl <= clvl; lvl++)
45129 +       {
45130 +           CM_LEVEL *level = &cmRail->Levels[lvl];
45131 +
45132 +           level->SubordinateMap[clvl] = statemap_create (mapBits);
45133 +
45134 +           for (i = 0; i < level->NumSegs; i++)
45135 +           {
45136 +               level->Sgmts[i].Maps[clvl].CurrentInputMap = statemap_create (mapBits);
45137 +               level->Sgmts[i].Maps[clvl].InputMap        = statemap_create (mapBits);
45138 +               level->Sgmts[i].Maps[clvl].OutputMap       = statemap_create (mapBits);
45139 +           }
45140 +       }
45141 +       
45142 +       cmRail->Levels[clvl].Online = 0;
45143 +
45144 +       cmRail->Levels[clvl].TmpMap        = statemap_create (mapBits);
45145 +       cmRail->Levels[clvl].GlobalMap     = statemap_create (mapBits);
45146 +       cmRail->Levels[clvl].LastGlobalMap = statemap_create (mapBits);
45147 +       cmRail->Levels[clvl].SubTreeMap    = statemap_create (mapBits);
45148 +       cmRail->Levels[clvl].LocalMap      = statemap_create (mapBits);
45149 +
45150 +       /* Flag everyone outside my next lower cluster as sensed offline... */
45151 +       for (i = 0; i < clmin; i++)
45152 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
45153 +       
45154 +       for (i = clmax + 1; i < nNodes; i++)
45155 +           statemap_setbits (cmRail->Levels[clvl].LocalMap, i * CM_GSTATUS_BITS, CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
45156 +       
45157 +       /* ...and set my own state */
45158 +       statemap_setbits (cmRail->Levels[clvl].LocalMap, clid * CM_GSTATUS_BITS,
45159 +                         CM_GSTATUS_CLOSING | CM_GSTATUS_MAY_START, CM_GSTATUS_BITS);
45160 +    }
45161 +    
45162 +    /* compute parameter hash to add to messages */
45163 +    cmRail->ParamHash = EP_PROTOCOL_VERSION;
45164 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_PERIODIC_DISCOVER_INTERVAL;
45165 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_URGENT_DISCOVER_INTERVAL;
45166 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_INTERVAL;
45167 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_DMA_RETRIES;
45168 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_P2P_MSG_RETRIES;
45169 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_BCAST_MSG_RETRIES;
45170 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_TIMER_SCHEDULE_TIMEOUT;
45171 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_HEARTBEAT_TIMEOUT;
45172 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_DISCOVER_TIMEOUT;
45173 +    cmRail->ParamHash = cmRail->ParamHash * 127 + BT_NBIPUL;
45174 +    cmRail->ParamHash = cmRail->ParamHash * 127 + CM_GSTATUS_BITS;
45175 +    cmRail->ParamHash = cmRail->ParamHash * 127 + EP_SVC_NUM_INDICATORS;
45176 +    cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumLevels;
45177 +    cmRail->ParamHash = cmRail->ParamHash * 127 + cmRail->NumNodes;
45178 +    for (i = 0; i < cmRail->NumLevels; i++)
45179 +       cmRail->ParamHash = cmRail->ParamHash * 127 + BranchingRatios[i];
45180 +    
45181 +    spin_lock_irqsave (&cmRail->Lock, flags);
45182 +
45183 +    /* Initialise the timer, but don't add it yet, since
45184 +     * __Schedule_Heartbeat() will do this. */
45185 +
45186 +    init_timer (&cmRail->HeartbeatTimer);
45187 +    
45188 +    cmRail->HeartbeatTimer.function = cm_heartbeat_timer;
45189 +    cmRail->HeartbeatTimer.data     = (unsigned long) cmRail;
45190 +    cmRail->HeartbeatTimer.expires  = lbolt + hz;
45191 +
45192 +    /* Indicate that heartbeats should be sent 
45193 +     * as soon as the timer is run from inside 
45194 +     * LowerTopLevel */
45195 +    cmRail->NextHeartbeatTime = lbolt;
45196 +    
45197 +    /* start discovering who else is out there */
45198 +    LowerTopLevel (cmRail, 0);
45199 +
45200 +    /* connect to myself straight away - I know I'm here */
45201 +    ep_connect_node (rail, cmRail->NodeId);
45202 +    
45203 +    /* add to all rails */
45204 +    sys->Rails[rail->Number] = cmRail;
45205 +    rail->ClusterRail = (void *) cmRail;
45206 +
45207 +    spin_unlock_irqrestore (&cmRail->Lock, flags);
45208 +
45209 +    /* Enable the input queues */
45210 +    ep_enable_inputq (rail, cmRail->PolledQueue);
45211 +    ep_enable_inputq (rail, cmRail->IntrQueue);
45212 +
45213 +    /* Create the procfs entries */
45214 +    cm_procfs_rail_init (cmRail);
45215 +
45216 +    return 0;
45217 +
45218 + failed:
45219 +    cm_remove_rail (subsys, epsys, rail);
45220 +    return -ENOMEM;
45221 +}
45222 +
45223 +static void
45224 +cm_fini (EP_SUBSYS *subsys, EP_SYS *epsys)
45225 +{
45226 +    CM_SUBSYS *sys = (CM_SUBSYS *) subsys;
45227 +
45228 +    cm_procfs_fini(sys);
45229 +    
45230 +    KMEM_FREE (sys, sizeof (CM_SUBSYS));
45231 +}
45232 +
45233 +int
45234 +cm_init (EP_SYS *sys)
45235 +{
45236 +    CM_SUBSYS *subsys;
45237 +
45238 +    KMEM_ZALLOC (subsys, CM_SUBSYS *, sizeof (CM_SUBSYS), 1);
45239 +
45240 +    if (subsys == NULL)
45241 +       return (ENOMEM);
45242 +
45243 +    subsys->Subsys.Sys        = sys;
45244 +    subsys->Subsys.Name              = "cm";
45245 +    subsys->Subsys.Destroy    = cm_fini;
45246 +    subsys->Subsys.AddRail    = cm_add_rail;
45247 +    subsys->Subsys.RemoveRail = cm_remove_rail;
45248 +
45249 +    ep_subsys_add (sys, &subsys->Subsys);
45250 +
45251 +    cm_procfs_init (subsys);
45252 +
45253 +    /*
45254 +     * Initialise the machineid if it wasn't specified by
45255 +     * the modules.conf file - otherwise truncate it to 
45256 +     * 16 bits.
45257 +     */
45258 +    if (MachineId != -1)
45259 +       MachineId = (uint16_t) MachineId;
45260 +    else
45261 +    {
45262 +#if defined(LINUX_ALPHA)
45263 +       MachineId = (uint16_t)((5 << 12) | HZ);
45264 +#elif defined(LINUX_SPARC)
45265 +       MachineId = (uint16_t)((4 << 12) | HZ);
45266 +#elif defined(LINUX_I386)
45267 +       MachineId = (uint16_t)((3 << 12) | HZ);
45268 +#elif defined( LINUX_IA64)
45269 +       MachineId = (uint16_t)((2 << 12) | HZ);
45270 +#elif defined(LINUX_X86_64)
45271 +       MachineId = (uint16_t)((1 << 12) | HZ);
45272 +#else
45273 +       MachineId = (uint16_t)((0 << 12) | HZ);
45274 +#endif
45275 +    }
45276 +
45277 +    return (0);
45278 +}
45279 +
45280 +/*
45281 + * Local variables:
45282 + * c-file-style: "stroustrup"
45283 + * End:
45284 + */
45285 diff -urN clean/drivers/net/qsnet/ep/cm.h linux-2.6.9/drivers/net/qsnet/ep/cm.h
45286 --- clean/drivers/net/qsnet/ep/cm.h     1969-12-31 19:00:00.000000000 -0500
45287 +++ linux-2.6.9/drivers/net/qsnet/ep/cm.h       2005-03-30 09:06:34.000000000 -0500
45288 @@ -0,0 +1,396 @@
45289 +/*
45290 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45291 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45292 + *
45293 + *    For licensing information please see the supplied COPYING file
45294 + *
45295 + */
45296 +
45297 +#ifndef __ELAN_CM_H
45298 +#define __ELAN_CM_H
45299 +
45300 +#ident "@(#)$Id: cm.h,v 1.16 2005/03/30 14:06:34 mike Exp $"
45301 +/*      $Source: /cvs/master/quadrics/epmod/cm.h,v $*/
45302 +
45303 +#include <elan/statemap.h>
45304 +
45305 +#if defined(DIGITAL_UNIX)
45306 +/*
45307 + * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible
45308 + * for handling all PCI interrupts and "funneled" operations.  When a kernel thread
45309 + * is made runnable, the scheduler will choose which cpu it will run on at that time,
45310 + * and will only execute a higher priority thread from another cpu's run queue when 
45311 + * it becomes totally idle (apparently also including user processes).  Also the 
45312 + * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
45313 + * at "preemptable" places - so again have no guarantee on when they will execute if
45314 + * they happen to be queued on a "hogged" cpu. The combination of these mean that the Tru64
45315 + * is incapable of scheduling a high priority kernel  thread within a deterministic time
45316 + * of when it should have become runnable - wonderfull.
45317 + *
45318 + * Hence the solution Compaq have proposed it to schedule a timeout onto all of the
45319 + * cpu's timeouts lists at the maximum frequency that we could want to execute code,
45320 + * then to handle the scheduling of work between these ourselves.  With a bit of luck
45321 + * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
45322 + * to do our important work.
45323 + *
45324 + * However ..... this still is not reliable, since timeouts under Tru64 are still 
45325 + * only run when the currently running kernel thread "co-operates" by calling one
45326 + * of a number of functions which is permitted to run the "lwc"s AND is not holding
45327 + * any spinlocks AND is running ai IPL 0.   However Compaq are unable to provide
45328 + * any upper limit on the time between the "lwc"'s being run and so it is possible
45329 + * for all 4 cpus to not run them for an unbounded time.
45330 + *
45331 + * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to 
45332 + * hardclock() to "solve" this problem for Memory Channel.  However, since it
45333 + * is called within the clock interrupt it is not permissible to aquire any
45334 + * spinlocks, nor to run for "too long".  This means that it is not possible to
45335 + * call the heartbeat algorithm from this hook.  
45336 + *
45337 + * Our solution to these limitations is to use the hook to cause an elan interrupt 
45338 + * to be delivered, by issueing a mis-aligned SetEvent command - this causes the device 
45339 + * to trap and ep_cprocTrap() can then run the heartbeat code.  However there is a lock 
45340 + * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to 
45341 + * use a trylock and if we fail, then hope that when the interrupt is delievered again
45342 + * some time later we will succeed.
45343 + *
45344 + * However this only works if the kernel is able to respond to the Elan interrupt,
45345 + * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
45346 + * not been taken for more than an CM_TIMER_SCHEDULE_TIMEOUT interval.
45347 + *
45348 + * In fact this is exactly the mechanism that other operating systems use to
45349 + * execute timeouts, since the hardclock interrupt posts a low priority 
45350 + * "soft interrupt" which "pre-eempts" the currently running thread and then
45351 + * executes the timeouts.To block timeouts you use splsoftclock() the same as 
45352 + * in Tru64.
45353 + */
45354 +#define PER_CPU_TIMEOUT                        TRUE
45355 +#endif
45356 +
45357 +
45358 +#define CM_SGMTS_PER_LEVEL             8                       /* maximum nodes in each segment */
45359 +#define CM_MAX_LEVELS                  6                       /* maximum depth of tree */
45360 +
45361 +/* message buffers/dmas/events etc */
45362 +#define CM_NUM_NODE_MSG_BUFFERS                (CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL) /* subordinates and leader */
45363 +#define CM_NUM_SPARE_MSG_BUFFERS       8                       /* spare msg buffers for non-connected nodes */
45364 +#define CM_NUM_MSG_BUFFERS             (CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
45365 +
45366 +#define CM_INPUTQ_ENTRIES              128                     /* # entries in input queue */
45367 +
45368 +#define CM_PERIODIC_DISCOVER_INTERVAL  (5000)          /* 5s (infrequent resolution of established leader conflicts) */
45369 +#define CM_URGENT_DISCOVER_INTERVAL    (50)            /* 0.05s (more frequently than heartbeats 'cause they don't retry) */
45370 +#define CM_HEARTBEAT_INTERVAL          (125)           /* 0.125s */
45371 +#define CM_TIMER_SCHEDULE_TIMEOUT      (4000)          /* 4s     Maximum time before a timer that's secheduled to run gets to run (eg blocked in interrupt handlers etc) */
45372 +#define CM_THREAD_SCHEDULE_TIMEOUT     (30000)         /* 30s    Maximum time before a thread that's scheduled to run gets to run */
45373 +#define CM_THREAD_RUNNING_TIMEOUT      (30000)         /* 30s    Don't expect the manager thread to be running longer than this */
45374 +
45375 +#ifdef PER_CPU_TIMEOUT
45376 +#define CM_PERCPU_TIMEOUT_INTERVAL     (50)            /* 0.05s (must be less than all above intervals) */
45377 +#define CM_PACEMAKER_INTERVAL          (500)           /* 0.05s */
45378 +
45379 +#define CM_HEARTBEAT_OVERDUE           (250)           /* 0.25s Maximum time a timeout can be overdue before taking extreme action */
45380 +#endif
45381 +
45382 +#define CM_P2P_DMA_RETRIES             31
45383 +
45384 +/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
45385 + * attempts to send one to be successfully received */
45386 +#define CM_P2P_MSG_RETRIES             8
45387 +
45388 +/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
45389 + * to send one to be successfully received. */
45390 +#define CM_BCAST_MSG_RETRIES           40
45391 +
45392 +/* Heartbeat timeout allows for a node stalling and still getting its
45393 + * heartbeat. The 2 is to allow for unsynchronised polling times. */
45394 +#define CM_HEARTBEAT_TIMEOUT           (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
45395 +
45396 +/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
45397 + * who don't see discovery are considered dead by their leader.  This
45398 + * ensures that by the time a node "discovers" it is a leader of a segment,
45399 + * the previous leader of that segment will have been deemed to be dead by
45400 + * its parent segment's leader */
45401 +#define CM_DISCOVER_TIMEOUT            (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
45402 +
45403 +#define CM_WAITING_TIMEOUT             (CM_DISCOVER_TIMEOUT * 100)
45404 +
45405 +/*
45406 + * Convert all timeouts specified in mS into "ticks"
45407 + */
45408 +#define MSEC2TICKS(MSEC)               (((MSEC)*HZ)/1000)
45409 +
45410 +
45411 +/* statemap entry */
45412 +typedef struct cm_state_entry
45413 +{
45414 +    int16_t           level;                   /* cluster level to apply to */
45415 +    int16_t          offset;                   /* from statemap_findchange() */
45416 +    uint16_t          seg[BT_NBIPUL/16];       /* ditto */
45417 +} CM_STATEMAP_ENTRY;
45418 +
45419 +/* offset is >= 0 for a change to apply and */
45420 +#define STATEMAP_NOMORECHANGES (-1)            /* end of a set of updates */
45421 +#define STATEMAP_RESET         (-2)            /* reset the target map */
45422 +#define STATEMAP_NOOP          (-3)            /* null token */
45423 +
45424 +/* CM message format */
45425 +typedef int8_t CM_SEQ;                         /* heartbeat sequence numbers; at least 2 bits, signed */
45426 +
45427 +/*
45428 + * The message header is received into the last 64 byte block of 
45429 + * the input queue and the Version *MUST* be the last word of the 
45430 + * block to ensure that we can see that the whole of the message
45431 + * has reached main memory after we've seen the input queue pointer
45432 + * have been updated.
45433 + */
45434 +typedef struct ep_cm_hdr
45435 +{
45436 +    uint32_t          Pad0;
45437 +    uint32_t          Pad1;
45438 +
45439 +    uint8_t           Type;
45440 +    uint8_t           Level;
45441 +    CM_SEQ            Seq;                     /* precision at least 2 bits each */
45442 +    CM_SEQ            AckSeq;
45443 +    
45444 +    uint16_t          NumMaps;
45445 +    uint16_t          MachineId;
45446 +
45447 +    uint16_t          NodeId;
45448 +    uint16_t          Checksum;
45449 +
45450 +    uint32_t           Timestamp;
45451 +    uint32_t           ParamHash;
45452 +    uint32_t          Version;
45453 +} CM_HDR;
45454 +
45455 +#define CM_HDR_SIZE        sizeof (CM_HDR)
45456 +
45457 +typedef struct cm_msg
45458 +{
45459 +    union {
45460 +       CM_STATEMAP_ENTRY   Statemaps[1];               /* piggy-backed statemap updates start here */
45461 +       uint8_t             Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
45462 +    } Payload;
45463 +    
45464 +    CM_HDR                 Hdr;
45465 +} CM_MSG;
45466 +
45467 +/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
45468 +#define CM_MSG_MAXMAPS         (offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
45469 +#define CM_MSG_MAP(mapno)      (CM_MSG_MAXMAPS - (mapno) - 1)
45470 +
45471 +/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
45472 +#define CM_MSG_BASE(nmaps)     (nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
45473 +#define CM_MSG_SIZE(nmaps)     (sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
45474 +
45475 +#define CM_MSG_VERSION                         0xcad00005
45476 +#define CM_MSG_TYPE_RESOLVE_LEADER             0
45477 +#define CM_MSG_TYPE_DISCOVER_LEADER            1
45478 +#define CM_MSG_TYPE_NOTIFY                     2
45479 +#define CM_MSG_TYPE_DISCOVER_SUBORDINATE       3
45480 +#define CM_MSG_TYPE_IMCOMING                   4
45481 +#define CM_MSG_TYPE_HEARTBEAT                  5
45482 +#define CM_MSG_TYPE_REJOIN                     6
45483 +
45484 +/* CM machine segment */
45485 +typedef struct cm_sgmtMaps
45486 +{
45487 +    u_char       InputMapValid;                        /* Input map has been set */
45488 +    u_char       OutputMapValid;               /* Output map has been set */
45489 +    u_char       SentChanges;                  /* got an outstanding STATEMAP_NOMORECHANGES to send */
45490 +    statemap_t  *OutputMap;                    /* state to send */
45491 +    statemap_t  *InputMap;                     /* state received */
45492 +    statemap_t  *CurrentInputMap;              /* state being received */
45493 +} CM_SGMTMAPS;
45494 +
45495 +typedef struct cm_sgmt
45496 +{
45497 +   u_char       State;
45498 +   u_char       SendMaps;
45499 +   u_char       MsgAcked;
45500 +   CM_SEQ      MsgSeq;
45501 +   CM_SEQ      AckSeq;
45502 +   u_int       NodeId;
45503 +   long                UpdateTick;
45504 +   long                WaitingTick;
45505 +   uint32_t    Timestamp;
45506 +   CM_SGMTMAPS  Maps[CM_MAX_LEVELS];           /* Maps[i] == state for cluster level i */
45507 +   u_short      MsgNumber;                     /* msg buffer to use */
45508 +   u_short     NumMaps;                        /* # maps in message buffer */
45509 +   u_short      Level;
45510 +   u_short      Sgmt;
45511 +} CM_SGMT;
45512 +
45513 +#define CM_SGMT_ABSENT         0               /* no one there at all */
45514 +#define CM_SGMT_WAITING                1               /* waiting for subtree to connect */
45515 +#define CM_SGMT_COMING         2               /* expecting a subtree to reconnect */
45516 +#define CM_SGMT_PRESENT                3               /* connected */
45517 +
45518 +typedef struct cm_level
45519 +{
45520 +    int               SwitchLevel;
45521 +    u_int             MinNodeId;
45522 +    u_int              NumNodes;
45523 +    u_int              NumSegs;
45524 +    u_int              MySgmt;
45525 +   
45526 +    /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
45527 +    u_char             SubordinateMapValid[CM_MAX_LEVELS];
45528 +    statemap_t        *SubordinateMap[CM_MAX_LEVELS];
45529 +
45530 +    /* maps/flags for this cluster level */
45531 +    u_int              Online:1;                               /* I've gone online (seen myself running) */
45532 +    u_int             Restarting:1;                            /* driving my own restart bit */
45533 +    u_char            OfflineReasons;                          /* forced offline by broadcast */
45534 +
45535 +    u_char             GlobalMapValid;
45536 +    u_char             SubTreeMapValid;
45537 +    u_long            Connected;
45538 +
45539 +    statemap_t        *LocalMap;               /* state bits I drive */
45540 +    statemap_t        *SubTreeMap;             /* OR of my and my subtree states */
45541 +    statemap_t        *GlobalMap;              /* OR of all node states */
45542 +    statemap_t        *LastGlobalMap;          /* last map I saw */
45543 +    statemap_t        *TmpMap;                 /* scratchpad */
45544 +
45545 +    CM_SGMT           Sgmts[CM_SGMTS_PER_LEVEL];
45546 +} CM_LEVEL;
45547 +
45548 +#define CM_ROLE_LEADER_CANDIDATE       0
45549 +#define CM_ROLE_LEADER                 1
45550 +#define CM_ROLE_SUBORDINATE            2
45551 +
45552 +/* global status bits */
45553 +#define CM_GSTATUS_STATUS_MASK         0x03    /* bits nodes drive to broadcast their status */
45554 +#define CM_GSTATUS_ABSENT              0x00    /* Off the network */
45555 +#define CM_GSTATUS_STARTING            0x01    /* I'm waiting for everyone to see me online */
45556 +#define CM_GSTATUS_RUNNING              0x03   /* up and running */
45557 +#define CM_GSTATUS_CLOSING             0x02    /* I'm waiting for everyone to see me offline */
45558 +
45559 +#define CM_GSTATUS_ACK_MASK            0x0c    /* bits node drive to ack other status */
45560 +#define CM_GSTATUS_MAY_START           0x04    /* Everyone thinks I may not start */
45561 +#define CM_GSTATUS_MAY_RUN             0x08    /* Everyone thinks I may not run */
45562 +
45563 +#define CM_GSTATUS_RESTART             0x10    /* Someone thinks I should restart */
45564 +#define CM_GSTATUS_BITS                        5
45565 +
45566 +#define CM_GSTATUS_BASE(node)          ((node) * CM_GSTATUS_BITS)
45567 +
45568 +#if defined(PER_CPU_TIMEOUT)
45569 +typedef struct cm_timeout_data
45570 +{
45571 +    long               ScheduledAt;                            /* lbolt timeout was scheduled to run at */
45572 +
45573 +    unsigned long       EarlyCount;                            /* # times run early than NextRun */
45574 +    unsigned long      MissedCount;                            /* # times run on time - but someone else was running it */
45575 +    unsigned long       WastedCount;                           /* # times we failed to get the spinlock */
45576 +    unsigned long      WorkCount;                              /* # times we're the one running */
45577 +
45578 +    unsigned long      WorstDelay;                             /* worst scheduling delay */
45579 +    unsigned long      BestDelay;                              /* best scheduling delay */
45580 +
45581 +    unsigned long      WorstLockDelay;                         /* worst delay before getting rail->Lock */
45582 +
45583 +    unsigned long      WorstHearbeatDelay;                     /* worst delay before calling DoHeartbeatWork */
45584 +} CM_TIMEOUT_DATA;
45585 +#endif
45586 +
45587 +typedef struct cm_rail
45588 +{
45589 +    EP_RAIL          *Rail;                                    /* rail we're associated with */
45590 +    struct list_head   Link;                                   /*   and linked on the CM_SUBSYS */
45591 +
45592 +    uint32_t          ParamHash;                               /* hash of critical parameters */
45593 +    uint32_t           Timestamp;
45594 +    long              DiscoverStartTick;                       /* when discovery started */
45595 +
45596 +    unsigned int       NodeId;                                 /* my node id */
45597 +    unsigned int       NumNodes;                               /*   and number of nodes */
45598 +    unsigned int       NumLevels;                              /* number of levels computed from machine size */
45599 +    int                       BroadcastLevel;
45600 +    long              BroadcastLevelTick;
45601 +    unsigned int       TopLevel;                               /* level at which I'm not a leader */
45602 +    unsigned char      Role;                                   /* state at TopLevel */
45603 +
45604 +    EP_INPUTQ        *PolledQueue;                             /* polled input queue */
45605 +    EP_INPUTQ        *IntrQueue;                               /* intr input queue */
45606 +    EP_OUTPUTQ       *MsgQueue;                                /* message  */
45607 +    unsigned int       NextSpareMsg;                           /* next "spare" message buffer to use */
45608 +
45609 +    EP_CM_RAIL_STATS   Stats;                                  /* statistics */
45610 +
45611 +    kmutex_t          Mutex;
45612 +    spinlock_t        Lock;
45613 +    
45614 +    long              NextHeartbeatTime;                       /* next time to check/send heartbeats */
45615 +    long              NextDiscoverTime;                        /* next time to progress discovery  */
45616 +    long              NextRunTime;                             /* the earlier of the above two or intr requires inputq poll*/
45617 +
45618 +    unsigned int       OfflineReasons;                         /* forced offline by procfs/manager thread stuck */
45619 +
45620 +#if defined(PER_CPU_TIMEOUT)
45621 +    spinlock_t        HeartbeatTimeoutsLock;                   /* spinlock to sequentialise per-cpu timeouts */
45622 +    long              HeartbeatTimeoutsStarted;                /* bitmap of which timeouts have started */
45623 +    long              HeartbeatTimeoutsStopped;                /* bitmap of which timeouts have stopped */
45624 +    long              HeartbeatTimeoutsShouldStop;             /* flag to indicate timeouts should stop */
45625 +    kcondvar_t        HeartbeatTimeoutsWait;                   /* place to sleep waiting for timeouts to stop */
45626 +    long              HeartbeatTimeoutRunning;                 /* someone is running the timeout - don't try for the lock */
45627 +
45628 +    long              HeartbeatTimeoutOverdue;                 /* heartbeat seen as overdue - interrupt requested */
45629 +
45630 +    CM_TIMEOUT_DATA   *HeartbeatTimeoutsData;                  /* per timeout data */
45631 +#else
45632 +    struct timer_list  HeartbeatTimer;                         /* timer for heartbeat/discovery */
45633 +#endif
45634 +
45635 +    CM_LEVEL           Levels[CM_MAX_LEVELS];
45636 +} CM_RAIL;
45637 +
45638 +/* OfflineReasons (both per-rail and per-level) */
45639 +#define CM_OFFLINE_BROADCAST           (1 << 0)
45640 +#define CM_OFFLINE_PROCFS              (1 << 1)
45641 +#define CM_OFFLINE_MANAGER             (1 << 2)
45642 +
45643 +typedef struct cm_subsys
45644 +{
45645 +    EP_SUBSYS          Subsys;
45646 +    CM_RAIL            *Rails[EP_MAX_RAILS];
45647 +} CM_SUBSYS;
45648 +
45649 +extern int  MachineId;
45650 +
45651 +extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
45652 +extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
45653 +extern void cm_restart_comms (CM_RAIL *cmRail);
45654 +extern int  cm_init (EP_SYS *sys);
45655 +
45656 +extern void DisplayRail(EP_RAIL *rail);
45657 +extern void DisplaySegs (EP_RAIL *rail);
45658 +extern void DisplayStatus (EP_RAIL *rail);
45659 +
45660 +extern void DisplayNodeMaps  (DisplayInfo *di, CM_RAIL *cmRail);
45661 +extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
45662 +extern void DisplayRailDo    (DisplayInfo *di, EP_RAIL *rail);
45663 +
45664 +extern int    cm_read_cluster(EP_RAIL *rail,char *page);
45665 +extern void   cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason);
45666 +
45667 +extern int    cm_svc_indicator_set      (EP_RAIL *rail, int svc_indicator);
45668 +extern int    cm_svc_indicator_clear    (EP_RAIL *rail, int svc_indicator);
45669 +extern int    cm_svc_indicator_is_set   (EP_RAIL *rail, int svc_indicator, int nodeId);
45670 +extern int    cm_svc_indicator_bitmap   (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
45671 +
45672 +/* cm_procfs.c */
45673 +extern void   cm_procfs_init (CM_SUBSYS *subsys);
45674 +extern void   cm_procfs_fini (CM_SUBSYS *subsys);
45675 +extern void   cm_procfs_rail_init (CM_RAIL *rail);
45676 +extern void   cm_procfs_rail_fini (CM_RAIL *rail);
45677 +
45678 +/*
45679 + * Local variables:
45680 + * c-file-style: "stroustrup"
45681 + * End:
45682 + */
45683 +#endif /* __ELAN_CM_H */
45684 +
45685 diff -urN clean/drivers/net/qsnet/ep/cm_procfs.c linux-2.6.9/drivers/net/qsnet/ep/cm_procfs.c
45686 --- clean/drivers/net/qsnet/ep/cm_procfs.c      1969-12-31 19:00:00.000000000 -0500
45687 +++ linux-2.6.9/drivers/net/qsnet/ep/cm_procfs.c        2004-05-14 05:23:13.000000000 -0400
45688 @@ -0,0 +1,254 @@
45689 +/*
45690 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45691 + *    Copyright (c) 2002-2005 by Quadrics Ltd.
45692 + *
45693 + *    For licensing information please see the supplied COPYING file
45694 + *
45695 + */
45696 +
45697 +#ident "@(#)$Id: cm_procfs.c,v 1.5 2004/05/14 09:23:13 daniel Exp $"
45698 +/*      $Source: /cvs/master/quadrics/epmod/cm_procfs.c,v $ */
45699 +
45700 +#include <qsnet/kernel.h>
45701 +
45702 +#include <elan/kcomm.h>
45703 +
45704 +#include "kcomm_vp.h"
45705 +#include "debug.h"
45706 +#include "cm.h"
45707 +#include <elan/epsvc.h>
45708 +
45709 +#include <qsnet/procfs_linux.h>
45710 +
45711 +extern char *sprintClPeers (char *str, CM_RAIL *cmRail, int clvl);
45712 +
45713 +static int
45714 +proc_read_cluster(char *page, char **start, off_t off,
45715 +               int count, int *eof, void *data)
45716 +{
45717 +    CM_RAIL *cmRail = (CM_RAIL *) data;
45718 +    char    *p      = page;
45719 +
45720 +    page[0] = 0;
45721 +
45722 +    if (cmRail->Rail->State != EP_RAIL_STATE_RUNNING)
45723 +       p += sprintf(p, "<not running>\n");
45724 +    else
45725 +    {
45726 +       CM_LEVEL *cmLevel;
45727 +       unsigned long flags;
45728 +       int  i, j;
45729 +       char clNodeStr[32]; /* [%d-%d][%d-%d] */
45730 +       char seperate_with;
45731 +
45732 +       struct { int val; char *name; } bitvals[] = {
45733 +           {CM_OFFLINE_BROADCAST, "Broadcast"},
45734 +           {CM_OFFLINE_PROCFS,    "Offline"},
45735 +           {CM_OFFLINE_MANAGER,   "Manager"}};
45736 +       
45737 +       spin_lock_irqsave (&cmRail->Lock, flags);
45738 +       
45739 +       for (i = 0; i < cmRail->NumLevels; i++)
45740 +       {
45741 +           cmLevel = &cmRail->Levels[i];
45742 +           
45743 +           p += sprintf(p, "%23s %7s ", sprintClPeers (clNodeStr, cmRail, i), cmLevel->Online?"Online":"Offline");
45744 +           
45745 +           if ((cmLevel->Online ) | ( cmLevel->Connected > 0))
45746 +               p += sprintf(p, "Connected=%lu ", cmLevel->Connected);
45747 +           
45748 +           seperate_with = '<';
45749 +           
45750 +           if ( cmLevel->Restarting ) {
45751 +               p += sprintf(p, "%cRestarting", seperate_with);
45752 +               seperate_with = ',';
45753 +           }
45754 +           
45755 +           if ( ! (cmLevel->GlobalMapValid & cmLevel->SubTreeMapValid )) {
45756 +               p += sprintf(p, "%cMap Not Valid", seperate_with);
45757 +               seperate_with = ',';
45758 +           }
45759 +           
45760 +           if ( cmLevel->OfflineReasons ) {
45761 +               for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++)
45762 +                   if (cmLevel->OfflineReasons & bitvals[j].val) {
45763 +                       p += sprintf(p, "%c%s", seperate_with, bitvals[j].name);
45764 +                       seperate_with = ',';
45765 +                   }
45766 +           }
45767 +           if ( cmRail->OfflineReasons ) {
45768 +               for (j = 0; j < sizeof (bitvals)/sizeof(bitvals[0]); j++)
45769 +                   if (cmRail->OfflineReasons & bitvals[j].val) {
45770 +                       p += sprintf(p, "%c%s", seperate_with, bitvals[j].name);
45771 +                       seperate_with = ',';
45772 +                   }
45773 +           }
45774 +           
45775 +           if ( seperate_with != '<' ) 
45776 +               p += sprintf(p,">\n");
45777 +           else
45778 +               p += sprintf(p,"\n");
45779 +       }
45780 +       
45781 +       spin_unlock_irqrestore (&cmRail->Lock, flags);
45782 +    }
45783 +
45784 +    return qsnet_proc_calc_metrics (page, start, off, count, eof, p - page);
45785 +}
45786 +
45787 +static struct rail_info
45788 +{
45789 +    char *name;
45790 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
45791 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
45792 +} rail_info[] = {
45793 +    {"cluster", proc_read_cluster, NULL},
45794 +};
45795 +
45796 +struct proc_dir_entry *svc_indicators_root;
45797 +
45798 +typedef struct svc_indicator_data
45799 +{
45800 +    int       svc_indicator;
45801 +    EP_RAIL  *rail;
45802 +} SVC_INDICATOR_DATA;
45803 +
45804 +static SVC_INDICATOR_DATA svc_indicator_data[EP_SVC_NUM_INDICATORS][EP_MAX_RAILS];
45805 +static char              *svc_indicator_names[EP_SVC_NUM_INDICATORS] = EP_SVC_NAMES;
45806 +
45807 +static int
45808 +proc_read_svc_indicator_rail_bitmap (char *page, char **start, off_t off,
45809 +                                    int count, int *eof, void *data)
45810 +{
45811 +    SVC_INDICATOR_DATA  *svc_data = (SVC_INDICATOR_DATA  *)data;
45812 +    unsigned int        nnodes   = ep_numnodes (ep_system());
45813 +    bitmap_t           *bitmap;
45814 +
45815 +    KMEM_ZALLOC (bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1);
45816 +
45817 +    cm_svc_indicator_bitmap (svc_data->rail, svc_data->svc_indicator, bitmap, 0, nnodes);
45818 +
45819 +    ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes);
45820 +    
45821 +    KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)));
45822 +    
45823 +    strcat (page, "\n");
45824 +
45825 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
45826 +}
45827 +
45828 +static int
45829 +proc_read_svc_indicator_bitmap(char *page, char **start, off_t off,
45830 +                              int count, int *eof, void *data)
45831 +{
45832 +    unsigned int         num      = (unsigned long) data;
45833 +    EP_SYS              *sys      = ep_system();
45834 +    unsigned int         nnodes   = ep_numnodes (sys);
45835 +    bitmap_t            *bitmap;
45836 +
45837 +    KMEM_ALLOC(bitmap, bitmap_t *, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)), 1);
45838 +     
45839 +    ep_svc_indicator_bitmap (sys, num, bitmap, 0, nnodes);
45840 +
45841 +    ep_sprintf_bitmap (page, PAGESIZE, bitmap, 0, 0, nnodes);
45842 +    
45843 +    KMEM_FREE (bitmap, (BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t)));
45844 +    
45845 +    strcat (page, "\n");
45846 +
45847 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
45848 +}
45849 +
45850 +void
45851 +cm_procfs_rail_init (CM_RAIL *cmRail)
45852 +{
45853 +    EP_RAIL *rail = cmRail->Rail;
45854 +    struct proc_dir_entry *p;
45855 +    int i;
45856 +    
45857 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
45858 +    {
45859 +       if ((p = create_proc_entry (rail_info[i].name, 0, cmRail->Rail->ProcDir)) != NULL)
45860 +       {
45861 +           p->read_proc  = rail_info[i].read_func;
45862 +           p->write_proc = rail_info[i].write_func;
45863 +           p->data       = cmRail;
45864 +           p->owner      = THIS_MODULE;
45865 +       }
45866 +    }
45867 +
45868 +    if ((rail->SvcIndicatorDir = proc_mkdir ("svc_indicators", cmRail->Rail->ProcDir)) != NULL)
45869 +    {
45870 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
45871 +       {
45872 +           if ((p = create_proc_entry (svc_indicator_names[i], 0, rail->SvcIndicatorDir)) != NULL)
45873 +           {
45874 +               svc_indicator_data[i][rail->Number].svc_indicator = i;
45875 +               svc_indicator_data[i][rail->Number].rail          = rail; 
45876 +               
45877 +               p->write_proc = NULL;
45878 +               p->read_proc  = proc_read_svc_indicator_rail_bitmap;
45879 +               p->data       = (void *)&svc_indicator_data[i][rail->Number];
45880 +               p->owner      = THIS_MODULE;
45881 +           }
45882 +       }
45883 +    }
45884 +}
45885 +
45886 +void
45887 +cm_procfs_rail_fini (CM_RAIL *cmRail)
45888 +{
45889 +    EP_RAIL *rail = cmRail->Rail;
45890 +    int i;
45891 +
45892 +    if (rail->SvcIndicatorDir)
45893 +    {
45894 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
45895 +           remove_proc_entry (svc_indicator_names[i], rail->SvcIndicatorDir);
45896 +
45897 +       remove_proc_entry ("svc_indicators", cmRail->Rail->ProcDir);
45898 +    }
45899 +
45900 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
45901 +       remove_proc_entry (rail_info[i].name, cmRail->Rail->ProcDir);
45902 +}
45903 +
45904 +void
45905 +cm_procfs_init (CM_SUBSYS *subsys)
45906 +{
45907 +    struct proc_dir_entry *p;
45908 +    int i;
45909 +
45910 +    qsnet_proc_register_hex (ep_config_root, "machine_id",      &MachineId,      0);
45911 +
45912 +    if ((svc_indicators_root = proc_mkdir("svc_indicators", ep_procfs_root)) != NULL)
45913 +    {
45914 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
45915 +       {
45916 +           if ((p = create_proc_entry (svc_indicator_names[i], 0, svc_indicators_root)) != NULL)
45917 +           {
45918 +               p->write_proc = NULL;
45919 +               p->read_proc  = proc_read_svc_indicator_bitmap;
45920 +               p->data       = (void *)(long) i;
45921 +               p->owner      = THIS_MODULE;
45922 +           }
45923 +       }
45924 +       
45925 +    }
45926 +}
45927 +
45928 +void
45929 +cm_procfs_fini (CM_SUBSYS *subsys)
45930 +{
45931 +    int i;
45932 +
45933 +    if (svc_indicators_root)
45934 +    {
45935 +       for (i = 0; i < EP_SVC_NUM_INDICATORS; i++)
45936 +           remove_proc_entry (svc_indicator_names[i], svc_indicators_root);
45937 +       
45938 +       remove_proc_entry ("svc_indicators",   ep_procfs_root);
45939 +    }
45940 +
45941 +    remove_proc_entry ("machine_id",      ep_config_root);
45942 +}
45943 diff -urN clean/drivers/net/qsnet/ep/commands_elan4.c linux-2.6.9/drivers/net/qsnet/ep/commands_elan4.c
45944 --- clean/drivers/net/qsnet/ep/commands_elan4.c 1969-12-31 19:00:00.000000000 -0500
45945 +++ linux-2.6.9/drivers/net/qsnet/ep/commands_elan4.c   2005-07-20 08:01:33.000000000 -0400
45946 @@ -0,0 +1,173 @@
45947 +/*
45948 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
45949 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
45950 + *
45951 + *    For licensing information please see the supplied COPYING file
45952 + *
45953 + */
45954 +
45955 +#ident "@(#)$Id: commands_elan4.c,v 1.2.10.1 2005/07/20 12:01:33 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
45956 +/*      $Source: /cvs/master/quadrics/epmod/commands_elan4.c,v $*/
45957 +
45958 +#include <qsnet/kernel.h>
45959 +
45960 +#include <elan/kcomm.h>
45961 +
45962 +#include "kcomm_vp.h"
45963 +#include "kcomm_elan4.h"
45964 +#include "debug.h"
45965 +
45966 +#include <elan4/trtype.h>
45967 +
45968 +static __inline__ void
45969 +elan4_command_write (ELAN4_CQ *cq, E4_uint64 val, unsigned off)
45970 +{
45971 +    writeq (val, (void *)(cq->cq_mapping + offsetof (E4_CommandPort, Command[off])));
45972 +}
45973 +
45974 +void
45975 +elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag)
45976 +{
45977 +    elan4_command_write (cq, tag | NOP_CMD, 0);
45978 +}
45979 +
45980 +void
45981 +elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data)
45982 +{
45983 +    elan4_command_write (cq, addr | WRITE_DWORD_CMD, 0);
45984 +    elan4_command_write (cq, data, 1);
45985 +}
45986 +
45987 +void
45988 +elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data)
45989 +{
45990 +    elan4_command_write (cq, addr | ADD_DWORD_CMD, 0);
45991 +    elan4_command_write (cq, data,                 1);
45992 +}
45993 +
45994 +void
45995 +elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype)
45996 +{
45997 +    elan4_command_write (cq, from | (datatype << COPY64_DATA_TYPE_SHIFT) | COPY64_CMD, 0);
45998 +    elan4_command_write (cq, to   | (datatype << COPY64_DATA_TYPE_SHIFT),              1);
45999 +}
46000 +
46001 +void
46002 +elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie)
46003 +{
46004 +    elan4_command_write (cq, (cookie << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD, 0);
46005 +}
46006 +
46007 +
46008 +void 
46009 +elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs)
46010 +{
46011 +    elan4_command_write (cq, regs->Registers[0] | RUN_THREAD_CMD, 0);
46012 +    elan4_command_write (cq, regs->Registers[1],                  1);
46013 +    elan4_command_write (cq, regs->Registers[2],                  2);
46014 +    elan4_command_write (cq, regs->Registers[3],                  3);
46015 +    elan4_command_write (cq, regs->Registers[4],                  4);
46016 +    elan4_command_write (cq, regs->Registers[5],                  5);
46017 +    elan4_command_write (cq, regs->Registers[6],                  6);
46018 +}
46019 +
46020 +void
46021 +elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma)
46022 +{
46023 +    E4_uint64 *dmaptr = (E4_uint64 *) dma;
46024 +
46025 +    elan4_command_write (cq, dmaptr[0] | RUN_DMA_CMD, 0);
46026 +    elan4_command_write (cq, dmaptr[1],               1);
46027 +    elan4_command_write (cq, dmaptr[2],               2);
46028 +    elan4_command_write (cq, dmaptr[3],               3);
46029 +    elan4_command_write (cq, dmaptr[4],               4);
46030 +    elan4_command_write (cq, dmaptr[5],               5);
46031 +    elan4_command_write (cq, dmaptr[6],               6);
46032 +}
46033 +
46034 +void
46035 +elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event)
46036 +{
46037 +    elan4_command_write (cq, event | SET_EVENT_CMD, 0);
46038 +}
46039 +
46040 +void
46041 +elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count)
46042 +{
46043 +    elan4_command_write (cq, SET_EVENTN_CMD,0);
46044 +    elan4_command_write (cq, event | count, 1);
46045 +}
46046 +    
46047 +void
46048 +elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1)
46049 +{
46050 +    elan4_command_write (cq, event | WAIT_EVENT_CMD, 0);
46051 +    elan4_command_write (cq, candt,                  1);
46052 +    elan4_command_write (cq, param0,                 2);
46053 +    elan4_command_write (cq, param1,                 3);
46054 +}
46055 +
46056 +void
46057 +elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command)
46058 +{
46059 +    elan4_command_write (cq, command | OPEN_STEN_PKT_CMD, 0);
46060 +}
46061 +
46062 +void
46063 +elan4_guard (ELAN4_CQ *cq, E4_uint64 command)
46064 +{
46065 +    elan4_command_write (cq, command | GUARD_CMD, 0);
46066 +}
46067 +
46068 +void
46069 +elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr)
46070 +{
46071 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
46072 +    elan4_command_write (cq, addr,                               1);
46073 +}
46074 +
46075 +void
46076 +elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0)
46077 +{
46078 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
46079 +    elan4_command_write (cq, addr,                               1);
46080 +    elan4_command_write (cq, p0,                                 2);
46081 +}
46082 +
46083 +void
46084 +elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1)
46085 +{
46086 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
46087 +    elan4_command_write (cq, addr,                               1);
46088 +    elan4_command_write (cq, p0,                                 2);
46089 +    elan4_command_write (cq, p1,                                 3);
46090 +}
46091 +
46092 +void
46093 +elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...)
46094 +{
46095 +    E4_uint32    ndword = ((trtype & TR_SIZE_MASK) >> TR_SIZE_SHIFT);
46096 +    va_list      ap;
46097 +    register int i;
46098 +
46099 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
46100 +    elan4_command_write (cq, addr,                               1);
46101 +    
46102 +    va_start (ap, addr);
46103 +    for (i = 2; i < ndword+2; i++) 
46104 +       elan4_command_write (cq, va_arg (ap, E4_uint64), i);
46105 +    va_end (ap);
46106 +}
46107 +
46108 +void
46109 +elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr)
46110 +{
46111 +    E4_uint32    ndword = ((trtype &TR_SIZE_MASK) >> TR_SIZE_SHIFT);
46112 +    register int i;
46113 +
46114 +    elan4_command_write (cq, (trtype << 16) | SEND_TRANS_CMD, 0);
46115 +    elan4_command_write (cq, addr,                            1);
46116 +    for (i = 2; i < ndword+2; i++)
46117 +       elan4_command_write (cq, *ptr++, i);
46118 +}
46119 +
46120 diff -urN clean/drivers/net/qsnet/ep/conf_linux.c linux-2.6.9/drivers/net/qsnet/ep/conf_linux.c
46121 --- clean/drivers/net/qsnet/ep/conf_linux.c     1969-12-31 19:00:00.000000000 -0500
46122 +++ linux-2.6.9/drivers/net/qsnet/ep/conf_linux.c       2005-09-07 10:35:03.000000000 -0400
46123 @@ -0,0 +1,311 @@
46124 +/*
46125 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46126 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46127 + *
46128 + *    For licensing information please see the supplied COPYING file
46129 + *
46130 + */
46131 +
46132 +#ident "@(#)$Id: conf_linux.c,v 1.40.2.3 2005/09/07 14:35:03 mike Exp $"
46133 +/*      $Source: /cvs/master/quadrics/epmod/conf_linux.c,v $ */
46134 +
46135 +#include <qsnet/kernel.h>
46136 +#include <qsnet/autoconf.h>
46137 +
46138 +#include <elan/kcomm.h>
46139 +#include <elan/epsvc.h>
46140 +#include <elan/epcomms.h>
46141 +
46142 +#include "cm.h"
46143 +
46144 +#include "conf_linux.h"
46145 +
46146 +#include <linux/init.h>
46147 +#include <qsnet/module.h>
46148 +#include <linux/reboot.h>
46149 +#include <linux/notifier.h>
46150 +
46151 +/* Module parameters */
46152 +unsigned int epdebug        = 0;
46153 +unsigned int epdebug_console = 0;
46154 +unsigned int epdebug_cmlevel = 0;
46155 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
46156 +unsigned int epdebug_check_sum = 0;
46157 +#endif
46158 +int         disabled        = 0;
46159 +int          sdram_assert    = 0;
46160 +int          assfail_mode    = 0;
46161 +int         txd_stabilise   = 7;
46162 +int         portals_envelopes = 0;
46163 +
46164 +/* External module parameters */
46165 +extern int     MaxSwitchLevels;
46166 +extern int      RejoinCheck;
46167 +extern int      RejoinPanic;
46168 +extern int      PositionCheck;
46169 +extern int      MachineId;
46170 +
46171 +/* Module globals */
46172 +EP_SYS          epsys;
46173 +
46174 +#ifdef MODULE
46175 +MODULE_AUTHOR("Quadrics Ltd");
46176 +MODULE_DESCRIPTION("Elan Kernel Comms");
46177 +
46178 +MODULE_LICENSE("GPL");
46179 +
46180 +module_param(epdebug,         uint, 0);
46181 +module_param(epdebug_console, uint, 0);
46182 +module_param(epdebug_cmlevel, uint, 0);
46183 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
46184 +module_param(epdebug_check_sum, uint, 0);
46185 +#endif
46186 +module_param(disabled,        uint, 0);
46187 +
46188 +module_param(MachineId,       uint, 0);
46189 +module_param(RejoinPanic,     uint, 0);
46190 +module_param(RejoinCheck,     uint, 0);
46191 +module_param(PositionCheck,   uint, 0);
46192 +module_param(MaxSwitchLevels, uint, 0);
46193 +
46194 +module_param(sdram_assert,    uint, 0);
46195 +module_param(assfail_mode,    uint, 0);
46196 +module_param(txd_stabilise,   uint, 0);
46197 +module_param(portals_envelopes,uint, 0);
46198 +
46199 +/* epcomms.c large message service functions */
46200 +EXPORT_SYMBOL(ep_alloc_xmtr);
46201 +EXPORT_SYMBOL(ep_free_xmtr);
46202 +EXPORT_SYMBOL(ep_transmit_message);
46203 +EXPORT_SYMBOL(ep_multicast_message);
46204 +EXPORT_SYMBOL(ep_transmit_rpc);
46205 +
46206 +EXPORT_SYMBOL(ep_alloc_rcvr);
46207 +EXPORT_SYMBOL(ep_free_rcvr);
46208 +EXPORT_SYMBOL(ep_queue_receive);
46209 +EXPORT_SYMBOL(ep_requeue_receive);
46210 +EXPORT_SYMBOL(ep_rpc_put);
46211 +EXPORT_SYMBOL(ep_rpc_get);
46212 +EXPORT_SYMBOL(ep_complete_rpc);
46213 +EXPORT_SYMBOL(ep_complete_receive);
46214 +
46215 +EXPORT_SYMBOL(ep_poll_transmits);
46216 +EXPORT_SYMBOL(ep_enable_txcallbacks);
46217 +EXPORT_SYMBOL(ep_disable_txcallbacks);
46218 +
46219 +/* epcomms.c functions for accessing fields of rxds/txds */
46220 +EXPORT_SYMBOL(ep_rxd_arg);
46221 +EXPORT_SYMBOL(ep_rxd_len);
46222 +EXPORT_SYMBOL(ep_rxd_isrpc);
46223 +EXPORT_SYMBOL(ep_rxd_envelope);
46224 +EXPORT_SYMBOL(ep_rxd_payload);
46225 +EXPORT_SYMBOL(ep_rxd_node);
46226 +EXPORT_SYMBOL(ep_rxd_status);
46227 +EXPORT_SYMBOL(ep_rxd_statusblk);
46228 +EXPORT_SYMBOL(ep_txd_node);
46229 +EXPORT_SYMBOL(ep_txd_statusblk);
46230 +
46231 +/* kmap.c, nmh.c - handling mapping of pages into network memory */
46232 +EXPORT_SYMBOL(ep_dvma_reserve);
46233 +EXPORT_SYMBOL(ep_dvma_release);
46234 +EXPORT_SYMBOL(ep_dvma_load);
46235 +EXPORT_SYMBOL(ep_dvma_unload);
46236 +EXPORT_SYMBOL(ep_nmd_subset);
46237 +EXPORT_SYMBOL(ep_nmd_merge);
46238 +
46239 +EXPORT_SYMBOL(ep_system);
46240 +
46241 +/* kcomm.c */
46242 +EXPORT_SYMBOL(ep_nodeid);
46243 +EXPORT_SYMBOL(ep_numnodes);
46244 +EXPORT_SYMBOL(ep_waitfor_nodeid);
46245 +
46246 +/* railhints.c */
46247 +EXPORT_SYMBOL(ep_pickRail);
46248 +EXPORT_SYMBOL(ep_xmtr_bcastrail);
46249 +EXPORT_SYMBOL(ep_xmtr_prefrail);
46250 +EXPORT_SYMBOL(ep_xmtr_availrails);
46251 +EXPORT_SYMBOL(ep_xmtr_noderails);
46252 +EXPORT_SYMBOL(ep_rcvr_prefrail);
46253 +EXPORT_SYMBOL(ep_rcvr_availrails);
46254 +EXPORT_SYMBOL(ep_rxd_railmask);
46255 +
46256 +EXPORT_SYMBOL(ep_svc_indicator_bitmap);
46257 +EXPORT_SYMBOL(ep_svc_indicator_is_set);
46258 +EXPORT_SYMBOL(ep_svc_indicator_clear);
46259 +EXPORT_SYMBOL(ep_svc_indicator_set);
46260 +
46261 +/* cm.c */
46262 +EXPORT_SYMBOL(cm_svc_indicator_clear);
46263 +EXPORT_SYMBOL(cm_svc_indicator_set);
46264 +EXPORT_SYMBOL(cm_svc_indicator_is_set);
46265 +EXPORT_SYMBOL(cm_svc_indicator_bitmap);
46266 +
46267 +#endif
46268 +
46269 +EP_SYS *
46270 +ep_system()
46271 +{
46272 +    return (&epsys);
46273 +}
46274 +
46275 +void
46276 +ep_mod_inc_usecount()
46277 +{
46278 +    MOD_INC_USE_COUNT;
46279 +} 
46280 +
46281 +void
46282 +ep_mod_dec_usecount()
46283 +{
46284 +    MOD_DEC_USE_COUNT;
46285 +}
46286 +
46287 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
46288 +
46289 +#include <linux/dump.h>
46290 +
46291 +static int
46292 +ep_dump_event (struct notifier_block *self, unsigned long event, void *buffer)
46293 +{
46294 +    if (event == DUMP_BEGIN)
46295 +       ep_shutdown (&epsys);
46296 +
46297 +    return (NOTIFY_DONE);
46298 +}
46299 +static struct notifier_block ep_dump_notifier = 
46300 +{
46301 +    notifier_call:     ep_dump_event,
46302 +    priority:          0,
46303 +};
46304 +
46305 +#endif
46306 +
46307 +static int
46308 +ep_reboot_event (struct notifier_block *self, unsigned long event, void *buffer)
46309 +{
46310 +    if ((event == SYS_RESTART || event == SYS_HALT || event == SYS_POWER_OFF))
46311 +       ep_shutdown (&epsys);
46312 +
46313 +    return (NOTIFY_DONE);
46314 +}
46315 +
46316 +static struct notifier_block ep_reboot_notifier = 
46317 +{
46318 +    notifier_call:     ep_reboot_event,
46319 +    priority:          0,
46320 +};
46321 +
46322 +#if !defined(NO_PANIC_NOTIFIER)
46323 +static int
46324 +ep_panic_event (struct notifier_block *self, unsigned long event, void *buffer)
46325 +{
46326 +    ep_shutdown (&epsys);
46327 +
46328 +    return (NOTIFY_DONE);
46329 +}
46330 +
46331 +static struct notifier_block ep_panic_notifier = 
46332 +{
46333 +    notifier_call:     ep_panic_event,
46334 +    priority:          0,
46335 +};
46336 +#endif
46337 +
46338 +/*
46339 + * Module configuration. 
46340 + */
46341 +#ifdef MODULE
46342 +static int __init ep_init(void)
46343 +#else
46344 +__initfunc(int ep_init(void))
46345 +#endif
46346 +{
46347 +    register int rmask = 0;
46348 +
46349 +    ep_procfs_init ();
46350 +
46351 +    ep_sys_init (&epsys);
46352 +
46353 +#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE)
46354 +    rmask = ep4_create_rails (&epsys, disabled);
46355 +#endif
46356 +    
46357 +    /* If we've brought up an elan4 rail, then disable all elan3 rails. */
46358 +    if ((rmask & ~disabled) != 0)
46359 +       disabled = ~rmask;
46360 +
46361 +#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE)
46362 +    rmask = ep3_create_rails (&epsys, disabled);
46363 +#endif
46364 +
46365 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
46366 +    register_dump_notifier (&ep_dump_notifier);
46367 +#endif
46368 +    register_reboot_notifier (&ep_reboot_notifier);
46369 +
46370 +#if !defined(NO_PANIC_NOTIFIER)
46371 +    notifier_chain_register (&panic_notifier_list, &ep_panic_notifier);
46372 +#endif
46373 +
46374 +    return (0);
46375 +}
46376 +
46377 +/*
46378 + * Module removal.
46379 + */
46380 +#ifdef MODULE
46381 +static void
46382 +__exit ep_exit(void)
46383 +{
46384 +    register int i;
46385 +
46386 +#if defined(CONFIG_DUMP) || defined(CONFIG_DUMP_MODULE)
46387 +    unregister_dump_notifier (&ep_dump_notifier);
46388 +#endif
46389 +    unregister_reboot_notifier (&ep_reboot_notifier);
46390 +
46391 +#if !defined(NO_PANIC_NOTIFIER)
46392 +    notifier_chain_unregister (&panic_notifier_list, &ep_panic_notifier);
46393 +#endif
46394 +
46395 +    for (i = 0; i < EP_MAX_RAILS; i++)
46396 +    {
46397 +       if (epsys.Rails[i])
46398 +       {
46399 +           switch (epsys.Rails[i]->State)
46400 +           {
46401 +           case EP_RAIL_STATE_UNINITIALISED:
46402 +               break;
46403 +
46404 +           case EP_RAIL_STATE_STARTED:
46405 +           case EP_RAIL_STATE_RUNNING:
46406 +           case EP_RAIL_STATE_INCOMPATIBLE:
46407 +               /* remove per-rail CM proc entries */
46408 +               ep_stop_rail (epsys.Rails[i]);
46409 +               break;
46410 +           }
46411 +
46412 +           /* remove EP proc rail entries after per-rail CM entries */
46413 +           ep_procfs_rail_fini (epsys.Rails[i]);
46414 +           ep_destroy_rail (epsys.Rails[i]);
46415 +       }
46416 +    }
46417 +
46418 +    ep_sys_fini (&epsys);
46419 +
46420 +    ep_procfs_fini ();
46421 +}
46422 +
46423 +/* Declare the module init and exit functions */
46424 +module_init(ep_init);
46425 +module_exit(ep_exit);
46426 +
46427 +#endif
46428 +
46429 +
46430 +/*
46431 + * Local variables:
46432 + * c-file-style: "stroustrup"
46433 + * End:
46434 + */
46435 diff -urN clean/drivers/net/qsnet/ep/conf_linux.h linux-2.6.9/drivers/net/qsnet/ep/conf_linux.h
46436 --- clean/drivers/net/qsnet/ep/conf_linux.h     1969-12-31 19:00:00.000000000 -0500
46437 +++ linux-2.6.9/drivers/net/qsnet/ep/conf_linux.h       2003-10-02 10:16:07.000000000 -0400
46438 @@ -0,0 +1,29 @@
46439 +/*
46440 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46441 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46442 + *
46443 + *    For licensing information please see the supplied COPYING file
46444 + *
46445 + */
46446 +
46447 +#ident "@(#)$Id: conf_linux.h,v 1.6 2003/10/02 14:16:07 mike Exp $"
46448 +/*      $Source: /cvs/master/quadrics/epmod/conf_linux.h,v $*/
46449 +
46450 +#ifndef __ELAN_CONF_LINUX_H
46451 +#define __ELAN_CONF_LINUX_H
46452 +
46453 +extern void ep_procfs_init(void);
46454 +extern void ep_procfs_fini(void);
46455 +extern void ep_procfs_rail_init(EP_RAIL *rail);
46456 +extern void ep_procfs_rail_fini(EP_RAIL *rail);
46457 +
46458 +extern void ep_procfs_svc_indicator_create(int svc_indicator, char *name);
46459 +extern void ep_procfs_svc_indicator_remove(int svc_indicator, char *name);
46460 +
46461 +#endif /* __ELAN_CONF_LINUX_H */
46462 +
46463 +/*
46464 + * Local variables:
46465 + * c-file-style: "stroustrup"
46466 + * End:
46467 + */
46468 diff -urN clean/drivers/net/qsnet/ep/debug.c linux-2.6.9/drivers/net/qsnet/ep/debug.c
46469 --- clean/drivers/net/qsnet/ep/debug.c  1969-12-31 19:00:00.000000000 -0500
46470 +++ linux-2.6.9/drivers/net/qsnet/ep/debug.c    2004-11-12 05:55:03.000000000 -0500
46471 @@ -0,0 +1,145 @@
46472 +/*
46473 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46474 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46475 + *
46476 + *    For licensing information please see the supplied COPYING file
46477 + *
46478 + */
46479 +
46480 +#ident "@(#)$Id: debug.c,v 1.30 2004/11/12 10:55:03 mike Exp $"
46481 +/*      $Source: /cvs/master/quadrics/epmod/debug.c,v $*/
46482 +
46483 +#include <qsnet/kernel.h>
46484 +
46485 +#include <elan/kcomm.h>
46486 +
46487 +#include "debug.h"
46488 +
46489 +DisplayInfo di_ep_debug = {ep_debugf, DBG_DEBUG};
46490 +
46491 +/*
46492 + * Generate a partial bitmap string, for the bitmap from offset "off" for "count" bits,
46493 + * to allow for displaying of subsets, treat entry 0 of the bitmap as having value "base".
46494 + */
46495 +int
46496 +ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int off, int nbits)
46497 +{
46498 +    char entry[12];                                            /* space for N-N */
46499 +    register int i, j, len;
46500 +    register int notstart = off;
46501 +    register int notfirst = 0;
46502 +    char *p = str;
46503 +    
46504 +    for (i = off; i < nbits; i++)
46505 +    {
46506 +       if (BT_TEST (bitmap, i))
46507 +       {
46508 +           for (j = i+1; j < nbits; j++)
46509 +               if (! BT_TEST (bitmap, j))
46510 +                   break;
46511 +
46512 +           if (j == (i+1))
46513 +               len = (int)sprintf (entry, "%d", base + i);
46514 +           else
46515 +               len = (int)sprintf (entry, "%d-%d", base + i, base + j-1);
46516 +           
46517 +           /* NOTE the 2 is for: one for comma, one for (possible) closing bracket */
46518 +           if ((p - str) <= ((long)nbytes - (len+3)))
46519 +               p += (int)sprintf (p, "%c%s", notfirst++ ? ',' : notstart ? ' ' : '[', entry);
46520 +           else
46521 +           {
46522 +               /* no more space on this line, so move onto next */
46523 +               sprintf (p, "%c", notfirst++ ? ',' : '[');
46524 +
46525 +               return (i);
46526 +           }
46527 +
46528 +           i = j;
46529 +       }
46530 +    }
46531 +    
46532 +    if (!notfirst)
46533 +       sprintf (str, "<empty>");
46534 +    else
46535 +       strcpy (p, "]");
46536 +
46537 +    return (-1);
46538 +}
46539 +
46540 +void
46541 +ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits)
46542 +{
46543 +    /* Tru64 kernel printf() truncates lines at 128 bytes - the man pages for printf (9)
46544 +     * do not mention this restriction, nor that it does not terminate the line with a
46545 +     * carriage return, this is pretty naff.
46546 +     * Linux has a similar limit though is much more generous at 1024 - and you can just 
46547 +     * look at the code to see why this has been done.
46548 +     *
46549 +     * Our nodeset information could well be longer than 128 characters,  so we're going to 
46550 +     * have to split it into a number of lines. */
46551 +
46552 +#define LINEBUF_SIZE           128
46553 +    char *p, linebuf[LINEBUF_SIZE+1];                          /* +1 for null termination */
46554 +    int i, noff, off = 0;
46555 +
46556 +    do {
46557 +       if (off == 0)
46558 +           p = linebuf + (int)sprintf (linebuf, "%s: %s ", prefix, tag);
46559 +       else
46560 +       {
46561 +           p = linebuf + (int)sprintf (linebuf, "%s:  ", prefix);
46562 +           for (i = 0; tag[i] != '\0'; i++)
46563 +               *p++ = ' ';
46564 +       }
46565 +
46566 +       noff = ep_sprintf_bitmap (p, &linebuf[LINEBUF_SIZE-1]-p, bitmap, base, off, nbits);
46567 +
46568 +       printk ("%s\n", linebuf);
46569 +
46570 +    } while ((off = noff) != -1);
46571 +
46572 +#undef LINEBUF_SIZE
46573 +}
46574 +
46575 +void
46576 +ep_debugf (long mode, char *fmt, ...)
46577 +{
46578 +   va_list ap;
46579 +   char prefix[32];
46580 +   
46581 +   va_start (ap, fmt);
46582 +#if defined(LINUX)
46583 +   sprintf (prefix, "[%08d.%04d] ", (int) lbolt, current->pid);
46584 +#else
46585 +   sprintf (prefix, "[%08d.----] ", (int) lbolt);
46586 +#endif
46587 +   qsnet_vdebugf ((mode & epdebug_console ? QSNET_DEBUG_CONSOLE: 0) | QSNET_DEBUG_BUFFER, prefix, fmt, ap);
46588 +   va_end (ap);
46589 +}
46590 +
46591 +int
46592 +ep_assfail (EP_RAIL *rail, const char *ex, const char *func, const char *file, const int line)
46593 +{
46594 +    qsnet_debugf (QSNET_DEBUG_BUFFER, "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
46595 +    
46596 +    printk (KERN_EMERG "ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
46597 +    
46598 +    if (panicstr)
46599 +       return (0);
46600 +    
46601 +    if (assfail_mode & 1)                              /* return to BUG() */
46602 +       return 1;
46603 +    
46604 +    if (assfail_mode & 2)
46605 +       panic ("ep: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
46606 +    if (assfail_mode & 4)
46607 +       epdebug = 0;
46608 +    
46609 +    return 0;
46610 +}
46611 +
46612 +/*
46613 + * Local variables:
46614 + * c-file-style: "stroustrup"
46615 + * End:
46616 + */
46617 diff -urN clean/drivers/net/qsnet/ep/debug_elan4.c linux-2.6.9/drivers/net/qsnet/ep/debug_elan4.c
46618 --- clean/drivers/net/qsnet/ep/debug_elan4.c    1969-12-31 19:00:00.000000000 -0500
46619 +++ linux-2.6.9/drivers/net/qsnet/ep/debug_elan4.c      2004-05-19 06:21:04.000000000 -0400
46620 @@ -0,0 +1,59 @@
46621 +/*
46622 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46623 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46624 + *
46625 + *    For licensing information please see the supplied COPYING file
46626 + *
46627 + */
46628 +
46629 +#ident "@(#)$Id: debug_elan4.c,v 1.1 2004/05/19 10:21:04 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
46630 +/*      $Source: /cvs/master/quadrics/epmod/debug_elan4.c,v $*/
46631 +
46632 +#include <qsnet/kernel.h>
46633 +
46634 +#include <elan/kcomm.h>
46635 +
46636 +#include "kcomm_vp.h"
46637 +#include "kcomm_elan4.h"
46638 +#include "conf_linux.h"
46639 +#include "debug.h"
46640 +
46641 +static void
46642 +ep4_display_ecqs (EP4_RAIL *rail)
46643 +{
46644 +    struct list_head *el;
46645 +    unsigned long flags;
46646 +    int i;
46647 +
46648 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
46649 +    for (i = 0; i <EP4_NUM_ECQ; i++)
46650 +    {
46651 +       list_for_each (el, &rail->r_ecq_list[i]) {
46652 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
46653 +
46654 +           ep_debugf (DBG_DEBUG, "ECQ: type %d: avail %d cqnum %d\n", i, ecq->ecq_avail, elan4_cq2num (ecq->ecq_cq));
46655 +       }
46656 +    }
46657 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
46658 +}
46659 +
46660 +void
46661 +ep4_debug_rail (EP_RAIL *r)
46662 +{
46663 +    EP4_RAIL *rail = (EP4_RAIL *) r;
46664 +    EP_SYS   *sys  = rail->r_generic.System;
46665 +
46666 +    ep_debugf (DBG_DEBUG, "ep%d: is elan4 %d rev %c\n", rail->r_generic.Number,
46667 +              rail->r_generic.Devinfo.dev_instance, 'a' + rail->r_generic.Devinfo.dev_revision_id);
46668 +
46669 +    ep4_display_ecqs (rail);
46670 +
46671 +    ep_display_alloc (&sys->Allocator);
46672 +    ep_display_rmap (sys->Allocator.ResourceMap);
46673 +
46674 +    ep_display_alloc (&rail->r_generic.ElanAllocator);
46675 +    ep_display_alloc (&rail->r_generic.MainAllocator);
46676 +
46677 +    ep_display_rmap (rail->r_generic.ElanAllocator.ResourceMap);
46678 +}
46679 +
46680 diff -urN clean/drivers/net/qsnet/ep/debug.h linux-2.6.9/drivers/net/qsnet/ep/debug.h
46681 --- clean/drivers/net/qsnet/ep/debug.h  1969-12-31 19:00:00.000000000 -0500
46682 +++ linux-2.6.9/drivers/net/qsnet/ep/debug.h    2005-04-05 12:36:28.000000000 -0400
46683 @@ -0,0 +1,111 @@
46684 +/*
46685 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46686 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46687 + *
46688 + *    For licensing information please see the supplied COPYING file
46689 + *
46690 + */
46691 +
46692 +#ifndef _ELAN3_EPDEBUG_H
46693 +#define _ELAN3_EPDEBUG_H
46694 +
46695 +#ident "$Id: debug.h,v 1.21 2005/04/05 16:36:28 mike Exp $"
46696 +/*      $Source: /cvs/master/quadrics/epmod/debug.h,v $ */
46697 +
46698 +extern unsigned int epdebug;
46699 +extern unsigned int epdebug_console;
46700 +extern unsigned int epdebug_cmlevel;
46701 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
46702 +extern unsigned int epdebug_check_sum;
46703 +#endif
46704 +#define DBG_CONFIG             0x00000001                      /* Module configuration */
46705 +#define DBG_PROBE              0x00000002
46706 +#define DBG_ROUTETABLE         0x00000004
46707 +#define DBG_STATEMAP           0x00000008
46708 +
46709 +#define DBG_CM                 0x00000020
46710 +#define DBG_XMTR               0x00000040
46711 +#define DBG_RCVR               0x00000080
46712 +#define DBG_FORWARD            0x00000100
46713 +#define DBG_DISCON             0x00000200
46714 +#define DBG_EPTRAP             0x00000400
46715 +#define DBG_COMMAND            0x00000800
46716 +#define DBG_RETRY              0x00001000
46717 +#define DBG_DEBUG              0x00002000
46718 +#define DBG_NETWORK_ERROR      0x00004000
46719 +#define DBG_MSGSYS             0x00008000
46720 +#define DBG_MANAGER            0x00010000
46721 +#define DBG_KMAP               0x00020000
46722 +#define DBG_FAILOVER           0x00040000
46723 +#define DBG_MAPNMD             0x00080000
46724 +#define DBG_KMSG               0x00100000
46725 +#define DBG_SVC                 0x00200000
46726 +#define DBG_STABILISE          0x00400000
46727 +
46728 +#if defined(DEBUG_PRINTF)
46729 +
46730 +#  define EPRINTF0(m,fmt)                      ((epdebug&(m)) ? ep_debugf(m,fmt)                     : (void)0)
46731 +#  define EPRINTF1(m,fmt,a)                    ((epdebug&(m)) ? ep_debugf(m,fmt,a)                   : (void)0)
46732 +#  define EPRINTF2(m,fmt,a,b)                  ((epdebug&(m)) ? ep_debugf(m,fmt,a,b)                 : (void)0)
46733 +#  define EPRINTF3(m,fmt,a,b,c)                        ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c)               : (void)0)
46734 +#  define EPRINTF4(m,fmt,a,b,c,d)              ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d)             : (void)0)
46735 +#  define EPRINTF5(m,fmt,a,b,c,d,e)            ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e)           : (void)0)
46736 +#  define EPRINTF6(m,fmt,a,b,c,d,e,f)          ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f)         : (void)0)
46737 +#  define EPRINTF7(m,fmt,a,b,c,d,e,f,g)                ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g)       : (void)0)
46738 +#  define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h)      ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h)     : (void)0)
46739 +#  define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i)    ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i)   : (void)0)
46740 +#  define EPRINTF10(m,fmt,a,b,c,d,e,f,g,h,i,j) ((epdebug&(m)) ? ep_debugf(m,fmt,a,b,c,d,e,f,g,h,i,j) : (void)0)
46741 +
46742 +#  define CPRINTF0(lvl,fmt)                    (((lvl) <= epdebug_cmlevel) ? EPRINTF0(DBG_CM,fmt)                   : (void)0)
46743 +#  define CPRINTF1(lvl,fmt,a)                  (((lvl) <= epdebug_cmlevel) ? EPRINTF1(DBG_CM,fmt,a)                 : (void)0)
46744 +#  define CPRINTF2(lvl,fmt,a,b)                        (((lvl) <= epdebug_cmlevel) ? EPRINTF2(DBG_CM,fmt,a,b)               : (void)0)
46745 +#  define CPRINTF3(lvl,fmt,a,b,c)              (((lvl) <= epdebug_cmlevel) ? EPRINTF3(DBG_CM,fmt,a,b,c)             : (void)0)
46746 +#  define CPRINTF4(lvl,fmt,a,b,c,d)            (((lvl) <= epdebug_cmlevel) ? EPRINTF4(DBG_CM,fmt,a,b,c,d)           : (void)0)
46747 +#  define CPRINTF5(lvl,fmt,a,b,c,d,e)          (((lvl) <= epdebug_cmlevel) ? EPRINTF5(DBG_CM,fmt,a,b,c,d,e)         : (void)0)
46748 +#  define CPRINTF6(lvl,fmt,a,b,c,d,e,f)                (((lvl) <= epdebug_cmlevel) ? EPRINTF6(DBG_CM,fmt,a,b,c,d,e,f)       : (void)0)
46749 +#  define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g)      (((lvl) <= epdebug_cmlevel) ? EPRINTF7(DBG_CM,fmt,a,b,c,d,e,f,g)     : (void)0)
46750 +#  define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h)    (((lvl) <= epdebug_cmlevel) ? EPRINTF8(DBG_CM,fmt,a,b,c,d,e,f,g,h)   : (void)0)
46751 +#  define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i)  (((lvl) <= epdebug_cmlevel) ? EPRINTF9(DBG_CM,fmt,a,b,c,d,e,f,g,h,i) : (void)0)
46752 +
46753 +#if defined __GNUC__
46754 +extern void ep_debugf (long mode, char *fmt, ...) __attribute__ ((format (printf,2,3)));
46755 +#else
46756 +extern void ep_debugf (long mode, char *fmt, ...);
46757 +#endif
46758 +
46759 +#else
46760 +
46761 +#  define EPRINTF0(m,fmt)                      (0)
46762 +#  define EPRINTF1(m,fmt,a)                    (0)
46763 +#  define EPRINTF2(m,fmt,a,b)                  (0)
46764 +#  define EPRINTF3(m,fmt,a,b,c)                        (0)
46765 +#  define EPRINTF4(m,fmt,a,b,c,d)              (0)
46766 +#  define EPRINTF5(m,fmt,a,b,c,d,e)            (0)
46767 +#  define EPRINTF6(m,fmt,a,b,c,d,e,f)          (0)
46768 +#  define EPRINTF7(m,fmt,a,b,c,d,e,f,g)                (0)
46769 +#  define EPRINTF8(m,fmt,a,b,c,d,e,f,g,h)      (0)
46770 +#  define EPRINTF9(m,fmt,a,b,c,d,e,f,g,h,i)    (0)
46771 +#  define EPRINTF10(m,fmt,a,b,c,d,e,f,g,h,i,j) (0)
46772 +
46773 +#  define CPRINTF0(lvl,fmt)                    (0)
46774 +#  define CPRINTF1(lvl,fmt,a)                  (0)
46775 +#  define CPRINTF2(lvl,fmt,a,b)                        (0)
46776 +#  define CPRINTF3(lvl,fmt,a,b,c)              (0)
46777 +#  define CPRINTF4(lvl,fmt,a,b,c,d)            (0)
46778 +#  define CPRINTF5(lvl,fmt,a,b,c,d,e)          (0)
46779 +#  define CPRINTF6(lvl,fmt,a,b,c,d,e,f)                (0)
46780 +#  define CPRINTF7(lvl,fmt,a,b,c,d,e,f,g)      (0)
46781 +#  define CPRINTF8(lvl,fmt,a,b,c,d,e,f,g,h)    (0)
46782 +#  define CPRINTF9(lvl,fmt,a,b,c,d,e,f,g,h,i)  (0)
46783 +
46784 +#endif /* DEBUG_PRINTF */
46785 +
46786 +extern DisplayInfo di_ep_debug;
46787 +
46788 +/*
46789 + * Local variables:
46790 + * c-file-style: "stroustrup"
46791 + * End:
46792 + */
46793 +#endif /* _ELAN3_EPDEBUG_H */
46794 +
46795 diff -urN clean/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S linux-2.6.9/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S
46796 --- clean/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S       1969-12-31 19:00:00.000000000 -0500
46797 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_asm_elan4_thread.S 2004-04-25 07:25:43.000000000 -0400
46798 @@ -0,0 +1,133 @@
46799 +/*
46800 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46801 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46802 + *
46803 + *    For licensing information please see the supplied COPYING file
46804 + *
46805 + */
46806 +
46807 +#ident "@(#)$Id: epcomms_asm_elan4_thread.S,v 1.5 2004/04/25 11:25:43 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
46808 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_asm_elan4_thread.S,v $*/
46809 +
46810 +#include <elan4/events.h>
46811 +#include <elan4/commands.h>
46812 +
46813 +#include "assym_elan4.h"
46814 +
46815 +/* XXXXX - registers.h */
46816 +#define E4_MAIN_INT_SHIFT              14
46817 +
46818 +/*
46819 + * c_waitevent_interrupt (E4_uint64 *commandport, E4_Event *event, E4_uint64 count, E4_uint64 intcookie)
46820 + */
46821 +       .global c_waitevent_interrupt
46822 +c_waitevent_interrupt:
46823 +       add             %sp, -192, %sp
46824 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
46825 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
46826 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
46827 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
46828 +       nop                                             // BUG FIX: E4 RevA
46829 +       nop                                             // BUG FIX: E4 RevA
46830 +
46831 +       mov             %r7, %r18                       // (%r2) return pc
46832 +1:     call            2f
46833 +        mov            %sp, %r17                       // (%r1) SP
46834 +2:     add             %r7, (3f-1b), %r16              // (%r0) PC
46835 +       st32            %r16, [%sp]                     // event source block
46836 +       mov             MAKE_EXT_CLEAN_CMD, %r23
46837 +       st8             %r23, [%sp+56]                  // event source block
46838 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
46839 +       mov             %r23,%r23                       // BUG FIX: E4 RevA
46840 +       nop                                             // BUG FIX: E4 RevA
46841 +       nop                                             // BUG FIX: E4 RevA
46842 +       
46843 +       or              %r9, WAIT_EVENT_CMD, %r16                                               ! WAIT_EVENT_CMD | event
46844 +       sll8            %r10, 32, %r17
46845 +       or              %r17, E4_EVENT_TYPE_VALUE(E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), %r17  !   ev_CountAndType
46846 +       mov             %sp, %r18                                                               !   ev_Source
46847 +       mov             %r8, %r19                                                               !   ev_Dest
46848 +       sll8            %r11, E4_MAIN_INT_SHIFT, %r20
46849 +       or              %r20, INTERRUPT_CMD, %r20                                               ! INTERRUPT_CMD | (cookie << E4_MAIN_INT_SHIFT)
46850 +       mov             NOP_CMD, %r21
46851 +       mov             NOP_CMD, %r22
46852 +       mov             NOP_CMD, %r23
46853 +
46854 +       st64suspend     %r16, [%r8]
46855 +       
46856 +3:     ld64            [%sp + 64], %r16                // restore call preserved register
46857 +       ld64            [%sp + 128], %r24
46858 +       jmpl            %r2+8, %r0                      // and return
46859 +        add            %sp, 192, %sp
46860 +
46861 +
46862 +#define EP4_RCVR_PENDING_STALLED               1               /* indicates thread has stalled for no descriptor (rcvr_pending_head) */
46863 +
46864 +#define RXD_DEBUG(VAL,RXD,TMP) \
46865 +       mov     VAL, TMP; \
46866 +       st8     TMP, [RXD + EP4_RXD_DEBUG]
46867 +
46868 +       
46869 +       /*
46870 +        * %r2  - rcvr elan
46871 +        * %r3  - rxd elan
46872 +        */
46873 +       .global c_queue_rxd
46874 +c_queue_rxd:
46875 +       RXD_DEBUG(1, %r3, %r23)
46876 +       
46877 +       ld16    [%r2 + EP4_RCVR_PENDING_TAILP], %r18    /* r18 == tailp, r19 = head */
46878 +       add     %r3, EP4_RXD_NEXT, %r4
46879 +       
46880 +       st8     %r0, [%r3 + EP4_RXD_NEXT]               /* rxd->rxd_next = NULL */
46881 +       st8     %r4, [%r2 + EP4_RCVR_PENDING_TAILP]     /* tailp = &rxd->rxd_next */
46882 +       st8     %r3, [%r18]                             /* *tailp = rxd */
46883 +
46884 +       cmp     %r19, EP4_RCVR_PENDING_STALLED          /* thread stalled ? */
46885 +       beq     1f
46886 +        mov    %r18, %r16                              /* must have used %r16, %r19, %r23 */
46887 +       mov     %r3, %r23
46888 +
46889 +       RXD_DEBUG(2, %r3, %r23)
46890 +       
46891 +       st8suspend %r16, [%r3 + EP4_RXD_QUEUED]         /* no - mark as queued - all done */
46892 +
46893 +1:     st8     %r16, [%r3 + EP4_RXD_QUEUED]            /* mark as queued */
46894 +
46895 +       RXD_DEBUG(3, %r3, %r23)
46896 +
46897 +       mov     %r3, %r8                                /* return rxd from c_stall_thread */
46898 +       ba      .epcomms_resume_thread                  /* resume the thread */
46899 +        ld64   [%r2 + EP4_RCVR_THREAD_STALL], %r0
46900 +
46901 +       /*
46902 +        *  c_stall_thread (EP4_RCVR_ELAN *rcvrElan)
46903 +        */
46904 +       .global c_stall_thread
46905 +c_stall_thread:
46906 +       add             %sp, -192, %sp
46907 +       st64            %r16, [%sp + 64]                // preserve call preserved registers
46908 +       st64            %r24, [%sp + 128]               // - see CALL_USED_REGISTERS.
46909 +       mov             %r16,%r16                       // BUG FIX: E4 RevA
46910 +       mov             %r24,%r24                       // BUG FIX: E4 RevA
46911 +       nop                                             // BUG FIX: E4 RevA
46912 +       nop                                             // BUG FIX: E4 RevA
46913 +
46914 +       mov             EP4_RCVR_PENDING_STALLED, %r9   // Mark rcvr as stalled
46915 +       st8             %r9, [%r8 + EP4_RCVR_PENDING_HEAD]
46916 +
46917 +       // XXXX _ TBD should generate interrupt
46918 +
46919 +       mov             %r1, %r17                       // SP 
46920 +       mov             %r7, %r23                       // return pc
46921 +
46922 +       st64suspend     %r16, [%r8 + EP4_RCVR_THREAD_STALL]
46923 +       
46924 +.epcomms_resume_thread:
46925 +       /* %r8 == rxdElan */
46926 +       
46927 +       ld64            [%sp + 64], %r16                // restore call preserved registers
46928 +       ld64            [%sp + 128], %r24
46929 +       jmpl            %r7+8, %r0                      // and return
46930 +        add            %sp, 192, %sp
46931 +
46932 diff -urN clean/drivers/net/qsnet/ep/epcomms.c linux-2.6.9/drivers/net/qsnet/ep/epcomms.c
46933 --- clean/drivers/net/qsnet/ep/epcomms.c        1969-12-31 19:00:00.000000000 -0500
46934 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms.c  2004-11-30 07:02:06.000000000 -0500
46935 @@ -0,0 +1,484 @@
46936 +/*
46937 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
46938 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
46939 + *
46940 + *    For licensing information please see the supplied COPYING file
46941 + *
46942 + */
46943 +
46944 +#ident "@(#)$Id: epcomms.c,v 1.77 2004/11/30 12:02:06 mike Exp $"
46945 +/*      $Source: /cvs/master/quadrics/epmod/epcomms.c,v $ */
46946 +
46947 +#include <qsnet/kernel.h>
46948 +#include <qsnet/kthread.h>
46949 +#include <qsnet/autoconf.h>
46950 +
46951 +#include <elan/kcomm.h>
46952 +#include <elan/epsvc.h>
46953 +#include <elan/epcomms.h>
46954 +#include "cm.h"
46955 +#include "debug.h"
46956 +
46957 +static void
46958 +ep_comms_thread (void *arg)
46959 +{
46960 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) arg;
46961 +    struct list_head *el;
46962 +
46963 +    kernel_thread_init ("ep_comms");
46964 +
46965 +    /* since ep_alloc_xmtr() has incremented the module use count,
46966 +     * we would be preventing the module from being unloaded, so
46967 +     * we decrement the use count since this thread must terminate
46968 +     * during unload of the module.
46969 +     */
46970 +    ep_mod_dec_usecount();
46971 +
46972 +    for (;;)
46973 +    {
46974 +       long nextRunTime = 0;
46975 +
46976 +       /* NOTE - subsys->Lock serializes us against flush/relocations
46977 +        *        caused by rail nodeset transitions.
46978 +        */
46979 +       kmutex_lock (&subsys->Lock);
46980 +       list_for_each (el, &subsys->Transmitters) {
46981 +           nextRunTime = ep_check_xmtr (list_entry (el, EP_XMTR, Link), nextRunTime);
46982 +       }
46983 +
46984 +       list_for_each (el, &subsys->Receivers) {
46985 +           nextRunTime = ep_check_rcvr (list_entry (el, EP_RCVR, Link), nextRunTime);
46986 +       }
46987 +       kmutex_unlock (&subsys->Lock);
46988 +
46989 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
46990 +       ep_csum_rxds (subsys);  
46991 +#endif
46992 +       nextRunTime = ep_forward_rxds (subsys, nextRunTime);
46993 +
46994 +       if (ep_kthread_sleep (&subsys->Thread, nextRunTime) < 0)
46995 +           break;
46996 +    }
46997 +
46998 +    ep_mod_inc_usecount();
46999 +
47000 +    ep_kthread_stopped (&subsys->Thread);
47001 +    kernel_thread_exit();
47002 +}
47003 +
47004 +int
47005 +ep_comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail)
47006 +{
47007 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) s;
47008 +    EP_COMMS_RAIL    *commsRail;
47009 +    struct list_head *el;
47010 +
47011 +    printk ("%s: vendorid=%x deviceid=%x\n", rail->Name, rail->Devinfo.dev_vendor_id, rail->Devinfo.dev_device_id);
47012 +
47013 +    switch (rail->Devinfo.dev_device_id)
47014 +    {
47015 +#if defined(CONFIG_ELAN3) || defined(CONFIG_ELAN3_MODULE)
47016 +    case PCI_DEVICE_ID_ELAN3:
47017 +       commsRail = ep3comms_add_rail (s, sys, rail);
47018 +       break;
47019 +#endif
47020 +#if defined(CONFIG_ELAN4) || defined(CONFIG_ELAN4_MODULE)
47021 +    case PCI_DEVICE_ID_ELAN4:
47022 +       commsRail = ep4comms_add_rail (s, sys, rail);
47023 +       break;
47024 +#endif
47025 +    default:
47026 +       return 0;
47027 +    }
47028 +
47029 +    if (commsRail == NULL)
47030 +       return 1;
47031 +
47032 +    commsRail->Rail   = rail;
47033 +    commsRail->Subsys = subsys;
47034 +
47035 +    kmutex_lock (&subsys->Lock);
47036 +    list_add_tail (&commsRail->Link, &subsys->Rails);
47037 +    
47038 +    list_for_each (el, &subsys->Receivers) {
47039 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
47040 +
47041 +       EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail);
47042 +    }
47043 +       
47044 +    list_for_each (el, &subsys->Transmitters) {
47045 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
47046 +
47047 +       EP_RAIL_OP (commsRail, Xmtr.AddRail) (xmtr, commsRail);
47048 +    }
47049 +
47050 +    kmutex_unlock (&subsys->Lock);
47051 +
47052 +    return 0;
47053 +}
47054 +
47055 +void
47056 +ep_comms_del_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail)
47057 +{
47058 +    EP_COMMS_SUBSYS  *subsys    = (EP_COMMS_SUBSYS *) s;
47059 +    EP_COMMS_RAIL    *commsRail = NULL;
47060 +    struct list_head *el;
47061 +
47062 +    kmutex_lock (&subsys->Lock);
47063 +    /* find our rail entry and remove it from the subsystem list */
47064 +    list_for_each (el, &subsys->Rails) {
47065 +       if ((commsRail = list_entry (el, EP_COMMS_RAIL, Link))->Rail == rail)
47066 +           break;
47067 +    }
47068 +
47069 +    if (commsRail == NULL || commsRail->Rail != rail) { kmutex_unlock (&subsys->Lock); return; } /* rail never registered */
47070 +    list_del (&commsRail->Link);
47071 +    list_for_each (el, &subsys->Receivers) {
47072 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
47073 +
47074 +       EP_RAIL_OP(commsRail, Rcvr.DelRail) (rcvr, commsRail);
47075 +    }
47076 +       
47077 +    list_for_each (el, &subsys->Transmitters) {
47078 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
47079 +
47080 +       EP_RAIL_OP(commsRail,Xmtr.DelRail) (xmtr, commsRail);
47081 +    }
47082 +
47083 +    kmutex_unlock (&subsys->Lock);
47084 +
47085 +    EP_RAIL_OP (commsRail, DelRail) (commsRail);
47086 +}
47087 +
47088 +void
47089 +ep_comms_fini (EP_SUBSYS *s, EP_SYS *sys)
47090 +{
47091 +    EP_COMMS_SUBSYS *subsys = (EP_COMMS_SUBSYS *) s;
47092 +
47093 +    ep_kthread_stop (&subsys->Thread);
47094 +    ep_kthread_destroy (&subsys->Thread);
47095 +
47096 +    if (subsys->ForwardXmtr)
47097 +       ep_free_xmtr (subsys->ForwardXmtr);
47098 +
47099 +    spin_lock_destroy (&subsys->ForwardDescLock);
47100 +
47101 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47102 +    spin_lock_destroy (&subsys->CheckSumDescLock);
47103 +#endif
47104 +
47105 +    kmutex_destroy (&subsys->Lock);
47106 +
47107 +    KMEM_FREE (subsys, sizeof (EP_COMMS_SUBSYS));
47108 +}
47109 +
47110 +int
47111 +ep_comms_init (EP_SYS *sys)
47112 +{
47113 +    EP_COMMS_SUBSYS *subsys;
47114 +
47115 +    KMEM_ZALLOC (subsys, EP_COMMS_SUBSYS *, sizeof (EP_COMMS_SUBSYS), 1);
47116 +
47117 +    if (subsys == NULL)
47118 +       return (ENOMEM);
47119 +
47120 +    INIT_LIST_HEAD (&subsys->Rails);
47121 +    INIT_LIST_HEAD (&subsys->Receivers);
47122 +    INIT_LIST_HEAD (&subsys->Transmitters);
47123 +    INIT_LIST_HEAD (&subsys->ForwardDescList);
47124 +
47125 +    kmutex_init (&subsys->Lock);
47126 +    spin_lock_init (&subsys->ForwardDescLock);
47127 +
47128 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47129 +    INIT_LIST_HEAD (&subsys->CheckSumDescList);
47130 +    spin_lock_init (&subsys->CheckSumDescLock);
47131 +#endif
47132 +
47133 +    subsys->Subsys.Sys        = sys;
47134 +    subsys->Subsys.Name              = "epcomms";
47135 +    subsys->Subsys.Destroy    = ep_comms_fini;
47136 +    subsys->Subsys.AddRail    = ep_comms_add_rail;
47137 +    subsys->Subsys.RemoveRail = ep_comms_del_rail;
47138 +
47139 +    ep_subsys_add (sys, &subsys->Subsys);
47140 +    ep_kthread_init (&subsys->Thread);
47141 +
47142 +    if ((subsys->ForwardXmtr = ep_alloc_xmtr (subsys->Subsys.Sys)) == NULL)
47143 +       goto failed;
47144 +
47145 +    if (kernel_thread_create (ep_comms_thread, subsys) == NULL)
47146 +       goto failed;
47147 +    ep_kthread_started (&subsys->Thread);
47148 +
47149 +    return (0);
47150 +
47151 + failed:
47152 +    ep_subsys_del (sys, &subsys->Subsys);
47153 +    ep_comms_fini (&subsys->Subsys, sys);
47154 +
47155 +    return (ENOMEM);
47156 +}
47157 +
47158 +void
47159 +ep_comms_display (EP_SYS *sys, char *how)
47160 +{
47161 +    EP_COMMS_SUBSYS  *subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME);
47162 +    struct list_head *el;
47163 +
47164 +    if (how == NULL || !strncmp (how, "rail", 4))
47165 +    {
47166 +       kmutex_lock (&subsys->Lock);
47167 +       list_for_each (el, &subsys->Rails) {
47168 +           EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47169 +
47170 +           EP_RAIL_OP(commsRail, DisplayRail) (commsRail);
47171 +       }
47172 +       kmutex_unlock (&subsys->Lock);
47173 +    }
47174 +           
47175 +    if (how == NULL || !strncmp (how, "xmtr", 4))
47176 +       list_for_each (el, &subsys->Transmitters)
47177 +           ep_display_xmtr (&di_ep_debug, list_entry (el, EP_XMTR, Link));
47178 +
47179 +    if (how == NULL || !strncmp (how, "rcvr", 4)) 
47180 +       list_for_each (el, &subsys->Receivers)
47181 +           ep_display_rcvr (&di_ep_debug, list_entry (el, EP_RCVR, Link), (how && how[4] == ',') ? 1 : 0);
47182 +}
47183 +
47184 +int
47185 +ep_svc_indicator_set (EP_SYS *epsys, int svc_indicator) 
47186 +{
47187 +    EP_COMMS_SUBSYS  *subsys;
47188 +    struct list_head *el;
47189 +
47190 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d \n",svc_indicator);
47191 +
47192 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
47193 +       return (EP_EINVAL);
47194 +
47195 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
47196 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_set: ep_subsys_find failed\n");
47197 +       return (EP_EINVAL);
47198 +    }
47199 +
47200 +
47201 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
47202 +    list_for_each (el, &subsys->Rails) { 
47203 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47204 +
47205 +       cm_svc_indicator_set(commsRail->Rail, svc_indicator);
47206 +    }
47207 +    kmutex_unlock (&subsys->Lock);
47208 +
47209 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_set: %d success\n",svc_indicator);
47210 +    return (EP_SUCCESS);
47211 +}
47212 +
47213 +int
47214 +ep_svc_indicator_clear (EP_SYS *epsys, int svc_indicator) 
47215 +{
47216 +    EP_COMMS_SUBSYS  *subsys;
47217 +    struct list_head *el;
47218 +
47219 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d \n",svc_indicator);
47220 +
47221 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
47222 +       return (EP_EINVAL);
47223 +
47224 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
47225 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_clear: ep_subsys_find failed\n");
47226 +       return (EP_EINVAL);
47227 +    }
47228 +
47229 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
47230 +    list_for_each (el, &subsys->Rails) { 
47231 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47232 +
47233 +       cm_svc_indicator_clear(commsRail->Rail, svc_indicator);
47234 +    }
47235 +    kmutex_unlock (&subsys->Lock);
47236 +
47237 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_clear: %d success\n",svc_indicator);
47238 +    return (EP_SUCCESS);
47239 +}
47240 +
47241 +int 
47242 +ep_svc_indicator_is_set (EP_SYS *epsys, int svc_indicator, int nodeId) 
47243 +{
47244 +    EP_COMMS_SUBSYS  *subsys;
47245 +    struct list_head *el;
47246 +    int               set = 0;
47247 +
47248 +    EPRINTF2 (DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d \n", svc_indicator, nodeId);
47249 +
47250 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
47251 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_is_set: ep_subsys_find failed\n");
47252 +       return (0);
47253 +    }
47254 +
47255 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
47256 +    list_for_each (el, &subsys->Rails) { 
47257 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47258 +
47259 +       set |= cm_svc_indicator_is_set(commsRail->Rail, svc_indicator, nodeId);
47260 +    }
47261 +    kmutex_unlock (&subsys->Lock);
47262 +
47263 +    EPRINTF3 (DBG_SVC,"ep_svc_indicator_is_set: svc %d node %d returning %d\n", svc_indicator, nodeId, set);
47264 +    return set;
47265 +}
47266 +
47267 +int
47268 +ep_svc_indicator_bitmap (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) 
47269 +{
47270 +    EP_COMMS_SUBSYS  *subsys;
47271 +    struct list_head *el;
47272 +
47273 +    EPRINTF1 (DBG_SVC,"ep_svc_indicator_bitmap: svc %d\n", svc_indicator);
47274 +
47275 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
47276 +       return (-1);
47277 +
47278 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL) {
47279 +       EPRINTF0 (DBG_SVC,"ep_svc_indicator_bitmap: ep_subsys_find failed\n");
47280 +       return (-2);
47281 +    }
47282 +
47283 +    /* clear bitmap */
47284 +    bt_zero (bitmap, nnodes);
47285 +
47286 +    kmutex_lock (&subsys->Lock); /* walking rails list and setting info on Rail */
47287 +    list_for_each (el, &subsys->Rails) { 
47288 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47289 +
47290 +       /* this will or in each bit map */
47291 +       cm_svc_indicator_bitmap (commsRail->Rail, svc_indicator, bitmap, low, nnodes);
47292 +    }
47293 +    kmutex_unlock (&subsys->Lock);
47294 +
47295 +    return (0);
47296 +}
47297 +
47298 +int
47299 +ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes) 
47300 +{
47301 +    int i;
47302 +
47303 +    EPRINTF1 (DBG_SVC,"ep_xmtr_svc_indicator_bitmap: svc %d\n", svc_indicator);
47304 +
47305 +    if (svc_indicator < 0 || svc_indicator >= EP_SVC_NUM_INDICATORS)
47306 +       return (-1);
47307 +
47308 +    /* clear bitmap */
47309 +    bt_zero (bitmap, nnodes);
47310 +
47311 +    for (i = 0; i < EP_MAX_RAILS; i++)
47312 +    {
47313 +       if (xmtr->RailMask & (1 << i) )
47314 +       {
47315 +           /* this will or in each bit map */
47316 +           cm_svc_indicator_bitmap (xmtr->Rails[i]->CommsRail->Rail, svc_indicator, bitmap, low, nnodes);
47317 +       }
47318 +    }
47319 +
47320 +    return (0);
47321 +}
47322 +
47323 +EP_RAILMASK
47324 +ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId)
47325 +{
47326 +    EP_COMMS_SUBSYS  *subsys;
47327 +    struct list_head *el;
47328 +    EP_RAILMASK       rmask=0;
47329 +
47330 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL)
47331 +       return (rmask);
47332 +
47333 +    kmutex_lock (&subsys->Lock); /* walking rails list and reading info from Rail */
47334 +    list_for_each (el, &subsys->Rails) { 
47335 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
47336 +
47337 +       if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId))
47338 +            rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
47339 +    }
47340 +    kmutex_unlock (&subsys->Lock);
47341 +
47342 +    return (rmask);
47343 +}
47344 +
47345 +EP_RAILMASK
47346 +ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId)
47347 +{
47348 +    EP_RAILMASK    rmask=0;
47349 +    EP_COMMS_RAIL *commsRail;
47350 +    int            i;
47351 +
47352 +    for (i = 0; i < EP_MAX_RAILS; i++)
47353 +    {
47354 +       if (xmtr->RailMask & (1 << i) )
47355 +       {
47356 +           commsRail = xmtr->Rails[i]->CommsRail;
47357 +
47358 +           if ( cm_svc_indicator_is_set(commsRail->Rail, svc_indicator,nodeId))
47359 +               rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
47360 +       }
47361 +    }   
47362 +    
47363 +    EPRINTF3 (DBG_SVC, "ep_xmtr_svc_indicator_railmask: svc %d node %d mask 0x%x\n",  svc_indicator, nodeId, rmask);
47364 +
47365 +    return (rmask);
47366 +}
47367 +
47368 +EP_RAILMASK
47369 +ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE service)
47370 +{
47371 +    EP_COMMS_SUBSYS  *subsys;
47372 +    EP_RAILMASK       rmask=0;
47373 +    struct list_head *el;
47374 +    
47375 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (epsys, "epcomms")) == NULL)
47376 +       return (rmask);
47377 +    
47378 +    kmutex_lock (&subsys->Lock);
47379 +    list_for_each (el, &subsys->Receivers) {
47380 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
47381 +
47382 +       if (rcvr->Service == service)
47383 +           rmask |= rcvr->RailMask; 
47384 +    }
47385 +    kmutex_unlock(&subsys->Lock);
47386 +
47387 +    return (rmask);
47388 +}
47389 +
47390 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
47391 +uint32_t
47392 +ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags)
47393 +{
47394 +    EP_NMH   *nmh;
47395 +    int       i;
47396 +    uint16_t  check_data = 0;
47397 +    uint16_t  check_env  = 0;
47398 +
47399 +    for (i = 0; i < nFrags; i++) {
47400 +       /* find the nmh for this frag */
47401 +       nmh = ep_nmh_find (&sys->MappingTable, &nmd[i]);
47402 +
47403 +       ASSERT( nmh != NULL);
47404 +
47405 +       /* add the next frag to the check sum */
47406 +       check_data = nmh->nmh_ops->op_calc_check_sum (sys, nmh, &nmd[i], check_data);
47407 +    }
47408 +
47409 +    check_env = rolling_check_sum ((char *) env, offsetof(EP_ENVELOPE, CheckSum), 0);
47410 +
47411 +    return (EP_ENVELOPE_CHECK_SUM | ( (check_env & 0x7FFF) << 16) | (check_data & 0xFFFF));
47412 +}
47413 +#endif
47414 +
47415 +/*
47416 + * Local variables:
47417 + * c-file-style: "stroustrup"
47418 + * End:
47419 + */
47420 diff -urN clean/drivers/net/qsnet/ep/epcomms_elan3.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.c
47421 --- clean/drivers/net/qsnet/ep/epcomms_elan3.c  1969-12-31 19:00:00.000000000 -0500
47422 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.c    2004-08-03 07:34:34.000000000 -0400
47423 @@ -0,0 +1,191 @@
47424 +/*
47425 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
47426 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
47427 + *
47428 + *    For licensing information please see the supplied COPYING file
47429 + *
47430 + */
47431 +
47432 +#ident "@(#)$Id: epcomms_elan3.c,v 1.60 2004/08/03 11:34:34 david Exp $"
47433 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3.c,v $ */
47434 +
47435 +#include <qsnet/kernel.h>
47436 +
47437 +#include <elan/kcomm.h>
47438 +#include <elan/epsvc.h>
47439 +#include <elan/epcomms.h>
47440 +
47441 +#include "kcomm_elan3.h"
47442 +#include "epcomms_elan3.h"
47443 +
47444 +void
47445 +ep3comms_flush_callback (void *arg, statemap_t *map)
47446 +{
47447 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
47448 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
47449 +    struct list_head *el;
47450 +
47451 +    kmutex_lock (&subsys->Lock);
47452 +    list_for_each (el, &subsys->Transmitters) {
47453 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
47454 +       
47455 +       if (xmtr->Rails[commsRail->Rail->Number])
47456 +           ep3xmtr_flush_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
47457 +    }
47458 +
47459 +    list_for_each (el, &subsys->Receivers) {
47460 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
47461 +       
47462 +       if (rcvr->Rails[commsRail->Rail->Number])
47463 +           ep3rcvr_flush_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
47464 +    }
47465 +    kmutex_unlock (&subsys->Lock);
47466 +}
47467 +
47468 +void
47469 +ep3comms_failover_callback (void *arg, statemap_t *map)
47470 +{
47471 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
47472 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
47473 +    struct list_head *el;
47474 +
47475 +    kmutex_lock (&subsys->Lock);
47476 +    list_for_each (el, &subsys->Transmitters) {
47477 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
47478 +       
47479 +       if (xmtr->Rails[commsRail->Rail->Number])
47480 +           ep3xmtr_failover_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
47481 +    }
47482 +
47483 +    list_for_each (el, &subsys->Receivers) {
47484 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
47485 +       
47486 +       if (rcvr->Rails[commsRail->Rail->Number])
47487 +           ep3rcvr_failover_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
47488 +    }
47489 +    kmutex_unlock (&subsys->Lock);
47490 +}
47491 +
47492 +void
47493 +ep3comms_disconnect_callback (void *arg, statemap_t *map)
47494 +{
47495 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
47496 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
47497 +    struct list_head *el;
47498 +
47499 +    kmutex_lock (&subsys->Lock);
47500 +    list_for_each (el, &subsys->Transmitters) {
47501 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
47502 +       
47503 +       if (xmtr->Rails[commsRail->Rail->Number])
47504 +           ep3xmtr_disconnect_callback (xmtr, (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number]);
47505 +    }
47506 +
47507 +    list_for_each (el, &subsys->Receivers) {
47508 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
47509 +       
47510 +       if (rcvr->Rails[commsRail->Rail->Number])
47511 +           ep3rcvr_disconnect_callback (rcvr, (EP3_RCVR_RAIL *) rcvr->Rails[commsRail->Rail->Number]);
47512 +    }
47513 +    kmutex_unlock (&subsys->Lock);
47514 +}
47515 +
47516 +EP_COMMS_RAIL *
47517 +ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r)
47518 +{
47519 +    EP3_RAIL         *rail   = (EP3_RAIL *) r;
47520 +    ELAN3_DEV        *dev    = rail->Device;
47521 +    EP3_COMMS_RAIL   *commsRail;
47522 +    EP3_InputQueue    qdesc;
47523 +    int i;
47524 +
47525 +    KMEM_ZALLOC (commsRail, EP3_COMMS_RAIL *, sizeof (EP3_COMMS_RAIL), TRUE);
47526 +
47527 +    if (commsRail == NULL)
47528 +       return NULL;
47529 +    
47530 +    commsRail->Generic.Ops.DelRail          = ep3comms_del_rail;
47531 +    commsRail->Generic.Ops.DisplayRail      = ep3comms_display_rail;
47532 +    commsRail->Generic.Ops.Rcvr.AddRail     = ep3rcvr_add_rail;
47533 +    commsRail->Generic.Ops.Rcvr.DelRail     = ep3rcvr_del_rail;
47534 +    commsRail->Generic.Ops.Rcvr.Check       = ep3rcvr_check;
47535 +    commsRail->Generic.Ops.Rcvr.QueueRxd    = ep3rcvr_queue_rxd;
47536 +    commsRail->Generic.Ops.Rcvr.RpcPut      = ep3rcvr_rpc_put;
47537 +    commsRail->Generic.Ops.Rcvr.RpcGet      = ep3rcvr_rpc_get;
47538 +    commsRail->Generic.Ops.Rcvr.RpcComplete = ep3rcvr_rpc_complete;
47539 +
47540 +    commsRail->Generic.Ops.Rcvr.StealRxd    = ep3rcvr_steal_rxd;
47541 +
47542 +    commsRail->Generic.Ops.Rcvr.FillOutRailStats = ep3rcvr_fillout_rail_stats;
47543 +
47544 +    commsRail->Generic.Ops.Rcvr.DisplayRcvr = ep3rcvr_display_rcvr;
47545 +    commsRail->Generic.Ops.Rcvr.DisplayRxd  = ep3rcvr_display_rxd;
47546 +
47547 +    commsRail->Generic.Ops.Xmtr.AddRail     = ep3xmtr_add_rail;
47548 +    commsRail->Generic.Ops.Xmtr.DelRail     = ep3xmtr_del_rail;
47549 +    commsRail->Generic.Ops.Xmtr.Check       = ep3xmtr_check;
47550 +    commsRail->Generic.Ops.Xmtr.BindTxd     = ep3xmtr_bind_txd;
47551 +    commsRail->Generic.Ops.Xmtr.UnbindTxd   = ep3xmtr_unbind_txd;
47552 +    commsRail->Generic.Ops.Xmtr.PollTxd     = ep3xmtr_poll_txd;
47553 +    commsRail->Generic.Ops.Xmtr.CheckTxdState = ep3xmtr_check_txd_state;
47554 +
47555 +    commsRail->Generic.Ops.Xmtr.DisplayXmtr = ep3xmtr_display_xmtr;
47556 +    commsRail->Generic.Ops.Xmtr.DisplayTxd  = ep3xmtr_display_txd;
47557 +
47558 +    commsRail->Generic.Ops.Xmtr.FillOutRailStats = ep3xmtr_fillout_rail_stats;
47559 +
47560 +    /* Allocate the input queues at their fixed elan address */
47561 +    if (! (commsRail->QueueDescs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * sizeof (EP3_InputQueue), PAGESIZE), EP_PERM_ALL, 0)))
47562 +    {
47563 +       KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL));
47564 +       return NULL;
47565 +    }
47566 +
47567 +    qdesc.q_state          = E3_QUEUE_FULL;
47568 +    qdesc.q_base           = 0;
47569 +    qdesc.q_top            = 0;
47570 +    qdesc.q_fptr           = 0;
47571 +    qdesc.q_bptr           = 0;
47572 +    qdesc.q_size           = 0;
47573 +    qdesc.q_event.ev_Count = 0;
47574 +    qdesc.q_event.ev_Type  = 0;
47575 +
47576 +    /* Initialise all queue entries to be full */
47577 +    for (i = 0; i < EP_MSG_NSVC; i++)
47578 +       elan3_sdram_copyl_to_sdram (dev, &qdesc, commsRail->QueueDescs + (i * sizeof (EP3_InputQueue)), sizeof (EP3_InputQueue));
47579 +
47580 +    ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback,      commsRail);
47581 +    ep_register_callback (r, EP_CB_FLUSH_FLUSHING,  ep3comms_flush_callback,      commsRail);
47582 +    ep_register_callback (r, EP_CB_FAILOVER,        ep3comms_failover_callback,   commsRail);
47583 +    ep_register_callback (r, EP_CB_DISCONNECTING,   ep3comms_disconnect_callback, commsRail);
47584 +
47585 +    return (EP_COMMS_RAIL *) commsRail;
47586 +}
47587 +
47588 +void
47589 +ep3comms_del_rail (EP_COMMS_RAIL *r)
47590 +{
47591 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) r;
47592 +    EP_RAIL        *rail      = commsRail->Generic.Rail;
47593 +
47594 +    ep_remove_callback (rail, EP_CB_FLUSH_FILTERING, ep3comms_flush_callback,      commsRail);
47595 +    ep_remove_callback (rail, EP_CB_FLUSH_FLUSHING,  ep3comms_flush_callback,      commsRail);
47596 +    ep_remove_callback (rail, EP_CB_FAILOVER,        ep3comms_failover_callback,   commsRail);
47597 +    ep_remove_callback (rail, EP_CB_DISCONNECTING,   ep3comms_disconnect_callback, commsRail);
47598 +
47599 +    ep_free_memory_elan (rail, EP_EPCOMMS_QUEUE_BASE);
47600 +
47601 +    KMEM_FREE (commsRail, sizeof (EP3_COMMS_RAIL));
47602 +}
47603 +
47604 +void
47605 +ep3comms_display_rail (EP_COMMS_RAIL *r)
47606 +{
47607 +    
47608 +}
47609 +
47610 +/*
47611 + * Local variables:
47612 + * c-file-style: "stroustrup"
47613 + * End:
47614 + */
47615 diff -urN clean/drivers/net/qsnet/ep/epcomms_elan3.h linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.h
47616 --- clean/drivers/net/qsnet/ep/epcomms_elan3.h  1969-12-31 19:00:00.000000000 -0500
47617 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3.h    2004-11-12 05:55:03.000000000 -0500
47618 @@ -0,0 +1,330 @@
47619 +/*
47620 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
47621 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
47622 + *
47623 + *    For licensing information please see the supplied COPYING file
47624 + *
47625 + */
47626 +
47627 +#ifndef __EPCOMMS_ELAN3_H
47628 +#define __EPCOMMS_ELAN3_H
47629 +
47630 +#ident "@(#)$Id: epcomms_elan3.h,v 1.28 2004/11/12 10:55:03 mike Exp $"
47631 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3.h,v $ */
47632 +
47633 +#define EP3_DMAFAILCOUNT               3
47634 +
47635 +
47636 +/* Main/Elan spinlock */
47637 +typedef struct ep3_spinlock_elan 
47638 +{
47639 +    volatile E3_uint32 sl_lock;                /* main wants a lock */
47640 +    volatile E3_uint32 sl_seq;                 /* thread owns this word */
47641 +    /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */
47642 +    E3_uint64          sl_pad[14];             /* pad to 64-bytes */
47643 +} EP3_SPINLOCK_ELAN;
47644 +
47645 +/* Declare this as a main memory cache block for efficiency */
47646 +typedef struct ep3_spinlock_main {
47647 +    volatile E3_uint32 sl_seq;                 /* copy of seq number updated by Elan */
47648 +    volatile E3_uint32 sl_pad[15];             /* pad to 64-bytes */
47649 +} EP3_SPINLOCK_MAIN;
47650 +
47651 +#if defined (__ELAN3__)
47652 +
47653 +extern void ep3_spinblock (EP3_SPINLOCK_ELAN *, EP3_SPINLOCK_MAIN *);
47654 +
47655 +#define EP3_SPINENTER(SLE,SL) \
47656 +do {\
47657 +       (SLE)->sl_seq++; \
47658 +       if ((SLE)->sl_lock) \
47659 +               ep3_spinblock(SLE, SL);\
47660 +} while (0)
47661 +
47662 +#define EP3_SPINEXIT(SLE,SL) \
47663 +do {\
47664 +       (SL)->sl_seq = (SLE)->sl_seq;\
47665 +} while (0)
47666 +
47667 +#else
47668 +
47669 +#define EP3_SPINENTER(DEV,SLE,SL) do { \
47670 +    E3_uint32 seq; \
47671 +\
47672 +    mb();\
47673 +    elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);\
47674 +    mb();\
47675 +    seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\
47676 +    while (seq != (SL)->sl_seq)\
47677 +    {\
47678 +       while ((SL)->sl_seq == (seq - 1))\
47679 +       {\
47680 +           mb();\
47681 +\
47682 +           DELAY (1); \
47683 +       }\
47684 +       seq = elan3_sdram_readl (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_seq));\
47685 +    }\
47686 +} while (0)
47687 +
47688 +#define EP3_SPINEXIT(DEV,SLE,SL) do { \
47689 +       wmb(); \
47690 +       elan3_sdram_writel (DEV, (SLE) + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);\
47691 +       mmiob(); \
47692 +} while (0)
47693 +
47694 +#endif /* ! __ELAN3__ */
47695 +
47696 +/* per-rail elan memory portion receive descriptor */
47697 +typedef struct ep3_rxd_rail_elan
47698 +{
47699 +    E3_DMA             Dmas[EP_MAXFRAG+1];                     /* Dma's for fetching data/putting data & status blk */
47700 +    E3_Event           ChainEvent[EP_MAXFRAG];                 /* Events to chain dmas */
47701 +    E3_BlockCopyEvent  DataEvent;                              /* message received block event */
47702 +    E3_BlockCopyEvent  DoneEvent;                              /* RPC status block event */
47703 +    
47704 +    EP_NMD             Data;                                   /* Network mapping handle for receive data */
47705 +
47706 +    E3_Addr            RxdMain;                                /* pointer to main memory portion */
47707 +
47708 +    E3_Addr            Next;                                   /* linked list when on pending list (elan address) */
47709 +
47710 +    E3_uint64          MainAddr;                               /* kernel address of ep_rxd_main */
47711 +} EP3_RXD_RAIL_ELAN;
47712 +
47713 +#define EP3_RXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_RXD_RAIL_ELAN), E3_DMA_ALIGN)
47714 +
47715 +/* per-rail main memory portion of receive descriptor */
47716 +typedef struct ep3_rxd_rail_main
47717 +{
47718 +    E3_uint32          DataEvent;                              /* dest for done event */
47719 +    E3_uint32          DoneEvent;                              /* dest for done event */
47720 +} EP3_RXD_RAIL_MAIN;
47721 +
47722 +#define EP3_RXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_RXD_RAIL_MAIN), sizeof (E3_uint32))
47723 +
47724 +#if !defined(__ELAN3__)
47725 +/* Kernel memory portion of per-rail receive descriptor */
47726 +typedef struct ep3_rxd_rail
47727 +{
47728 +    EP_RXD_RAIL                Generic;                                /* generic rxd rail */
47729 +
47730 +    EP3_COOKIE         DataCookie;                             /* Event cookie */
47731 +    EP3_COOKIE         DoneCookie;                             /* Event cookie */
47732 +    EP3_COOKIE         ChainCookie[EP_MAXFRAG];                /* Event cookie */
47733 +
47734 +    sdramaddr_t                RxdElan;                                /* per-rail elan receive descriptor */
47735 +    E3_Addr            RxdElanAddr;                            /*   and elan address */
47736 +
47737 +    EP3_RXD_RAIL_MAIN  *RxdMain;                               /* per-rail main receive descriptor */
47738 +    E3_Addr            RxdMainAddr;                            /*   and elan address */
47739 +
47740 +    EP_BACKOFF         Backoff;                                /* dma backoff */
47741 +} EP3_RXD_RAIL;
47742 +
47743 +#define EP3_NUM_RXD_PER_BLOCK  16
47744 +
47745 +typedef struct ep3_rxd_rail_block
47746 +{
47747 +    struct list_head   Link;
47748 +    
47749 +    EP3_RXD_RAIL        Rxd[EP3_NUM_RXD_PER_BLOCK];
47750 +} EP3_RXD_RAIL_BLOCK;
47751 +
47752 +#endif /* ! __ELAN3__ */
47753 +
47754 +typedef struct ep3_rcvr_rail_elan                              /* Elan memory service structure */
47755 +{
47756 +    EP3_SPINLOCK_ELAN  ThreadLock;                             /* elan memory portion of spin lock */
47757 +    EP3_SPINLOCK_ELAN  PendingLock;                            /* spin lock for pending rx list */
47758 +
47759 +    E3_Addr           PendingDescs;                            /* list of pending receive descriptors */
47760 +    E3_uint32          ThreadShouldHalt;                        /* marks that the thread should halt */
47761 +
47762 +    E3_uint64         MainAddr;                                /* kernel address of ep_rcvr (for StallThreadForNoDescs)*/
47763 +} EP3_RCVR_RAIL_ELAN;
47764 +
47765 +typedef struct ep3_rcvr_rail_main                              /* Main memory service structure */
47766 +{
47767 +    EP3_SPINLOCK_MAIN  ThreadLock;                             /* main memory portion of spin lock */
47768 +    EP3_SPINLOCK_MAIN  PendingLock;                            /* spinlock for pending rx list */
47769 +
47770 +    volatile unsigned   PendingDescsTailp;                     /* next pointer of last receive descriptor on pending list */
47771 +} EP3_RCVR_RAIL_MAIN;
47772 +
47773 +#if !defined(__ELAN3__)
47774 +
47775 +typedef struct ep3_rcvr_rail_stats
47776 +{
47777 +    unsigned long some_stat;
47778 +} EP3_RCVR_RAIL_STATS;
47779 +
47780 +typedef struct ep3_rcvr_rail
47781 +{
47782 +    EP_RCVR_RAIL       Generic;                                /* generic portion */
47783 +    
47784 +    EP3_RCVR_RAIL_MAIN *RcvrMain;
47785 +    E3_Addr            RcvrMainAddr;
47786 +    sdramaddr_t         RcvrElan;
47787 +    E3_Addr             RcvrElanAddr;
47788 +
47789 +    sdramaddr_t                InputQueueBase;                         /* base of receive queue */
47790 +    E3_Addr            InputQueueAddr;                         /* elan address of receive queue */
47791 +
47792 +    E3_Addr            ThreadStack;                            /* Thread processor stack */
47793 +    E3_Addr            ThreadWaiting;                          /* Elan thread is waiting as no receive descriptors pending (sp stored here ) */
47794 +    E3_Addr            ThreadHalted;                           /* Elan thread is waiting as it was requested to halt */
47795 +
47796 +    struct list_head   FreeDescList;                           /* freelist of per-rail receive descriptors */
47797 +    unsigned int       FreeDescCount;                          /*   and number on free list */
47798 +    unsigned int        TotalDescCount;                                /*   total number created */
47799 +    spinlock_t         FreeDescLock;                           /*   and lock for free list */
47800 +    struct list_head    DescBlockList;                         /* list of receive descriptor blocks */
47801 +
47802 +    unsigned int        FreeDescWaiting;                       /* waiting for descriptors to be freed */
47803 +    kcondvar_t         FreeDescSleep;                          /*   and sleep here */
47804 +
47805 +    unsigned int       CleanupWaiting;                         /* waiting for cleanup */
47806 +    kcondvar_t         CleanupSleep;                           /*   and sleep here */
47807 +
47808 +    EP3_RCVR_RAIL_STATS stats;                                  /* elan3 specific rcvr_rail stats */
47809 +} EP3_RCVR_RAIL;
47810 +
47811 +#endif /* ! __ELAN3__ */
47812 +
47813 +/* per-rail portion of transmit descriptor */
47814 +typedef struct ep3_txd_rail_elan
47815 +{
47816 +    EP_ENVELOPE               Envelope;                                /* message envelope */
47817 +    EP_PAYLOAD        Payload;                                 /* message payload */
47818 +
47819 +    E3_BlockCopyEvent EnveEvent;                               /* envelope event */
47820 +    E3_BlockCopyEvent DataEvent;                               /* data transfer event */
47821 +    E3_BlockCopyEvent DoneEvent;                               /* rpc done event */
47822 +} EP3_TXD_RAIL_ELAN;
47823 +
47824 +#define EP3_TXD_RAIL_ELAN_SIZE roundup (sizeof (EP3_TXD_RAIL_ELAN), E3_BLK_ALIGN)
47825 +
47826 +typedef struct ep3_txd_rail_main
47827 +{
47828 +    E3_uint32         EnveEvent;                               /* dest for envelope event */
47829 +    E3_uint32         DataEvent;                               /* dest for data transfer event */
47830 +    E3_uint32        DoneEvent;                                /* dest for rpc done event */
47831 +} EP3_TXD_RAIL_MAIN;
47832 +
47833 +#define EP3_TXD_RAIL_MAIN_SIZE roundup (sizeof(EP3_TXD_RAIL_MAIN), E3_BLK_ALIGN)
47834 +
47835 +#if !defined(__ELAN3__)
47836 +
47837 +typedef struct ep3_txd_rail
47838 +{
47839 +    EP_TXD_RAIL               Generic;                                 /* generic txd rail */
47840 +
47841 +    EP3_COOKIE        EnveCookie;                              /* Event cookies */
47842 +    EP3_COOKIE        DataCookie;
47843 +    EP3_COOKIE        DoneCookie;
47844 +
47845 +    sdramaddr_t               TxdElan;                                 /* Elan TX descriptor */
47846 +    E3_Addr           TxdElanAddr;                             /*  and elan address */
47847 +
47848 +    EP3_TXD_RAIL_MAIN *TxdMain;                                        /* Elan Main memory tx descriptor */
47849 +    E3_Addr           TxdMainAddr;                             /*  and elan address */
47850 +
47851 +    EP_BACKOFF        Backoff;                                 /* dma backoff */
47852 +} EP3_TXD_RAIL;
47853 +
47854 +
47855 +#define EP3_NUM_TXD_PER_BLOCK  16
47856 +
47857 +typedef struct ep3_txd_rail_block
47858 +{
47859 +    struct list_head   Link;
47860 +    
47861 +    EP3_TXD_RAIL       Txd[EP3_NUM_TXD_PER_BLOCK];
47862 +} EP3_TXD_RAIL_BLOCK;
47863 +
47864 +typedef struct ep3_xmtr_rail_stats
47865 +{
47866 +    unsigned long some_stat;
47867 +} EP3_XMTR_RAIL_STATS;
47868 +
47869 +typedef struct ep3_xmtr_rail
47870 +{
47871 +    EP_XMTR_RAIL       Generic;                                /* generic portion */
47872 +
47873 +    struct list_head   FreeDescList;                           /* freelist of per-rail transmit descriptors */
47874 +    unsigned int       FreeDescCount;                          /*   and number on free list */
47875 +    unsigned int        TotalDescCount;
47876 +    spinlock_t         FreeDescLock;                           /*   and lock for free list */
47877 +    struct list_head    DescBlockList;                         /* list of transmit descriptor blocks */
47878 +
47879 +    unsigned int        FreeDescWaiting;                       /* waiting for descriptors to be freed */
47880 +    kcondvar_t          FreeDescSleep;                         /*   and sleep here */
47881 +
47882 +    EP3_XMTR_RAIL_STATS stats;                                  /* elan3 specific xmtr rail stats */
47883 +} EP3_XMTR_RAIL;
47884 +
47885 +typedef struct ep3_comms_rail
47886 +{
47887 +    EP_COMMS_RAIL      Generic;                                /* generic comms rail */
47888 +    sdramaddr_t                QueueDescs;                             /* input queue descriptors */
47889 +} EP3_COMMS_RAIL;
47890 +
47891 +/* epcommsTx_elan3.c */
47892 +extern void           ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
47893 +extern void           ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
47894 +extern void           ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail);
47895 +
47896 +/* epcommsRx_elan3.c */
47897 +extern void          CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdMainAddr, E3_uint32 PAckVal);
47898 +extern void           StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp);
47899 +extern void           StallThreadForHalted  (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp);
47900 +
47901 +extern void           ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
47902 +extern void           ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
47903 +extern void           ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail);
47904 +
47905 +/* epcomms_elan3.c */
47906 +extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r);
47907 +extern void           ep3comms_del_rail (EP_COMMS_RAIL *r);
47908 +extern void           ep3comms_display_rail (EP_COMMS_RAIL *r);
47909 +
47910 +/* epcommsTx_elan3.c */
47911 +extern int            ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
47912 +extern void           ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int phase);
47913 +extern int            ep3xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
47914 +extern long           ep3xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
47915 +extern void           ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
47916 +extern void           ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
47917 +extern int            ep3xmtr_check_txd_state(EP_TXD *txd);
47918 +
47919 +extern void           ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
47920 +extern void           ep3xmtr_display_txd  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
47921 +
47922 +extern void           ep3xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
47923 +
47924 +/* epcommsRx_elan3.c */
47925 +extern int           ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
47926 +extern void          ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
47927 +extern void          ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
47928 +extern void          ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
47929 +
47930 +extern EP_RXD       *ep3rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail);
47931 +
47932 +extern long          ep3rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
47933 +extern void           ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
47934 +extern void           ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
47935 +
47936 +extern void           ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
47937 +extern void           ep3rcvr_display_rxd  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
47938 +
47939 +extern void           ep3rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
47940 +
47941 +#endif /* !defined(__ELAN3__) */
47942 +
47943 +/*
47944 + * Local variables:
47945 + * c-file-style: "stroustrup"
47946 + * End:
47947 + */
47948 +#endif /* __EPCOMMS_ELAN3_H */
47949 diff -urN clean/drivers/net/qsnet/ep/epcomms_elan3_thread.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3_thread.c
47950 --- clean/drivers/net/qsnet/ep/epcomms_elan3_thread.c   1969-12-31 19:00:00.000000000 -0500
47951 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan3_thread.c     2004-01-20 06:03:15.000000000 -0500
47952 @@ -0,0 +1,296 @@
47953 +/*
47954 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
47955 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
47956 + *
47957 + *    For licensing information please see the supplied COPYING file
47958 + *
47959 + */
47960 +
47961 +#ident "@(#)$Id: epcomms_elan3_thread.c,v 1.4 2004/01/20 11:03:15 david Exp $"
47962 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan3_thread.c,v $ */
47963 +
47964 +//#include <qsnet/types.h>
47965 +
47966 +typedef char               int8_t;
47967 +typedef unsigned char      uint8_t;
47968 +typedef short              int16_t;
47969 +typedef unsigned short     uint16_t;
47970 +typedef int                int32_t;
47971 +typedef unsigned int       uint32_t;
47972 +typedef long long          int64_t;
47973 +typedef unsigned long long uint64_t;
47974 +
47975 +#include <elan3/e3types.h>
47976 +#include <elan3/events.h>
47977 +#include <elan3/elanregs.h>
47978 +#include <elan3/intrinsics.h>
47979 +
47980 +#include <elan/nmh.h>
47981 +#include <elan/kcomm.h>
47982 +#include <elan/epcomms.h>
47983 +
47984 +#include "kcomm_vp.h"
47985 +#include "kcomm_elan3.h"
47986 +#include "epcomms_elan3.h"
47987 +
47988 +#ifndef offsetof
47989 +#define offsetof(s, m)                 (unsigned long)(&(((s *)0)->m))
47990 +#endif
47991 +
47992 +EP3_RAIL_ELAN *rail;
47993 +EP3_RCVR_RAIL_ELAN *r;
47994 +EP3_RCVR_RAIL_MAIN *rm;
47995 +
47996 +void
47997 +ep3comms_rcvr (EP3_RAIL_ELAN *rail, EP3_RCVR_RAIL_ELAN *rcvrElan, EP3_RCVR_RAIL_MAIN *rcvrMain, 
47998 +             EP3_InputQueue *q, unsigned int *cookies)
47999 +{
48000 +    int           count = 1;
48001 +    E3_Addr       nfptr = q->q_fptr + q->q_size;
48002 +    E3_uint32     tmp;
48003 +    int           i;
48004 +    E3_Addr       buffer;
48005 +    int                  len;
48006 +    E3_DMA       *dma;
48007 +    E3_Event     *event;
48008 +
48009 +    /* clear the queue state to allow envelopes to arrive */
48010 +    q->q_state = 0;
48011 +
48012 +    for (;;)
48013 +    {
48014 +       if (! rcvrElan->ThreadShouldHalt)
48015 +           c_waitevent ((E3_Event *) &q->q_event, count);                                              /* HALT POINT */
48016 +
48017 +       if (rcvrElan->ThreadShouldHalt && nfptr == q->q_bptr)
48018 +       {
48019 +           asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan));
48020 +           asm volatile ("ta %0"        : /* no outputs */ : "i" (EP3_UNIMP_THREAD_HALTED));           /* HALT POINT */
48021 +           continue;
48022 +       }
48023 +
48024 +       count = 0;
48025 +       do {
48026 +           /* Process the message at nfptr */
48027 +           EP_ENVELOPE      *env = (EP_ENVELOPE *) nfptr;
48028 +           EP3_RXD_RAIL_ELAN *rxd;
48029 +           int ack;
48030 +           
48031 +           EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);                                        /* HALT POINT */
48032 +           
48033 +           while ((rxd = (EP3_RXD_RAIL_ELAN *)rcvrElan->PendingDescs) == 0)
48034 +           {
48035 +               /* no receive descriptors, so trap to the kernel to wait
48036 +                * for receive descriptor to be queued, we pass the rcvr
48037 +                * in %g1, so that the trap handler can restart us. */
48038 +               EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
48039 +               asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rcvrElan));
48040 +               asm volatile ("ta %0"        : /* no outputs */ : "i" (EP3_UNIMP_TRAP_NO_DESCS));       /* HALT POINT */
48041 +               EP3_SPINENTER(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);                            /* HALT POINT */
48042 +           }
48043 +
48044 +           if (env->Version != EP_ENVELOPE_VERSION)
48045 +           {
48046 +               /* This envelope has been cancelled - so just consume it */
48047 +               EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
48048 +               goto consume_envelope;
48049 +           }
48050 +
48051 +           dma   = rxd->Dmas;
48052 +           event = rxd->ChainEvent;
48053 +
48054 +           if (EP_IS_MULTICAST(env->Attr))
48055 +           {
48056 +               dma->dma_type            = E3_DMA_TYPE (DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
48057 +               dma->dma_size            = BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t);
48058 +               dma->dma_source          = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, Bitmap);
48059 +               dma->dma_dest            = (E3_Addr) &((EP_RXD_MAIN *) rxd->RxdMain)->Bitmap;
48060 +               dma->dma_destEvent       = (E3_Addr) event;
48061 +               dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
48062 +               dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
48063 +               dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
48064 +               
48065 +               event->ev_Count = 1;
48066 +
48067 +               dma++; event++;
48068 +           }
48069 +
48070 +           if (env->nFrags == 0)
48071 +           {
48072 +               /* Generate a "get" DMA to accept the envelope and fire the rx handler */
48073 +               dma->dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
48074 +               dma->dma_size            = 0;
48075 +               dma->dma_destEvent       = (E3_Addr) &rxd->DataEvent;
48076 +               dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
48077 +               dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
48078 +               dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
48079 +               len = 0;
48080 +           }
48081 +           else
48082 +           {
48083 +               /* Generate the DMA chain to fetch the data */
48084 +               for (i = 0, buffer = rxd->Data.nmd_addr, len = 0; i < env->nFrags; i++, dma++, event++)
48085 +               {
48086 +                   dma->dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
48087 +                   dma->dma_size            = env->Frags[i].nmd_len;
48088 +                   dma->dma_source          = env->Frags[i].nmd_addr;
48089 +                   dma->dma_dest            = buffer;
48090 +                   dma->dma_destEvent       = (E3_Addr) event;
48091 +                   dma->dma_destCookieVProc = DMA_COOKIE_THREAD | DMA_COOKIE (cookies[env->NodeId], EP_VP_DATA (rail->NodeId));
48092 +                   dma->dma_srcEvent        = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DataEvent);
48093 +                   dma->dma_srcCookieVProc  = DMA_COOKIE_THREAD | DMA_REMOTE_COOKIE (cookies[env->NodeId], EP_VP_DATA (env->NodeId));
48094 +                   
48095 +                   event->ev_Count = 1;
48096 +                   
48097 +                   buffer += dma->dma_size;
48098 +                   len    += dma->dma_size;
48099 +               }
48100 +               
48101 +               /* Point the last dma at the done event */
48102 +               (--dma)->dma_destEvent = (E3_Addr) &rxd->DataEvent;
48103 +               
48104 +               if (rxd->Data.nmd_len < len)
48105 +               {
48106 +                   /* The receive descriptor was too small for the message */
48107 +                   /* complete the message anyway,  but don't transfer any */
48108 +                   /* data,  we set the length to EP_MSG_TOO_BIG */
48109 +                   for (i = 0, dma = rxd->Dmas; i < env->nFrags; i++, dma++)
48110 +                       dma->dma_size = 0;
48111 +                   
48112 +                   len = EP_MSG_TOO_BIG;
48113 +               }
48114 +           }
48115 +           
48116 +           /* Store the received message length in the rxdElan for CompleteEnvelope */
48117 +           rxd->Data.nmd_len = len;
48118 +
48119 +           /* Initialise %g1 with the  "rxd" so the trap handler can
48120 +            * complete the envelope processing if we trap while sending the
48121 +            * packet */
48122 +           asm volatile ("mov %0, %%g1" : /* no outputs */ : "r" (rxd));
48123 +
48124 +           /* Generate a packet to start the data transfer */
48125 +           c_open (EP_VP_DATA (env->NodeId));
48126 +           c_sendtrans2 (TR_THREADIDENTIFY, rxd->Dmas->dma_destCookieVProc, 0, 0);
48127 +           c_sendmem (TR_SENDACK | TR_REMOTEDMA, 0, rxd->Dmas); 
48128 +           ack = c_close();
48129 +           
48130 +           /*
48131 +            * If we trapped for an output timeout, then the trap handler will have
48132 +            * completed processing this envelope and cleared the spinlock, so we just
48133 +            * need to update the queue descriptor.
48134 +            */
48135 +           if (ack == EP3_PAckStolen)
48136 +               goto consume_envelope;
48137 +           
48138 +           if (ack != E3_PAckOk)
48139 +           {
48140 +               /* our packet got nacked, so trap into the kernel so that
48141 +                * it can complete processing of this envelope.
48142 +                */
48143 +               asm volatile ("ta %0" : /* no outputs */ : "i" (EP3_UNIMP_TRAP_PACKET_NACKED));         /* HALT POINT */
48144 +               goto consume_envelope;
48145 +           }
48146 +
48147 +           /* remove the RXD from the pending list */
48148 +           EP3_SPINENTER (&rcvrElan->PendingLock, &rcvrMain->PendingLock);
48149 +           if ((rcvrElan->PendingDescs = rxd->Next) == 0)
48150 +               rcvrMain->PendingDescsTailp = 0;
48151 +           EP3_SPINEXIT (&rcvrElan->PendingLock, &rcvrMain->PendingLock);
48152 +
48153 +           /* Copy the envelope information - as 5 64 byte chunks.
48154 +            * We force the parameters in g5, g6 so that they aren't
48155 +            * trashed by the loadblk32 into the locals/ins
48156 +            */
48157 +           if (EP_HAS_PAYLOAD(env->Attr))
48158 +           { 
48159 +               register void *src asm ("g5") = (void *) env;
48160 +               register void *dst asm ("g6") = (void *)  &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope;
48161 +
48162 +               asm volatile (
48163 +                   "and     %%sp,63,%%g7               ! Calculate stack alignment\n"
48164 +                   "add     %%g7,64,%%g7               ! Space to save the registers\n"
48165 +                   "sub     %%sp,%%g7,%%sp             ! align stack\n" 
48166 +                   "stblock64 %%l0,[%%sp]              ! save the locals and ins\n"
48167 +
48168 +                   "ldblock64 [%0 + 0],%%l0            ! load 64-byte block into locals/ins\n"         /* copy envelope */
48169 +                   "stblock64 %%l0,[%1 + 0]            ! store 64-byte block from local/ins\n"
48170 +                   "ldblock64 [%0 + 64],%%l0           ! load 64-byte block into locals/ins\n"
48171 +                   "stblock64 %%l0,[%1 + 64]           ! store 64-byte block from local/ins\n"
48172 +
48173 +                   "ldblock64 [%0 + 128],%%l0          ! load 64-byte block into locals/ins\n"         /* copy payload */
48174 +                   "stblock64 %%l0,[%1 + 128]          ! store 64-byte block from local/ins\n"
48175 +                   "ldblock64 [%0 + 192],%%l0          ! load 64-byte block into locals/ins\n"
48176 +                   "stblock64 %%l0,[%1 + 192]          ! store 64-byte block from local/ins\n"
48177 +
48178 +                   "ldblock64 [%%sp],%%l0              ! restore locals and ins\n"
48179 +                   "add     %%sp,%%g7,%%sp             ! restore stack pointer\n"
48180 +                   : /* outputs */
48181 +                   : /* inputs */ "r" (src), "r" (dst)
48182 +                   : /* clobbered */ "g5", "g6", "g7" );
48183 +           }
48184 +           else
48185 +           { 
48186 +               register void *src asm ("g5") = (void *) env;
48187 +               register void *dst asm ("g6") = (void *)  &((EP_RXD_MAIN *) rxd->RxdMain)->Envelope;
48188 +
48189 +               asm volatile (
48190 +                   "and     %%sp,63,%%g7               ! Calculate stack alignment\n"
48191 +                   "add     %%g7,64,%%g7               ! Space to save the registers\n"
48192 +                   "sub     %%sp,%%g7,%%sp             ! align stack\n" 
48193 +                   "stblock64 %%l0,[%%sp]              ! save the locals and ins\n"
48194 +
48195 +                   "ldblock64 [%0 + 0],%%l0            ! load 64-byte block into locals/ins\n"
48196 +                   "stblock64 %%l0,[%1 + 0]            ! store 64-byte block from local/ins\n"
48197 +                   "ldblock64 [%0 + 64],%%l0           ! load 64-byte block into locals/ins\n"
48198 +                   "stblock64 %%l0,[%1 + 64]           ! store 64-byte block from local/ins\n"
48199 +
48200 +                   "ldblock64 [%%sp],%%l0              ! restore locals and ins\n"
48201 +                   "add     %%sp,%%g7,%%sp             ! restore stack pointer\n"
48202 +                   : /* outputs */
48203 +                   : /* inputs */ "r" (src), "r" (dst)
48204 +                   : /* clobbered */ "g5", "g6", "g7" );
48205 +           }
48206 +
48207 +           /* Store the message length to indicate that I've finished */
48208 +           ((EP_RXD_MAIN *) rxd->RxdMain)->Len = rxd->Data.nmd_len;                                    /* PCI write  */
48209 +           
48210 +           EP3_SPINEXIT(&rcvrElan->ThreadLock, &rcvrMain->ThreadLock);
48211 +
48212 +       consume_envelope:
48213 +           /* Sample the queue full bit *BEFORE* moving the fptr.
48214 +            * Then only clear it if it was full before, otherwise,
48215 +            * as soon as the fptr is moved on the queue could fill 
48216 +            * up, and so clearing it could mark a full queue as 
48217 +            * empty.
48218 +            *
48219 +            * While the full bit is set, the queue is in a 'steady
48220 +            * state', so it is safe to set the q_state
48221 +            * 
48222 +            */
48223 +           if (((tmp = q->q_state) & E3_QUEUE_FULL) == 0)
48224 +               q->q_fptr = nfptr;                              /* update queue */
48225 +           else
48226 +           {
48227 +               q->q_fptr = nfptr;                              /* update queue */
48228 +               q->q_state = tmp &~E3_QUEUE_FULL;               /* and clear full flag */
48229 +           }
48230 +
48231 +           count++;                                            /* bump message count */
48232 +           if (nfptr == q->q_top)                              /* queue wrap */
48233 +               nfptr = q->q_base;
48234 +           else
48235 +               nfptr += q->q_size;
48236 +
48237 +           c_break_busywait();                                 /* be nice              HALT POINT */
48238 +
48239 +       } while (nfptr != q->q_bptr);                           /* loop until Fptr == Bptr */
48240 +    }
48241 +}
48242 +
48243 +
48244 +/*
48245 + * Local variables:
48246 + * c-file-style: "stroustrup"
48247 + * End:
48248 + */
48249 diff -urN clean/drivers/net/qsnet/ep/epcomms_elan4.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.c
48250 --- clean/drivers/net/qsnet/ep/epcomms_elan4.c  1969-12-31 19:00:00.000000000 -0500
48251 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.c    2005-08-09 05:57:14.000000000 -0400
48252 @@ -0,0 +1,393 @@
48253 +/*
48254 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
48255 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
48256 + *
48257 + *    For licensing information please see the supplied COPYING file
48258 + *
48259 + */
48260 +
48261 +#ident "@(#)$Id: epcomms_elan4.c,v 1.12.2.1 2005/08/09 09:57:14 mike Exp $"
48262 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4.c,v $ */
48263 +
48264 +#include <qsnet/kernel.h>
48265 +
48266 +#include <elan/kcomm.h>
48267 +#include <elan/epsvc.h>
48268 +#include <elan/epcomms.h>
48269 +
48270 +#include "debug.h"
48271 +#include "kcomm_elan4.h"
48272 +#include "epcomms_elan4.h"
48273 +
48274 +static void
48275 +ep4comms_flush_interrupt (EP4_RAIL *rail, void *arg)
48276 +{
48277 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) arg;
48278 +    unsigned long  flags;
48279 +
48280 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
48281 +    commsRail->r_flush_count = 0;
48282 +    kcondvar_wakeupall (&commsRail->r_flush_sleep, &commsRail->r_flush_lock);
48283 +    spin_unlock_irqrestore  (&commsRail->r_flush_lock, flags);
48284 +}
48285 +
48286 +void
48287 +ep4comms_flush_start (EP4_COMMS_RAIL *commsRail)
48288 +{
48289 +    kmutex_lock (&commsRail->r_flush_mutex);
48290 +}
48291 +
48292 +void
48293 +ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail)
48294 +{
48295 +    unsigned long flags;
48296 +
48297 +    ep4_wait_event_cmd (commsRail->r_flush_mcq, 
48298 +                       commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event),
48299 +                       E4_EVENT_INIT_VALUE (-32 * commsRail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),
48300 +                       commsRail->r_flush_ecq->ecq_addr, 
48301 +                       INTERRUPT_CMD | (commsRail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT));
48302 +
48303 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags); 
48304 +    while (commsRail->r_flush_count != 0) 
48305 +       if (kcondvar_timedwait (&commsRail->r_flush_sleep, &commsRail->r_flush_lock, &flags, (lbolt + (HZ*10))) == -1) 
48306 +           elan4_hardware_lock_check(((EP4_RAIL *)(commsRail->r_generic.Rail))->r_ctxt.ctxt_dev, "flush_wait");
48307 +    spin_unlock_irqrestore (&commsRail->r_flush_lock, flags);
48308 +    
48309 +    kmutex_unlock (&commsRail->r_flush_mutex);
48310 +}
48311 +
48312 +void
48313 +ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq)
48314 +{
48315 +    unsigned long flags;
48316 +
48317 +    spin_lock_irqsave (&commsRail->r_flush_lock, flags);
48318 +
48319 +    elan4_set_event_cmd (cq, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event));
48320 +    
48321 +    commsRail->r_flush_count++;
48322 +    
48323 +    spin_unlock_irqrestore (&commsRail->r_flush_lock, flags);
48324 +}
48325 +
48326 +void
48327 +ep4comms_flush_callback (void *arg, statemap_t *map)
48328 +{
48329 +    EP4_COMMS_RAIL   *commsRail = (EP4_COMMS_RAIL *) arg;
48330 +    EP_COMMS_SUBSYS  *subsys    = commsRail->r_generic.Subsys;
48331 +    EP4_RAIL        *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
48332 +    unsigned int     rnum       = rail->r_generic.Number;
48333 +    struct list_head *el;
48334 +
48335 +    /*
48336 +     * We stall the retry thread from CB_FLUSH_FILTERING until
48337 +     * we've finished CB_FLUSH_FLUSHING to ensure that sten 
48338 +     * packets can not be being retried while we flush them
48339 +     * through.
48340 +     */
48341 +    switch (rail->r_generic.CallbackStep)
48342 +    {
48343 +    case EP_CB_FLUSH_FILTERING:
48344 +       ep_kthread_stall (&rail->r_retry_thread);
48345 +
48346 +       ep4comms_flush_start (commsRail);
48347 +       break;
48348 +
48349 +    case EP_CB_FLUSH_FLUSHING:
48350 +       break;
48351 +    }
48352 +
48353 +    kmutex_lock (&subsys->Lock);
48354 +    list_for_each (el, &subsys->Transmitters) {
48355 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
48356 +       
48357 +       if (xmtr->Rails[rnum])
48358 +           ep4xmtr_flush_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
48359 +    }
48360 +
48361 +    list_for_each (el, &subsys->Receivers) {
48362 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
48363 +       
48364 +       if (rcvr->Rails[rnum])
48365 +           ep4rcvr_flush_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
48366 +    }
48367 +    kmutex_unlock (&subsys->Lock);
48368 +
48369 +    switch (rail->r_generic.CallbackStep)
48370 +    {
48371 +    case EP_CB_FLUSH_FILTERING:
48372 +       ep4comms_flush_wait (commsRail);
48373 +       break;
48374 +
48375 +    case EP_CB_FLUSH_FLUSHING:
48376 +       ep_kthread_resume (&rail->r_retry_thread);
48377 +       break;
48378 +    }
48379 +}
48380 +
48381 +void
48382 +ep4comms_failover_callback (void *arg, statemap_t *map)
48383 +{
48384 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
48385 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
48386 +    unsigned int     rnum       = commsRail->Rail->Number;
48387 +    struct list_head *el;
48388 +
48389 +    kmutex_lock (&subsys->Lock);
48390 +    list_for_each (el, &subsys->Transmitters) {
48391 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
48392 +       
48393 +       if (xmtr->Rails[rnum])
48394 +           ep4xmtr_failover_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
48395 +    }
48396 +
48397 +    list_for_each (el, &subsys->Receivers) {
48398 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
48399 +       
48400 +       if (rcvr->Rails[rnum])
48401 +           ep4rcvr_failover_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
48402 +    }
48403 +    kmutex_unlock (&subsys->Lock);
48404 +}
48405 +
48406 +void
48407 +ep4comms_disconnect_callback (void *arg, statemap_t *map)
48408 +{
48409 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
48410 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
48411 +    unsigned int     rnum       = commsRail->Rail->Number;
48412 +    struct list_head *el;
48413 +
48414 +    kmutex_lock (&subsys->Lock);
48415 +    list_for_each (el, &subsys->Transmitters) {
48416 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
48417 +       
48418 +       if (xmtr->Rails[rnum])
48419 +           ep4xmtr_disconnect_callback (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum]);
48420 +    }
48421 +
48422 +    list_for_each (el, &subsys->Receivers) {
48423 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
48424 +       
48425 +       if (rcvr->Rails[rnum])
48426 +           ep4rcvr_disconnect_callback (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum]);
48427 +    }
48428 +    kmutex_unlock (&subsys->Lock);
48429 +}
48430 +
48431 +void
48432 +ep4comms_neterr_callback (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
48433 +{
48434 +    EP_COMMS_RAIL    *commsRail = (EP_COMMS_RAIL *) arg;
48435 +    EP_COMMS_SUBSYS  *subsys    = commsRail->Subsys;
48436 +    unsigned int     rnum       = commsRail->Rail->Number;
48437 +    struct list_head *el;
48438 +    
48439 +    /* First - stall the retry thread, so that it will no longer restart 
48440 +     *         any sten packets from the retry lists */
48441 +    ep_kthread_stall (&rail->r_retry_thread);
48442 +
48443 +    ep4comms_flush_start ((EP4_COMMS_RAIL *) commsRail);
48444 +
48445 +    /* Second - flush through all command queues for xmtrs and rcvrs */
48446 +    kmutex_lock (&subsys->Lock);
48447 +    list_for_each (el, &subsys->Transmitters) {
48448 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
48449 +       
48450 +       if (xmtr->Rails[rnum])
48451 +           ep4xmtr_neterr_flush (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies);
48452 +    }
48453 +    
48454 +    list_for_each (el, &subsys->Receivers) {
48455 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
48456 +       
48457 +       if (rcvr->Rails[rnum])
48458 +           ep4rcvr_neterr_flush (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies);
48459 +    }
48460 +    kmutex_unlock (&subsys->Lock);
48461 +
48462 +    /* Third - wait for flush to complete */
48463 +    ep4comms_flush_wait ((EP4_COMMS_RAIL *) commsRail);
48464 +    
48465 +    /* Fourth - flush through all command queues */
48466 +    ep4_flush_ecqs (rail);
48467 +    
48468 +    /* Fifth - search all the retry lists for the network error cookies */
48469 +    kmutex_lock (&subsys->Lock);
48470 +    list_for_each (el, &subsys->Transmitters) {
48471 +       EP_XMTR *xmtr = list_entry (el, EP_XMTR, Link);
48472 +       
48473 +       if (xmtr->Rails[rnum])
48474 +           ep4xmtr_neterr_check (xmtr, (EP4_XMTR_RAIL *) xmtr->Rails[rnum], nodeId, cookies);
48475 +    }
48476 +
48477 +    list_for_each (el, &subsys->Receivers) {
48478 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
48479 +       
48480 +       if (rcvr->Rails[rnum])
48481 +           ep4rcvr_neterr_check (rcvr, (EP4_RCVR_RAIL *) rcvr->Rails[rnum], nodeId, cookies);
48482 +    }
48483 +    kmutex_unlock (&subsys->Lock);
48484 +
48485 +    ep_kthread_resume (&rail->r_retry_thread);
48486 +}
48487 +
48488 +
48489 +EP_COMMS_RAIL *
48490 +ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r)
48491 +{
48492 +    EP4_RAIL       *rail = (EP4_RAIL *)r;
48493 +    ELAN4_DEV      *dev  = rail->r_ctxt.ctxt_dev;
48494 +    EP4_COMMS_RAIL *commsRail;
48495 +    E4_InputQueue   qdesc;
48496 +    int i;
48497 +
48498 +    KMEM_ZALLOC (commsRail, EP4_COMMS_RAIL *,sizeof (EP4_COMMS_RAIL), 1);
48499 +
48500 +    if (commsRail == NULL)
48501 +       return NULL;
48502 +    
48503 +    commsRail->r_generic.Ops.DelRail          = ep4comms_del_rail;
48504 +    commsRail->r_generic.Ops.DisplayRail      = ep4comms_display_rail;
48505 +    commsRail->r_generic.Ops.Rcvr.AddRail     = ep4rcvr_add_rail;
48506 +    commsRail->r_generic.Ops.Rcvr.DelRail     = ep4rcvr_del_rail;
48507 +    commsRail->r_generic.Ops.Rcvr.Check       = ep4rcvr_check;
48508 +    commsRail->r_generic.Ops.Rcvr.QueueRxd    = ep4rcvr_queue_rxd;
48509 +    commsRail->r_generic.Ops.Rcvr.RpcPut      = ep4rcvr_rpc_put;
48510 +    commsRail->r_generic.Ops.Rcvr.RpcGet      = ep4rcvr_rpc_get;
48511 +    commsRail->r_generic.Ops.Rcvr.RpcComplete = ep4rcvr_rpc_complete;
48512 +
48513 +    commsRail->r_generic.Ops.Rcvr.StealRxd    = ep4rcvr_steal_rxd;
48514 +
48515 +    commsRail->r_generic.Ops.Rcvr.DisplayRcvr = ep4rcvr_display_rcvr;
48516 +    commsRail->r_generic.Ops.Rcvr.DisplayRxd  = ep4rcvr_display_rxd;
48517 +
48518 +    commsRail->r_generic.Ops.Rcvr.FillOutRailStats = ep4rcvr_fillout_rail_stats;
48519 +
48520 +    commsRail->r_generic.Ops.Xmtr.AddRail     = ep4xmtr_add_rail;
48521 +    commsRail->r_generic.Ops.Xmtr.DelRail     = ep4xmtr_del_rail;
48522 +    commsRail->r_generic.Ops.Xmtr.Check       = ep4xmtr_check;
48523 +    commsRail->r_generic.Ops.Xmtr.BindTxd     = ep4xmtr_bind_txd;
48524 +    commsRail->r_generic.Ops.Xmtr.UnbindTxd   = ep4xmtr_unbind_txd;
48525 +    commsRail->r_generic.Ops.Xmtr.PollTxd     = ep4xmtr_poll_txd;
48526 +    commsRail->r_generic.Ops.Xmtr.CheckTxdState = ep4xmtr_check_txd_state;
48527 +
48528 +    commsRail->r_generic.Ops.Xmtr.DisplayXmtr = ep4xmtr_display_xmtr;
48529 +    commsRail->r_generic.Ops.Xmtr.DisplayTxd  = ep4xmtr_display_txd;
48530 +
48531 +    commsRail->r_generic.Ops.Xmtr.FillOutRailStats = ep4xmtr_fillout_rail_stats;
48532 +
48533 +    /* Allocate command queue space for flushing (1 dword for interrupt + 4 dwords for waitevent) */
48534 +    if ((commsRail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == NULL)
48535 +    {
48536 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
48537 +       return NULL;
48538 +    }
48539 +
48540 +    if ((commsRail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == NULL)
48541 +    {
48542 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
48543 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
48544 +       return NULL;
48545 +    }
48546 +
48547 +    /* Allocate and initialise the elan memory part */
48548 +    if ((commsRail->r_elan = ep_alloc_elan (r, EP4_COMMS_RAIL_ELAN_SIZE, 0, &commsRail->r_elan_addr)) == (sdramaddr_t) 0)
48549 +    {
48550 +       ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
48551 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
48552 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
48553 +       return NULL;
48554 +    }
48555 +
48556 +    ep4_register_intcookie (rail, &commsRail->r_flush_intcookie, commsRail->r_elan_addr + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event),
48557 +                           ep4comms_flush_interrupt, commsRail);
48558 +
48559 +    elan4_sdram_writeq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType),
48560 +                       E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
48561 +
48562 +
48563 +    /* Allocate and initialise all the queue descriptors as "full" with no event */
48564 +    if ((commsRail->r_descs = ep_alloc_memory_elan (r, EP_EPCOMMS_QUEUE_BASE, roundup (EP_MSG_NSVC * EP_QUEUE_DESC_SIZE, SDRAM_PAGE_SIZE), EP_PERM_ALL, 0)) == (sdramaddr_t) 0)
48565 +    {
48566 +       ep_free_elan (r, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE);
48567 +       ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
48568 +       ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
48569 +       KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
48570 +       return NULL;
48571 +    }
48572 +
48573 +    qdesc.q_bptr    = 0;
48574 +    qdesc.q_fptr    = 8;
48575 +    qdesc.q_control = E4_InputQueueControl (qdesc.q_bptr,qdesc.q_fptr, 8);
48576 +    qdesc.q_event   = 0;
48577 +
48578 +    for (i = 0; i < EP_MSG_NSVC; i++)
48579 +       elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qdesc, commsRail->r_descs + (i * EP_QUEUE_DESC_SIZE),
48580 +                                   sizeof (E4_InputQueue));
48581 +
48582 +    kmutex_init (&commsRail->r_flush_mutex);
48583 +    spin_lock_init (&commsRail->r_flush_lock);
48584 +    kcondvar_init (&commsRail->r_flush_sleep);
48585 +
48586 +    ep_register_callback (r, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback,      commsRail);
48587 +    ep_register_callback (r, EP_CB_FLUSH_FLUSHING,  ep4comms_flush_callback,      commsRail);
48588 +    ep_register_callback (r, EP_CB_FAILOVER,        ep4comms_failover_callback,   commsRail);
48589 +    ep_register_callback (r, EP_CB_DISCONNECTING,   ep4comms_disconnect_callback, commsRail);
48590 +
48591 +    commsRail->r_neterr_ops.op_func = ep4comms_neterr_callback;
48592 +    commsRail->r_neterr_ops.op_arg  = commsRail;
48593 +    
48594 +    ep4_add_neterr_ops (rail, &commsRail->r_neterr_ops);
48595 +
48596 +    return (EP_COMMS_RAIL *) commsRail;
48597 +}
48598 +
48599 +void
48600 +ep4comms_del_rail (EP_COMMS_RAIL *r)
48601 +{
48602 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r;
48603 +    EP4_RAIL       *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
48604 +
48605 +    ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FILTERING, ep4comms_flush_callback,      commsRail);
48606 +    ep_remove_callback (&rail->r_generic, EP_CB_FLUSH_FLUSHING,  ep4comms_flush_callback,      commsRail);
48607 +    ep_remove_callback (&rail->r_generic, EP_CB_FAILOVER,        ep4comms_failover_callback,   commsRail);
48608 +    ep_remove_callback (&rail->r_generic, EP_CB_DISCONNECTING,   ep4comms_disconnect_callback, commsRail);
48609 +
48610 +    kcondvar_destroy (&commsRail->r_flush_sleep);
48611 +    spin_lock_destroy (&commsRail->r_flush_lock);
48612 +    kmutex_destroy (&commsRail->r_flush_mutex);
48613 +
48614 +    ep_free_memory_elan (&rail->r_generic, EP_EPCOMMS_QUEUE_BASE);
48615 +    ep_free_elan (&rail->r_generic, commsRail->r_elan_addr, EP4_COMMS_RAIL_ELAN_SIZE);
48616 +
48617 +    ep4_deregister_intcookie (rail, &commsRail->r_flush_intcookie);
48618 +
48619 +    ep4_put_ecq (rail, commsRail->r_flush_mcq, 4);
48620 +    ep4_put_ecq (rail, commsRail->r_flush_ecq, 1);
48621 +
48622 +    KMEM_FREE (commsRail, sizeof (EP4_COMMS_RAIL));
48623 +}
48624 +
48625 +void
48626 +ep4comms_display_rail (EP_COMMS_RAIL *r)
48627 +{
48628 +    EP4_COMMS_RAIL *commsRail = (EP4_COMMS_RAIL *) r;
48629 +    EP4_RAIL       *rail      = (EP4_RAIL *) commsRail->r_generic.Rail;
48630 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
48631 +    
48632 +    ep4_display_rail (rail);
48633 +
48634 +    ep_debugf (DBG_DEBUG, "   flush count=%d mcq=%p ecq=%p event %llx.%llx.%llx\n", 
48635 +              commsRail->r_flush_count, commsRail->r_flush_mcq, commsRail->r_flush_ecq,
48636 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_CountAndType)),
48637 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WritePtr)),
48638 +              elan4_sdram_readq (dev, commsRail->r_elan + offsetof (EP4_COMMS_RAIL_ELAN, r_flush_event.ev_WriteValue)));
48639 +}
48640 +
48641 +/*
48642 + * Local variables:
48643 + * c-file-style: "stroustrup"
48644 + * End:
48645 + */
48646 diff -urN clean/drivers/net/qsnet/ep/epcomms_elan4.h linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.h
48647 --- clean/drivers/net/qsnet/ep/epcomms_elan4.h  1969-12-31 19:00:00.000000000 -0500
48648 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4.h    2005-03-22 11:47:36.000000000 -0500
48649 @@ -0,0 +1,471 @@
48650 +/*
48651 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
48652 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
48653 + *
48654 + *    For licensing information please see the supplied COPYING file
48655 + *
48656 + */
48657 +
48658 +#ifndef __EPCOMMS_ELAN4_H
48659 +#define __EPCOMMS_ELAN4_H
48660 +
48661 +#ident "@(#)$Id: epcomms_elan4.h,v 1.15 2005/03/22 16:47:36 david Exp $"
48662 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4.h,v $ */
48663 +
48664 +
48665 +#include <elan4/types.h>
48666 +
48667 +/*
48668 + * Elan4 spinlocks are a pair of 64 bit words, one in elan sdram and one in main memory
48669 + * the sdram word holds the thread sequence number in the bottom 32 bits and the main
48670 + * lock in the top 32 bits.  The main memory word holds the sequence number only in
48671 + * its bottom 32 bits */
48672 +
48673 +typedef volatile E4_uint64 EP4_SPINLOCK_MAIN;
48674 +typedef volatile E4_uint64 EP4_SPINLOCK_ELAN;
48675 +
48676 +#define EP4_SPINLOCK_SEQ       0
48677 +#define EP4_SPINLOCK_MLOCK     4
48678 +
48679 +#if defined(__elan4__)
48680 +
48681 +#define EP4_SPINENTER(CPORT,SLE,SLM) \
48682 +do { \
48683 +    register long tmp; \
48684 +\
48685 +    asm volatile ("ld4         [%1], %0\n" \
48686 +                 "inc          %0\n" \
48687 +                 "st4          %0, [%1]\n" \
48688 +                 "ld4          [%1 + 4], %0\n" \
48689 +                 "srl8,byte    %0, 4, %0\n" \
48690 +                 : /* outputs */ "=r" (tmp)  \
48691 +                 : /* inputs */ "r" (SLE), "r" (SLM)); \
48692 +\
48693 +    if (tmp) \
48694 +       ep4_spinblock (CPORT,SLE, SLM); \
48695 +} while (0)
48696 +
48697 +extern void ep4_spinblock(E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm);
48698 +
48699 +#define EP4_SPINEXIT(CPORT,SLE,SLM) \
48700 +do { \
48701 +    register long tmp; \
48702 +\
48703 +    asm volatile ("ld4         [%1], %0\n" \
48704 +                 "st4          %0, [%2]\n" \
48705 +                 : /* outputs */ "=r" (tmp) \
48706 +                 : /* inputs */ "r" (SLE), "r" (SLM)); \
48707 +} while (0)
48708 +
48709 +#else
48710 +
48711 +#define EP4_SPINENTER(DEV,SLE,SLM) \
48712 +do { \
48713 +    uint32_t seq; \
48714 +\
48715 +    mb(); \
48716 +    elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 1); \
48717 +    mb(); \
48718 +    while ((seq = elan4_sdram_readl (DEV, (SLE) + EP4_SPINLOCK_SEQ)) != *((uint32_t *) (SLM))) \
48719 +    { \
48720 +       while (*((uint32_t *) (SLM)) == (seq - 1)) \
48721 +       { \
48722 +           mb(); \
48723 +           DELAY(1); \
48724 +       } \
48725 +    } \
48726 +} while (0)
48727 +
48728 +#define EP4_SPINEXIT(DEV,SLE,SLM) \
48729 +do { \
48730 +    wmb(); \
48731 +    elan4_sdram_writel (DEV, (SLE) + EP4_SPINLOCK_MLOCK, 0); \
48732 +} while (0)
48733 +
48734 +#endif /* !defined(__elan4__) */
48735 +
48736 +#define EP4_TXD_STEN_RETRYCOUNT        16
48737 +#define EP4_RXD_STEN_RETRYCOUNT        1
48738 +#define EP4_DMA_RETRYCOUNT     16
48739 +
48740 +typedef struct ep4_intr_cmd
48741 +{
48742 +    E4_uint64          c_write_cmd;
48743 +    E4_uint64          c_write_value;
48744 +    E4_uint64          c_intr_cmd;
48745 +} EP4_INTR_CMD;
48746 +
48747 +#define        EP4_INTR_CMD_NDWORDS    (sizeof (EP4_INTR_CMD) / 8)
48748 +
48749 +typedef struct ep4_rxd_sten_cmd
48750 +{
48751 +    E4_uint64          c_open;
48752 +
48753 +    E4_uint64          c_trans;
48754 +    E4_uint64          c_cookie;
48755 +    E4_uint64          c_dma_typeSize;
48756 +    E4_uint64          c_dma_cookie;
48757 +    E4_uint64          c_dma_vproc;
48758 +    E4_uint64          c_dma_srcAddr;
48759 +    E4_uint64          c_dma_dstAddr;
48760 +    E4_uint64          c_dma_srcEvent;
48761 +    E4_uint64          c_dma_dstEvent;
48762 +
48763 +    E4_uint64          c_ok_guard;
48764 +    E4_uint64          c_ok_write_cmd;
48765 +    E4_uint64          c_ok_write_value;
48766 +    
48767 +    E4_uint64          c_fail_guard;
48768 +    E4_uint64          c_fail_setevent;
48769 +
48770 +    E4_uint64          c_nop_cmd;
48771 +} EP4_RXD_STEN_CMD;
48772 +
48773 +#define EP4_RXD_STEN_CMD_NDWORDS       (sizeof (EP4_RXD_STEN_CMD) / 8)
48774 +
48775 +typedef struct ep4_rxd_dma_cmd
48776 +{
48777 +    E4_uint64          c_dma_typeSize;
48778 +    E4_uint64          c_dma_cookie;
48779 +    E4_uint64          c_dma_vproc;
48780 +    E4_uint64          c_dma_srcAddr;
48781 +    E4_uint64          c_dma_dstAddr;
48782 +    E4_uint64          c_dma_srcEvent;
48783 +    E4_uint64          c_dma_dstEvent;
48784 +    E4_uint64          c_nop_cmd;
48785 +} EP4_RXD_DMA_CMD;
48786 +
48787 +#define EP4_RXD_DMA_CMD_NDWORDS                (sizeof (EP4_RXD_DMA_CMD) / 8)
48788 +#define EP4_RXD_START_CMD_NDWORDS      (sizeof (E4_ThreadRegs) / 8)
48789 +
48790 +typedef struct ep4_rxd_rail_elan
48791 +{
48792 +    EP4_RXD_STEN_CMD    rxd_sten[EP_MAXFRAG+1];
48793 +
48794 +    EP4_INTR_CMD       rxd_done_cmd;                           /* command stream issued by done event (aligned to 64 bytes) */
48795 +    E4_Addr            rxd_next;                               /* linked list when on pending list (pad to 32 bytes)*/
48796 +    E4_Event32         rxd_failed;                             /* event set when sten packet fails */
48797 +
48798 +    EP4_INTR_CMD        rxd_failed_cmd;                                /* command stream issued by fail event (aligned to 64 bytes) */
48799 +    E4_uint64          rxd_queued;                             /* rxd queuing thread has executed (pad to 32 bytes)*/
48800 +
48801 +    E4_Event32         rxd_start;                              /* event to set to fire off an event chain (used as chain[0]) */
48802 +    E4_Event32         rxd_chain[EP_MAXFRAG];                  /* chained events (aligned to 32 bytes) */
48803 +    E4_Event32         rxd_done;                               /* event to fire done command stream causing interrupt (used as chain[EP_MAXFRAG]) */
48804 +
48805 +    E4_Addr            rxd_rxd;                                /* elan address of EP4_RXD_MAIN */
48806 +    E4_Addr            rxd_main;                               /* elan address of EP4_RXD_RAIL_MAIN */
48807 +    E4_uint64          rxd_debug;                              /* thread debug value */
48808 +
48809 +    EP_NMD             rxd_buffer;                             /* Network mapping descriptor for receive data */
48810 +} EP4_RXD_RAIL_ELAN;
48811 +
48812 +#define EP4_RXD_RAIL_ELAN_SIZE roundup(sizeof (EP4_RXD_RAIL_ELAN), 64)
48813 +
48814 +typedef struct ep4_rxd_rail_main
48815 +{
48816 +    E4_uint64          rxd_sent[EP_MAXFRAG+1];                 /* sten packet sent */
48817 +    E4_uint64          rxd_failed;                             /* sten packet failed */
48818 +    E4_uint64          rxd_done;                               /* operation complete */
48819 +
48820 +    E4_Addr            rxd_scq;                                /* command port for scq */
48821 +} EP4_RXD_RAIL_MAIN;
48822 +
48823 +#define EP4_RXD_RAIL_MAIN_SIZE roundup(sizeof (EP4_RXD_RAIL_MAIN), 8)
48824 +
48825 +#if !defined(__elan4__)
48826 +typedef struct ep4_rxd_rail
48827 +{
48828 +    EP_RXD_RAIL                rxd_generic;
48829 +
48830 +    struct list_head    rxd_retry_link;
48831 +    unsigned long       rxd_retry_time;
48832 +
48833 +    EP4_INTCOOKIE      rxd_intcookie;
48834 +
48835 +    sdramaddr_t                rxd_elan;
48836 +    EP_ADDR            rxd_elan_addr;
48837 +    
48838 +    EP4_RXD_RAIL_MAIN  *rxd_main;
48839 +    EP_ADDR            rxd_main_addr;
48840 +
48841 +    EP4_ECQ           *rxd_ecq;                                /* cq with 128 bytes targeted by event */
48842 +    EP4_ECQ           *rxd_scq;                                /* cq with 8 bytes targeted by main/thread store */
48843 +} EP4_RXD_RAIL;
48844 +
48845 +#define EP4_NUM_RXD_PER_BLOCK  16
48846 +
48847 +typedef struct ep4_rxd_rail_block
48848 +{
48849 +    struct list_head   blk_link;
48850 +    EP4_RXD_RAIL       blk_rxds[EP4_NUM_RXD_PER_BLOCK];
48851 +} EP4_RXD_RAIL_BLOCK;
48852 +
48853 +#endif /* !defined(__elan4__) */
48854 +
48855 +typedef struct ep4_rcvr_rail_elan
48856 +{
48857 +    E4_uint64          rcvr_thread_stall[8];                   /* place for thread to stall */
48858 +    E4_Event32         rcvr_qevent;                            /* Input queue event */
48859 +    E4_Event32         rcvr_thread_halt;                       /* place for thread to halt */
48860 +
48861 +    volatile E4_Addr    rcvr_pending_tailp;                    /* list of pending rxd's (elan addr) */
48862 +    volatile E4_Addr   rcvr_pending_head;                      /*   -- this pair aligned to 16 bytes */
48863 +
48864 +    EP4_SPINLOCK_ELAN  rcvr_thread_lock;                       /* spinlock for thread processing loop */
48865 +
48866 +    E4_uint64          rcvr_stall_intcookie;                   /* interrupt cookie to use when requested to halt */
48867 +
48868 +    E4_uint64          rcvr_qbase;                             /* base of input queue */
48869 +    E4_uint64          rcvr_qlast;                             /* last item in input queue */
48870 +
48871 +    E4_uint64          rcvr_debug;                             /* thread debug value */
48872 +} EP4_RCVR_RAIL_ELAN;
48873 +
48874 +typedef struct ep4_rcvr_rail_main
48875 +{
48876 +    EP4_SPINLOCK_MAIN   rcvr_thread_lock;                      /* spinlock for thread processing loop */
48877 +} EP4_RCVR_RAIL_MAIN;
48878 +
48879 +#if !defined(__elan4__)
48880 +
48881 +typedef struct ep4_rcvr_rail_stats
48882 +{
48883 +    unsigned long some_stat;
48884 +} EP4_RCVR_RAIL_STATS;
48885 +
48886 +typedef struct ep4_rcvr_rail
48887 +{
48888 +    EP_RCVR_RAIL       rcvr_generic;                           /* generic portion */
48889 +    
48890 +    sdramaddr_t                rcvr_elan;
48891 +    EP_ADDR            rcvr_elan_addr;
48892 +
48893 +    EP4_RCVR_RAIL_MAIN *rcvr_main;
48894 +    EP_ADDR            rcvr_main_addr;
48895 +
48896 +    sdramaddr_t                rcvr_slots;                             /* input queue slots */
48897 +    EP_ADDR            rcvr_slots_addr;                        /*   and elan address */
48898 +
48899 +    EP_ADDR            rcvr_stack;                             /* stack for thread */
48900 +
48901 +    EP4_ECQ           *rcvr_ecq;                               /* command queue space for thread STEN packets */
48902 +    EP4_ECQ           *rcvr_resched;                           /* command queue space to reschedule the thread */
48903 +
48904 +    struct list_head    rcvr_freelist;                         /* freelist of per-rail receive descriptors */
48905 +    unsigned int        rcvr_freecount;                                /*   and number on free list */
48906 +    unsigned int        rcvr_totalcount;                               /*   total number created */
48907 +    spinlock_t          rcvr_freelock;                         /*   and lock for free list */
48908 +    struct list_head    rcvr_blocklist;                                /* list of receive descriptor blocks */
48909 +
48910 +    unsigned int        rcvr_freewaiting;                      /* waiting for descriptors to be freed */
48911 +    kcondvar_t         rcvr_freesleep;                         /*   and sleep here */
48912 +
48913 +    EP4_INTCOOKIE      rcvr_stall_intcookie;                   /* interrupt cookie for thread halt */
48914 +    unsigned char      rcvr_thread_halted;                     /* thread has been halted */
48915 +    unsigned char       rcvr_cleanup_waiting;                  /* waiting for cleanup */
48916 +    kcondvar_t          rcvr_cleanup_sleep;                    /*   and sleep here */
48917 +
48918 +    EP4_RETRY_OPS      rcvr_retryops;
48919 +
48920 +    struct list_head    rcvr_retrylist;                                /* list of txd's to retry envelopes for */
48921 +    struct list_head    rcvr_polllist;                         /* list of txd's to poll for completion */
48922 +    spinlock_t          rcvr_retrylock;
48923 +    
48924 +    EP4_RCVR_RAIL_STATS rcvr_stats;                             /* elan4 specific rcvr_rail stats */
48925 +
48926 +} EP4_RCVR_RAIL;
48927 +
48928 +#endif /* !defined(__elan4__) */
48929 +
48930 +typedef struct ep4_txd_rail_elan
48931 +{
48932 +    EP4_INTR_CMD        txd_env_cmd;                           /* command stream for envelope event (64 byte aligned) */
48933 +    E4_uint64          txd_pad0;                               /*  pad to 32 bytes */
48934 +    E4_Event32         txd_env;                                /* event set when STEN packet fails */
48935 +
48936 +    EP4_INTR_CMD       txd_done_cmd;                           /* command stream for done event (64 byte aligned) */
48937 +    E4_uint64          txd_pad1;                               /*  pad to 32 bytes */
48938 +    E4_Event32         txd_done;                               /* event set when transmit complete */
48939 +
48940 +    E4_Event32         txd_data;                               /* event set when xmit completes (=> phase becomes passive) */
48941 +} EP4_TXD_RAIL_ELAN;
48942 +
48943 +#define EP4_TXD_RAIL_ELAN_SIZE         roundup(sizeof(EP4_TXD_RAIL_ELAN), 64)
48944 +
48945 +typedef struct ep4_txd_rail_main
48946 +{
48947 +    E4_uint64          txd_env;
48948 +    E4_uint64          txd_data;
48949 +    E4_uint64          txd_done;
48950 +} EP4_TXD_RAIL_MAIN;
48951 +
48952 +#define EP4_TXD_RAIL_MAIN_SIZE         roundup(sizeof(EP4_TXD_RAIL_MAIN), 8)
48953 +
48954 +#if !defined (__elan4__)
48955 +typedef struct ep4_txd_rail
48956 +{
48957 +    EP_TXD_RAIL                txd_generic;
48958 +
48959 +    struct list_head    txd_retry_link;
48960 +    unsigned long      txd_retry_time;
48961 +
48962 +    EP4_INTCOOKIE      txd_intcookie;
48963 +
48964 +    sdramaddr_t                txd_elan;
48965 +    EP_ADDR            txd_elan_addr;
48966 +    
48967 +    EP4_TXD_RAIL_MAIN  *txd_main;
48968 +    EP_ADDR            txd_main_addr;
48969 +
48970 +    EP4_ECQ           *txd_ecq;
48971 +
48972 +    E4_uint64          txd_cookie;
48973 +} EP4_TXD_RAIL;
48974 +
48975 +#define EP4_NUM_TXD_PER_BLOCK  21
48976 +
48977 +typedef struct ep4_txd_rail_block
48978 +{
48979 +    struct list_head   blk_link;
48980 +    EP4_TXD_RAIL       blk_txds[EP4_NUM_TXD_PER_BLOCK];
48981 +} EP4_TXD_RAIL_BLOCK;
48982 +
48983 +typedef struct ep4_xmtr_rail_main
48984 +{
48985 +    E4_int64           xmtr_flowcnt;
48986 +} EP4_XMTR_RAIL_MAIN;
48987 +
48988 +typedef struct ep4_xmtr_rail_stats
48989 +{
48990 +    unsigned long some_stat;
48991 +} EP4_XMTR_RAIL_STATS;
48992 +
48993 +#define EP4_TXD_LIST_POLL      0
48994 +#define EP4_TXD_LIST_STALLED   1
48995 +#define EP4_TXD_LIST_RETRY     2
48996 +#define EP4_TXD_NUM_LISTS      3
48997 +typedef struct ep4_xmtr_rail
48998 +{
48999 +    EP_XMTR_RAIL       xmtr_generic;
49000 +
49001 +    EP4_XMTR_RAIL_MAIN *xmtr_main;
49002 +    EP_ADDR            xmtr_main_addr;
49003 +
49004 +    struct list_head    xmtr_freelist;
49005 +    unsigned int        xmtr_freecount;
49006 +    unsigned int        xmtr_totalcount;
49007 +    spinlock_t          xmtr_freelock;
49008 +    struct list_head    xmtr_blocklist;
49009 +    unsigned int        xmtr_freewaiting;
49010 +    kcondvar_t         xmtr_freesleep;
49011 +
49012 +    EP4_INTCOOKIE      xmtr_intcookie;                         /* interrupt cookie for "polled" descriptors */
49013 +
49014 +    ELAN4_CQ           *xmtr_cq;
49015 +    E4_int64           xmtr_flowcnt;
49016 +
49017 +    EP4_RETRY_OPS      xmtr_retryops;
49018 +
49019 +    struct list_head    xmtr_retrylist[EP4_TXD_NUM_LISTS];     /* list of txd's to retry envelopes for */
49020 +    struct list_head    xmtr_polllist;                         /* list of txd's to poll for completion */
49021 +    spinlock_t          xmtr_retrylock;
49022 +
49023 +    EP4_XMTR_RAIL_STATS stats;                                  /* elan4 specific xmtr rail stats */
49024 +} EP4_XMTR_RAIL;
49025 +
49026 +#define EP4_XMTR_CQSIZE                CQ_Size64K                              /* size of command queue for xmtr */
49027 +#define EP4_XMTR_FLOWCNT       (CQ_Size(EP4_XMTR_CQSIZE) / 512)        /* # of STEN packets which can fit in */
49028 +
49029 +typedef struct ep4_comms_rail_elan
49030 +{
49031 +    E4_Event32         r_flush_event;
49032 +} EP4_COMMS_RAIL_ELAN;
49033 +
49034 +#define EP4_COMMS_RAIL_ELAN_SIZE       roundup(sizeof (EP4_COMMS_RAIL_ELAN), 32)
49035 +
49036 +typedef struct ep4_comms_rail
49037 +{
49038 +    EP_COMMS_RAIL      r_generic;                              /* generic comms rail */
49039 +    sdramaddr_t                r_descs;                                /* input queue descriptors */
49040 +
49041 +    sdramaddr_t                r_elan;                                 /* elan portion */
49042 +    EP_ADDR            r_elan_addr;
49043 +
49044 +    kmutex_t           r_flush_mutex;                          /* sequentialise flush usage */
49045 +    EP4_INTCOOKIE       r_flush_intcookie;                     /* interrupt cookie to generate */
49046 +
49047 +    kcondvar_t         r_flush_sleep;                          /* place to sleep waiting */
49048 +    spinlock_t         r_flush_lock;                           /*   and spinlock to use */
49049 +
49050 +    unsigned int       r_flush_count;                          /* # setevents issued */
49051 +    EP4_ECQ           *r_flush_ecq;                            /* command queue for interrupt */
49052 +    EP4_ECQ           *r_flush_mcq;                            /* command queue to issue waitevent */
49053 +
49054 +    EP4_NETERR_OPS      r_neterr_ops;                          /* network error fixup ops */
49055 +} EP4_COMMS_RAIL;
49056 +
49057 +/* epcommsTx_elan4.c */
49058 +extern void           ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
49059 +extern void           ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
49060 +extern void           ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail);
49061 +
49062 +extern void          ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
49063 +extern void          ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
49064 +
49065 +/* epcommsRx_elan4.c */
49066 +extern void           ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
49067 +extern void           ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
49068 +extern void           ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail);
49069 +
49070 +extern void          ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
49071 +extern void          ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
49072 +
49073 +/* epcomms_elan4.c */
49074 +extern void           ep4comms_flush_start (EP4_COMMS_RAIL *commsRail);
49075 +extern void           ep4comms_flush_wait (EP4_COMMS_RAIL *commsRail);
49076 +extern void           ep4comms_flush_setevent (EP4_COMMS_RAIL *commsRail, ELAN4_CQ *cq);
49077 +
49078 +extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *r);
49079 +extern void           ep4comms_del_rail (EP_COMMS_RAIL *r);
49080 +extern void          ep4comms_display_rail (EP_COMMS_RAIL *r);
49081 +
49082 +/* epcommsTx_elan4.c */
49083 +extern int            ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
49084 +extern void           ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase);
49085 +extern int            ep4xmtr_poll_txd (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
49086 +extern long           ep4xmtr_check (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
49087 +extern void           ep4xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
49088 +extern void           ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail);
49089 +extern int            ep4xmtr_check_txd_state(EP_TXD *txd);
49090 +
49091 +extern void           ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
49092 +extern void           ep4xmtr_display_txd  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
49093 +
49094 +extern void           ep4xmtr_fillout_rail_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
49095 +
49096 +/* epcommsRx_elan4.c */
49097 +extern int           ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
49098 +extern void          ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
49099 +extern void          ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
49100 +extern void          ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
49101 +
49102 +extern EP_RXD       *ep4rcvr_steal_rxd (EP_RCVR_RAIL *rcvrRail);
49103 +
49104 +extern long          ep4rcvr_check (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
49105 +extern void           ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
49106 +extern void           ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
49107 +
49108 +extern void           ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
49109 +extern void           ep4rcvr_display_rxd  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
49110 +
49111 +extern void           ep4rcvr_fillout_rail_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
49112 +
49113 +#endif /* !defined(__elan4__) */
49114 +
49115 +/*
49116 + * Local variables:
49117 + * c-file-style: "stroustrup"
49118 + * End:
49119 + */
49120 +#endif /* __EPCOMMS_ELAN4_H */
49121 diff -urN clean/drivers/net/qsnet/ep/epcomms_elan4_thread.c linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4_thread.c
49122 --- clean/drivers/net/qsnet/ep/epcomms_elan4_thread.c   1969-12-31 19:00:00.000000000 -0500
49123 +++ linux-2.6.9/drivers/net/qsnet/ep/epcomms_elan4_thread.c     2005-03-22 09:41:55.000000000 -0500
49124 @@ -0,0 +1,347 @@
49125 +/*
49126 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
49127 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
49128 + *
49129 + *    For licensing information please see the supplied COPYING file
49130 + *
49131 + */
49132 +
49133 +#ident "@(#)$Id: epcomms_elan4_thread.c,v 1.13 2005/03/22 14:41:55 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
49134 +/*      $Source: /cvs/master/quadrics/epmod/epcomms_elan4_thread.c,v $*/
49135 +
49136 +//#include <qsnet/types.h>
49137 +
49138 +typedef char           int8_t;
49139 +typedef unsigned char  uint8_t;
49140 +typedef short          int16_t;
49141 +typedef unsigned short uint16_t;
49142 +typedef int            int32_t;
49143 +typedef unsigned int   uint32_t;
49144 +typedef long           int64_t;
49145 +typedef unsigned long  uint64_t;
49146 +
49147 +#include <elan/nmh.h>
49148 +#include <elan/kcomm.h>
49149 +#include <elan/epcomms.h>
49150 +
49151 +#include <elan4/registers.h>
49152 +
49153 +#include "kcomm_vp.h"
49154 +#include "kcomm_elan4.h"
49155 +#include "epcomms_elan4.h"
49156 +
49157 +#include <elan4/trtype.h>
49158 +
49159 +/* assembler in epcomms_asm_elan4_thread.S */
49160 +extern void               c_waitevent_interrupt (E4_uint64 *cport, E4_Event32 *event, E4_uint64 count, E4_uint64 intcookie);
49161 +extern EP4_RXD_RAIL_ELAN *c_stall_thread (EP4_RCVR_RAIL_ELAN *rcvrRail);
49162 +
49163 +#define R32_to_R47             "%r32", "%r33", "%r34", "%r35", "%r36", "%r37", "%r38", "%r39", \
49164 +                               "%r40", "%r41", "%r42", "%r43", "%r44", "%r45", "%r46", "%r47"
49165 +#define R48_to_R63             "%r48", "%r49", "%r50", "%r51", "%r52", "%r53", "%r54", "%r55", \
49166 +                               "%r56", "%r57", "%r58", "%r59", "%r60", "%r61", "%r62", "%r63"
49167 +
49168 +/* proto types for code in asm_elan4_thread.S */
49169 +extern void c_waitevent (E4_uint64 *commandport, E4_Addr event, E4_uint64 count);
49170 +extern void c_reschedule(E4_uint64 *commandport);
49171 +
49172 +static inline unsigned long
49173 +c_load_u16(unsigned short *ptr)
49174 +{
49175 +    unsigned long value;
49176 +
49177 +    asm volatile ("ld2         [%1], %%r2\n"
49178 +                 "srl8,byte    %%r2, %1, %0\n"
49179 +                 "sll8         %0, 48, %0\n"
49180 +                 "srl8         %0, 48, %0\n"
49181 +                 : /* outputs */ "=r" (value) 
49182 +                 : /* inputs */ "r" (ptr)
49183 +                 : /* clobbered */ "%r2");
49184 +    return value;
49185 +}
49186 +
49187 +static inline unsigned long
49188 +c_load_u32(unsigned int *ptr)
49189 +{
49190 +    unsigned long value;
49191 +
49192 +    asm volatile ("ld4         [%1], %%r2\n"
49193 +                 "srl8,byte    %%r2, %1, %0\n"
49194 +                 "sll8         %0, 32, %0\n"
49195 +                 "srl8         %0, 32, %0\n"
49196 +                 : /* outputs */ "=r" (value) 
49197 +                 : /* inputs */ "r" (ptr)
49198 +                 : /* clobbered */ "%r2");
49199 +    return value;
49200 +}
49201 +
49202 +static inline void
49203 +c_store_u32(unsigned int *ptr, unsigned long value)
49204 +{
49205 +    asm volatile ("sll8,byte   %0, %1, %%r2\n"
49206 +                 "st4          %%r2, [%1]\n"
49207 +                 : /* no outputs */ 
49208 +                 : /* inputs */ "r" (value), "r" (ptr)
49209 +                 : /* clobbered */ "%r2");
49210 +}
49211 +
49212 +/* Reschedule the current Elan thread to the back of the run queue 
49213 + * if there is another one ready to run */
49214 +static inline void
49215 +c_yield (E4_uint64 *commandport)
49216 +{
49217 +    unsigned long rval;
49218 +
49219 +    asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */);
49220 +
49221 +    if (rval  & ICC_SIGNED_BIT)
49222 +       c_reschedule(commandport);
49223 +}
49224 +
49225 +/* Reschedule the current thread if we're in danger of exceeding the 
49226 + * thread instruction count */
49227 +static inline void
49228 +c_insn_check(E4_uint64 *commandport)
49229 +{
49230 +    unsigned long rval;
49231 +
49232 +    asm volatile ("breaktest %0" : /* outputs */ "=r" (rval) : /* inputs */);
49233 +
49234 +    if (rval & ICC_ZERO_BIT)
49235 +       c_reschedule(commandport);
49236 +}
49237 +
49238 +void
49239 +ep4_spinblock (E4_uint64 *cport, EP4_SPINLOCK_ELAN *sle, EP4_SPINLOCK_MAIN *slm)
49240 +{
49241 +    do {
49242 +       unsigned long val = *sle & 0xffffffff;          /* Elan value is low 32 bits */
49243 +
49244 +       *slm = val;                                     /* Release my lock */
49245 +       
49246 +       while (*sle >> 32)                              /* Wait until the main */
49247 +           c_yield(cport);                             /* releases the lock */
49248 +       
49249 +       c_store_u32 ((unsigned int *) sle, val + 1);    /* and try and relock */
49250 +    } while (*sle >> 32);
49251 +}
49252 +
49253 +#define RESCHED_AFTER_PKTS     ((CQ_Size(CQ_Size64K) / 128) - 1)
49254 +
49255 +void
49256 +ep4comms_rcvr (EP4_RAIL_ELAN *rail, EP4_RCVR_RAIL_ELAN *rcvrElan, EP4_RCVR_RAIL_MAIN *rcvrMain,
49257 +              E4_InputQueue *inputq, E4_uint64 *cport, E4_uint64 *resched)
49258 +{
49259 +    long count = 1;
49260 +    long fptr  = inputq->q_fptr;
49261 +
49262 +    for (;;)
49263 +    {
49264 +       c_waitevent (cport, inputq->q_event, -count << 5);
49265 +
49266 +       count = 0;
49267 +
49268 +       while (fptr != inputq->q_bptr)
49269 +       {
49270 +           EP_ENVELOPE        *env      = (EP_ENVELOPE *) fptr;
49271 +           unsigned long       nodeid   = c_load_u32 (&env->NodeId);
49272 +           unsigned long       opencmd  = OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(nodeid));
49273 +           unsigned long       vproc    = EP_VP_DATA(rail->r_nodeid);
49274 +           EP_ATTRIBUTE        attr     = c_load_u32 (&env->Attr);
49275 +           unsigned long       txdRail  = c_load_u32 (&env->TxdRail);
49276 +           unsigned long       nFrags   = c_load_u32 (&env->nFrags);
49277 +           unsigned long       srcevent = (EP_IS_RPC(attr) ? txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_data) :
49278 +                                           txdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done));
49279 +           E4_uint64           cookie;
49280 +           EP4_RXD_RAIL_ELAN  *rxdElan;
49281 +           EP4_RXD_RAIL_MAIN  *rxdMain;
49282 +           EP_RXD_MAIN        *rxd;
49283 +           EP4_RXD_STEN_CMD   *sten;
49284 +           E4_Event32         *event;
49285 +           unsigned long       first;
49286 +           unsigned long       buffer;
49287 +           unsigned long       len;
49288 +           unsigned long       i;
49289 +
49290 +           EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
49291 +
49292 +           if ((rxdElan = (EP4_RXD_RAIL_ELAN *) rcvrElan->rcvr_pending_head) == 0)
49293 +           {
49294 +               EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
49295 +
49296 +               rxdElan = c_stall_thread (rcvrElan);
49297 +
49298 +               EP4_SPINENTER(resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
49299 +           }
49300 +           
49301 +           if (c_load_u32 (&env->Version) != EP_ENVELOPE_VERSION)              /* envelope has been cancelled */
49302 +           {
49303 +               EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
49304 +               goto consume_envelope;
49305 +           }
49306 +
49307 +           rxd     = (EP_RXD_MAIN *) rxdElan->rxd_rxd;
49308 +           rxdMain = (EP4_RXD_RAIL_MAIN *) rxdElan->rxd_main;
49309 +           first   = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(attr) ? 1 : 0) + (nFrags == 0 ? 1 : nFrags));
49310 +           sten    = &rxdElan->rxd_sten[first];
49311 +           event   = &rxdElan->rxd_chain[first];
49312 +           cookie  = rail->r_cookies[nodeid];
49313 +
49314 +           if (EP_IS_MULTICAST(attr))                          /* need to fetch broadcast bitmap */
49315 +           {
49316 +               sten->c_open          = opencmd;
49317 +               sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
49318 +               sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
49319 +               sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(BT_BITOUL(EP_MAX_NODES) * sizeof (bitmap_t), DMA_DataTypeWord, 0, EP4_DMA_RETRYCOUNT);
49320 +               sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
49321 +               sten->c_dma_vproc     = vproc;
49322 +               sten->c_dma_srcAddr   = c_load_u32 (&env->TxdMain.nmd_addr) + offsetof(EP_TXD_MAIN, Bitmap);
49323 +               sten->c_dma_dstAddr   = (E4_Addr) &rxd->Bitmap;
49324 +               sten->c_dma_srcEvent  = srcevent;
49325 +               sten->c_dma_dstEvent  = (E4_Addr) event;
49326 +
49327 +               event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS);
49328 +
49329 +               cookie += (EP4_COOKIE_INC << 1);
49330 +
49331 +               sten++; event++;
49332 +           }
49333 +
49334 +           if (nFrags == 0)
49335 +           {
49336 +               /* Generate an empty "get" DMA to accept the envelope and fire the rx handler */
49337 +               sten->c_open          = opencmd;
49338 +               sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
49339 +               sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
49340 +               sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
49341 +               sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
49342 +               sten->c_dma_vproc     = vproc;
49343 +               sten->c_dma_srcEvent  = srcevent;
49344 +               sten->c_dma_dstEvent  = (E4_Addr) event;
49345 +
49346 +               event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS);
49347 +
49348 +               len = 0;
49349 +
49350 +               cookie += (EP4_COOKIE_INC << 1);
49351 +           }
49352 +           else
49353 +           {
49354 +               /* Generate the DMA chain to fetch the data */
49355 +               for (i = 0, buffer = c_load_u32 (&rxdElan->rxd_buffer.nmd_addr), len = 0; i < nFrags; i++)
49356 +               {
49357 +                   unsigned long fragLen = c_load_u32 (&env->Frags[i].nmd_len);
49358 +
49359 +                   sten->c_open          = opencmd;
49360 +                   sten->c_trans         = SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16);
49361 +                   sten->c_cookie        = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_STEN;
49362 +                   sten->c_dma_typeSize  = E4_DMA_TYPE_SIZE(fragLen, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
49363 +                   sten->c_dma_cookie    = cookie | EP4_COOKIE_THREAD | EP4_COOKIE_REMOTE | EP4_COOKIE_DMA | EP4_COOKIE_INC;
49364 +                   sten->c_dma_vproc     = vproc;
49365 +                   sten->c_dma_srcAddr   = c_load_u32 (&env->Frags[i].nmd_addr);
49366 +                   sten->c_dma_dstAddr   = buffer;
49367 +                   sten->c_dma_srcEvent  = srcevent;
49368 +                   sten->c_dma_dstEvent  = (E4_Addr) event;
49369 +                   
49370 +                   event->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS);
49371 +                   
49372 +                   buffer += fragLen;
49373 +                   len    += fragLen;
49374 +
49375 +                   cookie += (EP4_COOKIE_INC << 1);
49376 +
49377 +                   sten++; event++;
49378 +               }
49379 +               
49380 +               (--event)->ev_CountAndType = E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS);
49381 +
49382 +               if (c_load_u32 (&rxdElan->rxd_buffer.nmd_len) < len)
49383 +               {
49384 +                   /* The receive descriptor was too small for the message */
49385 +                   /* complete the message anyway,  but don't transfer any */
49386 +                   /* data,  we set the length to EP_MSG_TOO_BIG */
49387 +                   for (i = first, sten = &rxdElan->rxd_sten[first]; i <= EP_MAXFRAG; i++, sten++)
49388 +                       sten->c_dma_typeSize = E4_DMA_TYPE_SIZE(0, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
49389 +                   
49390 +                   len = EP_MSG_TOO_BIG;
49391 +               }
49392 +           }
49393 +           
49394 +           /* Stuff the first STEN packet into the command queue, there's always enough space, 
49395 +            * since we will insert a waitevent at least once for the queue size */
49396 +           asm volatile ("ld64         [%0], %%r32\n"
49397 +                         "ld64         [%0 + 64], %%r48\n"
49398 +                         "st64         %%r32, [%1]\n"
49399 +                         "st64         %%r48, [%1]\n"
49400 +                         : /* no outputs */
49401 +                         : /* inputs */ "r" (&rxdElan->rxd_sten[first]), "r" (cport)
49402 +                         : /* clobbered */ R32_to_R47, R48_to_R63);
49403 +
49404 +           /* remove the RXD from the pending list */
49405 +           if ((rcvrElan->rcvr_pending_head = rxdElan->rxd_next) == 0)
49406 +               rcvrElan->rcvr_pending_tailp = (E4_Addr)&rcvrElan->rcvr_pending_head;
49407 +
49408 +           /* mark as not queued */
49409 +           rxdElan->rxd_queued = 0;
49410 +
49411 +           /* copy down the envelope */
49412 +           if (EP_HAS_PAYLOAD(attr))
49413 +               asm volatile ("ld64     [%0],    %%r32\n"
49414 +                             "ld64     [%0+64], %%r48\n"
49415 +                             "st64     %%r32, [%1]\n"
49416 +                             "ld64     [%0+128], %%r32\n"
49417 +                             "st64     %%r48, [%1+64]\n"
49418 +                             "ld64     [%0+192], %%r48\n"
49419 +                             "st64     %%r32, [%1 + 128]\n"
49420 +                             "st64     %%r48, [%1 + 192]\n"
49421 +                             : /* no outputs */
49422 +                             : /* inputs */    "r" (env), "r" (&rxd->Envelope)
49423 +                             : /* clobbered */ R32_to_R47, R48_to_R63);
49424 +
49425 +           else
49426 +               asm volatile ("ld64     [%0],    %%r32\n"
49427 +                             "ld64     [%0+64], %%r48\n"
49428 +                             "st64     %%r32, [%1]\n"
49429 +                             "st64     %%r48, [%1+64]\n"
49430 +                             : /* no outputs */
49431 +                             : /* inputs */    "r" (env), "r" (&rxd->Envelope)
49432 +                             : /* clobbered */ R32_to_R47, R48_to_R63);
49433 +
49434 +           /* Store the message length to indicate that I've finished */
49435 +           c_store_u32 (&rxd->Len, len);
49436 +           
49437 +           /* Finally update the network error cookie */
49438 +           rail->r_cookies[nodeid] = cookie;
49439 +
49440 +           EP4_SPINEXIT (resched, &rcvrElan->rcvr_thread_lock, &rcvrMain->rcvr_thread_lock);
49441 +
49442 +       consume_envelope:
49443 +           if (fptr != rcvrElan->rcvr_qlast)
49444 +               fptr += EP_INPUTQ_SIZE;
49445 +           else
49446 +               fptr = rcvrElan->rcvr_qbase;
49447 +
49448 +           if (! rcvrElan->rcvr_stall_intcookie)
49449 +               inputq->q_fptr = fptr;
49450 +
49451 +           if (++count >= RESCHED_AFTER_PKTS)
49452 +               break;
49453 +
49454 +           c_insn_check (cport);
49455 +       }
49456 +       
49457 +       if (rcvrElan->rcvr_stall_intcookie)
49458 +       {
49459 +           c_waitevent_interrupt (cport, &rcvrElan->rcvr_thread_halt, -(1 << 5), rcvrElan->rcvr_stall_intcookie);
49460 +           inputq->q_fptr = fptr;
49461 +
49462 +           count++;                                            /* one extra as we were given an extra set to wake us up */
49463 +       }
49464 +    }
49465 +}
49466 +
49467 +/*
49468 + * Local variables:
49469 + * c-file-style: "stroustrup"
49470 + * End:
49471 + */
49472 diff -urN clean/drivers/net/qsnet/ep/epcommsFwd.c linux-2.6.9/drivers/net/qsnet/ep/epcommsFwd.c
49473 --- clean/drivers/net/qsnet/ep/epcommsFwd.c     1969-12-31 19:00:00.000000000 -0500
49474 +++ linux-2.6.9/drivers/net/qsnet/ep/epcommsFwd.c       2005-07-20 08:01:34.000000000 -0400
49475 @@ -0,0 +1,310 @@
49476 +/*
49477 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
49478 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
49479 + *
49480 + *    For licensing information please see the supplied COPYING file
49481 + *
49482 + */
49483 +
49484 +#ident "@(#)$Id: epcommsFwd.c,v 1.12.4.1 2005/07/20 12:01:34 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
49485 +/*      $Source: /cvs/master/quadrics/epmod/epcommsFwd.c,v $*/
49486 +
49487 +#include <qsnet/kernel.h>
49488 +
49489 +#include <elan/kcomm.h>
49490 +#include <elan/epsvc.h>
49491 +#include <elan/epcomms.h>
49492 +
49493 +#include "debug.h"
49494 +
49495 +unsigned int epcomms_forward_limit = 8;
49496 +
49497 +static void
49498 +GenerateTree (unsigned nodeId, unsigned lowId, unsigned  highId, bitmap_t *bitmap, 
49499 +             unsigned *parentp, unsigned *childrenp, int *nchildrenp)
49500 +{
49501 +    int i;
49502 +    int count;
49503 +    int branch;
49504 +    int nSub;
49505 +    int branchIndex;
49506 +    int parent;
49507 +    int nBranch;
49508 +    int rem;
49509 +    int self;
49510 +    int branchRatio;
49511 +    int node;
49512 +    int x, y, z;
49513 +
49514 +
49515 +#ifdef DEBUG_PRINTF
49516 +    {
49517 +#define OVERFLOW "...]"
49518 +#define LINESZ  128
49519 +       char space[LINESZ+1];
49520 +
49521 +       if (ep_sprintf_bitmap (space, LINESZ-strlen(OVERFLOW), bitmap, 0, 0, (highId - lowId)+1) != -1)
49522 +           strcat (space, OVERFLOW);
49523 +
49524 +       EPRINTF3 (DBG_FORWARD, "GenerateTree; elan node low=%d node high=%d bitmap=%s\n", lowId, highId, space);
49525 +#undef OVERFLOW
49526 +#undef LINESZ
49527 +    }
49528 +#endif
49529 +
49530 +    /* Count the number of nodes in the partition */
49531 +    /* and work out which one I am */
49532 +    for (count = 0, self = ELAN_INVALID_NODE, i = lowId; i <= highId; i++)
49533 +    {
49534 +       if (BT_TEST (bitmap, i-lowId))
49535 +       {
49536 +           if (i == nodeId)
49537 +               self = count;
49538 +           count++;
49539 +       }
49540 +    }
49541 +
49542 +    EPRINTF2 (DBG_FORWARD, "GenerateTree: count=%d self=%d\n", count, self);
49543 +
49544 +    if (count == 0 || self == ELAN_INVALID_NODE)
49545 +    {
49546 +       *parentp    = ELAN_INVALID_NODE;
49547 +       *nchildrenp = 0;
49548 +       return;
49549 +    }
49550 +
49551 +    /* search for position in tree */
49552 +    branchRatio = EP_TREE_ARITY;               /* branching ratio */
49553 +    branch      = 0;                           /* start with process 0 */
49554 +    nSub        = count;                       /* and whole tree */
49555 +    branchIndex = -1;                          /* my branch # in parent */
49556 +    parent      = -1;                          /* my parent's group index # */
49557 +    
49558 +    while (branch != self)                     /* descend process tree */
49559 +    {                                          /* until I find myself */
49560 +       parent = branch;
49561 +       branch++;                               /* parent + 1 = first born */
49562 +       nSub--;                                 /* set # descendents */
49563 +       
49564 +       rem  = nSub % branchRatio;
49565 +       nSub = nSub / branchRatio + 1;
49566 +       x = rem * nSub;
49567 +       y = self - branch;
49568 +       
49569 +       if (y < x)                              /* my first 'rem' branches have */
49570 +       {                                       /* 1 more descendent... */
49571 +           branchIndex = y / nSub;
49572 +           branch += branchIndex * nSub;
49573 +       }
49574 +       else                                    /* than the rest of my branches */
49575 +       {
49576 +           nSub--;
49577 +           z = (y - x) / nSub;
49578 +           branchIndex = rem + z;
49579 +           branch += x + z * nSub;
49580 +       }
49581 +    }
49582 +
49583 +    branch++;                                  /* my first born */
49584 +    nSub--;                                    /* total # of my descendents */
49585 +    /* leaves + their parents may have # children < branchRatio */
49586 +    nBranch = (nSub < branchRatio) ? nSub : branchRatio;       
49587 +
49588 +    EPRINTF2 (DBG_FORWARD, "GenerateTree: parent=%d nBranch=%d\n", parent, nBranch);
49589 +
49590 +    /* Now calculate the real elan id's of the parent and my children */
49591 +    if (parent == -1)
49592 +       *parentp = ELAN_INVALID_NODE;
49593 +    else
49594 +    {
49595 +       for (i = lowId, node = 0; i <= highId; i++)
49596 +       {
49597 +           if (BT_TEST(bitmap, i-lowId))
49598 +               if (node++ == parent)
49599 +                   break;
49600 +       }
49601 +       *parentp = i;
49602 +    }
49603 +
49604 +    for (i = lowId, branchIndex = 0, node = 0; branchIndex < nBranch && i <= highId; i++)
49605 +    {
49606 +       if (BT_TEST(bitmap, i-lowId))
49607 +       {
49608 +           if (node == branch)
49609 +           {
49610 +               branch = branch + nSub / branchRatio + ((branchIndex < (nSub % branchRatio)) ? 1 : 0);
49611 +
49612 +               childrenp[branchIndex++] = i;
49613 +           }
49614 +           node++;
49615 +       }
49616 +    }
49617 +
49618 +    *nchildrenp = branchIndex;
49619 +}
49620 +
49621 +static void
49622 +ForwardTxDone (EP_TXD *txd, void *arg, EP_STATUS status)
49623 +{
49624 +    EP_FWD_DESC     *desc   = (EP_FWD_DESC *) arg;
49625 +    EP_RXD          *rxd    = desc->Rxd;
49626 +    EP_COMMS_SUBSYS *subsys = rxd->Rcvr->Subsys;
49627 +    unsigned long    flags;
49628 +
49629 +    /* XXXX: if transmit fails, could step to next node in this subtree ? */
49630 +
49631 +    spin_lock_irqsave (&subsys->ForwardDescLock, flags);
49632 +
49633 +    if (--desc->NumChildren > 0)
49634 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
49635 +    else
49636 +    {
49637 +       rxd->Rcvr->ForwardRxdCount--;
49638 +
49639 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
49640 +
49641 +       KMEM_FREE (desc, sizeof (EP_FWD_DESC));
49642 +
49643 +       rxd->Handler (rxd);
49644 +    }
49645 +}
49646 +
49647 +long
49648 +ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime)
49649 +{
49650 +    unsigned long flags;
49651 +    int i, res;
49652 +
49653 +    spin_lock_irqsave (&subsys->ForwardDescLock, flags);
49654 +    while (! list_empty (&subsys->ForwardDescList)) 
49655 +    {
49656 +       EP_RXD      *rxd     = (EP_RXD *) list_entry (subsys->ForwardDescList.next, EP_RXD, Link);
49657 +       EP_RXD_MAIN *rxdMain = rxd->RxdMain;
49658 +       EP_ENVELOPE *env     = &rxdMain->Envelope;
49659 +       EP_FWD_DESC *desc;
49660 +
49661 +       EPRINTF2 (DBG_FORWARD, "ep: forwarding rxd %p to range %x\n", rxd, env->Range);
49662 +
49663 +       list_del (&rxd->Link);
49664 +
49665 +       rxd->Rcvr->ForwardRxdCount++;
49666 +
49667 +       spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
49668 +
49669 +       KMEM_ALLOC (desc, EP_FWD_DESC *, sizeof (EP_FWD_DESC), 1);
49670 +
49671 +       if (desc == NULL)
49672 +       {
49673 +           spin_lock_irqsave (&subsys->ForwardDescLock, flags);
49674 +           rxd->Rcvr->ForwardRxdCount--;
49675 +           spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
49676 +
49677 +           rxd->Handler (rxd);
49678 +       }
49679 +       else
49680 +       {
49681 +           /* compute the spanning tree for this message */
49682 +           unsigned int destLo = EP_RANGE_LOW (env->Range);
49683 +           unsigned int destHi = EP_RANGE_HIGH (env->Range);
49684 +           unsigned int parent;
49685 +
49686 +           GenerateTree (subsys->Subsys.Sys->Position.pos_nodeid, destLo, destHi, rxdMain->Bitmap, &parent, desc->Children, &desc->NumChildren);
49687 +           
49688 +           if (desc->NumChildren == 0 || (epcomms_forward_limit && (rxd->Rcvr->ForwardRxdCount >= epcomms_forward_limit)))
49689 +           {
49690 +               EPRINTF5 (DBG_FORWARD, "ep; don't forward rxd %p to /%d (%d children/ %d forwarding (%d))\n",
49691 +                         rxd, rxd->Rcvr->Service, desc->NumChildren, rxd->Rcvr->ForwardRxdCount, epcomms_forward_limit);
49692 +
49693 +               spin_lock_irqsave (&subsys->ForwardDescLock, flags);
49694 +               rxd->Rcvr->ForwardRxdCount--;
49695 +               spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
49696 +
49697 +               KMEM_FREE (desc, sizeof (EP_FWD_DESC));
49698 +               
49699 +               rxd->Handler (rxd);
49700 +           }
49701 +           else
49702 +           {
49703 +               ep_nmd_subset (&desc->Data, &rxd->Data, 0, ep_rxd_len (rxd));
49704 +               desc->Rxd = rxd;
49705 +
49706 +               /* NOTE - cannot access 'desc' after last call to multicast, since it could complete
49707 +                *        and free the desc before we access it again.  Hence the reverse loop. */
49708 +               for (i = desc->NumChildren-1; i >= 0; i--)
49709 +               {
49710 +                   ASSERT (desc->Children[i] < subsys->Subsys.Sys->Position.pos_nodes);
49711 +
49712 +                   EPRINTF3 (DBG_FORWARD, "ep: forwarding rxd %p to node %d/%d\n", rxd, desc->Children[i], rxd->Rcvr->Service);
49713 +
49714 +                   if ((res = ep_multicast_forward (subsys->ForwardXmtr, desc->Children[i], rxd->Rcvr->Service, 0, 
49715 +                                                    ForwardTxDone, desc, env, EP_HAS_PAYLOAD(env->Attr) ? &rxdMain->Payload : NULL,  
49716 +                                                    rxdMain->Bitmap, &desc->Data, 1)) != EP_SUCCESS)
49717 +                   {
49718 +                       ep_debugf (DBG_FORWARD, "ep: ep_multicast_forward failed\n");
49719 +                       ForwardTxDone (NULL, desc, res);
49720 +                   }
49721 +               }
49722 +               
49723 +           }
49724 +       }
49725 +
49726 +       spin_lock_irqsave (&subsys->ForwardDescLock, flags);
49727 +    }
49728 +    spin_unlock_irqrestore (&subsys->ForwardDescLock, flags);
49729 +
49730 +    return (nextRunTime);
49731 +}
49732 +
49733 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
49734 +void
49735 +ep_csum_rxds (EP_COMMS_SUBSYS *subsys)
49736 +{
49737 +    unsigned long flags;
49738 +
49739 +    spin_lock_irqsave (&subsys->CheckSumDescLock, flags);
49740 +    while (! list_empty (&subsys->CheckSumDescList)) 
49741 +    {
49742 +       EP_RXD      *rxd = (EP_RXD *) list_entry (subsys->CheckSumDescList.next, EP_RXD, CheckSumLink);
49743 +       EP_ENVELOPE *env = &rxd->RxdMain->Envelope;
49744 +
49745 +       list_del_init (&rxd->CheckSumLink);
49746 +       spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags);
49747 +
49748 +       if (env->CheckSum) {
49749 +           EP_NMD nmd;
49750 +           uint32_t csum; 
49751 +
49752 +           ep_nmd_subset ( &nmd, &rxd->Data, 0, ep_rxd_len (rxd));
49753 +
49754 +           csum = ep_calc_check_sum(subsys->Subsys.Sys, env, &nmd, 1);
49755 +           if ( env->CheckSum  != csum ) {
49756 +               int f;
49757 +       
49758 +
49759 +               printk("Check Sum Error: env(0x%x,0x%x) data(0x%x,0x%x)\n", ((csum >> 16) & 0x7FFF), ((env->CheckSum >> 16) & 0x7FFF), 
49760 +                      (csum & 0xFFFF),  (env->CheckSum & 0xFFFF));
49761 +               printk("Check Sum Error: Sent : NodeId %u Range 0x%x Service %u Version 0x%x Attr 0x%x\n", env->NodeId, env->Range, rxd->Rcvr->Service, env->Version, env->Attr);
49762 +               printk("Check Sum Error: Sent : Xid Generation 0x%x Handle 0x%x Unique 0x%llx\n", env->Xid.Generation, env->Xid.Handle, (long long)env->Xid.Unique);
49763 +               printk("Check Sum Error: Sent : TxdRail 0x%x TxdMain nmd_addr 0x%x  nmd_len %u  nmd_attr 0x%x\n",  env->TxdRail, env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr ); 
49764 +               printk("Check Sum Error: Sent : nFrags %d \n", env->nFrags);
49765 +               for(f=0;f<env->nFrags;f++)
49766 +                   printk("Check Sum Error: Sent (%d): nmd_addr 0x%x   nmd_len %u   nmd_attr 0x%x\n", f,
49767 +                          env->Frags[f].nmd_addr, env->Frags[f].nmd_len, env->Frags[f].nmd_attr);
49768 +               printk("Check Sum Error: Recv : nmd_addr 0x%x   nmd_len %u   nmd_attr 0x%x\n",
49769 +                      nmd.nmd_addr, nmd.nmd_len, nmd.nmd_attr);
49770 +
49771 +           }
49772 +       }
49773 +       ep_rxd_received_now(rxd);
49774 +
49775 +       spin_lock_irqsave (&subsys->CheckSumDescLock, flags);
49776 +    }
49777 +    spin_unlock_irqrestore (&subsys->CheckSumDescLock, flags);
49778 +}
49779 +#endif
49780 +
49781 +/*
49782 + * Local variables:
49783 + * c-file-style: "stroustrup"
49784 + * End:
49785 + */
49786 diff -urN clean/drivers/net/qsnet/ep/epcommsRx.c linux-2.6.9/drivers/net/qsnet/ep/epcommsRx.c
49787 --- clean/drivers/net/qsnet/ep/epcommsRx.c      1969-12-31 19:00:00.000000000 -0500
49788 +++ linux-2.6.9/drivers/net/qsnet/ep/epcommsRx.c        2004-11-30 07:02:06.000000000 -0500
49789 @@ -0,0 +1,1205 @@
49790 +/*
49791 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
49792 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
49793 + *
49794 + *    For licensing information please see the supplied COPYING file
49795 + *
49796 + */
49797 +
49798 +#ident "@(#)$Id: epcommsRx.c,v 1.33 2004/11/30 12:02:06 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
49799 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx.c,v $*/
49800 +
49801 +#include <qsnet/kernel.h>
49802 +#include <elan/kcomm.h>
49803 +#include <elan/epsvc.h>
49804 +#include <elan/epcomms.h>
49805 +
49806 +#include "debug.h"
49807 +
49808 +unsigned int ep_rxd_lowat = 5;
49809 +
49810 +static int
49811 +AllocateRxdBlock (EP_RCVR *rcvr, EP_ATTRIBUTE attr, EP_RXD **rxdp)
49812 +{
49813 +    EP_RXD_BLOCK *blk;
49814 +    EP_RXD       *rxd;
49815 +    EP_RXD_MAIN  *pRxdMain;
49816 +    int                  i;
49817 +    unsigned long flags;
49818 +
49819 +    KMEM_ZALLOC (blk, EP_RXD_BLOCK *, sizeof (EP_RXD_BLOCK), ! (attr & EP_NO_SLEEP));
49820 +
49821 +    if (blk == NULL)
49822 +       return (ENOMEM);
49823 +
49824 +    if ((pRxdMain = ep_shared_alloc_main (rcvr->Subsys->Subsys.Sys, EP_RXD_MAIN_SIZE * EP_NUM_RXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0)
49825 +    {
49826 +       KMEM_FREE (blk, sizeof (EP_RXD_BLOCK));
49827 +       return (ENOMEM);
49828 +    }
49829 +    
49830 +    for (rxd = &blk->Rxd[0], i = 0; i < EP_NUM_RXD_PER_BLOCK; i++, rxd++)
49831 +    {
49832 +       rxd->Rcvr        = rcvr;
49833 +       rxd->RxdMain     = pRxdMain;
49834 +
49835 +       ep_nmd_subset (&rxd->NmdMain, &blk->NmdMain, (i * EP_RXD_MAIN_SIZE), EP_RXD_MAIN_SIZE);
49836 +
49837 +       /* move onto next descriptor */
49838 +       pRxdMain = (EP_RXD_MAIN *) ((unsigned long) pRxdMain + EP_RXD_MAIN_SIZE);
49839 +    }
49840 +
49841 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
49842 +
49843 +    list_add  (&blk->Link, &rcvr->DescBlockList);
49844 +
49845 +    rcvr->TotalDescCount += EP_NUM_RXD_PER_BLOCK;
49846 +
49847 +    for (i = rxdp ? 1 : 0; i < EP_NUM_RXD_PER_BLOCK; i++)
49848 +    {
49849 +       
49850 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
49851 +       INIT_LIST_HEAD (&blk->Rxd[i].CheckSumLink);
49852 +#endif
49853 +
49854 +       list_add (&blk->Rxd[i].Link, &rcvr->FreeDescList);
49855 +       
49856 +       rcvr->FreeDescCount++;
49857 +
49858 +       if (rcvr->FreeDescWanted)
49859 +       {
49860 +           rcvr->FreeDescWanted--;
49861 +           kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock);
49862 +       }
49863 +    }
49864 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
49865 +    
49866 +    if (rxdp)
49867 +    {
49868 +
49869 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
49870 +       INIT_LIST_HEAD (&blk->Rxd[0].CheckSumLink);
49871 +#endif
49872 +              
49873 +       *rxdp = &blk->Rxd[0];
49874 +    }
49875 +    return (ESUCCESS);
49876 +}
49877 +
49878 +static void
49879 +FreeRxdBlock (EP_RCVR *rcvr, EP_RXD_BLOCK *blk)
49880 +{
49881 +    unsigned long flags;
49882 +
49883 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
49884 +
49885 +    list_del (&blk->Link);
49886 +
49887 +    rcvr->TotalDescCount -= EP_NUM_RXD_PER_BLOCK;
49888 +    rcvr->FreeDescCount -= EP_NUM_RXD_PER_BLOCK;
49889 +
49890 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
49891 +
49892 +    ep_shared_free_main (rcvr->Subsys->Subsys.Sys, &blk->NmdMain);
49893 +    KMEM_FREE (blk, sizeof (EP_RXD_BLOCK));
49894 +}
49895 +
49896 +static EP_RXD *
49897 +GetRxd (EP_RCVR *rcvr, EP_ATTRIBUTE attr)
49898 +{
49899 +    EP_RXD *rxd;
49900 +    unsigned long flags;
49901 +    int low_on_rxds;
49902 +
49903 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
49904 +
49905 +    while (list_empty (&rcvr->FreeDescList))
49906 +    {
49907 +       if (! (attr & EP_NO_ALLOC))
49908 +       {
49909 +           spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
49910 +
49911 +           if (AllocateRxdBlock (rcvr, attr, &rxd) == ESUCCESS)
49912 +               return (rxd);
49913 +
49914 +           spin_lock_irqsave (&rcvr->FreeDescLock, flags);
49915 +       }
49916 +
49917 +       if (attr & EP_NO_SLEEP)
49918 +       {
49919 +           IncrStat (rcvr->Subsys, NoFreeRxds);
49920 +           spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
49921 +
49922 +           ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
49923 +           return (NULL);
49924 +       }
49925 +
49926 +       rcvr->FreeDescWanted++;
49927 +       kcondvar_wait (&rcvr->FreeDescSleep, &rcvr->FreeDescLock, &flags);
49928 +    }
49929 +
49930 +    rxd = list_entry (rcvr->FreeDescList.next, EP_RXD, Link);
49931 +
49932 +    list_del (&rxd->Link);
49933 +
49934 +    /* Wakeup the descriptor primer thread if there's not many left */
49935 +    low_on_rxds = (--rcvr->FreeDescCount < ep_rxd_lowat);
49936 +
49937 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
49938 +
49939 +    if (low_on_rxds)
49940 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
49941 +
49942 +    return (rxd);
49943 +}
49944 +
49945 +static void
49946 +FreeRxd (EP_RCVR *rcvr, EP_RXD *rxd)
49947 +{
49948 +    unsigned long flags;
49949 +
49950 +    ASSERT (EP_XID_INVALID(rxd->MsgXid));
49951 +
49952 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
49953 +
49954 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
49955 +    ASSERT(list_empty(&rxd->CheckSumLink));
49956 +#endif
49957 +   
49958 +    list_add (&rxd->Link, &rcvr->FreeDescList);
49959 +
49960 +    rcvr->FreeDescCount++;
49961 +
49962 +    if (rcvr->FreeDescWanted)                                  /* someone waiting for a receive */
49963 +    {                                                          /* descriptor, so wake them up */
49964 +       rcvr->FreeDescWanted--;
49965 +       kcondvar_wakeupone (&rcvr->FreeDescSleep, &rcvr->FreeDescLock);
49966 +    }
49967 +    
49968 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
49969 +}
49970 +
49971 +int
49972 +ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr)
49973 +{
49974 +    EP_RCVR_RAIL *rcvrRail;
49975 +    EP_RXD       *rxd;
49976 +    int           rnum;
49977 +    unsigned long flags;
49978 +
49979 +    if ((rxd = GetRxd (rcvr, attr)) == NULL)
49980 +       return (ENOMEM);
49981 +
49982 +    rxd->Handler      = handler;
49983 +    rxd->Arg          = arg;
49984 +    rxd->Data         = *nmd;
49985 +    rxd->RxdMain->Len = EP_RXD_PENDING;
49986 +    
49987 +    spin_lock_irqsave (&rcvr->Lock, flags);
49988 +
49989 +    list_add_tail (&rxd->Link, &rcvr->ActiveDescList);
49990 +    
49991 +    if (EP_IS_PREFRAIL_SET(attr))
49992 +       rnum = EP_ATTR2PREFRAIL(attr);
49993 +    else 
49994 +       rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd));
49995 +
49996 +    if (rnum < 0 || !(EP_NMD_RAILMASK(nmd) & EP_RAIL2RAILMASK(rnum) & rcvr->RailMask))
49997 +       rcvrRail = NULL;
49998 +    else
49999 +       rcvrRail = rcvr->Rails[rnum];
50000 +
50001 +    EPRINTF7 (DBG_RCVR,"ep_queue_receive: rxd=%p svc %d nmd=%08x,%d,%x rnum=%d rcvrRail=%p\n",
50002 +             rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, rnum, rcvrRail);
50003 +
50004 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
50005 +
50006 +    if (rcvrRail == NULL || !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail))
50007 +    {
50008 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
50009 +
50010 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50011 +    }
50012 +
50013 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50014 +
50015 +    return (ESUCCESS);
50016 +}
50017 +
50018 +void
50019 +ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr)
50020 +{
50021 +    EP_RCVR      *rcvr = rxd->Rcvr;
50022 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
50023 +    int           rnum = ep_pickRail(EP_NMD_RAILMASK(&rxd->Data));
50024 +    EP_RCVR_RAIL *rcvrRail;
50025 +    unsigned long flags;
50026 +
50027 +    ASSERT (rxd->RxdRail == NULL);
50028 +
50029 +    EPRINTF5 (DBG_RCVR,"ep_requeue_receive: rxd=%p svc %d nmd=%08x,%d,%x\n", 
50030 +             rxd, rcvr->Service, nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
50031 +
50032 +    rxd->Handler      = handler;
50033 +    rxd->Arg          = arg;
50034 +    rxd->Data         = *nmd;
50035 +    rxd->RxdMain->Len = EP_RXD_PENDING;
50036 +    
50037 +    spin_lock_irqsave (&rcvr->Lock, flags);
50038 +    
50039 +    list_add_tail (&rxd->Link, &rcvr->ActiveDescList);
50040 +
50041 +    /*
50042 +     * Rail selection: if they've asked for a particular rail, then use it, otherwise if
50043 +     *                 the rail it was last received on is mapped for the nmd and is available
50044 +     *                 then use that one, otherwise pick one that is mapped by the nmd.
50045 +     */
50046 +    if (EP_IS_PREFRAIL_SET(attr))
50047 +       rnum = EP_ATTR2PREFRAIL(attr);
50048 +    
50049 +    if (rnum < 0 || ! (EP_RAIL2RAILMASK (rnum) & EP_NMD_RAILMASK(nmd) & ep_rcvr_availrails (rcvr)))
50050 +       rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(nmd));
50051 +
50052 +    if (rnum < 0)
50053 +       rcvrRail = NULL;
50054 +    else
50055 +    {
50056 +       rcvrRail = rcvr->Rails[rnum];
50057 +
50058 +       if (! (EP_NMD_RAILMASK(&rxd->Data) & EP_RAIL2RAILMASK(rnum)) && ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) < 0)
50059 +           rcvrRail = NULL;
50060 +    }
50061 +
50062 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
50063 +
50064 +    if (rcvrRail == NULL || !EP_RCVR_OP(rcvrRail, QueueRxd) (rxd, rcvrRail))
50065 +    {
50066 +       EPRINTF1 (DBG_RCVR, "ep_requeue_receive: rcvrRail=%p - setting unbound\n", rcvrRail);
50067 +
50068 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
50069 +
50070 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50071 +    }
50072 +
50073 +    if (rcvr->CleanupWaiting)
50074 +       kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
50075 +    rcvr->CleanupWaiting = 0;
50076 +
50077 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50078 +}
50079 +
50080 +void
50081 +
50082 +ep_complete_receive (EP_RXD *rxd)
50083 +{
50084 +    EP_RCVR *rcvr = rxd->Rcvr;
50085 +    unsigned long flags;
50086 +
50087 +    ASSERT (rxd->RxdRail == NULL && rxd->State == EP_RXD_COMPLETED);
50088 +
50089 +    FreeRxd (rcvr, rxd);
50090 +
50091 +    /* if we're waiting for cleanup, then wake them up */
50092 +    spin_lock_irqsave (&rcvr->Lock, flags);
50093 +    if (rcvr->CleanupWaiting)
50094 +       kcondvar_wakeupall (&rcvr->CleanupSleep, &rcvr->Lock);
50095 +    rcvr->CleanupWaiting = 0;
50096 +    spin_unlock_irqrestore (&rcvr->Lock, flags);   
50097 +}
50098 +
50099 +int
50100 +ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *local, EP_NMD *remote, int nFrags)
50101 +{
50102 +    EP_RCVR      *rcvr = rxd->Rcvr;
50103 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
50104 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
50105 +    unsigned long flags;
50106 +
50107 +    spin_lock_irqsave (&rcvr->Lock, flags);
50108 +    
50109 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
50110 +    {
50111 +       EPRINTF2 (DBG_RCVR, "ep_rpc_put: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
50112 +       
50113 +       /* rxd no longer on active list - just free it */
50114 +       /* off and return an error */
50115 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50116 +       
50117 +       return EP_CONN_RESET;
50118 +    }
50119 +    else
50120 +    {
50121 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
50122 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
50123 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
50124 +       EP_RAIL          *rail      = commsRail->Rail;
50125 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
50126 +       int               i;
50127 +       
50128 +       /* Attempt to ensure that the local nmds are mapped */
50129 +       for (i = 0; i < nFrags; i++)
50130 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
50131 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
50132 +    
50133 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
50134 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
50135 +       {
50136 +           rxd->State = EP_RXD_PUT_ACTIVE;
50137 +
50138 +           EP_RCVR_OP(rcvrRail, RpcPut) (rxd, local, remote, nFrags);
50139 +       }
50140 +       else
50141 +       {
50142 +           /* RPC completion cannot progress - either node is no longer connected on this 
50143 +            * rail or some of the source/destination NMDs are not mapped on this rail.
50144 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
50145 +           EPRINTF4 (DBG_RCVR, "%s: ep_rpc_put: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd,
50146 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
50147 +
50148 +           rxd->State = EP_RXD_PUT_STALLED;
50149 +
50150 +           if (nodeRail->State == EP_NODE_CONNECTED)
50151 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50152 +       }
50153 +
50154 +       /* install the handler */
50155 +       rxd->Handler = handler;
50156 +       rxd->Arg     = arg;
50157 +       
50158 +       /* store the arguments */
50159 +       rxd->nFrags = nFrags;
50160 +       for (i = 0; i < nFrags; i++)
50161 +       {
50162 +           rxd->Local[i]  = local[i];
50163 +           rxd->Remote[i] = remote[i];
50164 +       }
50165 +    }
50166 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50167 +
50168 +    return EP_SUCCESS;
50169 +}
50170 +
50171 +int
50172 +ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *remote, EP_NMD *local, int nFrags)
50173 +{
50174 +    EP_RCVR      *rcvr = rxd->Rcvr;
50175 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
50176 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
50177 +    unsigned long flags;
50178 +
50179 +    spin_lock_irqsave (&rcvr->Lock, flags);
50180 +    
50181 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
50182 +    {
50183 +       EPRINTF2 (DBG_RCVR, "ep_rpc_get: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
50184 +       
50185 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50186 +
50187 +       return EP_CONN_RESET;
50188 +    }
50189 +    else
50190 +    {
50191 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
50192 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
50193 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
50194 +       EP_RAIL          *rail      = commsRail->Rail;
50195 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
50196 +       int               i;
50197 +       
50198 +       /* Attempt to ensure that the local nmds are mapped */
50199 +       for (i = 0; i < nFrags; i++)
50200 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
50201 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
50202 +
50203 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
50204 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
50205 +       {
50206 +           rxd->State = EP_RXD_GET_ACTIVE;
50207 +
50208 +           EP_RCVR_OP (rcvrRail, RpcGet) (rxd, local, remote, nFrags);
50209 +       }
50210 +       else
50211 +       {
50212 +           /* RPC completion cannot progress - either node is no longer connected on this 
50213 +            * rail or some of the source/destination NMDs are not mapped on this rail.
50214 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
50215 +           EPRINTF4 (DBG_RCVR, "%s: ep_rpc_get: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, 
50216 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
50217 +           
50218 +           rxd->State = EP_RXD_GET_STALLED;
50219 +
50220 +           if (nodeRail->State == EP_NODE_CONNECTED)
50221 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50222 +       }
50223 +
50224 +       /* install the handler */
50225 +       rxd->Handler = handler;
50226 +       rxd->Arg     = arg;
50227 +       
50228 +       /* store the arguments */
50229 +       rxd->nFrags = nFrags;
50230 +       for (i = 0; i < nFrags; i++)
50231 +       {
50232 +           rxd->Local[i]  = local[i];
50233 +           rxd->Remote[i] = remote[i];
50234 +       }
50235 +    }
50236 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50237 +    
50238 +    return EP_SUCCESS;
50239 +}
50240 +
50241 +int
50242 +ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, EP_NMD *local, EP_NMD *remote, int nFrags)
50243 +{
50244 +    EP_RCVR      *rcvr = rxd->Rcvr;
50245 +    EP_SYS       *sys  = rcvr->Subsys->Subsys.Sys;
50246 +    EP_ENVELOPE  *env  = &rxd->RxdMain->Envelope;
50247 +    unsigned long flags;
50248 +
50249 +    spin_lock_irqsave (&rcvr->Lock, flags);
50250 +
50251 +    if (rxd->State == EP_RXD_BEEN_ABORTED)
50252 +    {
50253 +       EPRINTF2 (DBG_RCVR, "ep_complete_rpc: rcvr %p rxd %p completed because no rails available\n", rcvr, rxd);
50254 +       
50255 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
50256 +       return EP_CONN_RESET;
50257 +    }
50258 +    else
50259 +    {
50260 +       EP_RXD_RAIL      *rxdRail   = rxd->RxdRail;
50261 +       EP_RCVR_RAIL     *rcvrRail  = rxdRail->RcvrRail;
50262 +       EP_COMMS_RAIL    *commsRail = rcvrRail->CommsRail;
50263 +       EP_RAIL          *rail      = commsRail->Rail;
50264 +       EP_NODE_RAIL     *nodeRail  = &rail->Nodes[env->NodeId];
50265 +       int               i;
50266 +
50267 +       if (blk == NULL)
50268 +           bzero (&rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK));
50269 +       else
50270 +           bcopy (blk, &rxd->RxdMain->StatusBlk, sizeof (EP_STATUSBLK));
50271 +
50272 +       /* Attempt to ensure that the local nmds are mapped */
50273 +       for (i = 0; i < nFrags; i++)
50274 +           if (! (EP_NMD_RAILMASK(&local[i]) & EP_RAIL2RAILMASK(rail->Number)))
50275 +               ep_nmd_map_rails (sys, &local[i], EP_RAIL2RAILMASK(rail->Number));
50276 +
50277 +       if (nodeRail->State == EP_NODE_CONNECTED &&                                                                     /* rail is connected */
50278 +           (ep_nmd2railmask (local, nFrags) & ep_nmd2railmask (remote, nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* and NMDs valid for it */
50279 +       {
50280 +           rxd->State = EP_RXD_COMPLETE_ACTIVE;
50281 +
50282 +           EP_RCVR_OP (rcvrRail, RpcComplete) (rxd, local, remote, nFrags);
50283 +       }
50284 +       else
50285 +       {
50286 +           /* RPC completion cannot progress - either node is no longer connected on this 
50287 +            * rail or some of the source/destination NMDs are not mapped on this rail.
50288 +            * Save the NMDs into the RXD and schedule the thread to request mappings */
50289 +           EPRINTF4 (DBG_RCVR, "%s: ep_complete_rpc: rcvr %p rxd %p %s\n", rail->Name, rcvr, rxd, 
50290 +                     (nodeRail->State == EP_NODE_CONNECTED) ? "NMDs not valid on this rail" : "no longer connected on this rail");
50291 +
50292 +           rxd->State = EP_RXD_COMPLETE_STALLED;
50293 +
50294 +           if (nodeRail->State == EP_NODE_CONNECTED)
50295 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50296 +       }
50297 +
50298 +       /* install the handler */
50299 +       rxd->Handler = handler;
50300 +       rxd->Arg     = arg;
50301 +       
50302 +       /* store the arguments */
50303 +       rxd->nFrags = nFrags;
50304 +       for (i = 0; i < nFrags; i++)
50305 +       {
50306 +           rxd->Local[i]  = local[i];
50307 +           rxd->Remote[i] = remote[i];
50308 +       }
50309 +    }
50310 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50311 +       
50312 +    return (ESUCCESS);
50313 +}
50314 +
50315 +/* functions for accessing fields of rxds */
50316 +void            *ep_rxd_arg(EP_RXD *rxd)               { return (rxd->Arg); }
50317 +int              ep_rxd_len(EP_RXD *rxd)               { return (rxd->RxdMain->Len); }
50318 +EP_STATUS       ep_rxd_status(EP_RXD *rxd)             { return (rxd->RxdMain->Len < 0 ? rxd->RxdMain->Len : EP_SUCCESS); }
50319 +int              ep_rxd_isrpc(EP_RXD *rxd)             { return (EP_IS_RPC(rxd->RxdMain->Envelope.Attr) != 0); }
50320 +EP_ENVELOPE     *ep_rxd_envelope(EP_RXD *rxd)          { return (&rxd->RxdMain->Envelope); }
50321 +EP_PAYLOAD      *ep_rxd_payload(EP_RXD *rxd)           { return (EP_HAS_PAYLOAD(rxd->RxdMain->Envelope.Attr) ? &rxd->RxdMain->Payload : NULL); }
50322 +int              ep_rxd_node(EP_RXD *rxd)              { return (rxd->RxdMain->Envelope.NodeId); }
50323 +EP_STATUSBLK    *ep_rxd_statusblk(EP_RXD *rxd)                 { return (&rxd->RxdMain->StatusBlk); }
50324 +EP_RAILMASK      ep_rxd_railmask(EP_RXD *rxd)          { return (rxd->Data.nmd_attr); }
50325 +
50326 +static void
50327 +ProcessNmdMapResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
50328 +{
50329 +    EP_RXD_RAIL  *rxdRail  = rxd->RxdRail;
50330 +    EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
50331 +    EP_RAIL      *rail     = rcvrRail->CommsRail->Rail;
50332 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[rxd->RxdMain->Envelope.NodeId];
50333 +    int           i;
50334 +
50335 +    ASSERT (msg->Body.MapNmd.nFrags == rxd->nFrags);
50336 +    
50337 +    for (i = 0; i < rxd->nFrags; i++)
50338 +       rxd->Remote[i] = msg->Body.MapNmd.Nmd[i];
50339 +    
50340 +    if (nodeRail->State == EP_NODE_CONNECTED &&        /* node is still connected on this rail */
50341 +       (ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))     /* NMDs are now valid for this rail */
50342 +    {
50343 +       switch (rxd->State)
50344 +       {
50345 +       case EP_RXD_PUT_STALLED:
50346 +           rxd->State = EP_RXD_PUT_ACTIVE;
50347 +
50348 +           EP_RCVR_OP(rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
50349 +           break;
50350 +
50351 +       case EP_RXD_GET_STALLED:
50352 +           rxd->State = EP_RXD_GET_ACTIVE;
50353 +
50354 +           EP_RCVR_OP(rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
50355 +           break;
50356 +           
50357 +       case EP_RXD_COMPLETE_STALLED:
50358 +           rxd->State = EP_RXD_COMPLETE_ACTIVE;
50359 +
50360 +           EP_RCVR_OP(rcvrRail, RpcComplete) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
50361 +           break;
50362 +
50363 +       default:
50364 +           panic ("ProcessNmdMapResponse: XID match but rxd in invalid state\n");
50365 +           break;
50366 +       }
50367 +
50368 +       rxd->NextRunTime = 0;
50369 +    }
50370 +    else
50371 +       ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr, rxd);
50372 +}
50373 +
50374 +static void
50375 +ProcessFailoverResponse (EP_RCVR *rcvr, EP_RXD *rxd, EP_MANAGER_MSG *msg)
50376 +{
50377 +    /* XXXX - TBD */
50378 +#ifdef NOTYET
50379 +    EP_COMMS_SUBSYS *subsys   = rcvr->Subsys;
50380 +    EP_RXD_RAIL     *rxdRail  = rxd->RxdRail;
50381 +    EP_RCVR_RAIL    *rcvrRail = rxdRail->RcvrRail;
50382 +    EP_RAIL         *rail     = rcvrRail->CommsRail->Rail;
50383 +    EP_RCVR_RAIL    *nRcvrRail;
50384 +    EP_RXD_RAIL     *nRxdRail;
50385 +
50386 +    ASSERT (rxd->RxdMain->Envelope.Attr & EP_RPC);
50387 +
50388 +    EPRINTF6 (DBG_RCVR, "ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p Xid=%016llx state %x.%x - txd on rail %d\n", rcvr, rxd, 
50389 +             rxd->MsgXid.Unique, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, msg->Body.FailoverTxd.Rail);
50390 +
50391 +    if ((nRcvrRail = rcvr->Rails[msg->Body.FailoverTxd.Rail]) == NULL ||
50392 +       (nRcvrRail->Rcvr->RailMask & EP_RAIL2RAILMASK (rail->Number)) == NULL)
50393 +    {
50394 +       ep_debugf (DBG_MANAGER, "%s: ep_rcvr_xid_msg_handler: rcvr=%p rxd=%p - still cannot proceed\n", rail->Name, rcvr,rxd);
50395 +       return;
50396 +    }
50397 +
50398 +
50399 +    nRxdRail = EP_RCVR_OP (nrcvrRail, GetRxd) (rcvr, nRcvrRail);
50400 +
50401 +
50402 +    /* If the RPC was in progress, then rollback and mark it as flagged, 
50403 +     * this will then get treated as though the NMDs were not mapped
50404 +     * for the rail when the user initiated the operation.
50405 +     */
50406 +    switch (rxdRail->RxdMain->DataEvent)
50407 +    {
50408 +    case EP_EVENT_ACTIVE|EP_RXD_PHASE_PUT:
50409 +    case EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT:
50410 +       ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
50411 +               rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
50412 +       
50413 +       nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_PUT;
50414 +       nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
50415 +       break;
50416 +
50417 +    case EP_EVENT_ACTIVE|EP_RXD_PHASE_GET:
50418 +    case EP_EVENT_FLAGGED|EP_RXD_PHASE_GET:
50419 +       ASSERT (rxdRail->RxdMain->DoneEvent == EP_EVENT_PRIVATE ||
50420 +               rxdRail->RxdMain->DoneEvent == EP_EVENT_PENDING);
50421 +       
50422 +       nRxdRail->RxdMain->DataEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_GET;
50423 +       nRxdRail->RxdMain->DoneEvent = EP_EVENT_PENDING;
50424 +       break;
50425 +
50426 +    case EP_EVENT_PRIVATE:
50427 +       switch (rxdRail->RxdMain->DoneEvent)
50428 +       {
50429 +       case EP_EVENT_ACTIVE|EP_RXD_PHASE_COMPLETE:
50430 +       case EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE:
50431 +           nRxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
50432 +           nRxdRail->RxdMain->DoneEvent = EP_EVENT_FLAGGED|EP_RXD_PHASE_COMPLETE;
50433 +           break;
50434 +
50435 +       case EP_EVENT_PENDING:
50436 +           break;
50437 +
50438 +       default:
50439 +           panic ("ep_rcvr_xid_msg_handler: rxd in invalid state\n");
50440 +       }
50441 +       break;
50442 +
50443 +    default:
50444 +       panic ("ep_rcvr_xid_msg_handler: rxd in invalid state\n");
50445 +    }
50446 +    
50447 +    UnbindRxdFromRail (rxd, rxdRail);
50448 +
50449 +    /* Mark rxdRail as no longer active */
50450 +    rxdRail->RxdMain->DataEvent = EP_EVENT_PRIVATE;
50451 +    rxdRail->RxdMain->DoneEvent = EP_EVENT_PRIVATE;
50452 +
50453 +    sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
50454 +    sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
50455 +    
50456 +    FreeRxdRail (rcvrRail, rxdRail);
50457 +
50458 +    BindRxdToRail (rxd, nRxdRail);
50459 +
50460 +    ep_kthread_schedule (&subsys->Thread, lbolt);
50461 +#endif
50462 +}
50463 +
50464 +void
50465 +ep_rcvr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg)
50466 +{
50467 +    EP_RCVR          *rcvr = (EP_RCVR *) arg;
50468 +    struct list_head *el;
50469 +    unsigned long     flags;
50470 +
50471 +    spin_lock_irqsave (&rcvr->Lock, flags);
50472 +    list_for_each (el, &rcvr->ActiveDescList) {
50473 +       EP_RXD *rxd = list_entry (el,EP_RXD, Link);
50474 +
50475 +       if (EP_XIDS_MATCH (msg->Hdr.Xid, rxd->MsgXid))
50476 +       {
50477 +           EP_INVALIDATE_XID (rxd->MsgXid);
50478 +
50479 +           switch (msg->Hdr.Type)
50480 +           {
50481 +           case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE:
50482 +               ProcessNmdMapResponse (rcvr, rxd, msg);
50483 +               break;
50484 +
50485 +           case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE:
50486 +               ProcessFailoverResponse (rcvr, rxd, msg);
50487 +               break;
50488 +
50489 +           default:
50490 +               panic ("ep_rcvr_xid_msg_handler: XID match but invalid message type\n");
50491 +           }
50492 +       }
50493 +    }
50494 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50495 +}
50496 +
50497 +
50498 +EP_RCVR *
50499 +ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvs)
50500 +{
50501 +    EP_COMMS_SUBSYS  *subsys;
50502 +    EP_RCVR          *rcvr;
50503 +    struct list_head *el;
50504 +    extern int portals_envelopes;
50505 +
50506 +    if (portals_envelopes && (svc == EP_MSG_SVC_PORTALS_SMALL || svc == EP_MSG_SVC_PORTALS_LARGE))
50507 +    {
50508 +       printk ("ep: use %d envelopes rather than %d for portals %s message service\n", sys->Position.pos_nodes * 16, nenvs,
50509 +               svc == EP_MSG_SVC_PORTALS_SMALL ? "small" : "large");
50510 +
50511 +       nenvs = portals_envelopes;
50512 +    }
50513 +
50514 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL)
50515 +       return (NULL);
50516 +
50517 +    KMEM_ZALLOC (rcvr, EP_RCVR *, sizeof (EP_RCVR), 1);
50518 +
50519 +    if (rcvr == NULL)
50520 +       return (NULL);
50521 +
50522 +    rcvr->Subsys            = subsys;
50523 +    rcvr->Service           = svc;
50524 +    rcvr->InputQueueEntries = nenvs;
50525 +    rcvr->FreeDescCount     = 0;
50526 +    rcvr->TotalDescCount    = 0;
50527 +    rcvr->ForwardRxdCount   = 0;
50528 +
50529 +    spin_lock_init (&rcvr->Lock);
50530 +    INIT_LIST_HEAD (&rcvr->ActiveDescList);
50531 +
50532 +    kcondvar_init (&rcvr->CleanupSleep);
50533 +    kcondvar_init (&rcvr->FreeDescSleep);
50534 +    spin_lock_init (&rcvr->FreeDescLock);
50535 +    INIT_LIST_HEAD (&rcvr->FreeDescList);
50536 +    INIT_LIST_HEAD (&rcvr->DescBlockList);
50537 +
50538 +    ep_xid_cache_init (sys, &rcvr->XidCache);
50539 +
50540 +    rcvr->XidCache.MessageHandler = ep_rcvr_xid_msg_handler;
50541 +    rcvr->XidCache.Arg            = rcvr;
50542 +
50543 +    kmutex_lock (&subsys->Lock);
50544 +    /* See if this service is already in use */
50545 +    list_for_each (el, &subsys->Receivers) {
50546 +       EP_RCVR *rcvr = list_entry (el, EP_RCVR, Link);
50547 +
50548 +       if (rcvr->Service == svc)
50549 +       {
50550 +           KMEM_FREE (rcvr, sizeof (EP_RCVR));
50551 +           kmutex_unlock (&subsys->Lock);   
50552 +           return NULL;
50553 +       }
50554 +    }
50555 +    
50556 +    
50557 +    list_add_tail (&rcvr->Link, &subsys->Receivers);
50558 +
50559 +    ep_procfs_rcvr_add(rcvr);
50560 +
50561 +    /* Now add all rails which are already started */
50562 +    list_for_each (el, &subsys->Rails) { 
50563 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
50564 +
50565 +       EP_RAIL_OP (commsRail, Rcvr.AddRail) (rcvr, commsRail);
50566 +    }
50567 +    kmutex_unlock (&subsys->Lock);   
50568 +
50569 +    ep_mod_inc_usecount();
50570 +
50571 +    return (rcvr);
50572 +}
50573 +
50574 +void
50575 +ep_free_rcvr (EP_RCVR *rcvr)
50576 +{
50577 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
50578 +    EP_SYS           *sys    = subsys->Subsys.Sys;
50579 +    struct list_head  list;
50580 +    struct list_head *el,*nel;
50581 +    unsigned long flags;
50582 +    
50583 +    kmutex_lock (&subsys->Lock);
50584 +    list_for_each (el, &subsys->Rails) { 
50585 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
50586 +       
50587 +       EP_RAIL_OP (commsRail, Rcvr.DelRail) (rcvr, commsRail);
50588 +    }
50589 +
50590 +    ep_procfs_rcvr_del(rcvr);
50591 +
50592 +    list_del (&rcvr->Link);
50593 +    kmutex_unlock (&subsys->Lock);
50594 +
50595 +    INIT_LIST_HEAD (&list);
50596 +
50597 +    /* abort all rxds - should not be bound to a rail */
50598 +    spin_lock_irqsave (&rcvr->Lock, flags);   
50599 +    for (;;)
50600 +    {
50601 +       if (! list_empty (&rcvr->ActiveDescList))
50602 +       {
50603 +           list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
50604 +               EP_RXD *rxd = list_entry (el, EP_RXD, Link);
50605 +               
50606 +               ASSERT (rxd->RxdRail == NULL);
50607 +               ASSERT (rxd->RxdMain->Len == EP_RXD_PENDING);
50608 +               
50609 +               rxd->State = EP_RXD_COMPLETED;
50610 +               rxd->RxdMain->Len = EP_SHUTDOWN;
50611 +               
50612 +               list_del (&rxd->Link);
50613 +               list_add_tail (&rxd->Link, &list);
50614 +           }
50615 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
50616 +           
50617 +           while (! list_empty (&list))
50618 +           {
50619 +               EP_RXD *rxd = list_entry (list.next, EP_RXD, Link);
50620 +               
50621 +               list_del (&rxd->Link);
50622 +               
50623 +               if (rxd->Handler) 
50624 +                   rxd->Handler (rxd);
50625 +           }
50626 +           spin_lock_irqsave (&rcvr->Lock, flags);   
50627 +           continue;
50628 +       }
50629 +
50630 +       if (rcvr->FreeDescCount == rcvr->TotalDescCount)
50631 +           break;
50632 +
50633 +       rcvr->CleanupWaiting++;
50634 +       kcondvar_wait (&rcvr->CleanupSleep, &rcvr->Lock, &flags);
50635 +    }
50636 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50637 +
50638 +    /* must all be in free list */
50639 +    ASSERT( rcvr->FreeDescCount ==  rcvr->TotalDescCount);
50640 +
50641 +    while (! list_empty(& rcvr->DescBlockList) )
50642 +       FreeRxdBlock (rcvr, list_entry (rcvr->DescBlockList.next, EP_RXD_BLOCK, Link));
50643 +
50644 +    /* had better be all gone now */
50645 +    ASSERT((rcvr->FreeDescCount == 0) && (rcvr->TotalDescCount == 0));
50646 +
50647 +    ep_xid_cache_destroy (sys, &rcvr->XidCache);
50648 +
50649 +    spin_lock_destroy (&rcvr->Lock);
50650 +    KMEM_FREE (rcvr, sizeof (EP_RCVR));
50651 +
50652 +    ep_mod_dec_usecount();
50653 +}
50654 +
50655 +EP_RXD *
50656 +StealRxdFromOtherRail (EP_RCVR *rcvr)
50657 +{
50658 +    EP_RXD          *rxd;
50659 +    int               i;
50660 +       
50661 +    /* looking at the the rcvr railmask to find a rail to try to steal rxd from */
50662 +    for (i = 0; i < EP_MAX_RAILS; i++) 
50663 +       if (rcvr->RailMask & (1 << i) ) 
50664 +           if ((rxd = EP_RCVR_OP (rcvr->Rails[i], StealRxd) (rcvr->Rails[i])) != NULL)
50665 +               return rxd;
50666 +
50667 +    return NULL;
50668 +}
50669 +
50670 +long
50671 +CheckUnboundRxd (EP_RCVR *rcvr, EP_RXD *rxd, long nextRunTime)
50672 +{
50673 +    EP_SYS       *sys = rcvr->Subsys->Subsys.Sys;
50674 +    EP_RCVR_RAIL *rcvrRail;
50675 +    int           rnum;
50676 +    
50677 +    if ((rnum = ep_rcvr_prefrail (rcvr, EP_NMD_RAILMASK(&rxd->Data))) < 0)
50678 +       rnum = ep_rcvr_prefrail (rcvr, ep_rcvr_availrails (rcvr));
50679 +    
50680 +    if ( rnum < 0 )    {
50681 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
50682 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
50683 +       
50684 +       return (nextRunTime);
50685 +    }
50686 +
50687 +    ASSERT ( rnum >= 0 );
50688 +
50689 +    rcvrRail = rcvr->Rails[rnum];
50690 +
50691 +    ASSERT ( rcvrRail != NULL);
50692 +
50693 +    rxd->State = EP_RXD_RECEIVE_ACTIVE;
50694 +
50695 +    if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rnum)) &&                   /* not mapped already and */
50696 +        ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rnum)) == 0) ||            /* failed mapping, or */
50697 +       !EP_RCVR_OP (rcvrRail, QueueRxd) (rxd, rcvrRail))                               /* failed to queue */
50698 +    {
50699 +       ASSERT (rxd->RxdRail == NULL);
50700 +
50701 +       EPRINTF4 (DBG_RCVR,"CheckUnboundRxd: rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n", rcvr, rxd, rnum, rcvrRail);
50702 +
50703 +       rxd->State = EP_RXD_RECEIVE_UNBOUND;
50704 +       
50705 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
50706 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
50707 +    }
50708 +
50709 +    return (nextRunTime);
50710 +}
50711 +
50712 +int
50713 +CheckRxdNmdsMapped (EP_RCVR *rcvr, EP_RXD *rxd)
50714 +{
50715 +    EP_RXD_RAIL *rxdRail = rxd->RxdRail;
50716 +    EP_RXD_MAIN *rxdMain = rxd->RxdMain;
50717 +    EP_ENVELOPE *env     = &rxdMain->Envelope;
50718 +    EP_SYS      *sys     = rcvr->Subsys->Subsys.Sys;
50719 +    EP_RAIL     *rail    = rxdRail->RcvrRail->CommsRail->Rail;
50720 +    int                 i;
50721 +
50722 +    /* Try and map the local NMDs before checking to see if we can proceed */
50723 +    if (! (ep_nmd2railmask (rxd->Local, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))
50724 +    {
50725 +       EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Local NMDs not mapped\n", rail->Name, rcvr, rxd);
50726 +       
50727 +       for (i = 0; i < rxd->nFrags; i++)
50728 +           if (! (EP_NMD_RAILMASK(&rxd->Local[i]) & EP_RAIL2RAILMASK(rail->Number)))
50729 +               if (ep_nmd_map_rails (sys, &rxd->Local[i], EP_RAIL2RAILMASK(rail->Number)))
50730 +                   rxd->NextRunTime = lbolt + RESOURCE_RETRY_TIME;
50731 +    }
50732 +    
50733 +    /* Try and map remote NMDs if they are not valid for this rail */
50734 +    if (! (ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)))
50735 +    {
50736 +       EP_MANAGER_MSG_BODY msgBody;
50737 +
50738 +       EPRINTF3 (DBG_MAPNMD, "%s: rcvr=%p rxd=%p RPC Remote NMDs not mapped\n", rail->Name, rcvr, rxd);
50739 +
50740 +       if (EP_XID_INVALID(rxd->MsgXid))
50741 +           rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
50742 +
50743 +       msgBody.MapNmd.nFrags   = rxd->nFrags;
50744 +       msgBody.MapNmd.Railmask = EP_RAIL2RAILMASK (rail->Number);
50745 +       for (i = 0; i < rxd->nFrags; i++)
50746 +           msgBody.MapNmd.Nmd[i] = rxd->Remote[i];
50747 +
50748 +       if (ep_send_message (rail, env->NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST, rxd->MsgXid, &msgBody) == 0)
50749 +           rxd->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
50750 +       else
50751 +           rxd->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
50752 +
50753 +       return 0;
50754 +    }
50755 +
50756 +    if ((ep_nmd2railmask (rxd->Local, rxd->nFrags) & ep_nmd2railmask (rxd->Remote, rxd->nFrags) & EP_RAIL2RAILMASK (rail->Number)) != 0)
50757 +    {
50758 +       rxd->NextRunTime = 0;
50759 +       return 1;
50760 +    }
50761 +
50762 +    return 0;
50763 +}
50764 +
50765 +long
50766 +ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime)
50767 +{
50768 +    struct list_head *el, *nel;
50769 +    unsigned long     flags;
50770 +    int               i;
50771 +
50772 +    /* Check to see if we're low on rxds */
50773 +    if (rcvr->FreeDescCount < ep_rxd_lowat)
50774 +       AllocateRxdBlock (rcvr, 0, NULL);
50775 +
50776 +    for (i = 0; i < EP_MAX_RAILS; i++) 
50777 +       if (rcvr->RailMask & (1 << i) )
50778 +           nextRunTime = EP_RCVR_OP (rcvr->Rails[i], Check) (rcvr->Rails[i], nextRunTime);
50779 +
50780 +    /* See if we have any rxd's which need to be handled */
50781 +    spin_lock_irqsave (&rcvr->Lock, flags);
50782 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
50783 +       EP_RXD      *rxd     = list_entry (el, EP_RXD, Link);
50784 +       EP_RXD_MAIN *rxdMain = rxd->RxdMain;
50785 +       EP_ENVELOPE *env     = &rxdMain->Envelope;
50786 +       EP_RXD_RAIL *rxdRail = rxd->RxdRail;
50787 +
50788 +       if (rxdRail == NULL)
50789 +           nextRunTime = CheckUnboundRxd (rcvr, rxd, nextRunTime);
50790 +       else
50791 +       {
50792 +           EP_RCVR_RAIL *rcvrRail = rxdRail->RcvrRail;
50793 +           EP_RAIL      *rail     = rcvrRail->CommsRail->Rail;
50794 +
50795 +           if (rxd->RxdMain->Len == EP_RXD_PENDING ||                          /* envelope not received yet */
50796 +               rail->Nodes[env->NodeId].State != EP_NODE_CONNECTED)            /* will be failing over */
50797 +               continue;
50798 +
50799 +           switch (rxd->State)
50800 +           {
50801 +           case EP_RXD_PUT_STALLED:
50802 +               if (CheckRxdNmdsMapped (rcvr, rxd))
50803 +               {
50804 +                   rxd->State = EP_RXD_PUT_ACTIVE;
50805 +
50806 +                   EP_RCVR_OP (rcvrRail, RpcPut) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
50807 +               }
50808 +               break;
50809 +
50810 +           case EP_RXD_GET_STALLED:
50811 +               if (CheckRxdNmdsMapped (rcvr, rxd))
50812 +               {
50813 +                   rxd->State = EP_RXD_GET_ACTIVE;
50814 +
50815 +                   EP_RCVR_OP (rcvrRail, RpcGet) (rxd, rxd->Local, rxd->Remote, rxd->nFrags);
50816 +               }
50817 +               break;
50818 +           
50819 +           case EP_RXD_COMPLETE_STALLED:
50820 +               if (CheckRxdNmdsMapped (rcvr, rxd))
50821 +               {
50822 +                   rxd->State = EP_RXD_COMPLETE_ACTIVE;
50823 +
50824 +                   EP_RCVR_OP (rcvrRail, RpcComplete)(rxd, rxd->Local, rxd->Remote, rxd->nFrags);
50825 +               }
50826 +               break;
50827 +           }
50828 +               
50829 +           if (rxd->NextRunTime && (nextRunTime == 0 || AFTER (nextRunTime, rxd->NextRunTime)))
50830 +               nextRunTime = rxd->NextRunTime;
50831 +       }
50832 +    }
50833 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50834 +    
50835 +    return (nextRunTime);
50836 +}
50837 +
50838 +void
50839 +ep_display_rxd (DisplayInfo *di, EP_RXD *rxd)
50840 +{
50841 +    EP_RXD_MAIN *rxdMain  = rxd->RxdMain;
50842 +    EP_ENVELOPE *env      = &rxdMain->Envelope;
50843 +    EP_RXD_RAIL *rxdRail  = rxd->RxdRail;
50844 +    
50845 +    (di->func)(di->arg, "  RXD: %p State=%x RxdMain=%p(%x.%x.%x) Data=%x.%x.%x %s\n", rxd,
50846 +              rxd->State, rxd->RxdMain, rxd->NmdMain.nmd_addr, rxd->NmdMain.nmd_len,
50847 +              rxd->NmdMain.nmd_attr, rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr,
50848 +              rxd->RxdMain->Len == EP_RXD_PENDING ? "Pending" : "Active");
50849 +    (di->func)(di->arg, "      NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d XID=%08x.%08x.%016llx\n",
50850 +              env->NodeId,  EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail, env->TxdMain.nmd_addr,
50851 +	      env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags, env->Xid.Generation, env->Xid.Handle, env->Xid.Unique);
50852 +    (di->func)(di->arg, "      Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr);
50853 +    (di->func)(di->arg, "      Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr);
50854 +    (di->func)(di->arg, "      Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr);
50855 +    (di->func)(di->arg, "      Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr);
50856 +
50857 +    if (rxdRail) EP_RCVR_OP (rxdRail->RcvrRail, DisplayRxd) (di, rxdRail);
50858 +}
50859 +
50860 +void
50861 +ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full)
50862 +{
50863 +    int               freeCount    = 0;
50864 +    int                      activeCount  = 0;
50865 +    int                      pendingCount = 0;
50866 +    int                      railCounts[EP_MAX_RAILS];
50867 +    struct list_head *el;
50868 +    int               i;
50869 +    unsigned long     flags;
50870 +
50871 +    for (i = 0; i <EP_MAX_RAILS; i++)
50872 +       railCounts[i] = 0;
50873 +
50874 +    spin_lock_irqsave (&rcvr->FreeDescLock, flags);
50875 +    list_for_each (el, &rcvr->FreeDescList)
50876 +       freeCount++;
50877 +    spin_unlock_irqrestore (&rcvr->FreeDescLock, flags);
50878 +
50879 +    spin_lock_irqsave (&rcvr->Lock, flags);
50880 +    list_for_each (el, &rcvr->ActiveDescList) {
50881 +       EP_RXD      *rxd     = list_entry (el, EP_RXD, Link);
50882 +       EP_RXD_RAIL *rxdRail = rxd->RxdRail;
50883 +
50884 +       if (rxd->RxdMain->Len == EP_RXD_PENDING)
50885 +           pendingCount++;
50886 +       else
50887 +           activeCount++;
50888 +
50889 +       if (rxdRail)
50890 +           railCounts[rxdRail->RcvrRail->CommsRail->Rail->Number]++;
50891 +    }
50892 +
50893 +    (di->func)(di->arg, "RCVR: rcvr=%p number=%d\n", rcvr, rcvr->Service);
50894 +    (di->func)(di->arg, "      RXDS Free=%d (%d) Pending=%d Active=%d Rails=%d.%d.%d.%d\n",
50895 +              freeCount, rcvr->FreeDescCount, pendingCount, activeCount, railCounts[0], railCounts[1],
50896 +              railCounts[2], railCounts[3]);
50897 +
50898 +    for (i = 0; i < EP_MAX_RAILS; i++)
50899 +       if (rcvr->Rails[i] != NULL)
50900 +           EP_RCVR_OP (rcvr->Rails[i], DisplayRcvr) (di, rcvr->Rails[i]);
50901 +
50902 +    list_for_each (el, &rcvr->ActiveDescList) {
50903 +       EP_RXD *rxd = list_entry (el, EP_RXD, Link);
50904 +
50905 +       if (rxd->RxdMain->Len != EP_RXD_PENDING || full)
50906 +           ep_display_rxd (di, rxd);
50907 +    }
50908 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
50909 +}
50910 +
50911 +void
50912 +ep_rxd_received_now(EP_RXD *rxd)
50913 +{
50914 +    EP_ENVELOPE   *env  = &rxd->RxdMain->Envelope;
50915 +    EP_RCVR       *rcvr = rxd->Rcvr;
50916 +    unsigned long  flags;
50917 +
50918 +    INC_STAT(rcvr->stats,rx);
50919 +    ADD_STAT(rcvr->stats,rx_len, rxd->RxdMain->Len);
50920 +
50921 +    if (rxd->RxdMain->Len < 0 || !EP_IS_MULTICAST(env->Attr))
50922 +    {
50923 +       rxd->Handler (rxd);
50924 +    }
50925 +    else
50926 +    {
50927 +       EPRINTF5 (DBG_RCVR, "ep_rxd_received: forward rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, 
50928 +                 rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd));
50929 +
50930 +       spin_lock_irqsave (&rcvr->Subsys->ForwardDescLock, flags);
50931 +       list_add_tail (&rxd->Link, &rcvr->Subsys->ForwardDescList);
50932 +       spin_unlock_irqrestore (&rcvr->Subsys->ForwardDescLock, flags);
50933 +       
50934 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50935 +    } 
50936 +}
50937 +
50938 +#if defined(CONFIG_EP_NO_CHECK_SUM)
50939 +void
50940 +ep_rxd_received(EP_RXD *rxd) 
50941 +{
50942 +   ep_rxd_received_now(rxd);
50943 +}
50944 +
50945 +#else
50946 +
50947 +void
50948 +ep_rxd_received(EP_RXD *rxd) 
50949 +{
50950 +  EP_ENVELOPE   *env  = &rxd->RxdMain->Envelope;
50951 +
50952 +  if (env->CheckSum) 
50953 +      ep_rxd_queue_csum(rxd);
50954 +  else 
50955 +      ep_rxd_received_now(rxd);
50956 +}
50957 +
50958 +void
50959 +ep_rxd_queue_csum(EP_RXD *rxd)
50960 +{
50961 +    EP_RCVR       *rcvr = rxd->Rcvr;
50962 +    unsigned long flags;
50963 +
50964 +    EPRINTF5 (DBG_RCVR, "ep_rxd_queue_csum: rxd=%p Data=%08x.%08x.%08x len=%d\n", rxd, 
50965 +             rxd->Data.nmd_addr, rxd->Data.nmd_len, rxd->Data.nmd_attr, ep_rxd_len(rxd));
50966 +    
50967 +    spin_lock_irqsave (&rcvr->Subsys->CheckSumDescLock, flags);
50968 +    list_add_tail (&rxd->CheckSumLink, &rcvr->Subsys->CheckSumDescList);
50969 +    spin_unlock_irqrestore (&rcvr->Subsys->CheckSumDescLock, flags);
50970 +    
50971 +    ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
50972 +}
50973 +#endif
50974 +
50975 +void
50976 +ep_rcvr_fillout_stats(EP_RCVR *rcvr, char *str)
50977 +{
50978 +    sprintf(str+strlen(str),"Rx     %lu  %lu /sec\n",       GET_STAT_TOTAL(rcvr->stats,rx),      GET_STAT_PER_SEC(rcvr->stats,rx) );
50979 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr->stats,rx_len) / (1024*1024),  GET_STAT_PER_SEC(rcvr->stats,rx_len) / (1024*1024));
50980 +}
50981 +
50982 +void
50983 +ep_rcvr_rail_fillout_stats(EP_RCVR_RAIL *rcvr_rail, char *str)
50984 +{
50985 +    sprintf(str+strlen(str),"Rx     %lu  %lu /sec\n",       GET_STAT_TOTAL(rcvr_rail->stats,rx),      GET_STAT_PER_SEC(rcvr_rail->stats,rx) );
50986 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(rcvr_rail->stats,rx_len) / (1024*1024),  GET_STAT_PER_SEC(rcvr_rail->stats,rx_len) / (1024*1024));
50987 +}
50988 +
50989 +
50990 +/*
50991 + * Local variables:
50992 + * c-file-style: "stroustrup"
50993 + * End:
50994 + */
50995 diff -urN clean/drivers/net/qsnet/ep/epcommsRx_elan3.c linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan3.c
50996 --- clean/drivers/net/qsnet/ep/epcommsRx_elan3.c        1969-12-31 19:00:00.000000000 -0500
50997 +++ linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan3.c  2005-03-10 10:25:52.000000000 -0500
50998 @@ -0,0 +1,1776 @@
50999 +/*
51000 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
51001 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
51002 + *
51003 + *    For licensing information please see the supplied COPYING file
51004 + *
51005 + */
51006 +
51007 +#ident "@(#)$Id: epcommsRx_elan3.c,v 1.24 2005/03/10 15:25:52 mike Exp $"
51008 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx_elan3.c,v $ */
51009 +
51010 +#include <qsnet/kernel.h>
51011 +
51012 +#include <elan/kcomm.h>
51013 +#include <elan/epsvc.h>
51014 +#include <elan/epcomms.h>
51015 +
51016 +#include "kcomm_vp.h"
51017 +#include "kcomm_elan3.h"
51018 +#include "epcomms_elan3.h"
51019 +#include "debug.h"
51020 +
51021 +#define RCVR_TO_RAIL(rcvrRail)         ((EP3_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail)
51022 +#define RCVR_TO_DEV(rcvrRail)          (RCVR_TO_RAIL(rcvrRail)->Device)
51023 +#define RCVR_TO_SUBSYS(rcvrRail)       (((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys)
51024 +
51025 +static void RxDataEvent (EP3_RAIL *rail, void *arg);
51026 +static void RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
51027 +static void RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
51028 +
51029 +static EP3_COOKIE_OPS RxDataCookieOps = 
51030 +{
51031 +    RxDataEvent,
51032 +    RxDataRetry,
51033 +    NULL, /* DmaCancelled */
51034 +    RxDataVerify,
51035 +};
51036 +
51037 +static void RxDoneEvent (EP3_RAIL *rail, void *arg);
51038 +static void RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
51039 +static void RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
51040 +
51041 +static EP3_COOKIE_OPS RxDoneCookieOps = 
51042 +{
51043 +    RxDoneEvent,
51044 +    RxDoneRetry,
51045 +    NULL, /* DmaCancelled */
51046 +    RxDoneVerify,
51047 +};
51048 +
51049 +static int
51050 +AllocateRxdRailBlock (EP3_RCVR_RAIL *rcvrRail)
51051 +{
51052 +    EP3_RAIL         *rail = RCVR_TO_RAIL(rcvrRail);
51053 +    ELAN3_DEV         *dev  = rail->Device;
51054 +    EP3_RXD_RAIL_BLOCK *blk;
51055 +    EP3_RXD_RAIL       *rxdRail;
51056 +    sdramaddr_t        pRxdElan;
51057 +    EP3_RXD_RAIL_MAIN  *pRxdMain;
51058 +    E3_Addr           pRxdElanAddr;
51059 +    E3_Addr           pRxdMainAddr;
51060 +    E3_BlockCopyEvent  event;
51061 +    int                       i, j;
51062 +    unsigned long      flags;
51063 +
51064 +    KMEM_ZALLOC (blk, EP3_RXD_RAIL_BLOCK *, sizeof (EP3_RXD_RAIL_BLOCK), 1);
51065 +    if (blk == NULL)
51066 +       return 0;
51067 +
51068 +    if ((pRxdElan = ep_alloc_elan (&rail->Generic, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK, 0, &pRxdElanAddr)) == (sdramaddr_t) 0)
51069 +    {
51070 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
51071 +       return 0;
51072 +    }
51073 +
51074 +    if ((pRxdMain = ep_alloc_main (&rail->Generic, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK, 0, &pRxdMainAddr)) == (sdramaddr_t) 0)
51075 +    {
51076 +       ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
51077 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
51078 +       return 0;
51079 +    }
51080 +    
51081 +    if (ReserveDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK, 0) != ESUCCESS)
51082 +    {
51083 +       ep_free_main (&rail->Generic, pRxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK);
51084 +       ep_free_elan (&rail->Generic, pRxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
51085 +       KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
51086 +       return 0;
51087 +    }
51088 +
51089 +    for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++)
51090 +    {
51091 +       rxdRail->Generic.RcvrRail = (EP_RCVR_RAIL *) rcvrRail;
51092 +       rxdRail->RxdElan          = pRxdElan;
51093 +       rxdRail->RxdElanAddr      = pRxdElanAddr;
51094 +       rxdRail->RxdMain          = pRxdMain;
51095 +       rxdRail->RxdMainAddr      = pRxdMainAddr;
51096 +
51097 +       elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain),  0);
51098 +       elan3_sdram_writel (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next),     0);
51099 +       elan3_sdram_writeq (dev, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr), (long) rxdRail);
51100 +
51101 +       for (j = 0; j < EP_MAXFRAG; j++)
51102 +       {
51103 +           RegisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j], pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), &RxDataCookieOps, (void *) rxdRail);
51104 +
51105 +           event.ev_Type  = EV_TYPE_DMA | (pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[j+1]));
51106 +           event.ev_Count = 0;
51107 +
51108 +           elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[j]), sizeof (E3_BlockCopyEvent));
51109 +       }
51110 +       
51111 +       RegisterCookie (&rail->CookieTable, &rxdRail->DataCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), &RxDataCookieOps, (void *) rxdRail);
51112 +       RegisterCookie (&rail->CookieTable, &rxdRail->DoneCookie, pRxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), &RxDoneCookieOps, (void *) rxdRail);
51113 +
51114 +       EP3_INIT_COPY_EVENT (event, rxdRail->DataCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DataEvent), 1);
51115 +       elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent));
51116 +
51117 +       EP3_INIT_COPY_EVENT (event, rxdRail->DoneCookie, pRxdMainAddr + offsetof (EP3_RXD_RAIL_MAIN, DoneEvent), 1);
51118 +       elan3_sdram_copyl_to_sdram (dev, &event, pRxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent));
51119 +       
51120 +       pRxdMain->DataEvent = EP3_EVENT_FREE;
51121 +       pRxdMain->DoneEvent = EP3_EVENT_FREE;
51122 +
51123 +       /* move onto next descriptor */
51124 +       pRxdElan     += EP3_RXD_RAIL_ELAN_SIZE;
51125 +       pRxdElanAddr += EP3_RXD_RAIL_ELAN_SIZE;
51126 +       pRxdMain      = (EP3_RXD_RAIL_MAIN *) ((unsigned long) pRxdMain + EP3_RXD_RAIL_MAIN_SIZE);
51127 +       pRxdMainAddr += EP3_RXD_RAIL_MAIN_SIZE;
51128 +    }
51129 +
51130 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
51131 +
51132 +    list_add  (&blk->Link, &rcvrRail->DescBlockList);
51133 +    rcvrRail->TotalDescCount += EP3_NUM_RXD_PER_BLOCK;
51134 +    rcvrRail->FreeDescCount  += EP3_NUM_RXD_PER_BLOCK;
51135 +
51136 +    for (i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++)
51137 +       list_add (&blk->Rxd[i].Generic.Link, &rcvrRail->FreeDescList);
51138 +
51139 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
51140 +    
51141 +    return 1;
51142 +}
51143 +
51144 +static void
51145 +FreeRxdRailBlock (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL_BLOCK *blk)
51146 +{
51147 +    EP3_RAIL     *rail = RCVR_TO_RAIL(rcvrRail);
51148 +    EP3_RXD_RAIL *rxdRail;
51149 +    unsigned long flags;
51150 +    int i, j;
51151 +
51152 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
51153 +
51154 +    list_del (&blk->Link);
51155 +
51156 +    rcvrRail->TotalDescCount -= EP3_NUM_RXD_PER_BLOCK;
51157 +
51158 +    for (rxdRail = &blk->Rxd[0], i = 0; i < EP3_NUM_RXD_PER_BLOCK; i++, rxdRail++)
51159 +    {
51160 +
51161 +       rcvrRail->FreeDescCount--;
51162 +
51163 +       list_del (&rxdRail->Generic.Link);
51164 +       
51165 +       for (j = 0; j < EP_MAXFRAG; j++)
51166 +           DeregisterCookie (&rail->CookieTable, &rxdRail->ChainCookie[j]);
51167 +       
51168 +       DeregisterCookie (&rail->CookieTable, &rxdRail->DataCookie);
51169 +       DeregisterCookie (&rail->CookieTable, &rxdRail->DoneCookie);
51170 +    }
51171 +
51172 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
51173 +
51174 +    ReleaseDmaRetries (rail, EP3_NUM_RXD_PER_BLOCK);
51175 +
51176 +    ep_free_main (&rail->Generic, blk->Rxd[0].RxdMainAddr, EP3_RXD_RAIL_MAIN_SIZE * EP3_NUM_RXD_PER_BLOCK);
51177 +    ep_free_elan (&rail->Generic, blk->Rxd[0].RxdElanAddr, EP3_RXD_RAIL_ELAN_SIZE * EP3_NUM_RXD_PER_BLOCK);
51178 +
51179 +    KMEM_FREE (blk, sizeof (EP3_RXD_RAIL_BLOCK));
51180 +}
51181 +
51182 +static EP3_RXD_RAIL *
51183 +GetRxdRail (EP3_RCVR_RAIL *rcvrRail)
51184 +{
51185 +    EP3_RXD_RAIL *rxdRail;
51186 +    unsigned long flags;
51187 +    int low_on_rxds;
51188 +
51189 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
51190 +
51191 +    if (list_empty (&rcvrRail->FreeDescList))
51192 +       rxdRail = NULL;
51193 +    else
51194 +    {
51195 +       rxdRail = list_entry (rcvrRail->FreeDescList.next, EP3_RXD_RAIL, Generic.Link);
51196 +
51197 +       list_del (&rxdRail->Generic.Link);
51198 +
51199 +       rcvrRail->FreeDescCount--;
51200 +    }
51201 +
51202 +    /* Wakeup the descriptor primer thread if there's not many left */
51203 +    low_on_rxds = (rcvrRail->FreeDescCount < ep_rxd_lowat);
51204 +
51205 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
51206 +
51207 +    if (low_on_rxds)
51208 +       ep_kthread_schedule (&RCVR_TO_SUBSYS(rcvrRail)->Thread, lbolt);
51209 +
51210 +    return (rxdRail);
51211 +}
51212 +
51213 +static void
51214 +FreeRxdRail (EP3_RCVR_RAIL *rcvrRail, EP3_RXD_RAIL *rxdRail)
51215 +{
51216 +    unsigned long flags;
51217 +
51218 +#if defined(DEBUG_ASSERT)
51219 +    {
51220 +       EP_RAIL  *rail = (EP_RAIL *) RCVR_TO_RAIL(rcvrRail);
51221 +       ELAN3_DEV *dev = RCVR_TO_DEV (rcvrRail);
51222 +
51223 +       EP_ASSERT (rail, rxdRail->Generic.RcvrRail == &rcvrRail->Generic);
51224 +       
51225 +       EP_ASSERT (rail, rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE);
51226 +       EP_ASSERT (rail, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE);
51227 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
51228 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
51229 +
51230 +       rxdRail->RxdMain->DataEvent = EP3_EVENT_FREE;
51231 +       rxdRail->RxdMain->DoneEvent = EP3_EVENT_FREE;
51232 +    }
51233 +#endif
51234 +
51235 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
51236 +    
51237 +    list_add (&rxdRail->Generic.Link, &rcvrRail->FreeDescList);
51238 +
51239 +    rcvrRail->FreeDescCount++;
51240 +
51241 +    if (rcvrRail->FreeDescWaiting)
51242 +    {
51243 +       rcvrRail->FreeDescWaiting--;
51244 +       kcondvar_wakeupall (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock);
51245 +    }
51246 +
51247 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
51248 +}
51249 +
51250 +static void
51251 +BindRxdToRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail)
51252 +{
51253 +    EP3_RAIL *rail = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail);
51254 +
51255 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
51256 +
51257 +    EPRINTF3 (DBG_RCVR, "%s: BindRxdToRail: rxd=%p rxdRail=%p\n",  rail->Generic.Name, rxd, rxdRail);
51258 +
51259 +    elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, RxdMain), rxd->NmdMain.nmd_addr);                        /* PCI write */
51260 +
51261 +    rxd->RxdRail         = &rxdRail->Generic;
51262 +    rxdRail->Generic.Rxd = rxd;
51263 +}
51264 +
51265 +static void
51266 +UnbindRxdFromRail (EP_RXD *rxd, EP3_RXD_RAIL *rxdRail)
51267 +{
51268 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
51269 +
51270 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
51271 +    ASSERT (rxd->RxdRail == &rxdRail->Generic && rxdRail->Generic.Rxd == rxd);
51272 +    
51273 +    EPRINTF3 (DBG_RCVR, "%s: UnbindRxdFromRail: rxd=%p rxdRail=%p\n",  RCVR_TO_RAIL(rxdRail->Generic.RcvrRail)->Generic.Name, rxd, rxdRail);
51274 +
51275 +    rxd->RxdRail         = NULL;
51276 +    rxdRail->Generic.Rxd = NULL;
51277 +
51278 +    if (rcvrRail->CleanupWaiting)
51279 +       kcondvar_wakeupall (&rcvrRail->CleanupSleep, &rxd->Rcvr->Lock);
51280 +    rcvrRail->CleanupWaiting = 0;
51281 +}
51282 +
51283 +static void
51284 +LockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
51285 +{
51286 +    EP_COMMS_RAIL     *commsRail   = rcvrRail->Generic.CommsRail;
51287 +    EP3_RAIL          *rail        = RCVR_TO_RAIL(rcvrRail);
51288 +    ELAN3_DEV        *dev         = rail->Device;
51289 +    sdramaddr_t        sle         = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
51290 +    EP3_SPINLOCK_MAIN *sl          = &rcvrRail->RcvrMain->ThreadLock;
51291 +    E3_uint32          RestartBits = 0;
51292 +    int                delay       = 1;
51293 +    E3_uint32          seq;
51294 +    E3_uint32          reg;
51295 +
51296 +    ASSERT (SPINLOCK_HELD (&rcvrRail->Generic.Rcvr->Lock));
51297 +
51298 +    mb();
51299 +    elan3_sdram_writel (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 1);
51300 +    mb();
51301 +    seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
51302 +    while (seq != sl->sl_seq)
51303 +    {
51304 +       while (sl->sl_seq == (seq - 1))
51305 +       {
51306 +           mb();
51307 +
51308 +           if ((read_reg32 (dev, Exts.InterruptReg) & (INT_TProc | INT_TProcHalted)) != 0 && spin_trylock (&dev->IntrLock))
51309 +           {
51310 +               reg=read_reg32 (dev, Exts.InterruptReg);
51311 +               ELAN_REG_REC(reg);
51312 +
51313 +               if ((reg & (INT_TProc | INT_TProcHalted)) != 0&& 
51314 +                   elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq)) != sl->sl_seq)
51315 +               {
51316 +                   EPRINTF1 (DBG_RCVR, "%s: LockRcvrThread - thread trapped\n", rail->Generic.Name);
51317 +
51318 +                   /* The thread processor has *really* trapped, and the spinlock is still held.
51319 +                    * thus is must have trapped due to a network error - we need to complete the
51320 +                    * actions required for this envelope, since we may be spin-locking the receiver
51321 +                    * to search the dma retry lists for a particular dma.  So must ensure that
51322 +                    * if the thread had trapped then the dma has been queued onto the retry list
51323 +                    * *before* we inspect them.
51324 +                    */
51325 +                   IncrStat (commsRail, LockRcvrTrapped);
51326 +
51327 +                   /* We're going to generate a spurious interrupt here - since we will
51328 +                    * handle the thread processor trap directly */
51329 +                   ELAN_REG_REC(reg);
51330 +                   if (HandleTProcTrap (dev, &RestartBits))
51331 +                   {
51332 +                       /* NOTE - this is not an assert, since the "store" to unlock the lock could
51333 +                        *        be held up on the PCI interface, whilst the thread processor has
51334 +                        *        gone on and switched to a new thread, which has then trapped, and
51335 +                        *        our read of the InterruptReg can overtake the unlock write.
51336 +                        *
51337 +                        * ASSERT (dev->ThreadTrap->Registers[REG_GLOBALS + (1^WordEndianFlip)] == 
51338 +                        *         elan3_sdram_readl (dev, rcvr->RcvrElan + offsetof (EP_RCVR_ELAN, PendingRxDescsElan)));
51339 +                        */
51340 +
51341 +                       PULSE_SCHED_STATUS (dev, RestartBits);
51342 +
51343 +                       DeliverTProcTrap (dev, dev->ThreadTrap, INT_TProc);
51344 +                   }
51345 +               }
51346 +               spin_unlock (&dev->IntrLock);
51347 +           }
51348 +           
51349 +           DELAY (delay); delay++;
51350 +       }
51351 +       seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
51352 +    }
51353 +}
51354 +
51355 +static void
51356 +UnlockRcvrThread (EP3_RCVR_RAIL *rcvrRail)
51357 +{
51358 +    EP3_RAIL   *rail = RCVR_TO_RAIL(rcvrRail);
51359 +    sdramaddr_t sle  = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
51360 +
51361 +    mb();
51362 +    elan3_sdram_writel (rail->Device, sle + offsetof (EP3_SPINLOCK_ELAN, sl_lock), 0);
51363 +    mmiob(); 
51364 +}
51365 +
51366 +void
51367 +CompleteEnvelope (EP3_RAIL *rail, E3_Addr rxdElanAddr, E3_uint32 PAckVal)
51368 +{
51369 +    ELAN3_DEV         *dev       = rail->Device;
51370 +    sdramaddr_t        rxdElan   = ep_elan2sdram (&rail->Generic, rxdElanAddr);
51371 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
51372 +    EP_RXD_MAIN       *rxdMain   = rxdRail->Generic.Rxd->RxdMain;
51373 +    EP_ENVELOPE       *env       = &rxdMain->Envelope;
51374 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
51375 +    EP_COMMS_RAIL     *commsRail = rcvrRail->Generic.CommsRail;
51376 +    EP_RCVR           *rcvr      = rcvrRail->Generic.Rcvr;
51377 +    sdramaddr_t        queue     = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue);
51378 +    sdramaddr_t        sle       = rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock);
51379 +    EP3_SPINLOCK_MAIN *sl        = &rcvrRail->RcvrMain->ThreadLock;
51380 +    int               nodeId;
51381 +    EP_NODE_RAIL     *nodeRail;
51382 +    E3_DMA_BE         dma;
51383 +    E3_Addr           nfptr;
51384 +    E3_Addr          next;
51385 +
51386 +    ASSERT (commsRail->Rail == &rail->Generic);
51387 +    ASSERT (rxdElanAddr == elan3_sdram_readl (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs)));
51388 +
51389 +    IncrStat (commsRail, CompleteEnvelope);
51390 +
51391 +    /* We don't need to acquire the NodeLock here (however we might be holding it),
51392 +     * since this can only get called while the node is connected, or disconnecting.
51393 +     * If the node is disconnecting, then we can get called from FlushDisconnecting()
51394 +     * while holding the NodeLock - after which we cannot get called again until the node 
51395 +     * has reconnected from scratch.
51396 +     */
51397 +    /* Copy the envelope information */
51398 +    nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr));
51399 +
51400 +    if (nfptr == elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)))
51401 +       nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
51402 +    else
51403 +       nfptr += elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size));
51404 +
51405 +    /* Copy the envelope and payload (unconditionally) */
51406 +    elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr), env, EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE);
51407 +
51408 +    ASSERT (env->Version == EP_ENVELOPE_VERSION);
51409 +
51410 +    /* Copy the received message length */
51411 +    rxdMain->Len = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len));
51412 +       
51413 +    /* Remove the RXD from the pending desc list */
51414 +    if ((next = elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
51415 +       rcvrRail->RcvrMain->PendingDescsTailp = 0;
51416 +    elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
51417 +
51418 +    /* Copy the DMA descriptor to queue on the appropriate retry list */
51419 +    elan3_sdram_copyq_from_sdram (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]), &dma, sizeof (E3_DMA));        /* PCI read block */
51420 +    
51421 +    EP_ASSERT (&rail->Generic, dma.s.dma_direction == DMA_READ);;
51422 +
51423 +#if defined(DEBUG_ASSERT) && defined(DEBUG_SDRAM_ASSERT)
51424 +    /* NOTE: not an assertion, since the thread packet could have successfully
51425 +     *       transferred the "put" dma to the far side - which could then have
51426 +     *       completed - but the far side will see a network error which will
51427 +     *       cause the virtual circuit to be dropped by the far side and this 
51428 +     *       DMA will be removed */
51429 +    if (rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
51430 +       elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
51431 +    {
51432 +       printk ("CompleteEnvelope: suspicious dma : Node=%d DataBlock=%d Event=%d\n", 
51433 +               env->NodeId, rxdRail->RxdMain->DataEvent, 
51434 +               elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
51435 +    }
51436 +#endif
51437 +    
51438 +    EPRINTF6 (DBG_RCVR, "%s: CompleteEnvelope: rxd=%p NodeId=%d Xid=%llx Cookies=%08x,%08x\n", commsRail->Rail->Name,
51439 +             rxdRail, env->NodeId, (long long) env->Xid.Unique, dma.s.dma_srcCookieVProc, dma.s.dma_destCookieVProc);
51440 +
51441 +    /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the DMA descriptor will
51442 +     * be read from the EP_RETRY_DMA rather than the original DMA - this can then get reused 
51443 +     * and an incorrect DMA descriptor sent */
51444 +    dma.s.dma_source    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
51445 +    dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
51446 +    
51447 +    nodeId   = EP_VP_TO_NODE(dma.s.dma_srcVProc);
51448 +    nodeRail = &rail->Generic.Nodes[nodeId];
51449 +
51450 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
51451 +
51452 +    if (PAckVal != E3_PAckOk)
51453 +    {
51454 +       if (nodeRail->State == EP_NODE_CONNECTED)
51455 +           QueueDmaForRetry (rail, &dma, EP_RETRY_LOW_PRI_RETRY);
51456 +       else
51457 +           QueueDmaOnStalledList (rail, &dma);
51458 +    }
51459 +
51460 +    /* Finally forcefully drop the spinlock for the thread */
51461 +    sl->sl_seq = elan3_sdram_readl (dev, sle + offsetof (EP3_SPINLOCK_ELAN, sl_seq));
51462 +
51463 +    wmb();
51464 +}
51465 +
51466 +void
51467 +StallThreadForNoDescs (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
51468 +{
51469 +    ELAN3_DEV      *dev       = rail->Device;
51470 +    sdramaddr_t    rcvrElan   = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
51471 +    EP3_RCVR_RAIL  *rcvrRail  = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq (dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
51472 +    EP_RCVR        *rcvr      = rcvrRail->Generic.Rcvr;
51473 +    EP_COMMS_RAIL  *commsRail = rcvrRail->Generic.CommsRail;
51474 +
51475 +    EPRINTF3 (DBG_RCVR, "%s: StallThreadForNoDescs - rcvrRail=%p sp=%x\n", commsRail->Rail->Name, rcvrRail, sp);
51476 +    
51477 +    IncrStat (commsRail, StallThread);
51478 +
51479 +    /* NOTE: spin lock not required as thread is trapped */
51480 +    
51481 +    if (rcvrRail->RcvrMain->PendingDescsTailp != 0)
51482 +    {
51483 +       EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - pending descriptors, wakeup thread\n", commsRail->Rail->Name);
51484 +       
51485 +       /*
51486 +        * A receive buffer was queued after the thread had decided to go to 
51487 +        * sleep, but before the event interrupt occurred.  Just restart the
51488 +        * thread to consume the envelope.
51489 +        */
51490 +       IssueRunThread (rail, sp);
51491 +    }
51492 +    else
51493 +    {
51494 +       EPRINTF1 (DBG_RCVR, "%s: StallThreadForNoDescs - set ThreadWaiting\n", commsRail->Rail->Name);
51495 +       
51496 +       IncrStat (commsRail, ThrdWaiting);
51497 +
51498 +       /* Mark the rcvr as waiting for a rxd, and schedule a call of ep_check_rcvr
51499 +        * to attempt to "steal" a descriptor from a different rail */
51500 +       rcvrRail->ThreadWaiting = sp;
51501 +
51502 +       ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
51503 +    }
51504 +}
51505 +
51506 +void
51507 +StallThreadForHalted (EP3_RAIL *rail, E3_Addr rcvrElanAddr, E3_Addr sp)
51508 +{
51509 +    ELAN3_DEV     *dev       = rail->Device;
51510 +    sdramaddr_t    rcvrElan  = ep_elan2sdram (&rail->Generic, rcvrElanAddr);
51511 +    EP3_RCVR_RAIL *rcvrRail  = (EP3_RCVR_RAIL *) (unsigned long) elan3_sdram_readq (dev, rcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr));
51512 +    EP_RCVR       *rcvr      = rcvrRail->Generic.Rcvr;
51513 +    unsigned long  flags     = 0;
51514 +
51515 +    spin_lock_irqsave (&rcvr->Lock, flags);
51516 +
51517 +    rcvrRail->ThreadHalted = sp;
51518 +
51519 +    EPRINTF2 (DBG_EPTRAP, "%s: StallThreadForHalted: sp=%08x\n", rail->Generic.Name, sp);
51520 +
51521 +    if (rcvrRail->CleanupWaiting)
51522 +       kcondvar_wakeupone (&rcvrRail->CleanupSleep, &rcvr->Lock);
51523 +    rcvrRail->CleanupWaiting = 0;
51524 +
51525 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51526 +}
51527 +/*
51528 + * RxDataEvent: arg == EP3_RXD_RAIL
51529 + *   Called on completion of receiving data.
51530 + */
51531 +static void
51532 +RxDataEvent (EP3_RAIL *rail, void *arg)
51533 +{
51534 +    EP3_RXD_RAIL  *rxdRail  = (EP3_RXD_RAIL *) arg;
51535 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
51536 +    EP_RXD        *rxd      = rxdRail->Generic.Rxd;
51537 +    EP_ENVELOPE   *env      = &rxd->RxdMain->Envelope;
51538 +    EP_RCVR       *rcvr     = rxd->Rcvr;
51539 +    ELAN3_DEV    *dev      = rail->Device;
51540 +    unsigned long flags;
51541 +    int delay = 1;
51542 +
51543 +    spin_lock_irqsave (&rcvr->Lock, flags);
51544 +    for (;;)
51545 +    {
51546 +       if (EP3_EVENT_FIRED (rxdRail->DataCookie, rxdRail->RxdMain->DataEvent))
51547 +           break;
51548 +
51549 +       if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent), rxdRail->DataCookie, rxdRail->RxdMain->DataEvent))
51550 +       {
51551 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
51552 +               panic ("RxDataEvent: events set but block copy not completed\n");
51553 +           DELAY(delay);
51554 +           delay <<= 1;
51555 +       }
51556 +       else
51557 +       {
51558 +           printk ("%s: RxDataEvent: rxd %p not complete [%x,%x,%x]\n", rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent,
51559 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)),
51560 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type)));
51561 +           
51562 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
51563 +           return;
51564 +       }
51565 +       mb();
51566 +    }
51567 +
51568 +    /*
51569 +     * Note, since the thread will have sent the "get" dma before copying the
51570 +     * envelope, we must check that it has completed doing this,  if not then
51571 +     * it might be that the thread trapped due to a network error, so we must
51572 +     * spinlock against the thread 
51573 +     */
51574 +    if (rxd->RxdMain->Len == EP_RXD_PENDING)
51575 +    {
51576 +       LockRcvrThread (rcvrRail);
51577 +       UnlockRcvrThread (rcvrRail);
51578 +
51579 +       ASSERT (env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING);
51580 +    }
51581 +
51582 +    EPRINTF7 (DBG_RCVR, "%s: RxDataEvent: rxd=%p rxdRail=%p completed from elan node %d [XID=%llx] Length %d State %x\n", 
51583 +             rail->Generic.Name, rxd, rxdRail, env->NodeId, (long long) env->Xid.Unique, rxd->RxdMain->Len, rxd->State);
51584 +
51585 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_PUT_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE);
51586 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));      /* PCI read */
51587 +    EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE);
51588 +
51589 +    rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
51590 +    rxd->Data.nmd_attr          = EP_RAIL2RAILMASK (rail->Generic.Number);
51591 +
51592 +    if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr))
51593 +       rxd->State = EP_RXD_RPC_IN_PROGRESS;
51594 +    else
51595 +    {
51596 +       rxd->State = EP_RXD_COMPLETED;
51597 +
51598 +       /* remove from active list */
51599 +       list_del (&rxd->Link);
51600 +
51601 +       UnbindRxdFromRail (rxd, rxdRail);
51602 +       FreeRxdRail (rcvrRail, rxdRail);
51603 +    }
51604 +
51605 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51606 +    ep_rxd_received (rxd);
51607 +
51608 +}
51609 +
51610 +/* 
51611 + * RxDataRetry: arg == EP3_RXD_RAIL
51612 + *   Called on retry of "get" dma of large transmit data
51613 + *   and rpc_get/rpc_put and "put" of datavec of rpc completion.
51614 + */
51615 +static void
51616 +RxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
51617 +{
51618 +    EP3_RXD_RAIL  *rxdRail  = (EP3_RXD_RAIL *) arg;
51619 +    EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail;
51620 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
51621 +
51622 +#if defined(DEBUG_ASSERT)
51623 +    RxDataVerify (rail, arg, dma);
51624 +#endif
51625 +
51626 +    IncrStat (commsRail, RxDataRetry);
51627 +
51628 +    EPRINTF4 (DBG_RCVR, "%s: RxDataRetry: rcvr %p rxd %p [XID=%llx]\n", rail->Generic.Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique);
51629 +
51630 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DATA));
51631 +}
51632 +
51633 +static void
51634 +RxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
51635 +{
51636 +#if defined(DEBUG_ASSERT)
51637 +    EP3_RXD_RAIL   *rxdRail  = (EP3_RXD_RAIL *) arg;
51638 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
51639 +
51640 +    if (dma->s.dma_direction == DMA_WRITE)
51641 +    {
51642 +       EP_ASSERT (&rail->Generic, 
51643 +                  (rxd->State == EP_RXD_RECEIVE_ACTIVE  && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
51644 +                  (rxd->State == EP_RXD_PUT_ACTIVE      && rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_PRIVATE) ||
51645 +                  (rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdRail->RxdMain->DoneEvent == EP3_EVENT_ACTIVE));
51646 +       EP_ASSERT (&rail->Generic, SDRAM_ASSERT (rxd->State == EP_RXD_COMPLETE_ACTIVE ?
51647 +                                                elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1:            /* PCI read */
51648 +                                                elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 1));          /* PCI read */
51649 +    }
51650 +    else
51651 +    {
51652 +       EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_READ_REQUEUE);
51653 +       
51654 +#if defined(DEBUG_SDRAM_ASSERT)
51655 +       /* NOTE: not an assertion, since the "get" DMA can still be running if
51656 +        *       it's packet got a network error - and then the "put" from the
51657 +        *       far side has completed - however the virtual circuit should
51658 +        *       then be dropped by the far side and this DMA will be removed */
51659 +       if (EP_VP_TO_NODE(dma->s.dma_srcVProc) != ep_rxd_node(rxd) || 
51660 +           (rxd->State != EP_RXD_RECEIVE_ACTIVE && rxd->State != EP_RXD_GET_ACTIVE) ||
51661 +           rxdRail->RxdMain->DataEvent != EP3_EVENT_ACTIVE ||
51662 +           elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) != 1)
51663 +       {
51664 +           EPRINTF6 (DBG_RCVR, "%s: RxDataRetry: suspicious dma : VProc=%d NodeId=%d State=%d DataBlock=%x Event=%d\n",  
51665 +                     rail->Generic.Name, EP_VP_TO_NODE(dma->s.dma_srcVProc), ep_rxd_node(rxd), rxd->State, rxdRail->RxdMain->DataEvent, 
51666 +                     elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)));
51667 +       }
51668 +#endif /* defined(DEBUG_SDRAM_ASSERT) */
51669 +    }
51670 +#endif /* DEBUG_ASSERT */
51671 +}
51672 +
51673 +/*
51674 + * RxDoneEvent: arg == EP_RXD
51675 + *   Called on completion of large receive.
51676 + */
51677 +static void
51678 +RxDoneEvent (EP3_RAIL *rail, void *arg)
51679 +{
51680 +    EP3_RXD_RAIL  *rxdRail   = (EP3_RXD_RAIL *) arg;
51681 +    EP3_RCVR_RAIL *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
51682 +    EP_COMMS_RAIL *commsRail = rcvrRail->Generic.CommsRail;
51683 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
51684 +    EP_RCVR       *rcvr      = rxd->Rcvr;
51685 +    ELAN3_DEV    *dev       = rail->Device;
51686 +    int            delay     = 1;
51687 +    unsigned long  flags;
51688 +
51689 +    spin_lock_irqsave (&rcvr->Lock, flags);
51690 +    for (;;)
51691 +    {
51692 +       if (EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))
51693 +           break;
51694 +       
51695 +       if (EP3_EVENT_FIRING (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent), rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))
51696 +       {
51697 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
51698 +               panic ("RxDoneEvent: events set but block copy not completed\n");
51699 +           DELAY(delay);
51700 +           delay <<= 1;
51701 +       }
51702 +       else
51703 +       {
51704 +           printk ("RxDoneEvent: rxd %p not complete [%x,%x.%x]\n", rxd, rxdRail->RxdMain->DoneEvent,
51705 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)),
51706 +                   elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type)));
51707 +           
51708 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
51709 +           return;
51710 +       }
51711 +       mb();
51712 +    }
51713 +
51714 +    EPRINTF4 (DBG_RCVR, "%s: RxDoneEvent: rxd %p completed from elan node %d [XID=%llx]\n", 
51715 +             commsRail->Rail->Name, rxd, rxd->RxdMain->Envelope.NodeId, (long long) rxd->RxdMain->Envelope.Xid.Unique);
51716 +    
51717 +    IncrStat (commsRail, RxDoneEvent);
51718 +
51719 +    EP_ASSERT (&rail->Generic, rxdRail->RxdMain->DataEvent  == EP3_EVENT_PRIVATE);
51720 +    EP_ASSERT (&rail->Generic, EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent));
51721 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));      /* PCI read */
51722 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));      /* PCI read */
51723 +
51724 +    /* mark rxd as private  */
51725 +    rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
51726 +
51727 +    /* remove from active list */
51728 +    list_del (&rxd->Link);
51729 +
51730 +    UnbindRxdFromRail (rxd, rxdRail);
51731 +    FreeRxdRail (rcvrRail, rxdRail);
51732 +       
51733 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
51734 +
51735 +    rxd->Handler (rxd);
51736 +}
51737 +
51738 +/* 
51739 + * RxDoneRetry: arg == EP_RXD
51740 + *   Called on retry of "put" of RPC completion status block
51741 + */
51742 +static void
51743 +RxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
51744 +{
51745 +    EP3_RXD_RAIL  *rxdRail   = (EP3_RXD_RAIL *) arg;
51746 +    EP_COMMS_RAIL *commsRail = rxdRail->Generic.RcvrRail->CommsRail;
51747 +    EP_RXD        *rxd       = rxdRail->Generic.Rxd;
51748 +
51749 +#if defined(DEBUG_ASSERT)
51750 +    RxDoneVerify (rail, arg, dma);
51751 +#endif
51752 +
51753 +    IncrStat (commsRail, RxDoneRetry);
51754 +
51755 +    EPRINTF4 (DBG_RCVR, "%s: RxDoneRetry: rcvr %p rxd %p [XID=%llx]\n", commsRail->Rail->Name, rxd->Rcvr, rxd, (long long) rxd->RxdMain->Envelope.Xid.Unique);
51756 +
51757 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&rxdRail->Backoff, EP_BACKOFF_DONE));
51758 +}
51759 +
51760 +static void
51761 +RxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
51762 +{
51763 +#if defined(DEBUG_ASSERT)
51764 +    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) arg;
51765 +    EP_RXD       *rxd     = rxdRail->Generic.Rxd;
51766 +
51767 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == ep_rxd_node(rxd));
51768 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE && rxdRail->RxdMain->DoneEvent  == EP3_EVENT_ACTIVE);
51769 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 1));     /* PCI read */
51770 +#endif /* defined(DEBUG_ASSERT) */
51771 +}
51772 +
51773 +int
51774 +ep3rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r)
51775 +{
51776 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r;
51777 +    EP3_RAIL      *rail     = RCVR_TO_RAIL(rcvrRail);
51778 +    ELAN3_DEV     *dev      = rail->Device;
51779 +    EP3_RXD_RAIL  *rxdRail;
51780 +
51781 +    ASSERT ( SPINLOCK_HELD(&rxd->Rcvr->Lock));
51782 +
51783 +    if ((rxdRail = GetRxdRail (rcvrRail)) == NULL)
51784 +       return 0;
51785 +
51786 +    /* Flush the Elan TLB if mappings have changed */
51787 +    ep_perrail_dvma_sync (&rail->Generic);
51788 +
51789 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr), rxd->Data.nmd_addr);              /* PCI write */
51790 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len),  rxd->Data.nmd_len);               /* PCI write */
51791 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_attr), rxd->Data.nmd_attr);              /* PCI write */
51792 +
51793 +    /* Bind the rxdRail and rxd together */
51794 +    BindRxdToRail (rxd, rxdRail);
51795 +    
51796 +    /* Mark as active */
51797 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);
51798 +
51799 +    rxdRail->RxdMain->DataEvent  = EP3_EVENT_ACTIVE;
51800 +    rxdRail->RxdMain->DoneEvent  = EP3_EVENT_PRIVATE;
51801 +
51802 +    /* Interlock with StallThreadForNoDescs */
51803 +    spin_lock (&dev->IntrLock);
51804 +
51805 +    EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p\n", rail->Generic.Name, rxd->Rcvr, rxd, rxdRail);
51806 +
51807 +    EP3_SPINENTER (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock);
51808 +
51809 +    elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next), 0);                                        /* PCI write */
51810 +    if (rcvrRail->RcvrMain->PendingDescsTailp == 0)
51811 +       elan3_sdram_writel (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), rxdRail->RxdElanAddr);       /* PCI write */
51812 +    else
51813 +       elan3_sdram_writel (dev, rcvrRail->RcvrMain->PendingDescsTailp, rxdRail->RxdElanAddr);                          /* PCI write */
51814 +    rcvrRail->RcvrMain->PendingDescsTailp = rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next);
51815 +    
51816 +    EP3_SPINEXIT (dev, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock), &rcvrRail->RcvrMain->PendingLock);
51817 +
51818 +    /* If the thread has paused because it was woken up with no receive buffer */
51819 +    /* ready, then wake it up to process the one we've just added */
51820 +    if (rcvrRail->ThreadWaiting)
51821 +    {
51822 +       EPRINTF1 (DBG_RCVR, "%s: DoReceive: ThreadWaiting - restart thread\n", rail->Generic.Name);
51823 +
51824 +       IssueRunThread (rail, rcvrRail->ThreadWaiting);
51825 +
51826 +       rcvrRail->ThreadWaiting = (E3_Addr) 0;
51827 +    }
51828 +
51829 +    spin_unlock (&dev->IntrLock);
51830 +
51831 +    return 1;
51832 +}
51833 +
51834 +void
51835 +ep3rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
51836 +{
51837 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
51838 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
51839 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
51840 +    ELAN3_DEV         *dev       = rail->Device;
51841 +
51842 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
51843 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
51844 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
51845 +    E3_DMA_BE         dmabe;
51846 +    int                       i, len;
51847 +
51848 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_PUT_ACTIVE);
51849 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
51850 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
51851 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
51852 +
51853 +    /* Flush the Elan TLB if mappings have changed */
51854 +    ep_perrail_dvma_sync (&rail->Generic);
51855 +    
51856 +    /* Generate the DMA chain to put the data in two loops to burst
51857 +     * the data across the PCI bus */
51858 +    for (len = 0, i = (nFrags-1), local += (nFrags-1), remote += (nFrags-1); i >= 0;   len += local->nmd_len, i--, local--, remote--)
51859 +    {
51860 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
51861 +       dmabe.s.dma_size            = local->nmd_len;
51862 +       dmabe.s.dma_source          = local->nmd_addr;
51863 +       dmabe.s.dma_dest            = remote->nmd_addr;
51864 +       dmabe.s.dma_destEvent       = (E3_Addr) 0;
51865 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId);
51866 +       if (i == (nFrags-1))
51867 +           dmabe.s.dma_srcEvent    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent);
51868 +       else
51869 +           dmabe.s.dma_srcEvent    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]);
51870 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
51871 +       
51872 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
51873 +                 (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, dmabe.s.dma_srcCookieVProc);
51874 +       
51875 +       if (i != 0)
51876 +           elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */
51877 +    }
51878 +    
51879 +    for (i = 0; i < nFrags; i++)
51880 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
51881 +    
51882 +    /* Initialise the data event */
51883 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);                   /* PCI write */
51884 +    rxdMain->DataEvent = EP3_EVENT_ACTIVE;
51885 +   
51886 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
51887 +
51888 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
51889 +    {
51890 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
51891 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_put: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
51892 +       
51893 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
51894 +    }
51895 +    
51896 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
51897 +}
51898 +
51899 +void
51900 +ep3rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
51901 +{
51902 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
51903 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
51904 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
51905 +    ELAN3_DEV         *dev       = rail->Device;
51906 +
51907 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
51908 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
51909 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
51910 +    E3_DMA_BE         dmabe;
51911 +    int                       i, len;
51912 +
51913 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_GET_ACTIVE);
51914 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
51915 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
51916 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
51917 +       
51918 +    /* Flush the Elan TLB if mappings have changed */
51919 +    ep_perrail_dvma_sync (&rail->Generic);
51920 +    
51921 +    /* Generate the DMA chain to get the data in two loops to burst
51922 +     * the data across the PCI bus */
51923 +    for (len = 0, i = (nFrags-1), remote += (nFrags-1), local += (nFrags-1); i >= 0;   len += remote->nmd_len, i--, remote--, local--)
51924 +    {
51925 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_READ, DMA_NORMAL, EP3_DMAFAILCOUNT);
51926 +       dmabe.s.dma_size            = remote->nmd_len;
51927 +       dmabe.s.dma_source          = remote->nmd_addr;
51928 +       dmabe.s.dma_dest            = local->nmd_addr;
51929 +       if (i == (nFrags-1))
51930 +           dmabe.s.dma_destEvent   = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DataEvent);
51931 +       else
51932 +           dmabe.s.dma_destEvent   = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i]);
51933 +       dmabe.s.dma_destCookieVProc = LocalCookie (rail, env->NodeId);
51934 +       dmabe.s.dma_srcEvent        = (E3_Addr) 0;
51935 +       dmabe.s.dma_srcCookieVProc  = RemoteCookie (rail, env->NodeId);
51936 +       
51937 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_get rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
51938 +                 (long long) env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len, dmabe.s.dma_destCookieVProc, 
51939 +                 dmabe.s.dma_srcCookieVProc);
51940 +       
51941 +       /* 
51942 +        * Always copy down the dma descriptor, since we issue it as a READ_REQUEUE
51943 +        * dma, and the elan will fetch the descriptor to send out of the link from
51944 +        * the rxdElan->Dmas[i] location,  before issueing the DMA chain we modify
51945 +        * the dma_source.
51946 +        */
51947 +       elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]), sizeof (E3_DMA)); /* PCI write block */
51948 +    }
51949 +    
51950 +    for (i = 0; i < nFrags; i++)
51951 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
51952 +    
51953 +    /* Initialise the data event */
51954 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 1);                   /* PCI write */
51955 +    rxdMain->DataEvent  = EP3_EVENT_ACTIVE;
51956 +    
51957 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
51958 +
51959 +    /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the DMA descriptor will
51960 +     * be read from the EP_RETRY_DMA rather than the orignal DMA - this can then get reused 
51961 +     * and an incorrect DMA descriptor sent */
51962 +    dmabe.s.dma_source    = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, Dmas[0]);
51963 +    dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
51964 +    
51965 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
51966 +    {
51967 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
51968 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_get: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
51969 +       
51970 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
51971 +    }
51972 +
51973 +    BucketStat (rxd->Rcvr->Subsys, RPCGet, len);
51974 +}
51975 +       
51976 +void
51977 +ep3rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
51978 +{
51979 +    EP3_RXD_RAIL      *rxdRail   = (EP3_RXD_RAIL *) rxd->RxdRail;
51980 +    EP3_RCVR_RAIL     *rcvrRail  = (EP3_RCVR_RAIL *) rxdRail->Generic.RcvrRail;
51981 +    EP3_RAIL          *rail      = RCVR_TO_RAIL (rcvrRail);
51982 +    ELAN3_DEV         *dev       = rail->Device;
51983 +
51984 +    EP3_RXD_RAIL_MAIN *rxdMain   = rxdRail->RxdMain;
51985 +    sdramaddr_t        rxdElan   = rxdRail->RxdElan;
51986 +    EP_ENVELOPE       *env       = &rxd->RxdMain->Envelope;
51987 +    E3_DMA_BE         dmabe;
51988 +    int                       i, len;
51989 +    
51990 +    EP_ASSERT (&rail->Generic, rxd->State == EP_RXD_COMPLETE_ACTIVE);
51991 +    EP_ASSERT (&rail->Generic, rxdMain->DataEvent == EP3_EVENT_PRIVATE && rxdMain->DoneEvent == EP3_EVENT_PRIVATE);
51992 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));       /* PCI read */
51993 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));       /* PCI read */
51994 +
51995 +    /* Flush the Elan TLB if mappings have changed */
51996 +    ep_perrail_dvma_sync (&rail->Generic);
51997 +    
51998 +    /* Initialise the status block dma */
51999 +    dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
52000 +    dmabe.s.dma_size            = sizeof (EP_STATUSBLK);
52001 +    dmabe.s.dma_source          = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk);
52002 +    dmabe.s.dma_dest            = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk);
52003 +    dmabe.s.dma_destEvent       = env->TxdRail + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent);
52004 +    dmabe.s.dma_destCookieVProc = EP_VP_DATA(env->NodeId);
52005 +    dmabe.s.dma_srcEvent        = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent);
52006 +    dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
52007 +    
52008 +    EPRINTF8 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
52009 +             (long long) env->Xid.Unique, dmabe.s.dma_source, dmabe.s.dma_dest, dmabe.s.dma_size, dmabe.s.dma_destCookieVProc, 
52010 +             dmabe.s.dma_srcCookieVProc);
52011 +
52012 +    for (len = 0, i = EP_MAXFRAG, remote += (nFrags-1), local += (nFrags-1); i > EP_MAXFRAG-nFrags; len += local->nmd_len, i--, local--, remote--)
52013 +    {
52014 +       /* copy down previous dma */
52015 +       elan3_sdram_copyq_to_sdram (dev, &dmabe, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Dmas[i]),  sizeof (E3_DMA));    /* PCI write block */
52016 +       
52017 +       dmabe.s.dma_type            = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_NORMAL, EP3_DMAFAILCOUNT);
52018 +       dmabe.s.dma_size            = local->nmd_len;
52019 +       dmabe.s.dma_source          = local->nmd_addr;
52020 +       dmabe.s.dma_dest            = remote->nmd_addr;
52021 +       dmabe.s.dma_destEvent       = (E3_Addr) 0;
52022 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (env->NodeId);
52023 +       dmabe.s.dma_srcEvent        = rxdRail->RxdElanAddr + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i-1]);
52024 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, env->NodeId);
52025 +       
52026 +       EPRINTF9 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x Cookies=%x.%x\n", rail->Generic.Name, rxd,
52027 +                 (long long) env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len, dmabe.s.dma_destCookieVProc, 
52028 +                 dmabe.s.dma_srcCookieVProc);
52029 +    }
52030 +    
52031 +    for (i = EP_MAXFRAG-nFrags; i < EP_MAXFRAG; i++)
52032 +       elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[i].ev_Count), 1);                            /* PCI write */
52033 +    
52034 +    /* Initialise the done event */
52035 +    elan3_sdram_writel (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 1);                                   /* PCI write */
52036 +    rxdMain->DoneEvent  = EP3_EVENT_ACTIVE;
52037 +
52038 +    ASSERT (rail->Generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->Generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
52039 +
52040 +    if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
52041 +    {
52042 +       /* Failed to issue the dma command, so copy the dma descriptor and queue it for retry */
52043 +       EPRINTF2 (DBG_RCVR, "%s: ep3rcvr_rpc_complete: queue rxd %p on retry thread\n", rail->Generic.Name, rxd);
52044 +       
52045 +       QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
52046 +    }
52047 +
52048 +    BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len);
52049 +}
52050 +       
52051 +void
52052 +ep3rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
52053 +{
52054 +    EP3_RAIL          *rail   = (EP3_RAIL *) commsRail->Rail;
52055 +    sdramaddr_t        qdescs = ((EP3_COMMS_RAIL *) commsRail)->QueueDescs;
52056 +    EP3_RCVR_RAIL     *rcvrRail;
52057 +    EP3_InputQueue     qdesc;
52058 +    sdramaddr_t        stack;
52059 +    unsigned long      flags;
52060 +
52061 +    KMEM_ZALLOC (rcvrRail, EP3_RCVR_RAIL *, sizeof (EP3_RCVR_RAIL), TRUE);
52062 +
52063 +    kcondvar_init (&rcvrRail->CleanupSleep);
52064 +    spin_lock_init (&rcvrRail->FreeDescLock);
52065 +    INIT_LIST_HEAD (&rcvrRail->FreeDescList);
52066 +    INIT_LIST_HEAD (&rcvrRail->DescBlockList);
52067 +
52068 +    rcvrRail->Generic.CommsRail = commsRail;
52069 +    rcvrRail->Generic.Rcvr      = rcvr;
52070 +
52071 +    rcvrRail->RcvrMain       = ep_alloc_main (&rail->Generic, sizeof (EP3_RCVR_RAIL_MAIN), 0, &rcvrRail->RcvrMainAddr);
52072 +    rcvrRail->RcvrElan       = ep_alloc_elan (&rail->Generic, sizeof (EP3_RCVR_RAIL_ELAN), 0, &rcvrRail->RcvrElanAddr);
52073 +    rcvrRail->InputQueueBase = ep_alloc_elan (&rail->Generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->InputQueueAddr);
52074 +    stack                    = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rcvrRail->ThreadStack);
52075 +
52076 +    rcvrRail->TotalDescCount = 0;
52077 +    rcvrRail->FreeDescCount  = 0;
52078 +
52079 +    /* Initialise the main/elan spin lock */
52080 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_lock), 0);
52081 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadLock.sl_seq),  0);
52082 +
52083 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_lock), 0);
52084 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingLock.sl_seq), 0);
52085 +    
52086 +    /* Initialise the receive lists */
52087 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), 0);
52088 +    
52089 +    /* Initialise the ThreadShould Halt */
52090 +    elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), 0);
52091 +
52092 +    /* Initialise pointer to the ep_rcvr_rail */
52093 +    elan3_sdram_writeq (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, MainAddr), (unsigned long) rcvrRail);
52094 +
52095 +    /* Initialise elan visible main memory */
52096 +    rcvrRail->RcvrMain->ThreadLock.sl_seq  = 0;
52097 +    rcvrRail->RcvrMain->PendingLock.sl_seq = 0;
52098 +    rcvrRail->RcvrMain->PendingDescsTailp  = 0;
52099 +
52100 +    /* initialise and copy down the input queue descriptor */
52101 +    qdesc.q_state          = E3_QUEUE_FULL;
52102 +    qdesc.q_base           = rcvrRail->InputQueueAddr;
52103 +    qdesc.q_top            = rcvrRail->InputQueueAddr + (rcvr->InputQueueEntries-1) * EP_INPUTQ_SIZE;
52104 +    qdesc.q_fptr           = rcvrRail->InputQueueAddr;
52105 +    qdesc.q_bptr           = rcvrRail->InputQueueAddr + EP_INPUTQ_SIZE;
52106 +    qdesc.q_size           = EP_INPUTQ_SIZE;
52107 +    qdesc.q_event.ev_Count = 0;
52108 +    qdesc.q_event.ev_Type  = 0;
52109 +
52110 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, qdescs + rcvr->Service * sizeof (EP3_InputQueue), sizeof (EP3_InputQueue));
52111 +
52112 +    spin_lock_irqsave (&rcvr->Lock, flags);
52113 +    rcvr->Rails[rail->Generic.Number] = &rcvrRail->Generic;
52114 +    rcvr->RailMask |= EP_RAIL2RAILMASK (rail->Generic.Number);
52115 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52116 +
52117 +    /* initialise and run the Elan thread to process the queue */
52118 +    IssueRunThread (rail, ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "ep3comms_rcvr"),
52119 +                                          rcvrRail->ThreadStack, stack, EP3_STACK_SIZE, 5,
52120 +                                          rail->RailElanAddr, rcvrRail->RcvrElanAddr, rcvrRail->RcvrMainAddr,
52121 +                                          EP_MSGQ_ADDR(rcvr->Service),
52122 +                                          rail->ElanCookies));
52123 +}
52124 +
52125 +void
52126 +ep3rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
52127 +{
52128 +    EP3_RAIL         *rail     = (EP3_RAIL *) commsRail->Rail;
52129 +    EP3_RCVR_RAIL    *rcvrRail = (EP3_RCVR_RAIL *) rcvr->Rails[rail->Generic.Number];  
52130 +    unsigned long     flags;
52131 +    struct list_head *el, *nel;
52132 +
52133 +    EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: removing rail\n", rail->Generic.Name);
52134 +
52135 +    /* flag the rail as no longer available */
52136 +    spin_lock_irqsave (&rcvr->Lock, flags);
52137 +    rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number);
52138 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52139 +    
52140 +    /* mark the input queue descriptor as full */
52141 +    SetQueueLocked(rail, ((EP3_COMMS_RAIL *)commsRail)->QueueDescs + rcvr->Service * sizeof (EP3_InputQueue));
52142 +
52143 +    /* need to halt the thread first         */
52144 +    /*   set ThreadShouldHalt in elan memory */
52145 +    /*   then trigger the event              */
52146 +    /*   and wait on haltWait                */
52147 +    elan3_sdram_writel  (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, ThreadShouldHalt), TRUE);
52148 +
52149 +    IssueSetevent (rail,  EP_MSGQ_ADDR(rcvr->Service) + offsetof(EP3_InputQueue, q_event));
52150 +
52151 +    spin_lock_irqsave (&rcvr->Lock, flags);
52152 +
52153 +    while (rcvrRail->ThreadHalted == 0)
52154 +    {
52155 +       rcvrRail->CleanupWaiting++;
52156 +       kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
52157 +    }
52158 +
52159 +    /* at this point the thread is halted and it has no envelopes */
52160 +
52161 +    /* we need to wait until all the rxd's in the list that are 
52162 +     * bound to the rail we are removing are not pending 
52163 +     */
52164 +    for (;;)
52165 +    {
52166 +       int mustWait = 0;
52167 +       
52168 +       list_for_each (el, &rcvr->ActiveDescList) {
52169 +           EP_RXD       *rxd     = list_entry (el,EP_RXD, Link);
52170 +           EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
52171 +
52172 +           if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING)
52173 +           {
52174 +               mustWait++;
52175 +               break;
52176 +           }
52177 +       }
52178 +       
52179 +       if (! mustWait)
52180 +           break;
52181 +
52182 +       EPRINTF1 (DBG_RCVR, "%s: ep3rcvr_del_rail: waiting for active rxd's to be returned\n", rail->Generic.Name);
52183 +
52184 +       rcvrRail->CleanupWaiting++;
52185 +       kcondvar_wait (&rcvrRail->CleanupSleep, &rcvr->Lock, &flags);
52186 +    }
52187 +
52188 +    /* at this point all rxd's in the list that are bound to the deleting rail are not pending */
52189 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
52190 +       EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
52191 +       EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) rxd->RxdRail;
52192 +       
52193 +       if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail))
52194 +       {
52195 +           /* here we need to unbind the remaining rxd's */
52196 +           rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
52197 +           rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
52198 +
52199 +           elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);  /* PCI write */
52200 +           elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);  /* PCI write */
52201 +
52202 +           UnbindRxdFromRail (rxd, rxdRail);
52203 +           FreeRxdRail(rcvrRail,  rxdRail );
52204 +       }
52205 +    }
52206 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52207 +    
52208 +    /* wait for all rxd's for this rail to become free */
52209 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
52210 +    while (rcvrRail->FreeDescCount != rcvrRail->TotalDescCount)
52211 +    {
52212 +       rcvrRail->FreeDescWaiting++;
52213 +       kcondvar_wait (&rcvrRail->FreeDescSleep, &rcvrRail->FreeDescLock, &flags);
52214 +    }
52215 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
52216 +
52217 +    /* can now remove the rail as it can no longer be used */
52218 +    spin_lock_irqsave (&rcvr->Lock, flags);
52219 +    rcvr->Rails[rail->Generic.Number] = NULL;
52220 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52221 +
52222 +    /* all the rxd's accociated with DescBlocks must be in the FreeDescList */
52223 +    ASSERT (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount);
52224 +
52225 +    /* run through the DescBlockList deleting them */
52226 +    while (!list_empty (&rcvrRail->DescBlockList))
52227 +       FreeRxdRailBlock (rcvrRail, list_entry(rcvrRail->DescBlockList.next, EP3_RXD_RAIL_BLOCK , Link));
52228 +
52229 +    /* it had better be empty after that */
52230 +    ASSERT ((rcvrRail->TotalDescCount == 0) && (rcvrRail->TotalDescCount == rcvrRail->FreeDescCount));
52231 +    
52232 +    ep_free_elan (&rail->Generic, rcvrRail->ThreadStack, EP3_STACK_SIZE);
52233 +    ep_free_elan (&rail->Generic, rcvrRail->InputQueueAddr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries);
52234 +    ep_free_elan (&rail->Generic, rcvrRail->RcvrElanAddr, sizeof (EP3_RCVR_RAIL_ELAN));
52235 +    ep_free_main (&rail->Generic, rcvrRail->RcvrMainAddr, sizeof (EP3_RCVR_RAIL_MAIN));
52236 +
52237 +    KMEM_FREE (rcvrRail, sizeof (EP3_RCVR_RAIL));
52238 +}
52239 +
52240 +EP_RXD *
52241 +ep3rcvr_steal_rxd (EP_RCVR_RAIL *r)
52242 +{
52243 +    EP3_RCVR_RAIL *rcvrRail = (EP3_RCVR_RAIL *) r;
52244 +    EP3_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
52245 +    EP_RCVR       *rcvr     = rcvrRail->Generic.Rcvr;
52246 +    E3_Addr        rxdElanAddr;
52247 +    unsigned long flags;
52248 +
52249 +    spin_lock_irqsave (&rcvr->Lock, flags);
52250 +
52251 +    LockRcvrThread (rcvrRail);
52252 +    if ((rxdElanAddr = elan3_sdram_readl (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs))) != 0)
52253 +    {
52254 +       sdramaddr_t  rxdElan  = ep_elan2sdram (&rail->Generic, rxdElanAddr);
52255 +       EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) (unsigned long) elan3_sdram_readq (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, MainAddr));
52256 +       EP_RXD      *rxd      = rxdRail->Generic.Rxd;
52257 +       sdramaddr_t  next;
52258 +       
52259 +       EPRINTF2 (DBG_RCVR, "%s: StealRxdFromOtherRail stealing rxd %p\n", rail->Generic.Name, rail);
52260 +       
52261 +       /* Remove the RXD from the pending desc list */
52262 +       if ((next = elan3_sdram_readl (rail->Device, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Next))) == 0)
52263 +           rcvrRail->RcvrMain->PendingDescsTailp = 0;
52264 +       elan3_sdram_writel (rail->Device, rcvrRail->RcvrElan + offsetof (EP3_RCVR_RAIL_ELAN, PendingDescs), next);
52265 +       UnlockRcvrThread (rcvrRail);
52266 +       
52267 +       UnbindRxdFromRail (rxd, rxdRail);
52268 +       
52269 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
52270 +       
52271 +       /* Mark rxdRail as no longer active */
52272 +       rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
52273 +       rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
52274 +       elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);
52275 +       elan3_sdram_writel (rail->Device, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);
52276 +       
52277 +       FreeRxdRail (rcvrRail, rxdRail);
52278 +
52279 +       return rxd;
52280 +    }
52281 +
52282 +    UnlockRcvrThread (rcvrRail);
52283 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52284 +
52285 +    return NULL;
52286 +}
52287 +
52288 +long
52289 +ep3rcvr_check (EP_RCVR_RAIL *r, long nextRunTime)
52290 +{
52291 +    EP3_RCVR_RAIL    *rcvrRail = (EP3_RCVR_RAIL *) r;
52292 +    EP3_RAIL         *rail     = RCVR_TO_RAIL (rcvrRail);
52293 +    EP_RCVR          *rcvr     = rcvrRail->Generic.Rcvr;
52294 +    EP_COMMS_SUBSYS *subsys    = rcvr->Subsys;
52295 +    EP_SYS           *sys       = subsys->Subsys.Sys;
52296 +    EP_RXD           *rxd;
52297 +    unsigned long     flags;
52298 +
52299 +    if (rcvrRail->FreeDescCount < ep_rxd_lowat && !AllocateRxdRailBlock (rcvrRail))
52300 +    {
52301 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow rxd rail pool\n", rail->Generic.Name);
52302 +               
52303 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
52304 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
52305 +    }
52306 +    
52307 +    if (rcvrRail->ThreadWaiting && (rxd = StealRxdFromOtherRail (rcvr)) != NULL)
52308 +    {
52309 +       /* Map the receive buffer into this rail as well */
52310 +       EPRINTF4 (DBG_RCVR, "%s: mapping rxd->Data (%08x.%08x.%08x) into this rails\n",
52311 +                 rail->Generic.Name, rxd->Data.nmd_addr,rxd->Data.nmd_len, rxd->Data.nmd_attr);
52312 +
52313 +       spin_lock_irqsave (&rcvr->Lock, flags);
52314 +       if ((!(EP_NMD_RAILMASK (&rxd->Data) & EP_RAIL2RAILMASK(rail->Generic.Number)) &&                /* not already mapped and */
52315 +            ep_nmd_map_rails (sys, &rxd->Data, EP_RAIL2RAILMASK(rail->Generic.Number)) == 0) ||        /* failed to map it */
52316 +           ep3rcvr_queue_rxd (rxd, &rcvrRail->Generic))                                                /* or failed to queue it */
52317 +       {
52318 +           EPRINTF5 (DBG_RCVR,"%s: stolen rcvr=%p rxd=%p -> rnum=%d rcvrRail=%p (failed)\n", 
52319 +                     rail->Generic.Name, rcvr, rxd, rail->Generic.Number, rcvrRail);
52320 +               
52321 +           if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
52322 +               nextRunTime = lbolt + RESOURCE_RETRY_TIME;
52323 +       }
52324 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
52325 +    }
52326 +    
52327 +    return nextRunTime;
52328 +}
52329 +
52330 +static void
52331 +ep3rcvr_flush_filtering (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
52332 +{
52333 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
52334 +    EP3_RAIL       *rail      = (EP3_RAIL *) commsRail->Generic.Rail;
52335 +    ELAN3_DEV      *dev       = rail->Device;
52336 +    sdramaddr_t    qdesc      = commsRail->QueueDescs + rcvr->Service*sizeof (EP3_InputQueue);
52337 +    E3_Addr        qTop       = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_top));
52338 +    E3_Addr        qBase      = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_base));
52339 +    E3_Addr        qSize      = elan3_sdram_readl (dev,qdesc + offsetof (EP3_InputQueue, q_size));
52340 +    E3_uint32      nfptr, qbptr;
52341 +    unsigned long  flags;
52342 +    
52343 +    spin_lock_irqsave (&rcvr->Lock, flags);
52344 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
52345 +
52346 +    nfptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_fptr));
52347 +    qbptr = elan3_sdram_readl (dev, qdesc + offsetof (EP3_InputQueue, q_bptr));
52348 +    
52349 +    if (nfptr == qTop)
52350 +       nfptr = qBase;
52351 +    else
52352 +       nfptr += qSize;
52353 +    
52354 +    while (nfptr != qbptr)
52355 +    {
52356 +       unsigned nodeId = elan3_sdram_readl (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) + 
52357 +                                      offsetof (EP_ENVELOPE, NodeId));
52358 +       
52359 +       EPRINTF3 (DBG_DISCON, "%s: ep3rcvr_flush_filtering: nodeId=%d State=%d\n", rail->Generic.Name, nodeId, rail->Generic.Nodes[nodeId].State);
52360 +       
52361 +       if (rail->Generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE)
52362 +           elan3_sdram_writel (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr) + 
52363 +                         offsetof (EP_ENVELOPE, Version), 0);
52364 +       
52365 +       if (nfptr == qTop)
52366 +           nfptr = qBase;
52367 +       else
52368 +           nfptr += qSize;
52369 +    }
52370 +    
52371 +    UnlockRcvrThread (rcvrRail);                                                                               /* PCI unlock */
52372 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52373 +}
52374 +
52375 +static void
52376 +ep3rcvr_flush_flushing (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
52377 +{
52378 +    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
52379 +    struct list_head *el, *nel;
52380 +    unsigned long     flags;
52381 +
52382 +    spin_lock_irqsave (&rcvr->Lock, flags);
52383 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
52384 +    
52385 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
52386 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
52387 +       EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
52388 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
52389 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
52390 +
52391 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
52392 +           continue;
52393 +       
52394 +       EPRINTF6 (DBG_DISCON, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p state %x.%x elan node %d\n", rail->Generic.Name,
52395 +                 rcvr, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, env->NodeId);
52396 +       
52397 +       switch (rxd->State)
52398 +       {
52399 +       case EP_RXD_FREE:
52400 +           printk ("ep3rcvr_flush_flushing: rxd state is free but bound to a fail\n");
52401 +           break;
52402 +
52403 +       case EP_RXD_RECEIVE_ACTIVE:
52404 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                /* incomplete message receive */
52405 +           {
52406 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n", 
52407 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
52408 +               
52409 +               nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
52410 +               continue;
52411 +           }
52412 +           break;
52413 +           
52414 +       default:
52415 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
52416 +
52417 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))    /* incomplete RPC */
52418 +           {
52419 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n", 
52420 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
52421 +               
52422 +               EP_INVALIDATE_XID (rxd->MsgXid);                        /* Ignore any previous NMD map responses */
52423 +               
52424 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
52425 +               continue;
52426 +           }
52427 +           break;
52428 +
52429 +       case EP_RXD_BEEN_ABORTED:
52430 +           printk ("ep3rcvr_flush_flushing: rxd state is aborted but bound to a fail\n");
52431 +           break;
52432 +       }
52433 +
52434 +       EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n", 
52435 +                 rail->Generic.Name, rcvr, rxd, env->NodeId);
52436 +    }    
52437 +
52438 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
52439 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52440 +}
52441 +
52442 +void
52443 +ep3rcvr_flush_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
52444 +{
52445 +    EP3_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
52446 +
52447 +    switch (rail->Generic.CallbackStep)
52448 +    {
52449 +    case EP_CB_FLUSH_FILTERING:
52450 +       ep3rcvr_flush_filtering (rcvr, rcvrRail);
52451 +       break;
52452 +
52453 +    case EP_CB_FLUSH_FLUSHING:
52454 +       ep3rcvr_flush_flushing (rcvr, rcvrRail);
52455 +       break;
52456 +    }
52457 +}
52458 +
52459 +void
52460 +ep3rcvr_failover_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
52461 +{
52462 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
52463 +    EP3_RAIL         *rail   = RCVR_TO_RAIL (rcvrRail);
52464 +    ELAN3_DEV        *dev    = rail->Device;
52465 +    struct list_head *el, *nel;
52466 +    unsigned long     flags;
52467 +#ifdef SUPPORT_RAIL_FAILOVER
52468 +    EP_SYS           *sys    = subsys->Subsys.Sys;
52469 +#endif
52470 +   
52471 +    spin_lock_irqsave (&rcvr->Lock, flags);
52472 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
52473 +    
52474 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
52475 +       EP_RXD             *rxd      = list_entry (el, EP_RXD, Link);
52476 +       EP3_RXD_RAIL       *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
52477 +       EP_ENVELOPE        *env      = &rxd->RxdMain->Envelope;
52478 +       EP_NODE_RAIL       *nodeRail = &rail->Generic.Nodes[env->NodeId];
52479 +#ifdef SUPPORT_RAIL_FAILOVER
52480 +       EP_MANAGER_MSG_BODY msgBody;
52481 +       EP_NODE            *node     = &sys->Nodes[env->NodeId];
52482 +#endif
52483 +       
52484 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED)
52485 +           continue;
52486 +
52487 +       EPRINTF6 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p elan node %d state %x.%x\n", rail->Generic.Name, rcvr, rxd, env->NodeId,
52488 +                 rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent);
52489 +
52490 +       switch (rxd->State)
52491 +       {
52492 +       case EP_RXD_FREE:
52493 +           printk ("ep4rcvr_failover_callback: rxd state is free but bound to a fail\n");
52494 +           break;
52495 +
52496 +       case EP_RXD_RECEIVE_ACTIVE:
52497 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                /* incomplete message receive */
52498 +           {
52499 +               EPRINTF4 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
52500 +               
52501 +               UnbindRxdFromRail (rxd, rxdRail);
52502 +               
52503 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
52504 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
52505 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
52506 +               
52507 +               /* clear the data event - the done event should already be zero */
52508 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
52509 +               
52510 +               FreeRxdRail (rcvrRail, rxdRail);
52511 +               
52512 +               /* epcomms thread will requeue on different rail */
52513 +               ep_kthread_schedule (&subsys->Thread, lbolt);
52514 +               continue;
52515 +           }
52516 +           break;
52517 +
52518 +       default:
52519 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
52520 +
52521 +#ifdef SUPPORT_RAIL_FAILOVER
52522 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent) && !(EP_IS_NO_FAILOVER(env->Attr)))         /* incomplete RPC, which can be failed over  */
52523 +           {
52524 +               EPRINTF7 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p State %x.%x Xid %llxx MsgXid %llxx nodeId %d - failover\n", 
52525 +                         rail->Generic.Name, rxd, rxdRail->RxdMain->DataEvent, rxdRail->RxdMain->DoneEvent, 
52526 +                         (long long) env->Xid.Unique, (long long) rxd->MsgXid.Unique, env->NodeId);
52527 +               
52528 +               if (EP_XID_INVALID(rxd->MsgXid))
52529 +                   rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
52530 +               
52531 +               /* XXXX maybe only send the message if the node failover retry is now ? */
52532 +               msgBody.Failover.Xid      = env->Xid;
52533 +               msgBody.Failover.Railmask = node->ConnectedRails;
52534 +               
52535 +               ep_send_message (&rail->Generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody);
52536 +               
52537 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
52538 +               continue;
52539 +           }
52540 +#endif
52541 +           break;
52542 +
52543 +       case EP_RXD_BEEN_ABORTED:
52544 +           printk ("ep3rcvr_failover_callback: rxd state is aborted but bound to a rail\n");
52545 +           break;
52546 +       }
52547 +
52548 +       EPRINTF3 (DBG_FAILOVER, "%s: ep3rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->Generic.Name, rxd, env->NodeId);
52549 +    }
52550 +    
52551 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
52552 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52553 +}
52554 +
52555 +void
52556 +ep3rcvr_disconnect_callback (EP_RCVR *rcvr, EP3_RCVR_RAIL *rcvrRail)
52557 +{
52558 +    EP3_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
52559 +    ELAN3_DEV        *dev = rail->Device;
52560 +    struct list_head *el, *nel;
52561 +    struct list_head  rxdList;
52562 +    unsigned long     flags;
52563 +
52564 +    INIT_LIST_HEAD (&rxdList);
52565 +    
52566 +    spin_lock_irqsave (&rcvr->Lock, flags);
52567 +    LockRcvrThread (rcvrRail);                                                                         /* PCI lock */
52568 +    
52569 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
52570 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
52571 +       EP3_RXD_RAIL *rxdRail  = (EP3_RXD_RAIL *) rxd->RxdRail;
52572 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
52573 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[env->NodeId];
52574 +       
52575 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
52576 +           continue;
52577 +
52578 +       EPRINTF4 (DBG_DISCON, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p elan node %d\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
52579 +
52580 +       switch (rxd->State)
52581 +       {
52582 +       case EP_RXD_FREE:
52583 +           printk ("ep3rcvr_disconnect_callback: rxd state is free but bound to a rail\n");
52584 +           break;
52585 +
52586 +       case EP_RXD_RECEIVE_ACTIVE:
52587 +           if (rxdRail->RxdMain->DataEvent == EP3_EVENT_ACTIVE)                        /* incomplete message receive */
52588 +           {
52589 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->Generic.Name, rcvr, rxd, env->NodeId);
52590 +               
52591 +               UnbindRxdFromRail (rxd, rxdRail);
52592 +               
52593 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
52594 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
52595 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
52596 +               
52597 +               /* clear the data event - the done event should already be zero */
52598 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
52599 +               
52600 +               FreeRxdRail (rcvrRail, rxdRail);
52601 +
52602 +               /* remark it as pending if it was partially received */
52603 +               rxd->RxdMain->Len = EP_RXD_PENDING;
52604 +               
52605 +               /* epcomms thread will requeue on different rail */
52606 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
52607 +               continue;
52608 +           }
52609 +           break;
52610 +
52611 +       default:
52612 +           EP_ASSERT (&rail->Generic, EP_IS_RPC(env->Attr));
52613 +
52614 +           if (!EP3_EVENT_FIRED (rxdRail->DoneCookie, rxdRail->RxdMain->DoneEvent))    /* incomplete RPC */
52615 +           {
52616 +               EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - not able to failover\n",
52617 +                         rail->Generic.Name, rcvr, rxd, env->NodeId);
52618 +           
52619 +               /* Mark as no longer active */
52620 +               rxdRail->RxdMain->DataEvent = EP3_EVENT_PRIVATE;
52621 +               rxdRail->RxdMain->DoneEvent = EP3_EVENT_PRIVATE;
52622 +               
52623 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count), 0);       /* PCI write */
52624 +               elan3_sdram_writel (dev, rxdRail->RxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count), 0);       /* PCI write */
52625 +               
52626 +               UnbindRxdFromRail (rxd, rxdRail);
52627 +               FreeRxdRail (rcvrRail, rxdRail);
52628 +
52629 +               /* Ignore any previous NMD/failover responses */
52630 +               EP_INVALIDATE_XID (rxd->MsgXid);
52631 +               
52632 +               /* Remove from active list */
52633 +               list_del (&rxd->Link);
52634 +               
52635 +               if (rxd->State == EP_RXD_RPC_IN_PROGRESS)                               /* owned by user .... */
52636 +                   rxd->State = EP_RXD_BEEN_ABORTED;
52637 +               else                                                                    /* queue for completion */
52638 +               {
52639 +                   rxd->RxdMain->Len = EP_CONN_RESET;                                  /* ensure ep_rxd_status() fails */
52640 +                   list_add_tail (&rxd->Link, &rxdList);
52641 +               }
52642 +               continue;
52643 +           }
52644 +           break;
52645 +
52646 +       case EP_RXD_BEEN_ABORTED:
52647 +           printk ("ep3rcvr_disconnect_callback: rxd state is aborted but bound to a rail\n");
52648 +           break;
52649 +       }
52650 +           
52651 +       EPRINTF4 (DBG_RCVR, "%s: ep3rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
52652 +                 rail->Generic.Name, rcvr, rxd, env->NodeId);
52653 +    }
52654 +    
52655 +    UnlockRcvrThread (rcvrRail);                                                                       /* PCI unlock */
52656 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
52657 +
52658 +    while (! list_empty (&rxdList)) 
52659 +    {
52660 +       EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link);
52661 +
52662 +       list_del (&rxd->Link);
52663 +
52664 +       rxd->Handler (rxd);
52665 +    }
52666 +}
52667 +
52668 +void
52669 +ep3rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r)
52670 +{
52671 +    EP3_RXD_RAIL *rxdRail = (EP3_RXD_RAIL *) r;
52672 +    sdramaddr_t   rxdElan = rxdRail->RxdElan;
52673 +    EP3_RAIL     *rail    = RCVR_TO_RAIL (rxdRail->Generic.RcvrRail);
52674 +    ELAN3_DEV    *dev     = rail->Device;
52675 +
52676 +    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
52677 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Count)),
52678 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[0].ev_Type)),
52679 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Count)),
52680 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[1].ev_Type)));
52681 +    (di->func)(di->arg, "      ChainEvent=%x.%x %x.%x\n",
52682 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Count)),
52683 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[2].ev_Type)),
52684 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Count)),
52685 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, ChainEvent[3].ev_Type)));
52686 +    (di->func)(di->arg, "      DataEvent=%x.%x DoneEvent=%x.%x\n",
52687 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Count)),
52688 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DataEvent.ev_Type)),
52689 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Count)),
52690 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, DoneEvent.ev_Type)));
52691 +    (di->func)(di->arg, "      Data=%x Len=%x\n",
52692 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_addr)),
52693 +              elan3_sdram_readl (dev, rxdElan + offsetof (EP3_RXD_RAIL_ELAN, Data.nmd_len)));
52694 +}
52695 +
52696 +void
52697 +ep3rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r)
52698 +{
52699 +    EP3_RCVR_RAIL  *rcvrRail  = (EP3_RCVR_RAIL *) r;
52700 +    EP3_COMMS_RAIL *commsRail = (EP3_COMMS_RAIL *) rcvrRail->Generic.CommsRail;
52701 +    EP3_RAIL       *rail      = RCVR_TO_RAIL (rcvrRail);
52702 +    ELAN3_DEV      *dev       = rail->Device;
52703 +    sdramaddr_t     queue     = commsRail->QueueDescs + rcvrRail->Generic.Rcvr->Service * sizeof (EP3_InputQueue);
52704 +    E3_Addr         qbase      = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base));
52705 +    E3_Addr         qtop       = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top));
52706 +    E3_uint32       qsize      = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size));
52707 +    int             freeCount  = 0;
52708 +    int             blockCount = 0;
52709 +    unsigned long   flags;
52710 +    struct list_head *el;
52711 +
52712 +    spin_lock_irqsave (&rcvrRail->FreeDescLock, flags);
52713 +    list_for_each (el, &rcvrRail->FreeDescList)
52714 +       freeCount++;
52715 +    list_for_each (el, &rcvrRail->DescBlockList)
52716 +       blockCount++;
52717 +    spin_unlock_irqrestore (&rcvrRail->FreeDescLock, flags);
52718 +
52719 +    (di->func)(di->arg, "                 Rail %d FreeDesc %d (%d) Total %d Blocks %d %s\n",
52720 +              rail->Generic.Number, rcvrRail->FreeDescCount, freeCount, rcvrRail->TotalDescCount, blockCount, 
52721 +              rcvrRail->ThreadWaiting ? "ThreadWaiting" : "");
52722 +    
52723 +    (di->func)(di->arg, "                 InputQueue state=%x bptr=%x size=%x top=%x base=%x fptr=%x\n",
52724 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_state)),
52725 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_bptr)),
52726 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_size)),
52727 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_top)),
52728 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_base)),
52729 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr)));
52730 +    (di->func)(di->arg, "                            event=%x.%x [%x.%x] wevent=%x.%x\n",
52731 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Type)),
52732 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Count)),
52733 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Source)),
52734 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_event.ev_Dest)),
52735 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wevent)),
52736 +              elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_wcount)));
52737 +    
52738 +    LockRcvrThread (rcvrRail);
52739 +    {
52740 +       E3_Addr     nfptr = elan3_sdram_readl (dev, queue + offsetof (EP3_InputQueue, q_fptr));
52741 +       EP_ENVELOPE env;
52742 +       
52743 +       if (nfptr == qtop)
52744 +           nfptr = qbase;
52745 +       else
52746 +           nfptr += qsize;
52747 +
52748 +       while (nfptr != elan3_sdram_readl (dev, queue + offsetof (E3_Queue, q_bptr)))
52749 +       {
52750 +           elan3_sdram_copyl_from_sdram (dev, rcvrRail->InputQueueBase + (nfptr - rcvrRail->InputQueueAddr),
52751 +                                         &env, sizeof (EP_ENVELOPE));
52752 +           
52753 +           (di->func)(di->arg, "                 ENVELOPE Version=%x Attr=%x Xid=%08x.%08x.%016llx\n",
52754 +                      env.Version, env.Attr, env.Xid.Generation, env.Xid.Handle, (long long) env.Xid.Unique);
52755 +           (di->func)(di->arg, "                          NodeId=%x Range=%x TxdRail=%x TxdMain=%x.%x.%x\n",
52756 +                      env.NodeId, env.Range, env.TxdRail, env.TxdMain.nmd_addr,
52757 +                      env.TxdMain.nmd_len, env.TxdMain.nmd_attr);
52758 +           
52759 +           
52760 +           if (nfptr == qtop)
52761 +               nfptr = qbase;
52762 +           else
52763 +               nfptr += qsize;
52764 +       }
52765 +    }
52766 +    UnlockRcvrThread (rcvrRail);
52767 +}
52768 +
52769 +void
52770 +ep3rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) {
52771 +    /* no stats here yet */
52772 +    /* EP3_RCVR_RAIL * ep4rcvr_rail = (EP3_RCVR_RAIL *) rcvr_rail; */
52773 +}
52774 +
52775 diff -urN clean/drivers/net/qsnet/ep/epcommsRx_elan4.c linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan4.c
52776 --- clean/drivers/net/qsnet/ep/epcommsRx_elan4.c        1969-12-31 19:00:00.000000000 -0500
52777 +++ linux-2.6.9/drivers/net/qsnet/ep/epcommsRx_elan4.c  2005-07-20 07:35:37.000000000 -0400
52778 @@ -0,0 +1,1765 @@
52779 +/*
52780 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
52781 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
52782 + *
52783 + *    For licensing information please see the supplied COPYING file
52784 + *
52785 + */
52786 +
52787 +#ident "@(#)$Id: epcommsRx_elan4.c,v 1.35.2.1 2005/07/20 11:35:37 mike Exp $"
52788 +/*      $Source: /cvs/master/quadrics/epmod/epcommsRx_elan4.c,v $ */
52789 +
52790 +#include <qsnet/kernel.h>
52791 +
52792 +#include <elan/kcomm.h>
52793 +#include <elan/epsvc.h>
52794 +#include <elan/epcomms.h>
52795 +
52796 +#include "debug.h"
52797 +#include "kcomm_vp.h"
52798 +#include "kcomm_elan4.h"
52799 +#include "epcomms_elan4.h"
52800 +
52801 +#include <elan4/trtype.h>
52802 +
52803 +#define RCVR_TO_COMMS(rcvrRail)                ((EP4_COMMS_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail)
52804 +#define RCVR_TO_RAIL(rcvrRail)         ((EP4_RAIL *) ((EP_RCVR_RAIL *) rcvrRail)->CommsRail->Rail)
52805 +#define RCVR_TO_DEV(rcvrRail)          (RCVR_TO_RAIL(rcvrRail)->r_ctxt.ctxt_dev)
52806 +#define RCVR_TO_SUBSYS(rcvrRail)       (((EP_RCVR_RAIL *) rcvrRail)->Rcvr->Subsys)
52807 +
52808 +#define RXD_TO_RCVR(txdRail)           ((EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail)
52809 +#define RXD_TO_RAIL(txdRail)           RCVR_TO_RAIL(RXD_TO_RCVR(rxdRail))
52810 +
52811 +static void rxd_interrupt (EP4_RAIL *rail, void *arg);
52812 +
52813 +static __inline__ void 
52814 +__ep4_rxd_assert_free (EP4_RXD_RAIL *rxdRail, const char *file, const int line)
52815 +{
52816 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
52817 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
52818 +    register int i, failed = 0;
52819 +    
52820 +    for (i = 0; i <= EP_MAXFRAG; i++)
52821 +       if (((rxdRail)->rxd_main->rxd_sent[i] != EP4_STATE_FREE)) 
52822 +           failed |= (1 << i);
52823 +    
52824 +    if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_FREE))
52825 +       failed |= (1 << 5);
52826 +    if (((rxdRail)->rxd_main->rxd_done   != EP4_STATE_FREE)) 
52827 +       failed |= (1 << 6);
52828 +    
52829 +    if (sdram_assert)
52830 +    {
52831 +       if (((elan4_sdram_readq (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)) >> 32) != 0)) 
52832 +           failed |= (1 << 7);
52833 +       for (i = 0; i < EP_MAXFRAG; i++)
52834 +           if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)) >> 32) != 0)) 
52835 +               failed |= (1 << (8 + i));
52836 +       if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0)) 
52837 +           failed |= (1 << 12);
52838 +       if (((int)(elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32)) 
52839 +           failed |= (1 << 13);
52840 +    }
52841 +
52842 +    if (failed)
52843 +    {
52844 +       printk ("__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
52845 +
52846 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_free: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
52847 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
52848 +
52849 +       for (i = 0; i <= EP_MAXFRAG; i++)
52850 +           (rxdRail)->rxd_main->rxd_sent[i] = EP4_STATE_FREE;
52851 +
52852 +       (rxdRail)->rxd_main->rxd_failed = EP4_STATE_FREE;
52853 +       (rxdRail)->rxd_main->rxd_done   = EP4_STATE_FREE;
52854 +
52855 +       if (sdram_assert)
52856 +       {
52857 +           elan4_sdram_writew (RXD_TO_RAIL(rxdRail)->r_ctxt.ctxt_dev,
52858 +                               (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType) + 4, 0);
52859 +
52860 +           for (i = 0; i < EP_MAXFRAG; i++)
52861 +               elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType) + 4, 0);
52862 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0);
52863 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32);
52864 +       }
52865 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_free");
52866 +    }
52867 +}
52868 +
52869 +static __inline__ void
52870 +__ep4_rxd_assert_pending(EP4_RXD_RAIL *rxdRail, const char *file, const int line)
52871 +{ 
52872 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
52873 +    register int failed = 0;
52874 +
52875 +    failed |= ((rxdRail)->rxd_main->rxd_done != EP4_STATE_ACTIVE);
52876 +
52877 +    if (failed)
52878 +    {
52879 +       printk ("__ep4_rxd_assert_pending: %s - %d\n", file, line);
52880 +
52881 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_pending: %s - %d\n", file, line);
52882 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
52883 +
52884 +       (rxdRail)->rxd_main->rxd_done = EP4_STATE_ACTIVE;
52885 +
52886 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_pending");
52887 +    }
52888 +}
52889 +
52890 +static __inline__ void
52891 +__ep4_rxd_assert_private(EP4_RXD_RAIL *rxdRail, const char *file, const int line)
52892 +{
52893 +    EP4_RCVR_RAIL *rcvrRail = RXD_TO_RCVR(rxdRail);
52894 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
52895 +    register int failed = 0;
52896 +
52897 +    if (((rxdRail)->rxd_main->rxd_failed != EP4_STATE_ACTIVE)) failed |= (1 << 0);
52898 +    if (((rxdRail)->rxd_main->rxd_done != EP4_STATE_PRIVATE))  failed |= (1 << 1);
52899 +    
52900 +    if (sdram_assert)
52901 +    {
52902 +       if (((elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)) >> 32) != 0))           failed |= (1 << 2);
52903 +       if (((int) (elan4_sdram_readq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)) >> 32) != -32)) failed |= (1 << 3);
52904 +    }
52905 +
52906 +    if (failed)
52907 +    {
52908 +       printk ("__ep4_rxd_assert_private: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
52909 +
52910 +       ep_debugf (DBG_DEBUG, "__ep4_rxd_assert_private: failed=%x rxdRail=%p %s - %d\n", failed, rxdRail, file, line);
52911 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
52912 +
52913 +       (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
52914 +       (rxdRail)->rxd_main->rxd_done   = EP4_STATE_PRIVATE;
52915 +
52916 +       if (sdram_assert)
52917 +       {
52918 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType) + 4, 0);
52919 +           elan4_sdram_writew (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType) + 4, -32);
52920 +       }
52921 +
52922 +       EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "__ep4_rxd_assert_private");
52923 +    }
52924 +}
52925 +
52926 +static __inline__ void
52927 +__ep4_rxd_private_to_free (EP4_RXD_RAIL *rxdRail)
52928 +{
52929 +    register int i;
52930 +
52931 +    for (i = 0; i <= EP_MAXFRAG; i++)
52932 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_FREE;
52933 +
52934 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_FREE;
52935 +    rxdRail->rxd_main->rxd_done   = EP4_STATE_FREE;
52936 +}
52937 +
52938 +static __inline__ void
52939 +__ep4_rxd_force_private (EP4_RXD_RAIL *rxdRail)
52940 +{
52941 +    EP4_RAIL  *rail = RXD_TO_RAIL(rxdRail);
52942 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
52943 +
52944 +    (rxdRail)->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
52945 +    (rxdRail)->rxd_main->rxd_done = EP4_STATE_PRIVATE;
52946 +
52947 +    if (sdram_assert) 
52948 +       elan4_sdram_writeq (dev, (rxdRail)->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
52949 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
52950 +}
52951 +
52952 +#define EP4_RXD_ASSERT_FREE(rxdRail)           __ep4_rxd_assert_free(rxdRail, __FILE__, __LINE__)
52953 +#define EP4_RXD_ASSERT_PENDING(rxdRail)                __ep4_rxd_assert_pending(rxdRail, __FILE__, __LINE__)
52954 +#define EP4_RXD_ASSERT_PRIVATE(rxdRail)                __ep4_rxd_assert_private(rxdRail, __FILE__, __LINE__)
52955 +#define EP4_RXD_PRIVATE_TO_FREE(rxdRail)       __ep4_rxd_private_to_free(rxdRail)
52956 +#define EP4_RXD_FORCE_PRIVATE(rxdRail)         __ep4_rxd_force_private(rxdRail)
52957 +
52958 +static int
52959 +alloc_rxd_block (EP4_RCVR_RAIL *rcvrRail)
52960 +{
52961 +    EP4_RAIL           *rail = RCVR_TO_RAIL (rcvrRail);
52962 +    ELAN4_DEV          *dev  = rail->r_ctxt.ctxt_dev;
52963 +    EP4_RXD_RAIL_BLOCK *blk;
52964 +    EP4_RXD_RAIL_MAIN  *rxdMain;
52965 +    EP_ADDR            rxdMainAddr;
52966 +    sdramaddr_t                rxdElan;
52967 +    EP_ADDR            rxdElanAddr;
52968 +    EP4_RXD_RAIL       *rxdRail;
52969 +    unsigned long       flags;
52970 +    int                 i, j;
52971 +
52972 +    KMEM_ZALLOC (blk, EP4_RXD_RAIL_BLOCK *, sizeof (EP4_RXD_RAIL_BLOCK), 1);
52973 +
52974 +    if (blk == NULL)
52975 +       return 0;
52976 +
52977 +    if ((rxdElan = ep_alloc_elan (&rail->r_generic, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdElanAddr)) == (sdramaddr_t) 0)
52978 +    {
52979 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
52980 +       return 0;
52981 +    }
52982 +
52983 +    if ((rxdMain = ep_alloc_main (&rail->r_generic, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK, 0, &rxdMainAddr)) == (EP4_RXD_RAIL_MAIN *) NULL)
52984 +    {
52985 +       ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
52986 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
52987 +       return 0;
52988 +    }
52989 +
52990 +    if (ep4_reserve_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK, 0) != 0)
52991 +    {
52992 +       ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
52993 +       ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
52994 +       KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
52995 +
52996 +       return 0;
52997 +    }
52998 +
52999 +    for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++)
53000 +    {
53001 +       rxdRail->rxd_generic.RcvrRail = &rcvrRail->rcvr_generic;
53002 +       rxdRail->rxd_elan             = rxdElan;
53003 +       rxdRail->rxd_elan_addr        = rxdElanAddr;
53004 +       rxdRail->rxd_main             = rxdMain;
53005 +       rxdRail->rxd_main_addr        = rxdMainAddr;
53006 +
53007 +       /* reserve 128 bytes of "event" cq space for the chained STEN packets */
53008 +       if ((rxdRail->rxd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_RXD_STEN_CMD_NDWORDS)) == NULL)
53009 +           goto failed;
53010 +
53011 +       /* allocate a single word of "setevent" command space */
53012 +       if ((rxdRail->rxd_scq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
53013 +       {
53014 +           ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
53015 +           goto failed;
53016 +       }
53017 +
53018 +       /* initialise the completion events */
53019 +       for (j = 0; j <= EP_MAXFRAG; j++)
53020 +           rxdMain->rxd_sent[j] = EP4_STATE_FREE;
53021 +
53022 +       rxdMain->rxd_done   = EP4_STATE_FREE;
53023 +       rxdMain->rxd_failed = EP4_STATE_FREE;
53024 +
53025 +       /* initialise the scq for the thread */
53026 +       rxdMain->rxd_scq = rxdRail->rxd_scq->ecq_addr;
53027 +
53028 +       /* initialise the "start" event to copy the first STEN packet into the command queue */
53029 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType),
53030 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS));
53031 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopySource),
53032 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]));
53033 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CopyDest),
53034 +                           rxdRail->rxd_ecq->ecq_addr);
53035 +
53036 +       /* initialise the "chain" events to copy the next STEN packet into the command queue */
53037 +       for (j = 0; j < EP_MAXFRAG; j++)
53038 +       {
53039 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CountAndType),
53040 +                               E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
53041 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopySource),
53042 +                               rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j+1]));
53043 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j].ev_CopyDest),
53044 +                               rxdRail->rxd_ecq->ecq_addr);
53045 +       }
53046 +
53047 +       /* initialise the portions of the sten packets which don't change */
53048 +       for (j = 0; j < EP_MAXFRAG+1; j++)
53049 +       {
53050 +           if (j < EP_MAXFRAG)
53051 +               elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_dma_dstEvent),
53052 +                                   rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[j]));
53053 +           else
53054 +               elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_dma_dstEvent),
53055 +                                   rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done));
53056 +
53057 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_guard),
53058 +                               GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_RXD_STEN_RETRYCOUNT));
53059 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_cmd),
53060 +                               WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_sent[j])));
53061 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_ok_write_value),
53062 +                               EP4_STATE_FINISHED);
53063 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_guard),
53064 +                               GUARD_CMD | GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_RXD_STEN_RETRYCOUNT));
53065 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_fail_setevent),
53066 +                               SET_EVENT_CMD | (rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed)));
53067 +           elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[j].c_nop_cmd),
53068 +                               NOP_CMD);
53069 +       }
53070 +
53071 +       /* register a main interrupt cookie */
53072 +       ep4_register_intcookie (rail, &rxdRail->rxd_intcookie, rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
53073 +                               rxd_interrupt, rxdRail);
53074 +
53075 +       /* initialise the command stream for the done event */
53076 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_cmd),
53077 +                           WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_done)));
53078 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_write_value),
53079 +                           EP4_STATE_FINISHED);
53080 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd.c_intr_cmd),
53081 +                           INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT));
53082 +
53083 +       /* initialise the command stream for the fail event */
53084 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_cmd),
53085 +                           WRITE_DWORD_CMD | (rxdMainAddr + offsetof (EP4_RXD_RAIL_MAIN, rxd_failed)));
53086 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_write_value),
53087 +                           EP4_STATE_FAILED);
53088 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd.c_intr_cmd),
53089 +                           INTERRUPT_CMD | (rxdRail->rxd_intcookie.int_val << E4_MAIN_INT_SHIFT));
53090 +
53091 +       /* initialise the done and fail events */
53092 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
53093 +                           E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
53094 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopySource),
53095 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done_cmd));
53096 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CopyDest),
53097 +                           rxdRail->rxd_ecq->ecq_addr);
53098 +
53099 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType),
53100 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
53101 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopySource),
53102 +                           rxdElanAddr + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed_cmd));
53103 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CopyDest),
53104 +                           rxdRail->rxd_ecq->ecq_addr);
53105 +       
53106 +       /* initialise the pointer to the main memory portion */
53107 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main), 
53108 +                           rxdMainAddr);
53109 +
53110 +       /* move onto next descriptor */
53111 +       rxdElan     += EP4_RXD_RAIL_ELAN_SIZE;
53112 +       rxdElanAddr += EP4_RXD_RAIL_ELAN_SIZE;
53113 +       rxdMain      = (EP4_RXD_RAIL_MAIN *) ((unsigned long) rxdMain + EP4_RXD_RAIL_MAIN_SIZE);
53114 +       rxdMainAddr += EP4_RXD_RAIL_MAIN_SIZE;
53115 +    }
53116 +
53117 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
53118 +
53119 +    list_add  (&blk->blk_link, &rcvrRail->rcvr_blocklist);
53120 +
53121 +    rcvrRail->rcvr_totalcount += EP4_NUM_RXD_PER_BLOCK;
53122 +    rcvrRail->rcvr_freecount  += EP4_NUM_RXD_PER_BLOCK;
53123 +
53124 +    for (i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++)
53125 +       list_add (&blk->blk_rxds[i].rxd_generic.Link, &rcvrRail->rcvr_freelist);
53126 +
53127 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
53128 +
53129 +    return 1;
53130 +
53131 + failed:
53132 +    while (--i >= 0)
53133 +    {
53134 +       rxdRail--;
53135 +
53136 +       ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
53137 +       ep4_put_ecq (rail, rxdRail->rxd_scq, 1);
53138 +
53139 +       ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie);
53140 +    }
53141 +
53142 +    ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK);
53143 +    
53144 +    ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
53145 +    ep_free_elan (&rail->r_generic, rxdElanAddr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
53146 +    KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
53147 +
53148 +    return 0;
53149 +}
53150 +
53151 +
53152 +static void
53153 +free_rxd_block (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL_BLOCK *blk)
53154 +{
53155 +    EP4_RAIL     *rail = RCVR_TO_RAIL (rcvrRail);
53156 +    EP4_RXD_RAIL *rxdRail;
53157 +    unsigned long flags;
53158 +    int           i;
53159 +
53160 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
53161 +
53162 +    list_del (&blk->blk_link);
53163 +
53164 +    rcvrRail->rcvr_totalcount -= EP4_NUM_RXD_PER_BLOCK;
53165 +
53166 +    for (rxdRail = &blk->blk_rxds[0], i = 0; i < EP4_NUM_RXD_PER_BLOCK; i++, rxdRail++)
53167 +    {
53168 +       rcvrRail->rcvr_freecount--;
53169 +
53170 +       ep4_put_ecq (rail, rxdRail->rxd_ecq, EP4_RXD_STEN_CMD_NDWORDS);
53171 +       ep4_put_ecq (rail, rxdRail->rxd_scq, 1);
53172 +
53173 +       ep4_deregister_intcookie (rail, &rxdRail->rxd_intcookie);
53174 +
53175 +       list_del (&rxdRail->rxd_generic.Link);
53176 +    }
53177 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
53178 +
53179 +    ep4_release_dma_retries (rail, EP4_NUM_RXD_PER_BLOCK);
53180 +
53181 +    ep_free_main (&rail->r_generic, blk->blk_rxds[0].rxd_main_addr, EP4_RXD_RAIL_MAIN_SIZE * EP4_NUM_RXD_PER_BLOCK);
53182 +    ep_free_elan (&rail->r_generic, blk->blk_rxds[0].rxd_elan_addr, EP4_RXD_RAIL_ELAN_SIZE * EP4_NUM_RXD_PER_BLOCK);
53183 +
53184 +    KMEM_FREE (blk, sizeof (EP4_RXD_RAIL_BLOCK));
53185 +}
53186 +
53187 +static EP4_RXD_RAIL *
53188 +get_rxd_rail (EP4_RCVR_RAIL *rcvrRail)
53189 +{
53190 +    EP_COMMS_SUBSYS  *subsys = RCVR_TO_SUBSYS(rcvrRail);
53191 +    EP4_RXD_RAIL     *rxdRail;
53192 +    unsigned long flags;
53193 +    int low_on_rxds;
53194 +
53195 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
53196 +
53197 +    if (list_empty (&rcvrRail->rcvr_freelist))
53198 +       rxdRail = NULL;
53199 +    else
53200 +    {
53201 +       rxdRail = list_entry (rcvrRail->rcvr_freelist.next, EP4_RXD_RAIL, rxd_generic.Link);
53202 +
53203 +       EP4_RXD_ASSERT_FREE(rxdRail);
53204 +
53205 +       list_del (&rxdRail->rxd_generic.Link);
53206 +
53207 +       rcvrRail->rcvr_freecount--;
53208 +    }
53209 +    /* Wakeup the descriptor primer thread if there's not many left */
53210 +    low_on_rxds = (rcvrRail->rcvr_freecount < ep_rxd_lowat);
53211 +
53212 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
53213 +
53214 +    if (low_on_rxds)
53215 +       ep_kthread_schedule (&subsys->Thread, lbolt);
53216 +
53217 +    return (rxdRail);
53218 +}
53219 +
53220 +static void
53221 +free_rxd_rail (EP4_RCVR_RAIL *rcvrRail, EP4_RXD_RAIL *rxdRail)
53222 +{
53223 +    unsigned long flags;
53224 +
53225 +    EP4_RXD_ASSERT_FREE(rxdRail);
53226 +
53227 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
53228 +    
53229 +    list_add (&rxdRail->rxd_generic.Link, &rcvrRail->rcvr_freelist);
53230 +
53231 +    rcvrRail->rcvr_freecount++;
53232 +
53233 +    if (rcvrRail->rcvr_freewaiting)
53234 +    {
53235 +       rcvrRail->rcvr_freewaiting--;
53236 +       kcondvar_wakeupall (&rcvrRail->rcvr_freesleep, &rcvrRail->rcvr_freelock);
53237 +    }
53238 +
53239 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
53240 +}
53241 +
53242 +static void
53243 +bind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail)
53244 +{
53245 +    EP4_RAIL *rail = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail);
53246 +
53247 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
53248 +
53249 +    EPRINTF3 (DBG_RCVR, "%s: bind_rxd_rail: rxd=%p rxdRail=%p\n",  rail->r_generic.Name, rxd, rxdRail);
53250 +
53251 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_rxd), rxd->NmdMain.nmd_addr);                      /* PCI write */
53252 +
53253 +    rxd->RxdRail             = &rxdRail->rxd_generic;
53254 +    rxdRail->rxd_generic.Rxd = rxd;
53255 +}
53256 +
53257 +static void
53258 +unbind_rxd_rail (EP_RXD *rxd, EP4_RXD_RAIL *rxdRail)
53259 +{
53260 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
53261 +    
53262 +    ASSERT (SPINLOCK_HELD (&rxd->Rcvr->Lock));
53263 +    ASSERT (rxd->RxdRail == &rxdRail->rxd_generic && rxdRail->rxd_generic.Rxd == rxd);
53264 +
53265 +    EP4_RXD_ASSERT_PRIVATE (rxdRail);
53266 +
53267 +    EPRINTF3 (DBG_RCVR, "%s: unbind_rxd_rail: rxd=%p rxdRail=%p\n",  RCVR_TO_RAIL(rcvrRail)->r_generic.Name, rxd, rxdRail);
53268 +
53269 +    rxd->RxdRail             = NULL;
53270 +    rxdRail->rxd_generic.Rxd = NULL;
53271 +
53272 +    if (rcvrRail->rcvr_cleanup_waiting)
53273 +       kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rxd->Rcvr->Lock);
53274 +    rcvrRail->rcvr_cleanup_waiting = 0;
53275 +
53276 +    EP4_RXD_PRIVATE_TO_FREE (rxdRail);
53277 +}
53278 +
53279 +
53280 +static void
53281 +rcvr_stall_interrupt (EP4_RAIL *rail, void *arg)
53282 +{
53283 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg;
53284 +    EP_RCVR       *rcvr     = rcvrRail->rcvr_generic.Rcvr;
53285 +    unsigned long  flags;
53286 +
53287 +    spin_lock_irqsave (&rcvr->Lock, flags);
53288 +    
53289 +    EPRINTF1 (DBG_RCVR, "rcvr_stall_interrupt: rcvrRail %p thread halted\n", rcvrRail);
53290 +
53291 +    rcvrRail->rcvr_thread_halted = 1;
53292 +
53293 +    kcondvar_wakeupall (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock);
53294 +
53295 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
53296 +}
53297 +
53298 +static void
53299 +rcvr_stall_haltop (ELAN4_DEV *dev, void *arg)
53300 +{
53301 +    EP4_RCVR_RAIL  *rcvrRail  = (EP4_RCVR_RAIL *) arg;
53302 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
53303 +    EP_RCVR        *rcvr      = rcvrRail->rcvr_generic.Rcvr;
53304 +    sdramaddr_t     qdesc     = ((EP4_COMMS_RAIL *) commsRail)->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
53305 +    E4_uint64       qbptr     = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
53306 +
53307 +    /* Mark the queue as full by writing the fptr */
53308 +    if (qbptr == (rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)))
53309 +       elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), rcvrRail->rcvr_slots_addr);
53310 +    else
53311 +       elan4_sdram_writeq (dev, qdesc + offsetof (E4_InputQueue, q_fptr), qbptr + EP_INPUTQ_SIZE);
53312 +
53313 +    /* Notify the thread that it should stall after processing any outstanding envelopes */
53314 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie),
53315 +                       rcvrRail->rcvr_stall_intcookie.int_val);
53316 +
53317 +    /* Issue a swtevent to the queue event to wake the thread up */
53318 +    ep4_set_event_cmd (rcvrRail->rcvr_resched, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent));
53319 +}
53320 +
53321 +static void
53322 +rxd_interrupt (EP4_RAIL *rail, void *arg)
53323 +{
53324 +    EP4_RXD_RAIL      *rxdRail  = (EP4_RXD_RAIL *) arg;
53325 +    EP4_RCVR_RAIL     *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
53326 +    EP_RCVR           *rcvr     = rcvrRail->rcvr_generic.Rcvr;
53327 +    EP4_RXD_RAIL_MAIN *rxdMain  = rxdRail->rxd_main;
53328 +    unsigned long      delay    = 1;
53329 +    EP_RXD            *rxd;
53330 +    EP_ENVELOPE       *env;
53331 +    unsigned long      flags;
53332 +
53333 +    spin_lock_irqsave (&rcvr->Lock, flags);
53334 +
53335 +    for (;;)
53336 +    {
53337 +       if (rxdMain->rxd_done == EP4_STATE_FINISHED || rxdMain->rxd_failed == EP4_STATE_FAILED)
53338 +           break;
53339 +
53340 +       /* The write to rxd_done could be held up in the PCI bridge even though
53341 +        * we've seen the interrupt cookie.  Unlike elan3, there is no possibility
53342 +        * of spurious interrupts since we flush the command queues on node 
53343 +        * disconnection and the txcallback mechanism */
53344 +       mb();
53345 +
53346 +       if (delay > EP4_EVENT_FIRING_TLIMIT)
53347 +       {
53348 +           spin_unlock_irqrestore (&rcvr->Lock, flags);
53349 +
53350 +           EP_ASSFAIL (RCVR_TO_RAIL(rcvrRail), "rxd_interrupt - not finished\n");
53351 +           return;
53352 +       }
53353 +       DELAY(delay);
53354 +       delay <<= 1;
53355 +    }
53356 +
53357 +    if (rxdMain->rxd_done != EP4_STATE_FINISHED)
53358 +    {
53359 +       EPRINTF8 (DBG_RETRY, "%s: rxd_interrupt: rxdRail %p retry: done=%d failed=%d NodeId=%d XID=%08x.%08x.%016llx\n",
53360 +                 rail->r_generic.Name, rxdRail, (int)rxdMain->rxd_done, (int)rxdMain->rxd_failed, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.NodeId,
53361 +                 rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Generation, rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Handle, 
53362 +                 (long long)rxdRail->rxd_generic.Rxd->RxdMain->Envelope.Xid.Unique);
53363 +    
53364 +       spin_lock (&rcvrRail->rcvr_retrylock);
53365 +
53366 +       rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;                        /* XXXX backoff ? */
53367 +
53368 +       list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist);
53369 +
53370 +       ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time);
53371 +       spin_unlock (&rcvrRail->rcvr_retrylock);
53372 +
53373 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
53374 +       return;
53375 +    }
53376 +    
53377 +    rxd = rxdRail->rxd_generic.Rxd;
53378 +    env = &rxd->RxdMain->Envelope;
53379 +
53380 +    /*
53381 +     * Note, since the thread will have sent the remote dma packet before copying 
53382 +     * the envelope, we must check that it has completed doing this,  we do this
53383 +     * by acquiring the spinlock against the thread which it only drops once it's
53384 +     * completed.
53385 +     */
53386 +    if (rxd->RxdMain->Len == EP_RXD_PENDING)
53387 +    {
53388 +       EP4_SPINENTER (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
53389 +                      &rcvrRail->rcvr_main->rcvr_thread_lock);
53390 +       
53391 +       EP4_SPINEXIT (rail->r_ctxt.ctxt_dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock),
53392 +                     &rcvrRail->rcvr_main->rcvr_thread_lock);
53393 +       
53394 +       EP4_ASSERT (rail, env->Version == EP_ENVELOPE_VERSION && rxd->RxdMain->Len != EP_RXD_PENDING);
53395 +    }
53396 +
53397 +    EPRINTF8 (DBG_RCVR, "%s: rxd_interrupt: rxd %p finished from %d XID %08x.%08x.%016llx len %d attr %x\n", rail->r_generic.Name, 
53398 +             rxd, rxd->RxdMain->Envelope.NodeId, rxd->RxdMain->Envelope.Xid.Generation, rxd->RxdMain->Envelope.Xid.Handle, 
53399 +             (long long)rxd->RxdMain->Envelope.Xid.Unique,  rxd->RxdMain->Len, rxd->RxdMain->Envelope.Attr);
53400 +
53401 +    rxdMain->rxd_done  = EP4_STATE_PRIVATE;
53402 +    rxd->Data.nmd_attr = EP_RAIL2RAILMASK (rail->r_generic.Number);
53403 +
53404 +    switch (rxd->State)
53405 +    {
53406 +    case EP_RXD_RECEIVE_ACTIVE:
53407 +       if (rxd->RxdMain->Len >= 0 && EP_IS_RPC(env->Attr))
53408 +           rxd->State = EP_RXD_RPC_IN_PROGRESS;
53409 +       else
53410 +       {
53411 +           rxd->State = EP_RXD_COMPLETED;
53412 +
53413 +           /* remove from active list */
53414 +           list_del (&rxd->Link);
53415 +
53416 +           unbind_rxd_rail (rxd, rxdRail);
53417 +           free_rxd_rail (rcvrRail, rxdRail);
53418 +       }
53419 +
53420 +       if (rxd->RxdMain->Len >= 0) {
53421 +           INC_STAT(rcvrRail->rcvr_generic.stats,rx);
53422 +           ADD_STAT(rcvrRail->rcvr_generic.stats,rx_len,rxd->RxdMain->Len);
53423 +           INC_STAT(rail->r_generic.Stats,rx);
53424 +           ADD_STAT(rail->r_generic.Stats,rx_len,rxd->RxdMain->Len);
53425 +       }
53426 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
53427 +       ep_rxd_received (rxd);
53428 +
53429 +       break;
53430 +
53431 +    case EP_RXD_PUT_ACTIVE:
53432 +    case EP_RXD_GET_ACTIVE:
53433 +       rxd->State = EP_RXD_RPC_IN_PROGRESS;
53434 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
53435 +       
53436 +       rxd->Handler (rxd);
53437 +       break;
53438 +
53439 +    case EP_RXD_COMPLETE_ACTIVE:
53440 +       rxd->State = EP_RXD_COMPLETED;
53441 +
53442 +       /* remove from active list */
53443 +       list_del (&rxd->Link);
53444 +
53445 +       unbind_rxd_rail (rxd, rxdRail);
53446 +       free_rxd_rail (rcvrRail, rxdRail);
53447 +
53448 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
53449 +
53450 +       rxd->Handler(rxd);
53451 +       break;
53452 +
53453 +    default:
53454 +       spin_unlock_irqrestore (&rcvr->Lock, flags);
53455 +
53456 +       printk ("%s: rxd_interrupt: rxd %p in invalid state %d\n", rail->r_generic.Name, rxd, rxd->State);
53457 +       /* NOTREACHED */
53458 +    }
53459 +}
53460 +
53461 +static void
53462 +ep4rcvr_flush_filtering (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
53463 +{
53464 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
53465 +    EP4_RAIL       *rail      = RCVR_TO_RAIL(rcvrRail);
53466 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
53467 +    sdramaddr_t    qdesc      = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
53468 +    E4_Addr        qbase      = rcvrRail->rcvr_slots_addr;
53469 +    E4_Addr        qlast      = qbase + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1);
53470 +    E4_uint64      qfptr, qbptr;
53471 +    unsigned long  flags;
53472 +    
53473 +    spin_lock_irqsave (&rcvr->Lock, flags);
53474 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53475 +    
53476 +    /* zip down the input queue and invalidate any envelope we find to a node which is locally passivated */
53477 +    qfptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr));
53478 +    qbptr = elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
53479 +
53480 +    while (qfptr != qbptr)
53481 +    {
53482 +       unsigned int nodeId = elan4_sdram_readl (dev, rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, NodeId));
53483 +
53484 +       EPRINTF3 (DBG_DISCON, "%s: ep4rcvr_flush_filtering: nodeId=%d State=%d\n", rail->r_generic.Name, nodeId, rail->r_generic.Nodes[nodeId].State);
53485 +       
53486 +       if (rail->r_generic.Nodes[nodeId].State == EP_NODE_LOCAL_PASSIVATE)
53487 +           elan4_sdram_writel (dev,  rcvrRail->rcvr_slots + (qfptr - qbase) + offsetof (EP_ENVELOPE, Version), 0);
53488 +       
53489 +       if (qfptr != qlast)
53490 +           qfptr += EP_INPUTQ_SIZE;
53491 +       else
53492 +           qfptr = qbase;
53493 +    }
53494 +
53495 +    /* Insert an setevent command into the thread's command queue
53496 +     * to ensure that all sten packets have completed */
53497 +    elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS);
53498 +    ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq);
53499 +    
53500 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53501 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
53502 +}
53503 +
53504 +static void
53505 +ep4rcvr_flush_flushing (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
53506 +{
53507 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
53508 +    ELAN4_DEV       *dev  = rail->r_ctxt.ctxt_dev;
53509 +    struct list_head *el, *nel;
53510 +    struct list_head  rxdList;
53511 +    unsigned long     flags;
53512 +
53513 +    INIT_LIST_HEAD (&rxdList);
53514 +    
53515 +    /* remove any sten packates which are retrying to nodes which are being passivated */
53516 +    spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags);
53517 +    list_for_each_safe (el, nel, &rcvrRail->rcvr_retrylist) {
53518 +       EP4_RXD_RAIL *rxdRail  = list_entry (el, EP4_RXD_RAIL, rxd_retry_link);
53519 +       EP_ENVELOPE  *env      = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope;
53520 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
53521 +
53522 +       if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
53523 +       {
53524 +           EPRINTF2 (DBG_XMTR, "%s; ep4rcvr_flush_flushing: removing rxdRail %p from retry list\n", rail->r_generic.Name, rxdRail);
53525 +           
53526 +           list_del (&rxdRail->rxd_retry_link);
53527 +       }
53528 +    }
53529 +    spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags);
53530 +
53531 +    spin_lock_irqsave (&rcvr->Lock, flags);
53532 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53533 +    
53534 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
53535 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
53536 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
53537 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
53538 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
53539 +
53540 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL (rxdRail, rcvrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
53541 +           continue;
53542 +       
53543 +       EPRINTF6 (DBG_DISCON, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p state %d elan node %d state %d\n", 
53544 +                 rail->r_generic.Name, rcvr, rxd, (int)rxdRail->rxd_main->rxd_done, env->NodeId, rxd->State);
53545 +       
53546 +       switch (rxd->State)
53547 +       {
53548 +       case EP_RXD_FREE:
53549 +           printk ("ep4rcvr_flush_flushing: rxd state is free but bound to a fail\n");
53550 +           break;
53551 +
53552 +       case EP_RXD_RECEIVE_ACTIVE:
53553 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete message receive */
53554 +           {
53555 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - passive\n", 
53556 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId);
53557 +               
53558 +               nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
53559 +               continue;
53560 +           }
53561 +           break;
53562 +           
53563 +       default:
53564 +           EP4_ASSERT (rail, EP_IS_RPC(env->Attr));
53565 +
53566 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete RPC */
53567 +           {
53568 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - active\n", 
53569 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId);
53570 +               
53571 +               EP_INVALIDATE_XID (rxd->MsgXid);                        /* Ignore any previous NMD map responses */
53572 +               
53573 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
53574 +               continue;
53575 +           }
53576 +           break;
53577 +
53578 +       case EP_RXD_BEEN_ABORTED:
53579 +           printk ("ep4rcvr_flush_flushing: rxd state is aborted but bound to a fail\n");
53580 +           break;
53581 +       }
53582 +
53583 +       EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_flush_flushing: rcvr %p rxd %p nodeId %d - finished\n", 
53584 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
53585 +    }    
53586 +
53587 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53588 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
53589 +}
53590 +
53591 +void
53592 +ep4rcvr_flush_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
53593 +{
53594 +    EP4_RAIL *rail = RCVR_TO_RAIL(rcvrRail);
53595 +
53596 +    switch (rail->r_generic.CallbackStep)
53597 +    {
53598 +    case EP_CB_FLUSH_FILTERING:
53599 +       ep4rcvr_flush_filtering (rcvr, rcvrRail);
53600 +       break;
53601 +
53602 +    case EP_CB_FLUSH_FLUSHING:
53603 +       ep4rcvr_flush_flushing (rcvr, rcvrRail);
53604 +       break;
53605 +    }
53606 +}
53607 +
53608 +void
53609 +ep4rcvr_failover_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
53610 +{
53611 +    EP_COMMS_SUBSYS  *subsys = rcvr->Subsys;
53612 +    EP4_RAIL         *rail   = RCVR_TO_RAIL (rcvrRail);
53613 +    ELAN4_DEV       *dev    = rail->r_ctxt.ctxt_dev;
53614 +    struct list_head *el, *nel;
53615 +    unsigned long     flags;
53616 +#if SUPPORT_RAIL_FAILOVER
53617 +    EP_SYS           *sys    = subsys->Subsys.Sys;
53618 +#endif
53619 +    
53620 +    spin_lock_irqsave (&rcvr->Lock, flags);
53621 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53622 +    
53623 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
53624 +       EP_RXD             *rxd      = list_entry (el, EP_RXD, Link);
53625 +       EP4_RXD_RAIL       *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
53626 +       EP_ENVELOPE        *env      = &rxd->RxdMain->Envelope;
53627 +       EP_NODE_RAIL       *nodeRail = &rail->r_generic.Nodes[env->NodeId];
53628 +#if SUPPORT_RAIL_FAILOVER
53629 +       EP_NODE            *node     = &sys->Nodes[env->NodeId];
53630 +       EP_MANAGER_MSG_BODY msgBody;
53631 +#endif
53632 +       
53633 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_PASSIVATED)
53634 +           continue;
53635 +
53636 +       EPRINTF5 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p elan node %d state %d\n", 
53637 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId, (int)rxdRail->rxd_main->rxd_done);
53638 +
53639 +       switch (rxd->State)
53640 +       {
53641 +       case EP_RXD_FREE:
53642 +           printk ("ep4rcvr_failover_callback: rxd state is free but bound to a rail\n");
53643 +           break;
53644 +
53645 +       case EP_RXD_RECEIVE_ACTIVE:
53646 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                        /* incomplete message receive */
53647 +           {
53648 +               EPRINTF4 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId);
53649 +
53650 +               EP4_RXD_FORCE_PRIVATE(rxdRail);
53651 +               
53652 +               unbind_rxd_rail (rxd, rxdRail);
53653 +
53654 +               free_rxd_rail (rcvrRail, rxdRail);
53655 +           
53656 +               /* epcomms thread will requeue on different rail */
53657 +               ep_kthread_schedule (&subsys->Thread, lbolt);
53658 +               continue;
53659 +           }
53660 +           break;
53661 +
53662 +       default:
53663 +           EP4_ASSERT (rail, EP_IS_RPC(env->Attr));
53664 +
53665 +#if SUPPORT_RAIL_FAILOVER
53666 +           /* XXXX - no rail failover for now .... */
53667 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE && !EP_IS_NO_FAILOVER(env->Attr))       /* incomplete RPC, which can be failed over */
53668 +           {
53669 +               EPRINTF6 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p State %d Xid %llxx MsgXid %llxx nodeId %d - failover\n", 
53670 +                         rail->r_generic.Name, rxd, rxd->State, (long long)env->Xid.Unique, (long long)rxd->MsgXid.Unique, env->NodeId);
53671 +               
53672 +               if (EP_XID_INVALID(rxd->MsgXid))
53673 +                   rxd->MsgXid = ep_xid_cache_alloc (sys, &rcvr->XidCache);
53674 +               
53675 +               /* XXXX maybe only send the message if the node failover retry is now ? */
53676 +               msgBody.Failover.Xid      = env->Xid;
53677 +               msgBody.Failover.Railmask = node->ConnectedRails;
53678 +               
53679 +               ep_send_message (&rail->r_generic, env->NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST, rxd->MsgXid, &msgBody);
53680 +               
53681 +               nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
53682 +               continue;
53683 +           }
53684 +#endif
53685 +           break;
53686 +
53687 +       case EP_RXD_BEEN_ABORTED:
53688 +           printk ("ep4rcvr_failover_callback: rxd state is aborted but bound to a fail\n");
53689 +           break;
53690 +       }
53691 +       EPRINTF3 (DBG_FAILOVER, "%s: ep4rcvr_failover_callback: rxd %p nodeId %d - finished\n", rail->r_generic.Name, rxd, env->NodeId);
53692 +    }
53693 +    
53694 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53695 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
53696 +}
53697 +
53698 +void
53699 +ep4rcvr_disconnect_callback (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail)
53700 +{
53701 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
53702 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
53703 +    struct list_head *el, *nel;
53704 +    struct list_head  rxdList;
53705 +    unsigned long     flags;
53706 +
53707 +    INIT_LIST_HEAD (&rxdList);
53708 +    
53709 +    spin_lock_irqsave (&rcvr->Lock, flags);
53710 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53711 +    
53712 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
53713 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
53714 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
53715 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
53716 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[env->NodeId];
53717 +       
53718 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
53719 +           continue;
53720 +
53721 +       EPRINTF5 (DBG_DISCON, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p elan node %d state %x\n", rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State);
53722 +
53723 +       switch (rxd->State)
53724 +       {
53725 +       case EP_RXD_FREE:
53726 +           printk ("ep4rcvr_disconnect_callback: rxd state is free but bound to a rail\n");
53727 +           break;
53728 +
53729 +       case EP_RXD_RECEIVE_ACTIVE:
53730 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE)                /* incomplete message receive */
53731 +           {
53732 +               EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - unbind\n", rail->r_generic.Name, rcvr, rxd, env->NodeId);
53733 +
53734 +               EP4_RXD_FORCE_PRIVATE (rxdRail);
53735 +               
53736 +               unbind_rxd_rail (rxd, rxdRail);
53737 +               free_rxd_rail (rcvrRail, rxdRail);
53738 +               
53739 +               /* remark it as pending if it was partially received */
53740 +               rxd->RxdMain->Len = EP_RXD_PENDING;
53741 +               
53742 +               /* epcomms thread will requeue on different rail */
53743 +               ep_kthread_schedule (&rcvr->Subsys->Thread, lbolt);
53744 +               continue;
53745 +           }
53746 +           break;
53747 +
53748 +       default:
53749 +           if (rxdRail->rxd_main->rxd_done == EP4_STATE_ACTIVE || rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE)            /* incomplete RPC */
53750 +           {
53751 +               EPRINTF5 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d state %x - not able to failover\n",
53752 +                         rail->r_generic.Name, rcvr, rxd, env->NodeId, rxd->State);
53753 +           
53754 +               EP4_RXD_FORCE_PRIVATE (rxdRail);
53755 +
53756 +               unbind_rxd_rail (rxd, rxdRail);
53757 +               free_rxd_rail (rcvrRail, rxdRail);
53758 +
53759 +               /* Ignore any previous NMD/failover responses */
53760 +               EP_INVALIDATE_XID (rxd->MsgXid);
53761 +               
53762 +               /* Remove from active list */
53763 +               list_del (&rxd->Link);
53764 +               
53765 +               if (rxd->State == EP_RXD_RPC_IN_PROGRESS)                               /* ownder by user .... */
53766 +                   rxd->State = EP_RXD_BEEN_ABORTED;
53767 +               else                                                                    /* queue for completion */
53768 +               {
53769 +                   rxd->RxdMain->Len = EP_CONN_RESET;                                  /* ensure ep_rxd_status() fails */
53770 +                   list_add_tail (&rxd->Link, &rxdList);
53771 +               }
53772 +               continue;
53773 +           }
53774 +           break;
53775 +
53776 +       case EP_RXD_BEEN_ABORTED:
53777 +           printk ("ep4rcvr_disconnect_callback: rxd state is aborted but bound to a rail\n");
53778 +           break;
53779 +       }
53780 +
53781 +       printk ("%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
53782 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
53783 +       EPRINTF4 (DBG_RCVR, "%s: ep4rcvr_disconnect_callback: rcvr %p rxd %p nodeId %d - finished\n", 
53784 +                 rail->r_generic.Name, rcvr, rxd, env->NodeId);
53785 +       ep4rcvr_display_rxd (&di_ep_debug, &rxdRail->rxd_generic);
53786 +    }
53787 +    
53788 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53789 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
53790 +
53791 +    while (! list_empty (&rxdList)) 
53792 +    {
53793 +       EP_RXD *rxd = list_entry (rxdList.next, EP_RXD, Link);
53794 +
53795 +       list_del (&rxd->Link);
53796 +
53797 +       rxd->Handler (rxd);
53798 +    }
53799 +}
53800 +
53801 +void
53802 +ep4rcvr_neterr_flush (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
53803 +{
53804 +    EP4_COMMS_RAIL *commsRail = RCVR_TO_COMMS(rcvrRail);
53805 +    EP4_RAIL       *rail      = RCVR_TO_RAIL (rcvrRail);
53806 +    ELAN4_DEV      *dev       = rail->r_ctxt.ctxt_dev;
53807 +    unsigned long   flags;
53808 +
53809 +    spin_lock_irqsave (&rcvr->Lock, flags);
53810 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53811 +
53812 +    /* Insert an setevent command into the thread's command queue
53813 +     * to ensure that all sten packets have completed */
53814 +    elan4_guard (rcvrRail->rcvr_ecq->ecq_cq, GUARD_ALL_CHANNELS);
53815 +    ep4comms_flush_setevent (commsRail, rcvrRail->rcvr_ecq->ecq_cq);
53816 +    
53817 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53818 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
53819 +}
53820 +
53821 +void
53822 +ep4rcvr_neterr_check (EP_RCVR *rcvr, EP4_RCVR_RAIL *rcvrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
53823 +{
53824 +    EP4_RAIL         *rail = RCVR_TO_RAIL (rcvrRail);
53825 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
53826 +    struct list_head *el;
53827 +    unsigned long     flags;
53828 +
53829 +    spin_lock_irqsave (&rcvr->Lock, flags);
53830 +    EP4_SPINENTER (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53831 +    
53832 +    list_for_each (el, &rcvr->ActiveDescList) {
53833 +       EP_RXD       *rxd      = list_entry (el, EP_RXD, Link);
53834 +       EP4_RXD_RAIL *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
53835 +       EP_ENVELOPE  *env      = &rxd->RxdMain->Envelope;
53836 +
53837 +       if (rxd->RxdMain->Len == EP_RXD_PENDING || !RXD_BOUND2RAIL(rxdRail,rcvrRail) || env->NodeId != nodeId)
53838 +           continue;
53839 +
53840 +       if (rxd->State == EP_RXD_RECEIVE_ACTIVE || rxd->State == EP_RXD_GET_ACTIVE)
53841 +       {
53842 +           EP_NETERR_COOKIE cookie;
53843 +           unsigned int     first, this;
53844 +
53845 +           if (rxd->State == EP_RXD_RECEIVE_ACTIVE)
53846 +               first = (EP_MAXFRAG+1) - (( EP_IS_MULTICAST(env->Attr) ? 1 : 0) + (env->nFrags == 0 ? 1 : env->nFrags));
53847 +           else
53848 +               first = (EP_MAXFRAG+1) - rxd->nFrags;
53849 +
53850 +           for (this = first; this < (EP_MAXFRAG+1); this++)
53851 +               if (rxdRail->rxd_main->rxd_sent[this] == EP4_STATE_ACTIVE)
53852 +                   break;
53853 +           
53854 +           if (this > first)
53855 +           {
53856 +               /* Look at the last completed STEN packet and if its neterr cookie matches, then change
53857 +                * the rxd to look the same as if the sten packet had failed and then schedule it for retry */
53858 +               cookie = elan4_sdram_readq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[--this].c_cookie));
53859 +               
53860 +               if (cookie == cookies[0] || cookie == cookies[1])
53861 +               {
53862 +                   EP_NETERR_COOKIE ncookie = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_STEN;
53863 +
53864 +                   EPRINTF6 (DBG_NETWORK_ERROR, "%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d new cookie <%lld%s%s%s%s>\n",
53865 +                             rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this, EP4_COOKIE_STRING(ncookie));
53866 +                   
53867 +                   printk ("%s: ep4rcvr_neterr_check: cookie <%lld%s%s%s%s> matches rxd %p rxdRail %p this %d new cookie <%lld%s%s%s%s>\n",
53868 +                           rail->r_generic.Name, EP4_COOKIE_STRING(cookie), rxd, rxdRail, this, EP4_COOKIE_STRING(ncookie));
53869 +                   
53870 +                   /* Allocate a new cookie for this sten packet, since this message could be received more than once.  
53871 +                    * If the second arrives after we've successfully sent the response and the packet completes, then we
53872 +                    * could try and requeue it after the next sten packet got nacked. */
53873 +                   elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[this].c_cookie), ncookie);
53874 +
53875 +                   rxdRail->rxd_main->rxd_sent[this] = EP4_STATE_ACTIVE;
53876 +                   rxdRail->rxd_main->rxd_failed     = EP4_STATE_FAILED;
53877 +                   
53878 +                   spin_lock (&rcvrRail->rcvr_retrylock);
53879 +
53880 +                   EP4_ASSERT (rail, rxdRail->rxd_retry_time == 0);
53881 +
53882 +                   rxdRail->rxd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;
53883 +                       
53884 +                   list_add_tail (&rxdRail->rxd_retry_link, &rcvrRail->rcvr_retrylist);
53885 +                       
53886 +                   ep_kthread_schedule (&rail->r_retry_thread, rxdRail->rxd_retry_time);
53887 +                   
53888 +                   spin_unlock (&rcvrRail->rcvr_retrylock);
53889 +               }
53890 +           }
53891 +       }
53892 +    }
53893 +    EP4_SPINEXIT (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), &rcvrRail->rcvr_main->rcvr_thread_lock);
53894 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
53895 +}
53896 +
53897 +int
53898 +ep4rcvr_queue_rxd (EP_RXD *rxd, EP_RCVR_RAIL *r)
53899 +{
53900 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r;
53901 +    EP4_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
53902 +    ELAN4_DEV     *dev      = rail->r_ctxt.ctxt_dev;
53903 +    EP4_RXD_RAIL  *rxdRail;
53904 +    register int   i;
53905 +
53906 +    ASSERT (SPINLOCK_HELD(&rxd->Rcvr->Lock));
53907 +
53908 +    if ((rxdRail = get_rxd_rail (rcvrRail)) == NULL)
53909 +       return 0;
53910 +    
53911 +    /* Flush the Elan TLB if mappings have changed */
53912 +    ep_perrail_dvma_sync (&rail->r_generic);
53913 +
53914 +    EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_queue_rxd: rcvr %p rxd %p rxdRail %p buffer %x len %x\n", 
53915 +             rail->r_generic.Name, rxd->Rcvr, rxd, rxdRail, rxd->Data.nmd_addr, rxd->Data.nmd_len);
53916 +
53917 +    /* bind the rxdRail and rxd together */
53918 +    bind_rxd_rail (rxd, rxdRail);
53919 +
53920 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_addr), rxd->Data.nmd_addr);       /* PCI write */
53921 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_len),  rxd->Data.nmd_len);                /* PCI write */
53922 +    elan4_sdram_writel (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_buffer.nmd_attr), rxd->Data.nmd_attr);       /* PCI write */
53923 +
53924 +    /* Mark as active */
53925 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType), 
53926 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
53927 +    
53928 +    for (i = 0; i <= EP_MAXFRAG; i++)
53929 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
53930 +
53931 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
53932 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
53933 +
53934 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x00, /* %r0 */
53935 +                       ep_symbol (&rail->r_threadcode, "c_queue_rxd"));
53936 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x10, /* %r2 */
53937 +                       rcvrRail->rcvr_elan_addr);
53938 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0]) + 0x18, /* %r3 */
53939 +                       rxdRail->rxd_elan_addr);
53940 +
53941 +    elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType),
53942 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_START_CMD_NDWORDS));
53943 +
53944 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_start));
53945 +
53946 +    return 1;
53947 +}
53948 +
53949 +void
53950 +ep4rcvr_rpc_put (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
53951 +{
53952 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
53953 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
53954 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
53955 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
53956 +    sdramaddr_t     rxdElan   = rxdRail->rxd_elan;
53957 +    EP_ENVELOPE    *env       = &rxd->RxdMain->Envelope;
53958 +    unsigned long   first     = (EP_MAXFRAG+1) - nFrags;
53959 +    EP4_RXD_DMA_CMD cmd;
53960 +    register int    i, len;
53961 +
53962 +    EP4_ASSERT (rail, rxd->State == EP_RXD_PUT_ACTIVE);
53963 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
53964 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
53965 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
53966 +
53967 +    /* Flush the Elan TLB if mappings have changed */
53968 +    ep_perrail_dvma_sync (&rail->r_generic);
53969 +
53970 +    /* Generate the DMA chain to put the data */
53971 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
53972 +    {
53973 +       cmd.c_dma_typeSize     = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
53974 +       cmd.c_dma_cookie       = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
53975 +       cmd.c_dma_vproc        = EP_VP_DATA(env->NodeId);
53976 +       cmd.c_dma_srcAddr      = local->nmd_addr;
53977 +       cmd.c_dma_dstAddr      = remote->nmd_addr;
53978 +       if (i == (nFrags-1))
53979 +           cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done);
53980 +       else
53981 +           cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]);
53982 +       cmd.c_dma_dstEvent     = 0;
53983 +       cmd.c_nop_cmd          = NOP_CMD;
53984 +
53985 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_put: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
53986 +                 rail->r_generic.Name, rxd, (long long)env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len);
53987 +       
53988 +       elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD));
53989 +    }
53990 +
53991 +    /* Initialise the event chain */
53992 +    for (i = 0; i < nFrags-1; i++)
53993 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
53994 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
53995 +
53996 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
53997 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
53998 +
53999 +    for (i = 0; i <= EP_MAXFRAG; i++)
54000 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
54001 +
54002 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
54003 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
54004 +
54005 +    /* Initialise the previous event to start the whole chain off */
54006 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
54007 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
54008 +
54009 +    EP4_ASSERT (rail, rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
54010 +
54011 +    /* finally issue the setevent to start the whole chain */
54012 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
54013 +
54014 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
54015 +}    
54016 +
54017 +void
54018 +ep4rcvr_rpc_get (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
54019 +{
54020 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
54021 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
54022 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
54023 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
54024 +    sdramaddr_t      rxdElan  = rxdRail->rxd_elan;
54025 +    EP_ENVELOPE     *env      = &rxd->RxdMain->Envelope;
54026 +    unsigned long    first    = (EP_MAXFRAG+1) - nFrags;
54027 +    register int    i, len;
54028 +
54029 +    EP4_ASSERT (rail, rxd->State == EP_RXD_GET_ACTIVE);
54030 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
54031 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
54032 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54033 +
54034 +    /* Flush the Elan TLB if mappings have changed */
54035 +    ep_perrail_dvma_sync (&rail->r_generic);
54036 +
54037 +    /* Generate the DMA chain to get the data */
54038 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
54039 +    {
54040 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_get rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
54041 +                 rail->r_generic.Name, rxd, (long long)env->Xid.Unique, i, remote->nmd_addr, local->nmd_addr, remote->nmd_len);
54042 +       
54043 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_open),
54044 +                           OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(env->NodeId)));
54045 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_trans),
54046 +                           SEND_TRANS_CMD | ((TR_REMOTEDMA | TR_WAIT_FOR_EOP) << 16));
54047 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_cookie),
54048 +                           ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_STEN);
54049 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_typeSize),
54050 +                           E4_DMA_TYPE_SIZE (local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT));
54051 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_cookie),
54052 +                           ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA);
54053 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_vproc),
54054 +                           EP_VP_DATA (rail->r_generic.Position.pos_nodeid));
54055 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcAddr),
54056 +                           remote->nmd_addr);
54057 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstAddr),
54058 +                           local->nmd_addr);
54059 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_srcEvent),
54060 +                           0);
54061 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i].c_dma_dstEvent),
54062 +                           i == (nFrags-1) ? rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done) : 
54063 +                                             rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]));
54064 +    }
54065 +
54066 +    /* Initialise the event chain */
54067 +    for (i = 0; i < nFrags-1; i++)
54068 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
54069 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
54070 +
54071 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
54072 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54073 +
54074 +    for (i = 0; i <= EP_MAXFRAG; i++)
54075 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
54076 +
54077 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
54078 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
54079 +
54080 +    /* Initialise the previous event to start the whole chain off */
54081 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
54082 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
54083 +
54084 +    EP4_ASSERT (rail, rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
54085 +
54086 +    /* finally issue the setevent to start the whole chain */
54087 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
54088 +
54089 +    BucketStat (rxd->Rcvr->Subsys, RPCPut, len);
54090 +}
54091 +
54092 +void
54093 +ep4rcvr_rpc_complete (EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags)
54094 +{
54095 +    EP4_RXD_RAIL    *rxdRail  = (EP4_RXD_RAIL *) rxd->RxdRail;
54096 +    EP4_RCVR_RAIL   *rcvrRail = (EP4_RCVR_RAIL *) rxdRail->rxd_generic.RcvrRail;
54097 +    EP4_RAIL        *rail     = RCVR_TO_RAIL (rcvrRail);
54098 +    ELAN4_DEV      *dev      = RCVR_TO_DEV (rcvrRail);
54099 +    sdramaddr_t     rxdElan   = rxdRail->rxd_elan;
54100 +    EP_ENVELOPE    *env       = &rxd->RxdMain->Envelope;
54101 +    unsigned long   first     = (EP_MAXFRAG+1) - nFrags - 1;
54102 +    EP4_RXD_DMA_CMD cmd;
54103 +    register int    i, len;
54104 +
54105 +    EP4_ASSERT (rail, rxd->State == EP_RXD_COMPLETE_ACTIVE);
54106 +    EP4_ASSERT (rail, rxdRail->rxd_main->rxd_done == EP4_STATE_PRIVATE);
54107 +    EP4_SDRAM_ASSERT (rail, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType),
54108 +                     E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54109 +
54110 +    /* Flush the Elan TLB if mappings have changed */
54111 +    ep_perrail_dvma_sync (&rail->r_generic);
54112 +
54113 +    /* Generate the DMA chain to put the data */
54114 +    for (i = 0, len = 0; i < nFrags; i++, len += local->nmd_len, local++, remote++)
54115 +    {
54116 +       cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(local->nmd_len, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
54117 +       cmd.c_dma_cookie   = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
54118 +       cmd.c_dma_vproc    = EP_VP_DATA(env->NodeId);
54119 +       cmd.c_dma_srcAddr  = local->nmd_addr;
54120 +       cmd.c_dma_dstAddr  = remote->nmd_addr;
54121 +       cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]);
54122 +       cmd.c_dma_dstEvent = 0;
54123 +       cmd.c_nop_cmd      = NOP_CMD;
54124 +
54125 +       EPRINTF7 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] idx=%d Source=%08x Dest=%08x Len=%x\n", 
54126 +                 rail->r_generic.Name, rxd, (long long)env->Xid.Unique, i, local->nmd_addr, remote->nmd_addr, local->nmd_len);
54127 +
54128 +       elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[first + i]), sizeof (EP4_RXD_DMA_CMD));
54129 +    }
54130 +    
54131 +    /* Initialise the status block dma */
54132 +    cmd.c_dma_typeSize = RUN_DMA_CMD | E4_DMA_TYPE_SIZE(EP_STATUSBLK_SIZE, DMA_DataTypeByte, 0, EP4_DMA_RETRYCOUNT);
54133 +    cmd.c_dma_cookie   = ep4_neterr_cookie (rail, env->NodeId) | EP4_COOKIE_DMA;
54134 +    cmd.c_dma_vproc    = EP_VP_DATA(env->NodeId);
54135 +    cmd.c_dma_srcAddr  = rxd->NmdMain.nmd_addr + offsetof (EP_RXD_MAIN, StatusBlk);
54136 +    cmd.c_dma_dstAddr  = env->TxdMain.nmd_addr + offsetof (EP_TXD_MAIN, StatusBlk);
54137 +    cmd.c_dma_srcEvent = rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_done);
54138 +    cmd.c_dma_dstEvent = env->TxdRail + offsetof (EP4_TXD_RAIL_ELAN, txd_done);;
54139 +    cmd.c_nop_cmd      = NOP_CMD;
54140 +
54141 +    EPRINTF6 (DBG_RCVR, "%s: ep4rcvr_rpc_complete: rxd %p [XID=%llx] statusblk source=%08x dest=%08x len=%llx\n", 
54142 +             rail->r_generic.Name, rxd, (long long)env->Xid.Unique, (int) cmd.c_dma_srcAddr, (int) cmd.c_dma_dstAddr, (long long)EP_STATUSBLK_SIZE);
54143 +
54144 +    elan4_sdram_copyq_to_sdram (dev, &cmd, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[EP_MAXFRAG]), sizeof (EP4_RXD_DMA_CMD));
54145 +
54146 +    /* Initialise the event chain */
54147 +    for (i = 0; i < nFrags; i++)
54148 +       elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first + i]), 
54149 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
54150 +
54151 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done),
54152 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54153 +
54154 +    for (i = 0; i <= EP_MAXFRAG; i++)
54155 +       rxdRail->rxd_main->rxd_sent[i] = EP4_STATE_ACTIVE;
54156 +
54157 +    rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
54158 +    rxdRail->rxd_main->rxd_done = EP4_STATE_ACTIVE;
54159 +
54160 +    /* Initialise the previous event to start the whole chain off */
54161 +    elan4_sdram_writeq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]),
54162 +                       E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_DMA_CMD_NDWORDS));
54163 +
54164 +    EP4_ASSERT (rail, rail->r_generic.Nodes[env->NodeId].State >= EP_NODE_CONNECTED && rail->r_generic.Nodes[env->NodeId].State <= EP_NODE_LOCAL_PASSIVATE);
54165 +
54166 +    /* finally issue the setevent to start the whole chain */
54167 +    ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
54168 +
54169 +    BucketStat (rxd->Rcvr->Subsys, CompleteRPC, len);
54170 +}
54171 +
54172 +EP_RXD *
54173 +ep4rcvr_steal_rxd (EP_RCVR_RAIL *r)
54174 +{
54175 +    /* XXXX - TBD */
54176 +    return NULL;
54177 +}
54178 +
54179 +long
54180 +ep4rcvr_check (EP_RCVR_RAIL *r, long nextRunTime)
54181 +{
54182 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) r;
54183 +    EP4_RAIL      *rail     = RCVR_TO_RAIL (rcvrRail);
54184 +
54185 +    if (rcvrRail->rcvr_freecount < ep_rxd_lowat && !alloc_rxd_block (rcvrRail))
54186 +    {
54187 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow rxd rail pool\n", rail->r_generic.Name);
54188 +               
54189 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
54190 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
54191 +    }
54192 +    
54193 +    return nextRunTime;
54194 +}
54195 +
54196 +unsigned long
54197 +ep4rcvr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
54198 +{
54199 +    EP4_RCVR_RAIL *rcvrRail = (EP4_RCVR_RAIL *) arg;
54200 +    ELAN4_DEV     *dev      = RCVR_TO_DEV(rcvrRail);
54201 +    unsigned long  flags;
54202 +
54203 +    spin_lock_irqsave (&rcvrRail->rcvr_retrylock, flags);
54204 +    while (! list_empty (&rcvrRail->rcvr_retrylist))
54205 +    {
54206 +       EP4_RXD_RAIL *rxdRail = list_entry (rcvrRail->rcvr_retrylist.next, EP4_RXD_RAIL, rxd_retry_link);
54207 +       EP_ENVELOPE  *env     = &rxdRail->rxd_generic.Rxd->RxdMain->Envelope;
54208 +       unsigned int  first   = (EP_MAXFRAG+1) - ((env->Attr & EP_MULTICAST ? 1 : 0) + (env->nFrags == 0 ? 1 : env->nFrags));
54209 +       
54210 +       if (BEFORE (lbolt, rxdRail->rxd_retry_time))
54211 +       {
54212 +           if (nextRunTime == 0 || AFTER (nextRunTime, rxdRail->rxd_retry_time))
54213 +               nextRunTime = rxdRail->rxd_retry_time;
54214 +
54215 +           break;
54216 +       }
54217 +
54218 +       list_del (&rxdRail->rxd_retry_link);
54219 +       rxdRail->rxd_retry_time = 0;
54220 +
54221 +       /* determine which sten packet to resubmit */
54222 +       for (; first < (EP_MAXFRAG+1); first++)
54223 +           if (rxdRail->rxd_main->rxd_sent[first] == EP4_STATE_ACTIVE)
54224 +               break;
54225 +
54226 +       EPRINTF3 (DBG_RETRY, "%s: ep4rcvr_retry: rxdRail %p, reissuing sten[%d]\n", rail->r_generic.Name, rxdRail, first);
54227 +
54228 +       /* re-initialise the fail event */
54229 +       elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType),
54230 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
54231 +
54232 +       rxdRail->rxd_main->rxd_failed = EP4_STATE_ACTIVE;
54233 +
54234 +       /* re-initialise the chain event to resubmit this sten packet */
54235 +       elan4_sdram_writeq (dev, rxdRail->rxd_elan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first-1].ev_CountAndType),
54236 +                           E4_EVENT_INIT_VALUE(-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_RXD_STEN_CMD_NDWORDS));
54237 +       
54238 +       /* finally issue the setevent to start the chain again */
54239 +       ep4_set_event_cmd (rxdRail->rxd_scq, rxdRail->rxd_elan_addr + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[first - 1]));
54240 +    }
54241 +    spin_unlock_irqrestore (&rcvrRail->rcvr_retrylock, flags);
54242 +    
54243 +    return nextRunTime;
54244 +}
54245 +
54246 +void
54247 +ep4rcvr_add_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
54248 +{
54249 +    EP4_RAIL          *rail   = (EP4_RAIL *) commsRail->Rail;
54250 +    ELAN4_DEV         *dev    = rail->r_ctxt.ctxt_dev;
54251 +    sdramaddr_t        qdescs = ((EP4_COMMS_RAIL *) commsRail)->r_descs;
54252 +    EP4_RCVR_RAIL     *rcvrRail;
54253 +    E4_InputQueue      qdesc;
54254 +    E4_ThreadRegs      tregs;
54255 +    sdramaddr_t        stack;
54256 +    unsigned long      flags;
54257 +
54258 +    KMEM_ZALLOC (rcvrRail, EP4_RCVR_RAIL *, sizeof (EP4_RCVR_RAIL), 1);
54259 +
54260 +    spin_lock_init (&rcvrRail->rcvr_freelock);
54261 +    INIT_LIST_HEAD (&rcvrRail->rcvr_freelist);
54262 +    INIT_LIST_HEAD (&rcvrRail->rcvr_blocklist);
54263 +
54264 +    kcondvar_init (&rcvrRail->rcvr_cleanup_sleep);
54265 +    kcondvar_init (&rcvrRail->rcvr_freesleep);
54266 +
54267 +    INIT_LIST_HEAD (&rcvrRail->rcvr_retrylist);
54268 +    spin_lock_init (&rcvrRail->rcvr_retrylock);
54269 +
54270 +    rcvrRail->rcvr_generic.CommsRail = commsRail;
54271 +    rcvrRail->rcvr_generic.Rcvr      = rcvr;
54272 +
54273 +    rcvrRail->rcvr_main  = ep_alloc_main (&rail->r_generic, sizeof (EP4_RCVR_RAIL_MAIN), 0, &rcvrRail->rcvr_main_addr);
54274 +    rcvrRail->rcvr_elan  = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RCVR_RAIL_ELAN), 0, &rcvrRail->rcvr_elan_addr);
54275 +    rcvrRail->rcvr_slots = ep_alloc_elan (&rail->r_generic, EP_INPUTQ_SIZE * rcvr->InputQueueEntries, 0, &rcvrRail->rcvr_slots_addr);
54276 +    stack                = ep_alloc_elan (&rail->r_generic, EP4_STACK_SIZE, 0, &rcvrRail->rcvr_stack);
54277 +
54278 +    /* allocate a command queue for the thread to use, plus space for it to wait/reschedule */
54279 +    rcvrRail->rcvr_ecq     = ep4_alloc_ecq (rail, CQ_Size64K);
54280 +    rcvrRail->rcvr_resched = ep4_get_ecq (rail, EP4_ECQ_ATOMIC, 8);
54281 +
54282 +    ep4_register_intcookie (rail, &rcvrRail->rcvr_stall_intcookie, rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie),
54283 +                           rcvr_stall_interrupt, rcvrRail);
54284 +
54285 +    /* Initialise the elan portion */
54286 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent.ev_CountAndType), 0);
54287 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_halt.ev_CountAndType), 0);
54288 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock), 0);
54289 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp),
54290 +                       rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head));
54291 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head), 0);
54292 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_stall_intcookie), 0);
54293 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qbase), rcvrRail->rcvr_slots_addr);
54294 +    elan4_sdram_writeq (dev, rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qlast), 
54295 +                       rcvrRail->rcvr_slots_addr + EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1));
54296 +
54297 +    /* Initialise the main memory portion */
54298 +    rcvrRail->rcvr_main->rcvr_thread_lock = 0;
54299 +
54300 +    /* Install our retry handler */
54301 +    rcvrRail->rcvr_retryops.op_func = ep4rcvr_retry;
54302 +    rcvrRail->rcvr_retryops.op_arg  = rcvrRail;
54303 +
54304 +    ep4_add_retry_ops (rail, &rcvrRail->rcvr_retryops);
54305 +
54306 +    /* Update the queue descriptor */
54307 +    qdesc.q_bptr    = rcvrRail->rcvr_slots_addr;
54308 +    qdesc.q_fptr    = rcvrRail->rcvr_slots_addr;
54309 +    qdesc.q_control = E4_InputQueueControl (rcvrRail->rcvr_slots_addr, rcvrRail->rcvr_slots_addr + (EP_INPUTQ_SIZE * (rcvr->InputQueueEntries-1)), EP_INPUTQ_SIZE);
54310 +    qdesc.q_event   = rcvrRail->rcvr_elan_addr + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent);
54311 +
54312 +    ep4_write_qdesc (rail, qdescs + (rcvr->Service * EP_QUEUE_DESC_SIZE), &qdesc);
54313 +
54314 +    spin_lock_irqsave (&rcvr->Lock, flags);
54315 +    rcvr->Rails[rail->r_generic.Number] = &rcvrRail->rcvr_generic;
54316 +    rcvr->RailMask |= EP_RAIL2RAILMASK (rail->r_generic.Number);
54317 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
54318 +
54319 +    {
54320 +       sdramaddr_t stackTop     = stack + EP4_STACK_SIZE;
54321 +       E4_Addr     stackTopAddr = rcvrRail->rcvr_stack + EP4_STACK_SIZE;
54322 +
54323 +       ep4_init_thread (rail, &tregs, stackTop, stackTopAddr, ep_symbol (&rail->r_threadcode, "ep4comms_rcvr"), 6, 
54324 +                        (E4_uint64) rail->r_elan_addr, (E4_uint64) rcvrRail->rcvr_elan_addr, (E4_uint64) rcvrRail->rcvr_main_addr,
54325 +                        (E4_uint64) EP_MSGQ_ADDR(rcvr->Service), (E4_uint64) rcvrRail->rcvr_ecq->ecq_addr, (E4_uint64) rcvrRail->rcvr_resched->ecq_addr);
54326 +    }
54327 +    
54328 +    /* Issue the command to the threads private command queue */
54329 +    elan4_run_thread_cmd (rcvrRail->rcvr_ecq->ecq_cq, &tregs);
54330 +
54331 +    ep_procfs_rcvr_add_rail(&(rcvrRail->rcvr_generic));
54332 +}
54333 +
54334 +void
54335 +ep4rcvr_del_rail (EP_RCVR *rcvr, EP_COMMS_RAIL *commsRail)
54336 +{
54337 +    EP4_RAIL         *rail     = (EP4_RAIL *) commsRail->Rail;
54338 +    EP4_RCVR_RAIL    *rcvrRail = (EP4_RCVR_RAIL *) rcvr->Rails[rail->r_generic.Number];  
54339 +    ELAN4_HALTOP      haltop;
54340 +    struct list_head *el, *nel;
54341 +    unsigned long     flags;
54342 +
54343 +    ep_procfs_rcvr_del_rail(&(rcvrRail->rcvr_generic));
54344 +
54345 +    /* Run a halt operation to mark the input queue as full and
54346 +     * request the thread to halt */
54347 +    haltop.op_mask     = INT_DiscardingHighPri | INT_TProcHalted;
54348 +    haltop.op_function = rcvr_stall_haltop;
54349 +    haltop.op_arg      = rcvrRail;
54350 +
54351 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &haltop);
54352 +
54353 +    /* Wait for the thread to tell us it's processed the input queue */
54354 +    spin_lock_irqsave (&rcvr->Lock, flags);
54355 +    while (! rcvrRail->rcvr_thread_halted)
54356 +       kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags);
54357 +    rcvrRail->rcvr_thread_halted = 0;
54358 +
54359 +    /* flag the rail as no longer available */
54360 +    rcvr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number);
54361 +
54362 +    /* wait for all active communications to terminate */
54363 +    for (;;)
54364 +    {
54365 +       int mustWait = 0;
54366 +
54367 +       list_for_each (el, &rcvr->ActiveDescList) {
54368 +           EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
54369 +           EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail;
54370 +           
54371 +           if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail) && rxd->RxdMain->Len != EP_RXD_PENDING)
54372 +           {
54373 +               mustWait++;
54374 +               break;
54375 +           }
54376 +       }
54377 +
54378 +       if (! mustWait)
54379 +           break;
54380 +
54381 +       rcvrRail->rcvr_cleanup_waiting++;
54382 +       kcondvar_wait (&rcvrRail->rcvr_cleanup_sleep, &rcvr->Lock, &flags);
54383 +    }
54384 +
54385 +    /* at this point all rxd's in the list that are bound to the deleting rail are pending */
54386 +    list_for_each_safe (el, nel, &rcvr->ActiveDescList) {
54387 +       EP_RXD       *rxd     = list_entry (el, EP_RXD, Link);
54388 +       EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) rxd->RxdRail;
54389 +
54390 +       if (rxdRail && RXD_BOUND2RAIL (rxdRail, rcvrRail))
54391 +       {
54392 +           EP4_RXD_ASSERT_PENDING (rxdRail);
54393 +           EP4_RXD_FORCE_PRIVATE (rxdRail);
54394 +
54395 +           unbind_rxd_rail (rxd, rxdRail);
54396 +           free_rxd_rail (rcvrRail, rxdRail);
54397 +       }
54398 +    }
54399 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
54400 +
54401 +    /* wait for all rxd's for this rail to become free */
54402 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
54403 +    while (rcvrRail->rcvr_freecount != rcvrRail->rcvr_totalcount)
54404 +    {
54405 +       rcvrRail->rcvr_freewaiting++;
54406 +       kcondvar_wait (&rcvrRail->rcvr_freesleep, &rcvrRail->rcvr_freelock, &flags);
54407 +    }
54408 +    spin_unlock_irqrestore (&rcvrRail->rcvr_freelock, flags);
54409 +
54410 +    /* can now remove the rail as it can no longer be used */
54411 +    spin_lock_irqsave (&rcvr->Lock, flags);
54412 +    rcvr->Rails[rail->r_generic.Number] = NULL;
54413 +    spin_unlock_irqrestore (&rcvr->Lock, flags);
54414 +
54415 +    /* all the rxd's associated with DescBlocks must be in the FreeDescList */
54416 +    ASSERT (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount);
54417 +
54418 +    /* run through the DescBlockList deleting them */
54419 +    while (!list_empty (&rcvrRail->rcvr_blocklist))
54420 +       free_rxd_block (rcvrRail, list_entry(rcvrRail->rcvr_blocklist.next, EP4_RXD_RAIL_BLOCK , blk_link));
54421 +
54422 +    /* it had better be empty after that */
54423 +    ASSERT ((rcvrRail->rcvr_totalcount == 0) && (rcvrRail->rcvr_totalcount == rcvrRail->rcvr_freecount));
54424 +
54425 +    ep4_remove_retry_ops (rail, &rcvrRail->rcvr_retryops);
54426 +
54427 +    ep4_deregister_intcookie (rail, &rcvrRail->rcvr_stall_intcookie);
54428 +
54429 +    ep4_put_ecq (rail, rcvrRail->rcvr_resched, 8);
54430 +    ep4_free_ecq (rail, rcvrRail->rcvr_ecq);
54431 +
54432 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_stack, EP4_STACK_SIZE);
54433 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_slots_addr, EP_INPUTQ_SIZE * rcvr->InputQueueEntries);
54434 +    ep_free_elan (&rail->r_generic, rcvrRail->rcvr_elan_addr, sizeof (EP4_RCVR_RAIL_ELAN));
54435 +    ep_free_main (&rail->r_generic, rcvrRail->rcvr_main_addr, sizeof (EP4_RCVR_RAIL_MAIN));
54436 +
54437 +    KMEM_FREE (rcvrRail, sizeof (EP4_RCVR_RAIL));
54438 +}
54439 +
54440 +void
54441 +ep4rcvr_display_rxd (DisplayInfo *di, EP_RXD_RAIL *r)
54442 +{
54443 +    EP4_RXD_RAIL *rxdRail = (EP4_RXD_RAIL *) r;
54444 +    sdramaddr_t   rxdElan = rxdRail->rxd_elan;
54445 +    EP4_RAIL     *rail    = RCVR_TO_RAIL (rxdRail->rxd_generic.RcvrRail);
54446 +    ELAN4_DEV    *dev     = rail->r_ctxt.ctxt_dev;
54447 +    int i;
54448 +
54449 +    (di->func)(di->arg, "    Rail %d rxd %p elan %lx(%x) main %p(%x) ecq %d scq %d debug %llx\n", rail->r_generic.Number,
54450 +              rxdRail, rxdRail->rxd_elan, rxdRail->rxd_elan_addr, rxdRail->rxd_main, rxdRail->rxd_main_addr,
54451 +              elan4_cq2num(rxdRail->rxd_ecq->ecq_cq), elan4_cq2num(rxdRail->rxd_scq->ecq_cq),
54452 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_debug)));
54453 +    (di->func)(di->arg, "          start    %016llx %016llx %016llx [%016llx %016llx]\n",
54454 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_CountAndType)),
54455 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[0])),
54456 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_start.ev_Params[1])),
54457 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_cookie)),
54458 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[0].c_dma_cookie)));
54459 +              
54460 +    for (i = 0; i < EP_MAXFRAG; i++)
54461 +       (di->func)(di->arg, "          chain[%d] %016llx %016llx %016llx [%016llx %016llx]\n", i,
54462 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_CountAndType)),
54463 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[0])),
54464 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_chain[i].ev_Params[1])),
54465 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_cookie)),
54466 +                  elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_sten[i+1].c_dma_cookie)));
54467 +    (di->func)(di->arg, "          done    %016llx %016llx %016llx -> %016llx\n",
54468 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_CountAndType)),
54469 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[0])),
54470 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_done.ev_Params[1])),
54471 +              rxdRail->rxd_main->rxd_done);
54472 +    (di->func)(di->arg, "          fail    %016llx %016llx %016llx -> %016llx\n",
54473 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_CountAndType)),
54474 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[0])),
54475 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_failed.ev_Params[1])),
54476 +              rxdRail->rxd_main->rxd_failed);
54477 +    (di->func)(di->arg, "          next %016llx queued %016llx main %016llx\n",
54478 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_next)),
54479 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_queued)),
54480 +              elan4_sdram_readq (dev, rxdElan + offsetof (EP4_RXD_RAIL_ELAN, rxd_main)));
54481 +    (di->func)(di->arg, "          sent %016llx %016llx %016llx %016llx %016llx\n",
54482 +              rxdRail->rxd_main->rxd_sent[0], rxdRail->rxd_main->rxd_sent[1], rxdRail->rxd_main->rxd_sent[2],
54483 +              rxdRail->rxd_main->rxd_sent[3], rxdRail->rxd_main->rxd_sent[4]);
54484 +}
54485 +
54486 +void
54487 +ep4rcvr_display_rcvr (DisplayInfo *di, EP_RCVR_RAIL *r)
54488 +{
54489 +    EP_RCVR          *rcvr       = r->Rcvr;
54490 +    EP4_RCVR_RAIL    *rcvrRail   = (EP4_RCVR_RAIL *) r;
54491 +    EP4_COMMS_RAIL   *commsRail  = RCVR_TO_COMMS(rcvrRail);
54492 +    EP4_RAIL         *rail       = RCVR_TO_RAIL (rcvrRail);
54493 +    ELAN4_DEV        *dev        = rail->r_ctxt.ctxt_dev;
54494 +    sdramaddr_t       rcvrElan   = rcvrRail->rcvr_elan;
54495 +    sdramaddr_t       qdesc      = commsRail->r_descs + (rcvr->Service * EP_QUEUE_DESC_SIZE);
54496 +    sdramaddr_t       event      = rcvrRail->rcvr_elan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_qevent);
54497 +    unsigned int      freeCount  = 0;
54498 +    unsigned int      blockCount = 0;
54499 +    struct list_head *el;
54500 +    unsigned long     flags;
54501 +    
54502 +    spin_lock_irqsave (&rcvrRail->rcvr_freelock, flags);
54503 +    list_for_each (el, &rcvrRail->rcvr_freelist)
54504 +       freeCount++;
54505 +    list_for_each (el, &rcvrRail->rcvr_blocklist)
54506 +       blockCount++;
54507 +    spin_unlock_irqrestore(&rcvrRail->rcvr_freelock, flags);
54508 +
54509 +    (di->func)(di->arg, "      Rail %d elan %lx(%x) main %p(%x) ecq %d resched %d debug %llx\n",
54510 +              rail->r_generic.Number, rcvrRail->rcvr_elan, rcvrRail->rcvr_elan_addr,
54511 +              rcvrRail->rcvr_main, rcvrRail->rcvr_main_addr, elan4_cq2num(rcvrRail->rcvr_ecq->ecq_cq),
54512 +              elan4_cq2num (rcvrRail->rcvr_resched->ecq_cq),
54513 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_debug)));
54514 +    (di->func)(di->arg, "        free %d (%d) total %d blocks %d\n",
54515 +              rcvrRail->rcvr_freecount, freeCount, rcvrRail->rcvr_totalcount, blockCount);
54516 +    (di->func)(di->arg, "        spinlock %016llx %016llx\n", rcvrRail->rcvr_main->rcvr_thread_lock,
54517 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_thread_lock)));
54518 +    (di->func)(di->arg, "        queue: bptr %016llx fptr %016llx control %016llx (base %lx %x)\n",
54519 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_bptr)),
54520 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_fptr)),
54521 +              elan4_sdram_readq (dev, qdesc + offsetof (E4_InputQueue, q_control)),
54522 +              rcvrRail->rcvr_slots, rcvrRail->rcvr_slots_addr);
54523 +    (di->func)(di->arg, "        event %016llx %016llx %016llx\n",
54524 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_CountAndType)),
54525 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[0])),
54526 +              elan4_sdram_readq (dev, event + offsetof (E4_Event32, ev_Params[1])));
54527 +    (di->func)(di->arg, "        pending_tailp %016llx pending_head %016llx\n", 
54528 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_tailp)),
54529 +              elan4_sdram_readq (dev, rcvrElan + offsetof (EP4_RCVR_RAIL_ELAN, rcvr_pending_head)));
54530 +}
54531 +
54532 +void
54533 +ep4rcvr_fillout_rail_stats(EP_RCVR_RAIL *rcvr_rail, char *str) {
54534 +    /* no stats here yet */
54535 +    /* EP4_RCVR_RAIL * ep4rcvr_rail = (EP4_RCVR_RAIL *) rcvr_rail; */
54536 +}
54537 +
54538 +
54539 +/*
54540 + * Local variables:
54541 + * c-file-style: "stroustrup"
54542 + * End:
54543 + */
54544 diff -urN clean/drivers/net/qsnet/ep/epcommsTx.c linux-2.6.9/drivers/net/qsnet/ep/epcommsTx.c
54545 --- clean/drivers/net/qsnet/ep/epcommsTx.c      1969-12-31 19:00:00.000000000 -0500
54546 +++ linux-2.6.9/drivers/net/qsnet/ep/epcommsTx.c        2005-09-02 07:04:02.000000000 -0400
54547 @@ -0,0 +1,919 @@
54548 +/*
54549 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
54550 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
54551 + *
54552 + *    For licensing information please see the supplied COPYING file
54553 + *
54554 + */
54555 +
54556 +#ident "@(#)$Id: epcommsTx.c,v 1.30.2.2 2005/09/02 11:04:02 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
54557 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx.c,v $*/
54558 +
54559 +#include <qsnet/kernel.h>
54560 +
54561 +#include <elan/kcomm.h>
54562 +#include <elan/epsvc.h>
54563 +#include <elan/epcomms.h>
54564 +
54565 +#include "cm.h"
54566 +#include "debug.h"
54567 +
54568 +unsigned int ep_txd_lowat = 5;
54569 +
54570 +static int
54571 +AllocateTxdBlock (EP_XMTR *xmtr, EP_ATTRIBUTE attr, EP_TXD **txdp)
54572 +{
54573 +    EP_TXD_BLOCK *blk;
54574 +    EP_TXD       *txd;
54575 +    EP_TXD_MAIN  *pTxdMain;
54576 +    int                  i;
54577 +    unsigned long flags;
54578 +
54579 +    EPRINTF1 (DBG_XMTR, "AllocateTxdBlock: xmtr=%p\n", xmtr);
54580 +
54581 +    KMEM_ZALLOC (blk, EP_TXD_BLOCK *, sizeof (EP_TXD_BLOCK), ! (attr & EP_NO_SLEEP));
54582 +
54583 +    if (blk == NULL)
54584 +       return -ENOMEM;
54585 +
54586 +    if ((pTxdMain = ep_shared_alloc_main (xmtr->Subsys->Subsys.Sys, EP_TXD_MAIN_SIZE * EP_NUM_TXD_PER_BLOCK, attr, &blk->NmdMain)) == (sdramaddr_t) 0)
54587 +    {
54588 +       KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
54589 +       return -ENOMEM;
54590 +    }
54591 +
54592 +    for (txd = &blk->Txd[0], i = 0; i < EP_NUM_TXD_PER_BLOCK; i++, txd++)
54593 +    {
54594 +       txd->Xmtr     = xmtr;
54595 +       txd->TxdMain = pTxdMain;
54596 +
54597 +       ep_nmd_subset (&txd->NmdMain, &blk->NmdMain, (i * EP_TXD_MAIN_SIZE), EP_TXD_MAIN_SIZE);
54598 +
54599 +       /* move onto next descriptor */
54600 +       pTxdMain = (EP_TXD_MAIN *) ((unsigned long) pTxdMain + EP_TXD_MAIN_SIZE);
54601 +    }
54602 +
54603 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
54604 +
54605 +    list_add  (&blk->Link, &xmtr->DescBlockList);
54606 +    xmtr->TotalDescCount += EP_NUM_TXD_PER_BLOCK;
54607 +
54608 +    for (i = txdp ? 1 : 0; i < EP_NUM_TXD_PER_BLOCK; i++)
54609 +    {
54610 +       list_add (&blk->Txd[i].Link, &xmtr->FreeDescList);
54611 +
54612 +       xmtr->FreeDescCount++;
54613 +
54614 +       if (xmtr->FreeDescWanted)
54615 +       {
54616 +           xmtr->FreeDescWanted--;
54617 +           kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
54618 +       }
54619 +    }
54620 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
54621 +    
54622 +    if (txdp)
54623 +       *txdp = &blk->Txd[0];
54624 +
54625 +    return 0;
54626 +}
54627 +
54628 +static void
54629 +FreeTxdBlock (EP_XMTR *xmtr, EP_TXD_BLOCK *blk)
54630 +{
54631 +    unsigned long flags;
54632 +
54633 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
54634 +    list_del (&blk->Link);
54635 +
54636 +    xmtr->TotalDescCount -= EP_NUM_TXD_PER_BLOCK;
54637 +    xmtr->FreeDescCount -= EP_NUM_TXD_PER_BLOCK;
54638 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
54639 +
54640 +    ep_shared_free_main (xmtr->Subsys->Subsys.Sys, &blk->NmdMain);
54641 +    KMEM_FREE (blk, sizeof (EP_TXD_BLOCK));
54642 +}
54643 +
54644 +static EP_TXD *
54645 +GetTxd (EP_XMTR *xmtr, EP_ATTRIBUTE attr)
54646 +{
54647 +    EP_COMMS_SUBSYS *subsys = xmtr->Subsys;
54648 +    EP_TXD          *txd;
54649 +    int low_on_txds;
54650 +    unsigned long flags;
54651 +
54652 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
54653 +
54654 +    while (list_empty (&xmtr->FreeDescList))
54655 +    {
54656 +       if (! (attr & EP_NO_ALLOC))
54657 +       {
54658 +           spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
54659 +
54660 +           if (AllocateTxdBlock (xmtr, attr, &txd) == ESUCCESS)
54661 +               return (txd);
54662 +
54663 +           spin_lock_irqsave (&xmtr->FreeDescLock, flags);
54664 +       }
54665 +
54666 +       if (attr & EP_NO_SLEEP)
54667 +       {
54668 +           spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
54669 +
54670 +           return (NULL);
54671 +       }
54672 +
54673 +       xmtr->FreeDescWanted++;
54674 +       kcondvar_wait (&xmtr->FreeDescSleep, &xmtr->FreeDescLock, &flags);
54675 +    }
54676 +
54677 +    txd = list_entry (xmtr->FreeDescList.next, EP_TXD, Link);
54678 +
54679 +    list_del (&txd->Link);
54680 +
54681 +    /* Wakeup the descriptor primer thread if there's not many left */
54682 +    low_on_txds = (--xmtr->FreeDescCount < ep_txd_lowat);
54683 +
54684 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
54685 +
54686 +    if (low_on_txds)
54687 +       ep_kthread_schedule (&subsys->Thread, lbolt);
54688 +
54689 +    return (txd);
54690 +}
54691 +
54692 +void
54693 +FreeTxd (EP_XMTR *xmtr, EP_TXD *txd)
54694 +{
54695 +    unsigned long flags;
54696 +
54697 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
54698 +    
54699 +    list_add (&txd->Link, &xmtr->FreeDescList);
54700 +
54701 +    xmtr->FreeDescCount++;
54702 +
54703 +    if (xmtr->FreeDescWanted)                                  /* someone waiting for a transmit */
54704 +    {                                                          /* descriptor, so wake them up */
54705 +       xmtr->FreeDescWanted--;
54706 +       kcondvar_wakeupone (&xmtr->FreeDescSleep, &xmtr->FreeDescLock);
54707 +    }
54708 +    
54709 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
54710 +}
54711 +
54712 +int
54713 +TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail)
54714 +{
54715 +    EP_TXD      *txd  = txdRail->Txd;
54716 +    EP_XMTR     *xmtr = txd->Xmtr;
54717 +    EP_ATTRIBUTE attr = txd->Envelope.Attr;
54718 +    int                 stabilise;
54719 +    extern int   txd_stabilise;
54720 +
54721 +    switch (EP_ATTR2TYPE (attr)) 
54722 +    {
54723 +    case EP_TYPE_SVC_INDICATOR:                                /* is the rail in the current service indicator rail mask */
54724 +       if ((txd_stabilise & 4) == 0)
54725 +           return 0;
54726 +
54727 +       stabilise = (ep_xmtr_svc_indicator_railmask (xmtr, EP_ATTR2DATA (attr), txd->NodeId) & EP_RAIL2RAILMASK (rail->Number)) == 0;
54728 +       break;
54729 +
54730 +    case EP_TYPE_TIMEOUT:
54731 +       if ((txd_stabilise & 2) == 0)
54732 +           return 0;
54733 +
54734 +       stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_ATTR2DATA(attr));
54735 +       break;
54736 +
54737 +    default:
54738 +       if ((txd_stabilise & 1) == 0)
54739 +           return 0;
54740 +
54741 +       stabilise = AFTER(lbolt, txdRail->Txd->TimeStamp + EP_DEFAULT_TIMEOUT);
54742 +       break;
54743 +    }
54744 +
54745 +    if (stabilise)
54746 +    {
54747 +       txd->Envelope.Attr = EP_SET_TXD_STABALISING(txd->Envelope.Attr);
54748 +       txd->RetryTime     = lbolt;
54749 +
54750 +       ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);    
54751 +    }
54752 +
54753 +    return stabilise;
54754 +}
54755 +
54756 +void ep_xmtr_txd_stat(EP_XMTR *xmtr, EP_TXD *txd) 
54757 +{
54758 +    int f;
54759 +    unsigned long size;
54760 +    EP_TXD_RAIL *txdRail = txd->TxdRail;
54761 +
54762 +    size = 0;
54763 +    for (f=0; f < txd->Envelope.nFrags; f++)
54764 +       size += txd->Envelope.Frags[f].nmd_len;
54765 +
54766 +    INC_STAT(xmtr->stats,tx);
54767 +    ADD_STAT(xmtr->stats,tx_len, size);  
54768 +    
54769 +    if ((txdRail != NULL) && (txdRail->XmtrRail != NULL)){
54770 +       INC_STAT(txdRail->XmtrRail->stats,tx);
54771 +       ADD_STAT(txdRail->XmtrRail->stats,tx_len, size); 
54772 +       
54773 +       if ((txdRail->XmtrRail->CommsRail != NULL) && ( txdRail->XmtrRail->CommsRail->Rail != NULL)) {
54774 +           INC_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx);
54775 +           ADD_STAT(txdRail->XmtrRail->CommsRail->Rail->Stats,tx_len, size);
54776 +       }
54777 +    }
54778 +}
54779 +
54780 +static int
54781 +PollActiveTransmitList (EP_XMTR *xmtr, int flag)
54782 +{
54783 +    struct list_head *el, *nel;
54784 +    struct list_head list;
54785 +    unsigned long flags;
54786 +    int count;
54787 +
54788 +    INIT_LIST_HEAD (&list);
54789 +
54790 +    spin_lock_irqsave (&xmtr->Lock, flags);
54791 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
54792 +       EP_TXD      *txd     = list_entry (el, EP_TXD, Link);
54793 +       EP_TXD_RAIL *txdRail = txd->TxdRail;
54794 +       
54795 +       if (txdRail == NULL)
54796 +           continue;
54797 +
54798 +       ASSERT (txdRail->Txd == txd);
54799 +       
54800 +       if (EP_XMTR_OP (txdRail->XmtrRail,PollTxd) (txdRail->XmtrRail, txdRail, flag))
54801 +       {
54802 +           list_del (&txd->Link);                              /* remove from active transmit list */
54803 +           list_add_tail (&txd->Link, &list);                  /* and add to list to call handlers */
54804 +       }
54805 +    }
54806 +    
54807 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54808 +
54809 +    for (count = 0; !list_empty (&list); count++)
54810 +    {
54811 +       EP_TXD *txd = list_entry (list.next, EP_TXD, Link);
54812 +
54813 +       list_del (&txd->Link);
54814 +
54815 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
54816 +
54817 +       FreeTxd (xmtr, txd);
54818 +    }
54819 +    return (count);
54820 +}
54821 +
54822 +static inline void
54823 +DoTransmit (EP_XMTR *xmtr, EP_TXD *txd)
54824 +{
54825 +    EP_RAILMASK   nmdRailMask = ep_nmd2railmask (txd->Envelope.Frags, txd->Envelope.nFrags);
54826 +    EP_XMTR_RAIL *xmtrRail;
54827 +    unsigned long flags;
54828 +    int rnum;
54829 +
54830 +    spin_lock_irqsave (&xmtr->Lock, flags);
54831 +
54832 +    if (EP_IS_SVC_INDICATOR(txd->Envelope.Attr))
54833 +       nmdRailMask = nmdRailMask & ep_xmtr_svc_indicator_railmask(xmtr, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId);
54834 +
54835 +    if (EP_IS_PREFRAIL_SET(txd->Envelope.Attr))
54836 +       rnum = EP_ATTR2PREFRAIL(txd->Envelope.Attr);
54837 +    else 
54838 +       rnum = ep_xmtr_prefrail (xmtr, nmdRailMask, txd->NodeId);
54839 +    
54840 +    if (rnum < 0 || !(nmdRailMask & EP_RAIL2RAILMASK(rnum)))
54841 +       xmtrRail = NULL;
54842 +    else
54843 +       xmtrRail = xmtr->Rails[rnum];
54844 +    
54845 +    /* Allocate the XID while holding the xmtr->Lock from our XID cache */
54846 +    txd->Envelope.Xid = ep_xid_cache_alloc (xmtr->Subsys->Subsys.Sys, &xmtr->XidCache);
54847 +    
54848 +    EPRINTF7 (DBG_XMTR, "ep: transmit txd %p to %d/%d: Xid %llx nFrags %d [%08x.%d]\n",
54849 +             txd, txd->NodeId, txd->Service, (long long) txd->Envelope.Xid.Unique, 
54850 +             txd->Envelope.nFrags, txd->Envelope.Frags[0].nmd_addr, txd->Envelope.Frags[0].nmd_len);
54851 +
54852 +    /* Store time transmit started to timeout if not received */
54853 +    txd->TimeStamp = lbolt;
54854 +    
54855 +    /* Initialise the retry backoff */
54856 +    txd->Backoff.type = EP_BACKOFF_FREE;
54857 +
54858 +    list_add_tail (&txd->Link, &xmtr->ActiveDescList);
54859 +
54860 +    if (xmtrRail == NULL || !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE))
54861 +       ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);
54862 +    
54863 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
54864 +
54865 +    if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
54866 +       PollActiveTransmitList (xmtr, POLL_TX_LIST);
54867 +}
54868 +
54869 +EP_STATUS
54870 +ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
54871 +                    EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
54872 +{
54873 +    EP_TXD       *txd;
54874 +    int           i, len;
54875 +
54876 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
54877 +       return (EP_EINVAL);
54878 +
54879 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
54880 +       return (EP_ENOMEM);
54881 +
54882 +    txd->Handler = handler;
54883 +    txd->Arg     = arg;
54884 +    txd->Service = service;
54885 +    txd->NodeId  = (unsigned short) dest;
54886 +
54887 +    /* Initialise the envelope */
54888 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
54889 +    txd->Envelope.Attr      = EP_CLEAR_LOCAL_ATTR(attr);
54890 +    txd->Envelope.Range     = EP_RANGE (dest, dest);
54891 +    txd->Envelope.TxdMain   = txd->NmdMain;
54892 +    txd->Envelope.nFrags    = nFrags;
54893 +
54894 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
54895 +       txd->Envelope.Frags[i] = nmd[i];
54896 +
54897 +    if (payload)
54898 +    {
54899 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
54900 +
54901 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
54902 +    }
54903 +
54904 +    DoTransmit (xmtr, txd);
54905 +
54906 +    BucketStat (xmtr->Subsys, DataXmit, len);
54907 +
54908 +    return (EP_SUCCESS);
54909 +}
54910 +
54911 +EP_STATUS
54912 +ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, EP_SERVICE service, 
54913 +                    EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
54914 +{
54915 +    EP_SYS       *sys = xmtr->Subsys->Subsys.Sys;
54916 +    EP_TXD       *txd;
54917 +    int           nnodes;
54918 +    int           i, len;
54919 +    unsigned long flags;    
54920 +
54921 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
54922 +       return (EP_EINVAL);
54923 +
54924 +    if (destLo == -1) 
54925 +       destLo = sys->Position.pos_nodeid & ~(EP_MAX_NODES-1);
54926 +
54927 +    if (destHi == -1 && (destHi = ((sys->Position.pos_nodeid + EP_MAX_NODES) & ~(EP_MAX_NODES-1)) - 1) >= sys->Position.pos_nodes)
54928 +       destHi = sys->Position.pos_nodes-1;
54929 +
54930 +    nnodes = (destHi-destLo+1);
54931 +
54932 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
54933 +       return (EP_ENOMEM);
54934 +
54935 +    txd->Handler = handler;
54936 +    txd->Arg     = arg;
54937 +    txd->Service = service;
54938 +
54939 +    /* Initialise the envelope */
54940 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
54941 +    txd->Envelope.Attr      = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr));
54942 +    txd->Envelope.Range     = EP_RANGE (destLo, destHi);
54943 +    txd->Envelope.TxdMain   = txd->NmdMain;
54944 +    txd->Envelope.nFrags    = nFrags;
54945 +
54946 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
54947 +       txd->Envelope.Frags[i] = nmd[i];
54948 +
54949 +    if (payload)
54950 +    {
54951 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
54952 +
54953 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
54954 +    }
54955 +
54956 +    spin_lock_irqsave (&sys->NodeLock, flags);
54957 +    if (EP_IS_SVC_INDICATOR(attr)) 
54958 +       ep_xmtr_svc_indicator_bitmap(xmtr, EP_ATTR2DATA(attr), txd->TxdMain->Bitmap, destLo, nnodes);
54959 +    else
54960 +       bt_subset (statemap_tobitmap(sys->NodeSet), txd->TxdMain->Bitmap, destLo, nnodes);
54961 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
54962 +
54963 +    if (bitmap != NULL)                                                                        /* bitmap supplied, so intersect it with */
54964 +       bt_intersect (txd->TxdMain->Bitmap, bitmap, nnodes);                            /* the current node set map */
54965 +    
54966 +    if ((attr & EP_NOT_MYSELF) && destLo <= sys->Position.pos_nodeid && sys->Position.pos_nodeid <= destHi)
54967 +       BT_CLEAR (txd->TxdMain->Bitmap, (sys->Position.pos_nodeid-destLo));                     /* clear myself if not wanted */
54968 +
54969 +    if ((i = bt_lowbit (txd->TxdMain->Bitmap, nnodes)) < 0)
54970 +    {
54971 +       FreeTxd (xmtr, txd);
54972 +       return (EP_NODE_DOWN);
54973 +    }
54974 +
54975 +    txd->NodeId = (unsigned short) i;
54976 +
54977 +    DoTransmit (xmtr, txd);
54978 +
54979 +    BucketStat (xmtr->Subsys, McastXmit, len);
54980 +
54981 +    return (EP_SUCCESS);
54982 +}
54983 +
54984 +EP_STATUS
54985 +ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr,
54986 +                EP_TXH *handler, void *arg, EP_PAYLOAD *payload, EP_NMD *nmd, int nFrags)
54987 +{
54988 +    EP_TXD       *txd;
54989 +    int           i, len;
54990 +
54991 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
54992 +       return (EP_EINVAL);
54993 +
54994 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
54995 +       return (EP_ENOMEM);
54996 +
54997 +    txd->Handler = handler;
54998 +    txd->Arg     = arg;
54999 +    txd->Service = service;
55000 +    txd->NodeId  = dest;
55001 +
55002 +    /* Initialise the envelope */
55003 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
55004 +    txd->Envelope.Attr      = EP_SET_RPC(EP_CLEAR_LOCAL_ATTR(attr));    
55005 +    txd->Envelope.Range     = EP_RANGE (dest, dest);
55006 +    txd->Envelope.TxdMain   = txd->NmdMain;
55007 +    txd->Envelope.nFrags    = nFrags;
55008 +     
55009 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
55010 +       txd->Envelope.Frags[i] = nmd[i];
55011 +
55012 +    if (payload)
55013 +    {
55014 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
55015 +
55016 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
55017 +    }
55018 +
55019 +    DoTransmit (xmtr, txd);
55020 +
55021 +    BucketStat (xmtr->Subsys, RPCXmit, len);
55022 +
55023 +    return (EP_SUCCESS);
55024 +}
55025 +
55026 +EP_STATUS
55027 +ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg,
55028 +                     EP_ENVELOPE *env,  EP_PAYLOAD *payload, bitmap_t *bitmap, EP_NMD *nmd, int nFrags)
55029 +{
55030 +    EP_TXD       *txd;
55031 +    int           i, len;
55032 +
55033 +    if (nFrags > EP_MAXFRAG || service > EP_MSG_NSVC)
55034 +       return (EP_EINVAL);
55035 +
55036 +    if ((txd = GetTxd (xmtr, attr)) == NULL)
55037 +       return (EP_ENOMEM);
55038 +
55039 +    txd->Handler = handler;
55040 +    txd->Arg     = arg;
55041 +    txd->Service = service;
55042 +    txd->NodeId  = (unsigned short) dest;
55043 +
55044 +    /* Initialise the envelope */
55045 +    txd->Envelope.Version   = EP_ENVELOPE_VERSION;
55046 +    txd->Envelope.Attr      = EP_SET_MULTICAST(EP_CLEAR_LOCAL_ATTR(attr));
55047 +    txd->Envelope.Range     = env->Range;
55048 +    txd->Envelope.TxdMain   = txd->NmdMain;
55049 +    txd->Envelope.nFrags    = nFrags;
55050 +
55051 +    for (i = len = 0; i < nFrags; len += nmd[i].nmd_len, i++)
55052 +       txd->Envelope.Frags[i] = nmd[i];
55053 +
55054 +    bt_copy (bitmap, txd->TxdMain->Bitmap, EP_RANGE_HIGH(env->Range) - EP_RANGE_LOW(env->Range) + 1);
55055 +
55056 +    if (payload)
55057 +    {
55058 +       txd->Envelope.Attr = EP_SET_HAS_PAYLOAD(txd->Envelope.Attr);
55059 +
55060 +       bcopy (payload, &txd->Payload, sizeof (EP_PAYLOAD));
55061 +    }
55062 +
55063 +    DoTransmit (xmtr, txd);
55064 +
55065 +    BucketStat (xmtr->Subsys, McastXmit, len);
55066 +
55067 +    return (EP_SUCCESS);
55068 +}
55069 +
55070 +int
55071 +ep_poll_transmits (EP_XMTR *xmtr)
55072 +{
55073 +    return (PollActiveTransmitList (xmtr, POLL_TX_LIST));
55074 +}
55075 +
55076 +int
55077 +ep_enable_txcallbacks (EP_XMTR *xmtr)
55078 +{
55079 +    return (PollActiveTransmitList (xmtr, ENABLE_TX_CALLBACK));
55080 +}
55081 +
55082 +int
55083 +ep_disable_txcallbacks (EP_XMTR *xmtr)
55084 +{
55085 +    return (PollActiveTransmitList (xmtr, DISABLE_TX_CALLBACK));
55086 +}
55087 +
55088 +/* functions for accessing fields of txds */
55089 +int              ep_txd_node(EP_TXD *txd)              { return (txd->NodeId); }
55090 +EP_STATUSBLK    *ep_txd_statusblk(EP_TXD *txd)                 { return (&txd->TxdMain->StatusBlk); }
55091 +
55092 +void
55093 +ep_xmtr_xid_msg_handler (void *arg, EP_MANAGER_MSG *msg)
55094 +{
55095 +    EP_XMTR          *xmtr = (EP_XMTR *) arg;
55096 +    EP_SYS           *sys  = xmtr->Subsys->Subsys.Sys;
55097 +    struct list_head *el,*nel;
55098 +    unsigned long     flags;
55099 +
55100 +    switch (msg->Hdr.Type)
55101 +    {
55102 +    case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST:
55103 +       spin_lock_irqsave (&xmtr->Lock, flags);
55104 +       list_for_each (el, &xmtr->ActiveDescList) {
55105 +           EP_TXD      *txd     = list_entry (el, EP_TXD, Link);
55106 +           EP_TXD_RAIL *txdRail = txd->TxdRail;
55107 +
55108 +           if (txdRail != NULL && EP_XIDS_MATCH (msg->Body.Failover.Xid, txd->Envelope.Xid))
55109 +           {
55110 +               EP_XMTR_RAIL       *xmtrRail = txdRail->XmtrRail;
55111 +               EP_RAIL            *rail     = xmtrRail->CommsRail->Rail;
55112 +               EP_MANAGER_MSG_BODY msgBody;
55113 +               int                 rnum;
55114 +
55115 +               if (! (msg->Body.Failover.Railmask & EP_RAIL2RAILMASK (rail->Number)))
55116 +               {
55117 +                   /* Need to failover this txd to a different rail, select a rail from
55118 +                    * the set that she has asked us to use and which is connected to her
55119 +                    * on this transmitter.   If there are no such rails, then in all probability
55120 +                    * we're offline on all common rails and eventually she will see we have no
55121 +                    * rails in common and abort the receive. */
55122 +                   if ((rnum = ep_xmtr_prefrail (xmtr, msg->Body.Failover.Railmask, txd->NodeId)) < 0)
55123 +                       ep_debugf (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST but can't determine rail (%04x,%04x,%d,%04x)\n",
55124 +                                  rail->Name, msg->Body.Failover.Railmask, xmtr->RailMask, txd->NodeId, sys->Nodes[txd->NodeId].ConnectedRails);
55125 +                   else
55126 +                   {
55127 +                       EP_XMTR_RAIL *nXmtrRail = xmtr->Rails[rnum];
55128 +
55129 +                       EPRINTF4 (DBG_XMTR, "%s: ep_xmtr_xid_msg_handler: FAILOVER_REQUEST txd=%p XID=%llx-> rail %d\n", rail->Name, txd, (long long) txd->Envelope.Xid.Unique, rnum);
55130 +
55131 +                       /* Bind the txd rail onto the new rail - it doesn't matter if we fail
55132 +                        * as it will remain bound to the original rail */
55133 +                       (void) EP_XMTR_OP (nXmtrRail, BindTxd) (txd, nXmtrRail, EP_TXD_PHASE_PASSIVE);
55134 +                   }
55135 +               }
55136 +
55137 +               /* Send a failover response including an envelope update */
55138 +               msgBody.FailoverTxd.Rail     = rail->Number;
55139 +               msgBody.FailoverTxd.Xid      = txd->Envelope.Xid;
55140 +               msgBody.FailoverTxd.TxdRail  = txd->Envelope.TxdRail;
55141 +
55142 +               ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE, msg->Hdr.Xid, &msgBody);
55143 +           }
55144 +       }
55145 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
55146 +       break;
55147 +
55148 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: {
55149 +       int         txd_has_not_sent_envelope = 0;
55150 +       EP_TXD      *txd            = NULL;
55151 +       EP_TXD_RAIL *txdRail        = NULL;
55152 +
55153 +       if (msg->Body.NodeState.NetworkErrorState != 0)
55154 +           ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt + MESSAGE_RETRY_TIME);
55155 +       else
55156 +       {
55157 +           spin_lock_irqsave (&xmtr->Lock, flags);
55158 +           list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
55159 +               
55160 +               txd     = list_entry (el, EP_TXD, Link);
55161 +               txdRail = txd->TxdRail;
55162 +               
55163 +               if (txdRail != NULL && EP_XIDS_MATCH (msg->Hdr.Xid, txd->Envelope.Xid)) {
55164 +                   txd_has_not_sent_envelope = EP_XMTR_OP(txdRail->XmtrRail,CheckTxdState)(txd);
55165 +                   break;
55166 +               }
55167 +           }
55168 +           
55169 +           if (txd_has_not_sent_envelope) {
55170 +               EPRINTF2 (DBG_STABILISE, "ep_xmtr_xid_msg_handler: GET_NODE_STATE_RESPONSE txd=%p XID=%llx not sent envelope\n",
55171 +                         txd, (long long) txd->Envelope.Xid.Unique);
55172 +
55173 +               /* at this point it has finished stabalising */
55174 +               txd->Envelope.Attr = EP_CLEAR_TXD_STABALISING(txd->Envelope.Attr);
55175 +
55176 +               /* store railmask into txd if not a service indicator or timeout */
55177 +               if (EP_IS_NO_TYPE(txd->Envelope.Attr))
55178 +                   txd->Envelope.Attr = EP_SET_DATA(txd->Envelope.Attr, EP_TYPE_RAILMASK, msg->Body.NodeState.Railmask);
55179 +
55180 +               spin_unlock_irqrestore (&xmtr->Lock, flags);
55181 +               
55182 +               /* TXD is now no longer bound to a rail , so let ep_check_xmtr() handle it */
55183 +               ep_kthread_schedule (&xmtr->Subsys->Thread, lbolt);
55184 +           }
55185 +           else
55186 +               spin_unlock_irqrestore (&xmtr->Lock, flags);    
55187 +       }
55188 +       break;
55189 +    }
55190 +    default:
55191 +       panic ("ep_xmtr_xid_msg_handler: XID match but invalid message type\n");
55192 +    }
55193 +}
55194 +
55195 +EP_XMTR *
55196 +ep_alloc_xmtr (EP_SYS *sys)
55197 +{
55198 +    EP_COMMS_SUBSYS   *subsys;
55199 +    EP_XMTR          *xmtr;
55200 +    struct list_head *el;
55201 +
55202 +    if ((subsys = (EP_COMMS_SUBSYS *) ep_subsys_find (sys, EPCOMMS_SUBSYS_NAME)) == NULL)
55203 +       return (NULL);
55204 +
55205 +    KMEM_ZALLOC (xmtr, EP_XMTR *, sizeof (EP_XMTR), 1);
55206 +
55207 +    if (xmtr == NULL)
55208 +       return (NULL);
55209 +    
55210 +    xmtr->Subsys = subsys;
55211 +
55212 +    spin_lock_init (&xmtr->Lock);
55213 +    INIT_LIST_HEAD (&xmtr->ActiveDescList);
55214 +    
55215 +    kcondvar_init (&xmtr->FreeDescSleep);
55216 +    spin_lock_init (&xmtr->FreeDescLock);
55217 +    INIT_LIST_HEAD (&xmtr->FreeDescList);
55218 +    INIT_LIST_HEAD (&xmtr->DescBlockList);
55219 +
55220 +    ep_xid_cache_init (sys, &xmtr->XidCache);
55221 +
55222 +    xmtr->XidCache.MessageHandler = ep_xmtr_xid_msg_handler;
55223 +    xmtr->XidCache.Arg            = xmtr;
55224 +
55225 +    kmutex_lock (&subsys->Lock);
55226 +    list_add_tail (&xmtr->Link, &subsys->Transmitters);
55227 +
55228 +    ep_procfs_xmtr_add(xmtr);
55229 +
55230 +    /* Now add all rails which are already started */
55231 +    list_for_each (el, &subsys->Rails) { 
55232 +       EP_COMMS_RAIL *commsRail = list_entry (el, EP_COMMS_RAIL, Link);
55233 +
55234 +       EP_RAIL_OP(commsRail, Xmtr.AddRail) (xmtr, commsRail);
55235 +    }
55236 +    kmutex_unlock (&subsys->Lock);
55237 +
55238 +    ep_mod_inc_usecount();
55239 +
55240 +    return (xmtr);
55241 +}
55242 +
55243 +void
55244 +ep_free_xmtr (EP_XMTR *xmtr)
55245 +{
55246 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
55247 +    EP_SYS           *sys    = subsys->Subsys.Sys;
55248 +    struct list_head *el;
55249 +    
55250 +    kmutex_lock (&subsys->Lock);
55251 +    list_for_each (el, &subsys->Rails) { 
55252 +       EP_COMMS_RAIL *rail = list_entry (el, EP_COMMS_RAIL, Link);
55253 +
55254 +       EP_RAIL_OP(rail,Xmtr.DelRail) (xmtr, rail);
55255 +    }
55256 +
55257 +    list_del (&xmtr->Link);
55258 +    kmutex_unlock (&subsys->Lock);
55259 +
55260 +    /* all the desc's must be free */
55261 +    ASSERT(xmtr->FreeDescCount == xmtr->TotalDescCount);
55262 +
55263 +    /* delete the descs */
55264 +    while (!list_empty (&xmtr->DescBlockList))
55265 +       FreeTxdBlock( xmtr, list_entry(xmtr->DescBlockList.next, EP_TXD_BLOCK , Link));
55266 +
55267 +    /* they had better all be gone now */
55268 +    ASSERT((xmtr->FreeDescCount == 0) && (xmtr->TotalDescCount == 0));
55269 +
55270 +    ep_procfs_xmtr_del(xmtr);
55271 +
55272 +    ep_xid_cache_destroy (sys, &xmtr->XidCache);
55273 +
55274 +    spin_lock_destroy (&xmtr->Lock);
55275 +    KMEM_FREE (xmtr, sizeof (EP_XMTR));
55276 +
55277 +    ep_mod_dec_usecount();
55278 +}
55279 +
55280 +long
55281 +ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime)
55282 +{
55283 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
55284 +    EP_SYS           *sys    = subsys->Subsys.Sys;
55285 +    struct list_head *el, *nel;
55286 +    struct list_head  txdList;
55287 +    unsigned long       flags;
55288 +    int                 timed_out=0;
55289 +    int                 i;
55290 +    EP_MANAGER_MSG_BODY body;
55291 +
55292 +    INIT_LIST_HEAD (&txdList);
55293 +
55294 +    /* See if we have any txd's which need to be bound to a rail */
55295 +    spin_lock_irqsave (&xmtr->Lock, flags);
55296 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
55297 +       EP_TXD      *txd      = list_entry (el, EP_TXD, Link);
55298 +       EP_NODE     *node     = &sys->Nodes[txd->NodeId];
55299 +       EP_RAILMASK nodeRails = node->ConnectedRails & xmtr->RailMask;
55300 +       EP_ENVELOPE *env      = &txd->Envelope;
55301 +
55302 +       if (EP_IS_TXD_STABALISING(txd->Envelope.Attr)) 
55303 +       {
55304 +           ASSERT(txd->TxdRail != NULL);
55305 +
55306 +           if (AFTER (lbolt, txd->RetryTime))
55307 +           {
55308 +               EPRINTF6 (DBG_STABILISE, "ep_check_xmtr txd=%p txdRail=%p send get node state to %d Xid=%08x.%08x.%016llx\n",
55309 +                         txd, txd->TxdRail, txd->NodeId, env->Xid.Generation, env->Xid.Handle, (long long)env->Xid.Unique);
55310 +               
55311 +               body.Service = txd->Service;
55312 +               if (ep_send_message ( txd->TxdRail->XmtrRail->CommsRail->Rail, txd->NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE, env->Xid, &body) == 0)
55313 +                   txd->RetryTime = lbolt + (MESSAGE_RETRY_TIME << ep_backoff (&txd->Backoff, EP_BACKOFF_STABILISE));
55314 +               else
55315 +                   txd->RetryTime = lbolt + MSGBUSY_RETRY_TIME;
55316 +           }
55317 +
55318 +           ep_kthread_schedule (&subsys->Thread, txd->RetryTime);
55319 +           continue;
55320 +       }
55321 +
55322 +       if (txd->TxdRail != NULL)
55323 +           continue;
55324 +
55325 +       switch (EP_ATTR2TYPE(txd->Envelope.Attr)) 
55326 +       {
55327 +       case EP_TYPE_SVC_INDICATOR: 
55328 +       {
55329 +           EP_RAILMASK       rmask=0;
55330 +           struct list_head *tmp;
55331 +
55332 +           list_for_each (tmp, &subsys->Rails) { 
55333 +               EP_COMMS_RAIL *commsRail = list_entry (tmp, EP_COMMS_RAIL, Link);
55334 +               if ( cm_svc_indicator_is_set(commsRail->Rail, EP_ATTR2DATA(txd->Envelope.Attr), txd->NodeId))
55335 +                   rmask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
55336 +           } 
55337 +           nodeRails &= rmask;
55338 +           break;
55339 +       }
55340 +       case EP_TYPE_TIMEOUT:
55341 +           timed_out = AFTER(lbolt, txd->TimeStamp + EP_ATTR2DATA(txd->Envelope.Attr)) ? (1) : (0);
55342 +           break;
55343 +       case EP_TYPE_RAILMASK:
55344 +           nodeRails &= EP_ATTR2DATA(txd->Envelope.Attr);
55345 +           break;
55346 +       default:
55347 +           timed_out = AFTER(lbolt, txd->TimeStamp +  EP_DEFAULT_TIMEOUT) ? (1) : (0);
55348 +           break;
55349 +       }
55350 +
55351 +       if (nodeRails == 0 || timed_out || (EP_IS_NO_FAILOVER(env->Attr) && EP_IS_PREFRAIL_SET(env->Attr) && 
55352 +                                           (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))) == 0))
55353 +       {
55354 +           EPRINTF5 (timed_out ? DBG_STABILISE : DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx to %d no rails connected or cannot failover (nodeRails=0x%x,timed_out=%d\n", 
55355 +                     txd, (long long) env->Xid.Unique, txd->NodeId, nodeRails, timed_out);
55356 +
55357 +           list_del  (&txd->Link);
55358 +           list_add_tail (&txd->Link, &txdList);
55359 +       }
55360 +       else
55361 +       {
55362 +           EP_XMTR_RAIL *xmtrRail;
55363 +           int i, len, rnum;
55364 +
55365 +           if (EP_IS_PREFRAIL_SET(env->Attr) && (nodeRails & EP_RAIL2RAILMASK(EP_ATTR2PREFRAIL(env->Attr))))
55366 +               rnum = EP_ATTR2PREFRAIL(env->Attr);
55367 +           else
55368 +               rnum = ep_pickRail (nodeRails);
55369 +
55370 +           EPRINTF3 (DBG_XMTR, "ep_check_xmtr: txd=%p XID=%llx mapping NMDs onto rail %d \n", txd, (long long) env->Xid.Unique, rnum);
55371 +
55372 +           for (i = len = 0; i < env->nFrags; i++, len += env->Frags[i].nmd_len)
55373 +               ep_nmd_map_rails (sys, &env->Frags[i], nodeRails);
55374 +
55375 +           if ((xmtrRail = xmtr->Rails[rnum]) == NULL || 
55376 +               !EP_XMTR_OP(xmtrRail,BindTxd) (txd, xmtrRail, EP_TXD_PHASE_ACTIVE))
55377 +               ep_kthread_schedule (&subsys->Thread, lbolt + RESOURCE_RETRY_TIME);
55378 +       }
55379 +    }
55380 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55381 +
55382 +    while (! list_empty (&txdList))
55383 +    {
55384 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
55385 +       list_del (&txd->Link);
55386 +
55387 +       txd->Handler (txd, txd->Arg, EP_NODE_DOWN);
55388 +       FreeTxd (xmtr, txd);
55389 +    }
55390 +
55391 +    /* Check to see if we're low on txds */
55392 +    if (xmtr->FreeDescCount < ep_txd_lowat)
55393 +       AllocateTxdBlock (xmtr, 0, NULL);
55394 +    
55395 +    /* Then check each rail */
55396 +    for (i = 0; i < EP_MAX_RAILS; i++) 
55397 +       if (xmtr->RailMask & (1 << i) ) 
55398 +           nextRunTime = EP_XMTR_OP (xmtr->Rails[i],Check) (xmtr->Rails[i], nextRunTime);
55399 +    return (nextRunTime);
55400 +}
55401 +
55402 +void
55403 +ep_display_txd (DisplayInfo *di, EP_TXD *txd)
55404 +{
55405 +    EP_ENVELOPE *env     = &txd->Envelope;
55406 +    EP_TXD_RAIL *txdRail = txd->TxdRail;
55407 +
55408 +    (di->func)(di->arg, "TXD: %p Version=%x Attr=%x Xid=%08x.%08x.%016llx\n", txd, 
55409 +              env->Version, env->Attr, env->Xid.Generation, env->Xid.Handle, (long long) env->Xid.Unique);
55410 +    (di->func)(di->arg,  "     NodeId=%d Range=%d.%d TxdRail=%x TxdMain=%x.%x.%x nFrags=%d\n",
55411 +              env->NodeId, EP_RANGE_LOW(env->Range), EP_RANGE_HIGH(env->Range), env->TxdRail,
55412 +              env->TxdMain.nmd_addr, env->TxdMain.nmd_len, env->TxdMain.nmd_attr, env->nFrags);
55413 +    (di->func)(di->arg,  "       Frag[0] %08x.%08x.%08x\n", env->Frags[0].nmd_addr, env->Frags[0].nmd_len, env->Frags[0].nmd_attr);
55414 +    (di->func)(di->arg,  "       Frag[1] %08x.%08x.%08x\n", env->Frags[1].nmd_addr, env->Frags[1].nmd_len, env->Frags[1].nmd_attr);
55415 +    (di->func)(di->arg,  "       Frag[2] %08x.%08x.%08x\n", env->Frags[2].nmd_addr, env->Frags[2].nmd_len, env->Frags[2].nmd_attr);
55416 +    (di->func)(di->arg,  "       Frag[3] %08x.%08x.%08x\n", env->Frags[3].nmd_addr, env->Frags[3].nmd_len, env->Frags[3].nmd_attr);
55417 +
55418 +    if (txdRail != NULL) EP_XMTR_OP (txdRail->XmtrRail, DisplayTxd) (di, txdRail);
55419 +}
55420 +
55421 +void
55422 +ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr)
55423 +{
55424 +    int               freeCount   = 0;
55425 +    int               activeCount = 0;
55426 +    struct list_head *el;
55427 +    int               i;
55428 +    unsigned long     flags;
55429 +
55430 +    spin_lock_irqsave (&xmtr->FreeDescLock, flags);
55431 +    list_for_each (el, &xmtr->FreeDescList)
55432 +       freeCount++;
55433 +    spin_unlock_irqrestore (&xmtr->FreeDescLock, flags);
55434 +
55435 +    spin_lock_irqsave (&xmtr->Lock, flags);
55436 +    list_for_each (el, &xmtr->ActiveDescList)
55437 +       activeCount++;
55438 +    
55439 +    (di->func)(di->arg, "ep_display_xmtr: xmtr=%p Free=%d Active=%d\n", xmtr, freeCount, activeCount);
55440 +    for (i = 0; i < EP_MAX_RAILS; i++)
55441 +       if (xmtr->Rails[i]) EP_XMTR_OP (xmtr->Rails[i], DisplayXmtr) (di, xmtr->Rails[i]);
55442 +
55443 +    list_for_each (el,&xmtr->ActiveDescList)
55444 +       ep_display_txd (di, list_entry (el, EP_TXD, Link));
55445 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55446 +}
55447 +
55448 +void
55449 +ep_xmtr_fillout_stats(EP_XMTR *xmtr, char *str)
55450 +{
55451 +    sprintf(str+strlen(str),"Tx     %lu  %lu /sec\n",       GET_STAT_TOTAL(xmtr->stats,tx),      GET_STAT_PER_SEC(xmtr->stats,tx) );
55452 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr->stats,tx_len) / (1024*1024),  GET_STAT_PER_SEC(xmtr->stats,tx_len) / (1024*1024));
55453 +}
55454 +
55455 +void
55456 +ep_xmtr_rail_fillout_stats(EP_XMTR_RAIL *xmtr_rail, char *str)
55457 +{
55458 +    sprintf(str+strlen(str),"Tx     %lu  %lu /sec\n",       GET_STAT_TOTAL(xmtr_rail->stats,tx),      GET_STAT_PER_SEC(xmtr_rail->stats,tx) );
55459 +    sprintf(str+strlen(str),"MBytes %lu  %lu Mbytes/sec\n", GET_STAT_TOTAL(xmtr_rail->stats,tx_len) / (1024*1024),  GET_STAT_PER_SEC(xmtr_rail->stats,tx_len) / (1024*1024));
55460 +}
55461 +
55462 +/*
55463 + * Local variables:
55464 + * c-file-style: "stroustrup"
55465 + * End:
55466 + */
55467 diff -urN clean/drivers/net/qsnet/ep/epcommsTx_elan3.c linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan3.c
55468 --- clean/drivers/net/qsnet/ep/epcommsTx_elan3.c        1969-12-31 19:00:00.000000000 -0500
55469 +++ linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan3.c  2004-11-12 05:55:03.000000000 -0500
55470 @@ -0,0 +1,1173 @@
55471 +/*
55472 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
55473 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
55474 + *
55475 + *    For licensing information please see the supplied COPYING file
55476 + *
55477 + */
55478 +
55479 +#ident "@(#)$Id: epcommsTx_elan3.c,v 1.19 2004/11/12 10:55:03 mike Exp $"
55480 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx_elan3.c,v $ */
55481 +
55482 +#include <qsnet/kernel.h>
55483 +
55484 +#include <elan/kcomm.h>
55485 +#include <elan/epsvc.h>
55486 +#include <elan/epcomms.h>
55487 +
55488 +#include "kcomm_vp.h"
55489 +#include "kcomm_elan3.h"
55490 +#include "epcomms_elan3.h"
55491 +#include "debug.h"
55492 +
55493 +#define XMTR_TO_RAIL(xmtrRail)         ((EP3_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail)
55494 +#define XMTR_TO_DEV(xmtrRail)          (XMTR_TO_RAIL(xmtrRail)->Device)
55495 +#define XMTR_TO_SUBSYS(xmtrRail)       (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys)
55496 +
55497 +static void TxEnveEvent (EP3_RAIL *rail, void *arg);
55498 +static void TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
55499 +static void TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
55500 +
55501 +static EP3_COOKIE_OPS EnveCookieOps =
55502 +{
55503 +    TxEnveEvent,
55504 +    TxEnveRetry,
55505 +    NULL, /* DmaCancelled */
55506 +    TxEnveVerify
55507 +};
55508 +
55509 +static void TxDataEvent (EP3_RAIL *rail, void *arg);
55510 +static void TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status);
55511 +static void TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);
55512 +
55513 +static EP3_COOKIE_OPS DataCookieOps =
55514 +{
55515 +    TxDataEvent,
55516 +    TxDataRetry,
55517 +    NULL, /* DmaCancelled */
55518 +    TxDataVerify
55519 +};
55520 +
55521 +static void TxDoneEvent (EP3_RAIL *dev, void *arg);
55522 +static void TxDoneRetry (EP3_RAIL *dev, void *arg, E3_DMA_BE *dma, int status);
55523 +static void TxDoneVerify (EP3_RAIL  *dev, void *arg, E3_DMA_BE *dma);
55524 +
55525 +static EP3_COOKIE_OPS DoneCookieOps = 
55526 +{
55527 +    TxDoneEvent,
55528 +    TxDoneRetry,
55529 +    NULL, /* DmaCancelled */
55530 +    TxDoneVerify,
55531 +} ;
55532 +
55533 +static int
55534 +AllocateTxdRailBlock (EP3_XMTR_RAIL *xmtrRail)
55535 +{
55536 +    EP3_RAIL          *rail = XMTR_TO_RAIL (xmtrRail);
55537 +    ELAN3_DEV         *dev = rail->Device;
55538 +    EP3_TXD_RAIL_BLOCK *blk;
55539 +    EP3_TXD_RAIL       *txdRail;
55540 +    sdramaddr_t        pTxdElan;
55541 +    EP3_TXD_RAIL_MAIN  *pTxdMain;
55542 +    E3_Addr           pTxdElanAddr;
55543 +    E3_Addr           pTxdMainAddr;
55544 +    E3_BlockCopyEvent  event;
55545 +    int                       i;
55546 +    unsigned long      flags;
55547 +
55548 +    KMEM_ZALLOC (blk, EP3_TXD_RAIL_BLOCK *, sizeof (EP3_TXD_RAIL_BLOCK), 1);
55549 +
55550 +    if (blk == NULL)
55551 +       return 0;
55552 +
55553 +    if ((pTxdElan = ep_alloc_elan (&rail->Generic, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdElanAddr)) == (sdramaddr_t) 0)
55554 +    {
55555 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
55556 +       return 0;
55557 +    }
55558 +
55559 +    if ((pTxdMain = ep_alloc_main (&rail->Generic, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK, 0, &pTxdMainAddr)) == (EP3_TXD_RAIL_MAIN *) NULL)
55560 +    {
55561 +       ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
55562 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
55563 +       return 0;
55564 +    }
55565 +    
55566 +    if (ReserveDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK, 0) != ESUCCESS)
55567 +    {
55568 +       ep_free_main (&rail->Generic, pTxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK);
55569 +       ep_free_elan (&rail->Generic, pTxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
55570 +       KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
55571 +       return 0;
55572 +    }
55573 +
55574 +    for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++)
55575 +    {
55576 +       txdRail->Generic.XmtrRail = &xmtrRail->Generic;
55577 +       txdRail->TxdElan          = pTxdElan;
55578 +       txdRail->TxdElanAddr      = pTxdElanAddr;
55579 +       txdRail->TxdMain          = pTxdMain;
55580 +       txdRail->TxdMainAddr      = pTxdMainAddr;
55581 +
55582 +       RegisterCookie (&rail->CookieTable, &txdRail->EnveCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), &EnveCookieOps, (void *) txdRail);
55583 +       RegisterCookie (&rail->CookieTable, &txdRail->DataCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), &DataCookieOps, (void *) txdRail);
55584 +       RegisterCookie (&rail->CookieTable, &txdRail->DoneCookie, pTxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), &DoneCookieOps, (void *) txdRail);
55585 +
55586 +       EP3_INIT_COPY_EVENT (event, txdRail->EnveCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, EnveEvent), 0);
55587 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent), sizeof (E3_BlockCopyEvent));
55588 +
55589 +       EP3_INIT_COPY_EVENT (event, txdRail->DataCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DataEvent), 0);
55590 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), sizeof (E3_BlockCopyEvent));
55591 +
55592 +       EP3_INIT_COPY_EVENT (event, txdRail->DoneCookie, pTxdMainAddr + offsetof (EP3_TXD_RAIL_MAIN, DoneEvent), 0);
55593 +       elan3_sdram_copyl_to_sdram (dev, &event, pTxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), sizeof (E3_BlockCopyEvent));
55594 +       
55595 +       pTxdMain->EnveEvent = EP3_EVENT_FREE;
55596 +       pTxdMain->DataEvent = EP3_EVENT_FREE;
55597 +       pTxdMain->DoneEvent = EP3_EVENT_FREE;
55598 +
55599 +       /* move onto next descriptor */
55600 +       pTxdElan     += EP3_TXD_RAIL_ELAN_SIZE;
55601 +       pTxdElanAddr += EP3_TXD_RAIL_ELAN_SIZE;
55602 +       pTxdMain      = (EP3_TXD_RAIL_MAIN *) ((unsigned long) pTxdMain + EP3_TXD_RAIL_MAIN_SIZE);
55603 +       pTxdMainAddr += EP3_TXD_RAIL_MAIN_SIZE;
55604 +    }
55605 +
55606 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
55607 +
55608 +    list_add  (&blk->Link, &xmtrRail->DescBlockList);
55609 +    xmtrRail->TotalDescCount += EP3_NUM_TXD_PER_BLOCK;
55610 +    xmtrRail->FreeDescCount  += EP3_NUM_TXD_PER_BLOCK;
55611 +
55612 +    for (i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++)
55613 +       list_add (&blk->Txd[i].Generic.Link, &xmtrRail->FreeDescList);
55614 +
55615 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
55616 +    
55617 +    return 1;
55618 +}
55619 +
55620 +static void
55621 +FreeTxdRailBlock (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL_BLOCK *blk)
55622 +{
55623 +    EP3_RAIL     *rail = XMTR_TO_RAIL(xmtrRail);
55624 +    EP3_TXD_RAIL *txdRail;
55625 +    unsigned long flags;
55626 +    int i;
55627 +
55628 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
55629 +
55630 +    list_del (&blk->Link);
55631 +    
55632 +    xmtrRail->TotalDescCount -= EP3_NUM_TXD_PER_BLOCK;
55633 +    
55634 +    for (txdRail = &blk->Txd[0], i = 0; i < EP3_NUM_TXD_PER_BLOCK; i++, txdRail++)
55635 +    {
55636 +       xmtrRail->FreeDescCount--;
55637 +       
55638 +       list_del (&txdRail->Generic.Link);
55639 +       
55640 +       DeregisterCookie (&rail->CookieTable, &txdRail->EnveCookie);
55641 +       DeregisterCookie (&rail->CookieTable, &txdRail->DataCookie);
55642 +       DeregisterCookie (&rail->CookieTable, &txdRail->DoneCookie);
55643 +    }
55644 +
55645 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
55646 +
55647 +    ReleaseDmaRetries (rail, EP3_NUM_TXD_PER_BLOCK);
55648 +
55649 +    ep_free_main (&rail->Generic, blk->Txd[0].TxdMainAddr, EP3_TXD_RAIL_MAIN_SIZE * EP3_NUM_TXD_PER_BLOCK);
55650 +    ep_free_elan (&rail->Generic, blk->Txd[0].TxdElanAddr, EP3_TXD_RAIL_ELAN_SIZE * EP3_NUM_TXD_PER_BLOCK);
55651 +    KMEM_FREE (blk, sizeof (EP3_TXD_RAIL_BLOCK));
55652 +}
55653 +
55654 +static EP3_TXD_RAIL *
55655 +GetTxdRail (EP3_XMTR_RAIL *xmtrRail)
55656 +{
55657 +    EP_COMMS_SUBSYS  *subsys = xmtrRail->Generic.Xmtr->Subsys;
55658 +    EP3_TXD_RAIL     *txdRail;
55659 +    int low_on_txds;
55660 +    unsigned long flags;
55661 +
55662 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
55663 +
55664 +    if (list_empty (&xmtrRail->FreeDescList))
55665 +       txdRail = NULL;
55666 +    else
55667 +    {
55668 +       txdRail = list_entry (xmtrRail->FreeDescList.next, EP3_TXD_RAIL, Generic.Link);
55669 +
55670 +#if defined(DEBUG)
55671 +       {
55672 +           EP_RAIL   *rail = xmtrRail->Generic.CommsRail->Rail;
55673 +           ELAN3_DEV *dev  = ((EP3_RAIL *) rail)->Device;
55674 +           
55675 +           EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_FREE);
55676 +           EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_FREE);
55677 +           EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_FREE);
55678 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
55679 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
55680 +           EP_ASSERT (rail, SDRAM_ASSERT(elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
55681 +       }
55682 +#endif
55683 +       
55684 +       list_del (&txdRail->Generic.Link);
55685 +
55686 +       xmtrRail->FreeDescCount--;
55687 +    }
55688 +    /* Wakeup the descriptor primer thread if there's not many left */
55689 +    low_on_txds = (xmtrRail->FreeDescCount < ep_txd_lowat);
55690 +
55691 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
55692 +
55693 +    if (low_on_txds)
55694 +       ep_kthread_schedule (&subsys->Thread, lbolt);
55695 +
55696 +    return (txdRail);
55697 +}
55698 +
55699 +static void
55700 +FreeTxdRail (EP3_XMTR_RAIL *xmtrRail, EP3_TXD_RAIL *txdRail)
55701 +{
55702 +    unsigned long flags;
55703 +
55704 +#if defined(DEBUG_ASSERT)
55705 +    {
55706 +       EP_RAIL   *rail = xmtrRail->Generic.CommsRail->Rail;
55707 +       ELAN3_DEV *dev  = ((EP3_RAIL *) rail)->Device;
55708 +
55709 +       EP_ASSERT (rail, txdRail->Generic.XmtrRail == &xmtrRail->Generic);
55710 +       
55711 +       EP_ASSERT (rail, txdRail->TxdMain->EnveEvent == EP3_EVENT_PRIVATE);
55712 +       EP_ASSERT (rail, txdRail->TxdMain->DataEvent == EP3_EVENT_PRIVATE);
55713 +       EP_ASSERT (rail, txdRail->TxdMain->DoneEvent == EP3_EVENT_PRIVATE);
55714 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
55715 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
55716 +       EP_ASSERT (rail, SDRAM_ASSERT (elan3_sdram_readl (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)) == 0));
55717 +       
55718 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_FREE;
55719 +       txdRail->TxdMain->DataEvent = EP3_EVENT_FREE;
55720 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_FREE;
55721 +    }
55722 +#endif
55723 +
55724 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
55725 +    
55726 +    list_add (&txdRail->Generic.Link, &xmtrRail->FreeDescList);
55727 +
55728 +    xmtrRail->FreeDescCount++;
55729 +
55730 +    if (xmtrRail->FreeDescWaiting)
55731 +    {
55732 +       xmtrRail->FreeDescWaiting--;
55733 +       kcondvar_wakeupall (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock);
55734 +    }
55735 +
55736 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
55737 +}
55738 +
55739 +static void
55740 +BindTxdToRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
55741 +{
55742 +    ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock));
55743 +
55744 +    EPRINTF6 (DBG_XMTR, "%s: BindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
55745 +             XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, 
55746 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique);
55747 +
55748 +    txd->TxdRail = &txdRail->Generic;
55749 +    txdRail->Generic.Txd = txd;
55750 +}
55751 +
55752 +static void
55753 +UnbindTxdFromRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
55754 +{
55755 +    ASSERT (SPINLOCK_HELD (&txd->Xmtr->Lock));
55756 +    ASSERT (txd->TxdRail == &txdRail->Generic && txdRail->Generic.Txd == txd);
55757 +
55758 +    EPRINTF6 (DBG_XMTR, "%s: UnbindTxdToRail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
55759 +             XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail, 
55760 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long) txd->Envelope.Xid.Unique);
55761 +    txd->TxdRail = NULL;
55762 +    txdRail->Generic.Txd = NULL; 
55763 +}
55764 +
55765 +/*
55766 + * TxEnveEvent: arg == EP_TXD
55767 + *    Called when envelope delivered
55768 + */
55769 +static void
55770 +TxEnveEvent (EP3_RAIL *rail, void *arg)
55771 +{
55772 +    panic ("TxEnveEvent");
55773 +}
55774 +
55775 +/*
55776 + * TxEnveRetry: arg == EP3_TXD_RAIL
55777 + *    Called on retry of dma of large message envelope.
55778 + */
55779 +static void
55780 +TxEnveRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
55781 +{
55782 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) arg;
55783 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
55784 +    
55785 +    EPRINTF3 (DBG_XMTR, "%s: TxEnveRetry: xmtr %p txd %p\n",  rail->Generic.Name, xmtrRail, txdRail);
55786 +    
55787 +    EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE);
55788 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */
55789 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId);
55790 +
55791 +    if (! TxdShouldStabalise (&txdRail->Generic, &rail->Generic))
55792 +       QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_ENVELOPE));
55793 +    else
55794 +       QueueDmaForRetry (rail, dma, EP_RETRY_STABALISING);     /* place dma on stabilising list for neterr fixup */
55795 +}
55796 +
55797 +static void
55798 +TxEnveVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
55799 +{
55800 +    EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg;
55801 +    
55802 +    EP_ASSERT (&rail->Generic, txdRail->TxdMain->EnveEvent == EP3_EVENT_ACTIVE);
55803 +    EP_ASSERT (&rail->Generic, SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 1)); /* PCI read */
55804 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txdRail->Generic.Txd->NodeId);
55805 +}
55806 +
55807 +/*
55808 + * TxDataEvent: arg == EP3_TXD
55809 + *    Called on completion of a large transmit.
55810 + */
55811 +static void
55812 +TxDataEvent (EP3_RAIL *rail, void *arg)
55813 +{
55814 +    EP3_TXD_RAIL      *txdRail  = (EP3_TXD_RAIL *) arg;
55815 +    EP3_XMTR_RAIL     *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
55816 +    EP_XMTR          *xmtr     = xmtrRail->Generic.Xmtr;
55817 +    EP3_TXD_RAIL_MAIN *txdMain  = txdRail->TxdMain;
55818 +    sdramaddr_t        txdElan  = txdRail->TxdElan;
55819 +    int                delay    = 1;
55820 +    EP_TXD            *txd;
55821 +    unsigned long      flags;
55822 +
55823 +    spin_lock_irqsave (&xmtr->Lock, flags);
55824 +    for (;;)
55825 +    {
55826 +       if (EP3_EVENT_FIRED (txdRail->DataCookie, txdMain->DataEvent))
55827 +           break;
55828 +
55829 +       if (EP3_EVENT_FIRING (rail->Device, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdMain->DataEvent))                /* PCI read */
55830 +       {
55831 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
55832 +               panic ("TxDataEvent: events set but block copy not completed\n");
55833 +           DELAY(delay);
55834 +           delay <<= 1;
55835 +       }
55836 +       else
55837 +       {
55838 +           EPRINTF3 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p previously collecting by polling\n", 
55839 +                     rail->Generic.Name, xmtrRail, txdRail);
55840 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
55841 +           return;
55842 +       }
55843 +       mb();
55844 +    }
55845 +
55846 +    if ((txd = txdRail->Generic.Txd) == NULL ||                        /* If there is no txd, or if the descriptor is marked */
55847 +       !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr)) ||       /* as no interrupt, or been reused as an RPC, */
55848 +       (EP_IS_RPC(txd->Envelope.Attr)))                        /* then we were either called as a result of a previous */
55849 +    {                                                          /* tx which was completed by polling or as a result */
55850 +       spin_unlock_irqrestore (&xmtr->Lock, flags);            /* of a EnableTxCallBack/DisableTxCallback */
55851 +
55852 +       EPRINTF4 (DBG_XMTR, "%s: TxDataEvent: xmtr %p txd %p recyled (%x)\n", 
55853 +                 rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0);
55854 +       return;
55855 +    }
55856 +
55857 +    ASSERT (EP3_EVENT_FIRED (txdRail->EnveCookie, txdMain->EnveEvent));
55858 +
55859 +    EPRINTF5 (DBG_XMTR, "%s: TxDataEvent : xmtrRail=%p txdRail=%p tx=%p XID=%llx\n", 
55860 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
55861 +    
55862 +    ep_xmtr_txd_stat(xmtr,txd);
55863 +    
55864 +    /* remove from active transmit lists */
55865 +    list_del (&txd->Link);
55866 +
55867 +    UnbindTxdFromRail (txd, txdRail);
55868 +    
55869 +    /* clear the done flags for next time round */
55870 +    txdMain->EnveEvent = EP3_EVENT_PRIVATE;
55871 +    txdMain->DataEvent = EP3_EVENT_PRIVATE;
55872 +    txdMain->DoneEvent = EP3_EVENT_PRIVATE;
55873 +    
55874 +    FreeTxdRail (xmtrRail, txdRail);
55875 +
55876 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55877 +    
55878 +    txd->Handler (txd, txd->Arg, EP_SUCCESS);
55879 +    
55880 +    FreeTxd (xmtr, txd);
55881 +}
55882 +
55883 +/*
55884 + * TxDataRetry: arg == EP3_TXD
55885 + *    Called on retry of remote "put" dma of large transmit data.
55886 + */
55887 +static void
55888 +TxDataRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
55889 +{
55890 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) arg;
55891 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
55892 +    EP_TXD        *txd      = txdRail->Generic.Txd;
55893 +
55894 +    EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && 
55895 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) ||  /* PCI read */
55896 +                               (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
55897 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0))));  /* PCI read */
55898 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId);
55899 +
55900 +    EPRINTF5 (DBG_XMTR, "%s: TxDataRetry: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", 
55901 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
55902 +    
55903 +    QueueDmaForRetry (rail, dma, EP_RETRY_LOW_PRI_RETRY + ep_backoff (&txdRail->Backoff, EP_BACKOFF_DATA));
55904 +}
55905 +
55906 +static void
55907 +TxDataVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
55908 +{
55909 +    EP3_TXD_RAIL *txdRail = (EP3_TXD_RAIL *) arg;
55910 +    EP_TXD       *txd     = txdRail->Generic.Txd;
55911 +
55912 +    EP_ASSERT (&rail->Generic, ((txdRail->TxdMain->DataEvent == EP3_EVENT_ACTIVE && 
55913 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) >= 1)) ||  /* PCI read */
55914 +                               (EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
55915 +                                SDRAM_ASSERT (elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0))));  /* PCI read */
55916 +    EP_ASSERT (&rail->Generic, dma->s.dma_direction == DMA_WRITE && EP_VP_TO_NODE(dma->s.dma_destVProc) == txd->NodeId);
55917 +}
55918 +
55919 +/*
55920 + * TxDoneEvent: arg == EP3_TXD
55921 + *    Called on completion of a RPC.
55922 + */
55923 +static void
55924 +TxDoneEvent (EP3_RAIL *rail, void *arg)
55925 +{
55926 +    EP3_TXD_RAIL      *txdRail  = (EP3_TXD_RAIL *) arg;
55927 +    EP3_XMTR_RAIL     *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
55928 +    EP_XMTR          *xmtr     = xmtrRail->Generic.Xmtr;
55929 +    int                delay   = 1;
55930 +    EP_TXD           *txd;
55931 +    unsigned long      flags;
55932 +
55933 +    spin_lock_irqsave (&xmtr->Lock, flags);
55934 +
55935 +    for (;;)
55936 +    {
55937 +       if (EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && 
55938 +           EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
55939 +           break;
55940 +       
55941 +       if (EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent), txdRail->DoneCookie, txdRail->TxdMain->DoneEvent) && 
55942 +           EP3_EVENT_FIRING (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent), txdRail->DataCookie, txdRail->TxdMain->DataEvent))
55943 +       {
55944 +           if (delay > EP3_EVENT_FIRING_TLIMIT)
55945 +               panic ("TxDoneEvent: events set but block copy not completed\n");
55946 +           DELAY(delay);
55947 +           delay <<= 1;
55948 +       }
55949 +       else
55950 +       {
55951 +           EPRINTF3 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txdRail %p previously collecting by polling\n", 
55952 +                     rail->Generic.Name, xmtr, txdRail);
55953 +           spin_unlock_irqrestore (&xmtr->Lock, flags);
55954 +           return;
55955 +       }
55956 +       mb();
55957 +    }
55958 +
55959 +    if ((txd = txdRail->Generic.Txd) == NULL ||                                                 /* If there is no txd, or if the descriptor is marked */
55960 +       !(EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr) || EP_IS_RPC(txd->Envelope.Attr))) /* marked as no interrupt, or been reused as a transmit, */
55961 +    {                                                                                   /* then we were either called as a result of a previous */
55962 +       spin_unlock_irqrestore (&xmtr->Lock, flags);                                     /* tx which was completed by polling or as a result */
55963 +                                                                                        /* of a EnableTxCallBack/DisableTxCallback */
55964 +
55965 +       EPRINTF4 (DBG_XMTR, "%s: TxDoneEvent: xmtr %p txd %p recyled (%x)\n", 
55966 +                 rail->Generic.Name, xmtr, txd, txd ? txd->Envelope.Attr : 0);
55967 +       return; 
55968 +    }
55969 +
55970 +    EPRINTF5 (DBG_XMTR, "%s: TxDoneEvent: xmtrRail=%p txdRail=%p txd=%p XID=%llx\n", 
55971 +             rail->Generic.Name, xmtrRail, txdRail, txd, (long long) txd->Envelope.Xid.Unique);
55972 +
55973 +    ep_xmtr_txd_stat(xmtr,txd);
55974 +
55975 +    /* remove from active transmit list */
55976 +    list_del (&txd->Link);
55977 +    
55978 +    UnbindTxdFromRail (txd, txdRail);
55979 +    
55980 +    /* clear the done flags for next time round */
55981 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
55982 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
55983 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
55984 +    
55985 +    FreeTxdRail (xmtrRail, txdRail);
55986 +
55987 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
55988 +           
55989 +    if (txd->Handler)
55990 +       txd->Handler (txd, txd->Arg, EP_SUCCESS);
55991 +       
55992 +    FreeTxd (xmtr, txd);
55993 +}
55994 +
55995 +/*
55996 + * TxDoneRetry: arg == EP3_TXD
55997 + */
55998 +static void
55999 +TxDoneRetry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int status)
56000 +{
56001 +    panic ("TxDoneRetry");
56002 +}
56003 +
56004 +static void
56005 +TxDoneVerify (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma)
56006 +{
56007 +    panic ("TxDoneVerify");
56008 +}
56009 +
56010 +static void
56011 +EnableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
56012 +{
56013 +    ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device;
56014 +
56015 +    EPRINTF3 (DBG_XMTR, "%s: EnableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail);
56016 +
56017 +    txd->Envelope.Attr = EP_SET_INTERRUPT_ENABLED(txd->Envelope.Attr);
56018 +               
56019 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY);
56020 +       
56021 +    if (EP_IS_RPC(txd->Envelope.Attr))
56022 +    {
56023 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY);
56024 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type),  EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DoneCookie.Cookie);
56025 +    }
56026 +    else
56027 +    {
56028 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY | EV_TYPE_EVIRQ | txdRail->DataCookie.Cookie);
56029 +       elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY);
56030 +    }
56031 +}
56032 +
56033 +static void
56034 +DisableTransmitCallback (EP_TXD *txd, EP3_TXD_RAIL *txdRail)
56035 +{
56036 +    ELAN3_DEV *dev = XMTR_TO_RAIL(txdRail->Generic.XmtrRail)->Device;
56037 +
56038 +    EPRINTF3 (DBG_XMTR, "%s: DisableTransmitCallback: txd %p txdRail %p\n", XMTR_TO_RAIL (txdRail->Generic.XmtrRail)->Generic.Name, txd, txdRail);
56039 +
56040 +    txd->Envelope.Attr = EP_CLEAR_INTERRUPT_ENABLED(txd->Envelope.Attr);
56041 +
56042 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type), EV_TYPE_BCOPY);
56043 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type), EV_TYPE_BCOPY);
56044 +    elan3_sdram_writel (dev, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type), EV_TYPE_BCOPY);
56045 +}
56046 +
56047 +static void
56048 +InitialiseTxdRail (EP_TXD *txd, EP3_TXD_RAIL *txdRail, int phase)
56049 +{
56050 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
56051 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
56052 +
56053 +    /* Flush the Elan TLB if mappings have changed */
56054 +    ep_perrail_dvma_sync (&rail->Generic);
56055 +
56056 +    /* Initialise the per-rail fields in the envelope */
56057 +    txd->Envelope.TxdRail = txdRail->TxdElanAddr;
56058 +    txd->Envelope.NodeId  = rail->Generic.Position.pos_nodeid;
56059 +
56060 +    /* Initialise the dma backoff */
56061 +    txdRail->Backoff.type = EP_BACKOFF_FREE;
56062 +
56063 +    /* Initialise the per-rail events */
56064 +    switch (phase)
56065 +    {
56066 +    case EP_TXD_PHASE_ACTIVE:
56067 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 1);
56068 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 
56069 +                           (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + (EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0));
56070 +       
56071 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_ACTIVE;
56072 +       txdRail->TxdMain->DataEvent = EP3_EVENT_ACTIVE;
56073 +       break;
56074 +       
56075 +    case EP_TXD_PHASE_PASSIVE:
56076 +       ASSERT (EP_IS_RPC(txd->Envelope.Attr));
56077 +
56078 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);
56079 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);
56080 +
56081 +       txdRail->TxdMain->EnveEvent = txdRail->EnveCookie.Cookie;
56082 +       txdRail->TxdMain->DataEvent = txdRail->DataCookie.Cookie;
56083 +       break;
56084 +    }
56085 +
56086 +    if (! EP_IS_RPC(txd->Envelope.Attr))
56087 +       txdRail->TxdMain->DoneEvent = txdRail->DoneCookie.Cookie;
56088 +    else
56089 +    {
56090 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 1);
56091 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_ACTIVE;
56092 +    }
56093 +
56094 +    if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
56095 +       DisableTransmitCallback (txd, txdRail);
56096 +    else
56097 +       EnableTransmitCallback (txd, txdRail);
56098 +
56099 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
56100 +    if ( epdebug_check_sum ) 
56101 +       txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags);
56102 +    else
56103 +#endif
56104 +       txd->Envelope.CheckSum = 0;  
56105 +
56106 +    /* copy the envelope and payload if present down to sdram */
56107 +    elan3_sdram_copyl_to_sdram (rail->Device, &txd->Envelope, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Envelope), EP_ENVELOPE_SIZE);
56108 +    
56109 +    if (EP_HAS_PAYLOAD(txd->Envelope.Attr))
56110 +       elan3_sdram_copyl_to_sdram (rail->Device, &txd->Payload, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, Payload), EP_PAYLOAD_SIZE);
56111 +}
56112 +
56113 +void
56114 +ep3xmtr_flush_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
56115 +{
56116 +    EP3_RAIL *rail = XMTR_TO_RAIL (xmtrRail);
56117 +    struct list_head *el;
56118 +    unsigned long flags;
56119 +
56120 +    switch (rail->Generic.CallbackStep)
56121 +    {
56122 +    case EP_CB_FLUSH_FILTERING:
56123 +       /* only need to acquire/release the Lock to ensure that
56124 +        * the node state transition has been noticed. */
56125 +       spin_lock_irqsave (&xmtr->Lock, flags);
56126 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
56127 +       break;
56128 +
56129 +    case EP_CB_FLUSH_FLUSHING:
56130 +       spin_lock_irqsave (&xmtr->Lock, flags);
56131 +       
56132 +       list_for_each (el, &xmtr->ActiveDescList) {
56133 +           EP_TXD       *txd      = list_entry (el, EP_TXD, Link);
56134 +           EP3_TXD_RAIL *txdRail  = (EP3_TXD_RAIL *) txd->TxdRail;
56135 +           EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
56136 +           
56137 +           if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
56138 +               continue;
56139 +           
56140 +           if (EP_IS_RPC(txd->Envelope.Attr))
56141 +           {
56142 +               if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
56143 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
56144 +               else if (! EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
56145 +                   nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
56146 +           }
56147 +           else
56148 +           {
56149 +               if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
56150 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
56151 +           }
56152 +       }
56153 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
56154 +       break;
56155 +
56156 +    default:
56157 +       panic ("ep3xmtr_flush_callback: invalid callback step\n");
56158 +       break;
56159 +    }
56160 +}
56161 +
56162 +void
56163 +ep3xmtr_failover_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
56164 +{
56165 +    EP3_RAIL         *rail   = XMTR_TO_RAIL (xmtrRail);
56166 +    struct list_head  txdList;
56167 +    struct list_head *el, *nel;
56168 +    unsigned long flags;
56169 +#ifdef SUPPORT_RAIL_FAILOVER
56170 +    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
56171 +#endif
56172 +
56173 +    INIT_LIST_HEAD (&txdList);
56174 +
56175 +    spin_lock_irqsave (&xmtr->Lock, flags);
56176 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
56177 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
56178 +       EP3_TXD_RAIL *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
56179 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
56180 +           
56181 +       /* Only progress relocation of txd's bound to this rail */
56182 +       if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED)
56183 +           continue;
56184 +       
56185 +#ifdef SUPPORT_RAIL_FAILOVER
56186 +       /* Transmit data not been sent, so just restart on different rail */
56187 +       if (! EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent))
56188 +       {
56189 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d unbind an retry\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
56190 +           
56191 +           UnbindTxdFromRail (txd, txdRail);
56192 +           
56193 +           /* clear the done flags - so that it will be ignored if an event interrupt is generated */
56194 +           txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
56195 +           txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
56196 +           txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
56197 +
56198 +           /* reset all events, since none of them could have been set */
56199 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                          /* PCI write */
56200 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                          /* PCI write */
56201 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                          /* PCI write */
56202 +           
56203 +           FreeTxdRail (xmtrRail, txdRail);
56204 +           
56205 +           /* epcomms thread will restart on different rail */
56206 +           ep_kthread_schedule (&subsys->Thread, lbolt);
56207 +           continue;
56208 +       }
56209 +
56210 +       if (EP_IS_RPC(txd->Envelope.Attr) && !EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
56211 +       {
56212 +           if (EP_IS_NO_FAILOVER(txd->Envelope.Attr))
56213 +           {
56214 +               EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d - not able to failover\n",
56215 +                         rail->Generic.Name, xmtr, txd, txd->NodeId);
56216 +
56217 +               list_del (&txd->Link);
56218 +               UnbindTxdFromRail (txd, txdRail);
56219 +               
56220 +               /* clear the done flags - so that it will be ignored if an event interrupt is generated */
56221 +               txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
56222 +               txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
56223 +               txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
56224 +               
56225 +               /* envelope and data events must have been set, so only clear the done event */
56226 +               EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)) == 0));
56227 +               EP_ASSERT (&rail->Generic, SDRAM_ASSERT(elan3_sdram_readl (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)) == 0));
56228 +
56229 +               elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                              /* PCI write */
56230 +               
56231 +               FreeTxdRail (xmtrRail, txdRail);
56232 +           
56233 +               list_add_tail (&txd->Link, &txdList);
56234 +               continue;
56235 +           }
56236 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d passive\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
56237 +           
56238 +           nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
56239 +           continue;
56240 +       }
56241 +
56242 +       EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
56243 +#endif
56244 +
56245 +    }
56246 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
56247 +
56248 +    while (! list_empty (&txdList)) 
56249 +    {
56250 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
56251 +
56252 +       list_del (&txd->Link);
56253 +
56254 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
56255 +       
56256 +       FreeTxd (xmtr, txd);
56257 +    }
56258 +}
56259 +
56260 +
56261 +void
56262 +ep3xmtr_disconnect_callback (EP_XMTR *xmtr, EP3_XMTR_RAIL *xmtrRail)
56263 +{
56264 +    EP3_RAIL         *rail = XMTR_TO_RAIL (xmtrRail);
56265 +    struct list_head *el, *nel;
56266 +    struct list_head  txdList;
56267 +    unsigned long flags;
56268 +    
56269 +    INIT_LIST_HEAD (&txdList);
56270 +
56271 +    spin_lock_irqsave (&xmtr->Lock, flags);
56272 +
56273 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
56274 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
56275 +       EP3_TXD_RAIL *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
56276 +       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[txd->NodeId];
56277 +           
56278 +       if (!TXD_BOUND2RAIL(txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
56279 +           continue;
56280 +       
56281 +       if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) &&
56282 +           EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
56283 +           EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
56284 +       {
56285 +           EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p completed to node %d\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
56286 +           continue;
56287 +       }
56288 +
56289 +       /* Remove from active list */
56290 +       list_del (&txd->Link);
56291 +       
56292 +       UnbindTxdFromRail (txd, txdRail);
56293 +       
56294 +       /* clear the done flags - so that it will be ignored if an event interrupt is generated */
56295 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
56296 +       txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
56297 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
56298 +       
56299 +       /* reset the envelope and data events, since only they could have been set */
56300 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                              /* PCI write */
56301 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                              /* PCI write */
56302 +       elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                              /* PCI write */
56303 +       
56304 +       FreeTxdRail (xmtrRail, txdRail);
56305 +           
56306 +       EPRINTF4 (DBG_XMTR, "%s: ep3xmtr_disconnect_callback - xmtr %p txd %p node %d not conected\n", rail->Generic.Name, xmtr, txd, txd->NodeId);
56307 +
56308 +       /* add to the list of txd's which are to be completed */
56309 +       list_add_tail (&txd->Link, &txdList);
56310 +    }
56311 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
56312 +    
56313 +    while (! list_empty (&txdList)) 
56314 +    {
56315 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
56316 +
56317 +       list_del (&txd->Link);
56318 +
56319 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
56320 +       
56321 +       FreeTxd (xmtr, txd);
56322 +    }
56323 +}
56324 +
56325 +int
56326 +ep3xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how)
56327 +{
56328 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
56329 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) t;
56330 +    EP_TXD        *txd      = txdRail->Generic.Txd;
56331 +
56332 +    switch (how)
56333 +    {
56334 +    case ENABLE_TX_CALLBACK:
56335 +       if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
56336 +           EnableTransmitCallback (txd, txdRail);
56337 +       break;
56338 +
56339 +    case DISABLE_TX_CALLBACK:
56340 +       if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
56341 +           DisableTransmitCallback (txd, txdRail);
56342 +       break;
56343 +    }
56344 +
56345 +    if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent) &&
56346 +       EP3_EVENT_FIRED (txdRail->DataCookie, txdRail->TxdMain->DataEvent) &&
56347 +       EP3_EVENT_FIRED (txdRail->DoneCookie, txdRail->TxdMain->DoneEvent))
56348 +    {
56349 +       EPRINTF3 (DBG_XMTR, "%s: ep3xmtr_poll_txd: txd=%p XID=%llx completed\n", 
56350 +                 XMTR_TO_RAIL (xmtrRail)->Generic.Name, txd, (long long) txd->Envelope.Xid.Unique);
56351 +
56352 +       ep_xmtr_txd_stat(xmtrRail->Generic.Xmtr,txd);
56353 +
56354 +       UnbindTxdFromRail (txd, txdRail);
56355 +       
56356 +       /* clear the done flags - so that it will be ignored if an event interrupt is generated */
56357 +       txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
56358 +       txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
56359 +       txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
56360 +       
56361 +       FreeTxdRail (xmtrRail, txdRail);
56362 +
56363 +       return 1;
56364 +    }
56365 +
56366 +    return 0;
56367 +}
56368 +
56369 +int
56370 +ep3xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase)
56371 +{
56372 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
56373 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
56374 +    EP3_TXD_RAIL  *txdRail;
56375 +    E3_DMA_BE      dmabe;
56376 +
56377 +    if ((txdRail = GetTxdRail (xmtrRail)) == NULL)
56378 +       return 0;
56379 +
56380 +    switch (phase)
56381 +    {
56382 +    case EP_TXD_PHASE_ACTIVE:
56383 +       if (rail->Generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED)
56384 +       {
56385 +           EPRINTF2 (DBG_XMTR, "%s: TransmitTxdOnRail: node %u not connected on this rail\n", rail->Generic.Name, txd->NodeId);
56386 +
56387 +           /* clear the done flags - so that it will be ignored if an event interrupt is generated */
56388 +           txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
56389 +           txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
56390 +           txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
56391 +
56392 +           /* reset all events, since none of them could have been set */
56393 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                          /* PCI write */
56394 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                          /* PCI write */
56395 +           elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                          /* PCI write */
56396 +
56397 +           FreeTxdRail (xmtrRail, txdRail);
56398 +           return 0;
56399 +       }
56400 +
56401 +       InitialiseTxdRail (txd, txdRail, phase);
56402 +
56403 +       /* Initialise the dma descriptor */
56404 +       dmabe.s.dma_type            = E3_DMA_TYPE (DMA_BYTE, DMA_WRITE, DMA_QUEUED, EP3_DMAFAILCOUNT);
56405 +       dmabe.s.dma_size            = (EP_HAS_PAYLOAD(txd->Envelope.Attr) ? EP_INPUTQ_SIZE : EP_ENVELOPE_SIZE);
56406 +       dmabe.s.dma_source          = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, Envelope);
56407 +       dmabe.s.dma_dest            = (E3_Addr) 0;
56408 +       dmabe.s.dma_destEvent       = EP_MSGQ_ADDR(txd->Service);
56409 +       dmabe.s.dma_destCookieVProc = EP_VP_DATA (txd->NodeId);
56410 +       dmabe.s.dma_srcEvent        = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent);
56411 +       dmabe.s.dma_srcCookieVProc  = LocalCookie (rail, txd->NodeId);
56412 +
56413 +       EPRINTF8 (DBG_XMTR, "%s: TransmitTxdOnRail: txd=%p txdRail=%p @ %x XID=%llx dest=%u srcEvent=%x srcCookie=%x\n", rail->Generic.Name, 
56414 +                 txd, txdRail, txdRail->TxdElanAddr, (long long) txd->Envelope.Xid.Unique, txd->NodeId, dmabe.s.dma_srcEvent, dmabe.s.dma_srcCookieVProc);
56415 +       
56416 +       BindTxdToRail (txd, txdRail);
56417 +       
56418 +       if (IssueDma (rail, &dmabe, EP_RETRY_LOW_PRI, FALSE) != ISSUE_COMMAND_OK)
56419 +           QueueDmaForRetry (rail, &dmabe, EP_RETRY_LOW_PRI);
56420 +       break;
56421 +
56422 +    case EP_TXD_PHASE_PASSIVE:
56423 +       InitialiseTxdRail (txd, txdRail, EP_TXD_PHASE_PASSIVE);                         /* initialise as passive (updated envelope) */
56424 +       
56425 +       EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE);     /* unbind from existing rail */
56426 +
56427 +       BindTxdToRail (txd, txdRail);                                                   /* and bind it to our new rail */
56428 +       break;
56429 +    }
56430 +
56431 +    return 1;
56432 +}
56433 +
56434 +void
56435 +ep3xmtr_unbind_txd (EP_TXD *txd, unsigned int phase)
56436 +{
56437 +    EP3_TXD_RAIL  *txdRail  = (EP3_TXD_RAIL *) txd->TxdRail;
56438 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
56439 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
56440 +
56441 +    /* XXXX - TBD assertions on phase */
56442 +
56443 +    UnbindTxdFromRail (txd, txdRail);
56444 +    
56445 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
56446 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
56447 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
56448 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
56449 +    
56450 +    /* reset the envelope and data events, since only they could have been set */
56451 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                         /* PCI write */
56452 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                         /* PCI write */
56453 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                         /* PCI write */         
56454 +    
56455 +    FreeTxdRail (xmtrRail, txdRail);
56456 +}
56457 +
56458 +long
56459 +ep3xmtr_check (EP_XMTR_RAIL *x, long nextRunTime)
56460 +{
56461 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
56462 +
56463 +    if (xmtrRail->FreeDescCount < ep_txd_lowat && !AllocateTxdRailBlock(xmtrRail))
56464 +    {
56465 +       EPRINTF1 (DBG_RCVR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->Generic.Name);
56466 +               
56467 +       if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
56468 +           nextRunTime = lbolt + RESOURCE_RETRY_TIME;
56469 +    }
56470 +    
56471 +    return nextRunTime;
56472 +}
56473 +
56474 +void
56475 +ep3xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
56476 +{
56477 +    EP3_XMTR_RAIL *xmtrRail;
56478 +    unsigned long  flags;
56479 +
56480 +    KMEM_ZALLOC (xmtrRail, EP3_XMTR_RAIL *, sizeof (EP3_XMTR_RAIL), 1);
56481 +
56482 +    spin_lock_init (&xmtrRail->FreeDescLock);
56483 +    kcondvar_init  (&xmtrRail->FreeDescSleep);
56484 +    INIT_LIST_HEAD (&xmtrRail->FreeDescList);
56485 +    INIT_LIST_HEAD (&xmtrRail->DescBlockList);
56486 +
56487 +    xmtrRail->Generic.CommsRail = commsRail;
56488 +    xmtrRail->Generic.Xmtr      = xmtr;
56489 +
56490 +    spin_lock_irqsave (&xmtr->Lock, flags);
56491 +
56492 +    xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->Generic;
56493 +    xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);
56494 +
56495 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
56496 +}
56497 +
56498 +void
56499 +ep3xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
56500 +{
56501 +    EP3_RAIL         *rail     = (EP3_RAIL *) commsRail->Rail;
56502 +    EP3_XMTR_RAIL    *xmtrRail = (EP3_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number];
56503 +    unsigned long     flags;
56504 +
56505 +    /* rail mask set as not usable */
56506 +    spin_lock_irqsave (&xmtr->Lock, flags);
56507 +    xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->Generic.Number);
56508 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
56509 +
56510 +    /* wait for all txd's for this rail to become free */
56511 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
56512 +    while (xmtrRail->FreeDescCount != xmtrRail->TotalDescCount)
56513 +    {
56514 +       xmtrRail->FreeDescWaiting++;
56515 +       kcondvar_wait (&xmtrRail->FreeDescSleep, &xmtrRail->FreeDescLock, &flags);
56516 +    }
56517 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
56518 +
56519 +    spin_lock_irqsave (&xmtr->Lock, flags);
56520 +    xmtr->Rails[commsRail->Rail->Number] = NULL;
56521 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
56522 +
56523 +    /* need to free up the txd's and blocks */
56524 +    /* all the txd's accociated with DescBlocks must be in the FreeDescList */
56525 +    ASSERT (xmtrRail->TotalDescCount == xmtrRail->FreeDescCount);
56526 +
56527 +    /* run through the DescBlockList deleting them */
56528 +    while (!list_empty (&xmtrRail->DescBlockList))
56529 +       FreeTxdRailBlock (xmtrRail, list_entry(xmtrRail->DescBlockList.next, EP3_TXD_RAIL_BLOCK , Link));
56530 +    
56531 +    /* it had better be empty after that */
56532 +    ASSERT ((xmtrRail->FreeDescCount == 0) && (xmtrRail->TotalDescCount == 0));
56533 +
56534 +    spin_lock_destroy (&xmtrRail->FreeDescLock);
56535 +    kcondvar_destroy (&xmtrRail->FreeDescSleep);
56536 +
56537 +    KMEM_FREE (xmtrRail, sizeof (EP3_XMTR_RAIL));
56538 +}
56539 +
56540 +void
56541 +ep3xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x)
56542 +{
56543 +    EP3_XMTR_RAIL *xmtrRail = (EP3_XMTR_RAIL *) x;
56544 +    EP3_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
56545 +    struct list_head *el;
56546 +    unsigned long flags;
56547 +    int freeCount = 0;
56548 +
56549 +    spin_lock_irqsave (&xmtrRail->FreeDescLock, flags);
56550 +    list_for_each (el, &xmtrRail->FreeDescList)
56551 +       freeCount++;
56552 +    spin_unlock_irqrestore (&xmtrRail->FreeDescLock, flags);
56553 +
56554 +    (di->func)(di->arg, "                 Rail=%d Free=%d Total=%d (%d)\n",
56555 +               rail->Generic.Number, xmtrRail->FreeDescCount, xmtrRail->TotalDescCount, freeCount);
56556 +}
56557 +
56558 +void
56559 +ep3xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t)
56560 +{
56561 +    EP3_TXD_RAIL      *txdRail   = (EP3_TXD_RAIL *) t;
56562 +    EP3_XMTR_RAIL     *xmtrRail  = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
56563 +    EP3_TXD_RAIL_MAIN *txdMain   = txdRail->TxdMain;
56564 +    sdramaddr_t        txdElan   = txdRail->TxdElan;
56565 +    EP3_RAIL          *rail      = (EP3_RAIL *) xmtrRail->Generic.CommsRail->Rail;
56566 +    ELAN3_DEV         *dev       = rail->Device;
56567 +    
56568 +    (di->func)(di->arg, "     EnveEvent=%x DataEvent=%x DoneEvent=%x Rail=%s\n", 
56569 +              txdMain->EnveEvent, txdMain->DataEvent, txdMain->DoneEvent, rail->Generic.Name);
56570 +    (di->func)(di->arg, "     EnveEvent=%x.%x DataEvent=%x.%x DoneEvent=%x.%x\n",
56571 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count)),
56572 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Type)),
56573 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count)),
56574 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Type)),
56575 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count)),
56576 +              elan3_sdram_readl (dev, txdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Type)));
56577 +}
56578 +
56579 +int
56580 +ep3xmtr_check_txd_state (EP_TXD *txd)
56581 +{
56582 +    EP3_TXD_RAIL  *txdRail   = (EP3_TXD_RAIL *) txd->TxdRail;
56583 +    EP3_XMTR_RAIL *xmtrRail  = (EP3_XMTR_RAIL *) txdRail->Generic.XmtrRail;
56584 +    EP3_RAIL      *rail      = XMTR_TO_RAIL (xmtrRail);
56585 +    E3_Addr        enveEvent = txdRail->TxdElanAddr + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent);
56586 +    EP3_RETRY_DMA *retry = NULL;
56587 +
56588 +    struct list_head *el;
56589 +    struct list_head *nel;
56590 +    unsigned long     flags;
56591 +
56592 +    /*  is enevelope event is really not set */
56593 +    if (EP3_EVENT_FIRED (txdRail->EnveCookie, txdRail->TxdMain->EnveEvent )) 
56594 +       return (0);
56595 +    
56596 +    /* remove matching dma from stalled list */            
56597 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
56598 +    
56599 +    list_for_each_safe(el, nel,  &rail->DmaRetries[EP_RETRY_STABALISING]) {
56600 +       retry = list_entry (el, EP3_RETRY_DMA, Link);
56601 +       
56602 +       if ( retry->Dma.s.dma_srcEvent == enveEvent ) {
56603 +           /* remove from retry list */
56604 +           list_del (&retry->Link);
56605 +           break; /* there can only be one */
56606 +       } 
56607 +    }
56608 +    ASSERT ( retry != NULL); /* must find one in list */
56609 +    ASSERT ( retry->Dma.s.dma_srcEvent == enveEvent ); /* better still be the right type then */    
56610 +
56611 +    /* add to free list */
56612 +    list_add (&retry->Link, &rail->DmaRetryFreeList);
56613 +
56614 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);    
56615 +                       
56616 +    UnbindTxdFromRail (txd, txdRail);
56617 +       
56618 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
56619 +    txdRail->TxdMain->EnveEvent = EP3_EVENT_PRIVATE;
56620 +    txdRail->TxdMain->DataEvent = EP3_EVENT_PRIVATE;
56621 +    txdRail->TxdMain->DoneEvent = EP3_EVENT_PRIVATE;
56622 +    
56623 +    /* reset the envelope and data events, since only they could have been set */
56624 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, EnveEvent.ev_Count), 0);                         /* PCI write */
56625 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DataEvent.ev_Count), 0);                         /* PCI write */
56626 +    elan3_sdram_writel (rail->Device, txdRail->TxdElan + offsetof (EP3_TXD_RAIL_ELAN, DoneEvent.ev_Count), 0);                         /* PCI write */         
56627 +    
56628 +    FreeTxdRail (xmtrRail, txdRail);
56629 +
56630 +    return (1);
56631 +}
56632 +
56633 +void
56634 +ep3xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) {
56635 +    /* no stats here yet */
56636 +    /* EP3_XMTR_RAIL * ep3xmtr_rail = (EP3_XMTR_RAIL *) xmtr_rail; */
56637 +}
56638 +
56639 +/*
56640 + * Local variables:
56641 + * c-file-style: "stroustrup"
56642 + * End:
56643 + */
56644 diff -urN clean/drivers/net/qsnet/ep/epcommsTx_elan4.c linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan4.c
56645 --- clean/drivers/net/qsnet/ep/epcommsTx_elan4.c        1969-12-31 19:00:00.000000000 -0500
56646 +++ linux-2.6.9/drivers/net/qsnet/ep/epcommsTx_elan4.c  2005-07-20 07:35:37.000000000 -0400
56647 @@ -0,0 +1,1389 @@
56648 +/*
56649 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
56650 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
56651 + *
56652 + *    For licensing information please see the supplied COPYING file
56653 + *
56654 + */
56655 +
56656 +#ident "@(#)$Id: epcommsTx_elan4.c,v 1.32.2.1 2005/07/20 11:35:37 mike Exp $"
56657 +/*      $Source: /cvs/master/quadrics/epmod/epcommsTx_elan4.c,v $ */
56658 +
56659 +#include <qsnet/kernel.h>
56660 +
56661 +#include <elan/kcomm.h>
56662 +#include <elan/epsvc.h>
56663 +#include <elan/epcomms.h>
56664 +
56665 +#include "debug.h"
56666 +#include "kcomm_vp.h"
56667 +#include "kcomm_elan4.h"
56668 +#include "epcomms_elan4.h"
56669 +
56670 +#include <elan4/trtype.h>
56671 +
56672 +#define XMTR_TO_COMMS(xmtrRail)                ((EP4_COMMS_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail)
56673 +#define XMTR_TO_RAIL(xmtrRail)         ((EP4_RAIL *) ((EP_XMTR_RAIL *) xmtrRail)->CommsRail->Rail)
56674 +#define XMTR_TO_DEV(xmtrRail)          (XMTR_TO_RAIL(xmtrRail)->r_ctxt.ctxt_dev)
56675 +#define XMTR_TO_SUBSYS(xmtrRail)       (((EP_XMTR_RAIL *) xmtrRail)->Xmtr->Subsys)
56676 +
56677 +#define TXD_TO_XMTR(txdRail)           ((EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail)
56678 +#define TXD_TO_RAIL(txdRail)           XMTR_TO_RAIL(TXD_TO_XMTR(txdRail))
56679 +
56680 +static void txd_interrupt (EP4_RAIL *rail, void *arg);
56681 +static void poll_interrupt (EP4_RAIL *rail, void *arg);
56682 +
56683 +static __inline__ int
56684 +on_list (struct list_head *ent, struct list_head *list)
56685 +{
56686 +    struct list_head *el;
56687 +    unsigned int count = 0;
56688 +    list_for_each (el, list) {
56689 +       if (el == ent)
56690 +           count++;
56691 +    }
56692 +    return count;
56693 +}
56694 +
56695 +static __inline__ void
56696 +__ep4_txd_assert_free (EP4_TXD_RAIL *txdRail, const char *file, const int line)
56697 +{
56698 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
56699 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
56700 +    register int   failed   = 0;
56701 +    
56702 +    if ((txdRail)->txd_retry_time     != 0)              failed |= (1 << 0);
56703 +    if ((txdRail)->txd_main->txd_env  != EP4_STATE_FREE) failed |= (1 << 1);
56704 +    if ((txdRail)->txd_main->txd_data != EP4_STATE_FREE) failed |= (1 << 2);
56705 +    if ((txdRail)->txd_main->txd_done != EP4_STATE_FREE) failed |= (1 << 3);
56706 +
56707 +    if (sdram_assert)
56708 +    {
56709 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32)  != -32) failed |= (1 << 4);
56710 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 5);
56711 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 6);
56712 +    }
56713 +
56714 +    if (failed)
56715 +    {
56716 +       printk ("__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
56717 +
56718 +       ep_debugf (DBG_DEBUG, "__ep4_txd_assert_free: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
56719 +       ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
56720 +
56721 +       (txdRail)->txd_retry_time     = 0;
56722 +       (txdRail)->txd_main->txd_env  = EP4_STATE_FREE;
56723 +       (txdRail)->txd_main->txd_data = EP4_STATE_FREE;
56724 +       (txdRail)->txd_main->txd_done = EP4_STATE_FREE;
56725 +
56726 +       if (sdram_assert)
56727 +       {
56728 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)  + 4, -32);
56729 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0);
56730 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0);
56731 +       }
56732 +       EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_free");
56733 +    }
56734 +}
56735 +
56736 +static __inline__ void
56737 +__ep4_txd_assert_finished (EP4_TXD_RAIL *txdRail, const char *file, const int line)
56738 +{
56739 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
56740 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
56741 +    register int   failed   = 0;
56742 +    
56743 +    if ((txdRail)->txd_retry_time     != 0)                  failed |= (1 << 0);
56744 +    if ((txdRail)->txd_main->txd_env  != EP4_STATE_FINISHED) failed |= (1 << 1);
56745 +    if ((txdRail)->txd_main->txd_data != EP4_STATE_FINISHED) failed |= (1 << 2);
56746 +    if ((txdRail)->txd_main->txd_done != EP4_STATE_FINISHED) failed |= (1 << 3);
56747 +    
56748 +    if (sdram_assert)
56749 +    {
56750 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)) >> 32)  != -32) failed |= (1 << 4);
56751 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 5);
56752 +       if ((int)(elan4_sdram_readq (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)) >> 32) != 0)   failed |= (1 << 6);
56753 +    }
56754 +
56755 +    if (failed)
56756 +    {
56757 +       printk ("__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
56758 +
56759 +       ep_debugf (DBG_DEBUG, "__ep4_txd_assert_finished: failed=%x txdRail=%p at %s:%d\n", failed, txdRail, file, line);
56760 +       ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
56761 +
56762 +       (txdRail)->txd_retry_time     = 0;
56763 +       (txdRail)->txd_main->txd_env  = EP4_STATE_FINISHED;
56764 +       (txdRail)->txd_main->txd_data = EP4_STATE_FINISHED;
56765 +       (txdRail)->txd_main->txd_done = EP4_STATE_FINISHED;
56766 +
56767 +       if (sdram_assert)
56768 +       {
56769 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)  + 4, -32);
56770 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType) + 4, 0);
56771 +           elan4_sdram_writel (dev, (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType) + 4, 0);
56772 +       }
56773 +       EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "__ep4_txd_assert_finished");
56774 +    }
56775 +}
56776 +
56777 +static __inline__ int
56778 +__ep4_txd_assfail (EP4_TXD_RAIL *txdRail, const char *expr, const char *file, const int line)
56779 +{
56780 +    EP4_XMTR_RAIL *xmtrRail = TXD_TO_XMTR (txdRail);
56781 +
56782 +    printk ("__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr);
56783 +
56784 +    ep_debugf (DBG_DEBUG, "__ep4_txd_assfail: %s:%d '%s'\n", file, line, expr);
56785 +    ep4xmtr_display_txd (&di_ep_debug, &txdRail->txd_generic);
56786 +
56787 +    EP_ASSFAIL (XMTR_TO_RAIL (xmtrRail), "__ep4_txd_assfail");
56788 +
56789 +    return 0;
56790 +}
56791 +
56792 +#define EP4_TXD_ASSERT(txdRail, EX)            ((void) ((EX) || (__ep4_txd_assfail(txdRail, #EX, __FILE__, __LINE__))))
56793 +#define EP4_TXD_ASSERT_FREE(txdRail)           __ep4_txd_assert_free(txdRail, __FILE__, __LINE__)
56794 +#define EP4_TXD_ASSERT_FINISHED(txdRail)       __ep4_txd_assert_finished(txdRail, __FILE__, __LINE__)
56795 +
56796 +static int
56797 +alloc_txd_block (EP4_XMTR_RAIL *xmtrRail)
56798 +{
56799 +    EP4_RAIL           *rail = XMTR_TO_RAIL(xmtrRail);
56800 +    ELAN4_DEV          *dev  = XMTR_TO_DEV(xmtrRail);
56801 +    EP4_TXD_RAIL_BLOCK *blk;
56802 +    EP4_TXD_RAIL_MAIN  *txdMain;
56803 +    EP_ADDR            txdMainAddr;
56804 +    sdramaddr_t                txdElan;
56805 +    EP_ADDR            txdElanAddr;
56806 +    EP4_TXD_RAIL       *txdRail;
56807 +    unsigned long       flags;
56808 +    int                 i;
56809 +
56810 +    KMEM_ZALLOC (blk, EP4_TXD_RAIL_BLOCK *, sizeof (EP4_TXD_RAIL_BLOCK), 1);
56811 +
56812 +    if (blk == NULL)
56813 +       return 0;
56814 +
56815 +    if ((txdElan = ep_alloc_elan (&rail->r_generic, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdElanAddr)) == (sdramaddr_t) 0)
56816 +    {
56817 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
56818 +       return 0;
56819 +    }
56820 +
56821 +    if ((txdMain = ep_alloc_main (&rail->r_generic, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK, 0, &txdMainAddr)) == (EP4_TXD_RAIL_MAIN *) NULL)
56822 +    {
56823 +       ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
56824 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
56825 +       return 0;
56826 +    }
56827 +
56828 +    if (ep4_reserve_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK, 0) != 0)
56829 +    {
56830 +       ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
56831 +       ep_free_elan (&rail->r_generic, txdElanAddr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
56832 +       KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
56833 +       return 0;
56834 +    }
56835 +
56836 +    for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++)
56837 +    {
56838 +       txdRail->txd_generic.XmtrRail = &xmtrRail->xmtr_generic;
56839 +       txdRail->txd_elan             = txdElan;
56840 +       txdRail->txd_elan_addr        = txdElanAddr;
56841 +       txdRail->txd_main             = txdMain;
56842 +       txdRail->txd_main_addr        = txdMainAddr;
56843 +
56844 +       /* We only need to reserve space for one command stream, since the sten packet
56845 +        * can only be retrying *before* the dma source event is set.
56846 +        * reserve bytes of "event" cq space for the completion write + interrupt */
56847 +       if ((txdRail->txd_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, EP4_INTR_CMD_NDWORDS)) == NULL)
56848 +           goto failed;
56849 +
56850 +       /* register the main interrupt cookies */
56851 +       ep4_register_intcookie (rail, &txdRail->txd_intcookie, txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done), txd_interrupt, txdRail);
56852 +
56853 +       /* initialise the events */
56854 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
56855 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
56856 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopySource),
56857 +                           txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd));
56858 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CopyDest),
56859 +                           txdRail->txd_ecq->ecq_addr);
56860 +
56861 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
56862 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
56863 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WritePtr),
56864 +                           txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_data));
56865 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_WriteValue),
56866 +                           EP4_STATE_FINISHED);
56867 +
56868 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
56869 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
56870 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopySource),
56871 +                           txdElanAddr + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd));
56872 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CopyDest),
56873 +                           txdRail->txd_ecq->ecq_addr);
56874 +
56875 +       /* Initialise the command streams */
56876 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_cmd),
56877 +                           WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_env)));
56878 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_write_value),
56879 +                           EP4_STATE_FAILED);
56880 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env_cmd.c_intr_cmd),
56881 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
56882 +
56883 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_cmd),
56884 +                           WRITE_DWORD_CMD | (txdMainAddr + offsetof (EP4_TXD_RAIL_MAIN, txd_done)));
56885 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_write_value),
56886 +                           EP4_STATE_FINISHED);
56887 +       elan4_sdram_writeq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
56888 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
56889 +
56890 +       txdMain->txd_env  = EP4_STATE_FREE;
56891 +       txdMain->txd_data = EP4_STATE_FREE;
56892 +       txdMain->txd_done = EP4_STATE_FREE;
56893 +
56894 +       /* move onto next descriptor */
56895 +       txdElan     += EP4_TXD_RAIL_ELAN_SIZE;
56896 +       txdElanAddr += EP4_TXD_RAIL_ELAN_SIZE;
56897 +       txdMain      = (EP4_TXD_RAIL_MAIN *) ((unsigned long) txdMain + EP4_TXD_RAIL_MAIN_SIZE);
56898 +       txdMainAddr += EP4_TXD_RAIL_MAIN_SIZE;
56899 +    }
56900 +
56901 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
56902 +
56903 +    list_add  (&blk->blk_link, &xmtrRail->xmtr_blocklist);
56904 +
56905 +    xmtrRail->xmtr_totalcount += EP4_NUM_TXD_PER_BLOCK;
56906 +    xmtrRail->xmtr_freecount  += EP4_NUM_TXD_PER_BLOCK;
56907 +
56908 +    for (i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++)
56909 +       list_add (&blk->blk_txds[i].txd_generic.Link, &xmtrRail->xmtr_freelist);
56910 +
56911 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
56912 +
56913 +    return 1;
56914 +
56915 + failed:
56916 +    while (--i >= 0)
56917 +    {
56918 +       ep4_put_ecq (rail, txdRail->txd_ecq, EP4_INTR_CMD_NDWORDS);
56919 +       ep4_deregister_intcookie (rail, &txdRail->txd_intcookie);
56920 +    }
56921 +    ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK);
56922 +
56923 +    ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
56924 +    ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
56925 +
56926 +    KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
56927 +
56928 +    return 0;
56929 +}
56930 +
56931 +static void
56932 +free_txd_block (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL_BLOCK *blk)
56933 +{
56934 +    EP4_RAIL     *rail = XMTR_TO_RAIL (xmtrRail);
56935 +    EP4_TXD_RAIL *txdRail;
56936 +    unsigned long flags;
56937 +    int           i;
56938 +
56939 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
56940 +
56941 +    list_del (&blk->blk_link);
56942 +
56943 +    xmtrRail->xmtr_totalcount -= EP4_NUM_TXD_PER_BLOCK;
56944 +
56945 +    for (txdRail = &blk->blk_txds[0], i = 0; i < EP4_NUM_TXD_PER_BLOCK; i++, txdRail++)
56946 +    {
56947 +       xmtrRail->xmtr_freecount--;
56948 +
56949 +       ep4_put_ecq (rail, txdRail->txd_ecq, EP4_INTR_CMD_NDWORDS);
56950 +
56951 +       ep4_deregister_intcookie (rail, &txdRail->txd_intcookie);
56952 +
56953 +       list_del (&txdRail->txd_generic.Link);
56954 +    }
56955 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
56956 +
56957 +    ep4_release_dma_retries (rail, EP4_NUM_TXD_PER_BLOCK);
56958 +
56959 +    ep_free_main (&rail->r_generic, blk->blk_txds[0].txd_main_addr, EP4_TXD_RAIL_MAIN_SIZE * EP4_NUM_TXD_PER_BLOCK);
56960 +    ep_free_elan (&rail->r_generic, blk->blk_txds[0].txd_elan_addr, EP4_TXD_RAIL_ELAN_SIZE * EP4_NUM_TXD_PER_BLOCK);
56961 +
56962 +    KMEM_FREE (blk, sizeof (EP4_TXD_RAIL_BLOCK));
56963 +}
56964 +
56965 +static EP4_TXD_RAIL *
56966 +get_txd_rail (EP4_XMTR_RAIL *xmtrRail)
56967 +{
56968 +    EP_COMMS_SUBSYS  *subsys = XMTR_TO_SUBSYS(xmtrRail);
56969 +    EP4_TXD_RAIL     *txdRail;
56970 +    unsigned long flags;
56971 +    int low_on_txds;
56972 +
56973 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
56974 +
56975 +    if (list_empty (&xmtrRail->xmtr_freelist))
56976 +       txdRail = NULL;
56977 +    else
56978 +    {
56979 +       txdRail = list_entry (xmtrRail->xmtr_freelist.next, EP4_TXD_RAIL, txd_generic.Link);
56980 +
56981 +       EP4_TXD_ASSERT_FREE(txdRail);
56982 +
56983 +       list_del (&txdRail->txd_generic.Link);
56984 +
56985 +       xmtrRail->xmtr_freecount--;
56986 +    }
56987 +    /* Wakeup the descriptor primer thread if there's not many left */
56988 +    low_on_txds = (xmtrRail->xmtr_freecount < ep_txd_lowat);
56989 +
56990 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
56991 +
56992 +    if (low_on_txds)
56993 +       ep_kthread_schedule (&subsys->Thread, lbolt);
56994 +
56995 +
56996 +    return (txdRail);
56997 +}
56998 +
56999 +static void
57000 +free_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
57001 +{
57002 +    unsigned long flags;
57003 +
57004 +    EP4_TXD_ASSERT_FREE(txdRail);
57005 +
57006 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
57007 +    
57008 +    list_add (&txdRail->txd_generic.Link, &xmtrRail->xmtr_freelist);
57009 +
57010 +    xmtrRail->xmtr_freecount++;
57011 +
57012 +    if (xmtrRail->xmtr_freewaiting)
57013 +    {
57014 +       xmtrRail->xmtr_freewaiting--;
57015 +       kcondvar_wakeupall (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock);
57016 +    }
57017 +
57018 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
57019 +}
57020 +
57021 +static void
57022 +bind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
57023 +{
57024 +    EPRINTF6 (DBG_XMTR, "%s: bind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
57025 +             XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, 
57026 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long)txd->Envelope.Xid.Unique);
57027 +
57028 +    txd->TxdRail = &txdRail->txd_generic;
57029 +    txdRail->txd_generic.Txd = txd;
57030 +}
57031 +
57032 +static void
57033 +unbind_txd_rail (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
57034 +{
57035 +    EP4_TXD_ASSERT (txdRail, txd->TxdRail == &txdRail->txd_generic && txdRail->txd_generic.Txd == txd);
57036 +
57037 +    EPRINTF6 (DBG_XMTR, "%s: unbind_txd_rail: txd=%p txdRail=%p XID=%08x.%08x.%016llx\n", 
57038 +             XMTR_TO_RAIL(txdRail->txd_generic.XmtrRail)->r_generic.Name, txd, txdRail, 
57039 +             txd->Envelope.Xid.Generation, txd->Envelope.Xid.Handle, (long long)txd->Envelope.Xid.Unique);
57040 +
57041 +
57042 +    txdRail->txd_generic.Txd = NULL; 
57043 +    txd->TxdRail = NULL;
57044 +}
57045 +
57046 +static void
57047 +initialise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail, unsigned int phase)
57048 +{
57049 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail;
57050 +    EP4_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
57051 +    ELAN4_DEV     *dev      = rail->r_ctxt.ctxt_dev;
57052 +    
57053 +    /* Flush the Elan TLB if mappings have changed */
57054 +    ep_perrail_dvma_sync (&rail->r_generic);
57055 +    
57056 +    /* Initialise the per-rail fields in the envelope */
57057 +    txd->Envelope.TxdRail = txdRail->txd_elan_addr;
57058 +    txd->Envelope.NodeId  = rail->r_generic.Position.pos_nodeid;
57059 +
57060 +    /* Allocate a network error fixup cookie */
57061 +    txdRail->txd_cookie = ep4_neterr_cookie (rail, txd->NodeId) | EP4_COOKIE_STEN;
57062 +
57063 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
57064 +    if ( epdebug_check_sum ) 
57065 +       txd->Envelope.CheckSum = ep_calc_check_sum( txd->Xmtr->Subsys->Subsys.Sys, &txd->Envelope, txd->Envelope.Frags, txd->Envelope.nFrags);
57066 +    else
57067 +#endif
57068 +       txd->Envelope.CheckSum = 0;  
57069 +
57070 +    /* Initialise the per-rail events */
57071 +    switch (phase)
57072 +    {
57073 +    case EP_TXD_PHASE_ACTIVE:
57074 +    {
57075 +       unsigned int nsets = (txd->Envelope.nFrags ? txd->Envelope.nFrags : 1) + ( EP_IS_MULTICAST(txd->Envelope.Attr) ? 1 : 0);
57076 +
57077 +       if (! EP_IS_RPC(txd->Envelope.Attr))
57078 +       {
57079 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
57080 +                               E4_EVENT_INIT_VALUE (-32 * nsets, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
57081 +
57082 +           txdRail->txd_main->txd_data = EP4_STATE_FINISHED;
57083 +       }
57084 +       else
57085 +       {
57086 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
57087 +                               E4_EVENT_INIT_VALUE(-32 * nsets , E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
57088 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
57089 +                               E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
57090 +
57091 +           txdRail->txd_main->txd_data = EP4_STATE_ACTIVE;
57092 +       }
57093 +                  
57094 +       txdRail->txd_main->txd_env  = EP4_STATE_ACTIVE;
57095 +       txdRail->txd_main->txd_done = EP4_STATE_ACTIVE;
57096 +       break;
57097 +    }
57098 +
57099 +    case EP_TXD_PHASE_PASSIVE:
57100 +       EP4_TXD_ASSERT (txdRail, EP_IS_RPC(txd->Envelope.Attr));
57101 +       
57102 +       txdRail->txd_main->txd_env  = EP4_STATE_FINISHED;
57103 +       txdRail->txd_main->txd_data = EP4_STATE_FINISHED;
57104 +       txdRail->txd_main->txd_done = EP4_STATE_ACTIVE;
57105 +
57106 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
57107 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
57108 +       break;
57109 +    }
57110 +
57111 +   if (EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
57112 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD);
57113 +}
57114 +
57115 +static void
57116 +terminate_txd_rail (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
57117 +{
57118 +    EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\
57119 +                     (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\
57120 +                     E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\
57121 +
57122 +    /* clear the done flags - so that it will be ignored if an event interrupt is generated */
57123 +    txdRail->txd_main->txd_env  = EP4_STATE_FREE;
57124 +    txdRail->txd_main->txd_data = EP4_STATE_FREE;
57125 +    txdRail->txd_main->txd_done = EP4_STATE_FREE;
57126 +
57127 +#if defined(DEBUG_ASSERT)
57128 +    if (sdram_assert)
57129 +    {
57130 +       ELAN4_DEV *dev = XMTR_TO_RAIL (xmtrRail)->r_ctxt.ctxt_dev;
57131 +
57132 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType),
57133 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
57134 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType),
57135 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
57136 +    }
57137 +#endif 
57138 +}
57139 +
57140 +static void
57141 +defer_txd_rail (EP4_TXD_RAIL *txdRail)
57142 +{
57143 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
57144 +    EP4_RAIL        *rail     = XMTR_TO_RAIL(xmtrRail);
57145 +    ELAN4_DEV       *dev      = rail->r_ctxt.ctxt_dev;
57146 +    EP_COMMS_SUBSYS *subsys   = XMTR_TO_SUBSYS(xmtrRail);
57147 +
57148 +    EPRINTF5 (DBG_XMTR, "%s: defer_txd_rail: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n",
57149 +             rail->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
57150 +                   
57151 +    /* transmit has completed, but the data dma has not completed
57152 +     * (because of network error fixup), we queue the txdRail onto a list
57153 +     * to be polled for completion later.
57154 +     */
57155 +    if (txdRail->txd_retry_time)
57156 +    {
57157 +       EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 ||
57158 +                                 on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1));
57159 +
57160 +       list_del (&txdRail->txd_retry_link);
57161 +
57162 +       txdRail->txd_main->txd_env = EP4_STATE_FINISHED;
57163 +
57164 +       /* re-initialise the envelope event */
57165 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
57166 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
57167 +    }
57168 +    
57169 +    txdRail->txd_retry_time = lbolt;
57170 +       
57171 +    list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]);
57172 +       
57173 +    ep_kthread_schedule (&subsys->Thread, lbolt);
57174 +}
57175 +
57176 +static void
57177 +finalise_txd (EP_TXD *txd, EP4_TXD_RAIL *txdRail)
57178 +{
57179 +    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
57180 +
57181 +    EP4_TXD_ASSERT_FINISHED (txdRail);
57182 +
57183 +    unbind_txd_rail (txd, txdRail);
57184 +    
57185 +    terminate_txd_rail (xmtrRail, txdRail);
57186 +    free_txd_rail (xmtrRail, txdRail);
57187 +}
57188 +
/*
 * txd_interrupt: main-interrupt handler for a single transmit descriptor;
 * 'arg' is the EP4_TXD_RAIL registered with the interrupt cookie.
 *
 * Waits (with exponential backoff) for the descriptor to reach a terminal
 * state, then either:
 *   - requeues it for retry / stalls it (envelope packet was nacked), or
 *   - defers it to the poll list (envelope done but data DMA still active), or
 *   - completes it: calls the client handler with EP_SUCCESS and frees it.
 */
static void
txd_interrupt (EP4_RAIL *rail, void *arg)
{
    EP4_TXD_RAIL    *txdRail  = (EP4_TXD_RAIL *) arg;
    EP4_XMTR_RAIL   *xmtrRail = TXD_TO_XMTR(txdRail);
    EP_XMTR         *xmtr     = xmtrRail->xmtr_generic.Xmtr;
    int              delay    = 1;	/* backoff, doubled each iteration up to EP4_EVENT_FIRING_TLIMIT */
    EP_TXD          *txd;
    unsigned long    flags;

    spin_lock_irqsave (&xmtr->Lock, flags);
    for (;;)
    {
	/* terminal states: fully done, or the envelope packet failed */
	if (txdRail->txd_main->txd_done == EP4_STATE_FINISHED || txdRail->txd_main->txd_env == EP4_STATE_FAILED)
	    break;
	
	/* The write to txd_done could be held up in the PCI bridge even though
	 * we've seen the interrupt cookie.  Unlike elan3, there is no possibility
	 * of spurious interrupts since we flush the command queues on node 
	 * disconnection and the txcallback mechanism */
	mb();

	if (delay > EP4_EVENT_FIRING_TLIMIT)
	{
	    /* the state never settled - treat as a fatal inconsistency */
	    spin_unlock_irqrestore (&xmtr->Lock, flags);

	    EP_ASSFAIL (XMTR_TO_RAIL(xmtrRail), "txd_interrupt - not finished\n");
	    return;
	}
	DELAY (delay);
	delay <<= 1;
    }

    txd = txdRail->txd_generic.Txd;

    if (txdRail->txd_main->txd_env == EP4_STATE_FAILED)
    {
	/* envelope packet was nacked - decide between stalling and retrying */
	spin_lock (&xmtrRail->xmtr_retrylock);

	EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time == 0);				/* cannot be on retry/poll list */
	EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_done != EP4_STATE_FINISHED);	/* data xfer cannot have finished */

	if (TxdShouldStabalise (&txdRail->txd_generic, &rail->r_generic))
	{
	    EPRINTF6 (DBG_STABILISE, "%s: txd_interrupt: stablise xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
		      xmtrRail, txdRail, txd, (long long)txd->Envelope.Xid.Unique, txd->NodeId);

	    txdRail->txd_retry_time = lbolt;			/* indicate on retry list */
	    
	    list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]);
	}
	else
	{
	    EPRINTF6 (DBG_RETRY, "%s: txd_interrupt: retry xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
		      xmtrRail, txdRail, txd, (long long)txd->Envelope.Xid.Unique, txd->NodeId);

	    txdRail->txd_retry_time = lbolt + EP_RETRY_LOW_PRI_TIME;		/* XXXX: backoff ? */
	    
	    list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]);
	    
	    ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time);
	}
	spin_unlock (&xmtrRail->xmtr_retrylock);

	spin_unlock_irqrestore (&xmtr->Lock, flags);
	return;
    }

    /* completion interrupts are only armed on descriptors that asked for them */
    EP4_TXD_ASSERT (txdRail, txd != NULL && !(EP_IS_NO_INTERRUPT(txd->Envelope.Attr)));

    EPRINTF6 (DBG_XMTR, "%s: txd_interrupt: xmtrRail=%p txdRail=%p txd=%p XID=%llx dest=%u\n", rail->r_generic.Name,
	      xmtrRail, txdRail, txd, (long long)txd->Envelope.Xid.Unique, txd->NodeId);
	     
    if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED)
    {
	/* done fired but env/data still pending (network error fixup) -
	 * hand the descriptor to the poll list */
	defer_txd_rail (txdRail);

	spin_unlock_irqrestore (&xmtr->Lock, flags);
    }
    else
    {
	/* remove from active transmit list */
	list_del (&txd->Link);

	ep_xmtr_txd_stat(xmtr,txd);

	finalise_txd (txd, txdRail);
	
	spin_unlock_irqrestore (&xmtr->Lock, flags);
	
	/* client callback runs outside the lock; descriptor freed afterwards */
	txd->Handler (txd, txd->Arg, EP_SUCCESS);
	
	FreeTxd (xmtr, txd);
    }
}
57284 +
57285 +static void
57286 +poll_interrupt (EP4_RAIL *rail, void *arg)
57287 +{
57288 +    EP4_XMTR_RAIL   *xmtrRail = (EP4_XMTR_RAIL *) arg;
57289 +
57290 +    ep_poll_transmits (xmtrRail->xmtr_generic.Xmtr);
57291 +}
57292 +
57293 +void
57294 +issue_envelope_packet (EP4_XMTR_RAIL *xmtrRail, EP4_TXD_RAIL *txdRail)
57295 +{
57296 +    EP_TXD    *txd    = txdRail->txd_generic.Txd;
57297 +    ELAN4_CQ  *cq     = xmtrRail->xmtr_cq;
57298 +    E4_uint64 *blk0   = (E4_uint64 *) &txd->Envelope;
57299 +    E4_uint64 *blk1   = EP_HAS_PAYLOAD(txd->Envelope.Attr) ? (E4_uint64 *) &txd->Payload : NULL;
57300 +    E4_Addr    qaddr  = EP_MSGQ_ADDR(txd->Service);
57301 +
57302 +    EP4_SDRAM_ASSERT (TXD_TO_RAIL(txdRail),\
57303 +                     (txdRail)->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),\
57304 +                     E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));\
57305 +
57306 +    elan4_open_packet (cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_DATA(txd->NodeId)));
57307 +    elan4_sendtrans0 (cq, TR_INPUT_Q_GETINDEX, EP_MSGQ_ADDR(txd->Service));
57308 +           
57309 +    /* send the payload if present */
57310 +    if (blk0) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 0,   blk0);
57311 +    if (blk1) elan4_sendtransp (cq, TR_WRITE(128 >> 3, 0, TR_DATATYPE_BYTE), 128, blk1);
57312 +
57313 +    elan4_sendtrans1 (cq, TR_INPUT_Q_COMMIT, qaddr, txdRail->txd_cookie);
57314 +
57315 +    elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (EP4_TXD_STEN_RETRYCOUNT));
57316 +    elan4_write_dword_cmd (cq, txdRail->txd_main_addr + offsetof (EP4_TXD_RAIL_MAIN, txd_env), EP4_STATE_FINISHED);
57317 +           
57318 +    elan4_guard (cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (EP4_TXD_STEN_RETRYCOUNT));
57319 +    elan4_set_event_cmd (cq, txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_env));
57320 +    
57321 +    elan4_write_dword_cmd (cq, xmtrRail->xmtr_main_addr + offsetof (EP4_XMTR_RAIL_MAIN, xmtr_flowcnt), ++xmtrRail->xmtr_flowcnt);
57322 +}
57323 +
57324 +void
57325 +ep4xmtr_flush_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
57326 +{
57327 +    EP4_RAIL       *rail      = XMTR_TO_RAIL (xmtrRail);
57328 +    EP4_COMMS_RAIL *commsRail = XMTR_TO_COMMS (xmtrRail);
57329 +    struct list_head *el, *nel;
57330 +    unsigned long flags;
57331 +
57332 +    switch (rail->r_generic.CallbackStep)
57333 +    {
57334 +    case EP_CB_FLUSH_FILTERING:
57335 +       /* need to acquire/release the Lock to ensure that the node state
57336 +        * transition has been noticed and no new envelopes are queued to 
57337 +        * nodes which are passivating. */
57338 +       spin_lock_irqsave (&xmtr->Lock, flags);
57339 +
57340 +       /* Then we insert a "setevent" into the command queue to flush
57341 +        * through the envelopes which have already been submitted */
57342 +       ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq);
57343 +
57344 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
57345 +
57346 +       break;
57347 +
57348 +    case EP_CB_FLUSH_FLUSHING:
57349 +       /* remove any envelopes which are retrying to nodes which are going down */
57350 +       spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
57351 +       list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) {
57352 +           EP4_TXD_RAIL *txdRail  = list_entry (el, EP4_TXD_RAIL, txd_retry_link);
57353 +           EP_TXD       *txd      = txdRail->txd_generic.Txd;
57354 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
57355 +           
57356 +           EP4_TXD_ASSERT (txdRail, txdRail->txd_main->txd_env == EP4_STATE_FAILED);
57357 +           
57358 +           if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
57359 +           {
57360 +               EPRINTF2 (DBG_XMTR, "%s; ep4xmtr_flush_callback: removing txdRail %p from retry list\n", rail->r_generic.Name, txdRail);
57361 +               
57362 +               EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
57363 +
57364 +               list_del (&txdRail->txd_retry_link);
57365 +               list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]);
57366 +           }
57367 +       }
57368 +       spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
57369 +
57370 +       /* Determine whether we have active or passive messages to 
57371 +        * any node which is passivating */
57372 +       spin_lock_irqsave (&xmtr->Lock, flags);
57373 +       list_for_each (el, &xmtr->ActiveDescList) {
57374 +           EP_TXD       *txd      = list_entry (el, EP_TXD, Link);
57375 +           EP4_TXD_RAIL *txdRail  = (EP4_TXD_RAIL *) txd->TxdRail;
57376 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
57377 +           
57378 +           if (txdRail == NULL || txdRail->txd_generic.XmtrRail != &xmtrRail->xmtr_generic || nodeRail->State != EP_NODE_LOCAL_PASSIVATE)
57379 +               continue;
57380 +           
57381 +           EPRINTF5 (DBG_XMTR, "%s: flush txd=%p txdRail=%p data=%llx done=%llx\n", rail->r_generic.Name,
57382 +                     txd, txdRail, (long long)txdRail->txd_main->txd_data, (long long)txdRail->txd_main->txd_done);
57383 +
57384 +           if (EP_IS_RPC(txd->Envelope.Attr))
57385 +           {
57386 +               if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE)
57387 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
57388 +               else if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE)
57389 +                   nodeRail->MessageState |= EP_NODE_PASSIVE_MESSAGES;
57390 +           }
57391 +           else
57392 +           {
57393 +               if (txdRail->txd_main->txd_data == EP4_STATE_ACTIVE)
57394 +                   nodeRail->MessageState |= EP_NODE_ACTIVE_MESSAGES;
57395 +           }
57396 +       }
57397 +       spin_unlock_irqrestore (&xmtr->Lock, flags);
57398 +       break;
57399 +
57400 +    default:
57401 +       panic ("ep4xmtr_flush_callback: invalid callback step\n");
57402 +       break;
57403 +    }
57404 +}
57405 +
/*
 * ep4xmtr_failover_callback: called when nodes on this rail are in the
 * PASSIVATED state so their in-flight transmits can be relocated to
 * another rail.
 *
 * NOTE(review): rail failover is not implemented here (see the XXXX
 * comment) - nothing is ever moved onto 'txdList', so the completion
 * loop at the bottom is currently dead code, retained for when failover
 * is filled in.
 */
void
ep4xmtr_failover_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
{
    EP4_RAIL         *rail   = XMTR_TO_RAIL (xmtrRail);
    struct list_head  txdList;
    struct list_head *el, *nel;
    unsigned long flags;

    INIT_LIST_HEAD (&txdList);

    spin_lock_irqsave (&xmtr->Lock, flags);
    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
	EP_TXD       *txd	= list_entry (el, EP_TXD, Link);
	EP4_TXD_RAIL *txdRail	= (EP4_TXD_RAIL *) txd->TxdRail;
	EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
	    
	/* Only progress relocation of txd's bound to this rail */
	if (! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_PASSIVATED)
	    continue;
	
	/* XXXX - no rail failover for now ....*/

	EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_failover_callback - xmtr %p txd %p node %d completed\n", rail->r_generic.Name, xmtr, txd, txd->NodeId);
    }
    spin_unlock_irqrestore (&xmtr->Lock, flags);

    /* complete (outside the lock) any descriptors collected above -
     * currently none, since failover is unimplemented */
    while (! list_empty (&txdList)) 
    {
	EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);

	list_del (&txd->Link);

	txd->Handler (txd, txd->Arg, EP_CONN_RESET);
	
	FreeTxd (xmtr, txd);
    }
}
57443 +
57444 +
57445 +void
57446 +ep4xmtr_disconnect_callback (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail)
57447 +{
57448 +    EP4_RAIL         *rail = XMTR_TO_RAIL (xmtrRail);
57449 +    ELAN4_DEV        *dev  = rail->r_ctxt.ctxt_dev;
57450 +    struct list_head *el, *nel;
57451 +    struct list_head  txdList;
57452 +    unsigned long flags;
57453 +    
57454 +    INIT_LIST_HEAD (&txdList);
57455 +
57456 +    spin_lock_irqsave (&xmtr->Lock, flags);
57457 +
57458 +    list_for_each_safe (el, nel, &xmtr->ActiveDescList) {
57459 +       EP_TXD       *txd       = list_entry (el, EP_TXD, Link);
57460 +       EP4_TXD_RAIL *txdRail   = (EP4_TXD_RAIL *) txd->TxdRail;
57461 +       EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[txd->NodeId];
57462 +           
57463 +       if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || nodeRail->State != EP_NODE_DISCONNECTING)
57464 +           continue;
57465 +       
57466 +       if (txdRail->txd_main->txd_done == EP4_STATE_ACTIVE)
57467 +       {
57468 +
57469 +           EPRINTF8 (DBG_DISCON, "ep4xmtr_disconnect_callback: txdRail=%p : events %llx,%llx,%llx done %llx,%llx,%llx retry %lx\n",txdRail,
57470 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)),
57471 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)),
57472 +                     elan4_sdram_readq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)),
57473 +                     (long long)txdRail->txd_main->txd_env, (long long)txdRail->txd_main->txd_data, (long long)txdRail->txd_main->txd_done,
57474 +                     txdRail->txd_retry_time);
57475 +                      
57476 +           if (txdRail->txd_retry_time)
57477 +           {
57478 +               /* re-initialise the envelope event */
57479 +               elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
57480 +                                   E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
57481 +               
57482 +               EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1);
57483 +
57484 +               txdRail->txd_retry_time  = 0;
57485 +
57486 +               list_del (&txdRail->txd_retry_link);
57487 +           }
57488 +
57489 +           /* Remove from active list */
57490 +           list_del (&txd->Link);
57491 +       
57492 +           unbind_txd_rail (txd, txdRail);
57493 +
57494 +           terminate_txd_rail (xmtrRail, txdRail);
57495 +           free_txd_rail (xmtrRail, txdRail);
57496 +           
57497 +           EPRINTF4 (DBG_XMTR, "%s: ep4xmtr_disconnect_callback - xmtr %p txd %p node %d not conected\n", rail->r_generic.Name, xmtr, txd, txd->NodeId);
57498 +
57499 +           /* add to the list of txd's which are to be completed */
57500 +           list_add_tail (&txd->Link, &txdList);
57501 +       }
57502 +    }
57503 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
57504 +    
57505 +    while (! list_empty (&txdList)) 
57506 +    {
57507 +       EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);
57508 +
57509 +       list_del (&txd->Link);
57510 +
57511 +       txd->Handler (txd, txd->Arg, EP_CONN_RESET);
57512 +       
57513 +       FreeTxd (xmtr, txd);
57514 +    }
57515 +}
57516 +
57517 +void
57518 +ep4xmtr_neterr_flush (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
57519 +{
57520 +    EP4_COMMS_RAIL   *commsRail = XMTR_TO_COMMS (xmtrRail);
57521 +    unsigned long flags;
57522 +    
57523 +    spin_lock_irqsave (&xmtr->Lock, flags);
57524 +
57525 +    /* insert a "setevent" into the command queue to flush
57526 +     * through the envelopes which have already been submitted */
57527 +    ep4comms_flush_setevent (commsRail, xmtrRail->xmtr_cq);
57528 +
57529 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
57530 +}
57531 +
/*
 * ep4xmtr_neterr_check: after a network error against 'nodeId', scan the
 * active descriptors bound to this rail for one whose envelope packet was
 * nacked (txd_env == FAILED) but whose interrupt cookie matches one of the
 * two cookies recovered from the error - meaning the INPUT_Q_COMMIT did in
 * fact execute on the destination.  Such a descriptor is re-marked as
 * FINISHED and removed from the retry machinery.
 *
 * NOTE(review): EPRINTF4 is passed a format with more conversions than
 * four arguments would satisfy - presumably EP4_COOKIE_STRING expands to
 * multiple arguments matching "%lld%s%s%s%s"; verify against its definition.
 */
void
ep4xmtr_neterr_check (EP_XMTR *xmtr, EP4_XMTR_RAIL *xmtrRail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
{
    EP4_RAIL *rail = XMTR_TO_RAIL (xmtrRail);
    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
    struct list_head *el;
    unsigned long flags;
    
    spin_lock_irqsave (&xmtr->Lock, flags);
    list_for_each (el, &xmtr->ActiveDescList) {
	EP_TXD       *txd     = list_entry (el, EP_TXD, Link);
	EP4_TXD_RAIL *txdRail = (EP4_TXD_RAIL *) txd->TxdRail;
	    
	if ( ! TXD_BOUND2RAIL (txdRail, xmtrRail) || txd->NodeId != nodeId)
	    continue;
	
	/* The only non-dma associated with a txd is the initial sten packet, if it has been acked 
	 * and the neterr cookie matches, then change it to look like it's been acked since the
	 * INPUT_Q_COMMIT transaction has already been executed */
	if (txdRail->txd_main->txd_env == EP4_STATE_FAILED && (txdRail->txd_cookie == cookies[0] || txdRail->txd_cookie == cookies[1]))
	{
	    EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4xmtr_neterr_callback: cookie <%lld%s%s%s%s> matches txd %p txdRail %p\n", 
		     rail->r_generic.Name, (long long)EP4_COOKIE_STRING(txdRail->txd_cookie), txd, txdRail);

	    /* a FAILED envelope must be sitting on the retry or stalled list */
	    EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);

	    txdRail->txd_main->txd_env = EP4_STATE_FINISHED;

	    /* re-initialise the envelope event */
	    elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
				E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
	    
	    spin_lock (&xmtrRail->xmtr_retrylock);

	    EP4_TXD_ASSERT (txdRail, (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]) == 1 ||
				      on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1));

	    txdRail->txd_retry_time = 0;

	    list_del (&txdRail->txd_retry_link);

	    spin_unlock (&xmtrRail->xmtr_retrylock);
	}
    }
    spin_unlock_irqrestore (&xmtr->Lock, flags);
}
57578 +
57579 +int
57580 +ep4xmtr_poll_txd (EP_XMTR_RAIL *x, EP_TXD_RAIL *t, int how)
57581 +{
57582 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
57583 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
57584 +    EP4_TXD_RAIL  *txdRail  = (EP4_TXD_RAIL *) t;
57585 +    EP_TXD        *txd      = txdRail->txd_generic.Txd;
57586 +
57587 +    if (! EP_IS_NO_INTERRUPT(txd->Envelope.Attr))
57588 +       return 0;
57589 +
57590 +    switch (how)
57591 +    {
57592 +    case ENABLE_TX_CALLBACK:
57593 +       if (!EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr))
57594 +       {
57595 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
57596 +                               INTERRUPT_CMD | (xmtrRail->xmtr_intcookie.int_val << E4_MAIN_INT_SHIFT));
57597 +
57598 +           txd->Envelope.Attr |= EP_INTERRUPT_ENABLED;
57599 +       }
57600 +       break;
57601 +
57602 +    case DISABLE_TX_CALLBACK:
57603 +       if (EP_IS_INTERRUPT_ENABLED(txd->Envelope.Attr & EP_INTERRUPT_ENABLED))
57604 +       {
57605 +           elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd), NOP_CMD);
57606 +
57607 +           txd->Envelope.Attr &= ~EP_INTERRUPT_ENABLED;
57608 +       }
57609 +    }
57610 +    
57611 +    if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED && txdRail->txd_main->txd_data == EP4_STATE_FINISHED && txdRail->txd_main->txd_done == EP4_STATE_FINISHED)
57612 +    {
57613 +       EPRINTF3 (DBG_XMTR, "%s: ep4xmtr_poll_txd: txd=%p XID=%llx completed\n",
57614 +                 XMTR_TO_RAIL (xmtrRail)->r_generic.Name, txd, (long long)txd->Envelope.Xid.Unique);
57615 +       
57616 +       elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_done_cmd.c_intr_cmd),
57617 +                           INTERRUPT_CMD | (txdRail->txd_intcookie.int_val << E4_MAIN_INT_SHIFT));
57618 +
57619 +
57620 +       ep_xmtr_txd_stat(xmtrRail->xmtr_generic.Xmtr,txd);
57621 +
57622 +       finalise_txd (txd, txdRail);
57623 +
57624 +       return 1;
57625 +    }
57626 +
57627 +    return 0;
57628 +}
57629 +
/*
 * ep4xmtr_bind_txd: bind a transmit descriptor to this rail.
 *
 * EP_TXD_PHASE_ACTIVE  - fresh transmit: allocate a rail descriptor and
 *     issue the envelope STEN packet immediately if flow-control credit
 *     is available, otherwise queue it for the retry thread.
 * EP_TXD_PHASE_PASSIVE - rebind: unbind the txd from its existing rail
 *     and attach it to this one (no packet is issued).
 *
 * Returns 1 on success, 0 if no rail descriptor could be allocated or the
 * destination node is not connected on this rail.
 */
int
ep4xmtr_bind_txd (EP_TXD *txd, EP_XMTR_RAIL *x, unsigned int phase)
{
    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
    EP4_RAIL      *rail     = XMTR_TO_RAIL (xmtrRail);
    EP4_TXD_RAIL  *txdRail;
    unsigned long  flags;

    if ((txdRail = get_txd_rail (xmtrRail)) == NULL)
	return 0;
    
    switch (phase)
    {
    case EP_TXD_PHASE_ACTIVE:
	if (rail->r_generic.Nodes[txd->NodeId].State != EP_NODE_CONNECTED)
	{
	    EPRINTF2 (DBG_XMTR, "%s: ep4xmtr_bind_txd: node %u not connected on this rail\n", rail->r_generic.Name, txd->NodeId);

	    free_txd_rail (xmtrRail, txdRail);
	    return 0;
	}

	initialise_txd (txd, txdRail, EP_TXD_PHASE_ACTIVE);

	bind_txd_rail (txd, txdRail);
	
	/* generate the STEN packet to transfer the envelope */
	spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
	/* flow control: only issue when fewer than EP4_XMTR_FLOWCNT packets are outstanding */
	if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT)
	    issue_envelope_packet (xmtrRail, txdRail);
	else
	{
	    /* no credit - park on the retry list and wake the retry thread */
	    txdRail->txd_retry_time = lbolt;

	    list_add_tail (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]);

	    ep_kthread_schedule (&rail->r_retry_thread, txdRail->txd_retry_time);
	}
	spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
	break;

    case EP_TXD_PHASE_PASSIVE:
	initialise_txd (txd, txdRail, EP_TXD_PHASE_PASSIVE);
	
	EP_XMTR_OP (txd->TxdRail->XmtrRail, UnbindTxd) (txd, EP_TXD_PHASE_PASSIVE);	/* unbind from existing rail */

	bind_txd_rail (txd, txdRail);							/* and bind it to our new rail */
	break;
    }

    return 1;
}
57682 +
/*
 * ep4xmtr_unbind_txd: detach a transmit descriptor from this rail
 * (EP_XMTR_OP UnbindTxd entry point).  Not yet implemented - the
 * PASSIVE-phase rebind path in ep4xmtr_bind_txd relies on the *other*
 * rail's implementation of this operation.
 */
void
ep4xmtr_unbind_txd (EP_TXD *txd, unsigned int phase)
{
    /* XXXX - TBD */
}
57688 +
/*
 * ep4xmtr_check: periodic housekeeping for a transmitter rail, run from
 * the comms subsystem thread.  Grows the txd rail pool when it is below
 * the low-water mark, and walks the POLL list completing descriptors
 * whose deferred env/data transfers have finished.  Returns the (possibly
 * updated) time at which it next needs to run (0 = no request).
 *
 * NOTE(review): the ep_debugf and the first EPRINTF5 in the completion
 * branch emit the same message - looks like leftover duplicated debug.
 */
long
ep4xmtr_check (EP_XMTR_RAIL *x, long nextRunTime)
{
    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) x;
    EP_XMTR       *xmtr     = xmtrRail->xmtr_generic.Xmtr;
    struct list_head  txdList;
    struct list_head *el, *nel;
    unsigned long flags;

    INIT_LIST_HEAD (&txdList);

    /* top up the descriptor pool; on allocation failure retry later */
    if (xmtrRail->xmtr_freecount < ep_txd_lowat && !alloc_txd_block (xmtrRail))
    {
	EPRINTF1 (DBG_RCVR,"%s: failed to grow txd rail pool\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name);
		
	if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + RESOURCE_RETRY_TIME))
	    nextRunTime = lbolt + RESOURCE_RETRY_TIME;
    }

    spin_lock_irqsave (&xmtr->Lock, flags);
    list_for_each_safe (el, nel, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]) {
	EP4_TXD_RAIL *txdRail = list_entry (el, EP4_TXD_RAIL, txd_retry_link);

	if (txdRail->txd_main->txd_env != EP4_STATE_FINISHED || txdRail->txd_main->txd_data != EP4_STATE_FINISHED)
	{
	    /* still not complete - poll again in a second */
	    ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) not finished\n",
		       XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
		    
	    nextRunTime = lbolt + HZ;
	}
	else
	{
	    EP_TXD *txd = txdRail->txd_generic.Txd;

	    ep_debugf (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n",
		       XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);

	    EPRINTF5 (DBG_XMTR, "%s: ep4xmtr_check: xmtrRail=%p txdRail=%p env/data (%d,%d) finished\n",
		      XMTR_TO_RAIL(xmtrRail)->r_generic.Name, xmtrRail, txdRail, (int)txdRail->txd_main->txd_env, (int)txdRail->txd_main->txd_data);
	    EPRINTF3  (DBG_XMTR, "%s:    done %x data %x\n", XMTR_TO_RAIL(xmtrRail)->r_generic.Name,
		       (uint)(txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_done)),
		       (uint)(txdRail->txd_elan_addr + offsetof (EP4_TXD_RAIL_ELAN, txd_data)));

	    /* must be on the poll list (retry_time was set by defer_txd_rail) */
	    EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);

	    /* remove txd from active list and add to list to call handlers */
	    list_del (&txd->Link);
	    list_add_tail (&txd->Link, &txdList);

	    /* remove and free of txdRail */
	    txdRail->txd_retry_time = 0;
	    list_del (&txdRail->txd_retry_link);

	    finalise_txd (txd, txdRail);

	}
    }
    spin_unlock_irqrestore (&xmtr->Lock, flags);

    /* run the client handlers outside the transmitter lock */
    while (! list_empty (&txdList))
    {
	EP_TXD *txd = list_entry (txdList.next, EP_TXD, Link);

	list_del (&txd->Link);

	ep_xmtr_txd_stat (xmtr,txd);

	txd->Handler (txd, txd->Arg, EP_SUCCESS);

	FreeTxd (xmtr, txd);
    }

    return nextRunTime;
}
57763 +
/*
 * ep4xmtr_retry: retry-thread operation for a transmitter rail (registered
 * via xmtr_retryops).  Re-issues envelope packets for descriptors on the
 * RETRY list whose retry time has passed, subject to flow-control credit.
 * Returns the (possibly updated) time at which the retry thread should
 * next run (0 = no request).
 */
unsigned long
ep4xmtr_retry (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
{
    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) arg;
    ELAN4_DEV     *dev      = XMTR_TO_DEV(xmtrRail);
    unsigned long  flags;

    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
    while (! list_empty (&xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]))
    {
	EP4_TXD_RAIL *txdRail = list_entry (xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY].next, EP4_TXD_RAIL, txd_retry_link);

	/* list is in time order; stop at the first entry not yet due */
	if (BEFORE (lbolt, txdRail->txd_retry_time))
	{
	    if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time))
		nextRunTime = txdRail->txd_retry_time;

	    break;
	}

	/* flow control: only re-issue when credit is available */
	if (((int) (xmtrRail->xmtr_flowcnt - xmtrRail->xmtr_main->xmtr_flowcnt)) < EP4_XMTR_FLOWCNT)
	{
	    txdRail->txd_retry_time = 0;

	    list_del (&txdRail->txd_retry_link);
	    
	    /* re-initialise the envelope event */
	    elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
				E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
	    
	    EPRINTF3 (DBG_RETRY, "%s: ep4xmtr_retry: re-issue envelope packet to %d for txdRail=%p\n", 
		      rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId, txdRail);
	    
	    txdRail->txd_main->txd_env = EP4_STATE_ACTIVE;
	    
	    issue_envelope_packet (xmtrRail, txdRail);
	}
	else
	{
	    /* out of credit - come back at this entry's retry time */
	    EPRINTF2 (DBG_RETRY, "%s: ep4xmtr_retry: cannot re-issue envelope packet to %d\n", rail->r_generic.Name, txdRail->txd_generic.Txd->Envelope.NodeId);

	    if (nextRunTime == 0 || AFTER (nextRunTime, txdRail->txd_retry_time))
		nextRunTime = txdRail->txd_retry_time;

	    break;
	}
    }
    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
    
    return nextRunTime;
}
57815 +
/*
 * ep4xmtr_add_rail: attach a transmitter to a newly available comms rail.
 * Allocates and initialises the per-rail transmitter state (free lists,
 * retry lists, main-memory block, command queue), registers the retry
 * operation and the poll interrupt cookie, then publishes the rail in the
 * transmitter's rail table and wakes the subsystem thread.
 *
 * NOTE(review): KMEM_ZALLOC, ep_alloc_main and elan4_alloccq results are
 * not checked here - presumably KMEM_ZALLOC(...,1) may sleep/cannot fail
 * in this configuration; confirm the failure semantics of all three.
 */
void
ep4xmtr_add_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
{
    EP4_RAIL         *rail   = (EP4_RAIL *) commsRail->Rail;
    EP_COMMS_SUBSYS  *subsys = xmtr->Subsys;
    EP4_XMTR_RAIL    *xmtrRail;
    unsigned long     flags;
    int		      i;

    KMEM_ZALLOC (xmtrRail, EP4_XMTR_RAIL *, sizeof (EP4_XMTR_RAIL), 1);

    spin_lock_init (&xmtrRail->xmtr_freelock);
    kcondvar_init  (&xmtrRail->xmtr_freesleep);
    INIT_LIST_HEAD (&xmtrRail->xmtr_freelist);
    INIT_LIST_HEAD (&xmtrRail->xmtr_blocklist);

    for (i = 0; i < EP4_TXD_NUM_LISTS; i++)
	INIT_LIST_HEAD (&xmtrRail->xmtr_retrylist[i]);
    spin_lock_init (&xmtrRail->xmtr_retrylock);

    xmtrRail->xmtr_generic.CommsRail = commsRail;
    xmtrRail->xmtr_generic.Xmtr      = xmtr;

    /* main-memory flow-control block and the transmitter's command queue */
    xmtrRail->xmtr_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_XMTR_RAIL_MAIN), 0, &xmtrRail->xmtr_main_addr);
    xmtrRail->xmtr_cq   = elan4_alloccq (&rail->r_ctxt, EP4_XMTR_CQSIZE, CQ_EnableAllBits, CQ_Priority);

    xmtrRail->xmtr_retryops.op_func = ep4xmtr_retry;
    xmtrRail->xmtr_retryops.op_arg  = xmtrRail;

    ep4_add_retry_ops (rail, &xmtrRail->xmtr_retryops);

    /* cookie used by ep4xmtr_poll_txd to get a poll_interrupt callback */
    ep4_register_intcookie (rail, &xmtrRail->xmtr_intcookie, xmtrRail->xmtr_main_addr,
			    poll_interrupt, xmtrRail);

    spin_lock_irqsave (&xmtr->Lock, flags);

    xmtr->Rails[commsRail->Rail->Number] = &xmtrRail->xmtr_generic;
    xmtr->RailMask |= EP_RAIL2RAILMASK(commsRail->Rail->Number);

    spin_unlock_irqrestore (&xmtr->Lock, flags);

    ep_kthread_schedule (&subsys->Thread, lbolt);

    ep_procfs_xmtr_add_rail(&(xmtrRail->xmtr_generic));
}
57861 +
57862 +void
57863 +ep4xmtr_del_rail (EP_XMTR *xmtr, EP_COMMS_RAIL *commsRail)
57864 +{
57865 +    EP4_RAIL         *rail     = (EP4_RAIL *) commsRail->Rail;
57866 +    EP4_XMTR_RAIL    *xmtrRail = (EP4_XMTR_RAIL *) xmtr->Rails[commsRail->Rail->Number];
57867 +    unsigned long     flags;
57868 +
57869 +    /* rail mask set as not usable */
57870 +    spin_lock_irqsave (&xmtr->Lock, flags);
57871 +    xmtr->RailMask &= ~EP_RAIL2RAILMASK (rail->r_generic.Number);
57872 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
57873 +
57874 +    ep_procfs_xmtr_del_rail(&(xmtrRail->xmtr_generic));
57875 +
57876 +    /* wait for all txd's for this rail to become free */
57877 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
57878 +    while (xmtrRail->xmtr_freecount != xmtrRail->xmtr_totalcount)
57879 +    {
57880 +       xmtrRail->xmtr_freewaiting++;
57881 +       kcondvar_wait (&xmtrRail->xmtr_freesleep, &xmtrRail->xmtr_freelock, &flags);
57882 +    }
57883 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
57884 +
57885 +    spin_lock_irqsave (&xmtr->Lock, flags);
57886 +    xmtr->Rails[commsRail->Rail->Number] = NULL;
57887 +    spin_unlock_irqrestore (&xmtr->Lock, flags);
57888 +
57889 +    /* all the txd's associated with DescBlocks must be in the freelist */
57890 +    ASSERT (xmtrRail->xmtr_totalcount == xmtrRail->xmtr_freecount);
57891 +
57892 +    /* run through the DescBlockList deleting them */
57893 +    while (!list_empty (&xmtrRail->xmtr_blocklist))
57894 +       free_txd_block (xmtrRail, list_entry(xmtrRail->xmtr_blocklist.next, EP4_TXD_RAIL_BLOCK , blk_link));
57895 +    
57896 +    /* it had better be empty after that */
57897 +    ASSERT ((xmtrRail->xmtr_freecount == 0) && (xmtrRail->xmtr_totalcount == 0));
57898 +
57899 +    ep4_deregister_intcookie (rail, &xmtrRail->xmtr_intcookie);
57900 +
57901 +    ep4_remove_retry_ops (rail, &xmtrRail->xmtr_retryops);
57902 +
57903 +    elan4_freecq (&rail->r_ctxt, xmtrRail->xmtr_cq);
57904 +    ep_free_main (&rail->r_generic, xmtrRail->xmtr_main_addr, sizeof (EP4_XMTR_RAIL_MAIN));
57905 +
57906 +    spin_lock_destroy (&xmtrRail->xmtr_retrylock);
57907 +
57908 +    spin_lock_destroy (&xmtrRail->xmtr_freelock);
57909 +    kcondvar_destroy (&xmtrRail->xmtr_freesleep);
57910 +
57911 +    KMEM_FREE (xmtrRail, sizeof (EP4_XMTR_RAIL));
57912 +}
57913 +
57914 +void
57915 +ep4xmtr_display_xmtr (DisplayInfo *di, EP_XMTR_RAIL *x)
57916 +{
57917 +    EP4_XMTR_RAIL    *xmtrRail     = (EP4_XMTR_RAIL *) x;
57918 +    EP4_RAIL         *rail         = XMTR_TO_RAIL (xmtrRail);
57919 +    unsigned int      freeCount    = 0;
57920 +    unsigned int      pollCount    = 0;
57921 +    unsigned int      stalledCount = 0;
57922 +    unsigned int      retryCount   = 0;
57923 +    struct list_head *el;
57924 +    unsigned long     flags;
57925 +
57926 +    spin_lock_irqsave (&xmtrRail->xmtr_freelock, flags);
57927 +    list_for_each (el, &xmtrRail->xmtr_freelist)
57928 +       freeCount++;
57929 +    spin_unlock_irqrestore (&xmtrRail->xmtr_freelock, flags);
57930 +
57931 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
57932 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL])
57933 +       pollCount++;
57934 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED])
57935 +       stalledCount++;
57936 +    list_for_each (el, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY])
57937 +       retryCount++;
57938 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
57939 +
57940 +    (di->func)(di->arg, "        rail=%d free=%d total=%d (%d) (retry %d,%d,%d)\n",
57941 +              rail->r_generic.Number, xmtrRail->xmtr_freecount, xmtrRail->xmtr_totalcount, 
57942 +              freeCount, pollCount, stalledCount, retryCount);
57943 +    (di->func)(di->arg, "        cq %d flowcnt %lld,%lld\n", elan4_cq2num (xmtrRail->xmtr_cq), xmtrRail->xmtr_flowcnt, xmtrRail->xmtr_main->xmtr_flowcnt);
57944 +}
57945 +
57946 +void
57947 +ep4xmtr_display_txd (DisplayInfo *di, EP_TXD_RAIL *t)
57948 +{
57949 +    EP4_TXD_RAIL      *txdRail  = (EP4_TXD_RAIL *) t;
57950 +    EP4_XMTR_RAIL     *xmtrRail = TXD_TO_XMTR(txdRail);
57951 +    EP4_TXD_RAIL_MAIN *txdMain  = txdRail->txd_main;
57952 +    sdramaddr_t        txdElan  = txdRail->txd_elan;
57953 +    EP4_RAIL          *rail     = XMTR_TO_RAIL (xmtrRail);
57954 +    ELAN4_DEV         *dev      = XMTR_TO_DEV (xmtrRail);
57955 +    char             *list     = "";
57956 +    unsigned long      flags;
57957 +
57958 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
57959 +    if (txdRail->txd_retry_time)
57960 +    {
57961 +       if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_POLL]))
57962 +           list = " poll";
57963 +       else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]))
57964 +           list = " stalled";
57965 +       else if (on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_RETRY]))
57966 +           list = " retry";
57967 +       else
57968 +           list = " ERROR";
57969 +    }
57970 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
57971 +
57972 +    (di->func)(di->arg, "      Rail %d txd %p elan %lx (%x) main %p (%x) cookie <%lld%s%s%s%s> ecq %d %s\n", rail->r_generic.Number,
57973 +              txdRail, txdRail->txd_elan, txdRail->txd_elan_addr, txdRail->txd_main, txdRail->txd_main_addr, 
57974 +              EP4_COOKIE_STRING(txdRail->txd_cookie), elan4_cq2num (txdRail->txd_ecq->ecq_cq), list);
57975 +    
57976 +    (di->func)(di->arg, "        env  %016llx %016llx %016llx -> %016llx\n",
57977 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType)),
57978 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[0])),
57979 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_Params[1])),
57980 +              txdMain->txd_env);
57981 +    (di->func)(di->arg, "        data %016llx %016llx %016llx -> %016llx\n",
57982 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_CountAndType)),
57983 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[0])),
57984 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_data.ev_Params[1])),
57985 +              txdMain->txd_data);
57986 +    (di->func)(di->arg, "        done %016llx %016llx %016llx -> %016llx\n",
57987 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_CountAndType)),
57988 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[0])),
57989 +              elan4_sdram_readq (dev, txdElan + offsetof (EP4_TXD_RAIL_ELAN, txd_done.ev_Params[1])),
57990 +              txdMain->txd_done);
57991 +}
57992 +
57993 +int
57994 +ep4xmtr_check_txd_state (EP_TXD *txd) 
57995 +{
57996 +    EP4_TXD_RAIL  *txdRail  = (EP4_TXD_RAIL *) txd->TxdRail;
57997 +    EP4_XMTR_RAIL *xmtrRail = (EP4_XMTR_RAIL *) txdRail->txd_generic.XmtrRail;
57998 +    ELAN4_DEV     *dev      = XMTR_TO_DEV (xmtrRail);
57999 +    unsigned long  flags;
58000 +
58001 +    if (txdRail->txd_main->txd_env == EP4_STATE_FINISHED)
58002 +       return 0;
58003 +
58004 +    EP4_TXD_ASSERT (txdRail, txdRail->txd_retry_time != 0);
58005 +
58006 +    spin_lock_irqsave (&xmtrRail->xmtr_retrylock, flags);
58007 +    EP4_TXD_ASSERT (txdRail, on_list (&txdRail->txd_retry_link, &xmtrRail->xmtr_retrylist[EP4_TXD_LIST_STALLED]) == 1);
58008 +
58009 +    list_del (&txdRail->txd_retry_link);
58010 +    txdRail->txd_retry_time  = 0;
58011 +    spin_unlock_irqrestore (&xmtrRail->xmtr_retrylock, flags);
58012 +    
58013 +    /* re-initialise the envelope event */
58014 +    elan4_sdram_writeq (dev, txdRail->txd_elan + offsetof (EP4_TXD_RAIL_ELAN, txd_env.ev_CountAndType),
58015 +                       E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_INTR_CMD_NDWORDS));
58016 +           
58017 +    unbind_txd_rail (txd, txdRail);
58018 +
58019 +    terminate_txd_rail (xmtrRail, txdRail);
58020 +    free_txd_rail (xmtrRail, txdRail);
58021 +
58022 +    return 1;
58023 +}
58024 +
58025 +void
58026 +ep4xmtr_fillout_rail_stats(EP_XMTR_RAIL *xmtr_rail, char *str) {
58027 +    /* no stats here yet */
58028 +    /* EP4_XMTR_RAIL * ep4xmtr_rail = (EP4_XMTR_RAIL *) xmtr_rail; */
58029 +}
58030 +
58031 +
58032 +/*
58033 + * Local variables:
58034 + * c-file-style: "stroustrup"
58035 + * End:
58036 + */
58037 diff -urN clean/drivers/net/qsnet/ep/ep_procfs.c linux-2.6.9/drivers/net/qsnet/ep/ep_procfs.c
58038 --- clean/drivers/net/qsnet/ep/ep_procfs.c      1969-12-31 19:00:00.000000000 -0500
58039 +++ linux-2.6.9/drivers/net/qsnet/ep/ep_procfs.c        2005-03-30 09:06:34.000000000 -0500
58040 @@ -0,0 +1,331 @@
58041 +
58042 +/*
58043 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
58044 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
58045 + *
58046 + *    For licensing information please see the supplied COPYING file
58047 + *
58048 + */
58049 +
58050 +#ident "@(#)$Id: ep_procfs.c,v 1.10 2005/03/30 14:06:34 mike Exp $"
58051 +/*      $Source: /cvs/master/quadrics/epmod/ep_procfs.c,v $*/
58052 +
58053 +#include <qsnet/kernel.h>
58054 +
58055 +#include <elan/kcomm.h>
58056 +#include <elan/epsvc.h>
58057 +#include <elan/epcomms.h>
58058 +
58059 +#include "cm.h"
58060 +#include "debug.h"
58061 +#include "conf_linux.h"
58062 +
58063 +#include "kcomm_vp.h"
58064 +#include "kcomm_elan4.h"
58065 +#include "epcomms_elan4.h"
58066 +
58067 +#include <qsnet/procfs_linux.h>
58068 +
58069 +struct proc_dir_entry *ep_procfs_xmtr_root;
58070 +struct proc_dir_entry *ep_procfs_rcvr_root;
58071 +
58072 +static int
58073 +ep_proc_open (struct inode *inode, struct file *file)
58074 +{
58075 +    QSNET_PROC_PRIVATE *pr;
58076 +    int           pages = 4;
58077 +
58078 +    if ((pr = kmalloc (sizeof (QSNET_PROC_PRIVATE), GFP_KERNEL)) == NULL)
58079 +       return (-ENOMEM);
58080 +    
58081 +    do {       
58082 +       pr->pr_data_len = PAGESIZE * pages;
58083 +
58084 +       KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1);
58085 +       if (pr->pr_data == NULL) 
58086 +       { 
58087 +           pr->pr_len  = sprintf (pr->pr_data, "Out of Memory\n");
58088 +           break;
58089 +       } 
58090 +       
58091 +       pr->pr_off     = 0;
58092 +       pr->pr_len     = 0;
58093 +       pr->pr_data[0] = 0;
58094 +       
58095 +       pr->pr_di.func  = qsnet_proc_character_fill;
58096 +       pr->pr_di.arg   = (long)pr;
58097 +       
58098 +       if (!strcmp("debug_xmtr", file->f_dentry->d_iname)) 
58099 +       {   
58100 +           EP_XMTR *xmtr = (EP_XMTR *)(PDE(inode)->data);
58101 +           ep_display_xmtr (&pr->pr_di, xmtr);
58102 +       }
58103 +       
58104 +       if (!strcmp("debug_rcvr", file->f_dentry->d_iname)) 
58105 +       {
58106 +           EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
58107 +           ep_display_rcvr (&pr->pr_di, rcvr, 0);
58108 +       }
58109 +       
58110 +       if (!strcmp("debug_full", file->f_dentry->d_iname)) 
58111 +       {
58112 +           EP_RCVR *rcvr = (EP_RCVR *)(PDE(inode)->data);
58113 +           ep_display_rcvr (&pr->pr_di, rcvr, 1);
58114 +       }
58115 +
58116 +       if ( pr->pr_len < pr->pr_data_len) 
58117 +           break; /* we managed to get all the output into the buffer */
58118 +
58119 +       pages++;
58120 +       KMEM_FREE ( pr->pr_data,  pr->pr_data_len);
58121 +    } while (1);
58122 +       
58123 +
58124 +    file->private_data = (void *) pr;
58125 +
58126 +    MOD_INC_USE_COUNT;
58127 +    return (0);
58128 +}
58129 +
58130 +struct file_operations ep_proc_operations = 
58131 +{
58132 +    read:      qsnet_proc_read,
58133 +    open:      ep_proc_open,
58134 +    release:   qsnet_proc_release,
58135 +};
58136 +
58137 +static int
58138 +proc_read_rcvr_stats(char *page, char **start, off_t off,
58139 +                    int count, int *eof, void *data)
58140 +{
58141 +    EP_RCVR *rcvr = (EP_RCVR *)data;
58142 +    
58143 +    if (rcvr == NULL) 
58144 +       sprintf(page,"proc_read_rcvr_stats rcvr=NULL\n");
58145 +    else {
58146 +       page[0] = 0;
58147 +       ep_rcvr_fillout_stats(rcvr,page);
58148 +    }
58149 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
58150 +}
58151 +
58152 +static int
58153 +proc_read_rcvr_rail_stats(char *page, char **start, off_t off,
58154 +                    int count, int *eof, void *data)
58155 +{
58156 +    EP_RCVR_RAIL *rcvr_rail = (EP_RCVR_RAIL *)data;
58157 +
58158 +    if (rcvr_rail == NULL) {
58159 +       strcpy(page,"proc_read_rcvr_rail_stats rcvr_rail=NULL");
58160 +    } else {
58161 +       page[0] = 0;
58162 +       ep_rcvr_rail_fillout_stats(rcvr_rail, page);
58163 +       EP_RCVR_OP(rcvr_rail,FillOutRailStats)(rcvr_rail,page);
58164 +    }
58165 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
58166 +}
58167 +
58168 +void
58169 +ep_procfs_rcvr_add(EP_RCVR *rcvr)
58170 +{ 
58171 +    /* ep/rcvr/service_number/stats       */
58172 +    /* ep/rcvr/service_number/debug_rcvr  */
58173 +    /* ep/rcvr/service_number/debug_full  */
58174 +    struct proc_dir_entry *p;
58175 +    char str[32];
58176 +
58177 +    sprintf(str,"%d", rcvr->Service);
58178 +
58179 +    rcvr->procfs_root = proc_mkdir (str, ep_procfs_rcvr_root);
58180 +
58181 +    if ((p = create_proc_entry ("stats", 0,  rcvr->procfs_root)) != NULL)
58182 +    {
58183 +       p->write_proc = NULL;
58184 +       p->read_proc  = proc_read_rcvr_stats;
58185 +       p->data       = rcvr;
58186 +       p->owner      = THIS_MODULE;
58187 +    }
58188 +
58189 +    if ((p = create_proc_entry ("debug_rcvr", 0, rcvr->procfs_root)) != NULL)
58190 +    {
58191 +       p->proc_fops = &ep_proc_operations;
58192 +       p->owner     = THIS_MODULE;
58193 +       p->data      = rcvr;
58194 +    }
58195 +
58196 +    if ((p = create_proc_entry ("debug_full", 0, rcvr->procfs_root)) != NULL)
58197 +    {
58198 +       p->proc_fops = &ep_proc_operations;
58199 +       p->owner     = THIS_MODULE;
58200 +       p->data      = rcvr;
58201 +    }
58202 +}
58203 +
58204 +void
58205 +ep_procfs_rcvr_del(EP_RCVR *rcvr)
58206 +{  
58207 +    char str[32];
58208 +    sprintf(str,"%d", rcvr->Service);
58209 +
58210 +    remove_proc_entry ("debug_full", rcvr->procfs_root);
58211 +    remove_proc_entry ("debug_rcvr", rcvr->procfs_root);
58212 +    remove_proc_entry ("stats",      rcvr->procfs_root);
58213 +
58214 +    remove_proc_entry (str, ep_procfs_rcvr_root);
58215 +}
58216 +
58217 +void 
58218 +ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail)
58219 +{
58220 +    /* ep/rcvr/service_number/railN/stats */
58221 +
58222 +    struct proc_dir_entry *p;
58223 +    char str[32];
58224 +    sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number);
58225 +
58226 +    rcvrRail->procfs_root = proc_mkdir (str, rcvrRail->Rcvr->procfs_root);
58227 +    
58228 +    if ((p = create_proc_entry ("stats", 0,  rcvrRail->procfs_root)) != NULL)
58229 +    {
58230 +       p->write_proc = NULL;
58231 +       p->read_proc  = proc_read_rcvr_rail_stats;
58232 +       p->data       = rcvrRail;
58233 +       p->owner      = THIS_MODULE;
58234 +    } 
58235 +}
58236 +
58237 +void 
58238 +ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail)
58239 +{
58240 +    char str[32];
58241 +    sprintf(str,"rail%d",rcvrRail->CommsRail->Rail->Number);
58242 +
58243 +    remove_proc_entry ("stats", rcvrRail->procfs_root);
58244 +
58245 +    remove_proc_entry (str, rcvrRail->Rcvr->procfs_root);
58246 +}
58247 +
58248 +
58249 +
58250 +
58251 +static int
58252 +proc_read_xmtr_stats(char *page, char **start, off_t off,
58253 +                    int count, int *eof, void *data)
58254 +{
58255 +    EP_XMTR *xmtr = (EP_XMTR *)data;
58256 +
58257 +    if (xmtr == NULL) 
58258 +       strcpy(page,"proc_read_xmtr_stats xmtr=NULL\n");
58259 +    else {
58260 +       page[0] = 0;
58261 +       ep_xmtr_fillout_stats(xmtr, page);
58262 +    }
58263 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
58264 +}
58265 +
58266 +static int
58267 +proc_read_xmtr_rail_stats(char *page, char **start, off_t off,
58268 +                    int count, int *eof, void *data)
58269 +{
58270 +    EP_XMTR_RAIL *xmtr_rail = (EP_XMTR_RAIL *)data;
58271 +
58272 +    if (xmtr_rail == NULL) 
58273 +       strcpy(page,"proc_read_xmtr_rail_stats xmtr_rail=NULL\n");
58274 +    else {
58275 +       page[0] = 0;
58276 +       ep_xmtr_rail_fillout_stats(xmtr_rail, page);
58277 +       EP_XMTR_OP(xmtr_rail,FillOutRailStats)(xmtr_rail,page);
58278 +    }
58279 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
58280 +}
58281 +
58282 +void
58283 +ep_procfs_xmtr_add(EP_XMTR *xmtr)
58284 +{ 
58285 +    /* ep/xmtr/service_number/stats       */
58286 +    /* ep/xmtr/service_number/debug_xmtr  */
58287 +    struct proc_dir_entry *p;
58288 +    char str[32];
58289 +
58290 +    sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr);
58291 +
58292 +    xmtr->procfs_root = proc_mkdir (str, ep_procfs_xmtr_root);
58293 +
58294 +    if ((p = create_proc_entry ("stats", 0,  xmtr->procfs_root)) != NULL)
58295 +    {
58296 +       p->write_proc = NULL;
58297 +       p->read_proc  = proc_read_xmtr_stats;
58298 +       p->data       = xmtr;
58299 +       p->owner      = THIS_MODULE;
58300 +    } 
58301 +
58302 +    if ((p = create_proc_entry ("debug_xmtr", 0, xmtr->procfs_root)) != NULL)
58303 +    {
58304 +       p->proc_fops = &ep_proc_operations;
58305 +       p->owner     = THIS_MODULE;
58306 +       p->data      = xmtr;
58307 +    }
58308 +}
58309 +
58310 +void
58311 +ep_procfs_xmtr_del(EP_XMTR *xmtr)
58312 +{  
58313 +    char str[32];
58314 +    sprintf(str,"%llx", (unsigned long long) (unsigned long)xmtr);
58315 +
58316 +    remove_proc_entry ("stats",      xmtr->procfs_root);
58317 +    remove_proc_entry ("debug_xmtr", xmtr->procfs_root);
58318 +
58319 +    remove_proc_entry (str, ep_procfs_xmtr_root);
58320 +}
58321 +
58322 +void 
58323 +ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail)
58324 +{
58325 +    /* ep/xmtr/service_number/railN/stats */
58326 +    
58327 +    struct proc_dir_entry *p;
58328 +    char str[32];
58329 +    sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number);
58330 +
58331 +    xmtrRail->procfs_root = proc_mkdir (str, xmtrRail->Xmtr->procfs_root);
58332 +
58333 +    if ((p = create_proc_entry ("stats", 0,  xmtrRail->procfs_root)) != NULL)
58334 +    {
58335 +       p->write_proc = NULL;
58336 +       p->read_proc  = proc_read_xmtr_rail_stats;
58337 +       p->data       = xmtrRail;
58338 +       p->owner      = THIS_MODULE;
58339 +    } 
58340 +}
58341 +
58342 +void 
58343 +ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail)
58344 +{
58345 +    char str[32];
58346 +    sprintf(str,"rail%d",xmtrRail->CommsRail->Rail->Number);
58347 +
58348 +    remove_proc_entry ("stats", xmtrRail->procfs_root);
58349 +
58350 +    remove_proc_entry (str, xmtrRail->Xmtr->procfs_root);
58351 +}
58352 +
58353 +void
58354 +ep_procfs_rcvr_xmtr_init(void)
58355 +{
58356 +    ep_procfs_rcvr_root = proc_mkdir ("rcvr", ep_procfs_root);
58357 +    ep_procfs_xmtr_root = proc_mkdir ("xmtr", ep_procfs_root); 
58358 +}
58359 +
58360 +void
58361 +ep_procfs_rcvr_xmtr_fini(void)
58362 +{
58363 +    remove_proc_entry ("rcvr", ep_procfs_root);
58364 +    remove_proc_entry ("xmtr", ep_procfs_root);
58365 +}
58366 +
58367 +/*
58368 + * Local variables:
58369 + * c-file-style: "stroustrup"
58370 + * End:
58371 + */
58372 diff -urN clean/drivers/net/qsnet/ep/kalloc.c linux-2.6.9/drivers/net/qsnet/ep/kalloc.c
58373 --- clean/drivers/net/qsnet/ep/kalloc.c 1969-12-31 19:00:00.000000000 -0500
58374 +++ linux-2.6.9/drivers/net/qsnet/ep/kalloc.c   2004-12-14 05:19:23.000000000 -0500
58375 @@ -0,0 +1,677 @@
58376 +/*
58377 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
58378 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
58379 + *
58380 + *    For licensing information please see the supplied COPYING file
58381 + *
58382 + */
58383 +
58384 +#ident "@(#)$Id: kalloc.c,v 1.19 2004/12/14 10:19:23 mike Exp $"
58385 +/*      $Source: /cvs/master/quadrics/epmod/kalloc.c,v $ */
58386 +
58387 +#include <qsnet/kernel.h>
58388 +
58389 +#include <elan/kcomm.h>
58390 +
58391 +#include "debug.h"
58392 +
58393 +static void
58394 +HashInPool (EP_ALLOC *alloc, EP_POOL *pool)
58395 +{
58396 +    int idx0 = HASH (pool->Handle.nmh_nmd.nmd_addr);
58397 +    int idx1 = HASH (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len);
58398 +
58399 +    list_add (&pool->HashBase, &alloc->HashBase[idx0]);
58400 +    list_add (&pool->HashTop, &alloc->HashTop[idx1]);
58401 +}
58402 +
58403 +static void
58404 +HashOutPool (EP_ALLOC *alloc, EP_POOL *pool)
58405 +{
58406 +    list_del (&pool->HashBase);
58407 +    list_del (&pool->HashTop);
58408 +}
58409 +
58410 +static EP_POOL *
58411 +LookupPool (EP_ALLOC *alloc, EP_ADDR addr)
58412 +{
58413 +    struct list_head *el;
58414 +    
58415 +    list_for_each (el, &alloc->HashBase[HASH(addr)]) {
58416 +       EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
58417 +       
58418 +       if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len))
58419 +           return (pool);
58420 +    }
58421 +    
58422 +    list_for_each (el, &alloc->HashTop[HASH(addr)]) {
58423 +       EP_POOL *pool = list_entry (el, EP_POOL, HashTop);
58424 +       
58425 +       if (pool->Handle.nmh_nmd.nmd_addr <= addr && addr < (pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len))
58426 +           return (pool);
58427 +    }
58428 +    
58429 +    return (NULL);
58430 +}
58431 +
58432 +static EP_POOL *
58433 +AllocatePool (EP_ALLOC *alloc, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr)
58434 +{
58435 +    EP_ADDR base = 0;
58436 +    EP_POOL *pool;
58437 +    EP_RAIL *rail;
58438 +    int i, railmask = 0;
58439 +    struct list_head *el;
58440 +
58441 +    KMEM_ZALLOC (pool, EP_POOL *, sizeof (EP_POOL), !(attr & EP_NO_SLEEP));
58442 +    
58443 +    if (pool == NULL)
58444 +       return (NULL);
58445 +    
58446 +    if (addr != 0)
58447 +       base = addr;
58448 +    else
58449 +    {
58450 +       for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
58451 +       {
58452 +           KMEM_ZALLOC (pool->Bitmaps[i - LN2_MIN_SIZE], bitmap_t *, BT_BITOUL(1 << (LN2_MAX_SIZE-i)) * sizeof (bitmap_t), !(attr & EP_NO_SLEEP));
58453 +           if (pool->Bitmaps[i - LN2_MIN_SIZE] == NULL)
58454 +               goto failed;
58455 +       }
58456 +    
58457 +       if ((base = ep_rmalloc (alloc->ResourceMap, size, !(attr & EP_NO_SLEEP))) == 0)
58458 +           goto failed;
58459 +    }
58460 +
58461 +    switch (alloc->Type)
58462 +    {
58463 +    case EP_ALLOC_TYPE_PRIVATE_SDRAM:
58464 +       rail = alloc->Data.Private.Rail;
58465 +
58466 +       if ((pool->Buffer.Sdram = rail->Operations.SdramAlloc (rail, base, size)) == 0)
58467 +           goto failed;
58468 +
58469 +       ep_perrail_sdram_map (rail, base, pool->Buffer.Sdram, size, perm, attr);
58470 +
58471 +       pool->Handle.nmh_nmd.nmd_addr = base;
58472 +       pool->Handle.nmh_nmd.nmd_len  = size;
58473 +       break;
58474 +       
58475 +    case EP_ALLOC_TYPE_PRIVATE_MAIN:
58476 +        KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP));
58477 +       if (pool->Buffer.Ptr == 0)
58478 +           goto failed;
58479 +
58480 +       ep_perrail_kaddr_map (alloc->Data.Private.Rail, base, pool->Buffer.Ptr, size, perm, attr);
58481 +
58482 +       pool->Handle.nmh_nmd.nmd_addr = base;
58483 +       pool->Handle.nmh_nmd.nmd_len  = size;
58484 +       break;
58485 +
58486 +    case EP_ALLOC_TYPE_SHARED_MAIN:
58487 +        KMEM_GETPAGES(pool->Buffer.Ptr, unsigned long, btop (size), !(attr & EP_NO_SLEEP));
58488 +       if (pool->Buffer.Ptr == 0)
58489 +           goto failed;
58490 +
58491 +       list_for_each (el, &alloc->Data.Shared.Rails) {
58492 +           EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
58493 +
58494 +           ep_perrail_kaddr_map (rail, base, pool->Buffer.Ptr, size, perm, attr);
58495 +
58496 +           railmask |= (1 << rail->Number);
58497 +       }
58498 +       pool->Handle.nmh_nmd.nmd_addr = base;
58499 +       pool->Handle.nmh_nmd.nmd_len  = size;
58500 +       pool->Handle.nmh_nmd.nmd_attr = EP_NMD_ATTR (alloc->Data.Shared.System->Position.pos_nodeid, railmask);
58501 +
58502 +       ep_nmh_insert (&alloc->Data.Shared.System->MappingTable, &pool->Handle);
58503 +       break;
58504 +
58505 +    default:
58506 +       goto failed;
58507 +    }
58508 +
58509 +    return (pool);
58510 +    
58511 + failed:
58512 +    if (addr == 0 && base)
58513 +       ep_rmfree (alloc->ResourceMap, size, base);
58514 +
58515 +    for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
58516 +       if (pool->Bitmaps[i - LN2_MIN_SIZE] != NULL)
58517 +           KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t));
58518 +    
58519 +    KMEM_FREE (pool, sizeof (EP_POOL));
58520 +    return (NULL);
58521 +}
58522 +
58523 +static void
58524 +FreePool (EP_ALLOC *alloc, EP_POOL *pool)
58525 +{
58526 +    struct list_head *el;
58527 +    int i;
58528 +
58529 +    switch (alloc->Type)
58530 +    {
58531 +    case EP_ALLOC_TYPE_PRIVATE_SDRAM:
58532 +       ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
58533 +
58534 +       alloc->Data.Private.Rail->Operations.SdramFree (alloc->Data.Private.Rail, pool->Buffer.Sdram, pool->Handle.nmh_nmd.nmd_len);
58535 +       break;
58536 +       
58537 +    case EP_ALLOC_TYPE_PRIVATE_MAIN:
58538 +       ep_perrail_unmap (alloc->Data.Private.Rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
58539 +
58540 +       KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len));
58541 +       break;
58542 +
58543 +    case EP_ALLOC_TYPE_SHARED_MAIN:
58544 +       ep_nmh_remove (&alloc->Data.Shared.System->MappingTable, &pool->Handle);
58545 +
58546 +       list_for_each (el, &alloc->Data.Shared.Rails) {
58547 +           EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
58548 +
58549 +           ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
58550 +       }
58551 +
58552 +       KMEM_FREEPAGES (pool->Buffer.Ptr, btop (pool->Handle.nmh_nmd.nmd_len));
58553 +       break;
58554 +    }
58555 +    
58556 +    if (pool->Bitmaps[0])
58557 +    {
58558 +       ep_rmfree (alloc->ResourceMap, pool->Handle.nmh_nmd.nmd_len, pool->Handle.nmh_nmd.nmd_addr);
58559 +    
58560 +       for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i ++)
58561 +           KMEM_FREE (pool->Bitmaps[i - LN2_MIN_SIZE], BT_BITOUL(1 << (LN2_MAX_SIZE - i)) * sizeof (bitmap_t));
58562 +    }
58563 +    
58564 +    KMEM_FREE (pool, sizeof (EP_POOL));
58565 +}
58566 +
58567 +static int
58568 +AddRail (EP_ALLOC *alloc, EP_RAIL *rail)
58569 +{
58570 +    struct list_head *el;
58571 +    EP_RAIL_ENTRY *l;
58572 +    unsigned long flags;
58573 +    int i;
58574 +
58575 +    ASSERT (alloc->Type == EP_ALLOC_TYPE_SHARED_MAIN);
58576 +
58577 +    KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1);
58578 +
58579 +    if (l == NULL)
58580 +       return (ENOMEM);
58581 +
58582 +    l->Rail = rail;
58583 +
58584 +    spin_lock_irqsave (&alloc->Lock, flags);
58585 +    for (i = 0; i < NHASH; i++)
58586 +    {
58587 +       list_for_each (el, &alloc->HashBase[i]) {
58588 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
58589 +
58590 +           ep_perrail_kaddr_map (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Buffer.Ptr, 
58591 +                                 pool->Handle.nmh_nmd.nmd_len, EP_PERM_WRITE, EP_NO_SLEEP);
58592 +
58593 +           pool->Handle.nmh_nmd.nmd_attr |= EP_NMD_ATTR (0, 1 << rail->Number);
58594 +       }
58595 +    }
58596 +
58597 +    list_add (&l->Link, &alloc->Data.Shared.Rails);
58598 +
58599 +    spin_unlock_irqrestore (&alloc->Lock, flags); 
58600 +    return (0);
58601 +}
58602 +
58603 +static void
58604 +RemoveRail (EP_ALLOC *alloc, EP_RAIL *rail)
58605 +{
58606 +    struct list_head *el;
58607 +    unsigned long flags;
58608 +    int i;
58609 +
58610 +    spin_lock_irqsave (&alloc->Lock, flags);
58611 +    for (i = 0; i < NHASH; i++)
58612 +    {
58613 +       list_for_each (el, &alloc->HashBase[i]) {
58614 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
58615 +
58616 +           ep_perrail_unmap (rail, pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
58617 +
58618 +           pool->Handle.nmh_nmd.nmd_attr &= ~EP_NMD_ATTR (0, 1 << rail->Number);
58619 +       }
58620 +    }
58621 +
58622 +    list_for_each (el, &alloc->Data.Shared.Rails) {
58623 +       EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link);
58624 +       if (tmp->Rail == rail)
58625 +       {
58626 +           list_del (el);
58627 +           KMEM_FREE(tmp, sizeof (EP_RAIL_ENTRY));
58628 +           break;
58629 +       }
58630 +    }
58631 +
58632 +    spin_unlock_irqrestore (&alloc->Lock, flags);
58633 +}
58634 +
58635 +static EP_POOL *
58636 +AllocateBlock (EP_ALLOC *alloc, unsigned size, EP_ATTRIBUTE attr, int *offset)
58637 +{
58638 +    int block, j, k;
58639 +    unsigned long flags;
58640 +    EP_POOL *pool;
58641 +
58642 +
58643 +    if (size > MAX_SIZE)
58644 +    {
58645 +       if ((attr & EP_NO_ALLOC) || (pool  = AllocatePool (alloc, 0, size, alloc->Perm, attr)) == NULL)
58646 +           return (NULL);
58647 +
58648 +       spin_lock_irqsave (&alloc->Lock, flags);
58649 +       HashInPool (alloc, pool);
58650 +       spin_unlock_irqrestore (&alloc->Lock, flags);
58651 +
58652 +       *offset = 0;
58653 +
58654 +       return pool;
58655 +    }
58656 +
58657 +    spin_lock_irqsave (&alloc->Lock, flags);
58658 +
58659 +    /* Round up size to next power of 2 */
58660 +    for (k = LN2_MIN_SIZE; (1 << k) < size; k++)
58661 +       ;
58662 +    
58663 +    /* k now has ln2 of the size to allocate. */
58664 +    /* find the free list with the smallest block we can use*/
58665 +    for (j = k; j <= LN2_MAX_SIZE && list_empty (&alloc->Freelists[j - LN2_MIN_SIZE]); j++)
58666 +       ;
58667 +    
58668 +    /* j has ln2 of the smallest size block we can use */
58669 +    if (j < LN2_MAX_SIZE)
58670 +    {
58671 +       int nbits = 1 << (LN2_MAX_SIZE-j);
58672 +       
58673 +       pool  = list_entry (alloc->Freelists[j - LN2_MIN_SIZE].next, EP_POOL, Link[j - LN2_MIN_SIZE]);
58674 +       block = (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) << j);
58675 +       
58676 +       BT_CLEAR (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j);
58677 +       
58678 +       if (bt_lowbit (pool->Bitmaps[j - LN2_MIN_SIZE], nbits) == -1)
58679 +           list_del (&pool->Link[j - LN2_MIN_SIZE]);
58680 +    }
58681 +    else
58682 +    {
58683 +       spin_unlock_irqrestore (&alloc->Lock, flags);
58684 +       
58685 +       if ((attr & EP_NO_ALLOC) || (pool  = AllocatePool (alloc, 0, MAX_SIZE, alloc->Perm, attr)) == NULL)
58686 +           return (NULL);
58687 +
58688 +       block = 0;
58689 +       j = LN2_MAX_SIZE;
58690 +       
58691 +       spin_lock_irqsave (&alloc->Lock, flags);
58692 +       
58693 +       HashInPool (alloc, pool);
58694 +    }
58695 +    
58696 +    /* Split it until the buddies are the correct size, putting one
58697 +     * buddy back on the free list and continuing to split the other */
58698 +    while (--j >= k)
58699 +    {
58700 +       list_add (&pool->Link[j - LN2_MIN_SIZE], &alloc->Freelists[j - LN2_MIN_SIZE]);
58701 +       
58702 +       BT_SET (pool->Bitmaps[j - LN2_MIN_SIZE], block >> j);
58703 +       
58704 +       block += (1 << j);
58705 +    }
58706 +    spin_unlock_irqrestore (&alloc->Lock, flags);
58707 +
58708 +    *offset = block;
58709 +
58710 +    return (pool);
58711 +}
58712 +
58713 +static void
58714 +FreeBlock (EP_ALLOC *alloc, EP_ADDR addr, unsigned size)
58715 +{
58716 +    EP_POOL *pool;
58717 +    int  k, block = 0;
58718 +    unsigned long flags;
58719 +    
58720 +    spin_lock_irqsave (&alloc->Lock, flags);
58721 +    /* Round up size to next power of 2 */
58722 +    for (k = LN2_MIN_SIZE; (1 << k) < size; k++)
58723 +       ;
58724 +
58725 +    /* Find the pool containing this block */
58726 +    pool = LookupPool (alloc, addr);
58727 +
58728 +    /* It must exist */
58729 +    ASSERT (pool != NULL);
58730 +
58731 +    /* If we're freeing a subset of it, then update the bitmaps */
58732 +    if (size <= MAX_SIZE)
58733 +    {
58734 +       ASSERT (BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (addr - pool->Handle.nmh_nmd.nmd_addr) >> k) == 0);
58735 +       
58736 +       block = addr - pool->Handle.nmh_nmd.nmd_addr;
58737 +       
58738 +       while (k < LN2_MAX_SIZE && BT_TEST (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1))
58739 +       {
58740 +           BT_CLEAR (pool->Bitmaps[k - LN2_MIN_SIZE], (block >> k) ^ 1);
58741 +           
58742 +           if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1)
58743 +               list_del (&pool->Link[k - LN2_MIN_SIZE]);
58744 +           
58745 +           k++;
58746 +       }
58747 +    }
58748 +
58749 +    if (k >= LN2_MAX_SIZE)
58750 +    {
58751 +       HashOutPool (alloc, pool);
58752 +       spin_unlock_irqrestore (&alloc->Lock, flags);
58753 +
58754 +       FreePool (alloc, pool);
58755 +    }
58756 +    else
58757 +    {
58758 +       if (bt_lowbit (pool->Bitmaps[k - LN2_MIN_SIZE], (1 << (LN2_MAX_SIZE - k))) == -1)
58759 +           list_add (&pool->Link[k - LN2_MIN_SIZE], &alloc->Freelists[k - LN2_MIN_SIZE]);
58760 +
58761 +       BT_SET (pool->Bitmaps[k - LN2_MIN_SIZE], block >> k);
58762 +
58763 +       spin_unlock_irqrestore (&alloc->Lock, flags);
58764 +    }
58765 +}
58766 +
58767 +static void
58768 +InitialiseAllocator (EP_ALLOC *alloc, EP_ALLOC_TYPE type, unsigned int perm, EP_RMAP *rmap)
58769 +{
58770 +    int i;
58771 +
58772 +    spin_lock_init (&alloc->Lock);
58773 +
58774 +    alloc->Type        = type;
58775 +    alloc->ResourceMap = rmap;
58776 +    alloc->Perm        = perm;
58777 +
58778 +    for (i = 0; i < NHASH; i++)
58779 +    {
58780 +       (&alloc->HashBase[i])->next = &alloc->HashBase[i];
58781 +
58782 +       INIT_LIST_HEAD (&alloc->HashBase[i]);
58783 +       INIT_LIST_HEAD (&alloc->HashTop[i]);
58784 +    }
58785 +    
58786 +    for (i = 0; i < NUM_FREELISTS; i++)
58787 +       INIT_LIST_HEAD (&alloc->Freelists[i]);
58788 +}
58789 +
58790 +static void
58791 +DestroyAllocator (EP_ALLOC *alloc)
58792 +{
58793 +    struct list_head *el, *next;
58794 +    int i;
58795 +
58796 +    for (i = 0; i < NHASH; i++)
58797 +    {
58798 +       list_for_each_safe (el, next, &alloc->HashBase[i]) { 
58799 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
58800 +
58801 +           printk ("!!DestroyAllocator: pool=%p type=%d addr=%x len=%x\n", pool, alloc->Type,
58802 +                   pool->Handle.nmh_nmd.nmd_addr, pool->Handle.nmh_nmd.nmd_len);
58803 +
58804 +           list_del (&pool->HashBase);
58805 +           list_del (&pool->HashTop);
58806 +
58807 +           // XXXX: FreePool (alloc, pool);
58808 +       }
58809 +    }
58810 +
58811 +    spin_lock_destroy (&alloc->Lock);
58812 +}
58813 +
58814 +void
58815 +ep_display_alloc (EP_ALLOC *alloc)
58816 +{
58817 +    struct list_head *el;
58818 +    int i;
58819 +    int npools = 0;
58820 +    int nbytes = 0;
58821 +    int nfree = 0;
58822 +    unsigned long flags;
58823 +
58824 +    spin_lock_irqsave (&alloc->Lock, flags);
58825 +
58826 +    ep_debugf (DBG_DEBUG, "Kernel comms memory allocator %p type %d\n", alloc, alloc->Type);
58827 +    for (i = 0; i < NHASH; i++)
58828 +    {
58829 +       list_for_each (el, &alloc->HashBase[i]) {
58830 +           EP_POOL *pool = list_entry (el, EP_POOL, HashBase);
58831 +
58832 +           ep_debugf (DBG_DEBUG, "  POOL %4x: %p -> %x.%x\n", i, pool, pool->Handle.nmh_nmd.nmd_addr,
58833 +                      pool->Handle.nmh_nmd.nmd_addr + pool->Handle.nmh_nmd.nmd_len);
58834 +
58835 +           npools++;
58836 +           nbytes += pool->Handle.nmh_nmd.nmd_len;
58837 +       }
58838 +    }
58839 +    
58840 +    for (i = LN2_MIN_SIZE; i <= LN2_MAX_SIZE; i++)
58841 +    {
58842 +       int n = 0;
58843 +
58844 +       list_for_each (el, &alloc->Freelists[i - LN2_MIN_SIZE]) {
58845 +           EP_POOL *pool  = list_entry (el, EP_POOL, Link[i - LN2_MIN_SIZE]);
58846 +           int      nbits = bt_nbits (pool->Bitmaps[i - LN2_MIN_SIZE], 1 << (LN2_MAX_SIZE - i));
58847 +
58848 +           n += nbits;
58849 +           nfree += (nbits << i);
58850 +       }
58851 +       
58852 +       if (n != 0)
58853 +           ep_debugf (DBG_DEBUG, "  SIZE %5d : num %d\n", (1 << i), n);
58854 +    }
58855 +    ep_debugf (DBG_DEBUG, "%d pools with %d bytes and %d bytes free\n", npools, nbytes, nfree);
58856 +
58857 +    spin_unlock_irqrestore (&alloc->Lock, flags);
58858 +}
58859 +
58860 +/* per-rail allocators */
58861 +void
58862 +ep_alloc_init (EP_RAIL *rail)
58863 +{
58864 +    EP_RMAP *rmap = ep_rmallocmap (EP_PRIVATE_RMAP_SIZE, "PrivateMap", 1);
58865 +
58866 +    ep_rmfree (rmap, EP_PRIVATE_TOP-EP_PRIVATE_BASE, EP_PRIVATE_BASE);
58867 +
58868 +    InitialiseAllocator (&rail->ElanAllocator, EP_ALLOC_TYPE_PRIVATE_SDRAM, EP_PERM_ALL, rmap);
58869 +    InitialiseAllocator (&rail->MainAllocator, EP_ALLOC_TYPE_PRIVATE_MAIN, EP_PERM_WRITE, rmap);
58870 +
58871 +    rail->ElanAllocator.Data.Private.Rail = rail;
58872 +    rail->MainAllocator.Data.Private.Rail = rail;
58873 +}
58874 +
58875 +void
58876 +ep_alloc_fini (EP_RAIL *rail)
58877 +{
58878 +    EP_RMAP *rmap = rail->ElanAllocator.ResourceMap;
58879 +
58880 +    DestroyAllocator (&rail->ElanAllocator);
58881 +    DestroyAllocator (&rail->MainAllocator);
58882 +    
58883 +    ep_rmfreemap (rmap);
58884 +}
58885 +
58886 +sdramaddr_t
58887 +ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr)
58888 +{
58889 +    EP_POOL *pool = AllocatePool (&rail->ElanAllocator, addr, size, perm, attr);
58890 +    unsigned long flags;
58891 +
58892 +    if (pool == NULL)
58893 +       return (0);
58894 +
58895 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
58896 +    HashInPool (&rail->ElanAllocator, pool);
58897 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
58898 +
58899 +    return (pool->Buffer.Sdram);
58900 +}
58901 +
58902 +void
58903 +ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr)
58904 +{
58905 +    EP_POOL *pool;
58906 +    unsigned long flags;
58907 +
58908 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
58909 +    pool = LookupPool (&rail->ElanAllocator, addr);
58910 +    
58911 +    HashOutPool (&rail->ElanAllocator, pool);
58912 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
58913 +    
58914 +    FreePool (&rail->ElanAllocator, pool);
58915 +}
58916 +
58917 +sdramaddr_t
58918 +ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp)
58919 +{
58920 +    int             offset;
58921 +    EP_POOL *pool;
58922 +
58923 +    if ((pool = AllocateBlock (&rail->ElanAllocator, size, attr, &offset)) == NULL)
58924 +       return (0);
58925 +    
58926 +    *addrp  = pool->Handle.nmh_nmd.nmd_addr + offset;
58927 +
58928 +    return (pool->Buffer.Sdram + offset);
58929 +}
58930 +
58931 +void
58932 +ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size)
58933 +{
58934 +    FreeBlock (&rail->ElanAllocator, addr, size);
58935 +}
58936 +
58937 +void *
58938 +ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp)
58939 +{
58940 +    int             offset;
58941 +    EP_POOL *pool;
58942 +
58943 +    if ((pool = AllocateBlock (&rail->MainAllocator, size, attr, &offset)) == NULL)
58944 +       return (NULL);
58945 +    
58946 +    *addrp  = pool->Handle.nmh_nmd.nmd_addr + offset;
58947 +
58948 +    return ((void *) ((unsigned long) pool->Buffer.Ptr + offset));
58949 +}
58950 +
58951 +void
58952 +ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size)
58953 +{
58954 +    FreeBlock (&rail->MainAllocator, addr, size);
58955 +}
58956 +
58957 +sdramaddr_t
58958 +ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr)
58959 +{
58960 +    EP_POOL    *pool;
58961 +    sdramaddr_t res;
58962 +    unsigned long flags;
58963 +
58964 +    spin_lock_irqsave (&rail->ElanAllocator.Lock, flags);
58965 +    if ((pool = LookupPool (&rail->ElanAllocator, addr)) == NULL)
58966 +       res = 0;
58967 +    else
58968 +       res = pool->Buffer.Sdram + (addr - pool->Handle.nmh_nmd.nmd_addr);
58969 +    spin_unlock_irqrestore (&rail->ElanAllocator.Lock, flags);
58970 +
58971 +    return (res);
58972 +}
58973 +
58974 +void *
58975 +ep_elan2main (EP_RAIL *rail, EP_ADDR addr)
58976 +{
58977 +    EP_POOL *pool;
58978 +    void *res;
58979 +    unsigned long flags;
58980 +
58981 +    spin_lock_irqsave (&rail->MainAllocator.Lock, flags);
58982 +    if ((pool = LookupPool (&rail->MainAllocator, addr)) == NULL)
58983 +       res = NULL;
58984 +    else
58985 +       res = (void *) ((unsigned long) pool->Buffer.Ptr + (addr - pool->Handle.nmh_nmd.nmd_addr));
58986 +    spin_unlock_irqrestore (&rail->MainAllocator.Lock, flags);
58987 +
58988 +    return (res);
58989 +}
58990 +
58991 +/* shared allocators */
58992 +int
58993 +ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail)
58994 +{
58995 +    return (AddRail (&sys->Allocator, rail));
58996 +}
58997 +
58998 +void
58999 +ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail)
59000 +{
59001 +    RemoveRail (&sys->Allocator, rail);
59002 +}
59003 +
59004 +void
59005 +ep_shared_alloc_init (EP_SYS *sys)
59006 +{
59007 +    EP_RMAP *rmap = ep_rmallocmap (EP_SHARED_RMAP_SIZE, "shared_alloc_map", 1);
59008 +
59009 +    ep_rmfree (rmap, EP_SHARED_TOP - EP_SHARED_BASE, EP_SHARED_BASE);
59010 +
59011 +    InitialiseAllocator (&sys->Allocator, EP_ALLOC_TYPE_SHARED_MAIN, EP_PERM_WRITE, rmap);
59012 +
59013 +    INIT_LIST_HEAD (&sys->Allocator.Data.Shared.Rails);
59014 +
59015 +    sys->Allocator.Data.Shared.System = sys;
59016 +}
59017 +
59018 +void
59019 +ep_shared_alloc_fini (EP_SYS *sys)
59020 +{
59021 +    EP_RMAP *rmap = sys->Allocator.ResourceMap;
59022 +
59023 +    DestroyAllocator (&sys->Allocator);
59024 +
59025 +    ep_rmfreemap (rmap);
59026 +}
59027 +
59028 +void *
59029 +ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd)
59030 +{
59031 +    int offset;
59032 +    EP_POOL *pool;
59033 +
59034 +    if ((pool = AllocateBlock (&sys->Allocator, size, attr, &offset)) == NULL)
59035 +       return (NULL);
59036 +
59037 +    ep_nmd_subset (nmd, &pool->Handle.nmh_nmd, offset, size);
59038 +
59039 +    return ((void *) ((unsigned long) pool->Buffer.Ptr + offset));
59040 +}
59041 +
59042 +void
59043 +ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd)
59044 +{
59045 +    FreeBlock (&sys->Allocator, nmd->nmd_addr, nmd->nmd_len);
59046 +}
59047 +
59048 +/*
59049 + * Local variables:
59050 + * c-file-style: "stroustrup"
59051 + * End:
59052 + */
59053 diff -urN clean/drivers/net/qsnet/ep/kcomm.c linux-2.6.9/drivers/net/qsnet/ep/kcomm.c
59054 --- clean/drivers/net/qsnet/ep/kcomm.c  1969-12-31 19:00:00.000000000 -0500
59055 +++ linux-2.6.9/drivers/net/qsnet/ep/kcomm.c    2005-07-20 08:01:34.000000000 -0400
59056 @@ -0,0 +1,1447 @@
59057 +/*
59058 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
59059 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
59060 + *
59061 + *    For licensing information please see the supplied COPYING file
59062 + *
59063 + */
59064 +
59065 +#ident "@(#)$Id: kcomm.c,v 1.61.2.2 2005/07/20 12:01:34 mike Exp $"
59066 +/*      $Source: /cvs/master/quadrics/epmod/kcomm.c,v $ */
59067 +
59068 +#include <qsnet/kernel.h>
59069 +#include <qsnet/kthread.h>
59070 +
59071 +#include <elan/kcomm.h>
59072 +#include <elan/epsvc.h>
59073 +#include <elan/epcomms.h>
59074 +
59075 +#include "cm.h"
59076 +#include "debug.h"
59077 +
59078 +int MaxSwitchLevels = 5;                               /* Max 1024 sized machine */
59079 +
59080 +static char *NodeStateNames[EP_NODE_NUM_STATES] = 
59081 +{
59082 +    "Disconnected",
59083 +    "Connecting",
59084 +    "Connnected",
59085 +    "LeavingConnected",
59086 +    "LocalPassivate",
59087 +    "RemotePassivate",
59088 +    "Passivated",
59089 +    "Disconnecting",
59090 +};
59091 +
59092 +static void
59093 +ep_xid_cache_fill (EP_SYS *sys, EP_XID_CACHE *cache)
59094 +{
59095 +    unsigned long flags;
59096 +
59097 +    spin_lock_irqsave (&sys->XidLock, flags);
59098 +
59099 +    cache->Current = sys->XidNext;
59100 +    cache->Last    = cache->Current + EP_XID_CACHE_CHUNKS-1;
59101 +
59102 +    sys->XidNext += EP_XID_CACHE_CHUNKS;
59103 +
59104 +    spin_unlock_irqrestore (&sys->XidLock, flags);
59105 +}
59106 +
59107 +EP_XID
59108 +ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache)
59109 +{
59110 +    EP_XID xid;
59111 +    
59112 +    if (cache->Current == cache->Last)
59113 +       ep_xid_cache_fill (sys, cache);
59114 +
59115 +    xid.Generation = sys->XidGeneration;
59116 +    xid.Handle     = cache->Handle;
59117 +    xid.Unique     = cache->Current++;
59118 +
59119 +    return (xid);
59120 +}
59121 +
59122 +void
59123 +ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache)
59124 +{
59125 +    /* Stall manager thread - it doesn't lock the XidCacheList */
59126 +    ep_kthread_stall (&sys->ManagerThread);
59127 +
59128 +    cache->Handle = ++sys->XidHandle;
59129 +
59130 +    list_add_tail (&cache->Link, &sys->XidCacheList);
59131 +
59132 +    ep_kthread_resume (&sys->ManagerThread);
59133 +}
59134 +
59135 +void
59136 +ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache)
59137 +{
59138 +    /* Stall manager thread - it doesn't lock the XidCacheList */
59139 +    ep_kthread_stall (&sys->ManagerThread);
59140 +
59141 +    list_del (&cache->Link);
59142 +
59143 +    ep_kthread_resume (&sys->ManagerThread);
59144 +}
59145 +
59146 +EP_XID_CACHE *
59147 +ep_xid_cache_find (EP_SYS *sys, EP_XID xid)
59148 +{
59149 +    struct list_head *el;
59150 +
59151 +    list_for_each (el, &sys->XidCacheList) {
59152 +       EP_XID_CACHE *cache = list_entry (el, EP_XID_CACHE, Link);
59153 +
59154 +       if (sys->XidGeneration == xid.Generation && cache->Handle == xid.Handle)
59155 +           return (cache);
59156 +    }
59157 +
59158 +    return (NULL);
59159 +}
59160 +
59161 +static int
59162 +MsgBusy (EP_RAIL *rail, EP_OUTPUTQ *outputq, int slotNum)
59163 +{
59164 +    switch (rail->Operations.OutputQState (rail, outputq, slotNum))
59165 +    {
59166 +    case EP_OUTPUTQ_BUSY:                      /* still busy */
59167 +       return 1;
59168 +       
59169 +    case EP_OUTPUTQ_FAILED:                    /* NACKed */
59170 +    {
59171 +#if defined(DEBUG_PRINTF)
59172 +       EP_MANAGER_MSG *msg = rail->Operations.OutputQMsg (rail, outputq, slotNum);
59173 +
59174 +       EPRINTF4 (DBG_MANAGER, "%s: kcomm msg %d type %d to %d failed\n", rail->Name, slotNum, msg->Hdr.Type, msg->Hdr.DestId);
59175 +#endif
59176 +       break;
59177 +    }
59178 +    
59179 +    case EP_OUTPUTQ_FINISHED:                  /* anything else is finished */
59180 +       break;
59181 +    }
59182 +
59183 +    return 0;
59184 +}
59185 +
59186 +int
59187 +ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body)
59188 +{
59189 +    EP_SYS         *sys  = rail->System;
59190 +    EP_NODE        *node = &sys->Nodes[nodeId];
59191 +    int             n    = EP_MANAGER_OUTPUTQ_SLOTS;
59192 +    int             slotNum;
59193 +    int             rnum;
59194 +    EP_RAIL        *msgRail;
59195 +    EP_MANAGER_MSG *msg;
59196 +    unsigned long   flags;
59197 +
59198 +    ASSERT (! EP_XID_INVALID (xid));
59199 +
59200 +    if ((rnum = ep_pickRail (node->ConnectedRails)) >= 0)
59201 +       msgRail = sys->Rails[rnum];
59202 +    else
59203 +    {
59204 +       if (EP_MANAGER_MSG_TYPE_CONNECTED(type))
59205 +       {
59206 +           ep_debugf (DBG_MANAGER, "%s: no rails available, trying to send type %d to %d\n", rail->Name, type, nodeId);
59207 +           return -EHOSTDOWN;
59208 +       }
59209 +
59210 +       ep_debugf (DBG_MANAGER, "%s: no rails connected to %d - using receiving rail\n", rail->Name, nodeId);
59211 +
59212 +       msgRail = rail;
59213 +    }
59214 +    
59215 +
59216 +    spin_lock_irqsave (&msgRail->ManagerOutputQLock, flags);
59217 +
59218 +    slotNum = msgRail->ManagerOutputQNextSlot;
59219 +
59220 +    while (n-- > 0 && MsgBusy (msgRail, msgRail->ManagerOutputQ, slotNum))             /* search for idle message buffer */
59221 +    {
59222 +       if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS)
59223 +           msgRail->ManagerOutputQNextSlot = 0;
59224 +      
59225 +       slotNum = msgRail->ManagerOutputQNextSlot;
59226 +    }
59227 +
59228 +    if (n == 0)                                                        /* all message buffers busy */
59229 +    {
59230 +       spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
59231 +
59232 +       ep_debugf (DBG_MANAGER, "%s: all message buffers busy: trying to send type %d to %d\n", msgRail->Name, type, nodeId);
59233 +       return -EBUSY;
59234 +    }
59235 +
59236 +    msg = msgRail->Operations.OutputQMsg (msgRail, msgRail->ManagerOutputQ, slotNum);
59237 +    
59238 +    EPRINTF7 (DBG_MANAGER, "%s: ep_send_message: type=%d nodeId=%d rail=%d xid=%08x.%08x.%016llx\n", 
59239 +             msgRail->Name, type, nodeId, rail->Number, xid.Generation, xid.Handle, (long long) xid.Unique);
59240 +
59241 +    msg->Hdr.Version    = EP_MANAGER_MSG_VERSION;
59242 +    msg->Hdr.Type       = type;
59243 +    msg->Hdr.Rail       = rail->Number;
59244 +    msg->Hdr.NodeId     = msgRail->Position.pos_nodeid;
59245 +    msg->Hdr.DestId     = nodeId;
59246 +    msg->Hdr.Xid        = xid;
59247 +    msg->Hdr.Checksum   = 0;
59248 +
59249 +    if (body) bcopy (body, &msg->Body, sizeof (EP_MANAGER_MSG_BODY));
59250 +
59251 +    msg->Hdr.Checksum = CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE);
59252 +
59253 +    if (msgRail->Operations.OutputQSend (msgRail, msgRail->ManagerOutputQ, slotNum, EP_MANAGER_MSG_SIZE,
59254 +                                     nodeId, EP_SYSTEMQ_MANAGER, EP_MANAGER_OUTPUTQ_RETRIES) < 0)
59255 +       IncrStat (msgRail, SendMessageFailed);
59256 +    
59257 +    if (++(msgRail->ManagerOutputQNextSlot) == EP_MANAGER_OUTPUTQ_SLOTS) /* check this one last next time */
59258 +       msgRail->ManagerOutputQNextSlot = 0;
59259 +
59260 +    spin_unlock_irqrestore (&msgRail->ManagerOutputQLock, flags);
59261 +
59262 +    return 0;
59263 +}
59264 +
59265 +void
59266 +ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason)
59267 +{
59268 +    EP_NODE            *node = &sys->Nodes[nodeId];
59269 +    EP_MANAGER_MSG_BODY body;
59270 +    EP_XID              xid;
59271 +    kcondvar_t          sleep;
59272 +    int                 rnum;
59273 +    unsigned long       flags;
59274 +
59275 +    if (nodeId > sys->Position.pos_nodes)
59276 +       return;
59277 +
59278 +    strncpy (body.PanicReason, reason, sizeof (body.PanicReason));
59279 +
59280 +    kcondvar_init (&sleep);
59281 +    spin_lock_irqsave (&sys->NodeLock, flags);
59282 +    for (;;)
59283 +    {
59284 +       if (node->ConnectedRails == 0)
59285 +           break;
59286 +
59287 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
59288 +           if (node->ConnectedRails & (1 << rnum))
59289 +               break;
59290 +
59291 +       xid = ep_xid_cache_alloc(sys, &sys->Rails[rnum]->XidCache);
59292 +       
59293 +       if (ep_send_message (sys->Rails[rnum], nodeId, EP_MANAGER_MSG_TYPE_REMOTE_PANIC, xid, &body) == 0)
59294 +           break;
59295 +
59296 +       if (kcondvar_timedwaitsig (&sleep, &sys->NodeLock, &flags, lbolt + hz) == CV_RET_SIGPENDING)
59297 +           break;
59298 +    }
59299 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
59300 +    kcondvar_destroy (&sleep);
59301 +}
59302 +
59303 +static void
59304 +ProcessNeterrRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
59305 +{
59306 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr request - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, (long long)msg->Body.Cookies[0], (long long)msg->Body.Cookies[1]);
59307 +
59308 +    rail->Operations.NeterrFixup (rail, msg->Hdr.NodeId, msg->Body.Cookies);
59309 +    
59310 +    ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_NETERR_RESPONSE, msg->Hdr.Xid, &msg->Body);
59311 +}
59312 +
59313 +
59314 +static void
59315 +ProcessNeterrResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
59316 +{
59317 +    EP_SYS       *sys      = rail->System;
59318 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
59319 +    unsigned long flags;
59320 +
59321 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: process neterr response - node %d cookies %llx %llx\n", rail->Name, msg->Hdr.NodeId, (long long)msg->Body.Cookies[0], (long long)msg->Body.Cookies[1]);
59322 +
59323 +    spin_lock_irqsave (&sys->NodeLock, flags);
59324 +    if (EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid))
59325 +    {
59326 +       EP_INVALIDATE_XID (nodeRail->MsgXid);
59327 +
59328 +       if (nodeRail->NetworkErrorCookies[0] != 0 && nodeRail->NetworkErrorCookies[0] == msg->Body.Cookies[0])
59329 +           nodeRail->NetworkErrorCookies[0] = 0;
59330 +
59331 +       if (nodeRail->NetworkErrorCookies[1] != 0 && nodeRail->NetworkErrorCookies[1] == msg->Body.Cookies[1])
59332 +           nodeRail->NetworkErrorCookies[1] = 0;
59333 +       
59334 +       if (nodeRail->NetworkErrorCookies[0] == 0 && nodeRail->NetworkErrorCookies[1] == 0)
59335 +           nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_ATOMIC_PACKET;
59336 +    }
59337 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
59338 +}
59339 +
59340 +
59341 +static void
59342 +ProcessGetNodeState (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
59343 +{
59344 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
59345 +    unsigned int service = msg->Body.Service;
59346 +
59347 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessGetNodeState: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
59348 +             NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? " (NetworkError)" : "");
59349 +
59350 +    msg->Body.NodeState.State             = nodeRail->State;
59351 +    msg->Body.NodeState.NetworkErrorState = nodeRail->NetworkErrorState;
59352 +    msg->Body.NodeState.Railmask          = ep_rcvr_railmask (rail->System, service);
59353 +
59354 +    if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0)
59355 +       printk ("%s: get node state for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
59356 +}
59357 +
59358 +static void
59359 +ProcessFlushRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
59360 +{
59361 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[msg->Hdr.NodeId];
59362 +
59363 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushRequest: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
59364 +             NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState ? " (NetworkError)" : "");
59365 +
59366 +    switch (nodeRail->State)
59367 +    {
59368 +    case EP_NODE_REMOTE_PASSIVATE:
59369 +       nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;     /* retransmit our flush request quickly */
59370 +       EPRINTF3 (DBG_MANAGER, "%s: ProcessFlushRequest: NextRunTime -> %lx (%lx)\n", rail->Name, nodeRail->NextRunTime, lbolt);
59371 +       /* DROPTHROUGH */
59372 +
59373 +    case EP_NODE_PASSIVATED:
59374 +    case EP_NODE_DISCONNECTED:
59375 +       if (nodeRail->NetworkErrorState != 0)
59376 +           break;
59377 +
59378 +       if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE, msg->Hdr.Xid, NULL) < 0)
59379 +           printk ("%s: flush request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
59380 +       break;
59381 +       
59382 +    default:
59383 +       EPRINTF4 (DBG_MANAGER, "%s: flush request for %s[%d] - node not in approriate state - %s\n", msgRail->Name, rail->Name, msg->Hdr.NodeId, NodeStateNames[nodeRail->State]);
59384 +       break;
59385 +    }
59386 +}
59387 +
59388 +static void
59389 +ProcessFlushResponse (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
59390 +{
59391 +    EP_NODE_RAIL *nodeRail= &rail->Nodes[msg->Hdr.NodeId];
59392 +
59393 +    EPRINTF5 (DBG_MANAGER, "%s: ProcessFlushResponse: %s - %d %s%s\n",  msgRail->Name, rail->Name, msg->Hdr.NodeId,
59394 +             NodeStateNames[nodeRail->State], EP_XIDS_MATCH (nodeRail->MsgXid, msg->Hdr.Xid) ? " (XIDS match)" : "");
59395 +
59396 +    if (nodeRail->State == EP_NODE_REMOTE_PASSIVATE && EP_XIDS_MATCH(nodeRail->MsgXid, msg->Hdr.Xid))
59397 +    {
59398 +       EP_INVALIDATE_XID (nodeRail->MsgXid);
59399 +
59400 +       printk ("%s: flush response from %d - move to passivated list\n", rail->Name, msg->Hdr.NodeId);
59401 +       list_del (&nodeRail->Link);
59402 +
59403 +       /* Node is now passivated - attempt to failover  messages */
59404 +       list_add_tail (&nodeRail->Link, &rail->PassivatedList);
59405 +       nodeRail->State = EP_NODE_PASSIVATED;
59406 +    }
59407 +    else
59408 +    {
59409 +       printk ("%s: flush response from %d - not passivating (%s) or XIDs mismatch (%llx %llx)\n", rail->Name, 
59410 +               msg->Hdr.NodeId, NodeStateNames[nodeRail->State], (long long) nodeRail->MsgXid.Unique, (long long) msg->Hdr.Xid.Unique);
59411 +    }
59412 +}
59413 +
59414 +static void
59415 +ProcessMapNmdRequest (EP_RAIL *msgRail, EP_RAIL *rail, EP_MANAGER_MSG *msg)
59416 +{
59417 +    EP_SYS          *sys     = rail->System;
59418 +    EP_MAP_NMD_BODY *msgBody = &msg->Body.MapNmd;
59419 +    int              i;
59420 +
59421 +    EPRINTF4 (DBG_MANAGER, "%s: Map NMD request from %d for %d NMDs to railmask %x\n", rail->Name, msg->Hdr.NodeId, msgBody->nFrags, msgBody->Railmask);
59422 +    
59423 +    for (i = 0; i < msgBody->nFrags; i++)
59424 +       ep_nmd_map_rails (sys, &msgBody->Nmd[i], msgBody->Railmask);
59425 +    
59426 +    /* Must flush TLBs before responding */
59427 +    for (i = 0; i < EP_MAX_RAILS; i++)
59428 +       if (sys->Rails[i] && sys->Rails[i]->TlbFlushRequired)
59429 +           ep_perrail_dvma_sync (sys->Rails[i]);
59430 +
59431 +    if (ep_send_message (rail, msg->Hdr.NodeId, EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE, msg->Hdr.Xid, &msg->Body) < 0)
59432 +       printk ("%s: map nmd request for %s[%d] - failed to send response\n", msgRail->Name, rail->Name, msg->Hdr.NodeId);
59433 +}
59434 +
59435 +static void
59436 +ProcessXidMessage (EP_RAIL *msgRail, EP_MANAGER_MSG *msg, EP_XID xid)
59437 +{
59438 +    EP_XID_CACHE *xidCache = ep_xid_cache_find (msgRail->System, xid);
59439 +
59440 +    EPRINTF6 (DBG_MANAGER, "%s: ProcessXidMessage: XID=%08x.%0x8.%016llx -> %p(%p)\n",
59441 +             msgRail->Name, xid.Generation, xid.Handle, (long long) xid.Unique,
59442 +             xidCache  ? xidCache->MessageHandler : 0, xidCache  ? xidCache->Arg : 0);
59443 +    
59444 +    if (xidCache != NULL)
59445 +       xidCache->MessageHandler (xidCache->Arg, msg);
59446 +}
59447 +
59448 +static void
59449 +ProcessMessage (EP_RAIL *msgRail, void *arg, void *msgbuf)
59450 +{
59451 +    EP_SYS         *sys  = msgRail->System;
59452 +    EP_MANAGER_MSG *msg  = (EP_MANAGER_MSG *) msgbuf;
59453 +    uint16_t        csum = msg->Hdr.Checksum;
59454 +    EP_RAIL        *rail;
59455 +
59456 +    if (msg->Hdr.Version != EP_MANAGER_MSG_VERSION)
59457 +       return;
59458 +
59459 +    msg->Hdr.Checksum= 0;
59460 +    if (CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE) != csum)
59461 +    {
59462 +       printk ("%s: checksum failed on msg from %d (%d) (%x != %x) ?\n", msgRail->Name, msg->Hdr.NodeId, msg->Hdr.Type, csum, CheckSum ((char *) msg, EP_MANAGER_MSG_SIZE));
59463 +       return;
59464 +    }
59465 +
59466 +    if ((rail = sys->Rails[msg->Hdr.Rail]) == NULL)
59467 +    {
59468 +       printk ("%s: rail no longer exists for msg from %d?\n", msgRail->Name, msg->Hdr.NodeId);
59469 +       return;
59470 +    }
59471 +
59472 +    EPRINTF7 (DBG_MANAGER, "%s: ProcessMessage (%s) type=%d node=%d XID=%08x.%0x8.%016llx\n", 
59473 +             msgRail->Name, rail->Name, msg->Hdr.Type, msg->Hdr.NodeId,
59474 +             msg->Hdr.Xid.Generation, msg->Hdr.Xid.Handle, (long long)msg->Hdr.Xid.Unique);
59475 +
59476 +    switch (msg->Hdr.Type)
59477 +    {
59478 +    case EP_MANAGER_MSG_TYPE_REMOTE_PANIC:
59479 +       msg->Body.PanicReason[EP_PANIC_STRLEN] = '\0';          /* ensure string terminated */
59480 +
59481 +       printk ("%s: remote panic call from elan node %d - %s\n", msgRail->Name, msg->Hdr.NodeId, msg->Body.PanicReason);
59482 +       panic ("ep: remote panic request\n");
59483 +       break;
59484 +
59485 +    case EP_MANAGER_MSG_TYPE_NETERR_REQUEST:
59486 +       ProcessNeterrRequest (msgRail, rail, msg);
59487 +       break;
59488 +
59489 +    case EP_MANAGER_MSG_TYPE_NETERR_RESPONSE:
59490 +       ProcessNeterrResponse (msgRail, rail, msg);
59491 +       break;
59492 +
59493 +    case EP_MANAGER_MSG_TYPE_FLUSH_REQUEST:
59494 +       ProcessFlushRequest (msgRail, rail, msg);
59495 +       break;
59496 +
59497 +    case EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE:
59498 +       ProcessFlushResponse (msgRail, rail, msg);
59499 +       break;
59500 +
59501 +    case EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST:
59502 +       ProcessMapNmdRequest (msgRail, rail, msg);
59503 +       break;
59504 +
59505 +    case EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE:
59506 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
59507 +       break;
59508 +
59509 +    case EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST:
59510 +       ProcessXidMessage (msgRail, msg, msg->Body.Failover.Xid);
59511 +       break;
59512 +
59513 +    case EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE:
59514 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
59515 +       break;
59516 +       
59517 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE:
59518 +       ProcessGetNodeState (msgRail, rail, msg);
59519 +       break;
59520 +
59521 +    case EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE: 
59522 +       ProcessXidMessage (msgRail, msg, msg->Hdr.Xid);
59523 +       break;
59524 +
59525 +    default:
59526 +       printk ("%s: Unknown message type %d from %d\n", msgRail->Name, msg->Hdr.Type, msg->Hdr.NodeId);
59527 +       break;
59528 +    }
59529 +}
59530 +
59531 +
59532 +static void
59533 +ManagerQueueEvent (EP_RAIL *rail, void *arg)
59534 +{
59535 +    ep_kthread_schedule ((EP_KTHREAD *) arg, lbolt);
59536 +}
59537 +
59538 +void
59539 +UpdateConnectionState (EP_RAIL *rail, statemap_t *map)
59540 +{
59541 +    EP_SYS *sys = rail->System;
59542 +    bitmap_t seg;
59543 +    int offset, nodeId;
59544 +    unsigned long flags;
59545 +    
59546 +    while ((offset = statemap_findchange (map, &seg, 1)) >= 0)
59547 +    {
59548 +       for (nodeId = offset; nodeId < (offset + BT_NBIPUL) && nodeId < rail->Position.pos_nodes; nodeId++)
59549 +       {
59550 +           EP_NODE      *node     = &sys->Nodes[nodeId];
59551 +           EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId];
59552 +
59553 +           if (statemap_getbits (map, nodeId, 1))
59554 +           {
59555 +               spin_lock_irqsave (&sys->NodeLock, flags);
59556 +
59557 +               switch (nodeRail->State)
59558 +               {
59559 +               case EP_NODE_DISCONNECTED:
59560 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnected \n", rail->Name, nodeId);
59561 +                   break;
59562 +
59563 +               case EP_NODE_CONNECTING:
59564 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Connect\n", rail->Name, nodeId);
59565 +                   
59566 +                   /* load the route table entry *before*  setting the state
59567 +                    * to connected, since DMA's can be initiated as soon as
59568 +                    * the node is marked as connected */
59569 +                   rail->Operations.LoadNodeRoute (rail, nodeId);
59570 +                   
59571 +                   nodeRail->State = EP_NODE_CONNECTED;
59572 +                   
59573 +                   statemap_setbits (rail->NodeSet, nodeId, 1, 1);
59574 +                   if (statemap_getbits (sys->NodeSet, nodeId, 1) == 0)
59575 +                       statemap_setbits (sys->NodeSet, nodeId, 1, 1);
59576 +
59577 +                   /* Add to rails connected to this node */
59578 +                   node->ConnectedRails |= (1 << rail->Number);
59579 +
59580 +                   /* Finally lower the per-node context filter */
59581 +                   rail->Operations.LowerFilter (rail, nodeId);
59582 +                   break;
59583 +                   
59584 +               case EP_NODE_LEAVING_CONNECTED:
59585 +                   EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Local Passivate\n", rail->Name, nodeId);
59586 +                   
59587 +                   /* Raise the per-node context filter */
59588 +                   rail->Operations.RaiseFilter (rail, nodeId);
59589 +
59590 +                   /* If it's resolving network errors it will be on the NodeNeterrList,
59591 +                    * remove if from this list before placing it on the LocalPassivateList
59592 +                    * as we'll resolve the network error later in RemotePassivate */
59593 +                   if (nodeRail->NetworkErrorState)
59594 +                       list_del (&nodeRail->Link);
59595 +
59596 +                   list_add_tail (&nodeRail->Link, &rail->LocalPassivateList);
59597 +                   nodeRail->State = EP_NODE_LOCAL_PASSIVATE;
59598 +
59599 +                   /* Remove from rails connected to this node */
59600 +                   node->ConnectedRails &= ~(1 << rail->Number);
59601 +                   break;
59602 +
59603 +               default:
59604 +                   printk ("%s: Node %d - in NodeChangeMap with state %d\n", rail->Name, nodeId, nodeRail->State);
59605 +                   panic ("Node in NodeChangeMap with invalid state\n");
59606 +                   break;
59607 +               }
59608 +               spin_unlock_irqrestore (&sys->NodeLock, flags);
59609 +           }
59610 +       }
59611 +    }
59612 +}
59613 +
59614 +void
59615 +ProgressNetworkError (EP_RAIL *rail, EP_NODE_RAIL *nodeRail)
59616 +{
59617 +    EP_SYS             *sys    = rail->System;
59618 +    int                 nodeId = nodeRail - rail->Nodes;
59619 +    EP_MANAGER_MSG_BODY msg;
59620 +
59621 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_REMOTE_PASSIVATE);
59622 +
59623 +    if (BEFORE (lbolt, nodeRail->NextRunTime))
59624 +       return;
59625 +
59626 +    if (nodeRail->NetworkErrorState & EP_NODE_NETERR_DMA_PACKET)
59627 +       nodeRail->NetworkErrorState &= ~EP_NODE_NETERR_DMA_PACKET;
59628 +    
59629 +    if (nodeRail->NetworkErrorState & EP_NODE_NETERR_ATOMIC_PACKET)
59630 +    {
59631 +       if (EP_XID_INVALID (nodeRail->MsgXid))
59632 +           nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache);
59633 +
59634 +       msg.Cookies[0] = nodeRail->NetworkErrorCookies[0];
59635 +       msg.Cookies[1] = nodeRail->NetworkErrorCookies[1];
59636 +       
59637 +       EPRINTF4 (DBG_NETWORK_ERROR, "%s: progress neterr - node %d cookies %llx %llx\n", rail->Name, nodeId, (long long)msg.Cookies[0], (long long)msg.Cookies[1]);
59638 +
59639 +       if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_NETERR_REQUEST, nodeRail->MsgXid, &msg) == 0)
59640 +           nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
59641 +       else
59642 +           nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
59643 +    }
59644 +}
59645 +
59646 +long
59647 +ProgressNodeLists (EP_RAIL *rail, long nextRunTime)
59648 +{
59649 +    EP_SYS           *sys = rail->System;
59650 +    struct list_head *el, *nel;
59651 +    unsigned long flags;
59652 +
59653 +    spin_lock_irqsave (&sys->NodeLock, flags);
59654 +    list_for_each_safe (el, nel, &rail->NetworkErrorList) {
59655 +       EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link);
59656 +       int           nodeId   = nodeRail - rail->Nodes;
59657 +
59658 +       ProgressNetworkError (rail, nodeRail);
59659 +
59660 +       if (nodeRail->NetworkErrorState == 0)
59661 +       {
59662 +           EPRINTF2 (DBG_NETWORK_ERROR, "%s: lower context filter for node %d due to network error\n", rail->Name, nodeId);
59663 +
59664 +           rail->Operations.LowerFilter (rail, nodeId);
59665 +
59666 +           list_del (&nodeRail->Link);
59667 +           continue;
59668 +       }
59669 +       
59670 +       if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
59671 +           nextRunTime = nodeRail->NextRunTime;
59672 +    }
59673 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
59674 +
59675 +    if (! list_empty (&rail->LocalPassivateList))
59676 +    {
59677 +       EPRINTF1 (DBG_MANAGER, "%s: Locally Passivating Nodes\n", rail->Name);
59678 +       
59679 +       /* We have disconnected from some nodes or have left ourselves
59680 +        * flush through all communications and determine whether we
59681 +        * need to perform rail failover */
59682 +       rail->Operations.FlushFilters (rail);
59683 +       
59684 +       ep_call_callbacks (rail, EP_CB_FLUSH_FILTERING, rail->NodeSet);
59685 +
59686 +       rail->Operations.FlushQueues (rail);
59687 +
59688 +       ep_call_callbacks (rail, EP_CB_FLUSH_FLUSHING, rail->NodeSet);
59689 +
59690 +       while (! list_empty (&rail->LocalPassivateList))
59691 +       {
59692 +           EP_NODE_RAIL *nodeRail = list_entry (rail->LocalPassivateList.next, EP_NODE_RAIL, Link);
59693 +           int           nodeId   = nodeRail - rail->Nodes;
59694 +
59695 +           list_del (&nodeRail->Link);
59696 +
59697 +           rail->Operations.UnloadNodeRoute (rail, nodeId);
59698 +           
59699 +           if (nodeRail->NetworkErrorState == 0 && nodeRail->MessageState == 0)
59700 +           {
59701 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Disconnecting\n", rail->Name, nodeId);
59702 +
59703 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
59704 +               nodeRail->State = EP_NODE_DISCONNECTING;
59705 +           }
59706 +           else
59707 +           {
59708 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d -> Remote Passivate\n", rail->Name, nodeId);
59709 +
59710 +               list_add_tail (&nodeRail->Link, &rail->RemotePassivateList);
59711 +               nodeRail->State = EP_NODE_REMOTE_PASSIVATE;
59712 +
59713 +               if (nodeRail->NetworkErrorState == 0)
59714 +                   nodeRail->NextRunTime = lbolt;
59715 +           }
59716 +       }
59717 +
59718 +       ep_call_callbacks (rail, EP_CB_PASSIVATED, rail->NodeSet);
59719 +    }
59720 +
59721 +    list_for_each_safe (el, nel, &rail->RemotePassivateList) {
59722 +       EP_NODE_RAIL *nodeRail = list_entry (el, EP_NODE_RAIL, Link);
59723 +       int           nodeId   = nodeRail - rail->Nodes;
59724 +       EP_NODE      *node     = &sys->Nodes[nodeId];
59725 +
59726 +       if (node->ConnectedRails == 0)                          /* no rails connected to this node (anymore) */
59727 +       {
59728 +           /*  Remove from this list */
59729 +           list_del (&nodeRail->Link);
59730 +
59731 +           EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Remote Passivate -> Disconnecting\n", rail->Name, nodeId);
59732 +
59733 +           /* transition towards disconnected */
59734 +           list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
59735 +           nodeRail->State = EP_NODE_DISCONNECTING;
59736 +           continue;
59737 +       }
59738 +
59739 +       EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n",
59740 +                 rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState,
59741 +                 nodeRail->NextRunTime, nextRunTime);
59742 +
59743 +       if (nodeRail->NetworkErrorState)
59744 +       {
59745 +           ProgressNetworkError (rail, nodeRail);
59746 +       }
59747 +       else if (! BEFORE (lbolt, nodeRail->NextRunTime))
59748 +       {
59749 +           if (EP_XID_INVALID (nodeRail->MsgXid))
59750 +               nodeRail->MsgXid = ep_xid_cache_alloc (sys, &rail->XidCache);
59751 +
59752 +           if (ep_send_message (rail, nodeId, EP_MANAGER_MSG_TYPE_FLUSH_REQUEST, nodeRail->MsgXid, NULL) == 0)
59753 +               nodeRail->NextRunTime = lbolt + MESSAGE_RETRY_TIME;
59754 +           else
59755 +               nodeRail->NextRunTime = lbolt + MSGBUSY_RETRY_TIME;
59756 +       }
59757 +
59758 +       if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
59759 +           nextRunTime = nodeRail->NextRunTime;
59760 +    }
59761 +    
59762 +    if (! list_empty (&rail->PassivatedList)) 
59763 +    {
59764 +       ep_call_callbacks (rail, EP_CB_FAILOVER, rail->NodeSet);
59765 +
59766 +       list_for_each_safe (el, nel, &rail->PassivatedList) {
59767 +           EP_NODE_RAIL *nodeRail = list_entry (rail->PassivatedList.next, EP_NODE_RAIL, Link);
59768 +           int           nodeId   = nodeRail - rail->Nodes;
59769 +           EP_NODE      *node     = &sys->Nodes[nodeId];
59770 +
59771 +           ASSERT (nodeRail->NetworkErrorState == 0);
59772 +
59773 +           if (node->ConnectedRails == 0)
59774 +           {
59775 +               /*  Remove from this list */
59776 +               list_del (&nodeRail->Link);
59777 +
59778 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d, no rails, Passivated -> Disconnecting\n", rail->Name, nodeId);
59779 +
59780 +               /* transition towards disconnected */
59781 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
59782 +               nodeRail->State = EP_NODE_DISCONNECTING;
59783 +               continue;
59784 +           }
59785 +           
59786 +           EPRINTF6 (DBG_MANAGER, "%s: Node %d - %s NetworkErrorState=%x NextRunTime=%lx (%lx)\n",
59787 +                     rail->Name, nodeId, NodeStateNames[nodeRail->State], nodeRail->NetworkErrorState,
59788 +                     nodeRail->NextRunTime, nextRunTime);
59789 +
59790 +           if (nodeRail->MessageState == 0)
59791 +           {
59792 +               EPRINTF2 (DBG_MANAGER, "%s: Node %d, no messages, Passivated -> Disconnecting\n", rail->Name,nodeId);
59793 +
59794 +               list_del (&nodeRail->Link);
59795 +               list_add_tail (&nodeRail->Link, &rail->DisconnectingList);
59796 +               nodeRail->State = EP_NODE_DISCONNECTING;
59797 +               continue;
59798 +           }
59799 +
59800 +           nodeRail->MessageState = 0;
59801 +           nodeRail->NextRunTime  = lbolt + FAILOVER_RETRY_TIME;
59802 +
59803 +           if (nextRunTime == 0 || AFTER (nextRunTime, nodeRail->NextRunTime))
59804 +               nextRunTime = nodeRail->NextRunTime;
59805 +       }
59806 +    }
59807 +
59808 +    if (! list_empty (&rail->DisconnectingList))
59809 +    {
59810 +       ep_call_callbacks (rail, EP_CB_DISCONNECTING, rail->NodeSet);
59811 +
59812 +       while (! list_empty (&rail->DisconnectingList))
59813 +       {
59814 +           EP_NODE_RAIL *nodeRail = list_entry (rail->DisconnectingList.next, EP_NODE_RAIL, Link);
59815 +           int           nodeId   = nodeRail - rail->Nodes;
59816 +           EP_NODE      *node     = &sys->Nodes[nodeId];
59817 +
59818 +           EPRINTF2 (DBG_MANAGER, "%s: Node %d, Disconnecting -> Disconnected\n", rail->Name, nodeId);
59819 +
59820 +           list_del (&nodeRail->Link);
59821 +
59822 +           rail->Operations.NodeDisconnected (rail, nodeId);
59823 +
59824 +           /* Clear the network error state */
59825 +           nodeRail->NextRunTime            = 0;
59826 +           nodeRail->NetworkErrorState      = 0;
59827 +           nodeRail->NetworkErrorCookies[0] = 0;
59828 +           nodeRail->NetworkErrorCookies[1] = 0;
59829 +
59830 +           /* Clear the message state */
59831 +           nodeRail->MessageState = 0;
59832 +
59833 +           cm_node_disconnected (rail, nodeId);
59834 +
59835 +           nodeRail->State = EP_NODE_DISCONNECTED;
59836 +           
59837 +           statemap_setbits (rail->NodeSet, nodeId, 0, 1);
59838 +
59839 +           if (node->ConnectedRails == 0)
59840 +               statemap_setbits (sys->NodeSet, nodeId, 0, 1);
59841 +       }
59842 +
59843 +       ep_call_callbacks (rail, EP_CB_DISCONNECTED, rail->NodeSet);
59844 +    }
59845 +
59846 +    return (nextRunTime);
59847 +}
59848 +
59849 +void
59850 +DisplayNodes (EP_RAIL *rail)
59851 +{
59852 +    EP_SYS *sys = rail->System;
59853 +    int i, state, count;
59854 +    unsigned long flags;
59855 +
59856 +    spin_lock_irqsave (&sys->NodeLock, flags);
59857 +
59858 +    for (state = 0; state < EP_NODE_NUM_STATES; state++)
59859 +    {
59860 +       for (count = i = 0; i < rail->Position.pos_nodes; i++)
59861 +       {
59862 +           ASSERT (rail->Nodes[i].State < EP_NODE_NUM_STATES);
59863 +
59864 +           if (rail->Nodes[i].State == state)
59865 +               if (state != EP_NODE_DISCONNECTED)
59866 +                   printk ("%s %d", !count++ ? NodeStateNames[state] : "", i);
59867 +       }
59868 +       if (count)
59869 +           printk ("%s (%d total)\n", state == EP_NODE_DISCONNECTED ? NodeStateNames[state] : "", count);
59870 +    }
59871 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
59872 +}
59873 +
59874 +static void
59875 +PositionFound (EP_RAIL *rail, ELAN_POSITION *pos)
59876 +{
59877 +    EP_SYS           *sys = rail->System;
59878 +    struct list_head *el;
59879 +    int i;
59880 +
59881 +    /* only called from the ep_managage whilst rail->State == EP_RAIL_STATE_STARTED */
59882 +    ASSERT ( rail->State == EP_RAIL_STATE_STARTED );
59883 +
59884 +#if defined(PER_CPU_TIMEOUT)
59885 +    /*
59886 +     * On Tru64 - if we're running in a "funnelled" thread, then we will be 
59887 +     * unable to start the per-cpu timeouts, so if we return then eventually
59888 +     * the ep_manager() thread will find the network position and we're
59889 +     * in control of our own destiny.
59890 +     */
59891 +    if (THREAD_IS_FUNNELED(current_thread()))
59892 +    {
59893 +       ep_kthread_schedule (&sys->ManagerThread, lbolt);
59894 +       return;
59895 +    }
59896 +#endif
59897 +
59898 +    sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid);
59899 +
59900 +    if (pos->pos_levels > MaxSwitchLevels)
59901 +    {
59902 +       for (i = 0; i < (pos->pos_levels - MaxSwitchLevels); i++)
59903 +           pos->pos_nodes /= pos->pos_arity[i];
59904 +
59905 +       for (i = 0; i < MaxSwitchLevels; i++)
59906 +           pos->pos_arity[i] = pos->pos_arity[i + (pos->pos_levels - MaxSwitchLevels)];
59907 +
59908 +       pos->pos_levels = MaxSwitchLevels;
59909 +       pos->pos_nodeid = pos->pos_nodeid % pos->pos_nodes;
59910 +                               
59911 +       printk ("%s: limiting switch levels to %d\n", rail->Name, MaxSwitchLevels);
59912 +       printk ("%s: nodeid=%d level=%d numnodes=%d\n", rail->Name, pos->pos_nodeid, pos->pos_levels, pos->pos_nodes);
59913 +
59914 +       sprintf (rail->Name, "ep%d[%d]", rail->Number, pos->pos_nodeid);
59915 +    }
59916 +
59917 +    if (rail->Position.pos_mode != ELAN_POS_UNKNOWN && rail->Position.pos_nodeid != pos->pos_nodeid)
59918 +    {
59919 +       printk ("%s: NodeId has changed from %d to %d\n", rail->Name, rail->Position.pos_nodeid, pos->pos_nodeid);
59920 +       panic ("ep: PositionFound: NodeId has changed\n");
59921 +    }
59922 +
59923 +    if (sys->Position.pos_mode != ELAN_POS_UNKNOWN && (sys->Position.pos_nodeid != pos->pos_nodeid || sys->Position.pos_nodes != pos->pos_nodes))
59924 +    {
59925 +       printk ("%s: position incompatible - disabling rail\n", rail->Name);
59926 +       rail->State = EP_RAIL_STATE_INCOMPATIBLE;
59927 +       return;
59928 +    }
59929 +
59930 +    if (sys->Position.pos_mode == ELAN_POS_UNKNOWN)
59931 +    {
59932 +       sys->Position = *pos;
59933 +       sys->NodeSet  = statemap_create (pos->pos_nodes);
59934 +       KMEM_ZALLOC (sys->Nodes, EP_NODE *, pos->pos_nodes * sizeof (EP_NODE), 1);
59935 +    }
59936 +
59937 +    rail->Position             = *pos;
59938 +    rail->SwitchBroadcastLevel = pos->pos_levels - 1;
59939 +    rail->State                = EP_RAIL_STATE_RUNNING;
59940 +
59941 +    for (i = 0; i < pos->pos_levels; i++)
59942 +    {
59943 +       rail->SwitchProbeTick[i]   = lbolt;
59944 +       rail->SwitchLast[i].uplink = 4;
59945 +    }
59946 +
59947 +    rail->Operations.PositionFound (rail, pos);
59948 +
59949 +    INIT_LIST_HEAD (&rail->NetworkErrorList);
59950 +    INIT_LIST_HEAD (&rail->LocalPassivateList);
59951 +    INIT_LIST_HEAD (&rail->RemotePassivateList);
59952 +    INIT_LIST_HEAD (&rail->PassivatedList);
59953 +    INIT_LIST_HEAD (&rail->DisconnectingList);
59954 +
59955 +    rail->NodeSet       = statemap_create (rail->Position.pos_nodes);
59956 +    rail->NodeChangeMap = statemap_create (rail->Position.pos_nodes);
59957 +    rail->NodeChangeTmp = statemap_create (rail->Position.pos_nodes);
59958 +
59959 +    KMEM_ZALLOC (rail->Nodes, EP_NODE_RAIL *, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL), 1);
59960 +
59961 +    for (i = 0; i < rail->Position.pos_nodes; i++)
59962 +    {
59963 +       spin_lock_init (&rail->Nodes[i].CookieLock);
59964 +
59965 +       INIT_LIST_HEAD (&rail->Nodes[i].StalledDmas);
59966 +
59967 +       rail->Nodes[i].State = EP_NODE_DISCONNECTED;
59968 +    }
59969 +
59970 +    /* Notify all subsystems that a new rail has been enabled */
59971 +    kmutex_lock (&sys->SubsysLock);
59972 +    list_for_each (el, &sys->Subsystems) { 
59973 +       EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
59974 +
59975 +       if (subsys->AddRail)
59976 +           subsys->AddRail (subsys, sys, rail);
59977 +
59978 +       /* XXXX: what to do if the subsystem refused to add the rail ? */
59979 +    }
59980 +    kmutex_unlock (&sys->SubsysLock);
59981 +
59982 +    /* Now enable the manager input queue */
59983 +    ep_enable_inputq (rail, rail->ManagerInputQ);
59984 +}
59985 +
59986 +static void
59987 +ep_manager (void *arg)
59988 +{
59989 +    EP_SYS            *sys = (EP_SYS *) arg;
59990 +    struct list_head *el;
59991 +    ELAN_POSITION     pos;
59992 +    unsigned long     flags;
59993 +
59994 +    kernel_thread_init ("ep_manager");
59995 +    kernel_thread_become_highpri();
59996 +
59997 +    for (;;)
59998 +    {
59999 +       long nextRunTime = lbolt + MSEC2TICKS(CM_THREAD_SCHEDULE_TIMEOUT);
60000 +
60001 +       list_for_each (el, &sys->ManagedRails) {
60002 +           EP_RAIL *rail = list_entry (el, EP_RAIL, ManagerLink);
60003 +
60004 +           switch (rail->State)
60005 +           {
60006 +           case EP_RAIL_STATE_STARTED:
60007 +               if (ProbeNetwork (rail, &pos) == 0)
60008 +               {
60009 +                   PositionFound (rail, &pos);
60010 +                   break;
60011 +               }
60012 +
60013 +               if (nextRunTime == 0 || AFTER (nextRunTime, lbolt + HZ))
60014 +                   nextRunTime = lbolt + HZ;
60015 +               break;
60016 +
60017 +           case EP_RAIL_STATE_RUNNING:
60018 +               if (ep_poll_inputq (rail, rail->ManagerInputQ, 100, ProcessMessage, rail) >= 100)
60019 +                   nextRunTime = lbolt;
60020 +               
60021 +               /* Handle any nodes which the cluster membership subsystem
60022 +                * has indicated are to begin connecting or disconnecting */
60023 +               spin_lock_irqsave (&sys->NodeLock, flags);
60024 +               if (! statemap_changed (rail->NodeChangeMap))
60025 +                   spin_unlock_irqrestore (&sys->NodeLock, flags);
60026 +               else
60027 +               {
60028 +                   /*
60029 +                    * Take a copy of the statemap, and zero all entries so
60030 +                    * we only see new requests next time
60031 +                    */
60032 +                   statemap_copy (rail->NodeChangeTmp, rail->NodeChangeMap);
60033 +                   statemap_zero (rail->NodeChangeMap);
60034 +                   spin_unlock_irqrestore (&sys->NodeLock, flags);
60035 +                   
60036 +                   UpdateConnectionState (rail, rail->NodeChangeTmp);
60037 +               }
60038 +
60039 +               nextRunTime = ProgressNodeLists (rail, nextRunTime);
60040 +
60041 +               if (statemap_changed (rail->NodeSet))
60042 +               {
60043 +                   ep_call_callbacks (rail, EP_CB_NODESET, rail->NodeSet);
60044 +
60045 +                   statemap_clearchanges (rail->NodeSet);
60046 +               }
60047 +               break;
60048 +
60049 +           case EP_RAIL_STATE_INCOMPATIBLE:
60050 +               break;
60051 +           }
60052 +       }
60053 +
60054 +
60055 +       EPRINTF5 (DBG_MANAGER, "ep_manager: sleep now=%lx nextRunTime=%lx (%ld) [%lx (%ld)]\n",
60056 +                 lbolt, nextRunTime, nextRunTime ? nextRunTime - lbolt : 0, sys->ManagerThread.next_run,
60057 +                 sys->ManagerThread.next_run ? sys->ManagerThread.next_run - lbolt : 0);
60058 +
60059 +       if (ep_kthread_sleep (&sys->ManagerThread, nextRunTime) < 0)
60060 +           break;
60061 +    }
60062 +
60063 +    ep_kthread_stopped (&sys->ManagerThread);
60064 +    kernel_thread_exit();
60065 +}
60066 +
60067 +void
60068 +ep_connect_node (EP_RAIL *rail, int nodeId)
60069 +{
60070 +    EP_SYS       *sys  = rail->System;
60071 +    EP_NODE_RAIL *node = &rail->Nodes[nodeId];
60072 +    unsigned long flags;
60073 +  
60074 +    spin_lock_irqsave (&sys->NodeLock, flags);
60075 +
60076 +    EPRINTF2 (DBG_MANAGER, "%s: ep_connect_node: nodeId %d\n", rail->Name, nodeId);
60077 +
60078 +    ASSERT (node->State == EP_NODE_DISCONNECTED && statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0);
60079 +    
60080 +    node->State = EP_NODE_CONNECTING;
60081 +
60082 +    statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1);
60083 +
60084 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
60085 +
60086 +    ep_kthread_schedule (&sys->ManagerThread, lbolt);
60087 +}
60088 +
60089 +int
60090 +ep_disconnect_node (EP_RAIL *rail, int nodeId)
60091 +{
60092 +    EP_SYS       *sys  = rail->System;
60093 +    EP_NODE_RAIL *node = &rail->Nodes[nodeId];
60094 +    int                  state;
60095 +    unsigned long flags;
60096 +  
60097 +    spin_lock_irqsave (&sys->NodeLock, flags);
60098 +    
60099 +    EPRINTF3 (DBG_MANAGER, "%s: ep_disconnect_node: nodeId %d - %s\n", rail->Name, nodeId, NodeStateNames[node->State]);
60100 +
60101 +    switch (state = node->State)
60102 +    {
60103 +    case EP_NODE_CONNECTING:
60104 +       statemap_setbits (rail->NodeChangeMap, nodeId, 0, 1);
60105 +
60106 +       node->State = EP_NODE_DISCONNECTED;
60107 +       break;
60108 +       
60109 +    case EP_NODE_CONNECTED:
60110 +       statemap_setbits (rail->NodeChangeMap, nodeId, 1, 1);
60111 +
60112 +       node->State = EP_NODE_LEAVING_CONNECTED;
60113 +       break;
60114 +
60115 +    case EP_NODE_LEAVING_CONNECTED:
60116 +       /* no assert on NodeChangeMap as the map could have been taken but not acted on */
60117 +       break;
60118 +       
60119 +    default:
60120 +       ASSERT (statemap_getbits (rail->NodeChangeMap, nodeId, 1) == 0);
60121 +       break;
60122 +    }
60123 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
60124 +
60125 +    if (state == EP_NODE_CONNECTED)
60126 +       ep_kthread_schedule (&sys->ManagerThread, lbolt);
60127 +
60128 +    return state;
60129 +}
60130 +
60131 +int
60132 +ep_manager_add_rail (EP_SYS *sys, EP_RAIL *rail)
60133 +{
60134 +    if ((rail->ManagerOutputQ = ep_alloc_outputq (rail, EP_MANAGER_MSG_SIZE, EP_MANAGER_OUTPUTQ_SLOTS)) == NULL)
60135 +       return -ENOMEM;
60136 +
60137 +    if ((rail->ManagerInputQ = ep_alloc_inputq (rail, EP_SYSTEMQ_MANAGER, EP_MANAGER_MSG_SIZE, EP_MANAGER_INPUTQ_SLOTS,
60138 +                                                ManagerQueueEvent, &sys->ManagerThread)) == NULL)
60139 +    {
60140 +       ep_free_outputq (rail, rail->ManagerOutputQ);
60141 +       return -ENOMEM;
60142 +    }
60143 +
60144 +    spin_lock_init (&rail->ManagerOutputQLock);
60145 +
60146 +    ep_xid_cache_init (sys, &rail->XidCache);
60147 +
60148 +    ep_kthread_stall (&sys->ManagerThread);
60149 +    list_add_tail (&rail->ManagerLink, &sys->ManagedRails);
60150 +    ep_kthread_resume (&sys->ManagerThread);
60151 +
60152 +    return (0);
60153 +}
60154 +
60155 +void
60156 +ep_manager_remove_rail (EP_SYS *sys, EP_RAIL *rail)
60157 +{
60158 +    if (rail->ManagerInputQ != NULL)
60159 +    {
60160 +       ep_kthread_stall (&sys->ManagerThread);
60161 +       list_del (&rail->ManagerLink);
60162 +       ep_kthread_resume (&sys->ManagerThread);
60163 +
60164 +       ep_xid_cache_destroy (sys, &rail->XidCache);
60165 +
60166 +       spin_lock_destroy (&rail->ManagerOutputQLock);
60167 +
60168 +       ep_disable_inputq (rail, rail->ManagerInputQ);
60169 +       ep_free_inputq (rail, rail->ManagerInputQ);
60170 +       ep_free_outputq (rail, rail->ManagerOutputQ);
60171 +    }
60172 +}
60173 +
60174 +int
60175 +ep_manager_init (EP_SYS *sys)
60176 +{
60177 +    INIT_LIST_HEAD (&sys->ManagedRails);
60178 +
60179 +    ep_kthread_init (&sys->ManagerThread);
60180 +
60181 +    if (kernel_thread_create (ep_manager, (void *) sys) == 0)
60182 +       return (ENOMEM);
60183 +    
60184 +    ep_kthread_started (&sys->ManagerThread);
60185 +    
60186 +    return (0);
60187 +}
60188 +
60189 +void
60190 +ep_manager_fini (EP_SYS *sys)
60191 +{
60192 +    ep_kthread_stop (&sys->ManagerThread);
60193 +    ep_kthread_destroy (&sys->ManagerThread);
60194 +}
60195 +
60196 +int
60197 +ep_sys_init (EP_SYS *sys)
60198 +{
60199 +    kmutex_init (&sys->SubsysLock);   
60200 +    kmutex_init (&sys->StartStopLock);
60201 +    spin_lock_init (&sys->NodeLock);
60202 +
60203 +    INIT_LIST_HEAD (&sys->Subsystems);
60204 +
60205 +    /* initialise the xid allocators */
60206 +    spin_lock_init (&sys->XidLock);
60207 +    INIT_LIST_HEAD (&sys->XidCacheList);
60208 +
60209 +    /* initially don't know where we are in the network */
60210 +    sys->Position.pos_mode = ELAN_POS_UNKNOWN;
60211 +
60212 +    /* initialise the network mapping descriptor hash tables */
60213 +    ep_nmh_init (&sys->MappingTable);
60214 +
60215 +    /* intialise the shared allocators */
60216 +    ep_shared_alloc_init (sys);
60217 +
60218 +    /* initialise the dvma space */
60219 +    ep_dvma_init (sys);
60220 +
60221 +    /* intiialise the rail manager */
60222 +    ep_manager_init (sys);
60223 +
60224 +    /* initialise all subsystems */
60225 +    cm_init (sys);
60226 +    ep_comms_init (sys);
60227 +    //ep_msgsys_init (sys);
60228 +
60229 +    return (0);
60230 +}
60231 +
60232 +void
60233 +ep_sys_fini (EP_SYS *sys)
60234 +{
60235 +    /* Destroy the subsystems in the reverse order to their creation */
60236 +    while (! list_empty (&sys->Subsystems))
60237 +    {
60238 +       EP_SUBSYS *subsys = list_entry (sys->Subsystems.prev, EP_SUBSYS, Link);
60239 +
60240 +       list_del (&subsys->Link);
60241 +       
60242 +       subsys->Destroy (subsys, sys);
60243 +    }
60244 +
60245 +    ep_manager_fini(sys);
60246 +    ep_dvma_fini (sys);
60247 +    ep_shared_alloc_fini (sys);
60248 +
60249 +    ep_nmh_fini (&sys->MappingTable);
60250 +
60251 +    if (sys->Position.pos_mode != ELAN_POS_UNKNOWN) {
60252 +       statemap_destroy (sys->NodeSet);
60253 +       KMEM_FREE(sys->Nodes, sys->Position.pos_nodes * sizeof (EP_NODE));
60254 +    }
60255 +
60256 +    spin_lock_destroy (&sys->XidLock);
60257 +
60258 +    spin_lock_destroy (&sys->NodeLock);
60259 +    kmutex_destroy (&sys->SubsysLock);
60260 +    kmutex_destroy (&sys->StartStopLock);
60261 +}
60262 +
60263 +void
60264 +ep_shutdown (EP_SYS *sys)
60265 +{
60266 +    sys->Shutdown = 1;
60267 +}
60268 +
60269 +int
60270 +ep_init_rail (EP_SYS *sys, EP_RAIL *rail)
60271 +{
60272 +    static int rnum;
60273 +
60274 +    rail->System              = sys;
60275 +    rail->State               = EP_RAIL_STATE_UNINITIALISED;
60276 +    rail->Number              = rnum++;
60277 +    rail->Position.pos_mode   = ELAN_POS_UNKNOWN;
60278 +    rail->Position.pos_nodeid = ELAN_INVALID_NODE;
60279 +
60280 +    rail->CallbackRegistered  = 0;
60281 +
60282 +    sprintf (rail->Name, "ep%d", rail->Number);
60283 +
60284 +    /* Initialise externally visible locks */
60285 +    kmutex_init (&rail->CallbackLock);
60286 +
60287 +    ep_alloc_init (rail);
60288 +
60289 +    sys->Rails[rail->Number] = rail;
60290 +
60291 +    return 0;
60292 +}
60293 +
60294 +void
60295 +ep_destroy_rail (EP_RAIL *rail)
60296 +{
60297 +    ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED);
60298 +
60299 +    ep_alloc_fini (rail);
60300 +
60301 +    kmutex_destroy (&rail->CallbackLock);
60302 +
60303 +    rail->System->Rails[rail->Number] = NULL;
60304 +
60305 +    rail->Operations.DestroyRail (rail);
60306 +}
60307 +
60308 +/* We need to traverse the Subsystems lists backwards
60309 + * but it's not defined in <linux/list.h> */
60310 +#define list_for_each_backwards(pos,list) \
60311 +       for (pos = (list)->prev; pos != (list); \
60312 +            pos = (pos)->prev)
60313 +
60314 +void
60315 +__ep_stop_rail (EP_RAIL *rail)
60316 +{
60317 +    /* called holding the sys->Lock */
60318 +    EP_SYS           *sys = rail->System;
60319 +    struct list_head *el;
60320 +
60321 +    rail->Operations.StallRail (rail);
60322 +
60323 +    /* Notify all subsystems that this rail is being stopped */
60324 +    if (rail->State == EP_RAIL_STATE_RUNNING)
60325 +    {
60326 +       kmutex_lock (&sys->SubsysLock);
60327 +       list_for_each_backwards (el, &sys->Subsystems) { 
60328 +           EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
60329 +           
60330 +           if (subsys->RemoveRail)
60331 +               subsys->RemoveRail (subsys, sys, rail);
60332 +       }
60333 +       kmutex_unlock (&sys->SubsysLock);
60334 +
60335 +       ep_manager_remove_rail (sys, rail);
60336 +
60337 +       KMEM_FREE (rail->Nodes, rail->Position.pos_nodes * sizeof (EP_NODE_RAIL));
60338 +
60339 +       statemap_destroy (rail->NodeChangeTmp);
60340 +       statemap_destroy (rail->NodeChangeMap);
60341 +       statemap_destroy (rail->NodeSet);
60342 +    }
60343 +
60344 +    ep_dvma_remove_rail (sys, rail);
60345 +    ep_shared_alloc_remove_rail (sys, rail);
60346 +
60347 +    rail->Operations.StopRail (rail);
60348 +
60349 +    rail->State = EP_RAIL_STATE_UNINITIALISED;
60350 +}
60351 +
60352 +void
60353 +ep_stop_rail (EP_RAIL *rail)
60354 +{
60355 +    EP_SYS *sys = rail->System;
60356 +
60357 +    /* stall ep_manager                      */
60358 +    /* and remove the rail from the manaager */
60359 +
60360 +    ep_kthread_stall (&sys->ManagerThread);
60361 +    if ( rail->State == EP_RAIL_STATE_STARTED ) 
60362 +        ep_manager_remove_rail (sys, rail);
60363 +    ep_kthread_resume (&sys->ManagerThread);
60364 +
60365 +    __ep_stop_rail (rail);
60366 +}
60367 +
60368 +int
60369 +ep_start_rail (EP_RAIL *rail)
60370 +{
60371 +    EP_SYS *sys = rail->System;
60372 +
60373 +    ASSERT (rail->State == EP_RAIL_STATE_UNINITIALISED);
60374 +
60375 +    if (rail->Operations.StartRail (rail) < 0)
60376 +       return -ENXIO;
60377 +    
60378 +    kmutex_lock (&sys->StartStopLock);
60379 +    /* Add this rail to the shared allocator */
60380 +    if (ep_shared_alloc_add_rail (rail->System, rail))
60381 +       goto failed;
60382 +
60383 +    /* Add this rail to dvma kmap */
60384 +    if (ep_dvma_add_rail (rail->System, rail))
60385 +       goto failed;
60386 +
60387 +    /* rail is now started */
60388 +    rail->State = EP_RAIL_STATE_STARTED;
60389 +
60390 +    /* notify the rail manager of the new rail */
60391 +    if (ep_manager_add_rail (rail->System, rail))
60392 +       goto failed;
60393 +
60394 +    kmutex_unlock (&sys->StartStopLock);
60395 +    return (ESUCCESS);
60396 +
60397 + failed:
60398 +    printk ("%s: start failed\n", rail->Name);
60399 +    kmutex_unlock (&sys->StartStopLock);
60400 +    __ep_stop_rail (rail);
60401 +
60402 +    return (ENOMEM);   
60403 +}
60404 +
60405 +void
60406 +ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys)
60407 +{
60408 +    kmutex_lock (&sys->SubsysLock);
60409 +    list_add_tail (&subsys->Link, &sys->Subsystems);
60410 +    kmutex_unlock (&sys->SubsysLock);
60411 +}
60412 +
60413 +void
60414 +ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys)
60415 +{
60416 +    kmutex_lock (&sys->SubsysLock);
60417 +    list_del (&subsys->Link);
60418 +    kmutex_unlock (&sys->SubsysLock);
60419 +}
60420 +
60421 +EP_SUBSYS *
60422 +ep_subsys_find (EP_SYS *sys, char *name)
60423 +{
60424 +    struct list_head *el;
60425 +
60426 +    ASSERT ( !in_interrupt());
60427 +
60428 +    kmutex_lock (&sys->SubsysLock); 
60429 +    list_for_each (el, &sys->Subsystems) {
60430 +       EP_SUBSYS *subsys = list_entry (el, EP_SUBSYS, Link);
60431 +
60432 +       if (! strcmp (subsys->Name, name))
60433 +       {
60434 +           kmutex_unlock (&sys->SubsysLock);
60435 +           return (subsys);
60436 +       }
60437 +    }
60438 +
60439 +    kmutex_unlock (&sys->SubsysLock);
60440 +    return (NULL);
60441 +}
60442 +
60443 +int
60444 +ep_waitfor_nodeid (EP_SYS *sys)
60445 +{
60446 +    int i, printed = 0;
60447 +    kcondvar_t Sleep;
60448 +    spinlock_t Lock;
60449 +
60450 +    kcondvar_init (&Sleep);
60451 +    spin_lock_init (&Lock);
60452 +
60453 +#define TICKS_TO_WAIT  (10*hz)
60454 +#define TICKS_PER_LOOP (hz/10)
60455 +    for (i = 0; sys->Position.pos_mode == ELAN_POS_UNKNOWN && i < TICKS_TO_WAIT; i += TICKS_PER_LOOP)
60456 +    {
60457 +       if (! printed++)
60458 +           printk ("ep: waiting for network position to be found\n");
60459 +
60460 +       spin_lock (&Lock);
60461 +       kcondvar_timedwait (&Sleep, &Lock, NULL, lbolt + TICKS_PER_LOOP);
60462 +       spin_unlock (&Lock);
60463 +    }
60464 +
60465 +    if (sys->Position.pos_mode == ELAN_POS_UNKNOWN)
60466 +       printk ("ep: network position not found after waiting\n");
60467 +    else if (printed)
60468 +       printk ("ep: network position found at nodeid %d\n", sys->Position.pos_nodeid);
60469 +
60470 +    spin_lock_destroy (&Lock);
60471 +    kcondvar_destroy (&Sleep);
60472 +
60473 +    return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? ELAN_INVALID_NODE : sys->Position.pos_nodeid);
60474 +}
60475 +
60476 +int
60477 +ep_nodeid (EP_SYS *sys)
60478 +{
60479 +    return (sys->Position.pos_mode == ELAN_POS_UNKNOWN ? ELAN_INVALID_NODE : sys->Position.pos_nodeid);
60480 +}
60481 +
60482 +int
60483 +ep_numnodes (EP_SYS *sys)
60484 +{
60485 +    return (sys->Position.pos_nodes);
60486 +}
60487 +
60488 +void
60489 +ep_fillout_stats(EP_RAIL *r, char *str) 
60490 +{
60491 +    sprintf(str+strlen(str),"SendMessageFailed %lu NeterrAtomicPacket %lu NeterrDmaPacket %lu \n", r->Stats.SendMessageFailed, r->Stats.NeterrAtomicPacket, r->Stats.NeterrDmaPacket);
60492 +    sprintf(str+strlen(str),"Rx %lu  %lu /sec\n",   GET_STAT_TOTAL(r->Stats,rx), GET_STAT_PER_SEC(r->Stats,rx) ); 
60493 +    sprintf(str+strlen(str),"MBytes %lu  %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,rx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,rx_len) / (1024*1024)); 
60494 +    sprintf(str+strlen(str),"Tx %lu  %lu /sec\n",   GET_STAT_TOTAL(r->Stats,tx), GET_STAT_PER_SEC(r->Stats,tx) ); 
60495 +    sprintf(str+strlen(str),"MBytes %lu  %lu MB/sec\n", GET_STAT_TOTAL(r->Stats,tx_len)/ (1024*1024), GET_STAT_PER_SEC(r->Stats,tx_len) / (1024*1024)); 
60496 +}
60497 +
60498 +
60499 +/*
60500 + * Local variables:
60501 + * c-file-style: "stroustrup"
60502 + * End:
60503 + */
60504 diff -urN clean/drivers/net/qsnet/ep/kcomm_elan3.c linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.c
60505 --- clean/drivers/net/qsnet/ep/kcomm_elan3.c    1969-12-31 19:00:00.000000000 -0500
60506 +++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.c      2004-11-30 07:02:06.000000000 -0500
60507 @@ -0,0 +1,504 @@
60508 +
60509 +/*
60510 + *    Copyright (c) 2003 by Quadrics Ltd.
60511 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
60512 + *
60513 + *    For licensing information please see the supplied COPYING file
60514 + *
60515 + */
60516 +
60517 +#ident "@(#)$Id: kcomm_elan3.c,v 1.34 2004/11/30 12:02:06 mike Exp $"
60518 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan3.c,v $ */
60519 +
60520 +#include <qsnet/kernel.h>
60521 +
60522 +#include <elan/kcomm.h>
60523 +
60524 +#include "kcomm_vp.h"
60525 +#include "kcomm_elan3.h"
60526 +#include "conf_linux.h"
60527 +
60528 +extern EP_CODE threadcode_elan3;
60529 +
60530 +unsigned int
60531 +ep3_create_rails (EP_SYS *sys, unsigned int disabled)
60532 +{
60533 +    unsigned int rmask = 0;
60534 +    ELAN3_DEV   *dev;
60535 +    EP_RAIL     *rail;
60536 +    int          i;
60537 +
60538 +    for (i = 0; i < EP_MAX_RAILS; i++)
60539 +    {
60540 +       if ((dev = elan3_device (i)) != NULL)
60541 +       {
60542 +           if ((rail = ep3_create_rail (sys, dev)) != NULL)
60543 +           {
60544 +               if (disabled & (1 << rail->Number))
60545 +                   printk ("%s: auto-start of device disabled by configuration\n", rail->Name);
60546 +               else
60547 +                   ep_start_rail (rail);
60548 +               
60549 +               ep_procfs_rail_init(rail);
60550 +
60551 +               rmask |= (1 << rail->Number);
60552 +           }
60553 +       }
60554 +    }
60555 +
60556 +    return rmask;
60557 +}
60558 +
60559 +EP_RAIL *
60560 +ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev)
60561 +{
60562 +    EP3_RAIL *rail;
60563 +    int       res;
60564 +
60565 +    KMEM_ZALLOC (rail, EP3_RAIL *, sizeof (EP3_RAIL), TRUE);
60566 +
60567 +    if (rail == NULL)
60568 +       return (EP_RAIL *) NULL;
60569 +
60570 +    if ((res = ep_init_rail (sys, &rail->Generic)) != 0)
60571 +    {
60572 +       KMEM_FREE (rail, sizeof (EP3_RAIL));
60573 +       return (EP_RAIL *) NULL;
60574 +    }
60575 +
60576 +    rail->Device = dev;
60577 +
60578 +    /* Install our rail operations */
60579 +    rail->Generic.Operations.DestroyRail      = ep3_destroy_rail;
60580 +    rail->Generic.Operations.StartRail        = ep3_start_rail;
60581 +    rail->Generic.Operations.StallRail        = ep3_stall_rail;
60582 +    rail->Generic.Operations.StopRail         = ep3_stop_rail;
60583 +
60584 +    rail->Generic.Operations.SdramAlloc       = ep3_sdram_alloc;
60585 +    rail->Generic.Operations.SdramFree        = ep3_sdram_free;
60586 +    rail->Generic.Operations.SdramWriteb      = ep3_sdram_writeb;
60587 +
60588 +    rail->Generic.Operations.KaddrMap         = ep3_kaddr_map;
60589 +    rail->Generic.Operations.SdramMap         = ep3_sdram_map;
60590 +    rail->Generic.Operations.Unmap            = ep3_unmap;
60591 +
60592 +    rail->Generic.Operations.DvmaReserve      = ep3_dvma_reserve;
60593 +    rail->Generic.Operations.DvmaRelease      = ep3_dvma_release;
60594 +    rail->Generic.Operations.DvmaSetPte       = ep3_dvma_set_pte;
60595 +    rail->Generic.Operations.DvmaReadPte      = ep3_dvma_read_pte;
60596 +    rail->Generic.Operations.DvmaUnload       = ep3_dvma_unload;
60597 +    rail->Generic.Operations.FlushTlb         = ep3_flush_tlb;
60598 +
60599 +    rail->Generic.Operations.ProbeRoute       = ep3_probe_route;
60600 +    rail->Generic.Operations.PositionFound    = ep3_position_found;
60601 +    rail->Generic.Operations.CheckPosition    = ep3_check_position;
60602 +    rail->Generic.Operations.NeterrFixup      = ep3_neterr_fixup;
60603 +
60604 +    rail->Generic.Operations.LoadSystemRoute  = ep3_load_system_route;
60605 +
60606 +    rail->Generic.Operations.LoadNodeRoute    = ep3_load_node_route;
60607 +    rail->Generic.Operations.UnloadNodeRoute  = ep3_unload_node_route;
60608 +    rail->Generic.Operations.LowerFilter      = ep3_lower_filter;
60609 +    rail->Generic.Operations.RaiseFilter      = ep3_raise_filter;
60610 +    rail->Generic.Operations.NodeDisconnected = ep3_node_disconnected;
60611 +
60612 +    rail->Generic.Operations.FlushFilters     = ep3_flush_filters;
60613 +    rail->Generic.Operations.FlushQueues      = ep3_flush_queues;
60614 +
60615 +    rail->Generic.Operations.AllocInputQ      = ep3_alloc_inputq;
60616 +    rail->Generic.Operations.FreeInputQ       = ep3_free_inputq;
60617 +    rail->Generic.Operations.EnableInputQ     = ep3_enable_inputq;
60618 +    rail->Generic.Operations.DisableInputQ    = ep3_disable_inputq;
60619 +    rail->Generic.Operations.PollInputQ       = ep3_poll_inputq;
60620 +
60621 +    rail->Generic.Operations.AllocOutputQ     = ep3_alloc_outputq;
60622 +    rail->Generic.Operations.FreeOutputQ      = ep3_free_outputq;
60623 +    rail->Generic.Operations.OutputQMsg       = ep3_outputq_msg;
60624 +    rail->Generic.Operations.OutputQState     = ep3_outputq_state;
60625 +    rail->Generic.Operations.OutputQSend      = ep3_outputq_send;
60626 +
60627 +    rail->Generic.Operations.FillOutStats     = ep3_fillout_stats;
60628 +
60629 +    rail->Generic.Devinfo = dev->Devinfo;
60630 +
60631 +    printk ("%s: connected via elan3 rev%c device %d\n", rail->Generic.Name,
60632 +           'a' + dev->Devinfo.dev_revision_id, dev->Instance);
60633 +
60634 +    return (EP_RAIL *) rail;
60635 +}
60636 +
60637 +void
60638 +ep3_destroy_rail (EP_RAIL *r)
60639 +{
60640 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60641 +    
60642 +    KMEM_FREE (rail, sizeof (EP3_RAIL));
60643 +}
60644 +
60645 +static int
60646 +ep3_attach_rail (EP3_RAIL *rail)
60647 +{
60648 +    ELAN3_DEV        *dev = rail->Device;
60649 +    ELAN3_CTXT       *ctxt;
60650 +    ELAN_CAPABILITY  *cap;
60651 +    int               ctx;
60652 +    unsigned long     flags;
60653 +
60654 +    if ((ctxt = elan3_alloc (dev, TRUE)) == (ELAN3_CTXT *) NULL)
60655 +    {
60656 +       printk ("%s: cannot allocate elan context\n", rail->Generic.Name);
60657 +       return -ENXIO;
60658 +    }
60659 +    
60660 +    ctxt->Operations = &ep3_elan3_ops;
60661 +    ctxt->Private    = (void *) rail;
60662 +    
60663 +    /* Initialise a capability and attach to the elan */
60664 +    KMEM_ALLOC (cap, ELAN_CAPABILITY *, sizeof (ELAN_CAPABILITY), TRUE);
60665 +    
60666 +    elan_nullcap (cap);
60667 +    
60668 +    cap->cap_type        = ELAN_CAP_TYPE_KERNEL;
60669 +    cap->cap_version     = ELAN_CAP_VERSION_NUMBER;
60670 +    cap->cap_mycontext   = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
60671 +    cap->cap_lowcontext  = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
60672 +    cap->cap_highcontext = ELAN3_MRF_CONTEXT_NUM | SYS_CONTEXT_BIT;
60673 +    cap->cap_railmask    = 1 << dev->Devinfo.dev_rail;
60674 +    
60675 +    /* Ensure the context filter is raised while we initialise */
60676 +    elan3_block_inputter (ctxt, TRUE);
60677 +
60678 +    if (elan3_doattach (ctxt, cap) != 0)
60679 +    {
60680 +       printk ("%s: cannot attach to kernel context\n", rail->Generic.Name);
60681 +
60682 +       KMEM_FREE (cap, sizeof (ELAN_CAPABILITY));
60683 +       elan3_free (ctxt);
60684 +       return -ENXIO;
60685 +    }
60686 +    KMEM_FREE (cap, sizeof (ELAN_CAPABILITY));
60687 +
60688 +    /* now attach to all the kernel comms input/dmaring/data contexts */
60689 +    spin_lock_irqsave (&dev->IntrLock, flags);
60690 +
60691 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
60692 +    {
60693 +       /* place it in the info table.  NOTE: don't call elan3mmu_set_info, as this */
60694 +       /* will queue the info again on the devices info list */
60695 +       dev->CtxtTable[ctx] = ctxt;
60696 +       
60697 +       elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
60698 +       elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1);
60699 +    }
60700 +
60701 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
60702 +    {
60703 +       /* place it in the info table.  NOTE: don't call elan3mmu_set_info, as this */
60704 +       /* will queue the info again on the devices info list */
60705 +       dev->CtxtTable[ctx] = ctxt;
60706 +       
60707 +       elan3mmu_set_context_filter (dev, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
60708 +       elan3mmu_attach (dev, ctx, ctxt->Elan3mmu, ctxt->RouteTable->Table, ctxt->RouteTable->Size-1);
60709 +    }
60710 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
60711 +
60712 +    /* Stash the ctxt,commandport, mmu and route table */
60713 +    rail->Ctxt        = ctxt;
60714 +    rail->CommandPort = ctxt->CommandPort;
60715 +    rail->Elan3mmu    = ctxt->Elan3mmu;
60716 +    rail->RouteTable  = ctxt->RouteTable;
60717 +
60718 +    return 0;
60719 +}
60720 +
60721 +static void
60722 +ep3_detach_rail (EP3_RAIL *rail)
60723 +{
60724 +    ELAN3_DEV *dev = rail->Device;
60725 +    unsigned long flags;
60726 +    int ctx;
60727 +
60728 +    /* detach from the elan */
60729 +    spin_lock_irqsave (&dev->IntrLock, flags);
60730 +
60731 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
60732 +    {
60733 +       dev->CtxtTable[ctx] = NULL;
60734 +       elan3mmu_detach (dev, ctx);
60735 +    }
60736 +
60737 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
60738 +    {
60739 +       dev->CtxtTable[ctx] = NULL;
60740 +       elan3mmu_detach (dev, ctx);
60741 +    }
60742 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
60743 +
60744 +    elan3_dodetach(rail->Ctxt);
60745 +    elan3_free (rail->Ctxt);
60746 +
60747 +    rail->Ctxt        = NULL;
60748 +    rail->CommandPort = 0;
60749 +    rail->Elan3mmu    = NULL;
60750 +    rail->RouteTable  = NULL;
60751 +}
60752 +
60753 +int
60754 +ep3_start_rail (EP_RAIL *r)
60755 +{
60756 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
60757 +    int           i, res;
60758 +    unsigned long flags;
60759 +
60760 +    if ((res = ep3_attach_rail (rail)) != 0)
60761 +       return res;
60762 +
60763 +    spin_lock_init (&rail->CookieLock);
60764 +    kmutex_init (&rail->HaltOpMutex);
60765 +    kcondvar_init (&rail->HaltOpSleep);
60766 +
60767 +    /* Initialise event interrupt cookie table */
60768 +    InitialiseCookieTable (&rail->CookieTable);
60769 +
60770 +    /* Load and map the thread code */
60771 +    rail->ThreadCode = threadcode_elan3;
60772 +    if (ep_loadcode (&rail->Generic, &rail->ThreadCode) != ESUCCESS)
60773 +       goto failed;
60774 +
60775 +    /* Map the command port to be visible to the Elan */
60776 +    ep3_ioaddr_map (&rail->Generic, EP3_COMMANDPORT_ADDR, rail->Ctxt->CommandPage, PAGESIZE, EP_PERM_WRITE);
60777 +    rail->CommandPortAddr = EP3_COMMANDPORT_ADDR + (rail->Ctxt->CommandPort - rail->Ctxt->CommandPage);
60778 +
60779 +    /* Allocate the elan visible sdram/main memory */
60780 +    if ((rail->RailElan = ep_alloc_elan (&rail->Generic, sizeof (EP3_RAIL_ELAN), 0, &rail->RailElanAddr)) == 0 ||
60781 +       (rail->RailMain = ep_alloc_main (&rail->Generic, sizeof (EP3_RAIL_MAIN), 0, &rail->RailMainAddr)) == 0)
60782 +    {
60783 +       goto failed;
60784 +    }
60785 +
60786 +    /* Allocate the system input queues at their fixed elan address */
60787 +    if (! (rail->QueueDescs = ep_alloc_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE, PAGESIZE, EP_PERM_ALL, 0)))
60788 +       goto failed;
60789 +
60790 +    /* Initialise all queue entries to be full */
60791 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
60792 +       elan3_sdram_writel (rail->Device, EP_SYSTEMQ_DESC(rail->QueueDescs, i) + offsetof (EP3_InputQueue, q_state), E3_QUEUE_FULL);
60793 +
60794 +    /* initialise the dma rings */
60795 +    if (DmaRingsCreate (rail))
60796 +       goto failed;
60797 +    
60798 +    if (InitialiseDmaRetries (rail))
60799 +       goto failed;
60800 +
60801 +    if (ep3_init_probenetwork (rail))
60802 +       goto failed;
60803 +
60804 +    /* can now drop the context filter for the system context */
60805 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
60806 +    elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, FALSE, 0, NULL);
60807 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
60808 +
60809 +    return 0;
60810 +
60811 + failed:
60812 +    printk ("ep3_start_rail: failed for rail %d\n", rail->Generic.Number);
60813 +    ep3_stop_rail (&rail->Generic);
60814 +
60815 +    return -ENOMEM;
60816 +}
60817 +
60818 +void
60819 +ep3_stall_rail (EP_RAIL *r)
60820 +{
60821 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
60822 +    int           ctx;
60823 +    unsigned long flags;
60824 +
60825 +    /* raise all the context filters */
60826 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
60827 +
60828 +    for (ctx = ELAN3_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN3_KCOMM_TOP_CONTEXT_NUM; ctx++)
60829 +       elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
60830 +
60831 +    for (ctx = ELAN3_DMARING_BASE_CONTEXT_NUM; ctx <= ELAN3_DMARING_TOP_CONTEXT_NUM; ctx++)
60832 +       elan3mmu_set_context_filter (rail->Device, ctx|SYS_CONTEXT_BIT, TRUE, 0, NULL);
60833 +
60834 +    elan3mmu_set_context_filter (rail->Device, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, TRUE, 0, NULL);
60835 +
60836 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
60837 +}
60838 +
60839 +void
60840 +ep3_stop_rail (EP_RAIL *r)
60841 +{
60842 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60843 +
60844 +    ep3_destroy_probenetwork (rail);
60845 +
60846 +    if (rail->DmaRetryInitialised)
60847 +       DestroyDmaRetries (rail);
60848 +
60849 +    DmaRingsRelease(rail);
60850 +
60851 +    if (rail->Generic.State == EP_RAIL_STATE_RUNNING)
60852 +    {
60853 +       KMEM_FREE (rail->MainCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32));
60854 +
60855 +       ep_free_elan (&rail->Generic, rail->ElanCookies, rail->Generic.Position.pos_nodes * sizeof (E3_uint32));
60856 +    }
60857 +
60858 +    if (rail->QueueDescs)
60859 +       ep_free_memory_elan (&rail->Generic, EP_SYSTEM_QUEUE_BASE);
60860 +    rail->QueueDescs = 0;
60861 +
60862 +    if (rail->RailMain)
60863 +       ep_free_main (&rail->Generic, rail->RailMainAddr, sizeof (EP3_RAIL_MAIN));
60864 +    rail->RailMain = 0;
60865 +
60866 +    if (rail->RailElan)
60867 +       ep_free_elan (&rail->Generic, rail->RailElanAddr, sizeof (EP3_RAIL_ELAN));
60868 +    rail->RailElan = 0;
60869 +
60870 +    ep_unloadcode (&rail->Generic, &rail->ThreadCode);
60871 +
60872 +    DestroyCookieTable (&rail->CookieTable);
60873 +
60874 +    ep_perrail_unmap (&rail->Generic, rail->Ctxt->CommandPage, PAGESIZE);
60875 +
60876 +    kcondvar_destroy (&rail->HaltOpSleep);
60877 +    kmutex_destroy (&rail->HaltOpMutex);
60878 +    spin_lock_destroy (&rail->CookieLock);
60879 +
60880 +    ep3_detach_rail (rail);
60881 +}
60882 +
60883 +void
60884 +ep3_position_found (EP_RAIL *r, ELAN_POSITION *pos)
60885 +{
60886 +    EP3_RAIL   *rail = (EP3_RAIL *) r;
60887 +    sdramaddr_t addr;
60888 +
60889 +    rail->SwitchBroadcastLevelTick = lbolt;
60890 +
60891 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, NodeId), pos->pos_nodeid);
60892 +
60893 +    /* Allocate Network Identify cookie state */
60894 +    KMEM_ZALLOC (rail->MainCookies, E3_uint32 *, pos->pos_nodes * sizeof (E3_uint32), 1);
60895 +    
60896 +    if (! (addr = ep_alloc_elan (&rail->Generic, pos->pos_nodes * sizeof (E3_uint32), 0, &rail->ElanCookies)))
60897 +       panic ("ep: PositionFound: cannot allocate elan cookies array\n");
60898 +
60899 +    elan3_sdram_zeroq_sdram (rail->Device, addr, pos->pos_nodes * sizeof (E3_uint32));
60900 +
60901 +    ep3_probe_position_found (rail, pos);
60902 +}
60903 +
60904 +sdramaddr_t
60905 +ep3_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size)
60906 +{
60907 +    return elan3_sdram_alloc (((EP3_RAIL *) r)->Device, size);
60908 +}
60909 +
60910 +void
60911 +ep3_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size)
60912 +{
60913 +    elan3_sdram_free (((EP3_RAIL *) r)->Device, addr, size);
60914 +}
60915 +
60916 +void
60917 +ep3_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val)
60918 +{
60919 +    elan3_sdram_writeb (((EP3_RAIL *) r)->Device, addr, val);
60920 +}
60921 +
60922 +void
60923 +ep3_flush_tlb (EP_RAIL *r)
60924 +{
60925 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60926 +    ELAN3_DEV *dev = rail->Device;
60927 +    unsigned long flags;
60928 +
60929 +    spin_lock_irqsave (&dev->TlbLock, flags);
60930 +    
60931 +    IncrStat (dev, TlbFlushes);
60932 +    
60933 +    write_reg32 (dev, Cache_Control_Reg.ContReg, dev->Cache_Control_Reg | MMU_FLUSH);
60934 +    mmiob ();
60935 +    spin_unlock_irqrestore (&dev->TlbLock, flags);
60936 +
60937 +    while (! (read_reg32 (dev, Cache_Control_Reg.ContReg) & MMU_FLUSHED))
60938 +       mb();
60939 +}
60940 +
60941 +void
60942 +ep3_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode)
60943 +{
60944 +    EP3_RAIL  *rail = (EP3_RAIL *) r;
60945 +    E3_uint16  flits[MAX_FLITS];
60946 +    int        nflits;
60947 +    
60948 +    nflits = GenerateRoute (&rail->Generic.Position, flits, lowNode, highNode, DEFAULT_ROUTE_TIMEOUT, HIGH_ROUTE_PRIORITY);
60949 +       
60950 +    if (LoadRoute (rail->Device, rail->RouteTable, vp, ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
60951 +    {
60952 +       /* XXXX: whilst LoadRoute() can fail - it is not likely. */
60953 +       panic ("ep3_load_system_route: cannot load p2p route entry\n");
60954 +    }
60955 +}
60956 +
60957 +void
60958 +ep3_load_node_route (EP_RAIL *r, unsigned nodeId)
60959 +{
60960 +    EP3_RAIL     *rail = (EP3_RAIL *) r;
60961 +    E3_uint16     flits[MAX_FLITS];
60962 +    int           nflits;
60963 +
60964 +    nflits = GenerateRoute (&rail->Generic.Position, flits, nodeId, nodeId, DEFAULT_ROUTE_TIMEOUT, DEFAULT_ROUTE_PRIORITY);
60965 +
60966 +    if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId), EP3_CONTEXT_NUM(rail->Generic.Position.pos_nodeid), nflits, flits) != 0)
60967 +       panic ("ep3_load_node_route: cannot load p2p data route entry\n");
60968 +}
60969 +
60970 +void
60971 +ep3_unload_node_route (EP_RAIL *r, unsigned nodeId)
60972 +{
60973 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60974 +
60975 +    ClearRoute (rail->Device, rail->RouteTable, EP_VP_DATA(nodeId));
60976 +}
60977 +
60978 +void
60979 +ep3_lower_filter (EP_RAIL *r, unsigned nodeId)
60980 +{
60981 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60982 +    unsigned long flags;
60983 +
60984 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
60985 +    elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 0, 0, NULL);
60986 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
60987 +}
60988 +
60989 +void
60990 +ep3_raise_filter (EP_RAIL *r, unsigned nodeId)
60991 +{
60992 +    EP3_RAIL *rail = (EP3_RAIL *) r;
60993 +    unsigned long flags;
60994 +
60995 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
60996 +    elan3mmu_set_context_filter (rail->Device, EP3_CONTEXT_NUM(nodeId), 1, 0, NULL);
60997 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
60998 +}
60999 +
61000 +void
61001 +ep3_node_disconnected (EP_RAIL *r, unsigned nodeId)
61002 +{
61003 +    FreeStalledDmas ((EP3_RAIL *) r, nodeId);
61004 +}
61005 +
61006 +void
61007 +ep3_fillout_stats(EP_RAIL *r, char *str) 
61008 +{
61009 +    /* no stats here yet */
61010 +    /* EP3_RAIL *ep3rail = (EP3_RAIL *)r; */
61011 +}
61012 diff -urN clean/drivers/net/qsnet/ep/kcomm_elan3.h linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.h
61013 --- clean/drivers/net/qsnet/ep/kcomm_elan3.h    1969-12-31 19:00:00.000000000 -0500
61014 +++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan3.h      2004-12-14 05:19:23.000000000 -0500
61015 @@ -0,0 +1,431 @@
61016 +/*
61017 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61018 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61019 + *
61020 + *    For licensing information please see the supplied COPYING file
61021 + *
61022 + */
61023 +
61024 +#ifndef __EP_KCOMM_ELAN3_H
61025 +#define __EP_KCOMM_ELAN3_H
61026 +
61027 +#ident "@(#)$Id: kcomm_elan3.h,v 1.53 2004/12/14 10:19:23 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
61028 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan3.h,v $*/
61029 +
61030 +#if !defined(__ELAN3__)
61031 +#include <elan3/elanregs.h>
61032 +#include <elan3/elandev.h>
61033 +#include <elan3/elanvp.h>
61034 +#include <elan3/elan3mmu.h>
61035 +#include <elan3/elanctxt.h>
61036 +#include <elan3/elandebug.h>
61037 +#endif /* !defined(__ELAN3__) */
61038 +
61039 +#include <elan3/trtype.h>
61040 +
61041 +/* private address allocation */
61042 +#define EP3_TEXT_BASE                  0xFF000000              /* base address for thread code (defined in makerules.elan3) */
61043 +#define EP3_COMMANDPORT_ADDR           0xFFF00000              /* mapping address for elan command port */
61044 +
61045 +#define EP3_STACK_SIZE                 1024                    /* default thread code stack size */
61046 +
61047 +#define EP3_PACEMAKER_EVENTADDR                0xfeedbeef              /* mis-aligned address used by heartbeat pacemaker */
61048 +
61049 +/* context number allocation */
61050 +#define EP3_CONTEXT_NUM(nodeId)                ((ELAN3_KCOMM_BASE_CONTEXT_NUM + (nodeId)) | SYS_CONTEXT_BIT)
61051 +#define EP3_CONTEXT_ISDATA(ctx)                (((ctx) & MAX_ROOT_CONTEXT_MASK) >= ELAN3_KCOMM_BASE_CONTEXT_NUM && \
61052 +                                        ((ctx) & MAX_ROOT_CONTEXT_MASK) <= ELAN3_KCOMM_TOP_CONTEXT_NUM)
61053 +#define EP3_CONTEXT_TO_NODE(ctx)       (((ctx) & MAX_ROOT_CONTEXT_MASK) - ELAN3_KCOMM_BASE_CONTEXT_NUM)
61054 +
61055 +/* DMA issuing rings */
61056 +#define EP3_RING_CRITICAL              0
61057 +#define EP3_RING_CRITICAL_LEN          128
61058 +#define EP3_RING_HIGH_PRI              1
61059 +#define EP3_RING_HIGH_PRI_LEN          64
61060 +#define EP3_RING_LOW_PRI               2
61061 +#define EP3_RING_LOW_PRI_LEN           32
61062 +#define EP3_NUM_RINGS                  3
61063 +
61064 +/* Value to "return" from c_close() when envelope handled  by the trap handler */
61065 +#define EP3_PAckStolen                 4
61066 +
61067 +/* unimplemented instruction trap types for thread code */
61068 +#define EP3_UNIMP_TRAP_NO_DESCS                0
61069 +#define EP3_UNIMP_TRAP_PACKET_NACKED   1
61070 +#define EP3_UNIMP_THREAD_HALTED                2
61071 +#define EP3_NUM_UNIMP_TRAPS            3
61072 +
61073 +/* forward declarations */
61074 +typedef struct ep3_rail        EP3_RAIL;
61075 +
61076 +/* block copy elan3 inputter queue - with waitvent0 */
61077 +typedef struct ep3_inputqueue
61078 +{
61079 +    volatile E3_uint32 q_state;        /* queue is full=bit0, queue is locked=bit8 */
61080 +    volatile E3_Addr   q_bptr;         /* block aligned ptr to current back item */
61081 +    E3_uint32          q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
61082 +    E3_Addr            q_top;          /* block aligned ptr to last queue item */
61083 +    E3_Addr            q_base;         /* block aligned ptr to first queue item */
61084 +    volatile E3_Addr   q_fptr;         /* block aligned ptr to current front item */
61085 +    E3_BlockCopyEvent  q_event;        /* queue block copy event */
61086 +    E3_uint32          q_pad[4];       /* pad to 64 bytes */
61087 +    E3_Addr            q_wevent;       /* WaitEvent0 struct */
61088 +    E3_int32           q_wcount;
61089 +} EP3_InputQueue;
61090 +
61091 +
61092 +#if !defined(__ELAN3__)
61093 +
61094 +/* dma retries types and retry times */
61095 +typedef struct ep3_retry_dma
61096 +{
61097 +    struct list_head    Link;                                  /* chained on free/retry list */
61098 +    long               RetryTime;                              /* "lbolt" to retry at */
61099 +    E3_DMA_BE          Dma;                                    /* DMA (in main memory) */
61100 +} EP3_RETRY_DMA;
61101 +
61102 +typedef struct ep3_dma_ring
61103 +{
61104 +    sdramaddr_t                pEvent;  
61105 +    E3_Addr            epEvent;
61106 +    
61107 +    sdramaddr_t                pDma;     
61108 +    E3_Addr            epDma; 
61109 +    
61110 +    E3_uint32         *pDoneBlk; 
61111 +    E3_Addr            epDoneBlk; 
61112 +    
61113 +    int                        Entries;                                /* number of slots in array  */
61114 +    int                        Position;                               /* current position in array */
61115 +
61116 +    ioaddr_t            CommandPort;
61117 +    ioaddr_t           CommandPage;
61118 +    DeviceMappingHandle CommandPageHandle;
61119 +} EP3_DMA_RING;
61120 +
61121 +#define DMA_RING_EVENT(ring,n)         ((ring)->pEvent + (n)*sizeof (E3_BlockCopyEvent))
61122 +#define DMA_RING_EVENT_ELAN(ring,n)    ((ring)->epEvent + (n)*sizeof (E3_BlockCopyEvent))
61123 +
61124 +#define DMA_RING_DMA(ring,n)           ((ring)->pDma   + (n)*sizeof (E3_DMA))
61125 +#define DMA_RING_DMA_ELAN(ring,n)      ((ring)->epDma   + (n)*sizeof (E3_DMA))
61126 +
61127 +#define DMA_RING_DONE_ELAN(ring,n)     ((ring)->epDoneBlk + (n)*sizeof (E3_uint32))
61128 +
61129 +/* Event interrupt cookie operations and lookup table */
61130 +typedef struct ep3_cookie_ops
61131 +{
61132 +    void       (*Event)       (EP3_RAIL *rail, void *arg);                             /* called from the interrupt handler when an event is "set" */
61133 +    void       (*DmaRetry)    (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error);  /* called from the interrupt handler when a DMA is "nacked" */
61134 +    void       (*DmaCancelled)(EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);             /* called from the interrupt handler/flush disconnecting when cancelled. */
61135 +    void       (*DmaVerify)   (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma);             /* called from multiple places, to check dma is consistent with state. */
61136 +} EP3_COOKIE_OPS;
61137 +
61138 +typedef struct ep3_cookie
61139 +{
61140 +    struct ep3_cookie  *Next;                                  /* Cookies are chained in hash table. */
61141 +    E3_uint32          Cookie;                                 /* Cookie stored in ev_Type */
61142 +    EP3_COOKIE_OPS     *Operations;                            /* Cookie operations */
61143 +    void              *Arg;                                    /* User's argument. */
61144 +} EP3_COOKIE;
61145 +
61146 +#define EP3_COOKIE_HASH_SIZE           (256)
61147 +#define EP3_HASH_COOKIE(a)             ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP3_COOKIE_HASH_SIZE-1))
61148 +
61149 +typedef struct ep3_cookie_table
61150 +{
61151 +    spinlock_t         Lock;
61152 +    EP3_COOKIE        *Entries[EP3_COOKIE_HASH_SIZE];
61153 +} EP3_COOKIE_TABLE;
61154 +
61155 +#endif /* !defined(__ELAN3__) */
61156 +
61157 +#define EP3_EVENT_FREE                 ((1 << 4) | EV_WCOPY)
61158 +#define EP3_EVENT_ACTIVE               ((2 << 4) | EV_WCOPY)
61159 +/* DONE == Cookie */
61160 +#define EP3_EVENT_FAILED               ((3 << 4) | EV_WCOPY)
61161 +#define EP3_EVENT_PRIVATE              ((4 << 4) | EV_WCOPY)
61162 +
61163 +/* The event cookie can get posted (and seen) before the write has */
61164 +/* hit main memory - in this case the event count is <= 0 and the block */
61165 +/* will be marked as ACTIVE - but could transition to DONE at any time */
61166 +/* Also for a word copy event, the value written into the "done" word */
61167 +/* can be the event interrupt cookie rather than the "source" value */
61168 +/* this happens since the uCode does not wait for the write to have */
61169 +/* occurred before overwriting TMP_0 with the cookie */
61170 +#define EP3_EVENT_FIRING(edev, event, cookie, done) \
61171 +       (((((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie) || (done) == EP3_EVENT_ACTIVE) && \
61172 +        (int) elan3_sdram_readl (edev, (event) + offsetof (E3_BlockCopyEvent, ev_Count)) <= 0)
61173 +#define EP3_EVENT_FIRED(cookie, done) \
61174 +       (((done) & ~(EV_TYPE_BCOPY | EV_TYPE_MASK_EVIRQ)) == (cookie).Cookie)
61175 +
61176 +
61177 +/* Time limit to wait while event is firing and block write has not occurred */
61178 +#define EP3_EVENT_FIRING_TLIMIT                16384                   /* 1023 uS */
61179 +
61180 +#define EP3_INIT_COPY_EVENT(event, cookie, dest, intr)                                                 \
61181 +{                                                                                                      \
61182 +       (event).ev_Count  = 0;                                                                          \
61183 +       (event).ev_Type   = (intr) ? EV_TYPE_BCOPY | EV_TYPE_EVIRQ | (cookie).Cookie : EV_TYPE_BCOPY;   \
61184 +       (event).ev_Source = (cookie).Cookie | EV_WCOPY;                                                 \
61185 +       (event).ev_Dest   = (dest) | EV_TYPE_BCOPY_WORD;                                                \
61186 +}
61187 +
61188 +#if !defined(__ELAN3__)
61189 +
61190 +/* Generic input queues which can be polled */
61191 +typedef struct ep3_inputq
61192 +{
61193 +    EP3_COOKIE         q_cookie;
61194 +    unsigned int       q_slotSize;
61195 +    unsigned int       q_slotCount;
61196 +
61197 +    void              *q_slots;
61198 +    EP_ADDR            q_slotsAddr;
61199 +
61200 +    EP_INPUTQ_CALLBACK *q_callback;
61201 +    void              *q_arg;
61202 +
61203 +    sdramaddr_t                q_desc;
61204 +    E3_Addr            q_descAddr;
61205 +
61206 +    E3_Addr            q_base;
61207 +    E3_Addr            q_top;
61208 +    E3_Addr            q_fptr;
61209 +
61210 +    E3_uint32          q_waitCount;
61211 +} EP3_INPUTQ;
61212 +
61213 +typedef struct ep3_outputq
61214 +{
61215 +    EP3_COOKIE         q_cookie;
61216 +
61217 +    unsigned int       q_slotCount;                            /* # slots allocated */
61218 +    unsigned int       q_slotSize;                             /* size of each slot (rounded up) */
61219 +
61220 +    sdramaddr_t                q_elan;
61221 +    E3_Addr            q_elanAddr;
61222 +
61223 +    void              *q_main;
61224 +    E3_Addr            q_mainAddr;
61225 +} EP3_OUTPUTQ;
61226 +
61227 +#endif /* !defined(__ELAN3__) */
61228 +
61229 +/* per-rail elan memory portion of device */
61230 +typedef struct ep3_rail_elan
61231 +{
61232 +    E3_uint16           ProbeSource0[TR_TRACEROUTE_ENTRIES];   /* 32 byte aligned */
61233 +    E3_uint16           ProbeSource1[TR_TRACEROUTE_ENTRIES];
61234 +
61235 +    E3_BlockCopyEvent   ProbeDone;                             /* 16 byte aligned */
61236 +    E3_Event            ProbeStart;                            /* 8 byte aligned */
61237 +    
61238 +    E3_uint32           ProbeType;                             /* 4 byte aligned */
61239 +    E3_uint32           ProbeLevel;
61240 +
61241 +    E3_uint32           NodeId;
61242 +} EP3_RAIL_ELAN;
61243 +
61244 +/* values for ProbeType */
61245 +#define PROBE_SINGLE                   0
61246 +#define PROBE_MULTIPLE                 1
61247 +/* number of attempts for each type */
61248 +#define PROBE_SINGLE_ATTEMPTS          10
61249 +#define PROBE_SINGLE_TIMEOUTS          5
61250 +#define PROBE_MULTIPLE_ATTEMPTS                20
61251 +#define PROBE_MULTIPLE_TIMEOUTS                10
61252 +
61253 +/* per-rail main memory portion of device */
61254 +typedef struct ep3_rail_main
61255 +{
61256 +    E3_uint16          ProbeDest0[TR_TRACEROUTE_ENTRIES];      /* 32 byte aligned */
61257 +    E3_uint16          ProbeDest1[TR_TRACEROUTE_ENTRIES];
61258 +    
61259 +    E3_uint32          ProbeDone;                              /* 4 byte aligned */
61260 +    E3_uint32          ProbeResult;
61261 +    E3_uint32          ProbeLevel;
61262 +} EP3_RAIL_MAIN;
61263 +
61264 +#if !defined(__ELAN3__)
61265 +
61266 +struct ep3_rail
61267 +{
61268 +    EP_RAIL            Generic;                                /* Generic rail */
61269 +
61270 +    ELAN3_DEV          *Device;                                        /* Elan device we're using */
61271 +    ELAN3_CTXT        *Ctxt;                                   /* Elan context struct */
61272 +    ioaddr_t            CommandPort;                           /* commandport from context */
61273 +    E3_Addr            CommandPortAddr;                        /*  and address mapped into elan */
61274 +
61275 +    ELAN3_ROUTE_TABLE  *RouteTable;                            /* routetable from context */
61276 +    ELAN3MMU          *Elan3mmu;                               /* elanmmu from context */
61277 +
61278 +    EP3_COOKIE_TABLE    CookieTable;                           /* Event cookie table */
61279 +
61280 +    EP_CODE            ThreadCode;                             /* copy of thread code */
61281 +    unsigned int        CommandPortEventTrap;                  /* flag to indicate command port event interrupt queue overflow trap */
61282 +
61283 +    sdramaddr_t         RailElan;                              /* Elan visible main/sdram portions of */
61284 +    E3_Addr             RailElanAddr;                          /* device structure */
61285 +    EP3_RAIL_MAIN      *RailMain;
61286 +    E3_Addr            RailMainAddr;
61287 +
61288 +    /* small system message queues */
61289 +    sdramaddr_t                QueueDescs;                             /* Input Queue descriptors */
61290 +
61291 +    /* Network position prober */
61292 +    E3_Addr            ProbeStack;                             /* Network position thread command structure */
61293 +    EP3_COOKIE         ProbeCookie;                            /*   event cookie for Done event */
61294 +    kcondvar_t         ProbeWait;                              /*   place to wait on probe thread */
61295 +    spinlock_t         ProbeLock;                              /*     and lock */
61296 +    volatile int        ProbeDone;                             /*     and flag to indicate it's done */
61297 +
61298 +    E3_uint16          ProbeDest0[TR_TRACEROUTE_ENTRIES];      /* last result of CheckNetworkPosition */
61299 +    E3_uint16          ProbeDest1[TR_TRACEROUTE_ENTRIES];
61300 +    E3_uint32          ProbeResult;
61301 +
61302 +    long               ProbeLevelTick[ELAN_MAX_LEVELS];
61303 +    long               SwitchBroadcastLevelTick;
61304 +
61305 +    /* rings for issuing dmas */
61306 +    EP3_DMA_RING        DmaRings[EP3_NUM_RINGS];
61307 +
61308 +    /* retry lists for dmas */
61309 +    struct list_head    DmaRetries[EP_NUM_RETRIES];            /* Dma retry lists */
61310 +    struct list_head    DmaRetryFreeList;                      /*   and free list */
61311 +    u_int              DmaRetryCount;                          /*   and total retry count */
61312 +    u_int              DmaRetryReserved;                       /*   and number reserved */
61313 +    u_int              DmaRetryThreadShouldStall;              /*   count of reasons to stall retries */
61314 +    u_int              DmaRetryThreadStarted:1;                /*   dma retry thread running */
61315 +    u_int              DmaRetryThreadShouldStop:1;             /*     but should stop */
61316 +    u_int              DmaRetryThreadStopped:1;                /*     and now it's stopped */
61317 +    u_int              DmaRetryInitialised:1;                  /* have initialised dma retries */
61318 +
61319 +    spinlock_t         DmaRetryLock;                           /*   spinlock protecting lists */
61320 +    kcondvar_t         DmaRetryWait;                           /*   place retry thread sleeps */
61321 +    long               DmaRetryTime;                           /*   and when it will next wakeup */
61322 +    unsigned int        DmaRetrySleeping;                      /*   and it's sleeping there */
61323 +
61324 +    /* Network Identify Cookies */
61325 +    E3_uint32         *MainCookies;                            /* One cookie allocator per-node for main*/
61326 +    E3_Addr            ElanCookies;                            /*   and one for elan */
61327 +    spinlock_t         CookieLock;                             /* spinlock to protect main cookies */
61328 +
61329 +    /* Halt operation flags for flushing. */
61330 +    kmutex_t            HaltOpMutex;                           /* serialize access to halt operations */
61331 +    unsigned int       HaltOpCompleted;                        /* flag to indicate halt operation completed */
61332 +    kcondvar_t         HaltOpSleep;                            /*   place to wait for it to complete */
61333 +
61334 +    /* Network error state */
61335 +    kcondvar_t         NetworkErrorSleep;                      /* place to sleep for network error halt operation */
61336 +    u_int              NetworkErrorFlushed;                    /*   and flag to indicate flushed */
61337 +
61338 +
61339 +    EP3_RAIL_STATS     Stats;                                  /* statistics */
61340 +};
61341 +
61342 +/* support.c */
61343 +
61344 +extern ELAN3_OPS  ep3_elan3_ops;
61345 +
61346 +extern E3_uint32    LocalCookie (EP3_RAIL *rail, unsigned int remoteNode);
61347 +extern E3_uint32    RemoteCookie (EP3_RAIL *rail, unsigned int remoteNode);
61348 +
61349 +extern void         InitialiseCookieTable (EP3_COOKIE_TABLE *table);
61350 +extern void         DestroyCookieTable (EP3_COOKIE_TABLE *table);
61351 +extern void         RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie, 
61352 +                                   E3_Addr event, EP3_COOKIE_OPS *ops, void *arg);
61353 +extern void         DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cookie);
61354 +extern EP3_COOKIE   *LookupCookie (EP3_COOKIE_TABLE *table, uint32_t cookie);
61355 +extern EP3_COOKIE   *LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr);
61356 +
61357 +extern int          DmaRingsCreate (EP3_RAIL *rail);
61358 +extern void         DmaRingsRelease (EP3_RAIL *rail);
61359 +extern int          IssueDma (EP3_RAIL *rail, E3_DMA_BE *dma, int type, int retryThread);
61360 +
61361 +extern int          IssueWaitevent (EP3_RAIL *rail, E3_Addr value);
61362 +extern void         IssueSetevent (EP3_RAIL *rail, E3_Addr value);
61363 +extern void         IssueRunThread (EP3_RAIL *rail, E3_Addr value);
61364 +extern long         DmaRetryTime (int type);
61365 +extern int          InitialiseDmaRetries (EP3_RAIL *rail);
61366 +extern void         DestroyDmaRetries (EP3_RAIL *rail);
61367 +extern int          ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr);
61368 +extern void         ReleaseDmaRetries (EP3_RAIL *rail, int count);
61369 +extern void         StallDmaRetryThread (EP3_RAIL *rail);
61370 +extern void         ResumeDmaRetryThread (EP3_RAIL *rail);
61371 +extern void         QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval);
61372 +extern void         QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma);
61373 +extern void         FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId);
61374 +
61375 +extern void         SetQueueLocked(EP3_RAIL *rail, sdramaddr_t qaddr);
61376 +
61377 +/* threadcode_elan3.c */
61378 +extern E3_Addr    ep3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack,
61379 +                                  int stackSize, int nargs, ...);
61380 +
61381 +/* probenetwork.c */
61382 +extern int        ep3_init_probenetwork (EP3_RAIL *rail);
61383 +extern void       ep3_destroy_probenetwork (EP3_RAIL *rail);
61384 +extern void       ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos);
61385 +extern int        ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw);
61386 +extern int        ep3_check_position (EP_RAIL *rail);
61387 +
61388 +/* neterr_elan3.c */
61389 +extern void       ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
61390 +
61391 +/* kcomm_elan3.c */
61392 +extern EP_RAIL    *ep3_create_rail (EP_SYS *sys, ELAN3_DEV *dev);
61393 +extern void        ep3_destroy_rail (EP_RAIL *rail);
61394 +
61395 +extern int         ep3_start_rail (EP_RAIL *rail);
61396 +extern void        ep3_stall_rail (EP_RAIL *rail);
61397 +extern void        ep3_stop_rail (EP_RAIL *rail);
61398 +
61399 +extern void       ep3_position_found (EP_RAIL *rail, ELAN_POSITION *pos);
61400 +
61401 +extern sdramaddr_t ep3_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size);
61402 +extern void        ep3_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size);
61403 +extern void        ep3_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
61404 +
61405 +extern void        ep3_flush_tlb (EP_RAIL *r);
61406 +extern void        ep3_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode);
61407 +extern void        ep3_load_node_route (EP_RAIL *r, unsigned int nodeId);
61408 +extern void        ep3_unload_node_route (EP_RAIL *r, unsigned int nodeId);
61409 +extern void        ep3_lower_filter (EP_RAIL *r, unsigned int nodeId);
61410 +extern void        ep3_raise_filter (EP_RAIL *rail, unsigned int nodeId);
61411 +extern void        ep3_node_disconnected (EP_RAIL *r, unsigned int nodeId);
61412 +
61413 +extern void        ep3_fillout_stats(EP_RAIL *rail, char *str);
61414 +
61415 +/* kmap_elan3.c */
61416 +extern void        ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr);
61417 +extern void        ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr);
61418 +extern void        ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned int len, unsigned int perm);
61419 +extern void        ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len);
61420 +extern void       *ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages);
61421 +extern void        ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private);
61422 +extern void        ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm);
61423 +extern physaddr_t  ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index);
61424 +extern void        ep3_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages);
61425 +
61426 +/* kmsg_elan3.c */
61427 +extern EP_INPUTQ  *ep3_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount,
61428 +                                    EP_INPUTQ_CALLBACK *callback, void *arg);
61429 +extern void        ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q);
61430 +extern void        ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q);
61431 +extern void        ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q);
61432 +extern int         ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
61433 +extern EP_OUTPUTQ *ep3_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount);
61434 +extern void        ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q);
61435 +extern void       *ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
61436 +extern int         ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
61437 +extern int         ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size,
61438 +                                    unsigned int nodeId, unsigned int qnum, unsigned int retries);
61439 +
61440 +/* support_elan3.c */
61441 +extern void        ep3_flush_filters (EP_RAIL *r);
61442 +extern void        ep3_flush_queues (EP_RAIL *r);
61443 +
61444 +#endif /* !defined(__ELAN3__) */
61445 +
61446 +#endif /* __EP_KCOMM_ELAN3_H */
61447 diff -urN clean/drivers/net/qsnet/ep/kcomm_elan4.c linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.c
61448 --- clean/drivers/net/qsnet/ep/kcomm_elan4.c    1969-12-31 19:00:00.000000000 -0500
61449 +++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.c      2004-11-30 07:02:06.000000000 -0500
61450 @@ -0,0 +1,526 @@
61451 +/*
61452 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61453 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61454 + *
61455 + *    For licensing information please see the supplied COPYING file
61456 + *
61457 + */
61458 +
61459 +#ident "@(#)$Id: kcomm_elan4.c,v 1.19 2004/11/30 12:02:06 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
61460 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan4.c,v $*/
61461 +
61462 +#include <qsnet/kernel.h>
61463 +#include <qsnet/kthread.h>
61464 +
61465 +#include <elan/kcomm.h>
61466 +
61467 +#include "kcomm_vp.h"
61468 +#include "kcomm_elan4.h"
61469 +#include "conf_linux.h"
61470 +
61471 +extern EP_CODE threadcode_elan4;
61472 +
61473 +unsigned int
61474 +ep4_create_rails (EP_SYS *sys, unsigned int disabled)
61475 +{
61476 +    unsigned int rmask = 0;
61477 +    ELAN4_DEV   *dev;
61478 +    EP_RAIL     *rail;
61479 +    int          i;
61480 +
61481 +    for (i = 0; i < EP_MAX_RAILS; i++)
61482 +    {
61483 +       if ((dev = elan4_reference_device (i, ELAN4_STATE_STARTED)) != NULL)
61484 +       {
61485 +           if ((rail = ep4_create_rail (sys, dev)) == NULL)
61486 +               elan4_dereference_device (dev);
61487 +           else
61488 +           {
61489 +               if (disabled & (1 << rail->Number))
61490 +                   printk ("%s: auto-start of device disabled by configuration\n", rail->Name);
61491 +               else
61492 +                   ep_start_rail (rail);
61493 +               
61494 +               ep_procfs_rail_init(rail);
61495 +
61496 +               rmask |= (1 << rail->Number);
61497 +           }
61498 +       }
61499 +    }
61500 +
61501 +    if (rmask)
61502 +       qsnet_debug_alloc();
61503 +
61504 +    return rmask;
61505 +}
61506 +
61507 +EP_RAIL *
61508 +ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev)
61509 +{
61510 +    EP4_RAIL *rail;
61511 +    int res;
61512 +
61513 +    KMEM_ZALLOC (rail, EP4_RAIL *, sizeof (EP4_RAIL), 1);
61514 +
61515 +    if (rail == NULL)
61516 +       return (EP_RAIL *) NULL;
61517 +    
61518 +    if ((res = ep_init_rail (sys, &rail->r_generic)) != 0)
61519 +    {
61520 +       KMEM_FREE (rail, sizeof (EP4_RAIL));
61521 +       return (EP_RAIL *) NULL;
61522 +    }
61523 +
61524 +    rail->r_ctxt.ctxt_dev = dev;
61525 +
61526 +    /* install our rail operations */
61527 +    rail->r_generic.Operations.DestroyRail      = ep4_destroy_rail;
61528 +    rail->r_generic.Operations.StartRail        = ep4_start_rail;
61529 +    rail->r_generic.Operations.StallRail        = ep4_stall_rail;
61530 +    rail->r_generic.Operations.StopRail         = ep4_stop_rail;    
61531 +
61532 +    rail->r_generic.Operations.SdramAlloc       = ep4_sdram_alloc;
61533 +    rail->r_generic.Operations.SdramFree        = ep4_sdram_free;
61534 +    rail->r_generic.Operations.SdramWriteb      = ep4_sdram_writeb;
61535 +
61536 +    rail->r_generic.Operations.KaddrMap         = ep4_kaddr_map;
61537 +    rail->r_generic.Operations.SdramMap         = ep4_sdram_map;
61538 +    rail->r_generic.Operations.Unmap            = ep4_unmap;
61539 +
61540 +    rail->r_generic.Operations.DvmaReserve     = ep4_dvma_reserve;
61541 +    rail->r_generic.Operations.DvmaRelease     = ep4_dvma_release;
61542 +    rail->r_generic.Operations.DvmaSetPte      = ep4_dvma_set_pte;
61543 +    rail->r_generic.Operations.DvmaReadPte     = ep4_dvma_read_pte;
61544 +    rail->r_generic.Operations.DvmaUnload      = ep4_dvma_unload;
61545 +    rail->r_generic.Operations.FlushTlb                = ep4_flush_tlb;
61546 +
61547 +    rail->r_generic.Operations.ProbeRoute       = ep4_probe_route;
61548 +
61549 +    rail->r_generic.Operations.PositionFound    = ep4_position_found;
61550 +    rail->r_generic.Operations.CheckPosition    = ep4_check_position;
61551 +    rail->r_generic.Operations.NeterrFixup      = ep4_neterr_fixup;
61552 +
61553 +    rail->r_generic.Operations.LoadSystemRoute  = ep4_load_system_route;
61554 +
61555 +    rail->r_generic.Operations.LoadNodeRoute    = ep4_load_node_route;
61556 +    rail->r_generic.Operations.UnloadNodeRoute  = ep4_unload_node_route;
61557 +    rail->r_generic.Operations.LowerFilter     = ep4_lower_filter;
61558 +    rail->r_generic.Operations.RaiseFilter     = ep4_raise_filter;
61559 +    rail->r_generic.Operations.NodeDisconnected = ep4_node_disconnected;
61560 +
61561 +    rail->r_generic.Operations.FlushFilters     = ep4_flush_filters;
61562 +    rail->r_generic.Operations.FlushQueues     = ep4_flush_queues;
61563 +
61564 +    rail->r_generic.Operations.AllocInputQ     = ep4_alloc_inputq;
61565 +    rail->r_generic.Operations.FreeInputQ      = ep4_free_inputq;
61566 +    rail->r_generic.Operations.EnableInputQ     = ep4_enable_inputq;
61567 +    rail->r_generic.Operations.DisableInputQ    = ep4_disable_inputq;
61568 +    rail->r_generic.Operations.PollInputQ      = ep4_poll_inputq;
61569 +
61570 +    rail->r_generic.Operations.AllocOutputQ     = ep4_alloc_outputq;
61571 +    rail->r_generic.Operations.FreeOutputQ     = ep4_free_outputq;
61572 +    rail->r_generic.Operations.OutputQMsg      = ep4_outputq_msg;
61573 +    rail->r_generic.Operations.OutputQState     = ep4_outputq_state;
61574 +    rail->r_generic.Operations.OutputQSend     = ep4_outputq_send;
61575 +
61576 +    rail->r_generic.Operations.FillOutStats     = ep4_fillout_stats;
61577 +    rail->r_generic.Operations.Debug           = ep4_debug_rail;
61578 +
61579 +    rail->r_generic.Devinfo = dev->dev_devinfo;
61580 +
61581 +    printk ("%s: connected via elan4 rev%c device %d\n", rail->r_generic.Name,
61582 +           'a' + dev->dev_devinfo.dev_revision_id, dev->dev_instance);
61583 +
61584 +    return (EP_RAIL *) rail;
61585 +}
61586 +
61587 +void
61588 +ep4_destroy_rail (EP_RAIL *r)
61589 +{
61590 +    EP4_RAIL *rail = (EP4_RAIL *) r;
61591 +
61592 +    elan4_dereference_device (rail->r_ctxt.ctxt_dev);
61593 +
61594 +    KMEM_FREE (rail, sizeof (EP4_RAIL));
61595 +}
61596 +
61597 +static int
61598 +ep4_attach_rail (EP4_RAIL *r)
61599 +{
61600 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
61601 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
61602 +    unsigned   ctx;
61603 +
61604 +    if (elan4_insertctxt (dev, &rail->r_ctxt, &ep4_trap_ops) != 0)
61605 +       return -ENOMEM;
61606 +    
61607 +    if ((rail->r_routetable = elan4_alloc_routetable (dev, 4)) == NULL)        /* 512 << 4 == 8192 entries */
61608 +    {
61609 +       elan4_removectxt (dev, &rail->r_ctxt);
61610 +       return -ENOMEM;
61611 +    }
61612 +    elan4_set_routetable (&rail->r_ctxt, rail->r_routetable);
61613 +
61614 +    /* Attach to the kernel comms network context */
61615 +    if (elan4_attach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM) < 0)
61616 +    {
61617 +       elan4_free_routetable (dev, rail->r_routetable);
61618 +       elan4_removectxt (dev, &rail->r_ctxt);
61619 +
61620 +       return -EBUSY;
61621 +    }
61622 +
61623 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
61624 +       elan4_attach_filter (&rail->r_ctxt, ctx);
61625 +
61626 +    return 0;
61627 +}
61628 +
61629 +static void
61630 +ep4_detach_rail (EP4_RAIL *rail)
61631 +{
61632 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
61633 +    unsigned   ctx;
61634 +
61635 +    elan4_detach_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM);
61636 +
61637 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
61638 +       elan4_detach_filter (&rail->r_ctxt, ctx);
61639 +
61640 +    if (rail->r_routetable)
61641 +    {
61642 +       elan4_set_routetable (&rail->r_ctxt, NULL);
61643 +       elan4_free_routetable (dev, rail->r_routetable);
61644 +    }
61645 +
61646 +    elan4_removectxt (dev, &rail->r_ctxt);
61647 +}
61648 +
61649 +int
61650 +ep4_start_rail (EP_RAIL *r)
61651 +{
61652 +    EP4_RAIL     *rail = (EP4_RAIL *) r;
61653 +    ELAN4_DEV    *dev  = rail->r_ctxt.ctxt_dev;
61654 +    E4_InputQueue qdesc;
61655 +    int           i, res;
61656 +
61657 +    if ((res = ep4_attach_rail (rail)) < 0)
61658 +       return res;
61659 +
61660 +    /* Initialise main interrupt cookie table */
61661 +    spin_lock_init (&rail->r_intcookie_lock);
61662 +    for (i = 0; i < EP4_INTCOOKIE_HASH_SIZE; i++)
61663 +       INIT_LIST_HEAD (&rail->r_intcookie_hash[i]);
61664 +
61665 +    kmutex_init (&rail->r_haltop_mutex);
61666 +    kcondvar_init (&rail->r_haltop_sleep);
61667 +    spin_lock_init (&rail->r_haltop_lock);
61668 +
61669 +    spin_lock_init (&rail->r_cookie_lock);
61670 +
61671 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_EVENT]);
61672 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_ATOMIC]);
61673 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_SINGLE]);
61674 +    INIT_LIST_HEAD (&rail->r_ecq_list[EP4_ECQ_MAIN]);
61675 +    spin_lock_init (&rail->r_ecq_lock);
61676 +
61677 +    ep_kthread_init (&rail->r_retry_thread);
61678 +    INIT_LIST_HEAD (&rail->r_retry_ops);
61679 +
61680 +    INIT_LIST_HEAD (&rail->r_neterr_ops);
61681 +
61682 +    kmutex_init (&rail->r_flush_mutex);
61683 +    kcondvar_init (&rail->r_flush_sleep);
61684 +
61685 +    /* Allocate the elan visible sdram/main memory */
61686 +    if ((rail->r_elan = ep_alloc_elan (&rail->r_generic, sizeof (EP4_RAIL_ELAN), 0, &rail->r_elan_addr)) == 0 ||
61687 +       (rail->r_main = ep_alloc_main (&rail->r_generic, sizeof (EP4_RAIL_MAIN), 0, &rail->r_main_addr)) == 0)
61688 +    {
61689 +       goto failed;
61690 +    }
61691 +
61692 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
61693 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[i].ev_CountAndType), 0);
61694 +
61695 +    elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
61696 +
61697 +    /* Allocate the system input queues at their fixed elan address */
61698 +    /*   avoid sdram address aliasing by allocating the min sdram pagesize */
61699 +    if (! (rail->r_queuedescs= ep_alloc_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE, SDRAM_PAGE_SIZE, EP_PERM_ALL, 0)))
61700 +       goto failed;
61701 +
61702 +    /* Initialise the input queue descriptor as "full" with no event */
61703 +    qdesc.q_bptr    = 0;
61704 +    qdesc.q_fptr    = 8;
61705 +    qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8);
61706 +    qdesc.q_event   = 0;
61707 +
61708 +    for (i = 0; i < EP_NUM_SYSTEMQ; i++)
61709 +       elan4_sdram_copyq_to_sdram (dev, &qdesc, EP_SYSTEMQ_DESC (rail->r_queuedescs, i), sizeof (E4_InputQueue));
61710 +
61711 +    /* Allocate the resource map for command queue mappings */
61712 +    if ((rail->r_ecq_rmap = ep_rmallocmap (EP4_ECQ_RMAPSIZE, "r_ecq_rmap", 1)) == NULL)
61713 +       goto failed;
61714 +    
61715 +    ep_rmfree (rail->r_ecq_rmap, EP4_ECQ_TOP - EP4_ECQ_BASE, EP4_ECQ_BASE);
61716 +
61717 +    /* register an interrupt cookie & allocate command queues for command queue flushing */
61718 +    rail->r_flush_mcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4);
61719 +    rail->r_flush_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1);
61720 +
61721 +    if (rail->r_flush_mcq == NULL || rail->r_flush_ecq == NULL)
61722 +       goto failed;
61723 +
61724 +    ep4_register_intcookie (rail, &rail->r_flush_intcookie, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event), ep4_flush_interrupt, rail);
61725 +
61726 +    /* startup the retry thread */
61727 +    if (kernel_thread_create (ep4_retry_thread, (void *) rail) == 0)
61728 +       goto failed;
61729 +    ep_kthread_started (&rail->r_retry_thread);
61730 +    
61731 +    ep4_initialise_dma_retries (rail);
61732 +
61733 +    if ((rail->r_event_ecq = ep4_alloc_ecq (rail, CQ_Size1K)) == NULL)
61734 +       goto failed;
61735 +    
61736 +    rail->r_threadcode = threadcode_elan4;
61737 +    if (ep_loadcode (&rail->r_generic, &rail->r_threadcode))
61738 +       goto failed;
61739 +
61740 +    elan4_flush_icache (&rail->r_ctxt);
61741 +
61742 +    if (ep4_probe_init (rail))
61743 +       goto failed;
61744 +
61745 +    /* can now drop the context filter for the system context */
61746 +    elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_HIGH_PRI);
61747 +
61748 +    return 0;
61749 +
61750 + failed:
61751 +    printk ("ep4_start_rail: failed for rail '%s'\n", rail->r_generic.Name);
61752 +    ep4_stop_rail (&rail->r_generic);
61753 +
61754 +    return -ENOMEM;
61755 +}
61756 +
61757 +void
61758 +ep4_stall_rail (EP_RAIL *r)
61759 +{
61760 +    EP4_RAIL *rail = (EP4_RAIL *) r;
61761 +    unsigned  ctx;
61762 +
61763 +    /* Raise all the context filters */
61764 +    elan4_set_filter (&rail->r_ctxt, ELAN4_KCOMM_CONTEXT_NUM, E4_FILTER_DISCARD_ALL);
61765 +
61766 +    for (ctx = ELAN4_KCOMM_BASE_CONTEXT_NUM; ctx <= ELAN4_KCOMM_TOP_CONTEXT_NUM; ctx++)
61767 +       elan4_set_filter (&rail->r_ctxt, ctx, E4_FILTER_DISCARD_ALL);
61768 +}
61769 +
61770 +void
61771 +ep4_stop_rail (EP_RAIL *r)
61772 +{
61773 +    EP4_RAIL *rail = (EP4_RAIL *) r;
61774 +
61775 +    if (rail->r_generic.State == EP_RAIL_STATE_RUNNING) /* undo ep4_position_found() */
61776 +    {
61777 +       ELAN_POSITION *pos  = &rail->r_generic.Position;
61778 +       EP_ADDR        addr = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies));
61779 +
61780 +       ep_free_elan (&rail->r_generic, addr, pos->pos_nodes * sizeof (E4_uint64));
61781 +
61782 +       KMEM_FREE (rail->r_cookies, pos->pos_nodes * sizeof (E4_uint64));
61783 +    }
61784 +
61785 +    ep4_probe_destroy (rail);
61786 +
61787 +    ep_unloadcode (&rail->r_generic, &rail->r_threadcode);
61788 +
61789 +    if (rail->r_event_ecq)
61790 +       ep4_free_ecq (rail, rail->r_event_ecq);
61791 +    rail->r_event_ecq = NULL;
61792 +
61793 +    ep4_finalise_dma_retries (rail);
61794 +
61795 +    ep_kthread_stop (&rail->r_retry_thread);
61796 +    ep_kthread_destroy (&rail->r_retry_thread);
61797 +
61798 +    if (rail->r_flush_intcookie.int_arg)
61799 +       ep4_deregister_intcookie (rail, &rail->r_flush_intcookie);
61800 +    rail->r_flush_intcookie.int_arg = NULL;
61801 +
61802 +    if (rail->r_flush_mcq)
61803 +       ep4_put_ecq (rail, rail->r_flush_mcq, 4);
61804 +    rail->r_flush_mcq = NULL;
61805 +
61806 +    if (rail->r_flush_ecq)
61807 +       ep4_put_ecq (rail, rail->r_flush_ecq, 1);
61808 +    rail->r_flush_ecq = NULL;
61809 +
61810 +    if (rail->r_ecq_rmap)
61811 +       ep_rmfreemap (rail->r_ecq_rmap);
61812 +    
61813 +    if (rail->r_queuedescs)
61814 +       ep_free_memory_elan (&rail->r_generic, EP_SYSTEM_QUEUE_BASE);
61815 +    rail->r_queuedescs = 0;
61816 +
61817 +    if (rail->r_elan)
61818 +       ep_free_elan (&rail->r_generic, rail->r_elan_addr, sizeof (EP4_RAIL_ELAN));
61819 +    rail->r_elan = 0;
61820 +
61821 +    if (rail->r_main)
61822 +       ep_free_main (&rail->r_generic, rail->r_main_addr, sizeof (EP4_RAIL_MAIN));
61823 +    rail->r_main = NULL;
61824 +
61825 +    kcondvar_destroy (&rail->r_flush_sleep);
61826 +    kmutex_destroy (&rail->r_flush_mutex);
61827 +
61828 +    spin_lock_destroy (&rail->r_ecq_lock);
61829 +    spin_lock_destroy (&rail->r_cookie_lock);
61830 +
61831 +    spin_lock_destroy (&rail->r_haltop_lock);
61832 +    kcondvar_destroy(&rail->r_haltop_sleep);
61833 +    kmutex_destroy (&rail->r_haltop_mutex);
61834 +    spin_lock_destroy (&rail->r_intcookie_lock);
61835 +
61836 +    ep4_detach_rail (rail);
61837 +}
61838 +
61839 +void
61840 +ep4_position_found (EP_RAIL *r, ELAN_POSITION *pos)
61841 +{
61842 +    EP4_RAIL   *rail = (EP4_RAIL *) r;
61843 +    sdramaddr_t cookies;
61844 +    EP_ADDR     addr;
61845 +    int         i;
61846 +
61847 +    KMEM_ZALLOC (rail->r_cookies, E4_uint64 *, pos->pos_nodes * sizeof (E4_uint64), 1);
61848 +
61849 +    if (! (cookies = ep_alloc_elan (&rail->r_generic, pos->pos_nodes * sizeof (E4_uint64), 0, &addr)))
61850 +       panic ("ep4_position_found: cannot allocate elan cookies array\n");
61851 +
61852 +    for (i = 0; i < pos->pos_nodes; i++)
61853 +       elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, cookies + (i * sizeof (E4_uint64)), 0);
61854 +    
61855 +    for (i = 0; i < pos->pos_nodes; i++)
61856 +       rail->r_cookies[i] = 0;
61857 +
61858 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_nodeid), pos->pos_nodeid);
61859 +    elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_cookies), addr);
61860 +
61861 +    ep4_probe_position_found (rail, pos);
61862 +}
61863 +
61864 +sdramaddr_t
61865 +ep4_sdram_alloc (EP_RAIL *r, EP_ADDR addr, unsigned size)
61866 +{
61867 +    ELAN4_DEV *dev = ((EP4_RAIL *) r)->r_ctxt.ctxt_dev;
61868 +
61869 +    if (size >= SDRAM_PAGE_SIZE)
61870 +       return elan4_sdram_alloc (dev, size);
61871 +    else
61872 +    {
61873 +       sdramaddr_t block = elan4_sdram_alloc (dev, SDRAM_PAGE_SIZE);
61874 +       sdramaddr_t sdram = block + (addr & (SDRAM_PAGE_SIZE-1));
61875 +
61876 +       /* free the portion before sdram */
61877 +       if (sdram > block)
61878 +           elan4_sdram_free (dev, block, sdram - block);
61879 +
61880 +       /* free the portion after sdram */
61881 +       if ((block + SDRAM_PAGE_SIZE) > (sdram + size))
61882 +           elan4_sdram_free (dev, sdram + size, block + SDRAM_PAGE_SIZE - (sdram + size));
61883 +
61884 +       return sdram;
61885 +    }
61886 +}
61887 +
61888 +void
61889 +ep4_sdram_free (EP_RAIL *r, sdramaddr_t addr, unsigned size)
61890 +{
61891 +    elan4_sdram_free (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, size);
61892 +}
61893 +
61894 +void
61895 +ep4_sdram_writeb (EP_RAIL *r, sdramaddr_t addr, unsigned char val)
61896 +{
61897 +    elan4_sdram_writeb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev, addr, val);
61898 +}
61899 +
61900 +void
61901 +ep4_flush_tlb (EP_RAIL *r)
61902 +{
61903 +    elan4mmu_flush_tlb (((EP4_RAIL *) r)->r_ctxt.ctxt_dev);
61904 +}
61905 +
61906 +void
61907 +ep4_load_system_route (EP_RAIL *r, unsigned vp, unsigned lowNode, unsigned highNode)
61908 +{
61909 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
61910 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
61911 +    E4_VirtualProcessEntry route;
61912 +
61913 +    if (elan4_generate_route (&rail->r_generic.Position, &route, ELAN4_KCOMM_CONTEXT_NUM, 
61914 +                             lowNode, highNode, FIRST_SYSTEM_PACKET | FIRST_HIGH_PRI | FIRST_TIMEOUT(3)) < 0)
61915 +    {
61916 +       panic ("ep4_load_system_route: generate route failed\n");
61917 +       /* NOTREACHED */
61918 +    }
61919 +
61920 +    elan4_write_route (dev, rail->r_routetable, vp, &route);
61921 +}
61922 +
61923 +void
61924 +ep4_load_node_route (EP_RAIL *r, unsigned nodeId)
61925 +{
61926 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
61927 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
61928 +    E4_VirtualProcessEntry route;
61929 +
61930 +    if (elan4_generate_route (&rail->r_generic.Position, &route, EP4_CONTEXT_NUM(rail->r_generic.Position.pos_nodeid),
61931 +                             nodeId, nodeId, FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3)) < 0)
61932 +    {
61933 +       panic ("ep4_load_node_route: generate route failed\n");
61934 +       /* NOTREACHED */
61935 +    }
61936 +
61937 +    elan4_write_route (dev, rail->r_routetable, EP_VP_DATA(nodeId), &route);
61938 +}
61939 +
61940 +void
61941 +ep4_unload_node_route (EP_RAIL *r, unsigned nodeId)
61942 +{
61943 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
61944 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
61945 +    
61946 +    elan4_invalidate_route (dev, rail->r_routetable, EP_VP_DATA(nodeId));
61947 +}
61948 +
61949 +void
61950 +ep4_lower_filter (EP_RAIL *r, unsigned nodeId)
61951 +{
61952 +    EP4_RAIL *rail = (EP4_RAIL *) r;
61953 +
61954 +    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_HIGH_PRI);
61955 +}
61956 +
61957 +void
61958 +ep4_raise_filter (EP_RAIL *r, unsigned nodeId)
61959 +{
61960 +    EP4_RAIL *rail = (EP4_RAIL *) r;
61961 +
61962 +    elan4_set_filter (&rail->r_ctxt, EP4_CONTEXT_NUM(nodeId), E4_FILTER_DISCARD_ALL);
61963 +}
61964 +
61965 +void
61966 +ep4_node_disconnected (EP_RAIL *r, unsigned nodeId)
61967 +{
61968 +    ep4_free_stalled_dmas ((EP4_RAIL *) r, nodeId);
61969 +}
61970 +
61971 +void
61972 +ep4_fillout_stats(EP_RAIL *r, char *str) 
61973 +{
61974 +    /* no stats here yet */
61975 +    /* EP4_RAIL *ep4rail = (EP4_RAIL *)r; */
61976 +}
61977 diff -urN clean/drivers/net/qsnet/ep/kcomm_elan4.h linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.h
61978 --- clean/drivers/net/qsnet/ep/kcomm_elan4.h    1969-12-31 19:00:00.000000000 -0500
61979 +++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_elan4.h      2005-07-20 08:01:34.000000000 -0400
61980 @@ -0,0 +1,443 @@
61981 +/*
61982 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
61983 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
61984 + *
61985 + *    For licensing information please see the supplied COPYING file
61986 + *
61987 + */
61988 +
61989 +#ifndef __EP_KCOMM_ELAN4_H
61990 +#define __EP_KCOMM_ELAN4_H
61991 +
61992 +#ident "@(#)$Id: kcomm_elan4.h,v 1.19.2.1 2005/07/20 12:01:34 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
61993 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_elan4.h,v $*/
61994 +
61995 +#include <elan4/types.h>
61996 +
61997 +#include <elan4/dma.h>
61998 +#include <elan4/events.h>
61999 +#include <elan4/commands.h>
62000 +
62001 +#if !defined(__elan4__)
62002 +#include <elan4/device.h>
62003 +#endif /* ! defined(__elan4__) */
62004 +
62005 +/* private address allocation */
62006 +#define EP4_TEXT_BASE                  0xF8000000              /* base address for thread code (defined in makerules.elan4) */
62007 +#define EP4_ECQ_BASE                   0xFF000000              /* address space for mapping command queues */
62008 +#define EP4_ECQ_TOP                    0xFF800000
62009 +
62010 +#define EP4_ECQ_RMAPSIZE               128
62011 +#define EP4_STACK_SIZE                 1024                    /* default thread code stack size */
62012 +#define EP4_MAX_LEVELS                 8                       /* same as ELAN_MAX_LEVELS */
62013 +
62014 +/* context number allocation */
62015 +#define EP4_CONTEXT_NUM(nodeId)                (ELAN4_KCOMM_BASE_CONTEXT_NUM + (nodeId))
62016 +#define EP4_CONTEXT_ISDATA(ctx)                ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM && \
62017 +                                        (ctx) <= ELAN4_KCOMM_TOP_CONTEXT_NUM)
62018 +#define EP4_CONTEXT_TO_NODE(ctx)       ((ctx) - ELAN4_KCOMM_BASE_CONTEXT_NUM)
62019 +
62020 +/*
62021 + * network error cookie format:
62022 + *  ------------------------------------------------------
62023 + *  | unique cookie value | Thread | Remote | STEN | DMA |
62024 + *  ------------------------------------------------------
62025 + * [63:4] Cookie   - unique cookie number
62026 + * [3]    Thread   - cookie generated by thread code
62027 + * [2]    Remote   - cookie generated by remote end
62028 + * [1]    STEN     - cookie is for a STEN packet
62029 + * [0]    DMA      - cookie is for a DMA
62030 + */
62031 +#define EP4_COOKIE_DMA         (1    << 0)
62032 +#define EP4_COOKIE_STEN                (1    << 1)
62033 +#define EP4_COOKIE_REMOTE      (1    << 2)
62034 +#define EP4_COOKIE_THREAD      (1    << 3)
62035 +#define EP4_COOKIE_INC         (1ull << 4)
62036 +
62037 +#define EP4_COOKIE_STRING(val) (long long)(((val) & ~(EP4_COOKIE_INC-1)) >> 4), \
62038 +                               ((val) & EP4_COOKIE_DMA)    ? ",dma" : "", \
62039 +                               ((val) & EP4_COOKIE_REMOTE) ? ",remote" : "", \
62040 +                               ((val) & EP4_COOKIE_THREAD) ? ",thread" : "", \
62041 +                               ((val) & EP4_COOKIE_STEN)   ? ",sten" : ""
62042 +/*
62043 + * Done "word" values 
62044 + */
62045 +#define EP4_STATE_FREE         0
62046 +#define EP4_STATE_ACTIVE       1
62047 +#define EP4_STATE_FINISHED     2
62048 +#define EP4_STATE_FAILED       3
62049 +#define EP4_STATE_PRIVATE      4
62050 +
62051 +#define EP4_EVENT_FIRING_TLIMIT        16384                   /* 1023 uS */
62052 +
62053 +/* forward declarations */
62054 +typedef struct ep4_rail        EP4_RAIL;
62055 +
62056 +#if !defined(__elan4__)
62057 +
62058 +typedef struct ep4_intcookie
62059 +{
62060 +    struct list_head            int_link;
62061 +    E4_uint64                   int_val;
62062 +    void                      (*int_callback)(EP4_RAIL *rail, void *arg);
62063 +    void                       *int_arg;
62064 +} EP4_INTCOOKIE;
62065 +
62066 +#define EP4_INTCOOKIE_HASH_SIZE        256
62067 +#define EP4_INTCOOKIE_HASH(a)          ((((a) >> 3) ^ ((a) >> 7) ^ ((a) >> 11)) & (EP4_INTCOOKIE_HASH_SIZE-1))
62068 +
62069 +typedef struct ep4_ecq
62070 +{
62071 +    struct list_head   ecq_link;                               /* linked on r_ecq_list */
62072 +    ELAN4_INTOP                ecq_intop;                              /* main interrupt op space */
62073 +    ELAN4_CQ          *ecq_cq;                                 /* command queue */
62074 +    E4_Addr            ecq_addr;                               /* address mapped into elan */
62075 +    unsigned int       ecq_avail;                              /* # dwords still available */
62076 +
62077 +    spinlock_t         ecq_lock;                               /* spinlock for main accesses */
62078 +    sdramaddr_t                ecq_event;                              /* event for flushing "event" queues */
62079 +    EP_ADDR            ecq_event_addr;
62080 +    struct ep4_ecq     *ecq_flushcq;                           /*  and command port to issue setevent to */
62081 +} EP4_ECQ;
62082 +
62083 +#define EP4_ECQ_EVENT          0                               /* command queues targeted by multi-block events */
62084 +#define EP4_ECQ_ATOMIC         1                               /* command queues targeted by atomic store operations */
62085 +#define EP4_ECQ_SINGLE         2                               /* command queues targeted by single word commands from main */
62086 +#define EP4_ECQ_MAIN           3                               /* command queues targeted by multi word commands from main */
62087 +#define EP4_NUM_ECQ            4
62088 +
62089 +#define EP4_ECQ_Size(which)            ((which) == EP4_ECQ_EVENT  ? CQ_Size64K : \
62090 +                                        (which) == EP4_ECQ_ATOMIC ? CQ_Size8K  : \
62091 +                                        (which) == EP4_ECQ_SINGLE ? CQ_Size1K  : \
62092 +                                        (which) == EP4_ECQ_MAIN   ? CQ_Size8K  : \
62093 +                                        CQ_Size1K)
62094 +
62095 +typedef struct ep4_dma_retry
62096 +{
62097 +    struct list_head    retry_link;                            /* chained on free/retry list */
62098 +    unsigned long      retry_time;                             /* "lbolt" to retry at */
62099 +    E4_DMA             retry_dma;                              /* DMA (in main memory) */
62100 +} EP4_DMA_RETRY;
62101 +
62102 +#define EP4_DMA_RETRY_CQSIZE           CQ_Size8K                               /* size of command queue for dma retry */
62103 +#define EP4_DMA_RETRY_FLOWCNT          (CQ_Size(EP4_DMA_RETRY_CQSIZE)/72)      /* # of reissued DMA's which can fit in */
62104 +
62105 +typedef struct ep4_inputq
62106 +{
62107 +    EP4_INTCOOKIE      q_intcookie;
62108 +    unsigned int       q_slotSize;
62109 +    unsigned int       q_slotCount;
62110 +
62111 +    void              *q_slots;
62112 +    EP_ADDR            q_slotsAddr;
62113 +    
62114 +    EP_INPUTQ_CALLBACK *q_callback;
62115 +    void              *q_arg;
62116 +
62117 +    sdramaddr_t                q_desc;
62118 +    EP_ADDR            q_descAddr;
62119 +    EP_ADDR            q_eventAddr;
62120 +    EP4_ECQ           *q_wcq;                                  /* command queue to issue waitevent to */
62121 +    EP4_ECQ           *q_ecq;                                  /* command queue targeted by event to generate interrupt */
62122 +
62123 +    EP_ADDR            q_fptr;                                 /* cached current front pointer */
62124 +    EP_ADDR            q_last;                                 /* elan addr for last queue slot  */
62125 +
62126 +    atomic_t           q_fired;                                /* atomic flag that interrupt received */
62127 +    unsigned int       q_count;                                /* count of slots consumed */
62128 +} EP4_INPUTQ;
62129 +
62130 +typedef struct ep4_outputq
62131 +{
62132 +    spinlock_t         q_lock;
62133 +    unsigned int       q_slotCount;
62134 +    unsigned int       q_slotSize;
62135 +    unsigned int        q_dwords;
62136 +    ELAN4_CQ          *q_cq;
62137 +    void              *q_main;
62138 +    EP_ADDR            q_mainAddr;
62139 +    unsigned int       q_retries;
62140 +} EP4_OUTPUTQ;
62141 +
62142 +#endif /* ! defined(__elan4__) */
62143 +
62144 +typedef struct ep4_check_sten
62145 +{
62146 +    E4_uint64          c_reset_event_cmd;                      /* WRITEDWORD to reset start event */
62147 +    E4_uint64          c_reset_event_value;
62148 +
62149 +    E4_uint64          c_open;                                 /* OPEN VP_PROBE(lvl) */
62150 +    E4_uint64          c_trans_traceroute0;                    /* SENDTRANS TR_TRACEROUTE 0s */
62151 +    E4_uint64          c_addr_traceroute0;
62152 +    E4_uint64          c_data_traceroute0[8];
62153 +    E4_uint64          c_trans_traceroute1;                    /* SENDTRANS TR_TRACEROUTE 1s */
62154 +    E4_uint64          c_addr_traceroute1;
62155 +    E4_uint64          c_data_traceroute1[8];
62156 +    E4_uint64          c_trans_sendack;                        /* SENDTRANS SENDACK */
62157 +    E4_uint64          c_addr_sendack;
62158 +    
62159 +    E4_uint64          c_guard_ok;                             /* GUARD OK - write level */
62160 +    E4_uint64          c_writedword_ok;
62161 +    E4_uint64          c_value_ok;
62162 +    
62163 +    E4_uint64          c_guard_fail;                           /* GUARD FAIL - chain setevent/write fail */
62164 +    E4_uint64          c_setevent_fail;
62165 +    E4_uint64          c_setevent_nop;
62166 +    E4_uint64          c_nop_pad;
62167 +} EP4_CHECK_STEN;
62168 +
62169 +#define EP4_CHECK_STEN_NDWORDS (sizeof (EP4_CHECK_STEN) >> 3)
62170 +
62171 +typedef struct ep4_rail_elan
62172 +{
62173 +    EP4_CHECK_STEN     r_check_sten[EP4_MAX_LEVELS];
62174 +    E4_Event32         r_check_fail;                                   /* Check failed (== r_check_start[-1]) */
62175 +    E4_Event32          r_check_start[EP4_MAX_LEVELS];
62176 +
62177 +    E4_Event32         r_qevents[EP_NUM_SYSTEMQ];
62178 +    E4_Event32         r_flush_event;
62179 +
62180 +    E4_uint64          r_nodeid;
62181 +#ifdef __elan4__
62182 +    E4_uint64         *r_cookies;
62183 +#else
62184 +    E4_Addr            r_cookies;
62185 +#endif
62186 +} EP4_RAIL_ELAN;
62187 +
62188 +#define TRACEROUTE_ENTRIES     16                      /* 2 * ELAN_MAX_LEVELS */
62189 +#define TRACEROUTE_NDWORDS     (TRACEROUTE_ENTRIES/2)
62190 +
62191 +typedef struct ep4_rail_main
62192 +{
62193 +    E4_uint32          r_probe_dest0[TRACEROUTE_ENTRIES];
62194 +    E4_uint32          r_probe_dest1[TRACEROUTE_ENTRIES];
62195 +    E4_uint64          r_probe_result;
62196 +    E4_uint64          r_probe_level;
62197 +
62198 +    E4_uint64           r_dma_flowcnt;                         /*  count of dma's queued */
62199 +} EP4_RAIL_MAIN;
62200 +
62201 +#define EP4_PROBE_ACTIVE       (0xffff)
62202 +#define EP4_PROBE_FAILED       (0xfffe)
62203 +
62204 +#if !defined(__elan4__)
62205 +
62206 +typedef struct ep4_retry_ops
62207 +{
62208 +    struct list_head   op_link;
62209 +    unsigned long     (*op_func)(EP4_RAIL *rail, void *arg, unsigned long nextRunTime);
62210 +    void              *op_arg;
62211 +} EP4_RETRY_OPS;
62212 +
62213 +typedef struct ep4_neterr_ops
62214 +{
62215 +    struct list_head   op_link;
62216 +    void             (*op_func) (EP4_RAIL *rail, void *arg, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
62217 +    void              *op_arg;
62218 +} EP4_NETERR_OPS;
62219 +
62220 +struct ep4_rail
62221 +{
62222 +    EP_RAIL            r_generic;
62223 +    ELAN4_CTXT         r_ctxt;
62224 +    ELAN4_ROUTE_TABLE  *r_routetable;
62225 +    
62226 +    spinlock_t         r_intcookie_lock;
62227 +    struct list_head    r_intcookie_hash[EP4_INTCOOKIE_HASH_SIZE];
62228 +
62229 +    sdramaddr_t                r_elan;
62230 +    EP_ADDR            r_elan_addr;
62231 +    EP4_RAIL_MAIN      *r_main;
62232 +    EP_ADDR            r_main_addr;
62233 +    
62234 +    EP_CODE            r_threadcode;                           /* copy of thread code */
62235 +
62236 +    sdramaddr_t                r_queuedescs;                           /* systemq queue descriptors */
62237 +
62238 +    E4_uint64         *r_cookies;                              /* network error cookies */
62239 +    spinlock_t          r_cookie_lock;                         /*    and spin lock */
62240 +
62241 +    kcondvar_t         r_probe_wait;                           /* network position probing */
62242 +    spinlock_t         r_probe_lock;
62243 +    volatile int       r_probe_done;
62244 +    EP4_INTCOOKIE      r_probe_intcookie;
62245 +    EP4_ECQ           *r_probe_cq;
62246 +    E4_uint32          r_probe_source0[TRACEROUTE_ENTRIES];
62247 +    E4_uint32          r_probe_source1[TRACEROUTE_ENTRIES];
62248 +
62249 +    kmutex_t           r_haltop_mutex;                         /* halt/flush operations */
62250 +    ELAN4_HALTOP       r_haltop;
62251 +    ELAN4_DMA_FLUSHOP   r_flushop;
62252 +    kcondvar_t         r_haltop_sleep;
62253 +    spinlock_t         r_haltop_lock;
62254 +
62255 +    struct list_head    r_ecq_list[EP4_NUM_ECQ];               /* list of statically allocated command queues */
62256 +    EP_RMAP           *r_ecq_rmap;                             /* resource map for command queue mappings */
62257 +    spinlock_t          r_ecq_lock;                            /* spinlock for list/space management */
62258 +
62259 +    kmutex_t           r_flush_mutex;                          /* serialize command queue flushing */
62260 +    unsigned long      r_flush_count;                          /* # setevents issued for flushing */
62261 +    EP4_ECQ           *r_flush_mcq;                            /*   and command queue for waitevent */
62262 +    EP4_ECQ            *r_flush_ecq;                           /*   and command queue for interrupt */
62263 +    EP4_INTCOOKIE       r_flush_intcookie;                     /*   and interrupt cookie */
62264 +    kcondvar_t          r_flush_sleep;                         /*   and place to sleep ... */
62265 +
62266 +    EP_KTHREAD         r_retry_thread;                         /* retry thread */
62267 +    struct list_head    r_retry_ops;                           /*  list of retry operations */
62268 +
62269 +    EP4_RETRY_OPS       r_dma_ops;                             /* dma retry operations */
62270 +    EP4_ECQ           *r_dma_ecq;                              /*   command queue to reissue DMAs */
62271 +    E4_uint64           r_dma_flowcnt;                         /*   count of dma's reissued */
62272 +    struct list_head    r_dma_retrylist[EP_NUM_RETRIES];       /*   retry lists  */
62273 +    struct list_head    r_dma_freelist;                                /*   and free list */
62274 +    spinlock_t          r_dma_lock;                            /*   and spinlock to protect lists */
62275 +    unsigned long       r_dma_allocated;                       /*   # retries allocated*/
62276 +    unsigned long       r_dma_reserved;                                /*   # retries reserved */
62277 +
62278 +    EP4_ECQ           *r_event_ecq;                            /* command queue for occasional setevents */
62279 +
62280 +    struct list_head    r_neterr_ops;                          /* list of neterr fixup operations */
62281 +
62282 +    ELAN4_IPROC_TRAP    r_iproc_trap;
62283 +    ELAN4_TPROC_TRAP    r_tproc_trap;
62284 +} ;
62285 +
62286 +#define EP4_CTXT_TO_RAIL(ctxt) ((EP4_RAIL *) (((unsigned long) (ctxt)) - offsetof (EP4_RAIL, r_ctxt)))
62287 +
62288 +#if defined(DEBUG_ASSERT)
62289 +#define EP4_ASSERT(rail,EXPR)                  EP_ASSERT(&((rail)->r_generic), EXPR)
62290 +#define EP4_SDRAM_ASSERT(rail,off,value)       EP4_ASSERT(rail, (sdram_assert ? elan4_sdram_readq ((rail)->r_ctxt.ctxt_dev, (off)) == (value) : 1))
62291 +#else
62292 +#define EP4_ASSERT(rail,EXPR)
62293 +#define EP4_SDRAM_ASSERT(rail,off,value)
62294 +#endif
62295 +
62296 +/* kcomm_elan4.c */
62297 +extern EP_RAIL    *ep4_create_rail (EP_SYS *sys, ELAN4_DEV *dev);
62298 +extern void        ep4_destroy_rail (EP_RAIL *rail);
62299 +
62300 +extern int         ep4_start_rail (EP_RAIL *rail);
62301 +extern void        ep4_stall_rail (EP_RAIL *rail);
62302 +extern void        ep4_stop_rail (EP_RAIL *rail);
62303 +
62304 +extern void       ep4_debug_rail (EP_RAIL *rail);
62305 +
62306 +extern void        ep4_position_found (EP_RAIL *rail, ELAN_POSITION *pos);
62307 +
62308 +extern sdramaddr_t ep4_sdram_alloc (EP_RAIL *rail, EP_ADDR addr, unsigned int size);
62309 +extern void        ep4_sdram_free (EP_RAIL *rail, sdramaddr_t addr, unsigned int size);
62310 +extern void        ep4_sdram_writeb (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
62311 +
62312 +extern void        ep4_flush_tlb (EP_RAIL *r);
62313 +extern void        ep4_load_system_route (EP_RAIL *r, unsigned int vp, unsigned int lowNode, unsigned int highNode);
62314 +extern void        ep4_load_node_route (EP_RAIL *r, unsigned int nodeId);
62315 +extern void        ep4_unload_node_route (EP_RAIL *r, unsigned int nodeId);
62316 +extern void        ep4_lower_filter (EP_RAIL *r, unsigned int nodeId);
62317 +extern void        ep4_raise_filter (EP_RAIL *rail, unsigned int nodeId);
62318 +extern void        ep4_node_disconnected (EP_RAIL *r, unsigned int nodeId);
62319 +
62320 +/* kmap_elan4.c */
62321 +extern void        ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr);
62322 +extern void        ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr);
62323 +extern void        ep4_cport_map (EP_RAIL *r, EP_ADDR eaddr, unsigned long cqaddr, unsigned int len, unsigned int perm);
62324 +extern void        ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len);
62325 +extern void       *ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages);
62326 +extern void        ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private);
62327 +extern void        ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm);
62328 +extern physaddr_t  ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index);
62329 +extern void        ep4_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages);
62330 +
62331 +/* kmsg_elan4.c */
62332 +extern EP_INPUTQ  *ep4_alloc_inputq (EP_RAIL *r, unsigned int qnum, unsigned int slotSize, unsigned int slotCount,
62333 +                                    EP_INPUTQ_CALLBACK *callback, void *arg);
62334 +extern void        ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q);
62335 +extern void        ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q);
62336 +extern void        ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q);
62337 +extern int         ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
62338 +extern EP_OUTPUTQ *ep4_alloc_outputq (EP_RAIL *r, unsigned int slotSize, unsigned int slotCount);
62339 +extern void        ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q);
62340 +extern void       *ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
62341 +extern int         ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum);
62342 +extern int         ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned int slotNum, unsigned int size,
62343 +                                    unsigned int nodeId, unsigned int qnum, unsigned int retries);
62344 +
62345 +/* probenetwork_elan4.c */
62346 +extern int         ep4_probe_init (EP4_RAIL *r);
62347 +extern void        ep4_probe_destroy (EP4_RAIL *r);
62348 +extern void        ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos);
62349 +extern int         ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw);
62350 +extern int         ep4_check_position (EP_RAIL *rail);
62351 +
62352 +/* support_elan4.c */
62353 +extern ELAN4_TRAP_OPS ep4_trap_ops;
62354 +extern void           ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg);
62355 +extern void           ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp);
62356 +extern EP4_INTCOOKIE *ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie);
62357 +extern E4_uint64      ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node);
62358 +
62359 +extern void           ep4_flush_filters (EP_RAIL *r);
62360 +extern void           ep4_flush_queues (EP_RAIL *r);
62361 +extern void          ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc);
62362 +
62363 +extern EP4_ECQ       *ep4_alloc_ecq (EP4_RAIL *rail, unsigned int cqsize);
62364 +extern void           ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq);
62365 +extern EP4_ECQ      *ep4_get_ecq (EP4_RAIL *rail, unsigned int which, unsigned int ndwords);
62366 +extern void           ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned int ndwords);
62367 +
62368 +extern void           ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag);
62369 +extern void           ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event);
62370 +extern void           ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1);
62371 +
62372 +extern void           ep4_flush_interrupt (EP4_RAIL *rail, void *arg);
62373 +extern void           ep4_flush_ecqs (EP4_RAIL *rail);
62374 +
62375 +extern void           ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop, 
62376 +                                      EP_ADDR stackAddr, E4_Addr startpc, int nargs,...);
62377 +
62378 +extern void           ep4_initialise_dma_retries (EP4_RAIL *rail);
62379 +extern void           ep4_finalise_dma_retries (EP4_RAIL *rail);
62380 +extern int            ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, unsigned int attr);
62381 +extern void          ep4_release_dma_retries(EP4_RAIL *rail, unsigned int count);
62382 +extern void           ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval);
62383 +extern void           ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma);
62384 +extern void           ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId);
62385 +extern void           ep4_display_rail (EP4_RAIL *rail);
62386 +
62387 +extern void           ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
62388 +extern void           ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
62389 +extern void           ep4_retry_thread (EP4_RAIL *rail);
62390 +
62391 +/* neterr_elan4.c */
62392 +extern void           ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
62393 +extern void           ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops);
62394 +extern void           ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
62395 +
62396 +/* commands_elan4.c */
62397 +extern void           elan4_nop_cmd (ELAN4_CQ *cq, E4_uint64 tag);
62398 +extern void           elan4_write_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
62399 +extern void           elan4_add_dword_cmd (ELAN4_CQ *cq, E4_Addr addr, E4_uint64 data);
62400 +extern void           elan4_copy64_cmd (ELAN4_CQ *cq, E4_Addr from, E4_Addr to, E4_uint32 datatype);
62401 +extern void           elan4_interrupt_cmd (ELAN4_CQ *cq, E4_uint64 cookie);
62402 +extern void           elan4_run_thread_cmd (ELAN4_CQ *cq, E4_ThreadRegs *regs);
62403 +extern void           elan4_run_dma_cmd (ELAN4_CQ *cq, E4_DMA *dma);
62404 +extern void           elan4_set_event_cmd (ELAN4_CQ *cq, E4_Addr event);
62405 +extern void           elan4_set_eventn_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint32 count);
62406 +extern void           elan4_wait_event_cmd (ELAN4_CQ *cq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1);
62407 +extern void           elan4_open_packet (ELAN4_CQ *cq, E4_uint64 command);
62408 +extern void           elan4_guard (ELAN4_CQ *cq, E4_uint64 command);
62409 +extern void           elan4_sendtrans0 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr);
62410 +extern void           elan4_sendtrans1 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0);
62411 +extern void           elan4_sendtrans2 (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 p0, E4_uint64 p1);
62412 +extern void           elan4_sendtransn (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, ...);
62413 +extern void           elan4_sendtransp (ELAN4_CQ *cq, E4_uint16 trtype, E4_uint64 addr, E4_uint64 *ptr);
62414 +
62415 +extern void           ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
62416 +extern void          ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops);
62417 +extern void           ep4_retry_thread (EP4_RAIL *rail);
62418 +
62419 +extern void           ep4_fillout_stats(EP_RAIL *rail, char *str);
62420 +
62421 +#endif /* ! defined(__elan4__) */
62422 +
62423 +#endif /* __EP_KCOMM_ELAN4_H */
62424 diff -urN clean/drivers/net/qsnet/ep/kcomm_vp.h linux-2.6.9/drivers/net/qsnet/ep/kcomm_vp.h
62425 --- clean/drivers/net/qsnet/ep/kcomm_vp.h       1969-12-31 19:00:00.000000000 -0500
62426 +++ linux-2.6.9/drivers/net/qsnet/ep/kcomm_vp.h 2004-03-24 06:32:56.000000000 -0500
62427 @@ -0,0 +1,36 @@
62428 +/*
62429 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62430 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
62431 + *
62432 + *    For licensing information please see the supplied COPYING file
62433 + *
62434 + */
62435 +
62436 +#ifndef __EP_KCOMM_VP_H
62437 +#define __EP_KCOMM_VP_H
62438 +
62439 +#ident "@(#)$Id: kcomm_vp.h,v 1.2 2004/03/24 11:32:56 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
62440 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_vp.h,v $*/
62441 +
62442 +#define EP_MAX_NODES                   2048                    /* Max nodes we support */
62443 +
62444 +/* virtual process allocation */
62445 +#define EP_VP_NODE_BASE                        (0)
62446 +#define EP_VP_DATA_BASE                        (EP_VP_NODE_BASE + EP_MAX_NODES)
62447 +#define EP_VP_PROBE_BASE               (EP_VP_DATA_BASE + EP_MAX_NODES)
62448 +#define EP_VP_PROBE_COUNT              ELAN_MAX_LEVELS
62449 +
62450 +#define EP_VP_BCAST_BASE               (EP_VP_PROBE_BASE + EP_VP_PROBE_COUNT)
62451 +#define EP_VP_BCAST_COUNT              (CM_SGMTS_PER_LEVEL * (CM_MAX_LEVELS - 1) + 1)
62452 +
62453 +#define EP_VP_NODE(nodeId)             (EP_VP_NODE_BASE + (nodeId))
62454 +#define EP_VP_DATA(nodeId)             (EP_VP_DATA_BASE + (nodeId))
62455 +#define EP_VP_PROBE(lvl)               (EP_VP_PROBE_BASE + (lvl))
62456 +#define EP_VP_BCAST(lvl,sgmt)          (EP_VP_BCAST_BASE + ((lvl) - 1)*CM_SGMTS_PER_LEVEL + (sgmt))
62457 +
62458 +#define EP_VP_TO_NODE(vp)              ((vp) & (EP_MAX_NODES-1))
62459 +#define EP_VP_ISDATA(vp)               ((vp) >= EP_VP_DATA_BASE && (vp) < (EP_VP_DATA_BASE + EP_MAX_NODES))
62460 +
62461 +#endif /* __EP_KCOMM_VP_H */
62462 +
62463 +
62464 diff -urN clean/drivers/net/qsnet/ep/kmap.c linux-2.6.9/drivers/net/qsnet/ep/kmap.c
62465 --- clean/drivers/net/qsnet/ep/kmap.c   1969-12-31 19:00:00.000000000 -0500
62466 +++ linux-2.6.9/drivers/net/qsnet/ep/kmap.c     2004-12-14 05:19:23.000000000 -0500
62467 @@ -0,0 +1,561 @@
62468 +/*
62469 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
62470 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
62471 + *
62472 + *    For licensing information please see the supplied COPYING file
62473 + *
62474 + */
62475 +
62476 +#ident "@(#)$Id: kmap.c,v 1.12 2004/12/14 10:19:23 mike Exp $"
62477 +/*      $Source: /cvs/master/quadrics/epmod/kmap.c,v $*/
62478 +
62479 +#include <qsnet/kernel.h>
62480 +#include <qsnet/kpte.h>
62481 +
62482 +#include <elan/kcomm.h>
62483 +
62484 +#include "debug.h"
62485 +
62486 +#if defined(DIGITAL_UNIX)
62487 +#  define kernel_map                   (first_task->map)
62488 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
62489 +#elif defined(LINUX)
62490 +#  define kernel_map                   get_kern_mm()
62491 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
62492 +#elif defined(SOLARIS)
62493 +#  define kernel_map                   &kas
62494 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
62495 +#endif
62496 +
62497 +void
62498 +ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned long len,  unsigned int perm, int ep_attr)
62499 +{
62500 +    rail->Operations.KaddrMap (rail, eaddr, kaddr, len, perm, ep_attr);
62501 +}
62502 +
62503 +void
62504 +ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr)
62505 +{
62506 +    rail->Operations.SdramMap (rail, eaddr, saddr, len, perm, ep_attr);
62507 +}
62508 +
62509 +void
62510 +ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len)
62511 +{
62512 +    rail->Operations.Unmap (rail, eaddr, len);
62513 +}
62514 +
62515 +void
62516 +ep_perrail_dvma_sync (EP_RAIL *rail)
62517 +{
62518 +    if (rail->TlbFlushRequired)
62519 +    {
62520 +       rail->TlbFlushRequired = 0;
62521 +
62522 +       rail->Operations.FlushTlb (rail);
62523 +    }
62524 +}
62525 +
62526 +
62527 +static int ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask);
62528 +
62529 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
62530 +static uint16_t ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum);
62531 +#endif
62532 +
62533 +EP_NMH_OPS ep_dvma_nmh_ops = 
62534 +{
62535 +    ep_dvma_map_rails,
62536 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
62537 +    ep_dvma_calc_check_sum
62538 +#endif
62539 +};
62540 +
62541 +extern void
62542 +ep_dvma_init (EP_SYS *sys)
62543 +{
62544 +    EP_DVMA_STATE *d = &sys->DvmaState;
62545 +
62546 +    kmutex_init (&d->dvma_lock);
62547 +    
62548 +    INIT_LIST_HEAD (&d->dvma_handles);
62549 +    INIT_LIST_HEAD (&d->dvma_rails);
62550 +
62551 +    d->dvma_rmap = ep_rmallocmap (EP_DVMA_RMAP_SIZE, "dvma_rmap", 1);
62552 +
62553 +    ep_rmfree (d->dvma_rmap, EP_DVMA_TOP - EP_DVMA_BASE, EP_DVMA_BASE);
62554 +}
62555 +
62556 +extern void
62557 +ep_dvma_fini (EP_SYS *sys)
62558 +{
62559 +    EP_DVMA_STATE *d = &sys->DvmaState;
62560 +
62561 +    ep_rmfreemap (d->dvma_rmap);
62562 +
62563 +    kmutex_destroy (&d->dvma_lock);
62564 +}
62565 +    
62566 +extern int
62567 +ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail)
62568 +{
62569 +    EP_DVMA_STATE    *d = &sys->DvmaState;
62570 +    EP_RAIL_ENTRY    *l;
62571 +    struct list_head *el;
62572 +
62573 +    KMEM_ZALLOC (l, EP_RAIL_ENTRY *, sizeof (EP_RAIL_ENTRY), 1);
62574 +
62575 +    if (l == NULL)
62576 +       return (ENOMEM);
62577 +
62578 +    kmutex_lock (&d->dvma_lock);
62579 +
62580 +    l->Rail = rail;
62581 +
62582 +    list_add_tail (&l->Link, &d->dvma_rails);
62583 +
62584 +    list_for_each (el, &d->dvma_handles) {
62585 +       EP_DVMA_NMH *desc   = list_entry (el, EP_DVMA_NMH, dvma_link);
62586 +       int          npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT;
62587 +
62588 +       desc->dvma_rails[rail->Number] = rail;
62589 +       desc->dvma_railmask |= ( 1 << rail->Number);
62590 +
62591 +       desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages);
62592 +    }
62593 +
62594 +    kmutex_unlock (&d->dvma_lock);
62595 +    return (0);
62596 +}
62597 +
62598 +extern void
62599 +ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail)
62600 +{
62601 +    EP_DVMA_STATE    *d = &sys->DvmaState;
62602 +    struct list_head *el;
62603 +
62604 +    kmutex_lock (&d->dvma_lock);
62605 +    
62606 +    list_for_each (el, &d->dvma_handles) {
62607 +       EP_DVMA_NMH *desc   = list_entry (el, EP_DVMA_NMH, dvma_link);
62608 +       int          npages = desc->dvma_nmh.nmh_nmd.nmd_len >> PAGESHIFT;
62609 +
62610 +       desc->dvma_rails[rail->Number] = NULL;
62611 +       desc->dvma_railmask &= ~(1 << rail->Number);
62612 +
62613 +       rail->Operations.DvmaRelease (rail, desc->dvma_nmh.nmh_nmd.nmd_addr, npages, desc->dvma_private[rail->Number]);
62614 +    }
62615 +
62616 +    list_for_each (el, &d->dvma_rails) {
62617 +       EP_RAIL_ENTRY *tmp = list_entry (el, EP_RAIL_ENTRY, Link);
62618 +
62619 +       if (tmp->Rail == rail)
62620 +       {
62621 +           list_del (el);
62622 +
62623 +           KMEM_FREE (tmp, sizeof (EP_RAIL_ENTRY));
62624 +           break;
62625 +       }
62626 +    }
62627 +    kmutex_unlock (&d->dvma_lock);
62628 +}
62629 +
62630 +EP_NMH *
62631 +ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm)
62632 +{
62633 +    EP_DVMA_STATE    *d = &sys->DvmaState;
62634 +    EP_DVMA_NMH      *desc;
62635 +    EP_ADDR          addr;
62636 +    struct list_head *el;
62637 +    int               i;
62638 +
62639 +    KMEM_ZALLOC (desc, EP_DVMA_NMH *, offsetof (EP_DVMA_NMH, dvma_attrs[npages]), 1);
62640 +    
62641 +    if (desc == NULL)
62642 +       return (NULL);
62643 +
62644 +    if ((addr = ep_rmalloc (d->dvma_rmap, npages << PAGESHIFT, 0)) == 0)
62645 +    {
62646 +
62647 +       KMEM_FREE (desc, sizeof (EP_DVMA_NMH));
62648 +       return (NULL);
62649 +    }
62650 +
62651 +    spin_lock_init (&desc->dvma_lock);
62652 +
62653 +    desc->dvma_perm = perm;
62654 +
62655 +    kmutex_lock (&d->dvma_lock);
62656 +    /* reserve the mapping resource */
62657 +    list_for_each (el, &d->dvma_rails) {
62658 +       EP_RAIL *rail = list_entry (el, EP_RAIL_ENTRY, Link)->Rail;
62659 +
62660 +       EPRINTF4 (DBG_KMAP, "%s: ep_dvma_reserve desc=%p npages=%d rail=%p\n", rail->Name, desc, npages, rail);
62661 +
62662 +       if ((desc->dvma_private[rail->Number] = rail->Operations.DvmaReserve (rail, addr, npages)) == NULL)
62663 +       {
62664 +           printk ("%s: !!ep_dvma_reserve - rail->DvmaReserve failed\n", rail->Name);
62665 +           goto failed;
62666 +       }
62667 +
62668 +       desc->dvma_rails[rail->Number] = rail;
62669 +       desc->dvma_railmask |= (1 << rail->Number);
62670 +    }
62671 +
62672 +    /* insert into the network mapping handle table */
62673 +    desc->dvma_nmh.nmh_nmd.nmd_addr = addr;
62674 +    desc->dvma_nmh.nmh_nmd.nmd_len  = npages << PAGESHIFT;
62675 +    desc->dvma_nmh.nmh_nmd.nmd_attr = EP_NMD_ATTR (sys->Position.pos_nodeid, 0);
62676 +    desc->dvma_nmh.nmh_ops          = &ep_dvma_nmh_ops;
62677 +
62678 +    ep_nmh_insert (&sys->MappingTable, &desc->dvma_nmh);
62679 +
62680 +    list_add (&desc->dvma_link, &d->dvma_handles);
62681 +
62682 +    kmutex_unlock (&d->dvma_lock);
62683 +
62684 +    return (&desc->dvma_nmh);
62685 +
62686 + failed:
62687 +
62688 +    kmutex_unlock (&d->dvma_lock);
62689 +
62690 +    for (i = 0; i < EP_MAX_RAILS; i++)
62691 +       if (desc->dvma_rails[i] != NULL)
62692 +           desc->dvma_rails[i]->Operations.DvmaRelease (desc->dvma_rails[i], addr, npages, desc->dvma_private[i]);
62693 +
62694 +    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
62695 +
62696 +    KMEM_FREE (desc, sizeof (EP_DVMA_NMH));
62697 +    return (NULL);
62698 +}
62699 +
62700 +void
62701 +ep_dvma_release (EP_SYS *sys, EP_NMH *nmh)
62702 +{
62703 +    EP_DVMA_STATE *d      = &sys->DvmaState;
62704 +    EP_DVMA_NMH   *desc   = (EP_DVMA_NMH *) nmh;
62705 +    EP_ADDR        addr   = nmh->nmh_nmd.nmd_addr;
62706 +    int            npages = nmh->nmh_nmd.nmd_len >> PAGESHIFT;
62707 +    EP_RAIL       *rail;
62708 +    int            i;
62709 +
62710 +    kmutex_lock (&d->dvma_lock);
62711 +
62712 +    list_del (&desc->dvma_link);
62713 +    
62714 +    ep_nmh_remove (&sys->MappingTable, nmh);
62715 +
62716 +    for (i = 0; i < EP_MAX_RAILS; i++)
62717 +       if ((rail = desc->dvma_rails[i]) != NULL)
62718 +           rail->Operations.DvmaRelease (rail, addr, npages, desc->dvma_private[i]);
62719 +
62720 +    ep_rmfree (d->dvma_rmap, npages << PAGESHIFT, addr);
62721 +
62722 +    KMEM_FREE (desc, offsetof (EP_DVMA_NMH, dvma_attrs[npages]));
62723 +
62724 +    kmutex_unlock (&d->dvma_lock);
62725 +}
62726 +
62727 +void
62728 +ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len, EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset)
62729 +{
62730 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
62731 +    unsigned     offset = (unsigned long) vaddr & PAGEOFFSET;
62732 +    unsigned     npages = btopr (len + offset);
62733 +    EP_ADDR      addr   = nmh->nmh_nmd.nmd_addr + (index << PAGESHIFT);
62734 +    int                 rmask  = *hints;
62735 +    EP_RAIL     *rail;
62736 +    register int i, rnum;
62737 +    unsigned long flags;
62738 +
62739 +    EPRINTF7 (DBG_KMAP, "ep_dvma_load: map=%p vaddr=%p len=%x nmh=%p(%x,%x) index=%d\n",
62740 +             map, vaddr, len, nmh, nmh->nmh_nmd.nmd_addr, nmh->nmh_nmd.nmd_len, index);
62741 +
62742 +    /* If no rail specified, then map into all rails */
62743 +    if (rmask == 0)
62744 +       rmask = desc->dvma_railmask;
62745 +
62746 +    ASSERT ((index + npages) <= (nmh->nmh_nmd.nmd_len >> PAGESHIFT));
62747 +
62748 +    /* If no map is specified then use the kernel map */
62749 +    if (map == NULL)
62750 +       map = kernel_map;
62751 +
62752 +    spin_lock_irqsave (&desc->dvma_lock, flags);
62753 +    /* Now map each of the specified pages (backwards) */
62754 +
62755 +    vaddr = (vaddr - offset) + (npages-1)*PAGESIZE;
62756 +    for (i = npages-1; i >= 0; i--, vaddr -= PAGESIZE)
62757 +    {
62758 +       physaddr_t paddr = vaddr_to_phys (map, vaddr);
62759 +       
62760 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
62761 +       {
62762 +           if (! (rmask & (1 << rnum)) || (rail = desc->dvma_rails[rnum]) == NULL)
62763 +               rmask &= ~(1 << rnum);
62764 +           else
62765 +           {
62766 +               rail->Operations.DvmaSetPte (rail, desc->dvma_private[rnum], index + i, paddr, desc->dvma_perm);
62767 +
62768 +               desc->dvma_attrs[index + i] |= (1 << rnum);
62769 +           }
62770 +       }
62771 +    }
62772 +
62773 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
62774 +       if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
62775 +           rail->TlbFlushRequired = 1;
62776 +
62777 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
62778 +
62779 +    /* Construct the network mapping handle to be returned. */
62780 +    subset->nmd_addr = addr + offset;
62781 +    subset->nmd_len  = len;
62782 +    subset->nmd_attr = EP_NMD_ATTR(sys->Position.pos_nodeid, rmask);
62783 +}
62784 +
62785 +void
62786 +ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd)
62787 +{
62788 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
62789 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
62790 +    unsigned     npages = btopr (nmd->nmd_len + offset);
62791 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
62792 +    EP_RAIL     *rail;
62793 +    int          rnum;
62794 +    int          rmask;
62795 +    register int i;
62796 +    unsigned long flags;
62797 +    
62798 +    spin_lock_irqsave (&desc->dvma_lock, flags);
62799 +
62800 +    /* compute which rails we need to unload on */
62801 +    for (rmask = 0, i = 0; i < npages; i++)
62802 +    {
62803 +       rmask |= desc->dvma_attrs[index + i];
62804 +       
62805 +       desc->dvma_attrs[index + i] = 0;
62806 +    }
62807 +    
62808 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
62809 +       if ((rmask & (1 << rnum)) && (rail = desc->dvma_rails[rnum]) != NULL)
62810 +           rail->Operations.DvmaUnload (rail, desc->dvma_private[rnum], index, npages);
62811 +
62812 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
62813 +}
62814 +
62815 +int
62816 +ep_dvma_map_rails (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask)
62817 +{
62818 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
62819 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
62820 +    unsigned     npages = btopr (nmd->nmd_len + offset);
62821 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
62822 +    int          r, rnum;
62823 +    register int i;
62824 +    unsigned long flags;
62825 +
62826 +    spin_lock_irqsave (&desc->dvma_lock, flags);
62827 +
62828 +    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x mask=%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
62829 +
62830 +    if ((mask &= desc->dvma_railmask) == 0)
62831 +    {
62832 +       printk ("ep_dvma_map_rails: no intersecting rails %04x.%04x\n", mask, desc->dvma_railmask);
62833 +       spin_unlock_irqrestore (&desc->dvma_lock, flags);
62834 +       return (-1);
62835 +    }
62836 +
62837 +    for (i = npages-1; i >= 0; i--)
62838 +    {
62839 +       int pgidx = (index + i);
62840 +
62841 +       for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
62842 +           if (desc->dvma_attrs[pgidx] & (1 << rnum))
62843 +               break;
62844 +       
62845 +       if (rnum == EP_MAX_RAILS)
62846 +       {
62847 +           EPRINTF3 (DBG_KMAP, "ep_dvma_map_rails: nmh=%p idx=%x [%08x] not ptes valid\n", nmh, pgidx, 
62848 +                     nmh->nmh_nmd.nmd_addr + ((pgidx) << PAGESHIFT));
62849 +           mask = 0;
62850 +       }
62851 +       else
62852 +       {
62853 +           EP_RAIL   *rail  = desc->dvma_rails[rnum];
62854 +           physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
62855 +           
62856 +           EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx,
62857 +                     nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr);
62858 +           
62859 +           for (r = 0; r < EP_MAX_RAILS; r++)
62860 +           {
62861 +               if ((mask & (1 << r)) == 0)
62862 +                   continue;
62863 +               
62864 +               if ((desc->dvma_attrs[pgidx] & (1 << r)) == 0)
62865 +               {
62866 +                   EPRINTF5 (DBG_KMAP, "%s: ep_dvma_map_rails: nmh=%p idx=%x [%08x] paddr=%llx\n",
62867 +                             desc->dvma_rails[rnum]->Name, nmh, pgidx, nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), 
62868 +                             (long long) paddr);
62869 +                   
62870 +                   rail->Operations.DvmaSetPte (rail, desc->dvma_private[rnum], pgidx, paddr, desc->dvma_perm);
62871 +                   
62872 +                   desc->dvma_attrs[pgidx] |= (1 << r);
62873 +               }
62874 +           }
62875 +       }
62876 +    }
62877 +
62878 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
62879 +       if ((mask & (1 << rnum)) != 0)
62880 +           desc->dvma_rails[rnum]->TlbFlushRequired = 1;
62881 +
62882 +    EPRINTF4 (DBG_KMAP, "ep_dvma_map_rails: nmd=%08x.%08x.%08x|%04x\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, mask);
62883 +
62884 +    /* Finally update the network memory descriptor */
62885 +    nmd->nmd_attr |= mask;
62886 +
62887 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
62888 +
62889 +    return (0);
62890 +}
62891 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
62892 +#include <linux/highmem.h>
62893 +
62894 +/* Generic rolling checksum algorithm */
62895 +uint16_t
62896 +rolling_check_sum (char *msg, int nob, uint16_t sum)
62897 +{
62898 +    while (nob-- > 0)
62899 +       sum = sum * 13 + *msg++;
62900 +
62901 +    return (sum);
62902 +}
62903 +
62904 +#if ! defined(NO_RMAP)
62905 +void  
62906 +unmap_phys_address(unsigned long phys_addr)
62907 +{
62908 +    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
62909 +    
62910 +    if (pfn_valid(pfn)) 
62911 +       kunmap(pfn_to_page(pfn));
62912 +}
62913 +
62914 +void * 
62915 +map_phys_address(unsigned long phys_addr)
62916 +{
62917 +    unsigned long pfn = (phys_addr >> PAGE_SHIFT);
62918 +    
62919 +    if (pfn_valid(pfn)) 
62920 +       return  kmap(pfn_to_page(pfn));
62921 +    
62922 +    return NULL;
62923 +}
62924 +#else
62925 +void  
62926 +unmap_phys_address(unsigned long phys_addr)
62927 +{
62928 +    struct page *p = virt_to_page(__va(phys_addr));
62929 +    
62930 +    if (VALID_PAGE(p)) 
62931 +       kunmap(p);
62932 +}
62933 +
62934 +void * 
62935 +map_phys_address(unsigned long phys_addr)
62936 +{
62937 +    struct page *p = virt_to_page(__va(phys_addr));
62938 +                               
62939 +    if (VALID_PAGE(p)) 
62940 +       return  kmap(p);
62941 +    
62942 +    return NULL;
62943 +}
62944 +#endif
62945 +
62946 +uint16_t
62947 +ep_dvma_calc_check_sum (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum)
62948 +{
62949 +    /* can't be called from an interrupt */
62950 +
62951 +    EP_DVMA_NMH *desc = (EP_DVMA_NMH *) nmh;
62952 +    unsigned     offset = nmd->nmd_addr & PAGEOFFSET;
62953 +    unsigned     npages = btopr (nmd->nmd_len + offset);
62954 +    unsigned     index  = (nmd->nmd_addr - nmh->nmh_nmd.nmd_addr) >> PAGESHIFT;
62955 +    unsigned     start, len;
62956 +    int          rnum;
62957 +    register int i;
62958 +    unsigned long flags;
62959 +    EP_RAIL      *rail;
62960 +
62961 +
62962 +    spin_lock_irqsave (&desc->dvma_lock, flags);
62963 +
62964 +    EPRINTF3 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x \n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
62965
62966 +    /* find a rail */
62967 +    for (rnum = 0; rnum < EP_MAX_RAILS; rnum++)
62968 +       if (desc->dvma_attrs[index] & (1 << rnum))
62969 +           break;
62970 +       
62971 +    ASSERT (rnum != EP_MAX_RAILS);
62972
62973 +    rail = desc->dvma_rails[rnum];
62974 +
62975 +    for (i = 0; i <= (npages-1); i++)
62976 +    {
62977 +       int        pgidx = (index + i);
62978 +       physaddr_t paddr = rail->Operations.DvmaReadPte (rail, desc->dvma_private[rnum], pgidx);
62979 +       void *     virt;
62980 +
62981 +       spin_unlock_irqrestore (&desc->dvma_lock, flags); /* unlock for check sum calc */
62982 +
62983 +       virt = map_phys_address(paddr);
62984 +
62985 +       if (!virt)
62986 +           printk("ep_dvma_calc_check_sum: virt = NULL ! \n");
62987 +       else {
62988 +           if ( i == 0 ) {
62989 +               /* last bit of the first page */
62990 +               start =  (nmd->nmd_addr & (PAGESIZE - 1)) ;
62991 +               len   =  PAGESIZE - start;
62992 +               if ( len > nmd->nmd_len) /* less than the remaining page */ 
62993 +                   len =  nmd->nmd_len;
62994 +           } else {
62995 +               if ( i != (npages-1)) {
62996 +                   /* all of the middle pages    */
62997 +                   start = 0;
62998 +                   len   = PAGESIZE;
62999 +               } else {
63000 +                   /* first bit of the last page */
63001 +                   start = 0;
63002 +                   len   = ((nmd->nmd_addr + nmd->nmd_len -1) & (PAGESIZE -1)) +1;
63003 +               }
63004 +           }
63005 +
63006 +           check_sum = rolling_check_sum (((char *)virt)+start, len, check_sum);
63007 +           unmap_phys_address(paddr);
63008 +   
63009 +           /* re-acquire the lock */
63010 +           spin_lock_irqsave (&desc->dvma_lock, flags);
63011 +       }
63012 +       
63013 +       EPRINTF5 (DBG_KMAP, "%s: ep_dvma_calc_check_sum: nmh=%p idx=%x [%08x] paddr %llx\n", rail->Name, nmh, pgidx,
63014 +                 nmh->nmh_nmd.nmd_addr + (pgidx << PAGESHIFT), (long long) paddr);     
63015 +    }
63016 +
63017 +    EPRINTF4 (DBG_KMAP, "ep_dvma_calc_check_sum: nmd=%08x.%08x.%08x = %d\n", nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr, check_sum);
63018 +
63019 +    spin_unlock_irqrestore (&desc->dvma_lock, flags);
63020 +
63021 +    return (check_sum);
63022 +}
63023 +#endif
63024 +/*
63025 + * Local variables:
63026 + * c-file-style: "stroustrup"
63027 + * End:
63028 + */
63029 diff -urN clean/drivers/net/qsnet/ep/kmap_elan3.c linux-2.6.9/drivers/net/qsnet/ep/kmap_elan3.c
63030 --- clean/drivers/net/qsnet/ep/kmap_elan3.c     1969-12-31 19:00:00.000000000 -0500
63031 +++ linux-2.6.9/drivers/net/qsnet/ep/kmap_elan3.c       2004-12-14 05:19:23.000000000 -0500
63032 @@ -0,0 +1,209 @@
63033 +/*
63034 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63035 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63036 + *
63037 + *    For licensing information please see the supplied COPYING file
63038 + *
63039 + */
63040 +
63041 +#ident "@(#)$Id: kmap_elan3.c,v 1.4 2004/12/14 10:19:23 mike Exp $"
63042 +/*      $Source: /cvs/master/quadrics/epmod/kmap_elan3.c,v $ */
63043 +
63044 +#include <qsnet/kernel.h>
63045 +
63046 +#include <elan3/elanregs.h>
63047 +#include <elan3/elandev.h>
63048 +#include <elan3/elanvp.h>
63049 +#include <elan3/elan3mmu.h>
63050 +#include <elan3/elanctxt.h>
63051 +#include <elan3/elandebug.h>
63052 +
63053 +#include <elan/kcomm.h>
63054 +
63055 +#include "kcomm_elan3.h"
63056 +
63057 +#if defined(DIGITAL_UNIX)
63058 +#  define kernel_map                   (first_task->map)
63059 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
63060 +#elif defined(LINUX)
63061 +#  define kernel_map                   get_kern_mm()
63062 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
63063 +#elif defined(SOLARIS)
63064 +#  define kernel_map                   &kas
63065 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
63066 +#endif
63067 +
63068 +#define ELAN3_PTES_PER_PAGE            (PAGESIZE/ELAN3_PAGE_SIZE)
63069 +
63070 +#if defined(__LITTLE_ENDIAN__)
63071 +#define PERM_ENDIAN    0
63072 +#else
63073 +#define PERM_ENDIAN    ELAN3_PTE_BIG_ENDIAN
63074 +#endif
63075 +
63076 +static unsigned int main_permtable[] = 
63077 +{
63078 +    ELAN3_PERM_REMOTEALL,              /* EP_PERM_EXECUTE */
63079 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
63080 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
63081 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_ALL */
63082 +};
63083 +
63084 +static unsigned int sdram_permtable[] = 
63085 +{
63086 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_EXECUTE */
63087 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
63088 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
63089 +    ELAN3_PERM_REMOTEALL,              /* EP_PERM_ALL */
63090 +};
63091 +
63092 +static unsigned int io_permtable[] = 
63093 +{
63094 +    ELAN3_PERM_LOCAL_READ,             /* EP_PERM_EXECUTE */
63095 +    ELAN3_PERM_REMOTEREAD,             /* EP_PERM_READ */
63096 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_WRITE */
63097 +    ELAN3_PERM_REMOTEWRITE,            /* EP_PERM_ALL */
63098 +};
63099 +
63100 +void
63101 +ep3_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr)
63102 +{
63103 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
63104 +    unsigned     npages = len >> PAGESHIFT;
63105 +    int          i;
63106 +    unsigned int off;
63107 +
63108 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63109 +
63110 +    for (i = 0; i < npages; i++)
63111 +    {
63112 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr);
63113 +
63114 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
63115 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, 
63116 +                             main_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0));
63117 +
63118 +       eaddr += PAGESIZE;
63119 +       kaddr += PAGESIZE;
63120 +    }
63121 +}
63122 +
63123 +void
63124 +ep3_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr)
63125 +{
63126 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
63127 +    unsigned     npages = len >> PAGESHIFT;
63128 +    int          i;
63129 +    unsigned int off;
63130 +
63131 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63132 +
63133 +    for (i = 0; i < npages; i++)
63134 +    {
63135 +       physaddr_t paddr = elan3_sdram_to_phys (rail->Device, saddr);
63136 +
63137 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
63138 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr+off, paddr+off, 
63139 +                             sdram_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC | ((ep_attr & EP_NO_SLEEP) ? PTE_NO_SLEEP : 0) );
63140 +
63141 +       eaddr += PAGESIZE;
63142 +       saddr += PAGESIZE;
63143 +    }
63144 +}
63145 +
63146 +void
63147 +ep3_ioaddr_map (EP_RAIL *r, EP_ADDR eaddr, ioaddr_t ioaddr, unsigned len, unsigned int perm)
63148 +{
63149 +    EP3_RAIL    *rail   = (EP3_RAIL *) r;
63150 +    unsigned     npages = len >> PAGESHIFT;
63151 +    int          i;
63152 +    unsigned int off;
63153 +
63154 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (ioaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63155 +
63156 +    for (i = 0; i < npages; i++)
63157 +    {
63158 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) ioaddr);
63159 +
63160 +       for (off = 0; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
63161 +           elan3mmu_pteload (rail->Elan3mmu, PTBL_LEVEL_3, eaddr + off, paddr + off, 
63162 +                             io_permtable[perm], PTE_LOAD_LOCK | PTE_LOAD_NOSYNC);
63163 +
63164 +       eaddr += PAGESIZE;
63165 +       ioaddr += PAGESIZE;
63166 +    }
63167 +}
63168 +void
63169 +ep3_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned len)
63170 +{
63171 +    EP3_RAIL *rail = (EP3_RAIL *) r;
63172 +
63173 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63174 +
63175 +    elan3mmu_unload (rail->Elan3mmu, eaddr, len, PTE_UNLOAD_UNLOCK | PTE_UNLOAD_NOSYNC);
63176 +}
63177 +
63178 +void *
63179 +ep3_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned npages)
63180 +{
63181 +    EP3_RAIL *rail = (EP3_RAIL *) r;
63182 +    void     *private;
63183 +
63184 +    KMEM_ALLOC (private, void *, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t), 1);
63185 +    
63186 +    if (private == NULL)
63187 +       return NULL;
63188 +    
63189 +    elan3mmu_reserve (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private);
63190 +
63191 +    return private;
63192 +}
63193 +
63194 +void
63195 +ep3_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned npages, void *private)
63196 +{
63197 +    EP3_RAIL *rail = (EP3_RAIL *) r;
63198 +
63199 +    elan3mmu_release (rail->Elan3mmu, eaddr, npages * ELAN3_PTES_PER_PAGE, (sdramaddr_t *) private);
63200 +
63201 +    KMEM_FREE (private, npages * ELAN3_PTES_PER_PAGE * sizeof (sdramaddr_t));
63202 +}
63203 +
63204 +void
63205 +ep3_dvma_set_pte (EP_RAIL *r, void *private, unsigned index, physaddr_t paddr, unsigned int perm)
63206 +{
63207 +    ELAN3_DEV    *dev  = ((EP3_RAIL *) r)->Device;
63208 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
63209 +    int          off;
63210 +
63211 +    for (off =0 ; off < PAGESIZE; off += ELAN3_PAGE_SIZE)
63212 +    {  
63213 +       ELAN3_PTE newpte = elan3mmu_phys_to_pte (dev, paddr + off, main_permtable[perm]) | ELAN3_PTE_REF | ELAN3_PTE_MOD;
63214 +
63215 +       elan3_writepte (dev, *ptep, newpte);
63216 +
63217 +       ptep++;
63218 +    }
63219 +}
63220 +
63221 +physaddr_t
63222 +ep3_dvma_read_pte (EP_RAIL *r, void *private, unsigned index)
63223 +{
63224 +    EP3_RAIL    *rail = (EP3_RAIL *) r;
63225 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
63226 +    ELAN3_PTE     pte  = elan3_readpte (rail->Device, *ptep);
63227 +
63228 +    return pte & ELAN3_PTE_PFN_MASK;
63229 +}
63230 +
63231 +void
63232 +ep3_dvma_unload (EP_RAIL *r, void *private, unsigned index, unsigned npages)
63233 +{
63234 +    EP3_RAIL    *rail = (EP3_RAIL *) r;
63235 +    sdramaddr_t *ptep = &((sdramaddr_t *) private)[index * ELAN3_PTES_PER_PAGE];
63236 +    ELAN3_PTE     tpte = elan3mmu_kernel_invalid_pte (rail->Elan3mmu);
63237 +    int i;
63238 +
63239 +    for (i = (npages * ELAN3_PTES_PER_PAGE) - 1; i >= 0; i--)
63240 +       elan3_writepte (rail->Device, ptep[i], tpte);
63241 +}
63242 diff -urN clean/drivers/net/qsnet/ep/kmap_elan4.c linux-2.6.9/drivers/net/qsnet/ep/kmap_elan4.c
63243 --- clean/drivers/net/qsnet/ep/kmap_elan4.c     1969-12-31 19:00:00.000000000 -0500
63244 +++ linux-2.6.9/drivers/net/qsnet/ep/kmap_elan4.c       2005-07-20 07:35:37.000000000 -0400
63245 @@ -0,0 +1,224 @@
63246 +/*
63247 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63248 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63249 + *
63250 + *    For licensing information please see the supplied COPYING file
63251 + *
63252 + */
63253 +
63254 +#ident "@(#)$Id: kmap_elan4.c,v 1.12.2.1 2005/07/20 11:35:37 mike Exp $"
63255 +/*      $Source: /cvs/master/quadrics/epmod/kmap_elan4.c,v $ */
63256 +
63257 +#include <qsnet/kernel.h>
63258 +
63259 +#include <elan/kcomm.h>
63260 +
63261 +#include "debug.h"
63262 +#include "kcomm_elan4.h"
63263 +
63264 +#if defined(DIGITAL_UNIX)
63265 +#  define kernel_map                   (first_task->map)
63266 +#  define vaddr_to_phys(map, addr)     (pmap_extract (vm_map_pmap ((vm_map_t) map), (unsigned long) addr))
63267 +#elif defined(LINUX)
63268 +#  define kernel_map                   get_kern_mm()
63269 +#  define vaddr_to_phys(map, addr)     (kmem_to_phys(addr))
63270 +#elif defined(SOLARIS)
63271 +#  define kernel_map                   &kas
63272 +#  define vaddr_to_phys(map,addr)      ptob(hat_getpfnum (((struct as *) map)->a_hat, (caddr_t) addr))
63273 +#endif
63274 +
63275 +static unsigned int main_permtable[] = 
63276 +{
63277 +    PERM_Unused,                       /* EP_PERM_EXECUTE */
63278 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
63279 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
63280 +    PERM_DataReadWrite,                        /* EP_PERM_ALL */
63281 +};
63282 +
63283 +static unsigned int sdram_permtable[] = 
63284 +{
63285 +    PERM_LocExecute,                   /* EP_PERM_EXECUTE */
63286 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
63287 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
63288 +    PERM_RemoteAll,                    /* EP_PERM_ALL */
63289 +};
63290 +
63291 +static unsigned int cport_permtable[] = 
63292 +{
63293 +    PERM_Unused,                       /* EP_PERM_EXECUTE */
63294 +    PERM_RemoteReadOnly,               /* EP_PERM_READ */
63295 +    PERM_DataReadWrite,                        /* EP_PERM_WRITE */
63296 +    PERM_Unused,                       /* EP_PERM_ALL */
63297 +};
63298 +
63299 +void
63300 +ep4_kaddr_map (EP_RAIL *r, EP_ADDR eaddr, virtaddr_t kaddr, unsigned int len, unsigned int perm, int ep_attr)
63301 +{
63302 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
63303 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
63304 +    unsigned int npages = len >> PAGESHIFT;
63305 +    int          i;
63306 +    unsigned int off;
63307 +
63308 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (kaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63309 +
63310 +    for (i = 0; i < npages; i++)
63311 +    {
63312 +       physaddr_t paddr = vaddr_to_phys (kernel_map, (void *) kaddr);
63313 +
63314 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
63315 +       {
63316 +           E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]);
63317 +
63318 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, HE_TYPE_OTHER, newpte);
63319 +       }
63320 +
63321 +       eaddr += PAGESIZE;
63322 +       kaddr += PAGESIZE;
63323 +    }
63324 +}
63325 +
63326 +void
63327 +ep4_sdram_map (EP_RAIL *r, EP_ADDR eaddr, sdramaddr_t saddr, unsigned int len, unsigned int perm, int ep_attr)
63328 +{
63329 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
63330 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
63331 +    unsigned int npages = len >> PAGESHIFT;
63332 +    int          i;
63333 +    unsigned int off;
63334 +
63335 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (saddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63336 +
63337 +    if ((eaddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)) != (saddr & (SDRAM_PGOFF_OFFSET << PAGE_SHIFT)))
63338 +       printk ("ep4_sdram_map: eaddr=%x saddr=%lx - incorrectly aliased\n", eaddr, saddr);
63339 +
63340 +    for (i = 0; i < npages; i++)
63341 +    {
63342 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
63343 +       {
63344 +           E4_uint64 newpte = ((saddr + off) >> PTE_PADDR_SHIFT) | PTE_SetPerm (sdram_permtable[perm]);
63345 +
63346 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, HE_TYPE_OTHER, newpte);
63347 +       }
63348 +
63349 +       eaddr += PAGESIZE;
63350 +       saddr += PAGESIZE;
63351 +    }
63352 +}
63353 +
63354 +void
63355 +ep4_cport_map (EP_RAIL *r, EP_ADDR eaddr, unsigned long cqaddr, unsigned int len, unsigned int perm)
63356 +{
63357 +    EP4_RAIL    *rail   = (EP4_RAIL *) r;
63358 +    ELAN4_DEV   *dev    = rail->r_ctxt.ctxt_dev;
63359 +    unsigned int npages = len >> PAGESHIFT;
63360 +    int          i;
63361 +    unsigned int off;
63362 +
63363 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (cqaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63364 +
63365 +    for (i = 0; i < npages; i++)
63366 +    {
63367 +       for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
63368 +       {
63369 +           E4_uint64 newpte = ((cqaddr + off) >> PTE_PADDR_SHIFT) | PTE_SetPerm(cport_permtable[perm]) | PTE_CommandQueue;
63370 +
63371 +           elan4mmu_pteload (&rail->r_ctxt, 0, eaddr + off, HE_TYPE_OTHER, newpte);
63372 +       }
63373 +
63374 +       eaddr += PAGESIZE;
63375 +       cqaddr += PAGESIZE;
63376 +    }
63377 +}
63378 +void
63379 +ep4_unmap (EP_RAIL *r, EP_ADDR eaddr, unsigned int len)
63380 +{
63381 +    EP4_RAIL *rail = (EP4_RAIL *) r;
63382 +
63383 +    ASSERT ((eaddr & PAGEOFFSET) == 0 && (len & PAGEOFFSET) == 0);
63384 +
63385 +    elan4mmu_unload_range (&rail->r_ctxt, 0, eaddr, len);
63386 +}
63387 +
63388 +void *
63389 +ep4_dvma_reserve (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages)
63390 +{
63391 +    EP4_RAIL  *rail = (EP4_RAIL *) r;
63392 +    ELAN4_DEV *dev  = rail->r_ctxt.ctxt_dev;
63393 +
63394 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_reserve: eaddr=%x npages=%d (=> %d)\n", eaddr, npages, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])));
63395 +
63396 +    return elan4mmu_reserve (&rail->r_ctxt, 0, (E4_Addr) eaddr, (npages << (PAGE_SHIFT - dev->dev_pageshift[0])), 1);
63397 +}
63398 +
63399 +void
63400 +ep4_dvma_release (EP_RAIL *r, EP_ADDR eaddr, unsigned int npages, void *private)
63401 +{
63402 +    EP4_RAIL *rail = (EP4_RAIL *) r;
63403 +
63404 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_release: eaddr=%x npages=%d private=%p\n", eaddr, npages, private);
63405 +
63406 +    elan4mmu_release (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private);
63407 +}
63408 +
63409 +void
63410 +ep4_dvma_set_pte (EP_RAIL *r, void *private, unsigned int index, physaddr_t paddr, unsigned int perm)
63411 +{
63412 +    EP4_RAIL     *rail  = (EP4_RAIL *) r;
63413 +    ELAN4_DEV    *dev   = rail->r_ctxt.ctxt_dev;
63414 +    unsigned int  off;
63415 +    unsigned long flags;
63416 +
63417 +    EPRINTF3 (DBG_KMAP, "ep4_dvma_set_pte: index %x -> eaddr %llx paddr %llx\n", 
63418 +             index, (long long)(((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE)), (long long) paddr);
63419 +
63420 +    local_irq_save (flags);
63421 +    for (off = 0; off < PAGESIZE; off += (1 << dev->dev_pageshift[0]))
63422 +    {
63423 +       E4_uint64 newpte = elan4mmu_phys2pte (dev, paddr + off, main_permtable[perm]);
63424 +
63425 +       elan4mmu_set_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, (index << (PAGE_SHIFT - dev->dev_pageshift[0])) +
63426 +                         (off >> dev->dev_pageshift[0]), newpte);
63427 +    }
63428 +    local_irq_restore (flags);
63429 +}
63430 +
63431 +physaddr_t
63432 +ep4_dvma_read_pte (EP_RAIL *r, void *private, unsigned int index)
63433 +{
63434 +    EP4_RAIL     *rail  = (EP4_RAIL *) r;
63435 +    ELAN4_DEV    *dev   = rail->r_ctxt.ctxt_dev;
63436 +    E4_uint64     pte;
63437 +    unsigned long flags;
63438 +
63439 +    local_irq_save (flags);
63440 +    pte = elan4mmu_get_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, index << (PAGE_SHIFT - dev->dev_pageshift[0]));
63441 +    local_irq_restore (flags);
63442 +
63443 +    return elan4mmu_pte2phys (dev, pte);
63444 +}
63445 +
63446 +void
63447 +ep4_dvma_unload (EP_RAIL *r, void *private, unsigned int index, unsigned int npages)
63448 +{
63449 +    EP4_RAIL  *rail  = (EP4_RAIL *) r;
63450 +    ELAN4_DEV *dev   = rail->r_ctxt.ctxt_dev;
63451 +    EP_ADDR    eaddr = ((ELAN4_HASH_CACHE *) private)->hc_start + (index * PAGE_SIZE);
63452 +    unsigned long idx = (index << (PAGE_SHIFT - dev->dev_pageshift[0]));
63453 +    unsigned long lim = idx + (npages << (PAGE_SHIFT - dev->dev_pageshift[0]));
63454 +    unsigned long flags;
63455 +
63456 +    EPRINTF5 (DBG_KMAP, "ep4_dvma_unload: eaddr %x -> %lx : index=%d idx=%ld lim=%ld\n", 
63457 +             eaddr, (unsigned long)(eaddr + (npages * PAGE_SIZE)), index, idx, lim);
63458 +
63459 +    local_irq_save (flags);
63460 +    for (; idx < lim; idx++)
63461 +       elan4mmu_clear_pte (&rail->r_ctxt, (ELAN4_HASH_CACHE *) private, idx);
63462 +    local_irq_restore (flags);
63463 +}
63464 +
63465 +/*
63466 + * Local variables:
63467 + * c-file-style: "stroustrup"
63468 + * End:
63469 + */
63470 diff -urN clean/drivers/net/qsnet/ep/kmsg_elan3.c linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan3.c
63471 --- clean/drivers/net/qsnet/ep/kmsg_elan3.c     1969-12-31 19:00:00.000000000 -0500
63472 +++ linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan3.c       2005-07-19 10:26:48.000000000 -0400
63473 @@ -0,0 +1,348 @@
63474 +/*
63475 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63476 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63477 + *
63478 + *    For licensing information please see the supplied COPYING file
63479 + *
63480 + */
63481 +
63482 +#ident "@(#)$Id: kmsg_elan3.c,v 1.4.2.1 2005/07/19 14:26:48 david Exp $"
63483 +/*      $Source: /cvs/master/quadrics/epmod/kmsg_elan3.c,v $ */
63484 +
63485 +#include <qsnet/kernel.h>
63486 +
63487 +#include <elan/kcomm.h>
63488 +
63489 +#include "kcomm_vp.h"
63490 +#include "kcomm_elan3.h"
63491 +#include "debug.h"
63492 +
63493 +static void
63494 +ep3_inputq_event (EP3_RAIL *rail, void *arg)
63495 +{
63496 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) arg;
63497 +    
63498 +    (*inputq->q_callback)((EP_RAIL *)rail, inputq->q_arg);
63499 +}
63500 +
63501 +static EP3_COOKIE_OPS ep3_inputq_cookie_ops = 
63502 +{
63503 +    ep3_inputq_event,
63504 +};
63505 +
63506 +EP_INPUTQ *
63507 +ep3_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount,
63508 +                 EP_INPUTQ_CALLBACK *callback, void *arg)
63509 +{
63510 +    EP3_RAIL      *rail = (EP3_RAIL *) r;
63511 +    EP3_INPUTQ    *inputq;
63512 +    EP3_InputQueue qdesc;
63513 +    void          *slots;
63514 +    int            i;
63515 +
63516 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
63517 +
63518 +    KMEM_ALLOC (inputq, EP3_INPUTQ *, sizeof (EP3_INPUTQ), TRUE);
63519 +
63520 +    if (inputq == NULL)
63521 +       return (EP_INPUTQ *) NULL;
63522 +    
63523 +    if ((slots = ep_alloc_main (&rail->Generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL)
63524 +    {
63525 +       KMEM_FREE (inputq, sizeof (EP3_INPUTQ));
63526 +       return (EP_INPUTQ *) NULL;
63527 +    }
63528 +
63529 +    inputq->q_slotSize  = slotSize;
63530 +    inputq->q_slotCount = slotCount;
63531 +    inputq->q_callback  = callback;
63532 +    inputq->q_arg       = arg;
63533 +    inputq->q_slots     = slots;
63534 +
63535 +    /* Initialise all the slots to be "unreceived" */
63536 +    for (i = 0; i < slotCount; i++)
63537 +       ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
63538 +    
63539 +    inputq->q_base     = inputq->q_slotsAddr;
63540 +    inputq->q_top      = inputq->q_base + (slotCount-1) * slotSize;
63541 +    inputq->q_fptr     = inputq->q_base;
63542 +    inputq->q_desc     = EP_SYSTEMQ_DESC(rail->QueueDescs, qnum);
63543 +    inputq->q_descAddr = EP_SYSTEMQ_ADDR (qnum);
63544 +
63545 +    if (callback)
63546 +       RegisterCookie (&rail->CookieTable, &inputq->q_cookie, inputq->q_descAddr, &ep3_inputq_cookie_ops, inputq);
63547 +
63548 +    /* Initialise the input queue descriptor */
63549 +    qdesc.q_state          = E3_QUEUE_FULL;
63550 +    qdesc.q_bptr           = inputq->q_base + slotSize;
63551 +    qdesc.q_fptr           = inputq->q_fptr;
63552 +    qdesc.q_base           = inputq->q_base;
63553 +    qdesc.q_top            = inputq->q_top;
63554 +    qdesc.q_size           = slotSize;
63555 +    qdesc.q_event.ev_Count = 1;
63556 +    qdesc.q_event.ev_Type  = callback ? EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0;
63557 +    qdesc.q_wevent         = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event);
63558 +    qdesc.q_wcount         = 0;
63559 +
63560 +    /* copy the queue descriptor down to sdram */
63561 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue));
63562 +
63563 +    return (EP_INPUTQ *) inputq;
63564 +}
63565 +
63566 +void
63567 +ep3_free_inputq (EP_RAIL *r, EP_INPUTQ *q)
63568 +{
63569 +    EP3_RAIL   *rail   = (EP3_RAIL *) r;
63570 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) q;
63571 +
63572 +    ep_free_main (&rail->Generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
63573 +
63574 +    if (inputq->q_callback)
63575 +       DeregisterCookie (&rail->CookieTable, &inputq->q_cookie);
63576 +
63577 +    KMEM_FREE (inputq, sizeof (EP3_INPUTQ));
63578 +}
63579 +
63580 +void
63581 +ep3_enable_inputq (EP_RAIL *r, EP_INPUTQ *q)
63582 +{
63583 +    EP3_RAIL   *rail   = (EP3_RAIL *) r;
63584 +    EP3_INPUTQ *inputq = (EP3_INPUTQ *) q;
63585 +
63586 +    elan3_sdram_writel (rail->Device, inputq->q_desc + offsetof (EP3_InputQueue, q_state), 0);
63587 +}
63588 +
63589 +void
63590 +ep3_disable_inputq (EP_RAIL *r, EP_INPUTQ *q)
63591 +{
63592 +    EP3_RAIL      *rail   = (EP3_RAIL *) r;
63593 +    EP3_INPUTQ    *inputq = (EP3_INPUTQ *) q;
63594 +    EP3_InputQueue qdesc;
63595 +
63596 +    /* mark the queue as locked */
63597 +    SetQueueLocked (rail, inputq->q_desc);
63598 +
63599 +    /* re-initialise the queue as empty */
63600 +    qdesc.q_state          = E3_QUEUE_FULL;
63601 +    qdesc.q_bptr           = (E3_Addr) inputq->q_base + inputq->q_slotSize;
63602 +    qdesc.q_fptr           = inputq->q_fptr;
63603 +    qdesc.q_base           = inputq->q_base;
63604 +    qdesc.q_top            = inputq->q_top;
63605 +    qdesc.q_size           = inputq->q_slotSize;
63606 +    qdesc.q_event.ev_Count = 1;
63607 +    qdesc.q_event.ev_Type  = inputq->q_callback ? EV_TYPE_EVIRQ | inputq->q_cookie.Cookie : 0;
63608 +    qdesc.q_wevent         = inputq->q_descAddr + offsetof (EP3_InputQueue, q_event);
63609 +    qdesc.q_wcount         = 0;
63610 +
63611 +    /* copy the queue descriptor down to sdram */
63612 +    elan3_sdram_copyl_to_sdram (rail->Device, &qdesc, inputq->q_desc, sizeof (EP3_InputQueue));
63613 +}
63614 +
63615 +int
63616 +ep3_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg)
63617 +{
63618 +    EP3_RAIL          *rail   = (EP3_RAIL *) r;
63619 +    EP3_INPUTQ        *inputq = (EP3_INPUTQ *) q;
63620 +    sdramaddr_t        qdesc  = inputq->q_desc;
63621 +    E3_Addr            nfptr;
63622 +    int                count = 0;
63623 +    E3_uint32          state;
63624 +    int                       delay;
63625 +
63626 + run_again_because_of_eventqueue_overflow:
63627 +    nfptr = inputq->q_fptr + inputq->q_slotSize;
63628 +    if (nfptr > inputq->q_top)                                 
63629 +       nfptr = inputq->q_base;
63630 +
63631 +    while (nfptr != elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_bptr)))                       /* PCI read */
63632 +    {
63633 +       unsigned long slot = (unsigned long) inputq->q_slots + (nfptr - inputq->q_base);
63634 +
63635 +       /* Poll the final word of the message until the message has completely
63636 +        * arrived in main memory. */
63637 +       for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
63638 +           DELAY (delay);
63639 +
63640 +       /* Call the message handler */
63641 +       (*handler) (r, arg, (void *) slot);
63642 +       
63643 +       /* reset the last word of the slot to "unreceived" */
63644 +       ((uint32_t *) (slot + inputq->q_slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
63645 +           
63646 +       state = elan3_sdram_readl (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state));                           /* PCI read */
63647 +       if ((state & E3_QUEUE_FULL) == 0)
63648 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr);                        /* PCI write */
63649 +       else
63650 +       {
63651 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_fptr), nfptr);                        /* PCI write */
63652 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_state), (state & ~E3_QUEUE_FULL));    /* PCI write */
63653 +       }
63654 +       inputq->q_fptr = nfptr;
63655 +       
63656 +       nfptr += roundup (inputq->q_slotSize, E3_BLK_ALIGN);
63657 +       if (nfptr > inputq->q_top)
63658 +           nfptr = inputq->q_base;
63659 +
63660 +       if (++count >= maxCount && maxCount)
63661 +           break;
63662 +    }
63663 +    
63664 +    if (inputq->q_callback && count != 0)
63665 +    {
63666 +       if (count != inputq->q_waitCount)
63667 +           elan3_sdram_writel (rail->Device, qdesc + offsetof (EP3_InputQueue, q_wcount), inputq->q_waitCount = count);
63668 +
63669 +       if (IssueWaitevent (rail, inputq->q_descAddr + offsetof (EP3_InputQueue, q_wevent)) == ISSUE_COMMAND_TRAPPED)
63670 +           goto run_again_because_of_eventqueue_overflow;
63671 +    }
63672 +
63673 +    return count;
63674 +}
63675 +
63676 +#define Q_EVENT(q,slotNum)             ((q)->q_elan      + (slotNum) * sizeof (E3_BlockCopyEvent))
63677 +#define Q_EVENT_ADDR(q,slotNum)                ((q)->q_elanAddr  + (slotNum) * sizeof (E3_BlockCopyEvent))
63678 +#define Q_MSG(q,slotNum)       (void *)((q)->q_main      + (slotNum) * (q)->q_slotSize)
63679 +#define Q_MSG_ADDR(q,slotNum)          ((q)->q_mainAddr  + (slotNum) * (q)->q_slotSize)
63680 +#define Q_DONE(q,slotNum)     (*((int *)((q)->q_main      + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32))))
63681 +#define Q_DONE_ADDR(q,slotNum)         ((q)->q_mainAddr  + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E3_uint32))
63682 +
63683 +#define Q_ELAN_SIZE(q)                 ((q)->q_slotCount * sizeof (E3_BlockCopyEvent))
63684 +#define Q_MAIN_SIZE(q)                 ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E3_uint32)))
63685 +
63686 +static void
63687 +ep3_outputq_retry (EP3_RAIL *rail, void *arg, E3_DMA_BE *dma, int error)
63688 +{
63689 +    E3_DMA_BE    *dmabe = (E3_DMA_BE *) dma;
63690 +    sdramaddr_t   event = ep_elan2sdram (&rail->Generic, dmabe->s.dma_srcEvent);
63691 +    E3_Addr       done  = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Dest));
63692 +    E3_uint32    *donep = ep_elan2main (&rail->Generic, done & ~EV_BCOPY_DTYPE_MASK);
63693 +
63694 +    EPRINTF1 (DBG_KMSG, "ep3_outputq_retry: donep at %p -> FAILED\n", donep);
63695 +    
63696 +    *donep = EP3_EVENT_FAILED;
63697 +}
63698 +
63699 +static EP3_COOKIE_OPS ep3_outputq_cookie_ops =
63700 +{
63701 +    NULL, /* Event */
63702 +    ep3_outputq_retry,
63703 +    NULL, /* DmaCancelled */
63704 +    NULL, /* DmaVerify */
63705 +};
63706 +
63707 +EP_OUTPUTQ *
63708 +ep3_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount)
63709 +{
63710 +    EP3_RAIL         *rail = (EP3_RAIL *) r;
63711 +    EP3_OUTPUTQ      *outputq;
63712 +    int               i;
63713 +    E3_BlockCopyEvent event;
63714 +
63715 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
63716 +
63717 +    KMEM_ALLOC (outputq, EP3_OUTPUTQ *, sizeof (EP3_OUTPUTQ), 1);
63718 +
63719 +    if (outputq == NULL)
63720 +       return NULL;
63721 +
63722 +    outputq->q_slotCount = slotCount;
63723 +    outputq->q_slotSize  = slotSize;
63724 +
63725 +    outputq->q_elan = ep_alloc_elan (r, Q_ELAN_SIZE(outputq), 0, &outputq->q_elanAddr);
63726 +
63727 +    if (outputq->q_elan == (sdramaddr_t) 0)
63728 +    {
63729 +       KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
63730 +       return NULL;
63731 +    }
63732 +
63733 +    outputq->q_main = ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr);
63734 +
63735 +    if (outputq->q_main == (void *) NULL)
63736 +    {
63737 +       ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq));
63738 +       KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
63739 +       return NULL;
63740 +    }
63741 +
63742 +    RegisterCookie (&rail->CookieTable, &outputq->q_cookie, outputq->q_elanAddr, &ep3_outputq_cookie_ops, outputq);
63743 +
63744 +    for (i = 0; i < slotCount; i++)
63745 +    {
63746 +       EP3_INIT_COPY_EVENT (event, outputq->q_cookie, Q_DONE_ADDR(outputq, i), 0);
63747 +
63748 +       Q_DONE(outputq, i) = outputq->q_cookie.Cookie;
63749 +       
63750 +       elan3_sdram_copyl_to_sdram (rail->Device, &event, Q_EVENT(outputq, i), sizeof (E3_BlockCopyEvent));
63751 +    }
63752 +
63753 +    return (EP_OUTPUTQ *) outputq;
63754 +}
63755 +
63756 +void
63757 +ep3_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q)
63758 +{
63759 +    EP3_RAIL    *rail    = (EP3_RAIL *) r;
63760 +    EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q;
63761 +
63762 +    DeregisterCookie (&rail->CookieTable, &outputq->q_cookie);
63763 +    
63764 +    ep_free_main (r, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
63765 +    ep_free_elan (r, outputq->q_elanAddr, Q_ELAN_SIZE(outputq));
63766 +
63767 +    KMEM_FREE (outputq, sizeof (EP3_OUTPUTQ));
63768 +}
63769 +
63770 +void *
63771 +ep3_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
63772 +{
63773 +    return Q_MSG ((EP3_OUTPUTQ *) q, slotNum);
63774 +}
63775 +
63776 +int
63777 +ep3_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
63778 +{
63779 +    switch (Q_DONE((EP3_OUTPUTQ *) q, slotNum))
63780 +    {
63781 +    case EP3_EVENT_ACTIVE:
63782 +       return EP_OUTPUTQ_BUSY;
63783 +       
63784 +    case EP3_EVENT_FAILED:
63785 +       return EP_OUTPUTQ_FAILED;
63786 +       
63787 +    default:
63788 +       return EP_OUTPUTQ_FINISHED;
63789 +    }
63790 +}
63791 +
63792 +int
63793 +ep3_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, 
63794 +                 unsigned vp, unsigned qnum, unsigned retries)
63795 +{
63796 +    EP3_RAIL    *rail    = (EP3_RAIL *) r;
63797 +    EP3_OUTPUTQ *outputq = (EP3_OUTPUTQ *) q;
63798 +    unsigned     base    = outputq->q_slotSize - roundup (size, E3_BLK_ALIGN);
63799 +    E3_DMA_BE    dmabe;
63800 +
63801 +    dmabe.s.dma_type           = E3_DMA_TYPE(DMA_BYTE, DMA_WRITE, DMA_QUEUED, retries);
63802 +    dmabe.s.dma_size            = roundup (size, E3_BLK_ALIGN);
63803 +    dmabe.s.dma_source          = Q_MSG_ADDR(outputq, slotNum) + base;
63804 +    dmabe.s.dma_dest            = base;
63805 +    dmabe.s.dma_destEvent       = EP_SYSTEMQ_ADDR(qnum);
63806 +    dmabe.s.dma_destCookieVProc = vp;
63807 +    dmabe.s.dma_srcEvent        = Q_EVENT_ADDR(outputq, slotNum);
63808 +    dmabe.s.dma_srcCookieVProc  = 0;
63809 +
63810 +    Q_DONE(outputq, slotNum) = EP3_EVENT_ACTIVE;
63811 +    
63812 +    elan3_sdram_writel (rail->Device, Q_EVENT(outputq, slotNum), 1);
63813 +
63814 +    if (IssueDma (rail, &dmabe, EP_RETRY_CRITICAL, FALSE) != ISSUE_COMMAND_OK)
63815 +    {
63816 +       Q_DONE(outputq, slotNum) = EP3_EVENT_FAILED;
63817 +       return FALSE;
63818 +    }
63819 +
63820 +    return TRUE;
63821 +}
63822 diff -urN clean/drivers/net/qsnet/ep/kmsg_elan4.c linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan4.c
63823 --- clean/drivers/net/qsnet/ep/kmsg_elan4.c     1969-12-31 19:00:00.000000000 -0500
63824 +++ linux-2.6.9/drivers/net/qsnet/ep/kmsg_elan4.c       2005-02-28 09:05:38.000000000 -0500
63825 @@ -0,0 +1,418 @@
63826 +/*
63827 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
63828 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
63829 + *
63830 + *    For licensing information please see the supplied COPYING file
63831 + *
63832 + */
63833 +
63834 +#ident "@(#)$Id: kmsg_elan4.c,v 1.10 2005/02/28 14:05:38 david Exp $"
63835 +/*      $Source: /cvs/master/quadrics/epmod/kmsg_elan4.c,v $ */
63836 +
63837 +#include <qsnet/kernel.h>
63838 +
63839 +#include <elan/kcomm.h>
63840 +
63841 +#include "debug.h"
63842 +#include "kcomm_vp.h"
63843 +#include "kcomm_elan4.h"
63844 +
63845 +#include <elan4/trtype.h>
63846 +
63847 +static void
63848 +ep4_inputq_interrupt (EP4_RAIL *rail, void *arg)
63849 +{
63850 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) arg;
63851 +
63852 +    /* mark the queue as "fired" to cause a single waitevent
63853 +     * to be issued next time the queue is polled */
63854 +    atomic_inc (&inputq->q_fired);
63855 +    
63856 +    (*inputq->q_callback)(&rail->r_generic, inputq->q_arg);
63857 +}
63858 +
63859 +EP_INPUTQ *
63860 +ep4_alloc_inputq (EP_RAIL *r, unsigned qnum, unsigned slotSize, unsigned slotCount,
63861 +                 EP_INPUTQ_CALLBACK *callback, void *arg)
63862 +{
63863 +    EP4_RAIL     *rail = (EP4_RAIL *) r;
63864 +    EP4_INPUTQ   *inputq;
63865 +    E4_Event32    qevent;
63866 +    void         *slots;
63867 +    int           i;
63868 +
63869 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
63870 +
63871 +    KMEM_ALLOC (inputq, EP4_INPUTQ *, sizeof (EP4_INPUTQ), 1);
63872 +
63873 +    if (inputq == NULL)
63874 +       return (EP_INPUTQ *) NULL;
63875 +    
63876 +    if ((slots = ep_alloc_main (&rail->r_generic, slotSize * slotCount, 0, &inputq->q_slotsAddr)) == NULL)
63877 +    {
63878 +       KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
63879 +       return (EP_INPUTQ *) NULL;
63880 +    }
63881 +
63882 +    inputq->q_slotSize  = slotSize;
63883 +    inputq->q_slotCount = slotCount;
63884 +    inputq->q_callback  = callback;
63885 +    inputq->q_arg       = arg;
63886 +    inputq->q_slots     = slots;
63887 +
63888 +    /* Initialise all the slots to be "unreceived" */
63889 +    for (i = 0; i < slotCount; i++)
63890 +       ((uint32_t *) ((unsigned long) slots + (i+1) * slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
63891 +    
63892 +    inputq->q_last      = inputq->q_slotsAddr + (slotCount-1) * slotSize;
63893 +    inputq->q_fptr      = inputq->q_slotsAddr;
63894 +    inputq->q_desc      = EP_SYSTEMQ_DESC (rail->r_queuedescs, qnum);
63895 +    inputq->q_descAddr  = EP_SYSTEMQ_ADDR (qnum);
63896 +    inputq->q_eventAddr = rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]);
63897 +
63898 +    if (callback)
63899 +    {
63900 +       if ((inputq->q_ecq = ep4_get_ecq (rail, EP4_ECQ_EVENT, 1)) == 0)
63901 +       {
63902 +           ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
63903 +
63904 +           KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
63905 +           return (EP_INPUTQ *) NULL;
63906 +       }
63907 +
63908 +       if ((inputq->q_wcq = ep4_get_ecq (rail, EP4_ECQ_MAIN, 4)) == 0)
63909 +       {
63910 +           ep4_put_ecq (rail, inputq->q_ecq, 1);
63911 +           ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
63912 +
63913 +           KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
63914 +           return (EP_INPUTQ *) NULL;
63915 +       }
63916 +
63917 +       ep4_register_intcookie (rail, &inputq->q_intcookie, inputq->q_descAddr, ep4_inputq_interrupt, inputq);
63918 +
63919 +       inputq->q_count = 0;
63920 +
63921 +       atomic_set (&inputq->q_fired, 0);
63922 +
63923 +       /* Initialise the queue event */
63924 +       qevent.ev_CountAndType = E4_EVENT_INIT_VALUE (callback ? -32 : 0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0);
63925 +       qevent.ev_WritePtr     = inputq->q_ecq->ecq_addr;
63926 +       qevent.ev_WriteValue   = (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD;
63927 +    }
63928 +
63929 +    /* copy the event down to sdram */
63930 +    elan4_sdram_copyq_to_sdram (rail->r_ctxt.ctxt_dev, &qevent, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_qevents[qnum]), sizeof (E4_Event32));
63931 +
63932 +    return (EP_INPUTQ *) inputq;
63933 +}
63934 +
63935 +void
63936 +ep4_free_inputq (EP_RAIL *r, EP_INPUTQ *q)
63937 +{
63938 +    EP4_RAIL   *rail   = (EP4_RAIL *) r;
63939 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) q;
63940 +
63941 +    ep_free_main (&rail->r_generic, inputq->q_slotsAddr, inputq->q_slotSize * inputq->q_slotCount);
63942 +
63943 +    if (inputq->q_callback)
63944 +    {
63945 +       ep4_deregister_intcookie (rail, &inputq->q_intcookie);
63946 +       ep4_put_ecq (rail, inputq->q_ecq, 1);
63947 +       ep4_put_ecq (rail, inputq->q_wcq, 4);
63948 +    }
63949 +
63950 +    KMEM_FREE (inputq, sizeof (EP4_INPUTQ));
63951 +}
63952 +
63953 +void
63954 +ep4_enable_inputq (EP_RAIL *r, EP_INPUTQ *q)
63955 +{
63956 +    EP4_RAIL     *rail     = (EP4_RAIL *) r;
63957 +    EP4_INPUTQ   *inputq   = (EP4_INPUTQ *) q;
63958 +    EP_ADDR       lastSlot = inputq->q_slotsAddr + (inputq->q_slotCount-1) * inputq->q_slotSize;
63959 +    E4_InputQueue qdesc;
63960 +
63961 +    qdesc.q_bptr    = inputq->q_slotsAddr;
63962 +    qdesc.q_fptr    = inputq->q_slotsAddr;
63963 +    qdesc.q_control = E4_InputQueueControl (inputq->q_slotsAddr, lastSlot, inputq->q_slotSize);
63964 +    qdesc.q_event   = inputq->q_callback ? inputq->q_eventAddr : 0;
63965 +
63966 +    /* copy the queue descriptor down to sdram */
63967 +    ep4_write_qdesc (rail, inputq->q_desc, &qdesc);
63968 +
63969 +    EPRINTF5 (DBG_KMSG,  "ep_enable_inputq: %x - %016llx %016llx %016llx %016llx\n", (int) inputq->q_descAddr,
63970 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 0),
63971 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 8),
63972 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 16),
63973 +           elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq->q_desc + 24));
63974 +}
63975 +
63976 +void
63977 +ep4_disable_inputq (EP_RAIL *r, EP_INPUTQ *q)
63978 +{
63979 +    EP4_RAIL     *rail   = (EP4_RAIL *) r;
63980 +    EP4_INPUTQ   *inputq = (EP4_INPUTQ *) q;
63981 +    E4_InputQueue qdesc;
63982 +
63983 +    /* Initialise the input queue descriptor as "full" with no event */
63984 +    qdesc.q_bptr    = 0;
63985 +    qdesc.q_fptr    = 8;
63986 +    qdesc.q_control = E4_InputQueueControl(qdesc.q_bptr, qdesc.q_fptr, 8);
63987 +    qdesc.q_event   = 0;
63988 +
63989 +    /* copy the queue descriptor down to sdram */
63990 +    ep4_write_qdesc (rail, inputq->q_desc, &qdesc);
63991 +}
63992 +
63993 +int
63994 +ep4_poll_inputq (EP_RAIL *r, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg)
63995 +{
63996 +    EP4_RAIL   *rail   = (EP4_RAIL *) r;
63997 +    ELAN4_DEV  *dev    = rail->r_ctxt.ctxt_dev; 
63998 +    EP4_INPUTQ *inputq = (EP4_INPUTQ *) q;
63999 +    sdramaddr_t qdesc = inputq->q_desc;
64000 +    E4_Addr     fptr  = inputq->q_fptr;
64001 +    E4_Addr     bptr  = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
64002 +    int                count = 0;
64003 +    int         delay;
64004 +
64005 +    while (bptr != 0 && fptr != bptr)
64006 +    {
64007 +       while (fptr != bptr)
64008 +       {
64009 +           unsigned long slot = (unsigned long) inputq->q_slots + (fptr - inputq->q_slotsAddr);
64010 +           
64011 +           /* Poll the final word of the message until the message has completely
64012 +            * arrived in main memory. */
64013 +           for (delay = 1; ((uint32_t *) (slot + inputq->q_slotSize))[-1] == EP_SYSTEMQ_UNRECEIVED && delay < EP_SYSTEMQ_UNRECEIVED_TLIMIT; delay <<= 1)
64014 +               DELAY (delay);
64015 +           
64016 +           EPRINTF4(DBG_KMSG, "ep4_poll_inputq: %x slot %d of %d [%08x]\n", (int)inputq->q_descAddr,
64017 +                    ((int)(fptr - inputq->q_slotsAddr))/inputq->q_slotSize, 
64018 +                    inputq->q_slotCount, ((uint32_t *) (slot + inputq->q_slotSize))[-1]);
64019 +           
64020 +           /* Call the message handler */
64021 +           (*handler) (r, arg, (void *) slot);
64022 +           
64023 +           /* reset the last word of the slot to "unreceived" */
64024 +           ((uint32_t *) (slot + inputq->q_slotSize))[-1] = EP_SYSTEMQ_UNRECEIVED;
64025 +           
64026 +           /* move on the front pointer */
64027 +           fptr = (fptr == inputq->q_last) ? inputq->q_slotsAddr : fptr + inputq->q_slotSize;
64028 +           
64029 +           elan4_sdram_writel (dev, qdesc + offsetof (E4_InputQueue, q_fptr), fptr);
64030 +           
64031 +           inputq->q_count++;
64032 +           
64033 +           if (++count >= maxCount && maxCount)
64034 +           {
64035 +               inputq->q_fptr = fptr;
64036 +
64037 +               return count;
64038 +           }
64039 +       }
64040 +
64041 +       bptr = elan4_sdram_readl (dev, qdesc + offsetof (E4_InputQueue, q_bptr));
64042 +    }
64043 +
64044 +    inputq->q_fptr = fptr;
64045 +
64046 +    /* Only insert a single wait event command if the callback has
64047 +     * occured, otherwise just acrue the count as we've just periodically
64048 +     * polled it.
64049 +     */
64050 +    if (inputq->q_callback && atomic_read (&inputq->q_fired))
64051 +    {
64052 +       atomic_dec (&inputq->q_fired);
64053 +
64054 +       ep4_wait_event_cmd (inputq->q_wcq, inputq->q_eventAddr,
64055 +                           E4_EVENT_INIT_VALUE (-inputq->q_count << 5, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0),
64056 +                           inputq->q_ecq->ecq_addr,
64057 +                           (inputq->q_intcookie.int_val << E4_MAIN_INT_SHIFT) | INTERRUPT_CMD);
64058 +
64059 +       inputq->q_count = 0;
64060 +    }
64061 +
64062 +    return count;
64063 +}
64064 +
64065 +#define Q_MSG(q,slotNum)         (unsigned long)((q)->q_main      + (slotNum) * (q)->q_slotSize)
64066 +#define Q_MSG_ADDR(q,slotNum)                  ((q)->q_mainAddr  + (slotNum) * (q)->q_slotSize)
64067 +#define Q_DONE(q,slotNum)        *((E4_uint64 *)((q)->q_main      + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64)))
64068 +#define Q_DONE_ADDR(q,slotNum)                 ((q)->q_mainAddr  + (q)->q_slotCount * (q)->q_slotSize + (slotNum) * sizeof (E4_uint64))
64069 +
64070 +#define Q_MAIN_SIZE(q)                 ((q)->q_slotCount * ((q)->q_slotSize + sizeof (E4_uint64)))
64071 +
64072 +#define Q_DONE_VAL(val,cnt)            ((cnt) << 16 | (val))
64073 +#define Q_DONE_RET(done)               ((int) ((done) & 0xffff))
64074 +#define Q_DONE_CNT(done)               ((int) ((done) >> 16))
64075 +
64076 +EP_OUTPUTQ *
64077 +ep4_alloc_outputq (EP_RAIL *r, unsigned slotSize, unsigned slotCount)
64078 +{
64079 +    EP4_RAIL    *rail = (EP4_RAIL *) r;
64080 +    EP4_OUTPUTQ *outputq;
64081 +    int          i;
64082 +
64083 +    ASSERT ((slotSize & (EP_SYSTEMQ_MSG_ALIGN-1)) == 0);
64084 +
64085 +    KMEM_ALLOC (outputq, EP4_OUTPUTQ *, sizeof (EP4_OUTPUTQ), 1);
64086 +
64087 +    if (outputq == NULL)
64088 +       return NULL;
64089 +
64090 +    spin_lock_init (&outputq->q_lock);
64091 +
64092 +    outputq->q_slotCount = slotCount;
64093 +    outputq->q_slotSize  = slotSize;
64094 +    outputq->q_main      = ep_alloc_main (r, Q_MAIN_SIZE(outputq), 0, &outputq->q_mainAddr);
64095 +
64096 +    if (outputq->q_main == (E4_uint64 *) NULL)
64097 +    {
64098 +       KMEM_FREE (outputq, sizeof (EP_OUTPUTQ));
64099 +       return NULL;
64100 +    }
64101 +
64102 +    outputq->q_cq = elan4_alloccq (&rail->r_ctxt, CQ_Size64K, CQ_STENEnableBit | CQ_WriteEnableBit, CQ_Priority);
64103 +
64104 +    if (outputq->q_cq == (ELAN4_CQ *) NULL)
64105 +    {
64106 +       ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
64107 +
64108 +       KMEM_FREE (outputq, sizeof (EP_OUTPUTQ));
64109 +    }
64110 +
64111 +    outputq->q_dwords = CQ_Size (outputq->q_cq->cq_size) >> 3;
64112 +
64113 +    /* mark all the queue slots as finished */
64114 +    for (i = 0; i < slotCount; i++)
64115 +       Q_DONE(outputq, i) = Q_DONE_VAL (EP_OUTPUTQ_FINISHED, 0);
64116 +
64117 +    return (EP_OUTPUTQ *) outputq;
64118 +}
64119 +
64120 +void
64121 +ep4_free_outputq (EP_RAIL *r, EP_OUTPUTQ *q)
64122 +{
64123 +    EP4_RAIL    *rail    = (EP4_RAIL *) r;
64124 +    EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q;
64125 +
64126 +    elan4_freecq (&rail->r_ctxt, outputq->q_cq);
64127 +
64128 +    ep_free_main (&rail->r_generic, outputq->q_mainAddr, Q_MAIN_SIZE(outputq));
64129 +
64130 +    spin_lock_destroy (&outputq->q_lock);
64131 +
64132 +    KMEM_FREE (outputq, sizeof (EP4_OUTPUTQ));
64133 +}
64134 +
64135 +void *
64136 +ep4_outputq_msg (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
64137 +{
64138 +    return (void *) Q_MSG ((EP4_OUTPUTQ *) q, slotNum);
64139 +}
64140 +
64141 +int
64142 +ep4_outputq_state (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum)
64143 +{
64144 +    EPRINTF2 (DBG_KMSG, "ep4_outputq_state: slotNum %d state %x\n", slotNum, (int)Q_DONE((EP4_OUTPUTQ *) q, slotNum));
64145 +
64146 +    return Q_DONE_RET(Q_DONE((EP4_OUTPUTQ *)q, slotNum));
64147 +}
64148 +
64149 +int
64150 +ep4_outputq_send (EP_RAIL *r, EP_OUTPUTQ *q, unsigned slotNum, unsigned size, 
64151 +                 unsigned vp, unsigned qnum, unsigned retries)
64152 +{
64153 +    EP4_OUTPUTQ *outputq = (EP4_OUTPUTQ *) q;
64154 +    unsigned int nbytes  = roundup (size, 32);
64155 +    unsigned int base    = outputq->q_slotSize - nbytes;
64156 +    unsigned int i, dwords;
64157 +    unsigned long flags;
64158 +    E4_uint64 val;
64159 +
64160 +    spin_lock_irqsave (&outputq->q_lock, flags);
64161 +
64162 +    EPRINTF4 (DBG_KMSG, "ep4_outputq_send: slotNum=%d size=%d vp=%d qnum=%d\n", slotNum, size, vp, qnum);
64163 +
64164 +    /* compute command queue size as follows - each slot uses
64165 +     *     overhead:   14 dwords +
64166 +     *    data > 128 ? 36 dwords
64167 +     *     data > 64  ? 18 dwords
64168 +     *     data > 32  ? 10 dwords
64169 +     *     else         6  dwords
64170 +     */
64171 +    dwords = 14 + (size > 128 ? 36 :
64172 +                  size > 64  ? 18 :
64173 +                  size  ? 10 : 6);
64174 +
64175 +    outputq->q_dwords += Q_DONE_CNT (Q_DONE(outputq, slotNum));
64176 +
64177 +    if (dwords > outputq->q_dwords)
64178 +    {
64179 +       /* attempt to reclaim command queue space from other slots */
64180 +       i = slotNum;
64181 +       do {
64182 +           if (++i == outputq->q_slotCount)
64183 +               i = 0;
64184 +           
64185 +           val = Q_DONE(outputq, i);
64186 +
64187 +           if ((Q_DONE_RET (val) == EP_OUTPUTQ_FINISHED || Q_DONE_RET (val) == EP_OUTPUTQ_FAILED) && Q_DONE_CNT(val) > 0)
64188 +           {
64189 +               outputq->q_dwords += Q_DONE_CNT (val);
64190 +
64191 +               Q_DONE(outputq, i) = Q_DONE_VAL(Q_DONE_RET(val), 0);
64192 +           }
64193 +       } while (i != slotNum && dwords > outputq->q_dwords);
64194 +    }
64195 +
64196 +    if (dwords > outputq->q_dwords)
64197 +    {
64198 +       spin_unlock_irqrestore (&outputq->q_lock, flags);
64199 +       
64200 +       EPRINTF0 (DBG_KMSG, "ep4_outputq_state: no command queue space\n");
64201 +       return 0;
64202 +    }
64203 +
64204 +    outputq->q_dwords -= dwords;
64205 +
64206 +    Q_DONE(outputq, slotNum) = Q_DONE_VAL (EP_OUTPUTQ_BUSY, dwords);
64207 +
64208 +    if (outputq->q_retries != retries)
64209 +    {
64210 +       outputq->q_retries = retries;
64211 +
64212 +       elan4_guard (outputq->q_cq, GUARD_CHANNEL(1) | GUARD_RESET(retries));
64213 +       elan4_nop_cmd (outputq->q_cq, 0);
64214 +    }
64215 +
64216 +    /* transfer the top "size" bytes from message buffer to top of input queue */
64217 +    elan4_open_packet (outputq->q_cq, OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, vp));
64218 +    elan4_sendtrans0 (outputq->q_cq, TR_INPUT_Q_GETINDEX, EP_SYSTEMQ_ADDR(qnum));
64219 +
64220 +    /* send upto EP_SYSTEMQ_MSG_MAX (256) bytes of message to the top of the slot */
64221 +    if (size > 128)
64222 +    {
64223 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 0,   (void *) (Q_MSG(outputq, slotNum) + base + 0));
64224 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base + 128, (void *) (Q_MSG(outputq, slotNum) + base + 128));
64225 +    }
64226 +    else if (size > 64)
64227 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (128 >> 3, 0, TR_DATATYPE_DWORD), base, (void *) (Q_MSG(outputq, slotNum) + base));
64228 +    else if (size > 32)
64229 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (64 >> 3, 0, TR_DATATYPE_DWORD),  base, (void *) (Q_MSG(outputq, slotNum) + base));
64230 +    else
64231 +       elan4_sendtransp (outputq->q_cq, TR_WRITE (32 >> 3, 0, TR_DATATYPE_DWORD),  base, (void *) (Q_MSG(outputq, slotNum) + base));
64232 +    elan4_sendtrans1 (outputq->q_cq, TR_INPUT_Q_COMMIT, EP_SYSTEMQ_ADDR(qnum), 0 /* no cookie */);
64233 +
64234 +    elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET (outputq->q_retries));
64235 +    elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FINISHED, dwords));
64236 +
64237 +    elan4_guard (outputq->q_cq, GUARD_CHANNEL (1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET (outputq->q_retries));
64238 +    elan4_write_dword_cmd (outputq->q_cq, Q_DONE_ADDR(outputq, slotNum), Q_DONE_VAL (EP_OUTPUTQ_FAILED, dwords));
64239 +
64240 +    spin_unlock_irqrestore (&outputq->q_lock, flags);
64241 +
64242 +    return 1;
64243 +}
64244 diff -urN clean/drivers/net/qsnet/ep/kthread.c linux-2.6.9/drivers/net/qsnet/ep/kthread.c
64245 --- clean/drivers/net/qsnet/ep/kthread.c        1969-12-31 19:00:00.000000000 -0500
64246 +++ linux-2.6.9/drivers/net/qsnet/ep/kthread.c  2004-05-19 04:54:57.000000000 -0400
64247 @@ -0,0 +1,186 @@
64248 +/*
64249 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64250 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64251 + *
64252 + *    For licensing information please see the supplied COPYING file
64253 + *
64254 + */
64255 +
64256 +#ident "@(#)$Id: kthread.c,v 1.5 2004/05/19 08:54:57 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
64257 +/*      $Source: /cvs/master/quadrics/epmod/kthread.c,v $*/
64258 +
64259 +#include <qsnet/kernel.h>
64260 +
64261 +#include <elan/kthread.h>
64262 +
64263 +void
64264 +ep_kthread_init (EP_KTHREAD *kt)
64265 +{
64266 +       spin_lock_init (&kt->lock);
64267 +       kcondvar_init (&kt->wait);
64268 +       
64269 +       kt->next_run     = 0;
64270 +       kt->should_stall = 0;
64271 +       kt->started      = 0;
64272 +       kt->should_stop  = 0;
64273 +       kt->stopped      = 0;
64274 +       kt->state        = KT_STATE_RUNNING;
64275 +}
64276 +
64277 +void
64278 +ep_kthread_destroy (EP_KTHREAD *kt)
64279 +{
64280 +       spin_lock_destroy (&kt->lock);
64281 +       kcondvar_destroy (&kt->wait);
64282 +}
64283 +
64284 +void
64285 +ep_kthread_started (EP_KTHREAD *kt)
64286 +{
64287 +       unsigned long flags;
64288 +       
64289 +       spin_lock_irqsave (&kt->lock, flags);
64290 +       kt->started = 1;
64291 +       spin_unlock_irqrestore(&kt->lock, flags);
64292 +}
64293 +
64294 +void
64295 +ep_kthread_stopped (EP_KTHREAD *kt)
64296 +{
64297 +       unsigned long flags;
64298 +       
64299 +       spin_lock_irqsave (&kt->lock, flags);
64300 +       kt->stopped = 1;
64301 +       kcondvar_wakeupall (&kt->wait, &kt->lock);
64302 +       spin_unlock_irqrestore(&kt->lock, flags);
64303 +}
64304 +
64305 +int
64306 +ep_kthread_should_stall (EP_KTHREAD *kth)
64307 +{
64308 +       return (kth->should_stall);
64309 +}
64310 +
64311 +int
64312 +ep_kthread_sleep (EP_KTHREAD *kt, long next_run)
64313 +{
64314 +       unsigned long flags;
64315 +
64316 +       spin_lock_irqsave (&kt->lock, flags);
64317 +       if (next_run && (kt->next_run == 0 || BEFORE (next_run, kt->next_run)))
64318 +               kt->next_run = next_run;
64319 +
64320 +       if (kt->should_stop)
64321 +       {
64322 +               spin_unlock_irqrestore (&kt->lock, flags);
64323 +               return (-1);
64324 +       }
64325 +       
64326 +       do {
64327 +               if (kt->should_stall)
64328 +                       kcondvar_wakeupall (&kt->wait, &kt->lock);
64329 +
64330 +               kt->state = KT_STATE_SLEEPING;
64331 +               kt->running = 0;
64332 +               if (kt->should_stall || kt->next_run == 0)
64333 +                       kcondvar_wait (&kt->wait, &kt->lock, &flags);
64334 +               else
64335 +                       kcondvar_timedwait (&kt->wait,&kt->lock, &flags, kt->next_run);
64336 +               kt->state = KT_STATE_RUNNING;
64337 +               kt->running = lbolt;
64338 +       } while (kt->should_stall);
64339 +       kt->next_run = 0;
64340 +       spin_unlock_irqrestore (&kt->lock, flags);
64341 +       
64342 +       return (0);
64343 +}
64344 +
64345 +void
64346 +ep_kthread_schedule (EP_KTHREAD *kt, long tick)
64347 +{
64348 +       unsigned long flags;
64349 +       
64350 +       spin_lock_irqsave (&kt->lock, flags);
64351 +       if (kt->next_run == 0 || BEFORE (tick, kt->next_run))
64352 +       {
64353 +               kt->next_run = tick;
64354 +               if (!kt->should_stall && kt->state == KT_STATE_SLEEPING)
64355 +               {
64356 +                       kt->state = KT_STATE_SCHEDULED;
64357 +                       kcondvar_wakeupone (&kt->wait, &kt->lock);
64358 +               }
64359 +       }
64360 +       spin_unlock_irqrestore (&kt->lock, flags);
64361 +}
64362 +
64363 +void
64364 +ep_kthread_stall (EP_KTHREAD *kt)
64365 +{
64366 +       unsigned long flags;
64367 +       
64368 +       spin_lock_irqsave (&kt->lock, flags);
64369 +       if (kt->should_stall++ == 0)
64370 +               kcondvar_wakeupall (&kt->wait, &kt->lock);
64371 +
64372 +       while (kt->state != KT_STATE_SLEEPING)
64373 +               kcondvar_wait (&kt->wait, &kt->lock, &flags);
64374 +       spin_unlock_irqrestore (&kt->lock, flags);
64375 +}
64376 +
64377 +void
64378 +ep_kthread_resume (EP_KTHREAD *kt)
64379 +{
64380 +       unsigned long flags;
64381 +
64382 +       spin_lock_irqsave (&kt->lock, flags);
64383 +       if (--kt->should_stall == 0)
64384 +       {
64385 +               kt->state = KT_STATE_SCHEDULED;
64386 +               kcondvar_wakeupone (&kt->wait, &kt->lock);
64387 +       }
64388 +       spin_unlock_irqrestore (&kt->lock, flags);
64389 +}
64390 +
64391 +void
64392 +ep_kthread_stop (EP_KTHREAD *kt)
64393 +{
64394 +       unsigned long flags;
64395 +       
64396 +       spin_lock_irqsave (&kt->lock, flags);
64397 +       kt->should_stop = 1;
64398 +       while (kt->started && !kt->stopped)
64399 +       {
64400 +               kcondvar_wakeupall (&kt->wait, &kt->lock);
64401 +               kcondvar_wait (&kt->wait, &kt->lock, &flags);
64402 +       }
64403 +       spin_unlock_irqrestore (&kt->lock, flags);
64404 +}
64405 +
64406 +int
64407 +ep_kthread_state (EP_KTHREAD *kt, long *time)
64408 +{
64409 +       unsigned long flags;
64410 +       int res = KT_STATE_SLEEPING;
64411 +
64412 +       spin_lock_irqsave (&kt->lock, flags);
64413 +
64414 +       if (kt->next_run) {
64415 +               *time = kt->next_run;
64416 +               res   = kt->should_stall ? KT_STATE_STALLED : KT_STATE_SCHEDULED;
64417 +       }
64418 +
64419 +       if (kt->running) {
64420 +               *time = kt->running;
64421 +               res   = KT_STATE_RUNNING;
64422 +       }
64423 +
64424 +       spin_unlock_irqrestore (&kt->lock, flags);
64425 +       
64426 +       return res;
64427 +}
64428 +
64429 +/*
64430 + * Local variables:
64431 + * c-file-style: "linux"
64432 + * End:
64433 + */
64434 diff -urN clean/drivers/net/qsnet/ep/kthread.h linux-2.6.9/drivers/net/qsnet/ep/kthread.h
64435 --- clean/drivers/net/qsnet/ep/kthread.h        1969-12-31 19:00:00.000000000 -0500
64436 +++ linux-2.6.9/drivers/net/qsnet/ep/kthread.h  2004-05-06 10:24:08.000000000 -0400
64437 @@ -0,0 +1,53 @@
64438 +/*
64439 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64440 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
64441 + *
64442 + *    For licensing information please see the supplied COPYING file
64443 + *
64444 + */
64445 +
64446 +#ifndef __ELAN3_KTHREAD_H
64447 +#define __ELAN3_KTHREAD_H
64448 +
64449 +#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
64450 +/*      $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/
64451 +
64452 +typedef struct ep_kthread
64453 +{
64454 +       kcondvar_t      wait;                                   /* place to sleep */
64455 +       spinlock_t      lock;                                   /* and lock */
64456 +       long            next_run;                               /* tick when thread should next run */
64457 +       long            running;                                /* tick when thread started to run */
64458 +       unsigned short  should_stall;
64459 +       unsigned char   state;
64460 +       unsigned int    started:1;
64461 +       unsigned int    should_stop:1;
64462 +       unsigned int    stopped:1;
64463 +} EP_KTHREAD;
64464 +
64465 +#define KT_STATE_SLEEPING              0
64466 +#define KT_STATE_SCHEDULED             1
64467 +#define KT_STATE_RUNNING               2
64468 +#define KT_STATE_STALLED               3
64469 +
64470 +#define AFTER(a, b)                    ((((long)(a)) - ((long)(b))) > 0)
64471 +#define BEFORE(a,b)                    ((((long)(a)) - ((long)(b))) < 0)
64472 +
64473 +extern void ep_kthread_init (EP_KTHREAD *kt);
64474 +extern void ep_kthread_destroy (EP_KTHREAD *kt);
64475 +extern void ep_kthread_started (EP_KTHREAD *kt);
64476 +extern void ep_kthread_stopped (EP_KTHREAD *kt);
64477 +extern int  ep_kthread_should_stall (EP_KTHREAD *kth);
64478 +extern int  ep_kthread_sleep (EP_KTHREAD *kth, long next_run);
64479 +extern void ep_kthread_schedule (EP_KTHREAD *kt, long when);
64480 +extern void ep_kthread_stall (EP_KTHREAD *kth);
64481 +extern void ep_kthread_resume (EP_KTHREAD *kt);
64482 +extern void ep_kthread_stop (EP_KTHREAD *kt);
64483 +extern int  ep_kthread_state (EP_KTHREAD *kt, long *time);
64484 +#endif /* __ELAN3_KTHREAD_H */
64485 +
64486 +/*
64487 + * Local variables:
64488 + * c-file-style: "linux"
64489 + * End:
64490 + */
64491 diff -urN clean/drivers/net/qsnet/ep/Makefile linux-2.6.9/drivers/net/qsnet/ep/Makefile
64492 --- clean/drivers/net/qsnet/ep/Makefile 1969-12-31 19:00:00.000000000 -0500
64493 +++ linux-2.6.9/drivers/net/qsnet/ep/Makefile   2005-10-10 17:47:31.000000000 -0400
64494 @@ -0,0 +1,17 @@
64495 +#
64496 +# Makefile for Quadrics QsNet
64497 +#
64498 +# Copyright (c) 2002-2004 Quadrics Ltd
64499 +#
64500 +# File: drivers/net/qsnet/ep/Makefile
64501 +#
64502 +
64503 +
64504 +ep3-$(CONFIG_ELAN3)    := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o
64505 +ep4-$(CONFIG_ELAN4)    := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o
64506 +#
64507 +
64508 +obj-$(CONFIG_EP)       += ep.o
64509 +ep-objs        := cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o $(ep3-$(CONFIG_EP)) $(ep4-$(CONFIG_EP))
64510 +
64511 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
64512 diff -urN clean/drivers/net/qsnet/ep/Makefile.conf linux-2.6.9/drivers/net/qsnet/ep/Makefile.conf
64513 --- clean/drivers/net/qsnet/ep/Makefile.conf    1969-12-31 19:00:00.000000000 -0500
64514 +++ linux-2.6.9/drivers/net/qsnet/ep/Makefile.conf      2005-09-07 10:39:44.000000000 -0400
64515 @@ -0,0 +1,12 @@
64516 +# Flags for generating QsNet Linux Kernel Makefiles
64517 +MODNAME                =       ep.o
64518 +MODULENAME     =       ep
64519 +KOBJFILES      =       cm.o debug.o kalloc.o kcomm.o kmap.o kthread.o neterr.o nmh.o probenetwork.o railhints.o rmap.o statemap.o support.o threadcode.o epcomms.o epcommsRx.o epcommsTx.o epcommsFwd.o conf_linux.o procfs_linux.o ep_procfs.o cm_procfs.o \$\(ep3-\$\(CONFIG_EP\)\) \$\(ep4-\$\(CONFIG_EP\)\)
64520 +EXPORT_KOBJS   =       conf_linux.o
64521 +CONFIG_NAME    =       CONFIG_EP
64522 +SGALFC         =       
64523 +# EXTRALINES START
64524 +
64525 +ep3-$(CONFIG_ELAN3)    := kcomm_elan3.o kmsg_elan3.o kmap_elan3.o neterr_elan3.o probenetwork_elan3.o support_elan3.o threadcode_elan3.o threadcode_elan3_Linux.o epcomms_elan3.o epcommsTx_elan3.o epcommsRx_elan3.o
64526 +ep4-$(CONFIG_ELAN4)    := kcomm_elan4.o kmsg_elan4.o kmap_elan4.o neterr_elan4.o probenetwork_elan4.o commands_elan4.o debug_elan4.o support_elan4.o threadcode_elan4_Linux.o epcomms_elan4.o epcommsTx_elan4.o epcommsRx_elan4.o
64527 +# EXTRALINES END
64528 diff -urN clean/drivers/net/qsnet/ep/neterr.c linux-2.6.9/drivers/net/qsnet/ep/neterr.c
64529 --- clean/drivers/net/qsnet/ep/neterr.c 1969-12-31 19:00:00.000000000 -0500
64530 +++ linux-2.6.9/drivers/net/qsnet/ep/neterr.c   2005-07-20 08:01:34.000000000 -0400
64531 @@ -0,0 +1,79 @@
64532 +/*
64533 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64534 + *
64535 + *    For licensing information please see the supplied COPYING file
64536 + *
64537 + */
64538 +
64539 +#ident "@(#)$Id: neterr.c,v 1.27.2.1 2005/07/20 12:01:34 mike Exp $"
64540 +/*      $Source: /cvs/master/quadrics/epmod/neterr.c,v $ */
64541 +
64542 +#include <qsnet/kernel.h>
64543 +#include <elan/kcomm.h>
64544 +
64545 +#include "debug.h"
64546 +
64547 +void
64548 +ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie)
64549 +{
64550 +    EP_SYS       *sys      = rail->System;
64551 +    EP_NODE_RAIL *nodeRail = &rail->Nodes[nodeId];
64552 +    unsigned long flags;
64553 +
64554 +    spin_lock_irqsave (&sys->NodeLock, flags);
64555 +
64556 +    ASSERT (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
64557 +    
64558 +    if (nodeRail->NetworkErrorState == 0)
64559 +    {
64560 +       EPRINTF2 (DBG_NETWORK_ERROR, "%s: raise context filter for node %d due to network error\n", rail->Name, nodeId);
64561 +       
64562 +       rail->Operations.RaiseFilter (rail, nodeId);
64563 +       
64564 +       if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
64565 +           printk ("%s: node %d is flushing - deferring network error fixup\n", rail->Name, nodeId);
64566 +       else
64567 +           list_add_tail (&nodeRail->Link, &rail->NetworkErrorList);
64568 +    }
64569 +    
64570 +    switch (what)
64571 +    {
64572 +    case EP_NODE_NETERR_ATOMIC_PACKET:
64573 +       ASSERT (nodeRail->NetworkErrorCookies[channel] == 0);
64574 +       
64575 +       /* Need to raise the approriate context filter for this node,
64576 +        * and periodically send a neterr fixup message to it until 
64577 +        * we receive an ack from it
64578 +        */
64579 +       IncrStat (rail, NeterrAtomicPacket);
64580 +       
64581 +       nodeRail->NetworkErrorCookies[channel] = cookie;
64582 +       
64583 +       nodeRail->NetworkErrorState |= EP_NODE_NETERR_ATOMIC_PACKET;
64584 +       nodeRail->MsgXid             = ep_xid_cache_alloc (sys, &rail->XidCache);
64585 +       
64586 +       EPRINTF3 (DBG_NETWORK_ERROR, "%s: atomic packet destroyed - node %d cookie %llx\n", rail->Name, nodeId, (long long)cookie);
64587 +       break;
64588 +
64589 +    case EP_NODE_NETERR_DMA_PACKET:
64590 +       /* Must be an overlapped dma packet, raise the context filter,
64591 +        * and hold it up for a NETWORK_ERROR_TIMEOUT */
64592 +       IncrStat (rail, NeterrDmaPacket);
64593 +       
64594 +       nodeRail->NetworkErrorState |= EP_NODE_NETERR_DMA_PACKET;
64595 +       break;
64596 +    }
64597 +
64598 +    nodeRail->NextRunTime = lbolt + NETWORK_ERROR_TIMEOUT;
64599 +    
64600 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
64601 +
64602 +    ep_kthread_schedule (&sys->ManagerThread, nodeRail->NextRunTime);
64603 +}
64604 +
64605 +/*
64606 + * Local variables:
64607 + * c-file-style: "stroustrup"
64608 + * End:
64609 + */
64610 +
64611 diff -urN clean/drivers/net/qsnet/ep/neterr_elan3.c linux-2.6.9/drivers/net/qsnet/ep/neterr_elan3.c
64612 --- clean/drivers/net/qsnet/ep/neterr_elan3.c   1969-12-31 19:00:00.000000000 -0500
64613 +++ linux-2.6.9/drivers/net/qsnet/ep/neterr_elan3.c     2003-11-17 08:26:45.000000000 -0500
64614 @@ -0,0 +1,326 @@
64615 +/*
64616 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64617 + *
64618 + *    For licensing information please see the supplied COPYING file
64619 + *
64620 + */
64621 +
64622 +#ident "@(#)$Id: neterr_elan3.c,v 1.24 2003/11/17 13:26:45 david Exp $"
64623 +/*      $Source: /cvs/master/quadrics/epmod/neterr_elan3.c,v $ */
64624 +
64625 +#include <qsnet/kernel.h>
64626 +
64627 +#include <elan/kcomm.h>
64628 +
64629 +#include "kcomm_vp.h"
64630 +#include "kcomm_elan3.h"
64631 +#include "debug.h"
64632 +
64633 +typedef struct neterr_halt_args
64634 +{
64635 +    EP3_RAIL        *Rail;
64636 +    unsigned int      NodeId;
64637 +    EP_NETERR_COOKIE *Cookies;
64638 +} NETERR_HALT_ARGS;
64639 +
64640 +static int
64641 +DmaMatchesCookie (EP3_RAIL *rail, E3_DMA_BE *dma, int nodeId, EP_NETERR_COOKIE *cookies, char *where)
64642 +{
64643 +    E3_uint32     cvproc;
64644 +    E3_uint32     cookie;
64645 +    
64646 +    if (dma->s.dma_direction == DMA_WRITE)
64647 +    {
64648 +       cvproc = dma->s.dma_destCookieVProc;
64649 +       cookie = dma->s.dma_srcCookieVProc;
64650 +    }
64651 +    else
64652 +    {
64653 +       cvproc = dma->s.dma_srcCookieVProc;
64654 +       cookie = dma->s.dma_destCookieVProc;
64655 +    }
64656 +    
64657 +    EPRINTF6 (DBG_NETWORK_ERROR, "%s: Neterr - %s: DMA %08x %08x %08x %08x\n", rail->Generic.Name, where,
64658 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
64659 +    EPRINTF5 (DBG_NETWORK_ERROR, "%s:                     %08x %08x %08x %08x\n", rail->Generic.Name,
64660 +             dma->s.dma_destEvent, dma->s.dma_destCookieVProc, dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
64661 +
64662 +    if (EP_VP_ISDATA((cvproc & DMA_PROCESS_MASK)) && EP_VP_TO_NODE(cvproc & DMA_PROCESS_MASK) == nodeId)
64663 +    {
64664 +       /*
64665 +        * This is a DMA going to the node which has a network fixup
64666 +        * request pending, so check if the cookie matches.
64667 +        */
64668 +       if ((cookie == cookies[0] || cookie == cookies[1]) /* && !WaitForEop */)
64669 +       {
64670 +           EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %08x on %s\n", rail->Generic.Name, cookie, where);
64671 +           
64672 +           return (TRUE);
64673 +       }
64674 +    }
64675 +
64676 +    return (FALSE);
64677 +}
64678 +
64679 +
64680 +static void
64681 +NetworkErrorHaltOperation (ELAN3_DEV *dev, void *arg)
64682 +{
64683 +    NETERR_HALT_ARGS *args = (NETERR_HALT_ARGS *) arg;
64684 +    EP3_RAIL         *rail = args->Rail;
64685 +    EP_SYS           *sys  = rail->Generic.System;
64686 +    sdramaddr_t       FPtr, BPtr;
64687 +    sdramaddr_t       Base, Top;
64688 +    E3_DMA_BE         dma;
64689 +    unsigned long     flags;
64690 +
64691 +    spin_lock_irqsave (&sys->NodeLock, flags);
64692 +
64693 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
64694 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
64695 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
64696 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
64697 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
64698 +    
64699 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
64700 +    BPtr =  read_reg32 (dev, DProc_SysCntx_BPtr);
64701 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
64702 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
64703 +    
64704 +    while (FPtr != BPtr)
64705 +    {
64706 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
64707 +       
64708 +       if (DmaMatchesCookie (rail, &dma, args->NodeId, args->Cookies, "runq "))
64709 +       {
64710 +           /*
64711 +            * Transfer the DMA to the node, it's source event will 
64712 +            * get executed later.
64713 +            */
64714 +           QueueDmaOnStalledList (rail, &dma);
64715 +           
64716 +           /*
64717 +            * Remove the DMA from the queue by replacing it with one with
64718 +            * zero size and no events.
64719 +            *
64720 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
64721 +            * to mark the approriate run queue as empty.
64722 +            */
64723 +           dma.s.dma_type            = (SYS_CONTEXT_BIT << 16);
64724 +           dma.s.dma_size            = 0;
64725 +           dma.s.dma_source          = (E3_Addr) 0;
64726 +           dma.s.dma_dest            = (E3_Addr) 0;
64727 +           dma.s.dma_destEvent       = (E3_Addr) 0;
64728 +           dma.s.dma_destCookieVProc = 0;
64729 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
64730 +           dma.s.dma_srcCookieVProc  = 0;
64731 +           
64732 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
64733 +       }
64734 +
64735 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
64736 +    }
64737 +
64738 +    rail->NetworkErrorFlushed = TRUE;
64739 +    kcondvar_wakeupall (&rail->NetworkErrorSleep, &sys->NodeLock);
64740 +
64741 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
64742 +}
64743 +
64744 +void
64745 +ep3_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
64746 +{
64747 +    EP3_RAIL        *rail        = (EP3_RAIL *) r;
64748 +    EP_SYS          *sys         = rail->Generic.System;
64749 +    ELAN3_DEV       *dev         = rail->Device;
64750 +    EP_NODE_RAIL    *nodeRail    = &rail->Generic.Nodes[nodeId];
64751 +    E3_DMA_BE        dmabe;
64752 +    EP3_COOKIE      *cp;
64753 +    E3_uint32        vp;
64754 +    NETERR_HALT_ARGS args;
64755 +    struct list_head *el, *nel, matchedList;
64756 +    int              i;
64757 +    unsigned long    flags;
64758 +
64759 +    INIT_LIST_HEAD (&matchedList);
64760 +
64761 +    StallDmaRetryThread (rail);
64762 +
64763 +    args.Rail       = rail;
64764 +    args.NodeId     = nodeId;
64765 +    args.Cookies    = cookies;
64766 +
64767 +    spin_lock_irqsave (&rail->Device->IntrLock, flags);
64768 +    QueueHaltOperation (rail->Device, 0, NULL, INT_TProcHalted | INT_DProcHalted, NetworkErrorHaltOperation, &args);
64769 +    spin_unlock_irqrestore (&rail->Device->IntrLock, flags);
64770 +    
64771 +    spin_lock_irqsave (&sys->NodeLock, flags);
64772 +    while (! rail->NetworkErrorFlushed)
64773 +       kcondvar_wait (&rail->NetworkErrorSleep, &sys->NodeLock, &flags);
64774 +    rail->NetworkErrorFlushed = FALSE;
64775 +    
64776 +    spin_lock (&rail->DmaRetryLock);
64777 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
64778 +    {
64779 +       list_for_each_safe (el, nel, &rail->DmaRetries[i]) {
64780 +           EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
64781 +
64782 +           if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "retry"))
64783 +           {
64784 +               /* remove from retry list */
64785 +               list_del (&retry->Link);
64786 +
64787 +               /* add to list of dmas which matched */
64788 +               list_add_tail (&retry->Link, &matchedList);
64789 +           }
64790 +       }
64791 +    }
64792 +    
64793 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
64794 +       EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
64795 +       
64796 +       if (DmaMatchesCookie (rail, &retry->Dma, nodeId, cookies, "stalled"))
64797 +       {
64798 +           /* remove from retry list */
64799 +           list_del (&retry->Link);
64800 +           
64801 +           /* add to list of dmas which matched */
64802 +           list_add_tail (&retry->Link, &matchedList);
64803 +       }
64804 +    }
64805 +    
64806 +    spin_unlock (&rail->DmaRetryLock);
64807 +    spin_unlock_irqrestore (&sys->NodeLock, flags);
64808 +    
64809 +    ResumeDmaRetryThread (rail);
64810 +
64811 +    /* Now "set" the source event of any write DMA's */
64812 +    while (! list_empty (&matchedList))
64813 +    {
64814 +       EP3_RETRY_DMA *retry = list_entry (matchedList.next, EP3_RETRY_DMA, Link);
64815 +       
64816 +       list_del (&retry->Link);
64817 +
64818 +       if (retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_srcEvent)
64819 +       {
64820 +           sdramaddr_t event = ep_elan2sdram (&rail->Generic, retry->Dma.s.dma_srcEvent);
64821 +
64822 +           /* Block local interrupts, since we need to atomically
64823 +            * decrement the event count and perform the word write
64824 +            */
64825 +           local_irq_save (flags);
64826 +           {
64827 +               E3_uint32 type  = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type));
64828 +               E3_uint32 count = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Count));
64829 +
64830 +               elan3_sdram_writel (dev, event + offsetof (E3_Event, ev_Count), count - 1);
64831 +
64832 +               if (count == 1)
64833 +               {
64834 +                   if (type & EV_TYPE_MASK_BCOPY)
64835 +                   {
64836 +                       E3_Addr srcVal  = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Source));
64837 +                       E3_Addr dstAddr = elan3_sdram_readl (dev, event + offsetof (E3_BlockCopyEvent, ev_Dest)) & ~EV_BCOPY_DTYPE_MASK;
64838 +
64839 +                       ASSERT ((srcVal & EV_WCOPY) != 0);
64840 +                       
64841 +                       EPRINTF3 (DBG_NETWORK_ERROR, "%s: neterr perform event word write at %08x with %08x\n", rail->Generic.Name, dstAddr, srcVal);
64842 +
64843 +                       ELAN3_OP_STORE32 (rail->Ctxt, dstAddr, srcVal);
64844 +                   }
64845 +
64846 +                   if ((type & ~EV_TYPE_MASK_BCOPY) != 0)
64847 +                   {
64848 +                       if ((type & EV_TYPE_MASK_CHAIN) == EV_TYPE_CHAIN)
64849 +                       {
64850 +                           printk ("%s: event at %08x - chained event %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type);
64851 +                           panic ("ep: neterr invalid event type\n");
64852 +                       }
64853 +                       else if ((type & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
64854 +                       {
64855 +                           EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr event interrupt - cookie %08x\n", rail->Generic.Name, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)));
64856 +                           
64857 +                           cp = LookupCookie (&rail->CookieTable, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)));
64858 +                           
64859 +                           if (cp->Operations->Event)
64860 +                               cp->Operations->Event(rail, cp->Arg);
64861 +                       }
64862 +                       else if ((type & EV_TYPE_MASK_DMA) == EV_TYPE_DMA)
64863 +                       {
64864 +                           sdramaddr_t dma = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2));
64865 +                           
64866 +                           EPRINTF2 (DBG_NETWORK_ERROR, "%s: neterr chained dma - %08x\n", rail->Generic.Name, (type & ~EV_TYPE_MASK2));
64867 +                           
64868 +                           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
64869 +                           
64870 +                           if (dmabe.s.dma_direction == DMA_WRITE)
64871 +                           {
64872 +                               vp = dmabe.s.dma_destVProc;
64873 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
64874 +                           }
64875 +                           else
64876 +                           {
64877 +                               vp = dmabe.s.dma_srcVProc;
64878 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
64879 +                               
64880 +                               /* we MUST convert this into a DMA_READ_REQUEUE dma since if we don't, the 
64881 +                                * DMA descriptor will be read from the EP_RETRY_DMA rather than the 
64882 +                                * original DMA - this can then get reused and an incorrect DMA 
64883 +                                * descriptor sent 
64884 +                                * eventp->ev_Type contains the dma address with type in the lower bits 
64885 +                                */ 
64886 +                           
64887 +                               dmabe.s.dma_source    = (type & ~EV_TYPE_MASK2);
64888 +                               dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
64889 +                           }
64890 +                       
64891 +                           ASSERT (EP_VP_ISDATA(vp));
64892 +                       
64893 +                           nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
64894 +
64895 +                           switch (nodeRail->State)
64896 +                           {
64897 +                           case EP_NODE_CONNECTED:
64898 +                           case EP_NODE_LEAVING_CONNECTED:
64899 +                               if (cp != NULL)
64900 +                                   cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
64901 +                               else
64902 +                               {
64903 +                                   ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
64904 +                               
64905 +                                   QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
64906 +                               }
64907 +                               break;
64908 +
64909 +                           case EP_NODE_LOCAL_PASSIVATE:
64910 +                               QueueDmaOnStalledList (rail, &dmabe);
64911 +                               break;
64912 +
64913 +                           default:
64914 +                               panic ("ep: neterr incorrect state for node\n");
64915 +                           }
64916 +                       }
64917 +                       else if ((type & EV_TYPE_MASK_THREAD) == EV_TYPE_THREAD)
64918 +                       {
64919 +                           printk ("%s: event at %08x - thread waiting %x is invalid\n", rail->Generic.Name, retry->Dma.s.dma_srcEvent, type);
64920 +                           panic ("ep: neterr invalid event type\n");
64921 +                       }
64922 +                   }
64923 +               }
64924 +           }
64925 +           local_irq_restore(flags);
64926 +       }
64927 +       
64928 +       /* add to free list */
64929 +       spin_lock_irqsave (&rail->DmaRetryLock, flags);
64930 +       list_add (&retry->Link, &rail->DmaRetryFreeList);
64931 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
64932 +    }
64933 +}
64934 +
64935 +/*
64936 + * Local variables:
64937 + * c-file-style: "stroustrup"
64938 + * End:
64939 + */
64940 +
64941 diff -urN clean/drivers/net/qsnet/ep/neterr_elan4.c linux-2.6.9/drivers/net/qsnet/ep/neterr_elan4.c
64942 --- clean/drivers/net/qsnet/ep/neterr_elan4.c   1969-12-31 19:00:00.000000000 -0500
64943 +++ linux-2.6.9/drivers/net/qsnet/ep/neterr_elan4.c     2005-07-20 07:35:37.000000000 -0400
64944 @@ -0,0 +1,264 @@
64945 +/*
64946 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
64947 + *
64948 + *    For licensing information please see the supplied COPYING file
64949 + *
64950 + */
64951 +
64952 +#ident "@(#)$Id: neterr_elan4.c,v 1.3.2.1 2005/07/20 11:35:37 mike Exp $"
64953 +/*      $Source: /cvs/master/quadrics/epmod/neterr_elan4.c,v $ */
64954 +
64955 +#include <qsnet/kernel.h>
64956 +
64957 +#include <elan/kcomm.h>
64958 +
64959 +#include "kcomm_vp.h"
64960 +#include "kcomm_elan4.h"
64961 +#include "debug.h"
64962 +
64963 +struct neterr_desc
64964 +{
64965 +    EP4_RAIL         *rail;
64966 +    unsigned int      nodeid;
64967 +    EP_NETERR_COOKIE *cookies;
64968 +    int                      done;
64969 +} ;
64970 +
64971 +static int
64972 +dma_matches_cookie (EP4_RAIL *rail, E4_uint64 vproc, E4_uint64 cookie, unsigned int nodeId, EP_NETERR_COOKIE *cookies, const char *where)
64973 +{
64974 +    if ((EP_VP_ISDATA (vproc) && EP_VP_TO_NODE (vproc) == nodeId) && (cookie == cookies[0] || cookie == cookies[1]))
64975 +    {
64976 +       EPRINTF3 (DBG_NETWORK_ERROR, "%s: match cookie %016llx on %s\n", rail->r_generic.Name, (long long)cookie, where);
64977 +
64978 +       return 1;
64979 +    }
64980 +    return 0;
64981 +}
64982 +
64983 +static void
64984 +ep4_neterr_dma_flushop (ELAN4_DEV *dev, void *arg, int qfull)
64985 +{
64986 +    struct neterr_desc *desc  = (struct neterr_desc *) arg;
64987 +    EP4_RAIL           *rail  = desc->rail;
64988 +    E4_uint64           qptrs = read_reg64 (dev, DProcHighPriPtrs);
64989 +    E4_uint32           qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
64990 +    E4_uint32           qfptr = E4_QueueFrontPointer (qptrs);
64991 +    E4_uint32           qbptr = E4_QueueBackPointer (qptrs);
64992 +    E4_DProcQueueEntry  qentry;
64993 +    unsigned long       flags;
64994 +
64995 +    while ((qfptr != qbptr) || qfull)
64996 +    {
64997 +       E4_uint64 cookie = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie));
64998 +       E4_uint64 vproc  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc));
64999 +
65000 +       if (dma_matches_cookie (rail, vproc, cookie, desc->nodeid, desc->cookies, "runq "))
65001 +       {
65002 +           elan4_sdram_copyq_from_sdram (dev, qfptr, &qentry, sizeof (E4_DProcQueueEntry));
65003 +
65004 +           ep4_queue_dma_stalled (rail, &qentry.Desc);
65005 +
65006 +           /* Replace the dma with one which will "disappear" */
65007 +           qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
65008 +           qentry.Desc.dma_cookie   = 0;
65009 +           qentry.Desc.dma_vproc    = 0;
65010 +           qentry.Desc.dma_srcAddr  = 0;
65011 +           qentry.Desc.dma_dstAddr  = 0;
65012 +           qentry.Desc.dma_srcEvent = 0;
65013 +           qentry.Desc.dma_dstEvent = 0;
65014 +
65015 +           elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
65016 +       }
65017 +       
65018 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
65019 +       qfull = 0;
65020 +    }
65021 +
65022 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
65023 +    desc->done = 1;
65024 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
65025 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
65026 +}
65027 +
65028 +static void
65029 +ep4_neterr_dma_haltop (ELAN4_DEV *dev, void *arg)
65030 +{
65031 +    struct neterr_desc *desc = (struct neterr_desc *) arg;
65032 +
65033 +    elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1);
65034 +}
65035 +
65036 +void
65037 +ep4_neterr_fixup_dmas (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
65038 +{
65039 +    EP_NODE_RAIL      *nodeRail = &rail->r_generic.Nodes[nodeId];
65040 +    struct neterr_desc desc;
65041 +    struct list_head   matchedList;
65042 +    struct list_head  *el, *nel;
65043 +    unsigned long      flags;
65044 +    register int       i;
65045 +
65046 +    desc.rail    = rail;
65047 +    desc.nodeid  = nodeId;
65048 +    desc.cookies = cookies;
65049 +    desc.done    = 0;
65050 +
65051 +    INIT_LIST_HEAD (&matchedList);
65052 +
65053 +    /* First -  stall the retry thread, so that it will no longer restart
65054 +     *          any dma's from the retry list */
65055 +    ep_kthread_stall (&rail->r_retry_thread);
65056 +    
65057 +    /* Second - flush through all command queues targeted by events, thread etc */
65058 +    ep4_flush_ecqs (rail);
65059 +    
65060 +    /* Third - queue a halt operation to flush through all DMA's which are executing
65061 +     *         or on the run queues */
65062 +    kmutex_lock (&rail->r_haltop_mutex);
65063 +    
65064 +    rail->r_haltop.op_mask      = INT_DProcHalted;
65065 +    rail->r_haltop.op_function  = ep4_neterr_dma_haltop;
65066 +    rail->r_haltop.op_arg       = &desc;
65067 +
65068 +    rail->r_flushop.op_function = ep4_neterr_dma_flushop;
65069 +    rail->r_flushop.op_arg      = &desc;
65070 +    
65071 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
65072 +
65073 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
65074 +    while (! desc.done)
65075 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
65076 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
65077 +    kmutex_unlock (&rail->r_haltop_mutex);
65078 +
65079 +    /* Fourth - run down the dma retry lists and move all entries to the cancelled
65080 +     *          list.  Any dma's which were on the run queues have already been
65081 +     *          moved there */
65082 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
65083 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
65084 +    {
65085 +       list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) {
65086 +           EP4_DMA_RETRY *retry    = list_entry (el, EP4_DMA_RETRY, retry_link);
65087 +           
65088 +           if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "retry"))
65089 +           {
65090 +               /* remove from retry list */
65091 +               list_del (&retry->retry_link);
65092 +               
65093 +               /* add to list of dmas which matched */
65094 +               list_add_tail (&retry->retry_link, &matchedList);
65095 +           }
65096 +       }
65097 +    }
65098 +    
65099 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
65100 +       EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link);
65101 +       
65102 +       if (dma_matches_cookie (rail, retry->retry_dma.dma_vproc, retry->retry_dma.dma_cookie, nodeId, cookies, "stalled"))
65103 +       {
65104 +           /* remove from retry list */
65105 +           list_del (&retry->retry_link);
65106 +           
65107 +           /* add to list of dmas which matched */
65108 +           list_add_tail (&retry->retry_link, &matchedList);
65109 +       }
65110 +    }
65111 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
65112 +    
65113 +    /* Now "set" the source event of any put DMA's - we can use the dma 
65114 +     * retry command queue as the retry thread is stalled */
65115 +    while (! list_empty (&matchedList))
65116 +    {
65117 +       EP4_DMA_RETRY *retry = list_entry (matchedList.next, EP4_DMA_RETRY, retry_link);
65118 +       
65119 +       list_del (&retry->retry_link);
65120 +
65121 +       elan4_set_event_cmd (rail->r_dma_ecq->ecq_cq, retry->retry_dma.dma_srcEvent);
65122 +
65123 +       spin_lock_irqsave (&rail->r_dma_lock, flags);
65124 +       list_add (&retry->retry_link, &rail->r_dma_freelist);
65125 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
65126 +    }
65127 +
65128 +    /* Flush through the command queues to ensure that all the setevents have executed */
65129 +    ep4_flush_ecqs (rail);
65130 +
65131 +    /* Finally - allow the retry thread to run again */
65132 +    ep_kthread_resume (&rail->r_retry_thread);
65133 +}
65134 +
65135 +void
65136 +ep4_add_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops)
65137 +{
65138 +    /* we're called from the ManagerThread, so no need to stall it */
65139 +    list_add_tail (&ops->op_link, &rail->r_neterr_ops);
65140 +}
65141 +void
65142 +ep4_remove_neterr_ops (EP4_RAIL *rail, EP4_NETERR_OPS *ops)
65143 +{
65144 +    EP_SYS *sys = rail->r_generic.System;
65145 +
65146 +    ep_kthread_stall (&sys->ManagerThread);
65147 +    list_del (&ops->op_link);
65148 +    ep_kthread_resume (&sys->ManagerThread);
65149 +}
65150 +
65151 +void
65152 +ep4_neterr_fixup_sten (EP4_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
65153 +{
65154 +    struct list_head *el;
65155 +
65156 +    /* First -  stall the retry thread, so that it will no longer restart
65157 +     *          any sten packets from the retry list */
65158 +    ep_kthread_stall (&rail->r_retry_thread);
65159 +
65160 +    /* Second - flush through all command queues targeted by events, thread etc */
65161 +    ep4_flush_ecqs (rail);
65162 +
65163 +    list_for_each (el, &rail->r_neterr_ops) {
65164 +       EP4_NETERR_OPS *op = list_entry (el, EP4_NETERR_OPS, op_link);
65165 +
65166 +       (op->op_func) (rail, op->op_arg, nodeId, cookies);
65167 +    }
65168 +
65169 +    /* Flush through the command queues to ensure that all the setevents have executed */
65170 +    ep4_flush_ecqs (rail);
65171 +
65172 +    /* Finally - allow the retry thread to run again */
65173 +    ep_kthread_resume (&rail->r_retry_thread);
65174 +}
65175 +
65176 +void
65177 +ep4_neterr_fixup (EP_RAIL *r, unsigned int nodeId, EP_NETERR_COOKIE *cookies)
65178 +{
65179 +    EP4_RAIL *rail = (EP4_RAIL *) r;
65180 +
65181 +    /* network error cookies can come from the following :
65182 +     *
65183 +     *   DMA  engine
65184 +     *     if a DMA matches a network error cookie, then we just need to 
65185 +     *     execute the local setevent *before* returning.
65186 +     *
65187 +     *   STEN packet
65188 +     *     if the STEN packet was generated as a WAIT_FOR_EOP
65189 +     *     and it's not present on the retry lists, then re-create
65190 +     *     it.
65191 +     *
65192 +     */
65193 +    EPRINTF4 (DBG_NETWORK_ERROR, "%s: ep4_neterr_fixup: node %d cookies <%lld%s%s%s%s> <%lld%s%s%s%s>\n",
65194 +             rail->r_generic.Name, nodeId, EP4_COOKIE_STRING(cookies[0]), EP4_COOKIE_STRING(cookies[1]));
65195 +
65196 +    if ((cookies[0] & EP4_COOKIE_DMA) || (cookies[1] & EP4_COOKIE_DMA))
65197 +       ep4_neterr_fixup_dmas (rail, nodeId, cookies);
65198 +
65199 +    if ((cookies[0] & EP4_COOKIE_STEN) || (cookies[1] & EP4_COOKIE_STEN))
65200 +       ep4_neterr_fixup_sten (rail, nodeId, cookies);
65201 +}
65202 +
65203 +/*
65204 + * Local variables:
65205 + * c-file-style: "stroustrup"
65206 + * End:
65207 + */
65208 +
65209 diff -urN clean/drivers/net/qsnet/ep/nmh.c linux-2.6.9/drivers/net/qsnet/ep/nmh.c
65210 --- clean/drivers/net/qsnet/ep/nmh.c    1969-12-31 19:00:00.000000000 -0500
65211 +++ linux-2.6.9/drivers/net/qsnet/ep/nmh.c      2004-01-05 08:48:08.000000000 -0500
65212 @@ -0,0 +1,181 @@
65213 +/*
65214 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65215 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65216 + *
65217 + *    For licensing information please see the supplied COPYING file
65218 + *
65219 + */
65220 +#ident "@(#)$Id: nmh.c,v 1.6 2004/01/05 13:48:08 david Exp $"
65221 +/*      $Source: /cvs/master/quadrics/epmod/nmh.c,v $*/
65222 +
65223 +#include <qsnet/kernel.h>
65224 +
65225 +#include <elan/kcomm.h>
65226 +
65227 +#define EP_NMD_SPANS(nmd, base, top)   ((nmd)->nmd_addr <= (base) &&  \
65228 +                                        ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (top))
65229 +
65230 +#define EP_NMD_OVERLAPS(nmd, addr, len)        ((nmd)->nmd_addr <= ((addr) + (len)) && \
65231 +                                        ((nmd)->nmd_addr + (nmd)->nmd_len - 1) >= (addr))
65232 +
65233 +#define EP_NMH_HASH(tbl,idx,addr)      ((addr) % (tbl)->tbl_size[idx])
65234 +
65235 +int
65236 +ep_nmh_init (EP_NMH_TABLE *tbl)
65237 +{
65238 +    int i, idx, hsize = 1;
65239 +
65240 +    for (idx = EP_NMH_NUMHASH-1; idx >= 0; idx--, hsize <<= 1)
65241 +    {
65242 +       tbl->tbl_size[idx] = (hsize < EP_NMH_HASHSIZE) ? hsize : EP_NMH_HASHSIZE;
65243 +
65244 +       KMEM_ZALLOC (tbl->tbl_hash[idx], struct list_head *, sizeof (struct list_head) * tbl->tbl_size[idx], 1);
65245 +       
65246 +       if (tbl->tbl_hash[idx] == NULL)
65247 +       {
65248 +           while (++idx < EP_NMH_NUMHASH)
65249 +               KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
65250 +           return (ENOMEM);
65251 +       }
65252 +
65253 +       for (i = 0; i < tbl->tbl_size[idx]; i++)
65254 +           INIT_LIST_HEAD (&tbl->tbl_hash[idx][i]);
65255 +    }
65256 +
65257 +    return (0);
65258 +}
65259 +
65260 +void
65261 +ep_nmh_fini (EP_NMH_TABLE *tbl)
65262 +{
65263 +    int idx;
65264 +
65265 +    for (idx = 0; idx < EP_NMH_NUMHASH; idx++)
65266 +       if (tbl->tbl_hash[idx])
65267 +           KMEM_FREE (tbl->tbl_hash[idx], sizeof (struct list_head) * tbl->tbl_size[idx]);
65268 +    
65269 +    bzero (tbl, sizeof (EP_NMH_TABLE));
65270 +}
65271 +
65272 +void
65273 +ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmh)
65274 +{
65275 +    EP_ADDR base = nmh->nmh_nmd.nmd_addr;
65276 +    EP_ADDR top  = base + nmh->nmh_nmd.nmd_len - 1;
65277 +    int     idx;
65278 +
65279 +    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
65280 +       ;
65281 +
65282 +    list_add_tail (&nmh->nmh_link, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]);
65283 +}
65284 +
65285 +void
65286 +ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmh)
65287 +{
65288 +    list_del (&nmh->nmh_link);
65289 +}
65290 +
65291 +EP_NMH *
65292 +ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmd)
65293 +{
65294 +    EP_ADDR           base = nmd->nmd_addr;
65295 +    EP_ADDR           top  = base + nmd->nmd_len - 1;
65296 +    int               idx;
65297 +    struct list_head *le;
65298 +    
65299 +    for (idx = 0, base >>= 12, top >>= 12; base != top && idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1)
65300 +       ;
65301 +    
65302 +    for (; idx < EP_NMH_NUMHASH; idx++, base >>= 1, top >>= 1) {
65303 +
65304 +       list_for_each (le, &tbl->tbl_hash[idx][EP_NMH_HASH(tbl, idx, base)]) {
65305 +           EP_NMH *nmh = list_entry (le, EP_NMH, nmh_link);
65306 +
65307 +           if (EP_NMD_SPANS (&nmh->nmh_nmd, nmd->nmd_addr, nmd->nmd_addr + nmd->nmd_len - 1))
65308 +               return (nmh);
65309 +       }
65310 +    }
65311 +
65312 +    return (0);
65313 +}
65314 +
65315 +void
65316 +ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len)
65317 +{
65318 +    ASSERT ((off + len - 1) <= nmd->nmd_len);
65319 +
65320 +    subset->nmd_addr = nmd->nmd_addr + off;
65321 +    subset->nmd_len  = len;
65322 +    subset->nmd_attr = nmd->nmd_attr;
65323 +}
65324 +
65325 +int
65326 +ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b)
65327 +{
65328 +    if (EP_NMD_NODEID (a) != EP_NMD_NODEID (b))                        /* not generated on the same node */
65329 +       return 0;
65330 +    
65331 +    if ((EP_NMD_RAILMASK (a) & EP_NMD_RAILMASK (b)) == 0)      /* no common rails */
65332 +       return 0;
65333 +    
65334 +    if (b->nmd_addr == (a->nmd_addr + a->nmd_len))
65335 +    {
65336 +       if (merged != NULL)
65337 +       {
65338 +           merged->nmd_addr = a->nmd_addr;
65339 +           merged->nmd_len  = a->nmd_len + b->nmd_len;
65340 +           merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(a), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
65341 +       }
65342 +       return 1;
65343 +    }
65344 +    
65345 +    if (a->nmd_addr == (b->nmd_addr + b->nmd_len))
65346 +    {
65347 +       if (merged != NULL)
65348 +       {
65349 +           merged->nmd_addr = b->nmd_addr;
65350 +           merged->nmd_len  = b->nmd_len + a->nmd_len;
65351 +           merged->nmd_attr = EP_NMD_ATTR(EP_NMD_NODEID(b), EP_NMD_RAILMASK(a) & EP_NMD_RAILMASK(b));
65352 +       }
65353 +       
65354 +       return 1;
65355 +    }
65356 +
65357 +    return 0;
65358 +}
65359 +
65360 +int
65361 +ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask)
65362 +{
65363 +    EP_NMH *nmh = ep_nmh_find (&sys->MappingTable, nmd);
65364 +
65365 +    if (nmh == NULL)
65366 +    {
65367 +       printk ("ep_nmd_map_rails: nmd=%08x.%08x.%08x cannot be found\n",
65368 +               nmd->nmd_addr, nmd->nmd_len, nmd->nmd_attr);
65369 +       return (-1);
65370 +    }
65371 +
65372 +    return (nmh->nmh_ops->op_map_rails (sys, nmh, nmd, railmask));
65373 +}
65374 +
65375 +EP_RAILMASK
65376 +ep_nmd2railmask (EP_NMD *frags, int nFrags)
65377 +{
65378 +    EP_RAILMASK mask;
65379 +
65380 +    if (nFrags == 0)
65381 +       return ((EP_RAILMASK)-1);
65382 +    
65383 +    for (mask = EP_NMD_RAILMASK(frags); --nFrags; )
65384 +       mask &= EP_NMD_RAILMASK(++frags);
65385 +
65386 +    return (mask);
65387 +}
65388 +
65389 +/*
65390 + * Local variables:
65391 + * c-file-style: "stroustrup"
65392 + * End:
65393 + */
65394 diff -urN clean/drivers/net/qsnet/ep/probenetwork.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork.c
65395 --- clean/drivers/net/qsnet/ep/probenetwork.c   1969-12-31 19:00:00.000000000 -0500
65396 +++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork.c     2004-04-19 11:43:15.000000000 -0400
65397 @@ -0,0 +1,446 @@
65398 +/*
65399 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65400 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65401 + *
65402 + *    For licensing information please see the supplied COPYING file
65403 + *
65404 + */
65405 +
65406 +#ident "@(#)$Id: probenetwork.c,v 1.43 2004/04/19 15:43:15 david Exp $"
65407 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork.c,v $ */
65408 +
65409 +#include <qsnet/kernel.h>
65410 +
65411 +#include <elan/kcomm.h>
65412 +#include "debug.h"
65413 +
65414 +int PositionCheck = 1;
65415 +
65416 +#define NUM_DOWN_FROM_VAL(NumDownLinksVal, level)      (((NumDownLinksVal) >> ((level) << 2)) & 0xF)
65417 +
65418 +int
65419 +ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos)
65420 +{
65421 +    int               lvl, i;
65422 +    int               level;
65423 +    int               nodeid;
65424 +    int               numnodes;
65425 +    int                      randomRoutingDisabled;
65426 +    int               sw;
65427 +    int               nacks;
65428 +    int               nowayup;
65429 +    int                      nalias;
65430 +    int                      upmask;
65431 +    int                      partial;
65432 +    int                      link;
65433 +    int                      invalid;
65434 +    int                      linkdown[ELAN_MAX_LEVELS];
65435 +    int                      linkup[ELAN_MAX_LEVELS];
65436 +    EP_SWITCH        *switches[ELAN_MAX_LEVELS];
65437 +    int               switchCount[ELAN_MAX_LEVELS+1];
65438 +    int               lowestBcast;
65439 +    int               numUpLinks[ELAN_MAX_LEVELS];
65440 +    int               routedown [ELAN_MAX_LEVELS];
65441 +
65442 +    EPRINTF1 (DBG_PROBE, "%s: ProbeNetwork started\n", rail->Name);
65443 +
65444 +    switchCount[0] = 1;
65445 +    numUpLinks [0] = 4;
65446 +
65447 +    for (level = 0; level < ELAN_MAX_LEVELS; level++)
65448 +    {
65449 +       int ndown  = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, level);
65450 +
65451 +       KMEM_ZALLOC (switches[level], EP_SWITCH *, sizeof (EP_SWITCH) * switchCount[level], 1);
65452 +
65453 +       for (sw = 0, nacks = 0, nowayup = 0, lowestBcast=7; sw < switchCount[level]; sw++)
65454 +       {
65455 +           EP_SWITCH *lsw  = &switches[level][sw];
65456 +           int        good = 1;
65457 +           int        tsw;
65458 +
65459 +           for (nodeid = 0,tsw = sw, lvl = level-1 ; lvl >= 0 ; lvl--)
65460 +           {
65461 +               EP_SWITCH *lsw;
65462 +               int        link = (8-numUpLinks[lvl]) + (tsw % numUpLinks[lvl]);
65463 +
65464 +               tsw  = tsw / numUpLinks[lvl];
65465 +               lsw  = &switches[lvl][tsw];
65466 +
65467 +               if (lsw->present == 0 || (lsw->lnr & (1 << link)))
65468 +               {
65469 +                   EPRINTF4 (DBG_PROBE, "lvl %d sw %d present=%d lnr=%x\n", lvl, sw, lsw->present, lsw->lnr);
65470 +                   good = 0;
65471 +               }
65472 +               
65473 +               linkup[lvl]   = link;
65474 +               linkdown[lvl] = lsw->link;
65475 +
65476 +               if ( lvl ) nodeid = ((nodeid + linkdown[lvl]) * (8-numUpLinks[lvl-1]));
65477 +               else       nodeid += linkdown[0];
65478 +
65479 +           }
65480 +           
65481 +           /* 
65482 +            * don't bother probing routes which we've already seen are unreachable 
65483 +            * because a link upwards was in reset or the switch previously nacked us.
65484 +            */
65485 +           if (! good)
65486 +           {
65487 +               lsw->present = 0;
65488 +
65489 +               nacks++;
65490 +               nowayup++;
65491 +
65492 +               continue;
65493 +           }
65494 +
65495 +           lsw->present = rail->Operations.ProbeRoute (rail, level, sw, nodeid, linkup, linkdown, 5, lsw);
65496 +
65497 +           if (! lsw->present)
65498 +           {
65499 +               EPRINTF3 (DBG_PROBE, "%s: level %d switch %d - unexpected nack\n", rail->Name, level, sw);
65500 +
65501 +               nacks++;
65502 +               nowayup++;
65503 +           }
65504 +           else
65505 +           {
65506 +               EPRINTF5 (DBG_PROBE, "%s: level %d switch %d - link %d bcast %d\n", rail->Name, level, sw, lsw->link, lsw->bcast);
65507 +
65508 +               if (level == 2 && rail->Devinfo.dev_device_id == PCI_DEVICE_ID_ELAN3)
65509 +               {
65510 +                   /* If we see broadcast top as 7, and we came in on a low link, then we can't
65511 +                    * determine whether we're in a 128 way or a un-configured 64u64d switch, so
65512 +                    * we treat it as a 64u64d and detect the 128 way case by "going over the top" 
65513 +                    * below. Unless we've been told what it really is by NumDownLinksVal.
65514 +                    */
65515 +                   if (lsw->bcast == 7 && lsw->link < 4)
65516 +                       lsw->bcast = ndown ? (ndown - 1) : 3;
65517 +               }
65518 +
65519 +               if ( lowestBcast > lsw->bcast ) 
65520 +                   lowestBcast = lsw->bcast;
65521 +
65522 +               if (lsw->link > (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)))
65523 +               {
65524 +                   /* We've arrived on a "up-link" - this could be either
65525 +                    * we're in the top half of a x8 top-switch - or we're
65526 +                    * in the bottom half and have gone "over the top". We
65527 +                    * differentiate these cases since the switches below
65528 +                    * a x8 top-switch will have broadcast top set to 3, 
65529 +                    * and the x8 topswitch have broadcast top set to 7.
65530 +                    */
65531 +                   if (lsw->bcast == 7)
65532 +                       nowayup++;
65533 +                   else
65534 +                   {
65535 +                       EPRINTF2 (DBG_PROBE, "%s: level %d - gone over the top\n",
65536 +                                 rail->Name, level);
65537 +
65538 +                       if (level > 0)
65539 +                       {
65540 +                           KMEM_FREE (switches[level], sizeof (EP_SWITCH) * switchCount[level] );
65541 +                           level--;
65542 +                       }
65543 +                       
65544 +                       numUpLinks[level] = 0;
65545 +                       goto finished;
65546 +                   }
65547 +               }
65548 +
65549 +           }
65550 +       }
65551 +
65552 +       numUpLinks[level]    = ndown ? (8 - ndown) : (7 - lowestBcast);
65553 +       switchCount[level+1] = switchCount[level] *  numUpLinks[level];
65554 +       
65555 +       /* Now we know which links are uplinks, we can see whether there is
65556 +        * any possible ways up */
65557 +       upmask = (ndown ? (0xFF << ndown) & 0xFF : (0xFF << (8 - numUpLinks[level])) & 0xFF);
65558 +
65559 +       for (sw = 0; sw < switchCount[level]; sw++)
65560 +       {
65561 +           EP_SWITCH *lsw  = &switches[level][sw];
65562 +
65563 +           if (lsw->present && lsw->link <= (ndown ? (ndown-1) : (lowestBcast == 7 ? 3 : lowestBcast)) && (switches[level][sw].lnr & upmask) == upmask)
65564 +               nowayup++;
65565 +       }
65566 +
65567 +       EPRINTF7 (DBG_PROBE, "%s: level %d - sw=%d nacks=%d nowayup=%d bcast=%d numup=%d\n", 
65568 +                 rail->Name, level, sw, nacks, nowayup, lowestBcast, numUpLinks[level]);
65569 +
65570 +       if (nacks == sw)
65571 +       {
65572 +           static bitmap_t printed[BT_BITOUL(EP_MAX_RAILS)];
65573 +
65574 +           if (! BT_TEST (printed, rail->Number))
65575 +               printk ("%s: cannot determine network position\n", rail->Name);
65576 +           BT_SET (printed, rail->Number);
65577 +           goto failed;
65578 +       }
65579 +
65580 +       if (nowayup == sw)
65581 +           goto finished;
65582 +    }
65583 +    
65584 +    printk ("%s: exceeded number of levels\n", rail->Name);
65585 +    level = ELAN_MAX_LEVELS - 1;
65586 +
65587 + failed:
65588 +    
65589 +    for (lvl = 0; lvl <= level; lvl++)
65590 +       KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] );
65591 +
65592 +    return -EAGAIN;
65593 +
65594 + finished:
65595 +    /* we've successfully probed the network - now calculate our node 
65596 +     * positon and what level of random routing is possible */
65597 +    nalias = 1;
65598 +    for (lvl = 0, invalid = 0, partial = 0, randomRoutingDisabled = 0; lvl <= level; lvl++)
65599 +    {
65600 +       int ndown  = NUM_DOWN_FROM_VAL (rail->Devinfo.dev_num_down_links_value, lvl);
65601 +       int upmask = ndown ? (0xFF << ndown) & 0xFF : 0xF0;
65602 +
65603 +       for (sw = 0, nalias = 0; sw < switchCount[lvl]; sw++)
65604 +       {
65605 +           EP_SWITCH *lsw = &switches[lvl][sw];
65606 +           
65607 +           /* You can only use adaptive routing if links 4-7 are uplinks, and at least one of them is
65608 +            * not in reset.   Otherwise you can randomly select an "uplink" if all the uplinks are not
65609 +            * in reset. */
65610 +           if (lsw->present && ((upmask == 0xF0) ? (lsw->lnr & upmask) == upmask : (lsw->lnr & upmask) != 0))
65611 +               randomRoutingDisabled |= (1 << lvl);
65612 +           
65613 +           if (!lsw->present)
65614 +               partial++;
65615 +           else
65616 +           {
65617 +               if (lsw->invalid)
65618 +               {
65619 +                   printk ("%s: invalid switch detected (level %d switch %d)\n", rail->Name, lvl, sw);
65620 +                   invalid++;
65621 +               }
65622 +               
65623 +               for (i = 0; i < nalias; i++)
65624 +                   if (linkdown[i] == lsw->link)
65625 +                       break;
65626 +               if (i == nalias)
65627 +                   linkdown[nalias++] = lsw->link;
65628 +           }
65629 +       }
65630 +       
65631 +       link = linkdown[0];
65632 +       for (i = 1; i < nalias; i++)
65633 +           if (linkdown[i] < link)
65634 +               link = linkdown[i];
65635 +
65636 +       if (nalias > 1 && lvl != level)
65637 +       {
65638 +           printk ("%s: switch aliased below top level (level %d)\n", rail->Name, lvl);
65639 +           invalid++;
65640 +       }
65641 +       
65642 +       routedown[lvl] = link;
65643 +   }
65644 +
65645 +    for (lvl = 0; lvl <= level; lvl++) 
65646 +       KMEM_FREE (switches[lvl], sizeof (EP_SWITCH) * switchCount[lvl] );
65647 +
65648 +    if (invalid)
65649 +    {
65650 +       printk ("%s: invalid switch configuration\n", rail->Name);
65651 +       return (EINVAL);
65652 +    }
65653 +
65654 +    /* Handle the aliasing case where a 16 way is used as multiple smaller switches */
65655 +    if (nalias == 1)
65656 +       level++;
65657 +    else if (nalias == 2)                                      /* a 16 way as 2x8 ways */
65658 +       numUpLinks[level++] = 6;                                /*   only 2 down links */
65659 +    else if (nalias > 4)                                       /* a 16 way as 8x2 ways */
65660 +       numUpLinks[level-1] = 6;
65661 +    
65662 +    /* 
65663 +     * Compute my nodeid and number of nodes in the machine
65664 +     * from the routedown and the number of downlinks at each level.
65665 +     */
65666 +    for(nodeid=0, lvl = level - 1; lvl >= 0; lvl--)
65667 +    {
65668 +       if (lvl) nodeid = ((nodeid + routedown[lvl]) * (8-numUpLinks[lvl-1]));  
65669 +       else     nodeid += routedown[0];
65670 +    }
65671 +
65672 +    for (numnodes = 1, lvl = 0; lvl < level; lvl++)
65673 +       numnodes *= (8 - numUpLinks[lvl]);
65674 +
65675 +    sprintf (rail->Name, "ep%d[%d]", rail->Number, nodeid);
65676 +
65677 +    if (randomRoutingDisabled & ((1 << (level-1))-1))
65678 +       printk ("%s: nodeid=%d level=%d numnodes=%d (random routing disabled 0x%x)\n", 
65679 +               rail->Name, nodeid, level, numnodes, randomRoutingDisabled);
65680 +    else if (partial)
65681 +       printk ("%s: nodeid=%d level=%d numnodes=%d (random routing ok)\n",
65682 +               rail->Name, nodeid, level, numnodes);
65683 +    else
65684 +       printk ("%s: nodeid=%d level=%d numnodes=%d\n",
65685 +               rail->Name, nodeid, level, numnodes);
65686 +
65687 +    pos->pos_mode               = ELAN_POS_MODE_SWITCHED;
65688 +    pos->pos_nodeid              = nodeid;
65689 +    pos->pos_levels              = level;
65690 +    pos->pos_nodes               = numnodes;
65691 +    pos->pos_random_disabled     = randomRoutingDisabled;
65692 +
65693 +    for(lvl = 0; lvl < level; lvl++)
65694 +       pos->pos_arity[level -lvl - 1] = (8-numUpLinks[lvl]);
65695 +    pos->pos_arity[level] = 1;                         /* XXXX why does this need to be 1 ? */
65696 +    
65697 +    return 0;
65698 +}
65699 +
65700 +/*
65701 + * broadcast top is invalid if it is not set to the number of downlinks-1,
65702 + * or at the topmost level it is less than ndown-1.
65703 + */
65704 +#define BCAST_TOP_INVALID(lvl, bcast, ndown)   ((lvl) == 0 ? (bcast) < ((ndown)-1) : (bcast) != ((ndown) - 1))
65705 +
65706 +void
65707 +CheckPosition (EP_RAIL *rail)
65708 +{
65709 +    ELAN_POSITION *pos     = &rail->Position;
65710 +    unsigned int   nodeid  = pos->pos_nodeid;
65711 +    unsigned int   invalid = 0;
65712 +    unsigned int   changed = 0;
65713 +    int lvl, slvl;
65714 +
65715 +    if (! PositionCheck)
65716 +       return;
65717 +
65718 +    if (rail->Operations.CheckPosition(rail))          /* is update ready for this rail */
65719 +    {
65720 +       EPRINTF2 (DBG_ROUTETABLE, "%s: check position: SwitchProbeLevel=%d\n", rail->Name, rail->SwitchProbeLevel);
65721 +
65722 +       for (lvl = 0, slvl = pos->pos_levels-1; lvl <= rail->SwitchProbeLevel; lvl++, slvl--)
65723 +       {
65724 +           EP_SWITCHSTATE *state  = &rail->SwitchState[lvl];
65725 +           EP_SWITCHSTATE *lstate = &rail->SwitchLast[lvl];
65726 +           unsigned int    ndown  = pos->pos_arity[slvl];
65727 +           unsigned int    upmask = (0xFF << ndown) & 0xFF;
65728 +           unsigned int    mylink = nodeid % ndown;
65729 +           unsigned int    error  = 0;
65730 +           unsigned int    binval = 0;
65731 +
65732 +           nodeid /= ndown;
65733 +
65734 +           /*
65735 +            * broadcast top is invalid if it is not set to the number of downlinks-1,
65736 +            * or at the topmost level it is less than ndown-1.
65737 +            */
65738 +           if (BCAST_TOP_INVALID(lvl, state->bcast, ndown) || (state->LNR & upmask) == upmask)
65739 +           {
65740 +               /* no way up from here - we'd better be at the top */
65741 +               if (lvl != (pos->pos_levels-1))
65742 +               {
65743 +                   if (state->bcast != (ndown-1))
65744 +                       printk ("%s: invalid broadcast top %d at level %d\n", rail->Name, state->bcast, lvl);
65745 +                   else if ((state->LNR & upmask) == upmask && (lstate->LNR & upmask) == upmask)
65746 +                       printk ("%s: no way up to switch at level %d (turned off ?)\n", rail->Name, lvl+1);
65747 +               }
65748 +               else
65749 +               {
65750 +                   if (state->linkid != mylink)
65751 +                       printk ("%s: moved at top level was connected to link %d now connected to %d\n", rail->Name, mylink, state->linkid);
65752 +               }
65753 +
65754 +               if (state->linkid != mylink)
65755 +                   error++;
65756 +               
65757 +               if (BCAST_TOP_INVALID (lvl, state->bcast, ndown))
65758 +                   binval++;
65759 +           }
65760 +           else
65761 +           {
65762 +               if (state->linkid != mylink)
65763 +               {
65764 +                   if (state->linkid != rail->SwitchLast[lvl].linkid)
65765 +                       printk ("%s: moved at lvl %d was connected to link %d now connected to %d\n", rail->Name, lvl, mylink, state->linkid);
65766 +                       
65767 +                   error++;
65768 +               }
65769 +           }
65770 +
65771 +           if (error == 0 && invalid == 0)
65772 +               rail->SwitchProbeTick[lvl] = lbolt;
65773 +           
65774 +           EPRINTF10 (DBG_ROUTETABLE, "%s:   lvl=%d (slvl=%d) linkid=%d bcast=%d lnr=%02x uplink=%d : error=%d binval=%d invalid=%d\n", 
65775 +                      rail->Name, lvl, slvl, state->linkid, state->bcast, state->LNR, state->uplink, error, binval, invalid);
65776 +
65777 +           invalid |= (error | binval);
65778 +       }
65779 +       
65780 +       for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
65781 +           if (rail->SwitchState[lvl].uplink != rail->SwitchLast[lvl].uplink)
65782 +               changed++;
65783 +
65784 +       if (changed)
65785 +       {
65786 +           printk ("%s: broadcast tree has changed from", rail->Name);
65787 +           for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
65788 +               printk ("%c%d", lvl == 0 ? ' ' : ',', rail->SwitchLast[lvl].uplink);
65789 +
65790 +           for (lvl = 0; lvl < rail->SwitchProbeLevel; lvl++)
65791 +               printk ("%s%d", lvl == 0 ? " to " : ",", rail->SwitchState[lvl].uplink);
65792 +           printk ("\n");
65793 +       }
65794 +
65795 +       if (rail->SwitchProbeLevel > 0)
65796 +           bcopy (rail->SwitchState, rail->SwitchLast, rail->SwitchProbeLevel * sizeof (EP_SWITCHSTATE));
65797 +    }
65798 +
65799 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
65800 +    {
65801 +       EPRINTF4 (DBG_ROUTETABLE, "%s: level %d lbolt=%lx ProbeLevelTick=%lx\n",
65802 +                 rail->Name, lvl, lbolt, rail->SwitchProbeTick[lvl]);
65803 +       
65804 +       if (AFTER (lbolt, rail->SwitchProbeTick[lvl] + EP_POSITION_TIMEOUT))
65805 +       {
65806 +           if (lvl < rail->SwitchBroadcastLevel+1)
65807 +           {
65808 +               if (lvl == 0)
65809 +                   printk ("%s: cable disconnected\n", rail->Name);
65810 +               else
65811 +                   printk ("%s: broadcast level has dropped to %d (should be %d)\n",
65812 +                           rail->Name, lvl, rail->Position.pos_levels);
65813 +           }
65814 +           break;
65815 +       }
65816 +    }
65817 +    
65818 +    if (lvl > rail->SwitchBroadcastLevel+1)
65819 +    {
65820 +       if (rail->SwitchBroadcastLevel < 0)
65821 +           printk ("%s: cable reconnected\n", rail->Name);
65822 +       if (lvl == rail->Position.pos_levels)
65823 +           printk ("%s: broadcast level has recovered\n", rail->Name);
65824 +       else
65825 +           printk ("%s: broadcast level has recovered to %d (should be %d)\n", 
65826 +                   rail->Name, lvl, rail->Position.pos_levels);
65827 +    }
65828 +    
65829 +    if (rail->SwitchBroadcastLevel != (lvl - 1))
65830 +    {
65831 +       EPRINTF2 (DBG_ROUTETABLE, "%s: setting SwitchBroadcastLevel to %d\n", rail->Name, lvl-1);
65832 +       
65833 +       rail->SwitchBroadcastLevel     = lvl - 1;
65834 +       rail->SwitchBroadcastLevelTick = lbolt;
65835 +    }
65836 +}
65837 +
65838 +
65839 +/*
65840 + * Local variables:
65841 + * c-file-style: "stroustrup"
65842 + * End:
65843 + */
65844 diff -urN clean/drivers/net/qsnet/ep/probenetwork_elan3.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3.c
65845 --- clean/drivers/net/qsnet/ep/probenetwork_elan3.c     1969-12-31 19:00:00.000000000 -0500
65846 +++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3.c       2005-04-26 05:36:19.000000000 -0400
65847 @@ -0,0 +1,302 @@
65848 +/*
65849 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
65850 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
65851 + *
65852 + *    For licensing information please see the supplied COPYING file
65853 + *
65854 + */
65855 +
65856 +#ident "@(#)$Id: probenetwork_elan3.c,v 1.41 2005/04/26 09:36:19 mike Exp $"
65857 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan3.c,v $ */
65858 +
65859 +#include <qsnet/kernel.h>
65860 +
65861 +#include <elan/kcomm.h>
65862 +
65863 +#include "kcomm_vp.h"
65864 +#include "kcomm_elan3.h"
65865 +#include "debug.h"
65866 +
65867 +#include <elan3/intrinsics.h>
65868 +
65869 +static void ep3_probe_event (EP3_RAIL *rail, void *arg);
65870 +static EP3_COOKIE_OPS ep3_probe_ops = 
65871 +{
65872 +    ep3_probe_event
65873 +} ;
65874 +
65875 +int
65876 +ep3_init_probenetwork (EP3_RAIL *rail)
65877 +{
65878 +    sdramaddr_t              stack;
65879 +    E3_Addr           sp;
65880 +    E3_BlockCopyEvent event;
65881 +    int               i;
65882 +
65883 +    if (! (stack = ep_alloc_elan (&rail->Generic, EP3_STACK_SIZE, 0, &rail->ProbeStack)))
65884 +       return -ENOMEM;
65885 +
65886 +    spin_lock_init (&rail->ProbeLock);
65887 +    kcondvar_init (&rail->ProbeWait);
65888 +
65889 +    /* Initialise the probe command structure */
65890 +    for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++)
65891 +       elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[i]), 0);
65892 +    for (i = 0; i < TR_TRACEROUTE_ENTRIES; i++)
65893 +       elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[i]), 1);
65894 +    
65895 +    RegisterCookie (&rail->CookieTable, &rail->ProbeCookie, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeDone), &ep3_probe_ops, rail);
65896 +    
65897 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Type), 0);
65898 +    elan3_sdram_writel (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeStart.ev_Count), 0);
65899 +
65900 +    EP3_INIT_COPY_EVENT (event, rail->ProbeCookie, rail->RailMainAddr + offsetof (EP3_RAIL_MAIN, ProbeDone), 1);
65901 +    elan3_sdram_copyl_to_sdram (rail->Device, &event, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeDone), sizeof (E3_BlockCopyEvent));
65902 +
65903 +    rail->RailMain->ProbeDone = EP3_EVENT_FREE;
65904 +
65905 +    sp = ep3_init_thread (rail->Device, ep_symbol (&rail->ThreadCode, "kcomm_probe"),
65906 +                         rail->ProbeStack, stack, EP3_STACK_SIZE,
65907 +                         3, rail->CommandPortAddr, rail->RailElanAddr, rail->RailMainAddr);
65908 +    
65909 +    IssueRunThread (rail, sp);
65910 +
65911 +    return 0;
65912 +}
65913 +
65914 +void
65915 +ep3_destroy_probenetwork (EP3_RAIL *rail)
65916 +{
65917 +    if (rail->ProbeStack == (sdramaddr_t) 0)
65918 +       return;
65919 +
65920 +    /* XXXX: ensure that the network probe thread is stopped */
65921 +
65922 +    DeregisterCookie (&rail->CookieTable, &rail->ProbeCookie);
65923 +
65924 +    kcondvar_destroy (&rail->ProbeWait);
65925 +    spin_lock_destroy (&rail->ProbeLock);
65926 +    
65927 +    ep_free_elan (&rail->Generic, rail->ProbeStack, EP3_STACK_SIZE);
65928 +}
65929 +
65930 +static void
65931 +ep3_probe_event (EP3_RAIL *rail, void *arg)
65932 +{
65933 +    unsigned long flags;
65934 +
65935 +    spin_lock_irqsave (&rail->ProbeLock, flags);
65936 +    rail->ProbeDone = 1;
65937 +    kcondvar_wakeupone (&rail->ProbeWait, &rail->ProbeLock);
65938 +    spin_unlock_irqrestore (&rail->ProbeLock, flags);
65939 +}
65940 +
65941 +int
65942 +ep3_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw)
65943 +{
65944 +    EP3_RAIL      *rail     = (EP3_RAIL *) r;
65945 +    EP3_RAIL_MAIN *railMain = rail->RailMain;
65946 +    sdramaddr_t    railElan = rail->RailElan;
65947 +    E3_uint16      flits[MAX_FLITS];
65948 +    E3_uint32      result;
65949 +    int                   nflits;
65950 +    unsigned long  flags;
65951 +
65952 +    spin_lock_irqsave (&rail->ProbeLock, flags);
65953 +
65954 +    nflits = GenerateProbeRoute ( flits, nodeid, level, linkup, linkdown, 0);
65955 +           
65956 +    if (LoadRoute (rail->Device, rail->RouteTable, EP_VP_PROBE(level), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
65957 +    {
65958 +       EPRINTF0 (DBG_ROUTETABLE, "ProbeRoute: cannot load route entry\n");
65959 +       spin_unlock_irqrestore (&rail->ProbeLock, flags);
65960 +       return (EINVAL);
65961 +    }
65962 +
65963 +    do {
65964 +       /* Initialise the probe source to include our partially computed nodeid */
65965 +       elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), nodeid);
65966 +       elan3_sdram_writew (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), nodeid);
65967 +
65968 +       /* Initialise the count result etc */
65969 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_SINGLE);
65970 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level);
65971 +
65972 +       railMain->ProbeResult  = -1;
65973 +           
65974 +       /* Clear the receive area */
65975 +       bzero (railMain->ProbeDest0, sizeof (railMain->ProbeDest0));
65976 +       bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1));
65977 +    
65978 +       /* Re-arm the completion event */
65979 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1);
65980 +       railMain->ProbeDone = EP3_EVENT_ACTIVE;
65981 +       rail->ProbeDone = 0;
65982 +
65983 +       /* And wakeup the thread to do the probe */
65984 +       IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart));
65985 +
65986 +       /* Now wait for it to complete */
65987 +       while (! rail->ProbeDone)
65988 +           kcondvar_wait (&rail->ProbeWait, &rail->ProbeLock, &flags);
65989 +
65990 +       /* wait for block copy event to flush write buffers */
65991 +       while (! EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone))
65992 +           if (! EP3_EVENT_FIRING(rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone), rail->ProbeCookie, railMain->ProbeDone))
65993 +               panic ("ProbeRoute: network probe event failure\n");
65994 +
65995 +       result = railMain->ProbeResult;
65996 +
65997 +       if (result == C_ACK_ERROR)
65998 +           kcondvar_timedwait (&rail->ProbeWait, &rail->ProbeLock, &flags, lbolt + (hz/8));
65999 +       
66000 +       railMain->ProbeDone = EP3_EVENT_FREE;
66001 +
66002 +    } while (result != C_ACK_OK && --attempts);
66003 +
66004 +    if (result == C_ACK_OK)
66005 +    {
66006 +       if (railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid ||
66007 +           railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != nodeid)
66008 +       {
66009 +           static unsigned long printed = 0;
66010 +           if ((lbolt - printed) > (HZ*10))
66011 +           {
66012 +               printk ("%s: lost nodeid at level %d switch %d - %d != %x\n", rail->Generic.Name, level, sw,
66013 +                       railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level)+1) - 1], nodeid);
66014 +               printed = lbolt;
66015 +           }
66016 +           result = C_ACK_ERROR;
66017 +       }
66018 +       else
66019 +       {
66020 +           E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - level - 1];
66021 +           E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - level - 1];
66022 +               
66023 +           EPRINTF7 (DBG_PROBE, "%s: level %d switch %d - linkid=%d bcast=%d LNR=%02x%s\n", 
66024 +                     rail->Generic.Name, level, sw, TR_TRACEROUTE0_LINKID(val0),
66025 +                     TR_TRACEROUTE1_BCAST_TOP(val1), TR_TRACEROUTE0_LNR(val0),
66026 +                     TR_TRACEROUTE0_REVID(val0) ? "" : " RevA Part");
66027 +           
66028 +           lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
66029 +           lsw->link    = TR_TRACEROUTE0_LINKID(val0);
66030 +           lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
66031 +           lsw->invalid = (TR_TRACEROUTE0_REVID(val0) == 0);
66032 +       }
66033 +    }
66034 +    spin_unlock_irqrestore (&rail->ProbeLock, flags);
66035 +    
66036 +    return (result == C_ACK_OK);
66037 +}
66038 +
66039 +void
66040 +ep3_probe_position_found (EP3_RAIL *rail, ELAN_POSITION *pos)
66041 +{
66042 +    E3_uint16  flits[MAX_FLITS];
66043 +    int        lvl, nflits;
66044 +    
66045 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
66046 +    {
66047 +       nflits = GenerateCheckRoute (pos, flits, pos->pos_levels - lvl - 1, 0);
66048 +
66049 +       if (LoadRoute (rail->Device, rail->Ctxt->RouteTable, EP_VP_PROBE(lvl), ELAN3_MRF_CONTEXT_NUM|SYS_CONTEXT_BIT, nflits, flits) != 0)
66050 +           panic ("ep3_probe_position_found: cannot load probe route entry\n");
66051 +    }
66052 +    
66053 +    /* Initialise the traceroute source data with our nodeid */
66054 +    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource0[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
66055 +    elan3_sdram_writew (rail->Device, rail->RailElan + offsetof (EP3_RAIL_ELAN, ProbeSource1[TR_TRACEROUTE_ENTRIES-1]), pos->pos_nodeid);
66056 +}
66057 +
66058 +int
66059 +ep3_check_position (EP_RAIL *r)
66060 +{
66061 +    EP3_RAIL      *rail     = (EP3_RAIL *) r;
66062 +    EP3_RAIL_MAIN *railMain = rail->RailMain;
66063 +    sdramaddr_t    railElan = rail->RailElan;
66064 +    ELAN_POSITION *pos      = &rail->Generic.Position;
66065 +    unsigned int   level    = rail->RailMain->ProbeLevel;
66066 +    unsigned int   updated  = EP3_EVENT_FIRED (rail->ProbeCookie, railMain->ProbeDone);
66067 +    unsigned int   lvl;
66068 +
66069 +    if (updated)
66070 +    {
66071 +       if (railMain->ProbeResult != C_ACK_OK)
66072 +       {
66073 +           EPRINTF2 (DBG_PROBE, "%s: CheckNetworkPosition: packet nacked result=%d\n", rail->Generic.Name, railMain->ProbeResult); 
66074 +           
66075 +           rail->Generic.SwitchProbeLevel = -1;
66076 +       }
66077 +       else
66078 +       {
66079 +           E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
66080 +           E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - 2*(level+1)];
66081 +
66082 +           if (val0 != pos->pos_nodeid || val1 != pos->pos_nodeid)
66083 +           {
66084 +               static unsigned long printed = 0;
66085 +
66086 +               /* We've received a packet from another node - this probably means
66087 +                * that we've moved */
66088 +               if ((lbolt - printed) > (HZ*10))
66089 +               {
66090 +                   printk ("%s: ep3_check_position - level %d lost nodeid\n", rail->Generic.Name, level);
66091 +                   printed = lbolt;
66092 +               }
66093 +
66094 +               rail->Generic.SwitchProbeLevel = -1;
66095 +           }
66096 +           else
66097 +           {
66098 +               for (lvl = 0; lvl <= level; lvl++)
66099 +               {
66100 +                   E3_uint16 val0 = railMain->ProbeDest0[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
66101 +                   E3_uint16 val1 = railMain->ProbeDest1[TR_TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
66102 +
66103 +                   rail->Generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID(val0);
66104 +                   rail->Generic.SwitchState[lvl].LNR    = TR_TRACEROUTE0_LNR(val0);
66105 +                   rail->Generic.SwitchState[lvl].bcast  = TR_TRACEROUTE1_BCAST_TOP(val1);
66106 +                   rail->Generic.SwitchState[lvl].uplink = 4;
66107 +
66108 +                   EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->Generic.SwitchState[lvl].linkid,
66109 +                             rail->Generic.SwitchState[lvl].LNR, rail->Generic.SwitchState[lvl].bcast ,rail->Generic.SwitchState[lvl].uplink);
66110 +               }
66111 +               rail->Generic.SwitchProbeLevel = level;
66112 +           }
66113 +       }
66114 +
66115 +       railMain->ProbeDone = EP3_EVENT_FREE;
66116 +    }
66117 +
66118 +    if (railMain->ProbeDone == EP3_EVENT_FREE)
66119 +    {
66120 +       if (rail->Generic.SwitchBroadcastLevel == rail->Generic.Position.pos_levels-1)
66121 +           level = rail->Generic.Position.pos_levels - 1;
66122 +       else
66123 +           level = rail->Generic.SwitchBroadcastLevel + 1;
66124 +
66125 +       EPRINTF2 (DBG_PROBE, "%s: ep3_check_postiion: level %d\n", rail->Generic.Name, level);
66126 +
66127 +       /* Initialise the count result etc */
66128 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeType), PROBE_MULTIPLE);
66129 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeLevel), level);
66130 +
66131 +       railMain->ProbeResult = -1;
66132 +       railMain->ProbeLevel  = -1;
66133 +       
66134 +       /* Clear the receive area */
66135 +       bzero (railMain->ProbeDest0, sizeof (railMain->ProbeDest0));
66136 +       bzero (railMain->ProbeDest1, sizeof (railMain->ProbeDest1));
66137 +       
66138 +       /* Re-arm the completion event */
66139 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Type), EV_TYPE_BCOPY);
66140 +       elan3_sdram_writel (rail->Device, railElan + offsetof (EP3_RAIL_ELAN, ProbeDone.ev_Count), 1);
66141 +
66142 +       railMain->ProbeDone = EP3_EVENT_ACTIVE;
66143 +       
66144 +       IssueSetevent (rail, rail->RailElanAddr + offsetof (EP3_RAIL_ELAN, ProbeStart));
66145 +    }
66146 +
66147 +    return updated;
66148 +}
66149 +
66150 diff -urN clean/drivers/net/qsnet/ep/probenetwork_elan3_thread.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3_thread.c
66151 --- clean/drivers/net/qsnet/ep/probenetwork_elan3_thread.c      1969-12-31 19:00:00.000000000 -0500
66152 +++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan3_thread.c        2004-03-24 06:32:56.000000000 -0500
66153 @@ -0,0 +1,98 @@
66154 +/*
66155 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
66156 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
66157 + *
66158 + *    For licensing information please see the supplied COPYING file
66159 + *
66160 + */
66161 +
66162 +#ident "@(#)$Id: probenetwork_elan3_thread.c,v 1.19 2004/03/24 11:32:56 david Exp $"
66163 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan3_thread.c,v $*/
66164 +
66165 +#include <elan3/e3types.h>
66166 +#include <elan3/events.h>
66167 +#include <elan3/elanregs.h>
66168 +#include <elan3/intrinsics.h>
66169 +
66170 +#include "kcomm_vp.h"
66171 +#include "kcomm_elan3.h"
66172 +
66173 +static int
66174 +kcomm_probe_vp (EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain, int vp, int attempts, int timeouts)
66175 +{
66176 +    int rc;
66177 +
66178 +    /* Since we use %g1 to hold the "rxd" so the trap handler can
66179 +     * complete the envelope processing - we pass zero to indicate we're
66180 +     * not a receiver thread */
66181 +    asm volatile ("mov %g0, %g1");
66182 +
66183 +    while (attempts && timeouts)
66184 +    {
66185 +       c_open (vp);
66186 +       c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest0, &railElan->ProbeSource0);
66187 +       c_sendmem (TR_TRACEROUTE, &railMain->ProbeDest1, &railElan->ProbeSource1);
66188 +       c_sendtrans0 (TR_SENDACK | TR_SETEVENT, (E3_Addr) 0);
66189 +       
66190 +       switch (rc = c_close())
66191 +       {
66192 +       case C_ACK_OK:
66193 +           return (C_ACK_OK);
66194 +           
66195 +       case C_ACK_DISCARD:
66196 +           attempts--;
66197 +           break;
66198 +
66199 +       default:                                        /* output timeout */
66200 +           timeouts--;
66201 +       }
66202 +
66203 +       c_break_busywait();
66204 +    }
66205 +
66206 +    return (timeouts == 0 ? C_ACK_ERROR : C_ACK_DISCARD);
66207 +}
66208 +
66209 +void
66210 +kcomm_probe (E3_CommandPort *cport, EP3_RAIL_ELAN *railElan, EP3_RAIL_MAIN *railMain)
66211 +{
66212 +    int level;
66213 +
66214 +    for (;;)
66215 +    {
66216 +       c_waitevent (&railElan->ProbeStart, 1);
66217 +
66218 +       switch (railElan->ProbeType)
66219 +       {
66220 +       case PROBE_SINGLE:
66221 +           railMain->ProbeResult = kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(railElan->ProbeLevel),
66222 +                                                   PROBE_SINGLE_ATTEMPTS, PROBE_SINGLE_TIMEOUTS);
66223 +
66224 +           cport->SetEvent = (E3_Addr) &railElan->ProbeDone;
66225 +           break;
66226 +
66227 +       case PROBE_MULTIPLE:
66228 +           for (level = railElan->ProbeLevel; level >= 0; level--)
66229 +           {
66230 +               if (kcomm_probe_vp (railElan, railMain, EP_VP_PROBE(level),
66231 +                                   PROBE_MULTIPLE_ATTEMPTS, PROBE_MULTIPLE_TIMEOUTS) == C_ACK_OK)
66232 +               {
66233 +                   railMain->ProbeLevel  = level;
66234 +                   railMain->ProbeResult = C_ACK_OK;
66235 +                   break;
66236 +               }
66237 +
66238 +               c_break_busywait();
66239 +           }
66240 +           cport->SetEvent = (E3_Addr) &railElan->ProbeDone;
66241 +           break;
66242 +       }
66243 +
66244 +    }
66245 +}
66246 +
66247 +/*
66248 + * Local variables:
66249 + * c-file-style: "stroustrup"
66250 + * End:
66251 + */
66252 diff -urN clean/drivers/net/qsnet/ep/probenetwork_elan4.c linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan4.c
66253 --- clean/drivers/net/qsnet/ep/probenetwork_elan4.c     1969-12-31 19:00:00.000000000 -0500
66254 +++ linux-2.6.9/drivers/net/qsnet/ep/probenetwork_elan4.c       2005-07-20 07:35:37.000000000 -0400
66255 @@ -0,0 +1,401 @@
66256 +/*
66257 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
66258 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
66259 + *
66260 + *    For licensing information please see the supplied COPYING file
66261 + *
66262 + */
66263 +
66264 +#ident "@(#)$Id: probenetwork_elan4.c,v 1.10.2.1 2005/07/20 11:35:37 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
66265 +/*      $Source: /cvs/master/quadrics/epmod/probenetwork_elan4.c,v $*/
66266 +
66267 +#include <qsnet/kernel.h>
66268 +
66269 +#include <elan/kcomm.h>
66270 +
66271 +#include "kcomm_vp.h"
66272 +#include "kcomm_elan4.h"
66273 +#include "debug.h"
66274 +
66275 +#include <elan4/trtype.h>
66276 +#include <elan4/commands.h>
66277 +
66278 +static void
66279 +probe_interrupt (EP4_RAIL *rail, void *arg)
66280 +{
66281 +    unsigned long flags;
66282 +
66283 +    spin_lock_irqsave (&rail->r_probe_lock, flags);
66284 +    rail->r_probe_done = 1;
66285 +    kcondvar_wakeupone (&rail->r_probe_wait, &rail->r_probe_lock);
66286 +    spin_unlock_irqrestore (&rail->r_probe_lock, flags);
66287 +}
66288 +
66289 +int
66290 +ep4_probe_init (EP4_RAIL *rail)
66291 +{
66292 +    spin_lock_init (&rail->r_probe_lock);
66293 +    kcondvar_init (&rail->r_probe_wait);
66294 +
66295 +    rail->r_probe_cq = ep4_alloc_ecq (rail, CQ_Size1K);
66296 +
66297 +    if (rail->r_probe_cq == NULL)
66298 +       return -ENOMEM;
66299 +
66300 +    ep4_register_intcookie (rail, &rail->r_probe_intcookie, rail->r_elan_addr, probe_interrupt, rail);
66301 +
66302 +    return 0;
66303 +}
66304 +
66305 +void
66306 +ep4_probe_destroy (EP4_RAIL *rail)
66307 +{
66308 +    if (rail->r_probe_cq)
66309 +       ep4_free_ecq (rail, rail->r_probe_cq);
66310 +
66311 +    if (rail->r_probe_intcookie.int_arg == NULL)
66312 +       return;
66313 +    ep4_deregister_intcookie (rail, &rail->r_probe_intcookie);
66314 +
66315 +    kcondvar_destroy (&rail->r_probe_wait);
66316 +    spin_lock_destroy (&rail->r_probe_lock);
66317 +}
66318 +
66319 +#define LINKDOWN(nodeid, level)        ((nodeid >> (level << 1)) & 3)
66320 +#define PROBE_PATTERN0(nodeid) (0xaddebabe ^ nodeid)
66321 +#define PROBE_PATTERN1(nodeid)  (0xfeedbeef ^ nodeid)
66322 +
66323 +#define EP4_PROBE_RETRIES      4
66324 +
66325 +int
66326 +ep4_probe_route (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, int *linkdown, int attempts, EP_SWITCH *lsw)
66327 +{
66328 +    EP4_RAIL      *rail  = (EP4_RAIL *) r;
66329 +    EP4_RAIL_MAIN *rmain = rail->r_main;
66330 +    E4_uint16      first = 0;
66331 +    int                   rb    = 0;
66332 +
66333 +    E4_uint8  packed[ROUTE_NUM_PACKED];
66334 +    E4_VirtualProcessEntry route;
66335 +    unsigned long flags;
66336 +    int i;
66337 +
66338 +    for (i = 0; i < ROUTE_NUM_PACKED; i++)
66339 +       packed[i] = 0;
66340 +
66341 +    /* Generate "up" routes */
66342 +    for (i = 0; i < level; i++)
66343 +       if (first == 0)
66344 +           first = linkup ? FIRST_ROUTE(linkup[i]) : FIRST_ADAPTIVE;
66345 +       else
66346 +           packed[rb++] = linkup ? PACKED_ROUTE(linkup[i]) : PACKED_ADAPTIVE;
66347 +    
66348 +    /* Generate a "to-me" route down */
66349 +    if (first == 0)
66350 +       first = FIRST_MYLINK;
66351 +    else
66352 +       packed[rb++] = PACKED_MYLINK;
66353 +    
66354 +    /* Generate the "down" routes */
66355 +    for (i = level-1; i >= 0; i--)
66356 +       packed[rb++] = linkdown ? PACKED_ROUTE(linkdown[i]) : PACKED_ROUTE(LINKDOWN(nodeid, i));
66357 +    
66358 +    /* Pack up the routes into the virtual process entry */
66359 +    route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3);
66360 +    route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM);
66361 +
66362 +    for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
66363 +    {
66364 +       route.Values[0] |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
66365 +       route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
66366 +    }
66367 +
66368 +    elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(level), &route);
66369 +    
66370 +    while (attempts--)
66371 +    {
66372 +       rail->r_probe_done = 0;
66373 +
66374 +       /* generate the STEN packet - note we use a datatype of dword as we're copying to elan in dwords
66375 +        *   NB - no flow control is required, since the max packet size is less than the command queue
66376 +        *        size and it's dedicated for network probing.
66377 +        */
66378 +       
66379 +       elan4_guard   (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_RESET(EP4_PROBE_RETRIES));
66380 +       elan4_nop_cmd (rail->r_probe_cq->ecq_cq, 0);
66381 +       
66382 +       elan4_open_packet (rail->r_probe_cq->ecq_cq, OPEN_STEN_PKT_CMD | OPEN_PACKET(0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(level)));
66383 +       elan4_sendtransn  (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS),
66384 +                          rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0),
66385 +                          0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 
66386 +                          0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull, 0x0000000000000000ull | ((E4_uint64)PROBE_PATTERN0(nodeid) << 32));
66387 +       elan4_sendtransn  (rail->r_probe_cq->ecq_cq, TR_TRACEROUTE(TRACEROUTE_NDWORDS),
66388 +                          rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1),
66389 +                          0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 
66390 +                          0x0000000100000001ull, 0x0000000100000001ull, 0x0000000100000001ull, 0x0000000000000001ull | ((E4_uint64)PROBE_PATTERN1(nodeid) << 32));
66391 +       elan4_sendtrans0  (rail->r_probe_cq->ecq_cq, TR_NOP_TRANS | TR_LAST_AND_SEND_ACK, 0);
66392 +
66393 +       elan4_guard           (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES));
66394 +       elan4_write_dword_cmd (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FINISHED);
66395 +
66396 +       elan4_guard            (rail->r_probe_cq->ecq_cq, GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES));
66397 +       elan4_write_dword_cmd  (rail->r_probe_cq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_result), EP4_STATE_FAILED);
66398 +
66399 +       elan4_interrupt_cmd   (rail->r_probe_cq->ecq_cq,  rail->r_probe_intcookie.int_val);
66400 +
66401 +       spin_lock_irqsave (&rail->r_probe_lock, flags);
66402 +       while (! rail->r_probe_done)
66403 +           kcondvar_wait (&rail->r_probe_wait, &rail->r_probe_lock, &flags);
66404 +       spin_unlock_irqrestore (&rail->r_probe_lock, flags);
66405 +
66406 +       if (rmain->r_probe_result == EP4_STATE_FINISHED)
66407 +       {
66408 +           if (rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN0(nodeid) ||
66409 +               rmain->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level)+1) - 1] != PROBE_PATTERN1(nodeid))
66410 +           {
66411 +               static unsigned long printed = 0;
66412 +               if ((lbolt - printed) > (HZ*10))
66413 +               {
66414 +                   printk ("%s: lost nodeid at level %d switch %d - %d != %x\n", rail->r_generic.Name, level, sw,
66415 +                           rmain->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level)+1) - 1], PROBE_PATTERN0(nodeid));
66416 +                   printed = lbolt;
66417 +               }
66418 +           }
66419 +           else
66420 +           {
66421 +               E4_uint32 val0 = rmain->r_probe_dest0[TRACEROUTE_ENTRIES - level - 1];
66422 +               E4_uint32 val1 = rmain->r_probe_dest1[TRACEROUTE_ENTRIES - level - 1];
66423 +               
66424 +               lsw->lnr     = TR_TRACEROUTE0_LNR(val0);
66425 +               lsw->link    = TR_TRACEROUTE0_LINKID(val0);
66426 +               lsw->bcast   = TR_TRACEROUTE1_BCAST_TOP(val1);
66427 +               lsw->invalid = 0;
66428 +
66429 +               return 1;
66430 +           }
66431 +       }
66432 +
66433 +       rmain->r_probe_result = EP4_STATE_FREE;
66434 +    }
66435 +
66436 +    return 0;
66437 +}
66438 +
66439 +
66440 +void
66441 +ep4_probe_position_found (EP4_RAIL *rail, ELAN_POSITION *pos)
66442 +{
66443 +    ELAN4_DEV  *dev  = rail->r_ctxt.ctxt_dev;
66444 +    int         lvl;
66445 +
66446 +    for (lvl = 0; lvl < pos->pos_levels; lvl++)
66447 +    {
66448 +       /* Initialise the "probe" route to use the broadcast tree */
66449 +       ELAN_POSITION *pos     = &rail->r_generic.Position;
66450 +       unsigned char *arityp  = &pos->pos_arity[pos->pos_levels - 1];
66451 +       unsigned int   spanned = *arityp;
66452 +       E4_uint16      first   = 0;
66453 +       int            rb      = 0;
66454 +       
66455 +       E4_uint8  packed[ROUTE_NUM_PACKED];
66456 +       E4_VirtualProcessEntry route;
66457 +       int i;
66458 +       
66459 +       for (i = 0; i < ROUTE_NUM_PACKED; i++)
66460 +           packed[i] = 0;
66461 +
66462 +       /* Generate "up" routes */
66463 +       for (i = 0; i < lvl; i++, spanned *= *(--arityp))
66464 +       {
66465 +           if (first == 0)
66466 +               first = FIRST_BCAST_TREE;
66467 +           else
66468 +               packed[rb++] = PACKED_BCAST_TREE;
66469 +       }
66470 +
66471 +       /* Generate a "to-me" route down */
66472 +       if (first == 0)
66473 +           first = FIRST_MYLINK;
66474 +       else
66475 +           packed[rb++] = PACKED_MYLINK;
66476 +
66477 +       spanned /= *arityp++;
66478 +
66479 +       /* Generate the "down" routes */
66480 +       for (i = lvl-1; i >= 0; i--)
66481 +       {
66482 +           spanned /= *arityp;
66483 +           packed[rb++] = PACKED_ROUTE((pos->pos_nodeid / spanned) % *arityp);
66484 +           arityp++;
66485 +       }
66486 +
66487 +    
66488 +       /* Pack up the routes into the virtual process entry */
66489 +       route.Values[0] = first | FIRST_HIGH_PRI | FIRST_SYSTEM_PACKET | FIRST_TIMEOUT(3);
66490 +       route.Values[1] = ROUTE_CTXT_VALUE(ELAN4_KCOMM_CONTEXT_NUM);
66491 +       
66492 +       for (i = 0; i < (ROUTE_NUM_PACKED >> 1); i++)
66493 +       {
66494 +           route.Values[0] |= ((E4_uint64) packed[i]) << ((i << 2) + ROUTE_PACKED_OFFSET);
66495 +           route.Values[1] |= ((E4_uint64) packed[i+(ROUTE_NUM_PACKED >> 1)]) << ((i << 2));
66496 +       }
66497 +       
66498 +       elan4_write_route (rail->r_ctxt.ctxt_dev, rail->r_routetable, EP_VP_PROBE(lvl), &route);
66499 +       
66500 +       /* Initialise "start" event for this level */
66501 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CountAndType),
66502 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
66503 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopySource),
66504 +                           rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl]));
66505 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_start[lvl].ev_CopyDest),
66506 +                           rail->r_probe_cq->ecq_addr);
66507 +
66508 +       /* Initialise command stream - reset the start event */
66509 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_cmd),
66510 +                           WRITE_DWORD_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl])));
66511 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_reset_event_value),
66512 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, EP4_CHECK_STEN_NDWORDS));
66513 +
66514 +       /* Initialise command stream - sten traceroute packet */
66515 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_open),
66516 +                           OPEN_STEN_PKT_CMD | OPEN_PACKET (0, PACK_OK | RESTART_COUNT_ZERO, EP_VP_PROBE(lvl)));
66517 +
66518 +       /* Initialise command stream - traceroute 0 */
66519 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute0),
66520 +                           SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
66521 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute0),
66522 +                           rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest0));
66523 +       for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
66524 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
66525 +                               0x0000000000000000ull);
66526 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute0[i]),
66527 +                           0x0000000000000000ull | ((E4_uint64) PROBE_PATTERN0(pos->pos_nodeid) << 32));
66528 +
66529 +       /* Initialise command stream - traceroute 1 */
66530 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_traceroute1),
66531 +                           SEND_TRANS_CMD | (TR_TRACEROUTE(TRACEROUTE_NDWORDS) << 16));
66532 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_traceroute1),
66533 +                           rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_dest1));
66534 +       for (i = 0; i < (TRACEROUTE_NDWORDS-1); i++)
66535 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
66536 +                               0x0000000100000001ull);
66537 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_data_traceroute1[i]),
66538 +                           0x0000000000000001ull | ((E4_uint64) PROBE_PATTERN1(pos->pos_nodeid) << 32));
66539 +
66540 +       /* Initialise command stream - null sendack */
66541 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_trans_sendack),
66542 +                           SEND_TRANS_CMD | ((TR_NOP_TRANS | TR_LAST_AND_SEND_ACK) << 16));
66543 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_addr_sendack),
66544 +                           0);
66545 +       
66546 +       /* Initialise command stream - guard ok, write done */
66547 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_ok),
66548 +                           GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, PACK_OK) | GUARD_RESET(EP4_PROBE_RETRIES));
66549 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_writedword_ok),
66550 +                           WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level)));
66551 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_value_ok),
66552 +                           lvl);
66553 +
66554 +       /* Initialise command stream - guard fail, chain to next or write done */
66555 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_guard_fail),
66556 +                           GUARD_CMD | GUARD_CHANNEL(1) | GUARD_TEST(0, RESTART_COUNT_ZERO) | GUARD_RESET(EP4_PROBE_RETRIES));
66557 +
66558 +       if (lvl > 0)
66559 +       {
66560 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail),
66561 +                               SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[lvl-1])));
66562 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop),
66563 +                               NOP_CMD);
66564 +       }
66565 +       else
66566 +       {
66567 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_fail),
66568 +                               WRITE_DWORD_CMD | (rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_probe_level)));
66569 +           elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_setevent_nop),
66570 +                               EP4_PROBE_FAILED);
66571 +       }
66572 +       elan4_sdram_writeq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_check_sten[lvl].c_nop_pad),
66573 +                           NOP_CMD);
66574 +    }
66575 +
66576 +    
66577 +    rail->r_main->r_probe_level = EP4_PROBE_ACTIVE;
66578 +
66579 +    mb();
66580 +    ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[pos->pos_levels-1]));
66581 +}
66582 +
66583 +int
66584 +ep4_check_position (EP_RAIL *r)
66585 +{
66586 +    EP4_RAIL      *rail = (EP4_RAIL *) r;
66587 +    ELAN_POSITION *pos  = &rail->r_generic.Position;
66588 +    unsigned int level  = rail->r_main->r_probe_level;
66589 +    unsigned int lvl;
66590 +
66591 +    EPRINTF2 (DBG_PROBE, "%s: ep4_check_position: level=%lld\n", rail->r_generic.Name, (long long)rail->r_main->r_probe_level);
66592 +
66593 +    if (rail->r_main->r_probe_level != EP4_PROBE_ACTIVE)
66594 +    {
66595 +       if (rail->r_main->r_probe_level == EP4_PROBE_FAILED)
66596 +       {
66597 +           EPRINTF1 (DBG_PROBE, "%s: ep4_check_position: packets all nacked\n", rail->r_generic.Name);
66598 +
66599 +           rail->r_generic.SwitchProbeLevel = -1;
66600 +       }
66601 +       else
66602 +       {
66603 +           E4_uint32 val0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - 2*(level+1)];
66604 +           E4_uint32 val1  = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - 2*(level+1)];
66605 +
66606 +           if (val0 != PROBE_PATTERN0 (pos->pos_nodeid) || val1 != PROBE_PATTERN1 (pos->pos_nodeid))
66607 +           {
66608 +               static unsigned long printed = 0;
66609 +
66610 +               /* We've received a packet from another node - this probably means
66611 +                * that we've moved */
66612 +               if ((lbolt - printed) > (HZ*10))
66613 +               {
66614 +                   printk ("%s: ep4_check_position - level %d lost nodeid\n", rail->r_generic.Name, level);
66615 +                   printed = lbolt;
66616 +               }
66617 +
66618 +               rail->r_generic.SwitchProbeLevel = -1;
66619 +           }
66620 +           else
66621 +           {
66622 +               for (lvl = 0 ; lvl <= level; lvl++)
66623 +               {
66624 +                   E4_uint32 uval0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - lvl - 1];
66625 +                   E4_uint32 dval0  = rail->r_main->r_probe_dest0[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
66626 +                   E4_uint32 dval1  = rail->r_main->r_probe_dest1[TRACEROUTE_ENTRIES - ((2*level) - lvl + 1)];
66627 +
66628 +                   rail->r_generic.SwitchState[lvl].linkid = TR_TRACEROUTE0_LINKID (dval0);
66629 +                   rail->r_generic.SwitchState[lvl].LNR    = TR_TRACEROUTE0_LNR(dval0);
66630 +                   rail->r_generic.SwitchState[lvl].bcast  = TR_TRACEROUTE1_BCAST_TOP (dval1);
66631 +                   rail->r_generic.SwitchState[lvl].uplink = TR_TRACEROUTE0_LINKID (uval0);
66632 +
66633 +                   EPRINTF5 (DBG_PROBE, " --- lvl %d: linkid=%d LNR=%x bcast=%d uplink=%d\n", lvl, rail->r_generic.SwitchState[lvl].linkid,
66634 +                             rail->r_generic.SwitchState[lvl].LNR, rail->r_generic.SwitchState[lvl].bcast ,rail->r_generic.SwitchState[lvl].uplink);
66635 +
66636 +               }
66637 +
66638 +               rail->r_generic.SwitchProbeLevel = level;
66639 +           }
66640 +       }
66641 +
66642 +       rail->r_main->r_probe_level = EP4_PROBE_ACTIVE;
66643 +       mb();
66644 +
66645 +       if (rail->r_generic.SwitchBroadcastLevel == rail->r_generic.Position.pos_levels-1)
66646 +           level = rail->r_generic.Position.pos_levels - 1;
66647 +       else
66648 +           level = rail->r_generic.SwitchBroadcastLevel + 1;
66649 +
66650 +       ep4_set_event_cmd (rail->r_probe_cq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_check_start[level]));
66651 +
66652 +       return 1;
66653 +    }
66654 +
66655 +    return 0;
66656 +}
66657 diff -urN clean/drivers/net/qsnet/ep/procfs_linux.c linux-2.6.9/drivers/net/qsnet/ep/procfs_linux.c
66658 --- clean/drivers/net/qsnet/ep/procfs_linux.c   1969-12-31 19:00:00.000000000 -0500
66659 +++ linux-2.6.9/drivers/net/qsnet/ep/procfs_linux.c     2005-09-07 10:35:03.000000000 -0400
66660 @@ -0,0 +1,632 @@
66661 +/*
66662 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
66663 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
66664 + *
66665 + *    For licensing information please see the supplied COPYING file
66666 + *
66667 + */
66668 +
66669 +#ident "@(#)$Id: procfs_linux.c,v 1.60.2.3 2005/09/07 14:35:03 mike Exp $"
66670 +/*      $Source: /cvs/master/quadrics/epmod/procfs_linux.c,v $*/
66671 +
66672 +#include <qsnet/kernel.h>
66673 +#include <qsnet/autoconf.h>
66674 +
66675 +#include <elan/kcomm.h>
66676 +#include <elan/epsvc.h>
66677 +#include <elan/epcomms.h>
66678 +
66679 +#include "cm.h"
66680 +#include "debug.h"
66681 +#include "conf_linux.h"
66682 +#include <qsnet/module.h>
66683 +#include <linux/wait.h>
66684 +#include <linux/poll.h>
66685 +
66686 +#include <qsnet/procfs_linux.h>
66687 +
66688 +struct proc_dir_entry *ep_procfs_root;
66689 +struct proc_dir_entry *ep_config_root;
66690 +
66691 +/*
66692 + * We provide a slightly "special" interface for /proc/elan/device%d/nodeset,
66693 + * so that it can be included in a "poll" system call.  On each "read" on the
66694 + * file, we generate a new nodeset if a) the previous one has been completely
66695 + * read and b) if it has changed since it was generated.
66696 + *
66697 + * Unfortunately ... this doesn't allow "tail -f" to work, since this uses
66698 + * fstat() on the fd, as we only hold the last nodeset string, we could not
66699 + * handle the case where two processes were reading at different rates.
66700 + * We could maybe have implemented this as a "sliding window", so that we 
66701 + * add a new nodeset string, when it has changed and someone reads past 
66702 + * end of the last one.   Then if someone read from before our "window"
66703 + * we would produce "padding" data.  The problem with this, is that a 
66704 + * simple "cat" on /proc/elan/device%d/nodeset will read the whole "file"
66705 + * which will be mostly padding !
66706 + *
66707 + * Just to note that the purpose of this interface is:
66708 + *    1) to allow cat /proc/elan/device%d/nodeset to show the current
66709 + *       nodeset.
66710 + *    2) to allow rms (or similar) to poll() on the file, and when the
66711 + *       nodeset changes read a new one.
66712 + *
66713 + * so ... we don't bother solving the troublesome "tail -f" problem.
66714 + */
66715 +
66716 +typedef struct nodeset_private
66717 +{
66718 +    struct nodeset_private *pr_next;
66719 +    EP_RAIL                *pr_rail;
66720 +    unsigned               pr_changed;
66721 +    char                  *pr_page;
66722 +    unsigned               pr_off;
66723 +    unsigned               pr_len;
66724 +} NODESET_PRIVATE;
66725 +
66726 +NODESET_PRIVATE   *ep_nodeset_list;
66727 +wait_queue_head_t  ep_nodeset_wait;
66728 +spinlock_t         ep_nodeset_lock;
66729 +
66730 +static int
66731 +proc_write_state(struct file *file, const char *buffer,
66732 +                unsigned long count, void *data)
66733 +{
66734 +    EP_RAIL *rail = (EP_RAIL *) data;
66735 +    char    tmpbuf[128];
66736 +    int     res;
66737 +
66738 +    if (count > sizeof (tmpbuf)-1)
66739 +       return (-EINVAL);
66740 +    
66741 +    MOD_INC_USE_COUNT;
66742 +    
66743 +    if (copy_from_user (tmpbuf, buffer, count))
66744 +       res = -EFAULT;
66745 +    else 
66746 +    {
66747 +       tmpbuf[count] = '\0';   
66748 +
66749 +       if (tmpbuf[count-1] == '\n')
66750 +           tmpbuf[count-1] = '\0';
66751 +
66752 +       if (! strcmp (tmpbuf, "start") && rail->State == EP_RAIL_STATE_UNINITIALISED)
66753 +           ep_start_rail (rail);
66754 +       
66755 +       if (! strcmp (tmpbuf, "stop") && rail->State > EP_RAIL_STATE_UNINITIALISED)
66756 +           ep_stop_rail (rail);
66757 +       
66758 +       if (! strcmp (tmpbuf, "offline") && rail->State > EP_RAIL_STATE_UNINITIALISED)
66759 +           cm_force_offline (rail, 1, CM_OFFLINE_PROCFS);
66760 +
66761 +       if (! strcmp (tmpbuf, "online") && rail->State > EP_RAIL_STATE_UNINITIALISED)
66762 +           cm_force_offline (rail, 0, CM_OFFLINE_PROCFS);
66763 +
66764 +       if (! strncmp (tmpbuf, "restart=", 8) && rail->State == EP_RAIL_STATE_RUNNING)
66765 +           cm_restart_node (rail, simple_strtol (tmpbuf + 8, NULL, 0));
66766 +
66767 +       if (! strncmp (tmpbuf, "panic=", 6))
66768 +           ep_panic_node (rail->System, simple_strtol(tmpbuf + 6, NULL, 0),
66769 +                          strchr (tmpbuf, ',') ? strchr(tmpbuf, ',') + 1 : "remote panic request");
66770 +
66771 +       if (! strncmp (tmpbuf, "raise=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
66772 +           rail->Operations.RaiseFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
66773 +
66774 +       if (! strncmp (tmpbuf, "lower=", 6) && rail->State > EP_RAIL_STATE_UNINITIALISED)
66775 +           rail->Operations.LowerFilter (rail, simple_strtol (tmpbuf + 6, NULL, 0));
66776 +       
66777 +       res = count;
66778 +    }
66779 +
66780 +    MOD_DEC_USE_COUNT;
66781 +
66782 +    return (res);
66783 +}
66784 +
66785 +static int
66786 +proc_read_state(char *page, char **start, off_t off,
66787 +               int count, int *eof, void *data)
66788 +{
66789 +    EP_RAIL *rail = (EP_RAIL *) data;
66790 +    int     len;
66791 +
66792 +    switch (rail->State)
66793 +    {
66794 +    case EP_RAIL_STATE_UNINITIALISED:
66795 +       len = sprintf (page, "uninitialised\n");
66796 +       break;
66797 +    case EP_RAIL_STATE_STARTED:
66798 +       len = sprintf (page, "started\n");
66799 +       break;
66800 +    case EP_RAIL_STATE_RUNNING:
66801 +       len = sprintf (page, "running NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
66802 +       break;
66803 +    case EP_RAIL_STATE_INCOMPATIBLE:
66804 +       len = sprintf (page, "incompatible NodeId=%d NumNodes=%d\n", rail->Position.pos_nodeid, rail->Position.pos_nodes);
66805 +       break;
66806 +    default:
66807 +       len = sprintf (page, "<unknown>\n");
66808 +       break;
66809 +    }
66810 +
66811 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
66812 +}
66813 +
66814 +static int
66815 +proc_write_display(struct file *file, const char *buffer,
66816 +                  unsigned long count, void *data)
66817 +{
66818 +    EP_RAIL *rail = (EP_RAIL *) data;
66819 +    char    tmpbuf[128];
66820 +    int     res;
66821 +
66822 +    if (count > sizeof (tmpbuf)-1)
66823 +       return (-EINVAL);
66824 +    
66825 +    MOD_INC_USE_COUNT;
66826 +    
66827 +    if (copy_from_user (tmpbuf, buffer, count))
66828 +       res = -EFAULT;
66829 +    else 
66830 +    {
66831 +       tmpbuf[count] = '\0';   
66832 +
66833 +       if (tmpbuf[count-1] == '\n')
66834 +           tmpbuf[count-1] = '\0';
66835 +
66836 +       if (! strcmp (tmpbuf, "rail"))
66837 +           DisplayRail (rail);
66838 +       if (! strcmp (tmpbuf, "segs"))
66839 +           DisplaySegs (rail);
66840 +       if (! strcmp (tmpbuf, "nodes"))
66841 +           DisplayNodes (rail);
66842 +       if (! strcmp (tmpbuf, "status"))
66843 +           DisplayStatus (rail);
66844 +       if (! strcmp (tmpbuf, "debug") && rail->Operations.Debug)
66845 +           rail->Operations.Debug (rail);
66846 +       if (! strncmp (tmpbuf, "epcomms", 7))
66847 +           ep_comms_display (rail->System, tmpbuf[7] == '=' ? tmpbuf + 8 : NULL);
66848 +       res = count;
66849 +    }
66850 +
66851 +    MOD_DEC_USE_COUNT;
66852 +
66853 +    return (res);
66854 +}
66855 +
66856 +static int
66857 +proc_read_display(char *page, char **start, off_t off,
66858 +                 int count, int *eof, void *data)
66859 +{
66860 +    int len = sprintf (page, "<unreadable>\n");
66861 +    
66862 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
66863 +}
66864 +
66865 +
66866 +static int
66867 +proc_read_stats(char *page, char **start, off_t off,
66868 +               int count, int *eof, void *data)
66869 +{
66870 +    EP_RAIL *rail = (EP_RAIL *) data;
66871 +
66872 +    if ( rail == NULL ) {
66873 +       strcpy(page,"proc_read_stats rail=NULL\n");
66874 +    } else {
66875 +       page[0] = 0;
66876 +       ep_fillout_stats(rail, page);
66877 +       rail->Operations.FillOutStats (rail, page);
66878 +    }
66879 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, strlen(page)));
66880 +}
66881 +
66882 +static int
66883 +proc_read_devinfo(char *page, char **start, off_t off,
66884 +                 int count, int *eof, void *data)
66885 +{
66886 +    EP_RAIL       *rail    = (EP_RAIL *) data;
66887 +    ELAN_DEVINFO  *devinfo = &rail->Devinfo;
66888 +    ELAN_POSITION *pos     = &rail->Position;
66889 +    char          *p       = page;
66890 +    
66891 +    switch (devinfo->dev_device_id)
66892 +    {
66893 +    case PCI_DEVICE_ID_ELAN3:
66894 +       p += sprintf (p, "ep%d is elan3 %d rev %c\n", rail->Number, 
66895 +                     devinfo->dev_instance, 'a' + devinfo->dev_revision_id);
66896 +       break;
66897 +       
66898 +    case PCI_DEVICE_ID_ELAN4:
66899 +       p += sprintf (p, "ep%d is elan4 %d rev %c\n", rail->Number, 
66900 +                     devinfo->dev_instance, 'a' + devinfo->dev_revision_id);
66901 +       break;
66902 +    default:
66903 +       p += sprintf (p, "ep%d is unknown %x/%x\n", rail->Number, devinfo->dev_vendor_id, devinfo->dev_device_id);
66904 +       break;
66905 +    }
66906 +
66907 +    if (rail->State == EP_RAIL_STATE_RUNNING)
66908 +       p += sprintf (p, "ep%d nodeid %d numnodes %d\n", rail->Number, pos->pos_nodeid, pos->pos_nodes);
66909 +
66910 +    return (qsnet_proc_calc_metrics (page, start, off, count, eof, p - page));
66911 +}
66912 +
66913 +static struct rail_info
66914 +{
66915 +    char *name;
66916 +    int (*read_func) (char *page, char **start, off_t off, int count, int *eof, void *data);
66917 +    int (*write_func) (struct file *file, const char *buf, unsigned long count, void *data);
66918 +} rail_info[] = {
66919 +    {"state",   proc_read_state,   proc_write_state},
66920 +    {"display", proc_read_display, proc_write_display},
66921 +    {"stats",   proc_read_stats,   NULL},
66922 +    {"devinfo", proc_read_devinfo, NULL},
66923 +};
66924 +
66925 +static int
66926 +nodeset_open (struct inode *inode, struct file *file)
66927 +{
66928 +    NODESET_PRIVATE *pr;
66929 +
66930 +    if ((pr = kmalloc (sizeof (NODESET_PRIVATE), GFP_KERNEL)) == NULL)
66931 +       return (-ENOMEM);
66932 +    
66933 +    pr->pr_changed = 1;
66934 +    pr->pr_off     = 0;
66935 +    pr->pr_len     = 0;
66936 +    pr->pr_page    = NULL;
66937 +    pr->pr_rail    = (EP_RAIL *)( PDE(inode)->data );
66938 +
66939 +    spin_lock (&ep_nodeset_lock);
66940 +    pr->pr_next = ep_nodeset_list;
66941 +    ep_nodeset_list = pr;
66942 +    spin_unlock (&ep_nodeset_lock);
66943 +
66944 +    file->private_data = (void *) pr;
66945 +
66946 +    MOD_INC_USE_COUNT;
66947 +    return (0);
66948 +}
66949 +
66950 +static int
66951 +nodeset_release (struct inode *inode, struct file *file)
66952 +{
66953 +    NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data;
66954 +    NODESET_PRIVATE **ppr;
66955 +
66956 +    spin_lock (&ep_nodeset_lock);
66957 +    for (ppr = &ep_nodeset_list; (*ppr) != pr; ppr = &(*ppr)->pr_next)
66958 +       ;
66959 +    (*ppr) = pr->pr_next;
66960 +    spin_unlock (&ep_nodeset_lock);
66961 +
66962 +    if (pr->pr_page)
66963 +       free_page ((unsigned long) pr->pr_page);
66964 +    kfree (pr);
66965 +    
66966 +    MOD_DEC_USE_COUNT;
66967 +    return (0);
66968 +}
66969 +
66970 +static ssize_t
66971 +nodeset_read (struct file *file, char *buf, size_t count, loff_t *ppos)
66972 +{
66973 +    NODESET_PRIVATE *pr  = (NODESET_PRIVATE *) file->private_data;
66974 +    EP_RAIL          *rail = pr->pr_rail;
66975 +    int              error;
66976 +    unsigned long    flags;
66977 +
66978 +    if (!pr->pr_changed && pr->pr_off >= pr->pr_len)
66979 +       return (0);
66980 +
66981 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
66982 +       return (error);
66983 +
66984 +    if (pr->pr_page == NULL && (pr->pr_page = (char *) __get_free_page (GFP_KERNEL)) == NULL)
66985 +       return (-ENOMEM);
66986 +
66987 +    if (pr->pr_off >= pr->pr_len)
66988 +    {
66989 +       kmutex_lock (&rail->CallbackLock);
66990 +       if (rail->State == EP_RAIL_STATE_RUNNING)
66991 +       {
66992 +           spin_lock_irqsave (&rail->System->NodeLock, flags);
66993 +           ep_sprintf_bitmap (pr->pr_page, PAGESIZE, statemap_tobitmap(rail->NodeSet), 0, 0, rail->Position.pos_nodes);
66994 +           spin_unlock_irqrestore (&rail->System->NodeLock, flags);
66995 +
66996 +           if (rail->SwitchBroadcastLevel == -1)
66997 +               strcat (pr->pr_page, "<disconnected>");
66998 +           else if (rail->SwitchBroadcastLevel < (rail->Position.pos_levels-1))
66999 +               sprintf (pr->pr_page + strlen (pr->pr_page), "<%d>", rail->SwitchBroadcastLevel);
67000 +           strcat (pr->pr_page, "\n");
67001 +       }
67002 +       else
67003 +           strcpy (pr->pr_page, "<not running>\n");
67004 +       kmutex_unlock (&rail->CallbackLock);
67005 +
67006 +       pr->pr_len     = strlen (pr->pr_page);
67007 +       pr->pr_off     = 0;
67008 +       pr->pr_changed = 0;
67009 +    }
67010 +
67011 +    if (count >= (pr->pr_len - pr->pr_off))
67012 +       count = pr->pr_len - pr->pr_off;
67013 +
67014 +    copy_to_user (buf, pr->pr_page + pr->pr_off, count);
67015 +
67016 +    pr->pr_off += count;
67017 +    *ppos      += count;
67018 +
67019 +    if (pr->pr_off >= pr->pr_len)
67020 +    {
67021 +       free_page ((unsigned long) pr->pr_page);
67022 +       pr->pr_page = NULL;
67023 +    }
67024 +
67025 +    return (count);
67026 +}
67027 +
67028 +static unsigned int
67029 +nodeset_poll (struct file *file, poll_table *wait)
67030 +{
67031 +    NODESET_PRIVATE *pr = (NODESET_PRIVATE *) file->private_data;
67032 +
67033 +    poll_wait (file, &ep_nodeset_wait, wait);
67034 +    if (pr->pr_changed || pr->pr_off < pr->pr_len)
67035 +       return (POLLIN | POLLRDNORM);
67036 +    return (0);
67037 +}
67038 +
67039 +static void 
67040 +nodeset_callback (void *arg, statemap_t *map)
67041 +{
67042 +    EP_RAIL         *rail = (EP_RAIL *) arg;
67043 +    NODESET_PRIVATE *pr;
67044 +
67045 +    ep_display_bitmap (rail->Name, "Nodeset", statemap_tobitmap(map), 0, ep_numnodes(rail->System));
67046 +
67047 +    spin_lock (&ep_nodeset_lock);
67048 +    for (pr = ep_nodeset_list; pr; pr = pr->pr_next)
67049 +       if (pr->pr_rail == rail)
67050 +           pr->pr_changed = 1;
67051 +    spin_unlock (&ep_nodeset_lock);
67052 +
67053 +    wake_up_interruptible (&ep_nodeset_wait);
67054 +}
67055 +
67056 +static int
67057 +proc_open (struct inode *inode, struct file *file)
67058 +{
67059 +    QSNET_PROC_PRIVATE *pr;
67060 +    CM_RAIL            *cmRail;
67061 +    EP_RAIL            *epRail;
67062 +    int                 pages = 4;
67063 +    unsigned long       flags;
67064 +
67065 +    if ((pr = kmalloc (sizeof (QSNET_PROC_PRIVATE), GFP_KERNEL)) == NULL)
67066 +       return (-ENOMEM);
67067 +    
67068 +    epRail = (EP_RAIL *)(PDE(inode)->data);
67069 +       
67070 +    do {       
67071 +       pr->pr_data_len = PAGESIZE * pages;
67072 +
67073 +       KMEM_ZALLOC (pr->pr_data, char *, pr->pr_data_len, 1);
67074 +       if (pr->pr_data == NULL) 
67075 +       { 
67076 +           pr->pr_len  = sprintf (pr->pr_data, "Out of Memory\n");
67077 +           break;
67078 +       } 
67079 +       
67080 +       pr->pr_off     = 0;
67081 +       pr->pr_len     = 0;
67082 +       pr->pr_data[0] = 0;
67083 +       
67084 +       if (epRail->State != EP_RAIL_STATE_RUNNING) 
67085 +       { 
67086 +           pr->pr_len  = sprintf (pr->pr_data, "Rail not Running\n");
67087 +           break;
67088 +       } 
67089 +       else 
67090 +       {
67091 +           pr->pr_di.func  = qsnet_proc_character_fill;
67092 +           pr->pr_di.arg   = (long)pr;
67093 +
67094 +           if (!strcmp("maps", file->f_dentry->d_iname)) 
67095 +           {
67096 +               cmRail = epRail->ClusterRail;
67097 +
67098 +               spin_lock_irqsave (&cmRail->Lock, flags);
67099 +               DisplayNodeMaps (&pr->pr_di, cmRail);   
67100 +               spin_unlock_irqrestore (&cmRail->Lock, flags);  
67101 +           }
67102 +
67103 +           if (!strcmp("segs", file->f_dentry->d_iname)) 
67104 +           {
67105 +               cmRail = epRail->ClusterRail;
67106 +               
67107 +               spin_lock_irqsave (&cmRail->Lock, flags);       
67108 +               DisplayNodeSgmts (&pr->pr_di, cmRail);
67109 +               spin_unlock_irqrestore (&cmRail->Lock, flags);
67110 +           }
67111 +
67112 +           if (!strcmp("tree", file->f_dentry->d_iname)) 
67113 +               DisplayRailDo (&pr->pr_di, epRail);
67114 +       }
67115 +
67116 +       if ( pr->pr_len < pr->pr_data_len) 
67117 +           break; /* we managed to get all the output into the buffer */
67118 +
67119 +       pages++;
67120 +       KMEM_FREE ( pr->pr_data,  pr->pr_data_len);
67121 +    } while (1);
67122 +       
67123 +
67124 +    file->private_data = (void *) pr;
67125 +
67126 +    MOD_INC_USE_COUNT;
67127 +    return (0);
67128 +}
67129 +
67130 +struct file_operations proc_nodeset_operations = 
67131 +{
67132 +    read:      nodeset_read,
67133 +    poll:      nodeset_poll,
67134 +    open:      nodeset_open,
67135 +    release:   nodeset_release,
67136 +};
67137 +
67138 +struct file_operations proc_operations = 
67139 +{
67140 +    read:      qsnet_proc_read,
67141 +    open:      proc_open,
67142 +    release:   qsnet_proc_release,
67143 +};
67144 +
67145 +void
67146 +ep_procfs_rail_init (EP_RAIL *rail)
67147 +{
67148 +    struct proc_dir_entry *dir;
67149 +    struct proc_dir_entry *p;
67150 +    char                   name[10];
67151 +    int                    i;
67152 +
67153 +    sprintf (name, "rail%d", rail->Number);
67154 +
67155 +    if ((dir = rail->ProcDir = proc_mkdir (name, ep_procfs_root)) == NULL)
67156 +       return;
67157 +
67158 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
67159 +    {
67160 +       if ((p = create_proc_entry (rail_info[i].name, 0, dir)) != NULL)
67161 +       {
67162 +           p->read_proc  = rail_info[i].read_func;
67163 +           p->write_proc = rail_info[i].write_func;
67164 +           p->data       = rail;
67165 +           p->owner      = THIS_MODULE;
67166 +       }
67167 +    }
67168 +
67169 +    if ((p = create_proc_entry ("nodeset", 0, dir)) != NULL)
67170 +    {
67171 +       p->proc_fops = &proc_nodeset_operations;
67172 +       p->owner     = THIS_MODULE;
67173 +       p->data      = rail;
67174 +
67175 +       rail->CallbackRegistered = 1;
67176 +       ep_register_callback (rail, EP_CB_NODESET, nodeset_callback, rail);
67177 +    }
67178 +     
67179 +    if ((p = create_proc_entry ("maps", 0, dir)) != NULL)
67180 +    {
67181 +       p->proc_fops = &proc_operations;
67182 +       p->owner     = THIS_MODULE;
67183 +       p->data      = rail;    
67184 +    }
67185 +    
67186 +    if ((p = create_proc_entry ("segs", 0, dir)) != NULL)
67187 +    {
67188 +       p->proc_fops = &proc_operations;
67189 +       p->owner     = THIS_MODULE;
67190 +       p->data      = rail;
67191 +    }
67192 +    
67193 +    if ((p = create_proc_entry ("tree", 0, dir)) != NULL)
67194 +    {
67195 +       p->proc_fops = &proc_operations;
67196 +       p->owner     = THIS_MODULE;
67197 +       p->data      = rail;
67198 +    }
67199 +
67200 +}
67201 +
67202 +void
67203 +ep_procfs_rail_fini (EP_RAIL *rail)
67204 +{
67205 +    struct proc_dir_entry *dir = rail->ProcDir;
67206 +    char name[10];
67207 +    int  i;
67208 +
67209 +    if (dir == NULL)
67210 +       return;
67211 +
67212 +    if (rail->CallbackRegistered)
67213 +    {
67214 +       ep_remove_callback (rail, EP_CB_NODESET, nodeset_callback, rail);
67215 +
67216 +       remove_proc_entry ("nodeset", dir);
67217 +    }
67218 +
67219 +    remove_proc_entry ("maps",    dir);
67220 +    remove_proc_entry ("segs",    dir);
67221 +    remove_proc_entry ("tree",    dir);
67222 +
67223 +    for (i = 0; i < sizeof (rail_info)/sizeof (rail_info[0]); i++)
67224 +       remove_proc_entry (rail_info[i].name, dir);
67225 +
67226 +    sprintf (name, "rail%d", rail->Number);
67227 +    remove_proc_entry (name, ep_procfs_root);
67228 +}
67229 +
67230 +#include "quadrics_version.h"
67231 +static char     quadrics_version[] = QUADRICS_VERSION;
67232 +
67233 +void
67234 +ep_procfs_init()
67235 +{
67236 +    extern int txd_stabilise;
67237 +    extern int MaxSwitchLevels;
67238 +
67239 +    printk ("ep Module (version %s)\n", quadrics_version);
67240 +
67241 +    spin_lock_init (&ep_nodeset_lock);
67242 +    init_waitqueue_head (&ep_nodeset_wait);
67243 +
67244 +    ep_procfs_root = proc_mkdir ("ep", qsnet_procfs_root);
67245 +    ep_config_root = proc_mkdir ("config", ep_procfs_root);
67246 +
67247 +    qsnet_proc_register_str (ep_procfs_root, "version", quadrics_version, 1);
67248 +
67249 +    qsnet_proc_register_hex (ep_config_root, "epdebug",               &epdebug,               0);
67250 +    qsnet_proc_register_hex (ep_config_root, "epdebug_console",       &epdebug_console,       0);
67251 +    qsnet_proc_register_hex (ep_config_root, "epdebug_cmlevel",       &epdebug_cmlevel,       0);
67252 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
67253 +    qsnet_proc_register_hex (ep_config_root, "epdebug_check_sum",     &epdebug_check_sum,     0);
67254 +#endif
67255 +    qsnet_proc_register_hex (ep_config_root, "epcomms_forward_limit", &epcomms_forward_limit, 0);
67256 +    qsnet_proc_register_int (ep_config_root, "txd_stabilise",         &txd_stabilise,         0);
67257 +    qsnet_proc_register_int (ep_config_root, "assfail_mode",          &assfail_mode,          0);
67258 +    qsnet_proc_register_int (ep_config_root, "max_switch_levels",     &MaxSwitchLevels,       1);
67259 +
67260 +    ep_procfs_rcvr_xmtr_init();
67261 +}
67262 +
67263 +void
67264 +ep_procfs_fini(void)
67265 +{
67266 +    ep_procfs_rcvr_xmtr_fini();
67267 +
67268 +    remove_proc_entry ("max_switch_levels",     ep_config_root);
67269 +    remove_proc_entry ("assfail_mode",          ep_config_root);
67270 +    remove_proc_entry ("txd_stabilise",         ep_config_root);
67271 +    remove_proc_entry ("epcomms_forward_limit", ep_config_root);
67272 +
67273 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
67274 +    remove_proc_entry ("epdebug_check_sum",     ep_config_root);
67275 +#endif
67276 +    remove_proc_entry ("epdebug_cmlevel",       ep_config_root);
67277 +    remove_proc_entry ("epdebug_console",       ep_config_root);
67278 +    remove_proc_entry ("epdebug",               ep_config_root);
67279 +
67280 +    remove_proc_entry ("version", ep_procfs_root);
67281 +    
67282 +    remove_proc_entry ("config", ep_procfs_root);
67283 +    remove_proc_entry ("ep", qsnet_procfs_root);
67284 +
67285 +    spin_lock_destroy (&ep_nodeset_lock);
67286 +}
67287 +
67288 +/*
67289 + * Local variables:
67290 + * c-file-style: "stroustrup"
67291 + * End:
67292 + */
67293 diff -urN clean/drivers/net/qsnet/ep/quadrics_version.h linux-2.6.9/drivers/net/qsnet/ep/quadrics_version.h
67294 --- clean/drivers/net/qsnet/ep/quadrics_version.h       1969-12-31 19:00:00.000000000 -0500
67295 +++ linux-2.6.9/drivers/net/qsnet/ep/quadrics_version.h 2005-09-07 10:39:49.000000000 -0400
67296 @@ -0,0 +1 @@
67297 +#define QUADRICS_VERSION "5.11.3qsnet"
67298 diff -urN clean/drivers/net/qsnet/ep/railhints.c linux-2.6.9/drivers/net/qsnet/ep/railhints.c
67299 --- clean/drivers/net/qsnet/ep/railhints.c      1969-12-31 19:00:00.000000000 -0500
67300 +++ linux-2.6.9/drivers/net/qsnet/ep/railhints.c        2004-02-06 17:37:06.000000000 -0500
67301 @@ -0,0 +1,103 @@
67302 +/*
67303 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
67304 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
67305 + *
67306 + *    For licensing information please see the supplied COPYING file
67307 + *
67308 + */
67309 +
67310 +#ident "@(#)$Id: railhints.c,v 1.5 2004/02/06 22:37:06 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
67311 +/*      $Source: /cvs/master/quadrics/epmod/railhints.c,v $*/
67312 +
67313 +#include <qsnet/kernel.h>
67314 +
67315 +#include <elan/kcomm.h>
67316 +#include <elan/epsvc.h>
67317 +#include <elan/epcomms.h>
67318 +
67319 +#include "debug.h"
67320 +
67321 +int
67322 +ep_pickRail(EP_RAILMASK railmask)
67323 +{
67324 +    static volatile int lastGlobal;
67325 +    int i, rnum, last = lastGlobal;
67326 +
67327 +    /* Pick a single rail out of the railmask */
67328 +    for (i = 0; i < EP_MAX_RAILS; i++)
67329 +       if (railmask & (1 << ((last + i) % EP_MAX_RAILS)))
67330 +           break;
67331 +
67332 +    if (i == EP_MAX_RAILS)
67333 +       return (-1);
67334 +
67335 +    rnum = (last + i) % EP_MAX_RAILS;
67336 +
67337 +    lastGlobal = (rnum + 1) % EP_MAX_RAILS;
67338 +
67339 +    ASSERT (railmask & (1 << rnum));
67340 +
67341 +    return (rnum);
67342 +}
67343 +
67344 +int
67345 +ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails)
67346 +{
67347 +    /* Return a single rail out of allowed mask with the best connectivity for broadcast. */
67348 +    return (ep_pickRail (allowedRails & xmtr->RailMask));
67349 +}
67350 +
67351 +int
67352 +ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId)
67353 +{
67354 +    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId];
67355 +
67356 +    EPRINTF5 (DBG_XMTR, "ep_xmtr_prefrail: xmtr=%p allowedRails=%x nodeId=%d xmtr->RailMaks=%x Connected=%x\n", 
67357 +             xmtr, allowedRails, nodeId, xmtr->RailMask, node->ConnectedRails);
67358 +
67359 +    /* Return a single rail which is currently connected to nodeId (limited to rails
67360 +     * in allowedmask) - if more than one rail is possible, then round-robin between 
67361 +     * them */
67362 +    return (ep_pickRail (allowedRails & xmtr->RailMask & node->ConnectedRails));
67363 +}
67364 +
67365 +EP_RAILMASK
67366 +ep_xmtr_availrails (EP_XMTR *xmtr)
67367 +{
67368 +    /* Return which rails can be used to transmit on. */
67369 +
67370 +    return (xmtr->RailMask);
67371 +}
67372 +
67373 +EP_RAILMASK
67374 +ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId)
67375 +{
67376 +    EP_NODE *node = &xmtr->Subsys->Subsys.Sys->Nodes[nodeId];
67377 +
67378 +    /* Return which rails can be used to transmit to this node. */
67379 +
67380 +    return (xmtr->RailMask & node->ConnectedRails);
67381 +}
67382 +
67383 +int
67384 +ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails)
67385 +{
67386 +    /* Return the "best" rail for queueing a receive buffer out on - this will be a
67387 +     * rail with ThreadWaiting set or the rail with the least descriptors queued
67388 +     * on it. */
67389 +    
67390 +    return (ep_pickRail (allowedRails & rcvr->RailMask));
67391 +}
67392 +
67393 +EP_RAILMASK
67394 +ep_rcvr_availrails (EP_RCVR *rcvr)
67395 +{
67396 +    /* Return which rails can be used to queue receive buffers. */
67397 +    return (rcvr->RailMask);
67398 +}
67399 +
67400 +/*
67401 + * Local variables:
67402 + * c-file-style: "stroustrup"
67403 + * End:
67404 + */
67405 diff -urN clean/drivers/net/qsnet/ep/rmap.c linux-2.6.9/drivers/net/qsnet/ep/rmap.c
67406 --- clean/drivers/net/qsnet/ep/rmap.c   1969-12-31 19:00:00.000000000 -0500
67407 +++ linux-2.6.9/drivers/net/qsnet/ep/rmap.c     2004-05-19 06:24:38.000000000 -0400
67408 @@ -0,0 +1,365 @@
67409 +/*
67410 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
67411 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
67412 + *
67413 + *    For licensing information please see the supplied COPYING file
67414 + *
67415 + */
67416 +
67417 +#ident "@(#)$Id: rmap.c,v 1.15 2004/05/19 10:24:38 david Exp $"
67418 +/*      $Source: /cvs/master/quadrics/epmod/rmap.c,v $ */
67419 +
67420 +#include <qsnet/kernel.h>
67421 +#include <elan/rmap.h>
67422 +
67423 +#include "debug.h"
67424 +
67425 +void
67426 +ep_display_rmap (EP_RMAP *mp)
67427 +{
67428 +    EP_RMAP_ENTRY *bp;
67429 +    unsigned long flags;
67430 +    
67431 +    spin_lock_irqsave (&mp->m_lock, flags);
67432 +    ep_debugf (DBG_DEBUG, "map: %s size %d free %d\n", mp->m_name, mp->m_size, mp->m_free);
67433 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
67434 +       ep_debugf (DBG_DEBUG, "   [%lx - %lx]\n", bp->m_addr, bp->m_addr+bp->m_size-1);
67435 +    spin_unlock_irqrestore (&mp->m_lock, flags);
67436 +}
67437 +
67438 +void
67439 +ep_mapinit (EP_RMAP *mp, char *name, u_int mapsize)
67440 +{
67441 +    spin_lock_init (&mp->m_lock);
67442 +    kcondvar_init (&mp->m_wait);
67443 +    
67444 +    /* The final segment in the array has size 0 and acts as a delimiter
67445 +     * we ensure that we never use segments past the end of the array by
67446 +     * maintaining a free segment count in m_free.  When excess segments
67447 +     * occur we discard some resources */
67448 +    
67449 +    mp->m_size = mapsize;
67450 +    mp->m_free = mapsize;
67451 +    mp->m_name = name;
67452 +    
67453 +    bzero (mp->m_map, sizeof (EP_RMAP_ENTRY) * (mapsize+1));
67454 +}
67455 +
67456 +EP_RMAP *
67457 +ep_rmallocmap (size_t mapsize, char *name, int cansleep)
67458 +{
67459 +    EP_RMAP *mp;
67460 +
67461 +    KMEM_ZALLOC (mp, EP_RMAP *, sizeof (EP_RMAP) + mapsize*sizeof (EP_RMAP_ENTRY), cansleep);
67462 +
67463 +    if (mp != NULL)
67464 +       ep_mapinit (mp, name, mapsize);
67465 +
67466 +    return (mp);
67467 +}
67468 +
67469 +void
67470 +ep_rmfreemap (EP_RMAP *mp)
67471 +{
67472 +    spin_lock_destroy (&mp->m_lock);
67473 +    kcondvar_destroy (&mp->m_wait);
67474 +    
67475 +    KMEM_FREE (mp, sizeof (EP_RMAP) + mp->m_size * sizeof (EP_RMAP_ENTRY));
67476 +}
67477 +
67478 +static u_long
67479 +ep_rmalloc_locked (EP_RMAP *mp, size_t size)
67480 +{
67481 +    EP_RMAP_ENTRY *bp;
67482 +    u_long            addr;
67483 +    
67484 +    ASSERT (size > 0);
67485 +    ASSERT (SPINLOCK_HELD (&mp->m_lock));
67486 +
67487 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
67488 +    {
67489 +       if (bp->m_size >= size)
67490 +       {
67491 +           addr = bp->m_addr;
67492 +           bp->m_addr += size;
67493 +           
67494 +           if ((bp->m_size -= size) == 0)
67495 +           {
67496 +               /* taken all of this slot - so shift the map down */
67497 +               do {
67498 +                   bp++;
67499 +                   (bp-1)->m_addr = bp->m_addr;
67500 +               } while (((bp-1)->m_size = bp->m_size) != 0);
67501 +
67502 +               mp->m_free++;
67503 +           }
67504 +           return (addr);
67505 +       }
67506 +    }
67507 +
67508 +    return (0);
67509 +}
67510 +
67511 +u_long
67512 +ep_rmalloc (EP_RMAP *mp, size_t size, int cansleep)
67513 +{
67514 +    unsigned long addr;
67515 +    unsigned long flags;
67516 +
67517 +    spin_lock_irqsave (&mp->m_lock, flags);
67518 +    while ((addr = ep_rmalloc_locked (mp, size)) == 0 && cansleep)
67519 +    {
67520 +       mp->m_want = 1;
67521 +       kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags);
67522 +    }
67523 +
67524 +    spin_unlock_irqrestore (&mp->m_lock, flags);
67525 +
67526 +    return (addr);
67527 +}
67528 +
67529 +
67530 +
67531 +u_long
67532 +ep_rmalloc_constrained (EP_RMAP *mp, size_t size, u_long alo, u_long ahi, u_long align, int cansleep)
67533 +{
67534 +    EP_RMAP_ENTRY *bp, *bp2, *lbp;
67535 +    unsigned long addr=0;
67536 +    size_t        delta;
67537 +    int           ok;
67538 +    unsigned long flags;
67539 +
67540 +    spin_lock_irqsave (&mp->m_lock, flags);
67541 + again:
67542 +    for (bp = &mp->m_map[0]; bp->m_size; bp++)
67543 +    {
67544 +       delta = 0;
67545 +       
67546 +       if (alo < bp->m_addr)
67547 +       {
67548 +           addr = bp->m_addr;
67549 +           
67550 +           if (addr & (align-1))
67551 +               addr = (addr + (align-1)) & ~(align-1);
67552 +           
67553 +           delta = addr - bp->m_addr;
67554 +           
67555 +           if (ahi >= bp->m_addr + bp->m_size)
67556 +               ok = (bp->m_size >= (size + delta));
67557 +           else
67558 +               ok = ((bp->m_addr + size + delta) <= ahi);
67559 +       }
67560 +       else
67561 +       {
67562 +           addr = alo;
67563 +           if (addr & (align-1))
67564 +               addr = (addr + (align-1)) & ~(align-1);
67565 +           delta = addr - bp->m_addr;
67566 +           
67567 +           if (ahi >= bp->m_addr + bp->m_size)
67568 +               ok = ((alo + size + delta) <= (bp->m_addr + bp->m_size));
67569 +           else
67570 +               ok = ((alo + size + delta) <= ahi);
67571 +       }
67572 +
67573 +       if (ok)
67574 +           break;
67575 +    }  
67576 +    
67577 +    if (bp->m_size == 0)
67578 +    {
67579 +       if (cansleep)
67580 +       {
67581 +           mp->m_want = 1;
67582 +           kcondvar_wait (&mp->m_wait, &mp->m_lock, &flags);
67583 +           goto again;
67584 +       }
67585 +       spin_unlock_irqrestore (&mp->m_lock, flags);
67586 +       return (0);
67587 +    }
67588 +
67589 +    /* found an appropriate map entry - so take the bit out which we want */
67590 +    if (bp->m_addr == addr) 
67591 +    {
67592 +       if (bp->m_size == size) 
67593 +       {
67594 +           /* allocate entire segment and compress map */
67595 +           bp2 = bp;
67596 +           while (bp2->m_size) 
67597 +           {
67598 +               bp2++;
67599 +               (bp2-1)->m_addr = bp2->m_addr;
67600 +               (bp2-1)->m_size = bp2->m_size;
67601 +           }
67602 +           mp->m_free++;
67603 +       }
67604 +       else 
67605 +       {
67606 +           /* take from start of segment */
67607 +           bp->m_addr += size;
67608 +           bp->m_size -= size;
67609 +       }
67610 +    }
67611 +    else 
67612 +    {
67613 +       if (bp->m_addr + bp->m_size == addr + size) 
67614 +       {
67615 +           /* take from end of segment */
67616 +           bp->m_size -= size;
67617 +       }
67618 +       else 
67619 +       {
67620 +           /* split the segment losing the last entry if there's no space */
67621 +           if (mp->m_free == 0) 
67622 +           {
67623 +               /* find last map entry */
67624 +               for (lbp = bp; lbp->m_size != 0; lbp++)
67625 +                   ;
67626 +               lbp--;
67627 +               
67628 +               if (lbp->m_size > (lbp-1)->m_size)
67629 +                   lbp--;
67630 +               
67631 +               printk ("%s: lost resource map entry [%lx, %lx]\n",
67632 +                       mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size);
67633 +               
67634 +               *lbp = *(lbp+1);
67635 +               (lbp+1)->m_size = 0;
67636 +               
67637 +               mp->m_free++;
67638 +           }
67639 +           
67640 +           for (bp2 = bp; bp2->m_size != 0; bp2++)
67641 +               continue;
67642 +           
67643 +           for (bp2--; bp2 > bp; bp2--)
67644 +           {
67645 +               (bp2+1)->m_addr = bp2->m_addr;
67646 +               (bp2+1)->m_size = bp2->m_size;
67647 +           }
67648 +
67649 +           mp->m_free--;
67650 +           
67651 +           (bp+1)->m_addr = addr + size;
67652 +           (bp+1)->m_size = bp->m_addr + bp->m_size - (addr + size);
67653 +           bp->m_size = addr - bp->m_addr;
67654 +       }
67655 +    }
67656 +
67657 +    spin_unlock_irqrestore (&mp->m_lock, flags);
67658 +    return (addr);
67659 +}
67660 +
67661 +void
67662 +ep_rmfree (EP_RMAP *mp, size_t size, u_long addr)
67663 +{
67664 +    EP_RMAP_ENTRY *bp;
67665 +    unsigned long t;
67666 +    unsigned long flags;
67667 +
67668 +    spin_lock_irqsave (&mp->m_lock, flags);
67669 +
67670 +    ASSERT (addr != 0 && size > 0);
67671 +       
67672 +again:
67673 +    /* find the piece of the map which starts after the returned space
67674 +     * or the end of the map */
67675 +    for (bp = &mp->m_map[0]; bp->m_addr <= addr && bp->m_size != 0; bp++)
67676 +       ;
67677 +
67678 +    /* bp points to the piece to the right of where we want to go */
67679 +    
67680 +    if (bp > &mp->m_map[0] && (bp-1)->m_addr + (bp-1)->m_size >= addr) 
67681 +    {
67682 +       /* merge with piece on the left */
67683 +       
67684 +       ASSERT ((bp-1)->m_addr + (bp-1)->m_size <= addr);
67685 +       
67686 +       (bp-1)->m_size += size;
67687 +       
67688 +       ASSERT (bp->m_size == 0 || addr+size <= bp->m_addr);
67689 +       
67690 +       if (bp->m_size && (addr + size) == bp->m_addr)
67691 +       {
67692 +           /* merge with the piece on the right by 
67693 +            * growing the piece on the left and shifting
67694 +            * the map down */
67695 +           
67696 +           ASSERT ((addr + size) <= bp->m_addr);
67697 +           
67698 +           (bp-1)->m_size += bp->m_size;
67699 +           while (bp->m_size) 
67700 +           {
67701 +               bp++;
67702 +               (bp-1)->m_addr = bp->m_addr;
67703 +               (bp-1)->m_size = bp->m_size;
67704 +           }
67705 +           
67706 +           mp->m_free++;
67707 +       }
67708 +    }
67709 +    else if (addr + size >= bp->m_addr && bp->m_size)
67710 +    {
67711 +       /* merge with piece to the right */
67712 +       
67713 +       ASSERT ((addr + size) <= bp->m_addr);
67714 +       
67715 +       bp->m_addr -= size;
67716 +       bp->m_size += size;
67717 +    }
67718 +    else
67719 +    {
67720 +       /* doesn't join with left or right - check for map
67721 +          overflow and discard the smallest of the last or
67722 +          next to last entries */
67723 +
67724 +       if (mp->m_free == 0)
67725 +       {
67726 +           EP_RMAP_ENTRY *lbp;
67727 +           
67728 +           /* find last map entry */
67729 +           for (lbp = bp; lbp->m_size != 0; lbp++)
67730 +               ;
67731 +           lbp--;
67732 +           
67733 +           if (lbp->m_size > (lbp-1)->m_size)
67734 +               lbp--;
67735 +           
67736 +           printk ("%s: lost resource map entry [%lx, %lx]\n", 
67737 +                   mp->m_name, lbp->m_addr, lbp->m_addr + lbp->m_size);
67738 +           
67739 +           *lbp = *(lbp+1);
67740 +           (lbp+1)->m_size = 0;
67741 +
67742 +           mp->m_free++;
67743 +           goto again;
67744 +       }
67745 +
67746 +       /* make a new entry and push the remaining ones up */
67747 +       do {
67748 +           t = bp->m_addr;
67749 +           bp->m_addr = addr;
67750 +           addr = t;
67751 +           t = bp->m_size;
67752 +           bp->m_size = size;
67753 +           bp++;
67754 +       } while ((size = t) != 0);
67755 +
67756 +       mp->m_free--;
67757 +    }
67758 +    
67759 +    /* if anyone blocked on rmalloc failure, wake 'em up */
67760 +    if (mp->m_want)
67761 +    {
67762 +       mp->m_want = 0;
67763 +       kcondvar_wakeupall (&mp->m_wait, &mp->m_lock);
67764 +    }
67765 +
67766 +    spin_unlock_irqrestore (&mp->m_lock, flags);
67767 +}
67768 +
67769 +/*
67770 + * Local variables:
67771 + * c-file-style: "stroustrup"
67772 + * End:
67773 + */
67774 diff -urN clean/drivers/net/qsnet/ep/spinlock_elan3_thread.c linux-2.6.9/drivers/net/qsnet/ep/spinlock_elan3_thread.c
67775 --- clean/drivers/net/qsnet/ep/spinlock_elan3_thread.c  1969-12-31 19:00:00.000000000 -0500
67776 +++ linux-2.6.9/drivers/net/qsnet/ep/spinlock_elan3_thread.c    2003-10-07 09:22:38.000000000 -0400
67777 @@ -0,0 +1,44 @@
67778 +/*
67779 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
67780 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
67781 + *
67782 + *    For licensing information please see the supplied COPYING file
67783 + *
67784 + */
67785 +
67786 +#ident "@(#)$Id: spinlock_elan3_thread.c,v 1.9 2003/10/07 13:22:38 david Exp $"
67787 +/*      $Source: /cvs/master/quadrics/epmod/spinlock_elan3_thread.c,v $ */
67788 +
67789 +#include <qsnet/types.h>
67790 +
67791 +#include <elan3/e3types.h>
67792 +#include <elan3/events.h>
67793 +#include <elan3/elanregs.h>
67794 +#include <elan3/intrinsics.h>
67795 +
67796 +#include <elan/nmh.h>
67797 +#include <elan/kcomm.h>
67798 +#include <elan/epcomms.h>
67799 +
67800 +#include "kcomm_elan3.h"
67801 +#include "epcomms_elan3.h"
67802 +
67803 +void
67804 +ep3_spinblock (EP3_SPINLOCK_ELAN *sle, EP3_SPINLOCK_MAIN *sl)
67805 +{
67806 +    do {
67807 +       sl->sl_seq = sle->sl_seq;                       /* Release my lock */
67808 +       
67809 +       while (sle->sl_lock)                            /* Wait until the main */
67810 +           c_break();                                  /* releases the lock */
67811 +       
67812 +       sle->sl_seq++;                                  /* and try and relock */
67813 +    } while (sle->sl_lock);
67814 +}
67815 +
67816 +
67817 +/*
67818 + * Local variables:
67819 + * c-file-style: "stroustrup"
67820 + * End:
67821 + */
67822 diff -urN clean/drivers/net/qsnet/ep/statemap.c linux-2.6.9/drivers/net/qsnet/ep/statemap.c
67823 --- clean/drivers/net/qsnet/ep/statemap.c       1969-12-31 19:00:00.000000000 -0500
67824 +++ linux-2.6.9/drivers/net/qsnet/ep/statemap.c 2004-09-01 12:13:43.000000000 -0400
67825 @@ -0,0 +1,385 @@
67826 +/*
67827 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
67828 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
67829 + *
67830 + *    For licensing information please see the supplied COPYING file
67831 + *
67832 + */
67833 +
67834 +#ident "@(#)$Id: statemap.c,v 1.12 2004/09/01 16:13:43 david Exp $"
67835 +/*      $Source: /cvs/master/quadrics/epmod/statemap.c,v $ */
67836 +
67837 +#include <qsnet/kernel.h>
67838 +#include <elan/statemap.h>
67839 +
67840 +/******************************** global state bitmap stuff **********************************/
67841 +static int
67842 +statemap_setmapbit (bitmap_t *map, int offset, int bit)
67843 +{
67844 +   bitmap_t *e    = &map[offset >> BT_ULSHIFT];
67845 +   bitmap_t  mask = ((bitmap_t)1) << (offset & BT_ULMASK);
67846 +   int       rc = ((*e) & mask) != 0;
67847 +   
67848 +   if (bit)
67849 +   {
67850 +      *e |= mask;
67851 +      return (!rc);
67852 +   }
67853 +
67854 +   *e &= ~mask;
67855 +   return (rc);
67856 +}
67857 +
67858 +static int
67859 +statemap_firstsegbit (bitmap_t seg)
67860 +{
67861 +   int            bit = 0;
67862 +   
67863 +   if (seg == 0)
67864 +      return (-1);
67865 +
67866 +#if (BT_ULSHIFT == 6)
67867 +   if ((seg & 0xffffffffL) == 0)
67868 +   {
67869 +      seg >>= 32;
67870 +      bit += 32;
67871 +   }
67872 +#elif (BT_ULSHIFT != 5)
67873 +# error "Unexpected value of BT_ULSHIFT"
67874 +#endif
67875 +
67876 +   if ((seg & 0xffff) == 0)
67877 +   {
67878 +      seg >>= 16;
67879 +      bit += 16;
67880 +   }
67881 +      
67882 +   if ((seg & 0xff) == 0)
67883 +   {
67884 +      seg >>= 8;
67885 +      bit += 8;
67886 +   }
67887 +      
67888 +   if ((seg & 0xf) == 0)
67889 +   {
67890 +      seg >>= 4;
67891 +      bit += 4;
67892 +   }
67893 +      
67894 +   if ((seg & 0x3) == 0)
67895 +   {
67896 +      seg >>= 2;
67897 +      bit += 2;
67898 +   }
67899 +
67900 +   return (((seg & 0x1) == 0) ? bit + 1 : bit);
67901 +}
67902 +
67903 +bitmap_t
67904 +statemap_getseg (statemap_t *map, unsigned int offset)
67905 +{
67906 +   ASSERT (offset < map->size);
67907 +   ASSERT ((offset & BT_ULMASK) == 0);
67908 +
67909 +   return (map->bitmap[offset >> BT_ULSHIFT]);
67910 +}
67911 +
67912 +void
67913 +statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg)
67914 +{
67915 +   ASSERT (offset < map->size);
67916 +   ASSERT ((offset & BT_ULMASK) == 0);
67917 +
67918 +   offset >>= BT_ULSHIFT;
67919 +   if (map->bitmap[offset] == seg)
67920 +      return;
67921 +
67922 +   map->bitmap[offset] = seg;
67923 +
67924 +   if (statemap_setmapbit (map->changemap2, offset,       1) &&
67925 +       statemap_setmapbit (map->changemap1, offset >>= BT_ULSHIFT, 1))
67926 +      statemap_setmapbit (map->changemap0, offset >>= BT_ULSHIFT, 1);
67927 +}
67928 +
67929 +bitmap_t
67930 +statemap_getbits (statemap_t *map, unsigned int offset, int nbits)
67931 +{
67932 +   int      index = offset >> BT_ULSHIFT;
67933 +   bitmap_t mask  = (nbits == BT_NBIPUL) ? (bitmap_t) -1 : (((bitmap_t)1) << nbits) - 1;
67934 +   
67935 +   ASSERT (nbits <= BT_NBIPUL);
67936 +   ASSERT (offset + nbits <= map->size);
67937 +
67938 +   offset &= BT_ULMASK;
67939 +   if (offset + nbits <= BT_NBIPUL)
67940 +      return ((map->bitmap[index] >> offset) & mask);
67941 +   
67942 +   return (((map->bitmap[index] >> offset) |
67943 +           (map->bitmap[index + 1] << (BT_NBIPUL - offset))) & mask);
67944 +}
67945 +
67946 +void
67947 +statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits)
67948 +{
67949 +   int      index = offset >> BT_ULSHIFT;
67950 +   bitmap_t mask;
67951 +   bitmap_t seg;
67952 +   bitmap_t newseg;
67953 +
67954 +   ASSERT (nbits <= BT_NBIPUL);
67955 +   ASSERT (offset + nbits <= map->size);
67956 +
67957 +   offset &= BT_ULMASK;
67958 +   if (offset + nbits <= BT_NBIPUL)
67959 +   {
67960 +      mask = ((nbits == BT_NBIPUL) ? -1 : ((((bitmap_t)1) << nbits) - 1)) << offset;
67961 +      seg = map->bitmap[index];
67962 +      newseg = ((bits << offset) & mask) | (seg & ~mask);
67963 +      
67964 +      if (seg == newseg)
67965 +        return;
67966 +   
67967 +      map->bitmap[index] = newseg;
67968 +      
67969 +      if (statemap_setmapbit (map->changemap2, index,       1) &&
67970 +         statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1))
67971 +        statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1);
67972 +      return;
67973 +   }
67974 +   
67975 +   mask = ((bitmap_t)-1) << offset;
67976 +   seg = map->bitmap[index];
67977 +   newseg = ((bits << offset) & mask) | (seg & ~mask);
67978 +
67979 +   if (seg != newseg)
67980 +   {
67981 +      map->bitmap[index] = newseg;
67982 +      
67983 +      if (statemap_setmapbit (map->changemap2, index,       1) &&
67984 +         statemap_setmapbit (map->changemap1, index >> BT_ULSHIFT, 1))
67985 +        statemap_setmapbit (map->changemap0, index >> (2 * BT_ULSHIFT), 1);
67986 +   }
67987 +   
67988 +   index++;
67989 +   offset = BT_NBIPUL - offset;
67990 +   mask = (((bitmap_t)1) << (nbits - offset)) - 1;
67991 +   seg = map->bitmap[index];
67992 +   newseg = ((bits >> offset) & mask) | (seg & ~mask);
67993 +   
67994 +   if (seg == newseg)
67995 +      return;
67996 +   
67997 +   map->bitmap[index] = newseg;
67998 +   
67999 +   if (statemap_setmapbit (map->changemap2, index,       1) &&
68000 +       statemap_setmapbit (map->changemap1, index >>= BT_ULSHIFT, 1))
68001 +      statemap_setmapbit (map->changemap0, index >>= BT_ULSHIFT, 1);
68002 +}
68003 +
68004 +void
68005 +statemap_zero (statemap_t *dst)
68006 +{
68007 +   int       size       = dst->size;
68008 +   int       offset     = 0;
68009 +   bitmap_t *changemap0 = dst->changemap0;
68010 +   bitmap_t *changemap1 = dst->changemap1;
68011 +   bitmap_t *changemap2 = dst->changemap2;
68012 +   bitmap_t *dstmap     = dst->bitmap;
68013 +   bitmap_t  bit0;
68014 +   bitmap_t  bit1;
68015 +   bitmap_t  bit2;
68016 +
68017 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
68018 +   {
68019 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
68020 +      {
68021 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, offset += BT_NBIPUL)
68022 +        {
68023 +            *dstmap = 0;
68024 +            *changemap2 |= bit2;
68025 +        }
68026 +        *changemap1 |= bit1;
68027 +      }
68028 +      *changemap0 |= bit0;
68029 +   }
68030 +}
68031 +   
68032 +void
68033 +statemap_setmap (statemap_t *dst, statemap_t *src)
68034 +{
68035 +   int       size       = dst->size;
68036 +   int       offset     = 0;
68037 +   bitmap_t *changemap0 = dst->changemap0;
68038 +   bitmap_t *changemap1 = dst->changemap1;
68039 +   bitmap_t *changemap2 = dst->changemap2;
68040 +   bitmap_t *dstmap     = dst->bitmap;
68041 +   bitmap_t *srcmap     = src->bitmap;
68042 +   bitmap_t  bit0;
68043 +   bitmap_t  bit1;
68044 +   bitmap_t  bit2;
68045 +
68046 +   ASSERT (src->size == size);
68047 +   
68048 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
68049 +   {
68050 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
68051 +      {
68052 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL)
68053 +           if (*dstmap != *srcmap)
68054 +           {
68055 +              *dstmap = *srcmap;
68056 +              *changemap2 |= bit2;
68057 +           }
68058 +        if (*changemap2 != 0)
68059 +           *changemap1 |= bit1;
68060 +      }
68061 +      if (*changemap1 != 0)
68062 +        *changemap0 |= bit0;
68063 +   }
68064 +}
68065 +
68066 +void
68067 +statemap_ormap (statemap_t *dst, statemap_t *src)
68068 +{
68069 +   int       size       = dst->size;
68070 +   int       offset     = 0;
68071 +   bitmap_t *changemap0 = dst->changemap0;
68072 +   bitmap_t *changemap1 = dst->changemap1;
68073 +   bitmap_t *changemap2 = dst->changemap2;
68074 +   bitmap_t *dstmap     = dst->bitmap;
68075 +   bitmap_t *srcmap     = src->bitmap;
68076 +   bitmap_t  bit0;
68077 +   bitmap_t  bit1;
68078 +   bitmap_t  bit2;
68079 +   bitmap_t  seg;
68080 +
68081 +   ASSERT (src->size == size);
68082 +   
68083 +   for (bit0 = 1; offset < size; bit0 <<= 1, changemap1++)
68084 +   {
68085 +      for (bit1 = 1; bit1 != 0 && offset < size; bit1 <<= 1, changemap2++)
68086 +      {
68087 +        for (bit2 = 1; bit2 != 0 && offset < size; bit2 <<= 1, dstmap++, srcmap++, offset += BT_NBIPUL)
68088 +        {
68089 +           seg = *dstmap | *srcmap;
68090 +           if (*dstmap != seg)
68091 +           {
68092 +              *dstmap = seg;
68093 +              *changemap2 |= bit2;
68094 +           }
68095 +        }
68096 +        if (*changemap2 != 0)
68097 +           *changemap1 |= bit1;
68098 +      }
68099 +      if (*changemap1 != 0)
68100 +        *changemap0 |= bit0;
68101 +   }
68102 +}
68103 +
68104 +int
68105 +statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange)
68106 +{
68107 +   int          bit0;
68108 +   bitmap_t    *cm1;
68109 +   int          bit1;
68110 +   bitmap_t    *cm2;
68111 +   int          bit2;
68112 +   unsigned int offset;
68113 +
68114 +   bit0 = statemap_firstsegbit (*(map->changemap0));
68115 +   if (bit0 < 0)
68116 +      return (-1);
68117 +
68118 +   offset = bit0;
68119 +   cm1 = map->changemap1 + offset;
68120 +   bit1 = statemap_firstsegbit (*cm1);
68121 +   ASSERT (bit1 >= 0);
68122 +
68123 +   offset = (offset << BT_ULSHIFT) + bit1;
68124 +   cm2 = map->changemap2 + offset;
68125 +   bit2 = statemap_firstsegbit (*cm2);
68126 +   ASSERT (bit2 >= 0);
68127 +   
68128 +   offset = (offset << BT_ULSHIFT) + bit2;
68129 +   *newseg = map->bitmap[offset];
68130 +
68131 +   if (clearchange &&
68132 +       (*cm2 &= ~(((bitmap_t)1) << bit2)) == 0 &&
68133 +       (*cm1 &= ~(((bitmap_t)1) << bit1)) == 0)
68134 +      map->changemap0[0] &= ~(((bitmap_t)1) << bit0);
68135 +
68136 +   return (offset << BT_ULSHIFT);
68137 +}
68138 +
68139 +int
68140 +statemap_changed (statemap_t *map)
68141 +{
68142 +   return ((*(map->changemap0) != 0));
68143 +}
68144 +
68145 +void
68146 +statemap_reset (statemap_t *map)
68147 +{
68148 +   bzero (map->changemap0, map->changemap_nob + map->bitmap_nob);
68149 +}
68150 +
68151 +void
68152 +statemap_copy (statemap_t *dst, statemap_t *src)
68153 +{
68154 +   ASSERT (dst->size == src->size);
68155 +   bcopy (src->changemap0, dst->changemap0, src->changemap_nob + src->bitmap_nob);
68156 +}
68157 +
68158 +void
68159 +statemap_clearchanges (statemap_t *map)
68160 +{
68161 +   if (statemap_changed (map))
68162 +      bzero (map->changemap0, map->changemap_nob);
68163 +}
68164 +
68165 +bitmap_t *
68166 +statemap_tobitmap (statemap_t *map)
68167 +{
68168 +    return (map->bitmap);
68169 +}
68170 +
68171 +statemap_t *
68172 +statemap_create (int size)
68173 +{
68174 +   int   struct_entries     = (sizeof (statemap_t) * 8 + (BT_NBIPUL-1)) >> BT_ULSHIFT;
68175 +   int   bitmap_entries     = (size + (BT_NBIPUL-1)) >> BT_ULSHIFT;
68176 +   int   changemap2_entries = (bitmap_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
68177 +   int   changemap1_entries = (changemap2_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
68178 +   int   changemap0_entries = (changemap1_entries + (BT_NBIPUL-1)) >> BT_ULSHIFT;
68179 +   int   changemap_entries  = changemap0_entries + changemap1_entries + changemap2_entries;
68180 +   int   nob                = (struct_entries + bitmap_entries + changemap_entries) * sizeof (bitmap_t);
68181 +   statemap_t *map;
68182 +
68183 +   ASSERT ((1 << BT_ULSHIFT) == BT_NBIPUL);
68184 +   ASSERT (changemap0_entries == 1);
68185 +
68186 +   KMEM_ZALLOC (map, statemap_t *, nob, 1);
68187 +
68188 +   map->size = size;
68189 +   map->nob  = nob;
68190 +   map->changemap_nob = changemap_entries * sizeof (bitmap_t);
68191 +   map->bitmap_nob = bitmap_entries * sizeof (bitmap_t);
68192 +   map->changemap0 = ((bitmap_t *)map) + struct_entries;
68193 +   map->changemap1 = map->changemap0 + changemap0_entries;
68194 +   map->changemap2 = map->changemap1 + changemap1_entries;
68195 +   map->bitmap     = map->changemap2 + changemap2_entries;
68196 +
68197 +   return (map);
68198 +}
68199 +
68200 +void
68201 +statemap_destroy (statemap_t *map)
68202 +{
68203 +   KMEM_FREE (map, map->nob);
68204 +}
68205 +
68206 +/*
68207 + * Local variables:
68208 + * c-file-style: "stroustrup"
68209 + * End:
68210 + */
68211 diff -urN clean/drivers/net/qsnet/ep/statusmon.h linux-2.6.9/drivers/net/qsnet/ep/statusmon.h
68212 --- clean/drivers/net/qsnet/ep/statusmon.h      1969-12-31 19:00:00.000000000 -0500
68213 +++ linux-2.6.9/drivers/net/qsnet/ep/statusmon.h        2003-10-07 09:22:38.000000000 -0400
68214 @@ -0,0 +1,44 @@
68215 +/*
68216 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
68217 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
68218 + *
68219 + *    For licensing information please see the supplied COPYING file
68220 + *
68221 + */
68222 +
68223 +#ident "@(#)$Id: statusmon.h,v 1.6 2003/10/07 13:22:38 david Exp $"
68224 +/*      $Source: /cvs/master/quadrics/epmod/statusmon.h,v $*/
68225 +
68226 +#ifndef __ELAN3_STATUSMON_H
68227 +#define __ELAN3_STATUSMON_H
68228 +
68229 +typedef struct statusmon_node
68230 +{
68231 +    u_int      NodeId;
68232 +    u_int      State;
68233 +} STATUSMON_SGMT;
68234 +
68235 +typedef struct statusmon_level
68236 +{
68237 +    unsigned      Width;
68238 +    STATUSMON_SGMT Nodes[CM_SGMTS_PER_LEVEL];
68239 +} STATUSMON_LEVEL;
68240 +
68241 +typedef struct statusmon_msg
68242 +{
68243 +    unsigned       Type;
68244 +    unsigned       NodeId;
68245 +    unsigned       NumLevels;
68246 +    unsigned       TopLevel;
68247 +    unsigned        Role;
68248 +    STATUSMON_LEVEL Levels[CM_MAX_LEVELS];
68249 +} STATUSMON_MSG;
68250 +
68251 +
68252 +#endif /* __ELAN3_STATUSMON_H */
68253 +
68254 +/*
68255 + * Local variables:
68256 + * c-file-style: "stroustrup"
68257 + * End:
68258 + */
68259 diff -urN clean/drivers/net/qsnet/ep/support.c linux-2.6.9/drivers/net/qsnet/ep/support.c
68260 --- clean/drivers/net/qsnet/ep/support.c        1969-12-31 19:00:00.000000000 -0500
68261 +++ linux-2.6.9/drivers/net/qsnet/ep/support.c  2004-09-30 10:59:15.000000000 -0400
68262 @@ -0,0 +1,109 @@
68263 +/*
68264 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
68265 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
68266 + *
68267 + *    For licensing information please see the supplied COPYING file
68268 + *
68269 + */
68270 +
68271 +#ident "@(#)$Id: support.c,v 1.39 2004/09/30 14:59:15 david Exp $"
68272 +/*      $Source: /cvs/master/quadrics/epmod/support.c,v $ */
68273 +
68274 +#include <qsnet/kernel.h>
68275 +#include <elan/kcomm.h>
68276 +
68277 +/****************************************************************************************/
68278 +/*
68279 + * Nodeset/flush callbacks.
68280 + */
68281 +int
68282 +ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg)
68283 +{
68284 +    EP_CALLBACK *cb;
68285 +    
68286 +    KMEM_ALLOC (cb, EP_CALLBACK *, sizeof (EP_CALLBACK), 1);
68287 +    
68288 +    cb->Routine = routine;
68289 +    cb->Arg     = arg;
68290 +
68291 +    kmutex_lock (&rail->CallbackLock);
68292 +    cb->Next = rail->CallbackList[idx];
68293 +    rail->CallbackList[idx] = cb;
68294 +    kmutex_unlock (&rail->CallbackLock);
68295 +    
68296 +    return (ESUCCESS);
68297 +}
68298 +
68299 +void
68300 +ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg)
68301 +{
68302 +    EP_CALLBACK  *cb;
68303 +    EP_CALLBACK **predp;
68304 +
68305 +    kmutex_lock (&rail->CallbackLock);
68306 +    for (predp = &rail->CallbackList[idx]; (cb = *predp); predp = &cb->Next)
68307 +       if (cb->Routine == routine && cb->Arg == arg)
68308 +           break;
68309 +
68310 +    if (cb == NULL)
68311 +       panic ("ep_remove_member_callback");
68312 +    
68313 +    *predp = cb->Next;
68314 +    kmutex_unlock (&rail->CallbackLock);
68315 +    
68316 +    KMEM_FREE (cb, sizeof (EP_CALLBACK));
68317 +}
68318 +
68319 +void
68320 +ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *map)
68321 +{
68322 +    EP_CALLBACK *cb;
68323 +
68324 +    kmutex_lock (&rail->CallbackLock);
68325 +
68326 +    rail->CallbackStep = idx;
68327 +
68328 +    for (cb = rail->CallbackList[idx]; cb; cb = cb->Next) {
68329 +       (cb->Routine) (cb->Arg, map);
68330 +    }
68331 +    kmutex_unlock (&rail->CallbackLock);
68332 +}
68333 +
68334 +unsigned int
68335 +ep_backoff (EP_BACKOFF *backoff, int type)
68336 +{
68337 +    static int bcount[EP_NUM_BACKOFF] = {1, 16, 32, 64, 128, 256, 512, 1024};
68338 +    
68339 +    if (backoff->type != type)
68340 +    {
68341 +       backoff->type  = type;
68342 +       backoff->indx  = 0;
68343 +       backoff->count = 0;
68344 +    }
68345 +
68346 +    if (++backoff->count > bcount[backoff->indx] && backoff->indx < (EP_NUM_BACKOFF-1))
68347 +    {
68348 +       backoff->indx++;
68349 +       backoff->count = 0;
68350 +    }
68351 +
68352 +    return (backoff->indx);
68353 +}
68354 +
68355 +/* Generic checksum algorithm */
68356 +uint16_t
68357 +CheckSum (char *msg, int nob)
68358 +{
68359 +    uint16_t sum = 0;
68360 +   
68361 +    while (nob-- > 0)
68362 +       sum = sum * 13 + *msg++;
68363 +
68364 +    return (sum);
68365 +}
68366 +
68367 +/*
68368 + * Local variables:
68369 + * c-file-style: "stroustrup"
68370 + * End:
68371 + */
68372 diff -urN clean/drivers/net/qsnet/ep/support_elan3.c linux-2.6.9/drivers/net/qsnet/ep/support_elan3.c
68373 --- clean/drivers/net/qsnet/ep/support_elan3.c  1969-12-31 19:00:00.000000000 -0500
68374 +++ linux-2.6.9/drivers/net/qsnet/ep/support_elan3.c    2005-07-20 07:35:37.000000000 -0400
68375 @@ -0,0 +1,2123 @@
68376 +/*
68377 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
68378 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
68379 + *
68380 + *    For licensing information please see the supplied COPYING file
68381 + *
68382 + */
68383 +
68384 +#ident "@(#)$Id: support_elan3.c,v 1.47.2.1 2005/07/20 11:35:37 mike Exp $"
68385 +/*      $Source: /cvs/master/quadrics/epmod/support_elan3.c,v $ */
68386 +
68387 +#include <qsnet/kernel.h>
68388 +#include <qsnet/kthread.h>
68389 +
68390 +#include <elan/kcomm.h>
68391 +#include <elan/epsvc.h>
68392 +#include <elan/epcomms.h>
68393 +
68394 +#include "kcomm_vp.h"
68395 +#include "kcomm_elan3.h"
68396 +#include "epcomms_elan3.h"
68397 +#include "debug.h"
68398 +
68399 +#include <elan3/thread.h>
68400 +#include <elan3/urom_addrs.h>
68401 +
68402 +/****************************************************************************************/
68403 +#define DMA_RING_NEXT_POS(ring)      ((ring)->Position+1 == ring->Entries ? 0 : ((ring)->Position+1))
68404 +#define DMA_RING_PREV_POS(ring,pos)  ((pos) == 0 ? (ring)->Entries-1 : (pos) - 1)
68405 +
68406 +static int 
68407 +DmaRingCreate (EP3_RAIL *rail, EP3_DMA_RING *ring, int ctxnum, int entries)
68408 +{
68409 +    unsigned long pgnum = (ctxnum * sizeof (E3_CommandPort)) / PAGE_SIZE;
68410 +    unsigned long pgoff = (ctxnum * sizeof (E3_CommandPort)) & (PAGE_SIZE-1);
68411 +    int           s;    
68412 +        
68413 +    /* set up the initial position */
68414 +    ring->Entries  = entries;
68415 +    ring->Position = 0;
68416 +    
68417 +    if (! (ring->pEvent = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_BlockCopyEvent), 0, &ring->epEvent)))
68418 +    {
68419 +       ring->CommandPort = (ioaddr_t) NULL;
68420 +       return (ENOMEM);
68421 +    }
68422 +    
68423 +    if (! (ring->pDma = ep_alloc_elan (&rail->Generic, entries * sizeof (E3_DMA), 0, &ring->epDma)))
68424 +    {
68425 +       ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent));
68426 +
68427 +       ring->CommandPort = (ioaddr_t) NULL;
68428 +       return (ENOMEM);
68429 +    }
68430 +    
68431 +    if (! (ring->pDoneBlk = ep_alloc_main (&rail->Generic, entries * sizeof (E3_uint32), 0, &ring->epDoneBlk)))
68432 +    {
68433 +       ep_free_elan (&rail->Generic, ring->epEvent, entries * sizeof (E3_BlockCopyEvent));
68434 +       ep_free_elan (&rail->Generic, ring->epDma,   entries * sizeof (E3_DMA));
68435 +
68436 +       ring->CommandPort = (ioaddr_t) NULL;
68437 +       return (ENOMEM);
68438 +    }
68439 +    
68440 +    if (MapDeviceRegister (rail->Device, ELAN3_BAR_COMMAND_PORT, &ring->CommandPage, pgnum * PAGE_SIZE, PAGE_SIZE, &ring->CommandPageHandle) != ESUCCESS)
68441 +    {
68442 +       ep_free_elan (&rail->Generic, ring->epEvent,   entries * sizeof (E3_BlockCopyEvent));
68443 +       ep_free_elan (&rail->Generic, ring->epDma,     entries * sizeof (E3_DMA));
68444 +       ep_free_main (&rail->Generic, ring->epDoneBlk, entries * sizeof (E3_uint32));
68445 +
68446 +       ring->CommandPort = (ioaddr_t) NULL;
68447 +       return (ENOMEM);
68448 +    }
68449 +    ring->CommandPort = ring->CommandPage + pgoff;
68450 +       
68451 +    for (s = 0; s < entries; s++)
68452 +    {
68453 +       /* setup the event */
68454 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Type),   
68455 +                          EV_TYPE_BCOPY | EV_TYPE_DMA | DMA_RING_DMA_ELAN(ring, s));
68456 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Source), DMA_RING_DMA_ELAN(ring,s)  | EV_WCOPY);
68457 +       elan3_sdram_writel(rail->Device, DMA_RING_EVENT(ring,s) + offsetof(E3_BlockCopyEvent,ev_Dest),   DMA_RING_DONE_ELAN(ring,s) | EV_TYPE_BCOPY_WORD );         
68458 +
68459 +       /* need to set all the doneBlks to appear that they have completed */
68460 +       ring->pDoneBlk[s] = DMA_RING_DMA_ELAN(ring,s)  | EV_WCOPY;
68461 +    }
68462 +
68463 +    return 0; /* success */
68464 +}
68465 +
68466 +static void
68467 +DmaRingRelease(EP3_RAIL *rail, EP3_DMA_RING *ring)
68468 +{
68469 +    if (ring->CommandPage != (ioaddr_t) 0)
68470 +    {
68471 +       UnmapDeviceRegister(rail->Device, &ring->CommandPageHandle);
68472 +
68473 +       ep_free_elan (&rail->Generic, ring->epEvent,   ring->Entries * sizeof (E3_BlockCopyEvent));
68474 +       ep_free_elan (&rail->Generic, ring->epDma,     ring->Entries * sizeof (E3_DMA));
68475 +       ep_free_main (&rail->Generic, ring->epDoneBlk, ring->Entries * sizeof (E3_uint32));
68476 +    }
68477 +    ring->CommandPage = (ioaddr_t) 0;
68478 +}
68479 +
68480 +void 
68481 +DmaRingsRelease (EP3_RAIL *rail)
68482 +{
68483 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_CRITICAL]);
68484 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_HIGH_PRI]);
68485 +    DmaRingRelease (rail, &rail->DmaRings[EP3_RING_LOW_PRI]);
68486 +}
68487 +
68488 +int 
68489 +DmaRingsCreate (EP3_RAIL *rail)
68490 +{
68491 +    if (DmaRingCreate (rail, &rail->DmaRings[EP3_RING_CRITICAL], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_CRITICAL, EP3_RING_CRITICAL_LEN) ||
68492 +       DmaRingCreate (rail, &rail->DmaRings[EP3_RING_HIGH_PRI], ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_HIGH_PRI, EP3_RING_HIGH_PRI_LEN) ||
68493 +       DmaRingCreate (rail, &rail->DmaRings[EP3_RING_LOW_PRI],  ELAN3_DMARING_BASE_CONTEXT_NUM + EP3_RING_LOW_PRI,  EP3_RING_LOW_PRI_LEN))
68494 +    {
68495 +       DmaRingsRelease (rail);
68496 +       return (ENOMEM);
68497 +    }
68498 +  
68499 +    return 0;
68500 +}
68501 +
68502 +static int 
68503 +DmaRingNextSlot (EP3_DMA_RING *ring)
68504 +{
68505 +    int pos  = ring->Position;
68506 +    int npos = DMA_RING_NEXT_POS(ring);
68507 +
68508 +    if (ring->pDoneBlk[npos] == EP3_EVENT_ACTIVE)
68509 +       return (-1);
68510 +    
68511 +    ring->pDoneBlk[pos] = EP3_EVENT_ACTIVE;
68512 +
68513 +    ring->Position = npos; /* move on one */
68514 +
68515 +    return (pos);
68516 +}
68517 +
68518 +
68519 +/****************************************************************************************/
68520 +/*
68521 + * Dma/event command issueing - these handle cproc queue overflow traps.
68522 + */
68523 +static int
68524 +DmaRunQueueSizeCheck (EP3_RAIL *rail, E3_uint32 len)
68525 +{
68526 +    E3_uint64  FandBPtr = read_reg64 (rail->Device, DProc_SysCntx_FPtr);
68527 +    E3_uint32  FPtr, BPtr;
68528 +    E3_uint32  qlen;
68529 +
68530 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
68531 +    FPtr = (FandBPtr & 0xFFFFFFFFull);
68532 +    BPtr = (FandBPtr >> 32);
68533 +#else
68534 +    FPtr = (FandBPtr >> 32);
68535 +    BPtr = (FandBPtr & 0xFFFFFFFFull);
68536 +#endif
68537 +    
68538 +    qlen = (((BPtr - FPtr)/sizeof (E3_DMA)) & (E3_SysCntxQueueSize-1));
68539 +    
68540 +    if      (qlen < 4)   IncrStat (rail, DmaQueueLength[0]);
68541 +    else if (qlen < 8)   IncrStat (rail, DmaQueueLength[1]);
68542 +    else if (qlen < 16)  IncrStat (rail, DmaQueueLength[2]);
68543 +    else if (qlen < 32)  IncrStat (rail, DmaQueueLength[3]);
68544 +    else if (qlen < 64)  IncrStat (rail, DmaQueueLength[4]);
68545 +    else if (qlen < 128) IncrStat (rail, DmaQueueLength[5]);
68546 +    else if (qlen < 240) IncrStat (rail, DmaQueueLength[6]);
68547 +    else                 IncrStat (rail, DmaQueueLength[7]);
68548 +       
68549 +    return (qlen < len);
68550 +}
68551 +
68552 +int
68553 +IssueDma (EP3_RAIL *rail, E3_DMA_BE * dmabe, int type, int retryThread)
68554 +{
68555 +    ELAN3_DEV     *dev = rail->Device;
68556 +    EP3_RETRY_DMA *retry;
68557 +    EP3_DMA_RING  *ring;
68558 +    int           slot;
68559 +    int           i, res;
68560 +    unsigned long flags;
68561 +
68562 +    ASSERT (dmabe->s.dma_direction == DMA_WRITE || dmabe->s.dma_direction == DMA_READ_REQUEUE);
68563 +
68564 +    ASSERT (! EP_VP_ISDATA(dmabe->s.dma_destVProc) ||
68565 +           (dmabe->s.dma_direction == DMA_WRITE ? 
68566 +            EP_VP_TO_NODE(dmabe->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
68567 +            EP_VP_TO_NODE(dmabe->s.dma_destVProc) == rail->Generic.Position.pos_nodeid));
68568 +    
68569 +    /*
68570 +     * If we're not the retry thread - then don't issue this DMA
68571 +     * if there are any already queued on the retry lists with
68572 +     * higher or equal priority than this one that are ready to
68573 +     * retry.
68574 +     */
68575 +    if (! retryThread)
68576 +    {
68577 +       for (i = EP_RETRY_BASE; i < type; i++)
68578 +       {
68579 +           if (list_empty (&rail->DmaRetries[i]))
68580 +               continue;
68581 +
68582 +           retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
68583 +               
68584 +           if (AFTER (lbolt, retry->RetryTime))
68585 +           {
68586 +               IncrStat (rail, IssueDmaFail[type]);
68587 +               return (ISSUE_COMMAND_RETRY);
68588 +           }
68589 +       }
68590 +    }
68591 +
68592 +    /*
68593 +     * Depending on the type of DMA we're issuing - throttle back
68594 +     * issueing of it if the DMA run queue is too full.  This then
68595 +     * prioritises the "special" messages and completing data 
68596 +     * transfers which have matched a receive buffer.
68597 +     */
68598 +
68599 +    if (type >= EP_RETRY_LOW_PRI_RETRY)
68600 +    {
68601 +       if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 2))
68602 +       {
68603 +           IncrStat (rail, IssueDmaFail[type]);
68604 +           return (ISSUE_COMMAND_RETRY);
68605 +       }
68606 +       ring = &rail->DmaRings[EP3_RING_LOW_PRI];
68607 +    } 
68608 +    else if (type == EP_RETRY_LOW_PRI)
68609 +    {
68610 +       if (! DmaRunQueueSizeCheck (rail, E3_SysCntxQueueSize / 3))
68611 +       {
68612 +           IncrStat (rail, IssueDmaFail[type]);
68613 +           return (ISSUE_COMMAND_RETRY);
68614 +       }
68615 +       ring = &rail->DmaRings[EP3_RING_LOW_PRI];
68616 +    }
68617 +    else if (type >= EP_RETRY_HIGH_PRI)
68618 +       ring = &rail->DmaRings[EP3_RING_HIGH_PRI];
68619 +    else
68620 +       ring = &rail->DmaRings[EP3_RING_CRITICAL];
68621 +
68622 +    local_irq_save (flags);
68623 +    if (! spin_trylock (&dev->CProcLock))
68624 +    {
68625 +       IncrStat (rail, IssueDmaFail[type]);
68626 +
68627 +       res = ISSUE_COMMAND_RETRY;
68628 +    }
68629 +    else
68630 +    {
68631 +       if ((slot = DmaRingNextSlot (ring)) == -1)
68632 +       {
68633 +           IncrStat (rail, IssueDmaFail[type]);
68634 +           
68635 +           res = ISSUE_COMMAND_RETRY;
68636 +       }
68637 +       else
68638 +       {
68639 +           EPRINTF4 (DBG_COMMAND, "IssueDma: type %08x size %08x Elan source %08x Elan dest %08x\n",
68640 +                     dmabe->s.dma_type, dmabe->s.dma_size, dmabe->s.dma_source, dmabe->s.dma_dest);
68641 +           EPRINTF2 (DBG_COMMAND, "          dst event %08x cookie/proc %08x\n",
68642 +                     dmabe->s.dma_destEvent, dmabe->s.dma_destCookieVProc);
68643 +           EPRINTF2 (DBG_COMMAND, "          src event %08x cookie/proc %08x\n",
68644 +                     dmabe->s.dma_srcEvent, dmabe->s.dma_srcCookieVProc);
68645 +
68646 +           elan3_sdram_copyq_to_sdram (dev,  dmabe,  DMA_RING_DMA(ring, slot), sizeof (E3_DMA));                       /* PCI write block */
68647 +           elan3_sdram_writel (dev, DMA_RING_EVENT(ring, slot) + offsetof (E3_BlockCopyEvent, ev_Count), 1);   /* PCI write */
68648 +           
68649 +           mb();                                                               /* ensure writes to main memory completed */
68650 +           writel (DMA_RING_EVENT_ELAN(ring,slot), (void *)(ring->CommandPort + offsetof (E3_CommandPort, SetEvent)));
68651 +           mmiob();                                                            /* and flush through IO writes */
68652 +           
68653 +           res = ISSUE_COMMAND_OK;
68654 +       }
68655 +       spin_unlock (&dev->CProcLock);
68656 +    }
68657 +    local_irq_restore (flags);
68658 +
68659 +    return (res);
68660 +}
68661 +
68662 +int
68663 +IssueWaitevent (EP3_RAIL *rail, E3_Addr value)
68664 +{
68665 +    ELAN3_DEV     *dev   = rail->Device;
68666 +    int           res;
68667 +    unsigned long flags;
68668 +    
68669 +    spin_lock_irqsave (&dev->IntrLock, flags);
68670 +
68671 +    ASSERT (rail->CommandPortEventTrap == FALSE);
68672 +
68673 +    /*
68674 +     * Disable the command processor interrupts, so that we don't see
68675 +     * spurious interrupts appearing.
68676 +     */
68677 +    DISABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
68678 +
68679 +    EPRINTF1 (DBG_COMMAND, "IssueWaitevent: %08x\n", value);
68680 +
68681 +    mb();                                                              /* ensure writes to main memory completed */
68682 +    writel (value, (void *)(rail->CommandPort + offsetof (E3_CommandPort, WaitEvent0)));
68683 +    mmiob();                                                           /* and flush through IO writes */
68684 +    
68685 +    do {
68686 +       res = CheckCommandQueueFlushed (rail->Ctxt, EventComQueueNotEmpty, ISSUE_COMMAND_CANT_WAIT, &flags);
68687 +
68688 +       EPRINTF1 (DBG_COMMAND, "IssueWaitevent: CheckCommandQueueFlushed -> %d\n", res);
68689 +
68690 +       if (res == ISSUE_COMMAND_WAIT)
68691 +           HandleCProcTrap (dev, 0, NULL);
68692 +    } while (res != ISSUE_COMMAND_OK);
68693 +
68694 +    if (! rail->CommandPortEventTrap)
68695 +       res = ISSUE_COMMAND_OK;
68696 +    else
68697 +    {
68698 +       rail->CommandPortEventTrap = FALSE;
68699 +       res = ISSUE_COMMAND_TRAPPED;
68700 +    }
68701 +
68702 +    EPRINTF1 (DBG_COMMAND, "IssueWaitevent: -> %d\n", res);
68703 +
68704 +    /*
68705 +     * Re-enable the command processor interrupt as we've finished 
68706 +     * polling it.
68707 +     */
68708 +    ENABLE_INT_MASK (dev, INT_CProc | INT_ComQueue);
68709 +
68710 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
68711 +
68712 +    return (res);
68713 +}
68714 +
68715 +void
68716 +IssueSetevent (EP3_RAIL *rail, E3_Addr value)
68717 +{
68718 +    EPRINTF1 (DBG_COMMAND, "IssueSetevent: %08x\n", value);
68719 +
68720 +    mb();                                                              /* ensure writes to main memory completed */
68721 +    writel (value, (void *)(rail->CommandPort + offsetof (E3_CommandPort, SetEvent)));
68722 +    mmiob();                                                           /* and flush through IO writes */
68723 +}
68724 +
68725 +void
68726 +IssueRunThread (EP3_RAIL *rail, E3_Addr value)
68727 +{
68728 +    EPRINTF1 (DBG_COMMAND, "IssueRunThread: %08x\n", value);
68729 +
68730 +    mb();                                                              /* ensure writes to main memory completed */
68731 +    writel (value, (void *)(rail->CommandPort + offsetof (E3_CommandPort, RunThread)));
68732 +    mmiob();                                                           /* and flush through IO writes */
68733 +}
68734 +
68735 +/****************************************************************************************/
68736 +/*
68737 + * DMA retry list management
68738 + */
68739 +static unsigned DmaRetryTimes[EP_NUM_RETRIES]; 
68740 +
68741 +static void
68742 +ep3_dma_retry (EP3_RAIL *rail)
68743 +{
68744 +    EP3_COOKIE    *cp;
68745 +    int            res;
68746 +    int                   vp;
68747 +    unsigned long  flags;
68748 +    int            i;
68749 +
68750 +    kernel_thread_init("ep3_dma_retry");
68751 +
68752 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
68753 +
68754 +    for (;;)
68755 +    {
68756 +       long yieldAt   = lbolt + (hz/10);
68757 +       long retryTime = 0;
68758 +
68759 +       if (rail->DmaRetryThreadShouldStop)
68760 +           break;
68761 +       
68762 +       for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
68763 +       {
68764 +           while (! list_empty (&rail->DmaRetries[i]))
68765 +           {
68766 +               EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
68767 +
68768 +               if (! AFTER (lbolt, retry->RetryTime))
68769 +                   break;
68770 +               
68771 +               if (rail->DmaRetryThreadShouldStall || AFTER (lbolt, yieldAt))
68772 +                   goto cant_do_more;
68773 +
68774 +               EPRINTF2 (DBG_RETRY, "%s: DmaRetryThread: retry %p\n", rail->Generic.Name, retry);
68775 +               EPRINTF5 (DBG_RETRY, "%s:                 %08x %08x %08x %08x\n",
68776 +                         rail->Generic.Name, retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest);
68777 +               EPRINTF5 (DBG_RETRY, "%s:                 %08x %08x %08x %08x\n",
68778 +                         rail->Generic.Name, retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc,
68779 +                         retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc);
68780 +#if defined(DEBUG)
68781 +               if (retry->Dma.s.dma_direction == DMA_WRITE)
68782 +                   cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_srcEvent);
68783 +               else
68784 +                   cp = LookupEventCookie (rail, &rail->CookieTable, retry->Dma.s.dma_destEvent);
68785 +
68786 +               ASSERT (cp != NULL || (retry->Dma.s.dma_srcEvent == 0 && retry->Dma.s.dma_direction == DMA_WRITE && retry->Dma.s.dma_isRemote));
68787 +               
68788 +               if (cp && cp->Operations->DmaVerify)
68789 +                   cp->Operations->DmaVerify (rail, cp->Arg, &retry->Dma);
68790 +#endif
68791 +
68792 +#if defined(DEBUG_ASSERT)
68793 +               if (retry->Dma.s.dma_direction == DMA_WRITE)
68794 +                   vp = retry->Dma.s.dma_destVProc;
68795 +               else
68796 +                   vp = retry->Dma.s.dma_srcVProc;
68797 +
68798 +               ASSERT (!EP_VP_ISDATA(vp) || 
68799 +                       (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED &&
68800 +                        rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE));
68801 +#endif
68802 +               spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
68803 +               res = IssueDma (rail, &(retry->Dma), i, TRUE);
68804 +               spin_lock_irqsave (&rail->DmaRetryLock, flags);
68805 +               
68806 +               if (res != ISSUE_COMMAND_OK)
68807 +                   goto cant_do_more;
68808 +               
68809 +               /* Command issued, so remove from list, and add to free list */
68810 +               list_del (&retry->Link);
68811 +               list_add (&retry->Link, &rail->DmaRetryFreeList);
68812 +           }
68813 +       }
68814 +    cant_do_more:
68815 +       
68816 +       for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
68817 +       {
68818 +           if (!list_empty (&rail->DmaRetries[i]))
68819 +           {
68820 +               EP3_RETRY_DMA *retry = list_entry (rail->DmaRetries[i].next, EP3_RETRY_DMA, Link);
68821 +
68822 +               retryTime = retryTime ? MIN(retryTime, retry->RetryTime) : retry->RetryTime;
68823 +           }
68824 +       }
68825 +
68826 +       if (retryTime && !AFTER (retryTime, lbolt))
68827 +           retryTime = lbolt + 1;
68828 +
68829 +       do {
68830 +           EPRINTF3 (DBG_RETRY, "%s: ep_cm_retry: %s %lx\n", rail->Generic.Name, rail->DmaRetryThreadShouldStall ? "stalled" : "sleeping", retryTime);
68831 +           
68832 +           if (rail->DmaRetryTime == 0 || (retryTime != 0 && retryTime < rail->DmaRetryTime))
68833 +               rail->DmaRetryTime = retryTime;
68834 +           
68835 +           rail->DmaRetrySleeping = TRUE;
68836 +           
68837 +           if (rail->DmaRetryThreadShouldStall)                                        /* wakeup threads waiting in StallDmaRetryThread */
68838 +               kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);  /* for us to really go to sleep for good. */
68839 +
68840 +           if (rail->DmaRetryTime == 0 || rail->DmaRetryThreadShouldStall)
68841 +               kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
68842 +           else
68843 +               kcondvar_timedwait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags, rail->DmaRetryTime);
68844 +
68845 +           rail->DmaRetrySleeping = FALSE;
68846 +
68847 +       } while (rail->DmaRetryThreadShouldStall);
68848 +
68849 +       rail->DmaRetryTime = 0;
68850 +    }
68851 +
68852 +    rail->DmaRetryThreadStopped = 1;
68853 +    kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);
68854 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
68855 +
68856 +    kernel_thread_exit();
68857 +}
68858 +
68859 +void
68860 +StallDmaRetryThread (EP3_RAIL *rail)
68861 +{
68862 +    unsigned long flags;
68863 +
68864 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
68865 +    rail->DmaRetryThreadShouldStall++;
68866 +
68867 +    while (! rail->DmaRetrySleeping)
68868 +       kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
68869 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
68870 +}
68871 +
68872 +void 
68873 +ResumeDmaRetryThread (EP3_RAIL *rail)
68874 +{
68875 +    unsigned long flags;
68876 +
68877 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
68878 +
68879 +    ASSERT (rail->DmaRetrySleeping);
68880 +
68881 +    if (--rail->DmaRetryThreadShouldStall == 0)
68882 +    {
68883 +       rail->DmaRetrySleeping = 0;
68884 +       kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock);
68885 +    }
68886 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
68887 +}
68888 +
68889 +int
68890 +InitialiseDmaRetries (EP3_RAIL *rail)
68891 +{
68892 +    int i;
68893 +
68894 +    spin_lock_init (&rail->DmaRetryLock);
68895 +    kcondvar_init (&rail->DmaRetryWait);
68896 +
68897 +    for (i = 0; i < EP_NUM_RETRIES; i++)
68898 +       INIT_LIST_HEAD (&rail->DmaRetries[i]);
68899 +
68900 +    INIT_LIST_HEAD (&rail->DmaRetryFreeList);
68901 +
68902 +    DmaRetryTimes[EP_RETRY_HIGH_PRI]  = EP_RETRY_HIGH_PRI_TIME;
68903 +
68904 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
68905 +       DmaRetryTimes[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i;
68906 +    
68907 +    DmaRetryTimes[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME;
68908 +
68909 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
68910 +       DmaRetryTimes[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i;
68911 +    
68912 +    DmaRetryTimes[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME;
68913 +    DmaRetryTimes[EP_RETRY_NETERR]    = EP_RETRY_NETERR_TIME;
68914 +
68915 +    rail->DmaRetryInitialised = 1;
68916 +
68917 +    if (kernel_thread_create (ep3_dma_retry, (void *) rail) == 0)
68918 +    {
68919 +       spin_lock_destroy (&rail->DmaRetryLock);
68920 +       return (ENOMEM);
68921 +    }
68922 +
68923 +    rail->DmaRetryThreadStarted = 1;
68924 +
68925 +    return (ESUCCESS);
68926 +}
68927 +
68928 +void
68929 +DestroyDmaRetries (EP3_RAIL *rail)
68930 +{
68931 +    unsigned long flags;
68932 +
68933 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
68934 +    rail->DmaRetryThreadShouldStop = 1;
68935 +    while (rail->DmaRetryThreadStarted && !rail->DmaRetryThreadStopped)
68936 +    {
68937 +       kcondvar_wakeupall (&rail->DmaRetryWait, &rail->DmaRetryLock);
68938 +       kcondvar_wait (&rail->DmaRetryWait, &rail->DmaRetryLock, &flags);
68939 +    }
68940 +    rail->DmaRetryThreadStarted = 0;
68941 +    rail->DmaRetryThreadStopped = 0;
68942 +    rail->DmaRetryThreadShouldStop = 0;
68943 +    rail->DmaRetryInitialised = 0;
68944 +
68945 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
68946 +
68947 +    /* Everyone should have given back their retry dma's by now */
68948 +    ASSERT (rail->DmaRetryReserved == 0);
68949 +
68950 +    while (! list_empty (&rail->DmaRetryFreeList))
68951 +    {
68952 +       EP3_RETRY_DMA *retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
68953 +       
68954 +       list_del (&retry->Link);
68955 +
68956 +       KMEM_FREE (retry, sizeof (EP3_RETRY_DMA));
68957 +    }
68958 +
68959 +    kcondvar_destroy (&rail->DmaRetryWait);
68960 +    spin_lock_destroy (&rail->DmaRetryLock);
68961 +}
68962 +
68963 +int
68964 +ReserveDmaRetries (EP3_RAIL *rail, int count, EP_ATTRIBUTE attr)
68965 +{
68966 +    EP3_RETRY_DMA *retry;
68967 +    int                  remaining = count;
68968 +    unsigned long flags;
68969 +
68970 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
68971 +    
68972 +    if (remaining <= (rail->DmaRetryCount - rail->DmaRetryReserved))
68973 +    {
68974 +       rail->DmaRetryReserved += remaining;
68975 +
68976 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
68977 +       return (ESUCCESS);
68978 +    }
68979 +
68980 +    remaining -= (rail->DmaRetryCount - rail->DmaRetryReserved);
68981 +
68982 +    rail->DmaRetryReserved = rail->DmaRetryCount;
68983 +
68984 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
68985 +
68986 +    while (remaining)
68987 +    {
68988 +       KMEM_ALLOC (retry, EP3_RETRY_DMA *, sizeof (EP3_RETRY_DMA), !(attr & EP_NO_SLEEP));
68989 +       
68990 +       if (retry == NULL)
68991 +           goto failed;
68992 +
68993 +       /* clear E3_DMA */
68994 +       bzero((char *)(&(retry->Dma.s)), sizeof(E3_DMA));
68995 +
68996 +       remaining--; 
68997 +
68998 +       spin_lock_irqsave (&rail->DmaRetryLock, flags);
68999 +
69000 +       list_add (&retry->Link, &rail->DmaRetryFreeList);
69001 +
69002 +       rail->DmaRetryCount++;
69003 +       rail->DmaRetryReserved++;
69004 +
69005 +       spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
69006 +    }
69007 +    return (ESUCCESS);
69008 +
69009 + failed:
69010 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
69011 +    rail->DmaRetryReserved -= (count - remaining);
69012 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
69013 +    return (ENOMEM);
69014 +}
69015 +
69016 +void
69017 +ReleaseDmaRetries (EP3_RAIL *rail, int count)
69018 +{
69019 +    unsigned long flags;
69020 +
69021 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
69022 +    rail->DmaRetryReserved -= count;
69023 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
69024 +}
69025 +
69026 +void
69027 +QueueDmaForRetry (EP3_RAIL *rail, E3_DMA_BE *dma, int interval)
69028 +{
69029 +    EP3_RETRY_DMA *retry;
69030 +    unsigned long flags;
69031 +
69032 +    /*
69033 +     * When requeueing DMAs they must never be "READ" dma's since
69034 +     * these would fetch the DMA descriptor from the retry descriptor
69035 +     */
69036 +    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE);
69037 +    ASSERT (dma->s.dma_direction == DMA_WRITE ? 
69038 +           EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
69039 +           EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid);
69040 +
69041 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
69042 +    
69043 +    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList));
69044 +
69045 +    /* take an item off the free list */
69046 +    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
69047 +
69048 +    list_del (&retry->Link);
69049 +    
69050 +    EPRINTF5 (DBG_RETRY, "%s: QueueDmaForRetry: %08x %08x %08x %08x\n", rail->Generic.Name,
69051 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
69052 +    EPRINTF5 (DBG_RETRY, "%s:                   %08x %08x %08x %08x\n",rail->Generic.Name,
69053 +            dma->s.dma_destEvent, dma->s.dma_destCookieVProc,
69054 +            dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
69055 +
69056 +    /* copy the DMA into the retry descriptor */
69057 +    retry->Dma.s.dma_type            = dma->s.dma_type;
69058 +    retry->Dma.s.dma_size            = dma->s.dma_size;
69059 +    retry->Dma.s.dma_source          = dma->s.dma_source;
69060 +    retry->Dma.s.dma_dest            = dma->s.dma_dest;
69061 +    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent;
69062 +    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc;
69063 +    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent;
69064 +    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc;
69065 +
69066 +    retry->RetryTime = lbolt + DmaRetryTimes[interval];
69067 +
69068 +    /* chain onto the end of the appropriate retry list */
69069 +    list_add_tail (&retry->Link, &rail->DmaRetries[interval]);
69070 +
69071 +    /* now wakeup the retry thread */
69072 +    if (rail->DmaRetryTime == 0 || retry->RetryTime < rail->DmaRetryTime)
69073 +       rail->DmaRetryTime = retry->RetryTime;
69074 +    
69075 +    if (rail->DmaRetrySleeping && !rail->DmaRetryThreadShouldStall)
69076 +    {
69077 +       rail->DmaRetrySleeping = 0;
69078 +       kcondvar_wakeupone (&rail->DmaRetryWait, &rail->DmaRetryLock);
69079 +    }
69080 +
69081 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
69082 +}
69083 +
69084 +void
69085 +QueueDmaOnStalledList (EP3_RAIL *rail, E3_DMA_BE *dma)
69086 +{
69087 +    EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[dma->s.dma_direction == DMA_WRITE ? 
69088 +                                                 EP_VP_TO_NODE(dma->s.dma_srcVProc) :
69089 +                                                 EP_VP_TO_NODE(dma->s.dma_destVProc)];
69090 +    EP3_RETRY_DMA *retry;
69091 +    unsigned long flags;
69092 +
69093 +    /*
69094 +     * When requeueing DMAs they must never be "READ" dma's since
69095 +     * these would fetch the DMA descriptor from the retry descriptor
69096 +     */
69097 +    ASSERT (dma->s.dma_direction == DMA_WRITE || dma->s.dma_direction == DMA_READ_REQUEUE);
69098 +    ASSERT (dma->s.dma_direction == DMA_WRITE ? 
69099 +           EP_VP_TO_NODE(dma->s.dma_srcVProc) == rail->Generic.Position.pos_nodeid :
69100 +           EP_VP_TO_NODE(dma->s.dma_destVProc) == rail->Generic.Position.pos_nodeid);
69101 +
69102 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
69103 +    
69104 +    EP_ASSERT (&rail->Generic, !list_empty (&rail->DmaRetryFreeList));
69105 +
69106 +    /* take an item off the free list */
69107 +    retry = list_entry (rail->DmaRetryFreeList.next, EP3_RETRY_DMA, Link);
69108 +
69109 +    list_del (&retry->Link);
69110 +    
69111 +    EPRINTF5 (DBG_RETRY, "%s: QueueDmaOnStalledList: %08x %08x %08x %08x\n", rail->Generic.Name,
69112 +             dma->s.dma_type, dma->s.dma_size, dma->s.dma_source, dma->s.dma_dest);
69113 +    EPRINTF5 (DBG_RETRY, "%s:                        %08x %08x %08x %08x\n", rail->Generic.Name,
69114 +             dma->s.dma_destEvent, dma->s.dma_destCookieVProc,
69115 +             dma->s.dma_srcEvent, dma->s.dma_srcCookieVProc);
69116 +
69117 +    /* copy the DMA into the retry descriptor */
69118 +    retry->Dma.s.dma_type            = dma->s.dma_type;
69119 +    retry->Dma.s.dma_size            = dma->s.dma_size;
69120 +    retry->Dma.s.dma_source          = dma->s.dma_source;
69121 +    retry->Dma.s.dma_dest            = dma->s.dma_dest;
69122 +    retry->Dma.s.dma_destEvent       = dma->s.dma_destEvent;
69123 +    retry->Dma.s.dma_destCookieVProc = dma->s.dma_destCookieVProc;
69124 +    retry->Dma.s.dma_srcEvent        = dma->s.dma_srcEvent;
69125 +    retry->Dma.s.dma_srcCookieVProc  = dma->s.dma_srcCookieVProc;
69126 +
69127 +    /* chain onto the node cancelled dma list */
69128 +    list_add_tail (&retry->Link, &nodeRail->StalledDmas);
69129 +
69130 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
69131 +}
69132 +
69133 +void
69134 +FreeStalledDmas (EP3_RAIL *rail, unsigned int nodeId)
69135 +{
69136 +    EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[nodeId];
69137 +    struct list_head *el, *nel;
69138 +    unsigned long flags;
69139 +
69140 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
69141 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
69142 +       list_del (el);
69143 +       list_add (el, &rail->DmaRetryFreeList);
69144 +    }
69145 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
69146 +}
69147 +
69148 +/****************************************************************************************/
69149 +/*
69150 + * Connection management.
69151 + */
69152 +static void
69153 +DiscardingHaltOperation (ELAN3_DEV *dev, void *arg)
69154 +{
69155 +    EP3_RAIL *rail = (EP3_RAIL *) arg;
69156 +    unsigned long flags;
69157 +
69158 +    spin_lock_irqsave (&dev->IntrLock, flags);
69159 +    rail->HaltOpCompleted = 1;
69160 +    kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock);
69161 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
69162 +}
69163
69164 +typedef struct {
69165 +     EP3_RAIL  *rail;
69166 +    sdramaddr_t qaddr;
69167 +} SetQueueFullData;
69168
69169 +static void
69170 +SetQueueLockedOperation (ELAN3_DEV *dev, void *arg)
69171 +{
69172 +    SetQueueFullData *data =  (SetQueueFullData *) arg;
69173 +    unsigned long     flags;     
69174 +
69175 +    spin_lock_irqsave (&dev->IntrLock, flags);
69176 +
69177 +    elan3_sdram_writel  (dev, data->qaddr, E3_QUEUE_LOCKED | elan3_sdram_readl(dev, data->qaddr));
69178 +   
69179 +    data->rail->HaltOpCompleted = 1;
69180 +    kcondvar_wakeupall (&data->rail->HaltOpSleep, &dev->IntrLock);
69181 +
69182 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
69183 +}
69184 +
69185 +static void
69186 +FlushDmaQueuesHaltOperation (ELAN3_DEV *dev, void *arg)
69187 +{
69188 +    EP3_RAIL      *rail    = (EP3_RAIL *) arg;
69189 +    sdramaddr_t    FPtr, BPtr;
69190 +    sdramaddr_t           Base, Top;
69191 +    E3_DMA_BE      dma;
69192 +    EP_NODE_RAIL  *node;
69193 +    int            vp;
69194 +    unsigned long  flags;
69195 +
69196 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProc.s.FSR)) == 0);
69197 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData0.s.FSR.Status)) == 0);
69198 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData1.s.FSR.Status)) == 0);
69199 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData2.s.FSR.Status)) == 0);
69200 +    ASSERT (elan3_sdram_readl (dev, dev->TAndQBase + offsetof (E3_TrapAndQueue, DProcData3.s.FSR.Status)) == 0);
69201 +    
69202 +    FPtr  = read_reg32 (dev, DProc_SysCntx_FPtr);
69203 +    BPtr =  read_reg32 (dev, DProc_SysCntx_BPtr);
69204 +    Base  = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[0]);
69205 +    Top   = dev->TAndQBase + offsetof (E3_TrapAndQueue, SysCntxDmaQueue[E3_SysCntxQueueSize-1]);
69206 +    
69207 +    while (FPtr != BPtr)
69208 +    {
69209 +       elan3_sdram_copyq_from_sdram (dev, FPtr, &dma, sizeof (E3_DMA_BE));
69210 +       
69211 +       EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: %08x %08x %08x %08x\n", rail->Generic.Name,
69212 +                 dma.s.dma_type, dma.s.dma_size, dma.s.dma_source, dma.s.dma_dest);
69213 +       EPRINTF5 (DBG_DISCON, "%s:                              %08x %08x %08x %08x\n", rail->Generic.Name,
69214 +                 dma.s.dma_destEvent, dma.s.dma_destCookieVProc,
69215 +                dma.s.dma_srcEvent, dma.s.dma_srcCookieVProc);
69216 +       
69217 +       ASSERT ((dma.s.dma_u.s.Context & SYS_CONTEXT_BIT) != 0);
69218 +
69219 +       if (dma.s.dma_direction == DMA_WRITE)
69220 +           vp = dma.s.dma_destVProc;
69221 +       else
69222 +           vp = dma.s.dma_srcVProc;
69223 +       
69224 +       node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
69225 +
69226 +       ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE));
69227 +
69228 +       if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE)
69229 +       {
69230 +           /*
69231 +            * This is a DMA going to the node which is being removed, 
69232 +            * so move it onto the node dma list where it will get
69233 +            * handled later.
69234 +            */
69235 +           EPRINTF1 (DBG_DISCON, "%s: FlushDmaQueuesHaltOperation: move dma to cancelled list\n", rail->Generic.Name);
69236 +          
69237 +           if (dma.s.dma_direction != DMA_WRITE)
69238 +           {
69239 +               /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been 
69240 +                * modified by the elan to point at the dma in the rxd where it was issued
69241 +                * from */
69242 +               dma.s.dma_direction = (dma.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
69243 +           }
69244 +           
69245 +           QueueDmaOnStalledList (rail, &dma);
69246 +           
69247 +           /*
69248 +            * Remove the DMA from the queue by replacing it with one with
69249 +            * zero size and no events.
69250 +            *
69251 +            * NOTE: we must preserve the SYS_CONTEXT_BIT since the Elan uses this
69252 +            * to mark the appropriate run queue as empty.
69253 +            */
69254 +           dma.s.dma_type            = (SYS_CONTEXT_BIT << 16);
69255 +           dma.s.dma_size            = 0;
69256 +           dma.s.dma_source          = (E3_Addr) 0;
69257 +           dma.s.dma_dest            = (E3_Addr) 0;
69258 +           dma.s.dma_destEvent       = (E3_Addr) 0;
69259 +           dma.s.dma_destCookieVProc = 0;
69260 +           dma.s.dma_srcEvent        = (E3_Addr) 0;
69261 +           dma.s.dma_srcCookieVProc  = 0;
69262 +           
69263 +           elan3_sdram_copyq_to_sdram (dev, &dma, FPtr, sizeof (E3_DMA_BE));
69264 +       }
69265 +
69266 +       FPtr = (FPtr == Top) ? Base : FPtr + sizeof (E3_DMA);
69267 +    }
69268 +
69269 +    spin_lock_irqsave (&dev->IntrLock, flags);
69270 +    rail->HaltOpCompleted = 1;
69271 +    kcondvar_wakeupall (&rail->HaltOpSleep, &dev->IntrLock);
69272 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
69273 +}
69274 +
69275 +void
69276 +SetQueueLocked (EP3_RAIL *rail, sdramaddr_t qaddr)
69277 +{
69278 +    ELAN3_DEV        *dev = rail->Device;
69279 +    SetQueueFullData  data;
69280 +    unsigned long     flags;
69281 +    
69282 +    /* Ensure that the context filter changes have been seen by halting
69283 +     * then restarting the inputters - this also ensures that any setevent
69284 +     * commands used to issue dma's have completed and any trap has been
69285 +     * handled. */
69286 +    data.rail  = rail;
69287 +    data.qaddr = qaddr;
69288 +
69289 +    kmutex_lock (&rail->HaltOpMutex);
69290 +    spin_lock_irqsave (&dev->IntrLock, flags);
69291 +    QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx | INT_TProcHalted, SetQueueLockedOperation, &data);
69292 +
69293 +    while (! rail->HaltOpCompleted)
69294 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
69295 +    rail->HaltOpCompleted = 0;
69296 +
69297 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
69298 +    kmutex_unlock (&rail->HaltOpMutex);
69299 +}
69300 +
69301 +void
69302 +ep3_flush_filters (EP_RAIL *r)
69303 +{
69304 +    EP3_RAIL *rail = (EP3_RAIL *) r;
69305 +    ELAN3_DEV *dev  = rail->Device;
69306 +    unsigned long flags;
69307 +
69308 +    /* Ensure that the context filter changes have been seen by halting
69309 +     * then restarting the inputters - this also ensures that any setevent
69310 +     * commands used to issue dma's have completed and any trap has been
69311 +     * handled. */
69312 +    kmutex_lock (&rail->HaltOpMutex);
69313 +    spin_lock_irqsave (&dev->IntrLock, flags);
69314 +    QueueHaltOperation (dev, 0, NULL, INT_DiscardingSysCntx, DiscardingHaltOperation, rail);
69315 +    
69316 +    while (! rail->HaltOpCompleted)
69317 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
69318 +    rail->HaltOpCompleted = 0;
69319 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
69320 +    kmutex_unlock (&rail->HaltOpMutex);
69321 +}
69322 +
69323 +void
69324 +ep3_flush_queues (EP_RAIL *r)
69325 +{
69326 +    EP3_RAIL         *rail = (EP3_RAIL *) r;
69327 +    ELAN3_DEV         *dev  = rail->Device;
69328 +    struct list_head *el;
69329 +    struct list_head *nel;
69330 +    EP_NODE_RAIL     *node;
69331 +    unsigned long flags;
69332 +    int vp, i;
69333 +
69334 +    ASSERT (NO_LOCKS_HELD);
69335 +    
69336 +    /* First - stall the dma retry thread, so that it will no longer
69337 +     *         restart any dma's from the retry lists. */
69338 +    StallDmaRetryThread (rail);
69339 +
69340 +    /* Second - queue a halt operation to flush through all DMA's which are executing
69341 +     *          or on the run queue. */
69342 +    kmutex_lock (&rail->HaltOpMutex);
69343 +    spin_lock_irqsave (&dev->IntrLock, flags);
69344 +    QueueHaltOperation (dev, 0, NULL, INT_DProcHalted | INT_TProcHalted, FlushDmaQueuesHaltOperation, rail);
69345 +    while (! rail->HaltOpCompleted)
69346 +       kcondvar_wait (&rail->HaltOpSleep, &dev->IntrLock, &flags);
69347 +    rail->HaltOpCompleted = 0;
69348 +    spin_unlock_irqrestore (&dev->IntrLock, flags);
69349 +    kmutex_unlock (&rail->HaltOpMutex);
69350 +
69351 +    /* Third - run down the dma retry lists and move all entries to the cancelled
69352 +     *         list.  Any dma's which were on the run queues have already been
69353 +     *         moved there */
69354 +    spin_lock_irqsave (&rail->DmaRetryLock, flags);
69355 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
69356 +    {
69357 +       list_for_each_safe (el, nel, &rail->DmaRetries[i]) {
69358 +           EP3_RETRY_DMA *retry = list_entry (el, EP3_RETRY_DMA, Link);
69359 +
69360 +           if (retry->Dma.s.dma_direction == DMA_WRITE)
69361 +               vp = retry->Dma.s.dma_destVProc;
69362 +           else
69363 +               vp = retry->Dma.s.dma_srcVProc;
69364 +           
69365 +           node = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
69366 +           
69367 +           ASSERT (!EP_VP_ISDATA(vp) || (node->State >= EP_NODE_CONNECTED && node->State <= EP_NODE_LOCAL_PASSIVATE));
69368 +
69369 +           if (EP_VP_ISDATA(vp) && node->State == EP_NODE_LOCAL_PASSIVATE)
69370 +           {
69371 +               EPRINTF5 (DBG_DISCON, "%s: FlushDmaQueues: %08x %08x %08x %08x\n",rail->Generic.Name,
69372 +                         retry->Dma.s.dma_type, retry->Dma.s.dma_size, retry->Dma.s.dma_source, retry->Dma.s.dma_dest);
69373 +               EPRINTF5 (DBG_DISCON, "%s:                 %08x %08x %08x %08x\n", rail->Generic.Name,
69374 +                         retry->Dma.s.dma_destEvent, retry->Dma.s.dma_destCookieVProc,
69375 +                         retry->Dma.s.dma_srcEvent, retry->Dma.s.dma_srcCookieVProc);
69376 +
69377 +               list_del (&retry->Link);
69378 +
69379 +               list_add_tail (&retry->Link, &node->StalledDmas);
69380 +           }
69381 +       }
69382 +    }
69383 +    spin_unlock_irqrestore (&rail->DmaRetryLock, flags);
69384 +
69385 +    /* Finally - allow the dma retry thread to run again */
69386 +    ResumeDmaRetryThread (rail);
69387 +}
69388 +
69389 +/****************************************************************************************/
69390 +/* NOTE - we require that all cookies are non-zero, which is 
69391 + *        achieved because EP_VP_DATA() is non-zero for all
69392 + *        nodes */
69393 +E3_uint32
69394 +LocalCookie (EP3_RAIL *rail, unsigned remoteNode)
69395 +{
69396 +    E3_uint32     cookie;
69397 +    unsigned long flags;
69398 +
69399 +    spin_lock_irqsave (&rail->CookieLock, flags);
69400 +    cookie = DMA_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(rail->Generic.Position.pos_nodeid));
69401 +    spin_unlock_irqrestore (&rail->CookieLock, flags);
69402 +
69403 +    /* Main processor cookie for srcCookie - this is what is sent
69404 +     * to the remote node along with the setevent from the put
69405 +     * or the dma descriptor for a get */
69406 +    return (cookie);
69407 +}
69408 +
69409 +E3_uint32
69410 +RemoteCookie (EP3_RAIL *rail, u_int remoteNode)
69411 +{
69412 +    uint32_t      cookie;
69413 +    unsigned long flags;
69414 +
69415 +    spin_lock_irqsave (&rail->CookieLock, flags);
69416 +    cookie = DMA_REMOTE_COOKIE (rail->MainCookies[remoteNode], EP_VP_DATA(remoteNode));
69417 +    spin_unlock_irqrestore (&rail->CookieLock, flags);
69418 +
69419 +    /* Main processor cookie for dstCookie - this is the cookie
69420 +     * that the "remote put" dma uses for its setevent packets for
69421 +     * a get dma */
69422 +    
69423 +    return (cookie);
69424 +}
69425 +
69426 +/****************************************************************************************/
69427 +/*
69428 + * Event Cookie management.
69429 + *
69430 + *   We find the ep_cookie in one of two ways:
69431 + *     1) for block copy events
69432 + *          the cookie value is stored in the ev_Source - for EVIRQ events
69433 + *          it is also stored in the ev_Type
69434 + *     2) for normal events
69435 + *          we just use the event address.
69436 + */
69437 +void 
69438 +InitialiseCookieTable (EP3_COOKIE_TABLE *table)
69439 +{
69440 +    register int i;
69441 +    
69442 +    spin_lock_init (&table->Lock);
69443 +    
69444 +    for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++)
69445 +       table->Entries[i] = NULL;
69446 +}
69447 +
69448 +void
69449 +DestroyCookieTable (EP3_COOKIE_TABLE *table)
69450 +{
69451 +    register int i;
69452 +
69453 +    for (i = 0; i < EP3_COOKIE_HASH_SIZE; i++)
69454 +       if (table->Entries[i])
69455 +           printk ("DestroyCookieTable: entry %d not empty\n", i);
69456 +
69457 +    spin_lock_destroy (&table->Lock);
69458 +}
69459 +
69460 +void
69461 +RegisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp, E3_uint32 cookie, EP3_COOKIE_OPS *ops, void *arg)
69462 +{
69463 +    EP3_COOKIE *tcp;
69464 +    int hashval = EP3_HASH_COOKIE(cookie);
69465 +    unsigned long flags;
69466 +
69467 +    spin_lock_irqsave (&table->Lock, flags);
69468 +    
69469 +    cp->Operations = ops;
69470 +    cp->Arg        = arg;
69471 +    cp->Cookie     = cookie;
69472 +    
69473 +#if defined(DEBUG)
69474 +    /* Check that the cookie is unique */
69475 +    for (tcp = table->Entries[hashval]; tcp; tcp = tcp->Next)
69476 +       if (tcp->Cookie == cookie)
69477 +           panic ("RegisterEventCookie: non unique cookie\n");
69478 +#endif
69479 +    cp->Next = table->Entries[hashval];
69480 +    
69481 +    table->Entries[hashval] = cp;
69482 +    
69483 +    spin_unlock_irqrestore (&table->Lock, flags);
69484 +}
69485 +
69486 +void
69487 +DeregisterCookie (EP3_COOKIE_TABLE *table, EP3_COOKIE *cp)
69488 +{
69489 +    EP3_COOKIE **predCookiep;
69490 +    unsigned long flags;
69491 +
69492 +    spin_lock_irqsave (&table->Lock, flags);
69493 +    
69494 +    for (predCookiep = &table->Entries[EP3_HASH_COOKIE (cp->Cookie)]; *predCookiep; predCookiep = &(*predCookiep)->Next)
69495 +    {
69496 +       if (*predCookiep == cp)
69497 +       {
69498 +           *predCookiep = cp->Next;
69499 +           break;
69500 +       }
69501 +    }
69502 +
69503 +    spin_unlock_irqrestore (&table->Lock, flags);
69504 +
69505 +    cp->Operations = NULL;
69506 +    cp->Arg        = NULL;
69507 +    cp->Cookie     = 0;
69508 +    cp->Next       = NULL;
69509 +}
69510 +
69511 +EP3_COOKIE *
69512 +LookupCookie (EP3_COOKIE_TABLE *table, E3_Addr cookie)
69513 +{
69514 +    EP3_COOKIE *cp;
69515 +    unsigned long flags;
69516 +
69517 +    spin_lock_irqsave (&table->Lock, flags);
69518 +    
69519 +    for (cp = table->Entries[EP3_HASH_COOKIE(cookie)]; cp; cp = cp->Next)
69520 +       if (cp->Cookie == cookie)
69521 +           break;
69522 +    
69523 +    spin_unlock_irqrestore (&table->Lock, flags);
69524 +    return (cp);
69525 +}
69526 +
69527 +EP3_COOKIE *
69528 +LookupEventCookie (EP3_RAIL *rail, EP3_COOKIE_TABLE *table, E3_Addr eaddr)
69529 +{
69530 +    sdramaddr_t event;
69531 +    E3_uint32 type;
69532 +
69533 +    if ((event = ep_elan2sdram (&rail->Generic, eaddr)) != (sdramaddr_t) 0)
69534 +    {
69535 +       type = elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Type));
69536 +
69537 +       if (type & EV_TYPE_BCOPY)
69538 +           return (LookupCookie (table, elan3_sdram_readl (rail->Device, event + offsetof (E3_BlockCopyEvent, ev_Source)) & ~EV_WCOPY));
69539 +       else
69540 +           return (LookupCookie (table, eaddr));
69541 +    }
69542 +
69543 +    return (NULL);
69544 +}
69545 +
69546 +/****************************************************************************************/
69547 +/*
69548 + * Elan context operations - note only support interrupt ops.
69549 + */
69550 +static int        ep3_event     (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
69551 +static int        ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
69552 +static int        ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
69553 +static int        ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
69554 +static int        ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
69555 +static int        ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf);
69556 +
69557 +static E3_uint8   ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr);
69558 +static void       ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
69559 +static E3_uint16  ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr);
69560 +static void       ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
69561 +static E3_uint32  ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr);
69562 +static void       ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
69563 +static E3_uint64  ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr);
69564 +static void       ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
69565 +
69566 +ELAN3_OPS ep3_elan3_ops = 
69567 +{
69568 +    ELAN3_OPS_VERSION,         /* Version */
69569 +    
69570 +    NULL,                      /* Exception */
69571 +    NULL,                      /* GetWordItem */
69572 +    NULL,                      /* GetBlockItem */
69573 +    NULL,                      /* PutWordItem */
69574 +    NULL,                      /* PutBlockItem */
69575 +    NULL,                      /* PutbackItem */
69576 +    NULL,                      /* FreeWordItem */
69577 +    NULL,                      /* FreeBlockItem */
69578 +    NULL,                      /* CountItems */
69579 +    ep3_event,                 /* Event */
69580 +    NULL,                      /* SwapIn */
69581 +    NULL,                      /* SwapOut */
69582 +    NULL,                      /* FreePrivate */
69583 +    NULL,                      /* FixupNetworkError */
69584 +    ep3_dprocTrap,             /* DProcTrap */
69585 +    ep3_tprocTrap,             /* TProcTrap */
69586 +    ep3_iprocTrap,             /* IProcTrap */
69587 +    ep3_cprocTrap,             /* CProcTrap */
69588 +    ep3_cprocReissue,          /* CProcReissue */
69589 +    NULL,                      /* StartFaultCheck */
69590 +    NULL,                      /* EndFaulCheck */
69591 +    ep3_load8,                 /* Load8 */
69592 +    ep3_store8,                        /* Store8 */
69593 +    ep3_load16,                        /* Load16 */
69594 +    ep3_store16,               /* Store16 */
69595 +    ep3_load32,                        /* Load32 */
69596 +    ep3_store32,               /* Store32 */
69597 +    ep3_load64,                        /* Load64 */
69598 +    ep3_store64,               /* Store64 */
69599 +};
69600 +
69601 +static int
69602 +ep3_event (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag)
69603 +{
69604 +    EP3_RAIL  *rail = (EP3_RAIL *) ctxt->Private;
69605 +    EP3_COOKIE *cp   = LookupCookie (&rail->CookieTable, cookie);
69606 +    
69607 +    if (cp == NULL)
69608 +    {
69609 +       printk ("ep3_event: cannot find event cookie for %x\n", cookie);
69610 +       return (OP_HANDLED);
69611 +    }
69612 +    
69613 +    if (cp->Operations->Event)
69614 +       cp->Operations->Event(rail, cp->Arg);
69615 +    
69616 +    return (OP_HANDLED);
69617 +}
69618 +
69619 +/* Trap interface */
69620 +int
69621 +ep3_dprocTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap)
69622 +{
69623 +    EP3_RAIL        *rail = (EP3_RAIL *) ctxt->Private;
69624 +    ELAN3_DEV        *dev = rail->Device;
69625 +    EP3_COOKIE       *cp;
69626 +    E3_FaultSave_BE *FaultArea;
69627 +    E3_uint16        vp;
69628 +    int                     validTrap;
69629 +    int                     numFaults;
69630 +    int                     i;
69631 +    sdramaddr_t      event;
69632 +    E3_uint32        type;
69633 +    sdramaddr_t      dma;
69634 +    E3_DMA_BE        dmabe;
69635 +    int              status = EAGAIN;
69636 +
69637 +    EPRINTF4 (DBG_EPTRAP, "ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
69638 +             trap->Status.s.WakeupFunction, trap->Status.s.Context, 
69639 +             trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
69640 +    EPRINTF4 (DBG_EPTRAP, "              type %08x size %08x source %08x dest %08x\n",
69641 +             trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
69642 +    EPRINTF2 (DBG_EPTRAP, "              Dest event %08x cookie/proc %08x\n",
69643 +             trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
69644 +    EPRINTF2 (DBG_EPTRAP, "              Source event %08x cookie/proc %08x\n",
69645 +             trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
69646 +
69647 +    ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT);
69648 +
69649 +    switch (trap->Status.s.TrapType)
69650 +    {
69651 +    case MI_DmaPacketTimedOutOrPacketError:
69652 +       if (trap->Desc.s.dma_direction == DMA_WRITE)
69653 +           vp = trap->Desc.s.dma_destVProc;
69654 +       else
69655 +           vp = trap->Desc.s.dma_srcVProc;
69656 +       
69657 +       if (! trap->PacketInfo.s.PacketTimeout)
69658 +           status = ETIMEDOUT;
69659 +       else
69660 +       {
69661 +           status = EHOSTDOWN;
69662 +
69663 +           /* XXXX: dma timedout - might want to "restart" tree ? */
69664 +       }
69665 +       goto retry_dma;
69666 +
69667 +    case MI_DmaFailCountError:
69668 +       goto retry_dma;
69669 +
69670 +    case MI_TimesliceDmaQueueOverflow:
69671 +       IncrStat (rail, DprocDmaQueueOverflow);
69672 +
69673 +       goto retry_dma;
69674 +
69675 +    case MI_RemoteDmaCommand:
69676 +    case MI_RunDmaCommand:
69677 +    case MI_DequeueNonSysCntxDma:
69678 +    case MI_DequeueSysCntxDma:
69679 +       /*
69680 +        * The DMA processor has trapped due to outstanding prefetches from the previous 
69681 +        * dma.  The "current" dma has not been consumed, so we just ignore the trap
69682 +        */
69683 +       return (OP_HANDLED);
69684 +       
69685 +    case MI_EventQueueOverflow:
69686 +       IncrStat (rail, DprocEventQueueOverflow);
69687 +
69688 +       if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 &&
69689 +           ((type  = elan3_sdram_readl (dev, event + offsetof(E3_Event,ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
69690 +       {
69691 +           spin_unlock (&ctxt->Device->IntrLock);
69692 +           ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ | EV_TYPE_MASK_BCOPY)), OP_LWP);
69693 +           spin_lock (&ctxt->Device->IntrLock);
69694 +       }
69695 +       return (OP_HANDLED);
69696 +       
69697 +    case MI_DmaQueueOverflow:
69698 +       IncrStat (rail, DprocDmaQueueOverflow);
69699 +
69700 +       if ((event = ep_elan2sdram (&rail->Generic, trap->Desc.s.dma_srcEvent)) != (sdramaddr_t) 0 &&
69701 +           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA &&
69702 +           (dma  = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0)
69703 +       {
69704 +           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
69705 +           
69706 +           /* We only chain together DMA's of the same direction, so since
69707 +            * we took a DmaQueueOverflow trap - this means that DMA which
69708 +            * trapped was a WRITE dma - hence the one we chain to must also
69709 +            * be a WRITE dma.
69710 +            */
69711 +           ASSERT (dmabe.s.dma_direction == DMA_WRITE);
69712 +           
69713 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
69714 +
69715 +#ifdef DEBUG_ASSERT
69716 +           {
69717 +               E3_uint16     vp       = dmabe.s.dma_destVProc;
69718 +               EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
69719 +
69720 +               ASSERT (cp != NULL && (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE)));
69721 +           }
69722 +#endif
69723 +           cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
69724 +           
69725 +           return (OP_HANDLED);
69726 +       }
69727 +
69728 +       panic ("ep3_dprocTrap\n");
69729 +       return (OP_HANDLED);
69730 +
69731 +    default:
69732 +       break;
69733 +    }
69734 +
69735 +    /* If it's a dma which traps past the end of the source, then */
69736 +    /* just re-issue it */
69737 +    numFaults = validTrap = (trap->FaultSave.s.FSR.Status != 0);
69738 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
69739 +    {
69740 +       if (FaultArea->s.FSR.Status != 0)
69741 +       {
69742 +           numFaults++;
69743 +
69744 +           /* XXXX: Rev B Elans can prefetch data past the end of the dma descriptor */
69745 +           /*       if the fault relates to this, then just ignore it */
69746 +           if (FaultArea->s.FaultAddress >= (trap->Desc.s.dma_source+trap->Desc.s.dma_size))
69747 +           {
69748 +               static int i;
69749 +               if (i < 10 && i++ < 10)
69750 +                   printk ("ep3_dprocTrap: Rev B prefetch trap error %08x %08x\n",
69751 +                            FaultArea->s.FaultAddress, (trap->Desc.s.dma_source+trap->Desc.s.dma_size));
69752 +               continue;
69753 +           }
69754 +
69755 +           validTrap++;
69756 +       }
69757 +    }
69758 +
69759 +    /*
69760 +     * NOTE: for physical errors (uncorrectable ECC/PCI parity errors) the FSR will
69761 +     *       be zero - hence we will not see any faults - and none will be valid, 
69762 +     *       so only ignore a Rev B prefetch trap if we've seen some faults. Otherwise
69763 +     *       we can reissue a DMA which has already sent it's remote event !
69764 +     */
69765 +    if (numFaults != 0 && validTrap == 0)
69766 +    {
69767 +    retry_dma:
69768 +       if (trap->Desc.s.dma_direction == DMA_WRITE)
69769 +       {
69770 +           vp = trap->Desc.s.dma_destVProc;
69771 +           cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_srcEvent);
69772 +       }
69773 +       else
69774 +       {
69775 +           ASSERT (EP3_CONTEXT_ISDATA(trap->Desc.s.dma_queueContext) || trap->Desc.s.dma_direction == DMA_READ_REQUEUE);
69776 +
69777 +           vp = trap->Desc.s.dma_srcVProc;
69778 +           cp = LookupEventCookie (rail, &rail->CookieTable, trap->Desc.s.dma_destEvent);
69779 +
69780 +           /* for read dma's set the DMA_READ_REQUEUE bits as the dma_source has been 
69781 +            * modified by the elan to point at the dma in the rxd where it was issued
69782 +            * from */
69783 +           trap->Desc.s.dma_direction = (trap->Desc.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
69784 +       }
69785 +
69786 +#ifdef DEBUG_ASSERT
69787 +       {
69788 +           EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
69789 +
69790 +           ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
69791 +       }
69792 +#endif
69793 +       
69794 +       if (cp != NULL)
69795 +           cp->Operations->DmaRetry (rail, cp->Arg, &trap->Desc, status);
69796 +       else
69797 +       {
69798 +           ASSERT (trap->Desc.s.dma_direction == DMA_WRITE && trap->Desc.s.dma_srcEvent == 0 && trap->Desc.s.dma_isRemote);
69799 +
69800 +           QueueDmaForRetry (rail, &trap->Desc, EP_RETRY_ANONYMOUS);
69801 +       }
69802 +
69803 +       return (OP_HANDLED);
69804 +    }
69805 +    
69806 +    printk ("ep3_dprocTrap: WakeupFnt=%x Cntx=%x SuspAddr=%x TrapType=%s\n",
69807 +            trap->Status.s.WakeupFunction, trap->Status.s.Context, 
69808 +            trap->Status.s.SuspendAddr, MiToName (trap->Status.s.TrapType));
69809 +    printk ("                    FaultAddr=%x EventAddr=%x FSR=%x\n",
69810 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
69811 +            trap->FaultSave.s.FSR.Status);
69812 +    for (i = 0, FaultArea = &trap->Data0; i < 4; i++, FaultArea++)
69813 +       printk ("                  %d FaultAddr=%x EventAddr=%x FSR=%x\n", i,
69814 +                FaultArea->s.FaultAddress, FaultArea->s.EventAddress, FaultArea->s.FSR.Status);
69815 +    
69816 +    printk ("                  type %08x size %08x source %08x dest %08x\n",
69817 +            trap->Desc.s.dma_type, trap->Desc.s.dma_size, trap->Desc.s.dma_source, trap->Desc.s.dma_dest);
69818 +    printk ("                  Dest event %08x cookie/proc %08x\n",
69819 +            trap->Desc.s.dma_destEvent, trap->Desc.s.dma_destCookieVProc);
69820 +    printk ("                  Source event %08x cookie/proc %08x\n",
69821 +            trap->Desc.s.dma_srcEvent, trap->Desc.s.dma_srcCookieVProc);
69822 +
69823 +//    panic ("ep3_dprocTrap");
69824 +
69825 +    return (OP_HANDLED);
69826 +}
69827 +
69828 +int
69829 +ep3_tprocTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap)
69830 +{
69831 +    EP3_RAIL *rail = (EP3_RAIL *) ctxt->Private;
69832 +
69833 +    EPRINTF6 (DBG_EPTRAP, "ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n",
69834 +             trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits, trap->TrapBits.Bits, MiToName (trap->mi));
69835 +    EPRINTF4 (DBG_EPTRAP, "              g0=%08x g1=%08x g2=%08x g3=%08x\n", 
69836 +             trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
69837 +             trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
69838 +    EPRINTF4 (DBG_EPTRAP, "              g4=%08x g5=%08x g6=%08x g7=%08x\n", 
69839 +             trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
69840 +             trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
69841 +    EPRINTF4 (DBG_EPTRAP, "              o0=%08x o1=%08x o2=%08x o3=%08x\n", 
69842 +             trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
69843 +             trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
69844 +    EPRINTF4 (DBG_EPTRAP, "              o4=%08x o5=%08x o6=%08x o7=%08x\n", 
69845 +             trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
69846 +             trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
69847 +    EPRINTF4 (DBG_EPTRAP, "              l0=%08x l1=%08x l2=%08x l3=%08x\n", 
69848 +             trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
69849 +             trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
69850 +    EPRINTF4 (DBG_EPTRAP, "              l4=%08x l5=%08x l6=%08x l7=%08x\n", 
69851 +             trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
69852 +             trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
69853 +    EPRINTF4 (DBG_EPTRAP, "              i0=%08x i1=%08x i2=%08x i3=%08x\n", 
69854 +             trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
69855 +             trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
69856 +    EPRINTF4 (DBG_EPTRAP, "              i4=%08x i5=%08x i6=%08x i7=%08x\n", 
69857 +             trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
69858 +             trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
69859 +    
69860 +    ASSERT (trap->Status.s.Context & SYS_CONTEXT_BIT);
69861 +
69862 +    switch (trap->mi)
69863 +    {
69864 +    case MI_UnimplementedError:
69865 +       if (trap->TrapBits.s.ForcedTProcTrap)
69866 +       {
69867 +           ASSERT (trap->TrapBits.s.OutputWasOpen == 0);
69868 +           
69869 +           EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ForcedTProcTrap\n");
69870 +
69871 +           IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE));
69872 +           return (OP_HANDLED);
69873 +       }
69874 +
69875 +       if (trap->TrapBits.s.ThreadTimeout)
69876 +       {
69877 +           EPRINTF0 (DBG_EPTRAP, "ep3_tprocTrap: ThreadTimeout\n");
69878 +
69879 +           if (trap->Registers[REG_GLOBALS + (1^WordEndianFlip)] == 0)
69880 +               RollThreadToClose (ctxt, trap, trap->TrapBits.s.PacketAckValue);
69881 +           else
69882 +           {
69883 +               CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], trap->TrapBits.s.PacketAckValue);
69884 +
69885 +               RollThreadToClose (ctxt, trap, EP3_PAckStolen);
69886 +           }
69887 +               
69888 +           IssueRunThread (rail, SaveThreadToStack (ctxt, trap, FALSE));
69889 +           return (OP_HANDLED);
69890 +       }
69891 +
69892 +       if (trap->TrapBits.s.Unimplemented)
69893 +       {
69894 +           E3_uint32 instr = ELAN3_OP_LOAD32 (ctxt, trap->pc & PC_MASK);
69895 +
69896 +           PRINTF1 (ctxt, DBG_EPTRAP, "ep3_tprocTrap: unimplemented instruction %08x\n", instr);
69897 +
69898 +           if ((instr & OPCODE_MASK) == OPCODE_Ticc &&
69899 +               (instr & OPCODE_IMM)  == OPCODE_IMM &&
69900 +               (Ticc_COND(instr)     == Ticc_TA))
69901 +           {
69902 +               switch (INSTR_IMM(instr))
69903 +               {
69904 +               case EP3_UNIMP_TRAP_NO_DESCS:
69905 +                   StallThreadForNoDescs (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], 
69906 +                                          SaveThreadToStack (ctxt, trap, TRUE));
69907 +                   return (OP_HANDLED);
69908 +
69909 +               case EP3_UNIMP_TRAP_PACKET_NACKED:
69910 +                   CompleteEnvelope (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], E3_PAckDiscard);
69911 +
69912 +                   IssueRunThread (rail, SaveThreadToStack (ctxt, trap, TRUE));
69913 +                   return (OP_HANDLED);
69914 +
69915 +               case EP3_UNIMP_THREAD_HALTED: 
69916 +                   StallThreadForHalted (rail, trap->Registers[REG_GLOBALS + (1^WordEndianFlip)], 
69917 +                                         SaveThreadToStack (ctxt, trap, TRUE));
69918 +                   return (OP_HANDLED);
69919 +
69920 +               default:
69921 +                   break;
69922 +                   
69923 +               }
69924 +           }
69925 +       }
69926 +       break;
69927 +
69928 +    default:
69929 +       break;
69930 +    }
69931 +
69932 +    /* All other traps should not happen for kernel comms */
69933 +    printk ("ep3_tprocTrap: SP=%08x PC=%08x NPC=%08x DIRTY=%08x TRAP=%08x MI=%s\n",
69934 +            trap->sp, trap->pc, trap->npc, trap->DirtyBits.Bits,
69935 +            trap->TrapBits.Bits, MiToName (trap->mi));
69936 +    printk ("              FaultSave : FaultAddress %08x EventAddress %08x FSR %08x\n",
69937 +            trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress, trap->FaultSave.s.FSR.Status);
69938 +    printk ("              DataFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
69939 +            trap->DataFaultSave.s.FaultAddress, trap->DataFaultSave.s.EventAddress, trap->DataFaultSave.s.FSR.Status);
69940 +    printk ("              InstFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
69941 +            trap->InstFaultSave.s.FaultAddress, trap->InstFaultSave.s.EventAddress, trap->InstFaultSave.s.FSR.Status);
69942 +    printk ("              OpenFault : FaultAddress %08x EventAddress %08x FSR %08x\n",
69943 +            trap->OpenFaultSave.s.FaultAddress, trap->OpenFaultSave.s.EventAddress, trap->OpenFaultSave.s.FSR.Status);
69944 +
69945 +    if (trap->DirtyBits.s.GlobalsDirty)
69946 +    {
69947 +       printk ("              g0=%08x g1=%08x g2=%08x g3=%08x\n", 
69948 +                trap->Registers[REG_GLOBALS+(0^WordEndianFlip)], trap->Registers[REG_GLOBALS+(1^WordEndianFlip)], 
69949 +                trap->Registers[REG_GLOBALS+(2^WordEndianFlip)], trap->Registers[REG_GLOBALS+(3^WordEndianFlip)]);
69950 +       printk ("              g4=%08x g5=%08x g6=%08x g7=%08x\n", 
69951 +                trap->Registers[REG_GLOBALS+(4^WordEndianFlip)], trap->Registers[REG_GLOBALS+(5^WordEndianFlip)], 
69952 +                trap->Registers[REG_GLOBALS+(6^WordEndianFlip)], trap->Registers[REG_GLOBALS+(7^WordEndianFlip)]);
69953 +    }
69954 +    if (trap->DirtyBits.s.OutsDirty)
69955 +    {
69956 +       printk ("              o0=%08x o1=%08x o2=%08x o3=%08x\n", 
69957 +                trap->Registers[REG_OUTS+(0^WordEndianFlip)], trap->Registers[REG_OUTS+(1^WordEndianFlip)], 
69958 +                trap->Registers[REG_OUTS+(2^WordEndianFlip)], trap->Registers[REG_OUTS+(3^WordEndianFlip)]);
69959 +       printk ("              o4=%08x o5=%08x o6=%08x o7=%08x\n", 
69960 +                trap->Registers[REG_OUTS+(4^WordEndianFlip)], trap->Registers[REG_OUTS+(5^WordEndianFlip)], 
69961 +                trap->Registers[REG_OUTS+(6^WordEndianFlip)], trap->Registers[REG_OUTS+(7^WordEndianFlip)]);
69962 +    }
69963 +    if (trap->DirtyBits.s.LocalsDirty)
69964 +    {
69965 +       printk ("              l0=%08x l1=%08x l2=%08x l3=%08x\n", 
69966 +                trap->Registers[REG_LOCALS+(0^WordEndianFlip)], trap->Registers[REG_LOCALS+(1^WordEndianFlip)], 
69967 +                trap->Registers[REG_LOCALS+(2^WordEndianFlip)], trap->Registers[REG_LOCALS+(3^WordEndianFlip)]);
69968 +       printk ("              l4=%08x l5=%08x l6=%08x l7=%08x\n", 
69969 +                trap->Registers[REG_LOCALS+(4^WordEndianFlip)], trap->Registers[REG_LOCALS+(5^WordEndianFlip)], 
69970 +                trap->Registers[REG_LOCALS+(6^WordEndianFlip)], trap->Registers[REG_LOCALS+(7^WordEndianFlip)]);
69971 +    }
69972 +    if (trap->DirtyBits.s.InsDirty)
69973 +    {
69974 +       printk ("              i0=%08x i1=%08x i2=%08x i3=%08x\n", 
69975 +                trap->Registers[REG_INS+(0^WordEndianFlip)], trap->Registers[REG_INS+(1^WordEndianFlip)], 
69976 +                trap->Registers[REG_INS+(2^WordEndianFlip)], trap->Registers[REG_INS+(3^WordEndianFlip)]);
69977 +       printk ("              i4=%08x i5=%08x i6=%08x i7=%08x\n", 
69978 +                trap->Registers[REG_INS+(4^WordEndianFlip)], trap->Registers[REG_INS+(5^WordEndianFlip)], 
69979 +                trap->Registers[REG_INS+(6^WordEndianFlip)], trap->Registers[REG_INS+(7^WordEndianFlip)]);
69980 +    }
69981 +    
69982 +//    panic ("ep3_tprocTrap");
69983 +
69984 +    return (OP_HANDLED);
69985 +}
69986 +
69987 +int
69988 +ep3_iprocTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int channel)
69989 +{
69990 +    EP3_RAIL      *rail = (EP3_RAIL *) ctxt->Private;
69991 +    ELAN3_DEV      *dev = ctxt->Device;
69992 +    EP3_COOKIE    *cp;
69993 +    sdramaddr_t    event;
69994 +    E3_uint32      type;
69995 +    sdramaddr_t    dma;
69996 +    E3_DMA_BE      dmabe;
69997 +
69998 +    ASSERT (trap->Transactions[0].s.TrTypeCntx.s.Context & SYS_CONTEXT_BIT);
69999 +
70000 +    /*
70001 +     * first process the trap to determine the cause
70002 +     */
70003 +    InspectIProcTrap (ctxt, trap);
70004 +
70005 +    if (! trap->AckSent && trap->LockQueuePointer)             /* Must be a network error in a queueing DMA */
70006 +    {                                                          /* packet - unlock the queue */
70007 +       IncrStat (rail, QueueingPacketTrap);
70008 +
70009 +       SimulateUnlockQueue (ctxt, trap->LockQueuePointer, FALSE);
70010 +       return (OP_HANDLED);
70011 +    }
70012 +
70013 +    if (trap->AckSent && trap->BadTransaction)
70014 +    {
70015 +       spin_unlock (&dev->IntrLock);
70016 +
70017 +       /* NOTE - no network error fixup is necessary for system context
70018 +        *        messages since they are idempotent and are single packet 
70019 +        *        dmas
70020 +        */
70021 +       if (EP3_CONTEXT_ISDATA (trap->Transactions[0].s.TrTypeCntx.s.Context))
70022 +       {
70023 +           int nodeId = EP3_CONTEXT_TO_NODE(trap->Transactions[0].s.TrTypeCntx.s.Context);
70024 +           
70025 +           if (trap->DmaIdentifyTransaction)
70026 +           {
70027 +               printk ("%s: network error on dma identify <%x> from node %d\n", rail->Generic.Name, trap->DmaIdentifyTransaction->s.TrAddr, nodeId);
70028 +
70029 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->DmaIdentifyTransaction->s.TrAddr);
70030 +           }
70031 +           else if (trap->ThreadIdentifyTransaction)
70032 +           {
70033 +               printk ("%s: network error on thread identify <%x> from node %d\n", rail->Generic.Name, trap->ThreadIdentifyTransaction->s.TrAddr, nodeId);
70034 +
70035 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, channel, trap->ThreadIdentifyTransaction->s.TrAddr);
70036 +           }
70037 +           else
70038 +           {
70039 +               printk ("%s: network error on dma packet from node %d\n", rail->Generic.Name, nodeId);
70040 +
70041 +               ep_queue_network_error (&rail->Generic, nodeId, EP_NODE_NETERR_DMA_PACKET, channel, 0);
70042 +           }
70043 +       }
70044 +
70045 +       spin_lock (&dev->IntrLock);
70046 +       return (OP_HANDLED);
70047 +    }
70048 +    
70049 +    if (trap->AckSent)
70050 +    {
70051 +       if (trap->TrappedTransaction == NULL)
70052 +           return (OP_HANDLED);
70053 +       
70054 +       while (! trap->TrappedTransaction->s.TrTypeCntx.s.LastTrappedTrans)
70055 +       {
70056 +           E3_IprocTrapHeader_BE *hdrp  = trap->TrappedTransaction;
70057 +           E3_IprocTrapData_BE   *datap = trap->TrappedDataBuffer;
70058 +           
70059 +           ASSERT (hdrp->s.TrTypeCntx.s.StatusRegValid != 0);
70060 +           
70061 +           if ((hdrp->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) != 0)
70062 +           {
70063 +               printk ("ep3_iprocTrap: WRITEBLOCK : Addr %x\n", hdrp->s.TrAddr);
70064 +//             panic ("ep3_iprocTrap\n");
70065 +           }
70066 +           else
70067 +           {
70068 +               switch (hdrp->s.TrTypeCntx.s.Type & TR_OPCODE_TYPE_MASK)
70069 +               {
70070 +               case TR_SETEVENT & TR_OPCODE_TYPE_MASK:
70071 +                   switch (GET_STATUS_TRAPTYPE (hdrp->s.IProcTrapStatus))
70072 +                   {
70073 +                   case MI_DmaQueueOverflow:
70074 +                       IncrStat (rail, IprocDmaQueueOverflow);
70075 +
70076 +                       if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 &&
70077 +                           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_DMA) == EV_TYPE_DMA &&
70078 +                           (dma  = ep_elan2sdram (&rail->Generic, (type & ~EV_TYPE_MASK2))) != (sdramaddr_t) 0)
70079 +                       {
70080 +                           elan3_sdram_copyq_from_sdram (dev, dma, &dmabe, sizeof (E3_DMA));
70081 +                           
70082 +                           if (dmabe.s.dma_direction == DMA_WRITE)
70083 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
70084 +                           else
70085 +                           {
70086 +                               cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
70087 +                               
70088 +                               /* we MUST convert this into a DMA_READ_REQUEUE dma as if we don't the 
70089 +                                * DMA descriptor will be read from the EP3_RETRY_DMA rather than the 
70090 +                                * original DMA - this can then get reused and an incorrect DMA 
70091 +                                * descriptor sent 
70092 +                                * eventp->ev_Type contains the dma address with type in the lower bits 
70093 +                                */ 
70094 +                               
70095 +                               dmabe.s.dma_source    = (type & ~EV_TYPE_MASK2);
70096 +                               dmabe.s.dma_direction = (dmabe.s.dma_direction & ~DMA_READ) | DMA_READ_REQUEUE;
70097 +                           }
70098 +
70099 +#ifdef DEBUG_ASSERT
70100 +                           {
70101 +                               E3_uint16     vp       = (dmabe.s.dma_direction == DMA_WRITE ? dmabe.s.dma_destVProc : dmabe.s.dma_srcVProc);
70102 +                               EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
70103 +
70104 +                               ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
70105 +                           }
70106 +#endif
70107 +
70108 +                           if (cp != NULL)
70109 +                               cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
70110 +                           else
70111 +                           {
70112 +                               ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
70113 +                               
70114 +                               QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
70115 +                           }
70116 +                           break;
70117 +                       }
70118 +
70119 +                       printk ("ep3_iprocTrap: SETEVENT : %x - cannot find dma to restart\n", hdrp->s.TrAddr);
70120 +//                     panic ("ep3_iprocTrap\n");
70121 +                       break;
70122 +
70123 +                   case MI_EventQueueOverflow:
70124 +                   {
70125 +                       sdramaddr_t event;
70126 +                       E3_uint32   type;
70127 +
70128 +                       IncrStat (rail, IprocEventQueueOverflow);
70129 +
70130 +                       if ((event = ep_elan2sdram (&rail->Generic, hdrp->s.TrAddr)) != (sdramaddr_t) 0 &&
70131 +                           ((type = elan3_sdram_readl (dev, event + offsetof (E3_Event, ev_Type))) & EV_TYPE_MASK_EVIRQ) == EV_TYPE_EVIRQ)
70132 +                       {
70133 +                           spin_unlock (&dev->IntrLock);
70134 +                           ep3_event (ctxt, (type & ~(EV_TYPE_MASK_EVIRQ|EV_TYPE_MASK_BCOPY)), OP_LWP);
70135 +                           spin_lock (&dev->IntrLock);
70136 +
70137 +                           break;
70138 +                       }
70139 +
70140 +                       printk ("ep3_iprocTrap: SETEVENT : %x - cannot find event\n", hdrp->s.TrAddr);
70141 +//                     panic ("ep3_iprocTrap\n");
70142 +                       break;
70143 +                   }
70144 +
70145 +                   default:
70146 +                       printk ("ep3_iprocTrap: SETEVENT : %x MI=%x\n", hdrp->s.TrAddr, GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus));
70147 +//                     panic ("ep3_iprocTrap\n");
70148 +                       break;
70149 +                   }
70150 +                   break;
70151 +                   
70152 +               case TR_SENDDISCARD & TR_OPCODE_TYPE_MASK:
70153 +                   /* Just ignore send-discard transactions */
70154 +                   break;
70155 +                   
70156 +               case TR_REMOTEDMA & TR_OPCODE_TYPE_MASK:
70157 +               {
70158 +                   E3_DMA_BE *dmap = (E3_DMA_BE *) datap;
70159 +
70160 +                   if (GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus) != MI_DmaQueueOverflow)
70161 +                   {
70162 +                       printk ("ep3_iprocTrap: MI=%x\n", GET_STATUS_TRAPTYPE(hdrp->s.IProcTrapStatus));
70163 +                       break;
70164 +                   }
70165 +
70166 +                   IncrStat (rail, IprocDmaQueueOverflow);
70167 +
70168 +                   cp = LookupEventCookie (rail, &rail->CookieTable, dmap->s.dma_srcEvent);
70169 +
70170 +                   /* modify the dma type since it will still be a "read" dma */
70171 +                   dmap->s.dma_type = (dmap->s.dma_type & ~DMA_TYPE_READ) | DMA_TYPE_ISREMOTE;
70172 +
70173 +#ifdef DEBUG_ASSERT
70174 +                   {
70175 +                       E3_uint16     vp       = dmap->s.dma_destVProc;
70176 +                       EP_NODE_RAIL *nodeRail = &rail->Generic.Nodes[EP_VP_TO_NODE(vp)];
70177 +                       
70178 +                       ASSERT (!EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
70179 +                   }
70180 +#endif
70181 +                   if (cp != NULL)
70182 +                       cp->Operations->DmaRetry (rail, cp->Arg, dmap, EAGAIN);
70183 +                   else
70184 +                   {
70185 +                       ASSERT (dmap->s.dma_direction == DMA_WRITE && dmap->s.dma_srcEvent == 0 && dmap->s.dma_isRemote);
70186 +                       
70187 +                       QueueDmaForRetry (rail, dmap, EP_RETRY_ANONYMOUS);
70188 +                   }
70189 +                   break;
70190 +               }   
70191 +               default:
70192 +                   printk ("ep3_iprocTrap: %s\n", IProcTrapString (hdrp, datap));
70193 +                   break;
70194 +               }
70195 +           }
70196 +           
70197 +           /*
70198 +            * We've successfully processed this transaction, so move onto the 
70199 +            * next one.
70200 +            */
70201 +           trap->TrappedTransaction++;
70202 +           trap->TrappedDataBuffer++;
70203 +       }
70204 +
70205 +       return (OP_HANDLED);
70206 +    }
70207 +    
70208 +    /* Workaround WRITEBLOCK transaction executed when LOCKQUEUE transaction missed */
70209 +    if ((trap->TrappedTransaction->s.TrTypeCntx.s.Type & TR_WRITEBLOCK_BIT) &&         /* a DMA packet */
70210 +       trap->LockQueuePointer == 0 && trap->UnlockQueuePointer &&              /* a queueing DMA */
70211 +       trap->TrappedTransaction->s.TrAddr == trap->FaultSave.s.FaultAddress)   /* and missed lockqueue */
70212 +    {
70213 +       printk ("ep3_iprocTrap: missed lockqueue transaction for queue %x\n", trap->UnlockQueuePointer);
70214 +       return (OP_HANDLED);
70215 +    }
70216 +
70217 +    if (trap->FaultSave.s.FaultContext != 0)
70218 +       printk ("ep3_iprocTrap: pagefault at %08x in context %x\n", 
70219 +               trap->FaultSave.s.FaultAddress, trap->FaultSave.s.FaultContext);
70220 +
70221 +//    panic ("ep3_iprocTrap: unexpected inputter trap\n");
70222 +    
70223 +    return (OP_HANDLED);
70224 +}
70225 +
70226 +/*
70227 + * Command processor trap
70228 + *   kernel comms should only be able to generate
70229 + *   queue overflow traps
70230 + */
70231 +int
70232 +ep3_cprocTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap)
70233 +{
70234 +    EP3_RAIL     *rail   = (EP3_RAIL *) ctxt->Private;
70235 +    int           ctxnum = (trap->TrapBuf.r.Breg >> 16) & MAX_ROOT_CONTEXT_MASK;
70236 +    ELAN3_DEV     *dev    = rail->Device;
70237 +    EP3_DMA_RING  *ring;
70238 +    EP3_COOKIE   *cp;
70239 +    E3_DMA_BE     dmabe;
70240 +    int           vp, slot;
70241 +    unsigned long flags;
70242 +
70243 +    switch (trap->Status.s.TrapType)
70244 +    {
70245 +    case MI_DmaQueueOverflow:
70246 +       IncrStat (rail, CprocDmaQueueOverflow);
70247 +
70248 +       /* Use the context number that the setevent was issued in,
70249 +        * to find the appropriate dma ring, then since they are guaranteed
70250 +        * to be issued in order, we just search backwards till we find the
70251 +        * last one which has completed its word copy - this must be the
70252 +        * one which had caused the DmaQueueOverflow trap ! */
70253 +
70254 +       ASSERT (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS));
70255 +
70256 +       spin_lock_irqsave (&dev->CProcLock, flags);
70257 +
70258 +       ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM];
70259 +       slot = DMA_RING_PREV_POS(ring, ring->Position);
70260 +       
70261 +       while (ring->pDoneBlk[slot] == EP3_EVENT_ACTIVE)
70262 +           slot = DMA_RING_PREV_POS(ring, slot);
70263 +       
70264 +       elan3_sdram_copyq_from_sdram (rail->Device , DMA_RING_DMA(ring,slot), &dmabe, sizeof (E3_DMA));
70265 +
70266 +#if defined(DEBUG_ASSERT)
70267 +       while (slot != DMA_RING_PREV_POS(ring, ring->Position))
70268 +       {
70269 +           ASSERT (ring->pDoneBlk[slot] != EP3_EVENT_ACTIVE);
70270 +           
70271 +           slot = DMA_RING_PREV_POS(ring, slot);
70272 +       }
70273 +#endif
70274 +       spin_unlock_irqrestore (&dev->CProcLock, flags);
70275 +
70276 +       if (dmabe.s.dma_direction == DMA_WRITE)
70277 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_srcEvent);
70278 +       else
70279 +       {
70280 +           ASSERT (dmabe.s.dma_direction = DMA_READ_REQUEUE);
70281 +
70282 +           cp = LookupEventCookie (rail, &rail->CookieTable, dmabe.s.dma_destEvent);
70283 +       }
70284 +
70285 +#if defined(DEBUG_ASSERT)
70286 +       if (dmabe.s.dma_direction == DMA_WRITE)
70287 +           vp = dmabe.s.dma_destVProc;
70288 +       else
70289 +           vp = dmabe.s.dma_srcVProc;
70290 +       
70291 +       ASSERT (!EP_VP_ISDATA(vp) || (rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State >= EP_NODE_CONNECTED &&
70292 +                                     rail->Generic.Nodes[EP_VP_TO_NODE(vp)].State <= EP_NODE_LOCAL_PASSIVATE));
70293 +#endif
70294 +
70295 +       if (cp != NULL)
70296 +           cp->Operations->DmaRetry (rail, cp->Arg, &dmabe, EAGAIN);
70297 +       else
70298 +       {
70299 +           ASSERT (dmabe.s.dma_direction == DMA_WRITE && dmabe.s.dma_srcEvent == 0 && dmabe.s.dma_isRemote);
70300 +           
70301 +           QueueDmaForRetry (rail, &dmabe, EP_RETRY_ANONYMOUS);
70302 +       }
70303 +       
70304 +       return (OP_HANDLED);
70305 +
70306 +    case MI_EventQueueOverflow:
70307 +       ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM);
70308 +
70309 +       IncrStat (rail, CprocEventQueueOverflow);
70310 +       
70311 +       rail->CommandPortEventTrap = TRUE;
70312 +       return (OP_HANDLED);
70313 +       
70314 +#if defined(PER_CPU_TIMEOUT)
70315 +    case MI_SetEventReadWait:
70316 +       if (ctxnum == ELAN3_MRF_CONTEXT_NUM && trap->FaultSave.s.EventAddress == EP_PACEMAKER_EVENTADDR)
70317 +       {
70318 +           HeartbeatPacemaker (rail);
70319 +           return (OP_HANDLED);
70320 +       }
70321 +#endif
70322 +
70323 +    default:
70324 +       printk ("ep3_cprocTrap : Context=%x Status=%x TrapType=%x\n", ctxnum, trap->Status.Status, trap->Status.s.TrapType);
70325 +       printk ("               FaultAddr=%x EventAddr=%x FSR=%x\n",
70326 +                trap->FaultSave.s.FaultAddress, trap->FaultSave.s.EventAddress,
70327 +                trap->FaultSave.s.FSR.Status);
70328 +       break;
70329 +    }
70330 +
70331 +//    panic ("ep3_cprocTrap");
70332 +
70333 +    return (OP_HANDLED);
70334 +}
70335 +
70336 +static int
70337 +ep3_cprocReissue (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *tbuf)
70338 +{
70339 +    EP3_RAIL   *rail    = (EP3_RAIL *) ctxt->Private;
70340 +    unsigned  cmdoff = (tbuf->s.ContextType >> 5) & 0xFF;
70341 +    int       ctxnum = (tbuf->s.ContextType >> 16) & MAX_ROOT_CONTEXT_MASK;
70342 +    
70343 +    if (ctxnum >= ELAN3_DMARING_BASE_CONTEXT_NUM && ctxnum < (ELAN3_DMARING_BASE_CONTEXT_NUM+EP3_NUM_RINGS))
70344 +    {
70345 +       EP3_DMA_RING *ring = &rail->DmaRings[ctxnum - ELAN3_DMARING_BASE_CONTEXT_NUM];
70346 +
70347 +       ASSERT ((cmdoff << 2) == offsetof (E3_CommandPort, SetEvent)); /* can only be setevent commands! */
70348 +       ASSERT (tbuf->s.Addr >= DMA_RING_EVENT_ELAN(ring,0) && tbuf->s.Addr < DMA_RING_EVENT_ELAN(ring, ring->Entries));
70349 +       
70350 +       writel (tbuf->s.Addr, (void *)(ring->CommandPort + (cmdoff << 2)));
70351 +    }
70352 +    else
70353 +    {
70354 +       ASSERT (ctxnum == ELAN3_MRF_CONTEXT_NUM);
70355 +
70356 +       writel (tbuf->s.Addr, (void *)(ctxt->CommandPort + (cmdoff << 2)));
70357 +    }
70358 +    
70359 +    return (OP_HANDLED);
70360 +}
70361 +
70362 +static E3_uint8
70363 +ep3_load8 (ELAN3_CTXT *ctxt, E3_Addr addr)
70364 +{
70365 +    EP3_RAIL    *rail  = (EP3_RAIL *) ctxt->Private;
70366 +    ELAN3_DEV    *dev = ctxt->Device;
70367 +    sdramaddr_t offset;
70368 +    E3_uint8   *ptr;
70369 +
70370 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70371 +       return (elan3_sdram_readb (dev, offset));
70372 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != NULL)
70373 +       return (*ptr);
70374 +
70375 +    printk ("ep3_load8: %08x\n", addr);
70376 +    return (0);
70377 +}
70378 +
70379 +static void
70380 +ep3_store8 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val)
70381 +{
70382 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
70383 +    ELAN3_DEV   *dev = ctxt->Device;
70384 +    sdramaddr_t offset;
70385 +    E3_uint8   *ptr;
70386 +
70387 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70388 +       elan3_sdram_writeb (dev, offset, val);
70389 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
70390 +       *ptr = val;
70391 +    else
70392 +       printk ("ep3_store8 %08x\n", addr);
70393 +}
70394 +
70395 +static E3_uint16
70396 +ep3_load16 (ELAN3_CTXT *ctxt, E3_Addr addr)
70397 +{
70398 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
70399 +    ELAN3_DEV   *dev = ctxt->Device;
70400 +    sdramaddr_t offset;
70401 +    E3_uint16  *ptr;
70402 +
70403 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70404 +       return (elan3_sdram_readw (dev, offset));
70405 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
70406 +       return (*ptr);
70407 +
70408 +    printk ("ep3_load16 %08x\n", addr);
70409 +    return (0);
70410 +}
70411 +
70412 +static void
70413 +ep3_store16 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val)
70414 +{
70415 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
70416 +    ELAN3_DEV   *dev = ctxt->Device;
70417 +    sdramaddr_t offset;
70418 +    E3_uint16  *ptr;
70419 +
70420 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70421 +       elan3_sdram_writew (dev, offset, val);
70422 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
70423 +       *ptr = val;
70424 +    else
70425 +       printk ("ep3_store16 %08x\n", addr);
70426 +}
70427 +
70428 +static E3_uint32
70429 +ep3_load32 (ELAN3_CTXT *ctxt, E3_Addr addr)
70430 +{
70431 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
70432 +    ELAN3_DEV   *dev = ctxt->Device;
70433 +    sdramaddr_t offset;
70434 +    E3_uint32  *ptr;
70435 +
70436 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70437 +       return (elan3_sdram_readl(dev, offset));
70438 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
70439 +       return (*ptr);
70440 +    
70441 +    printk ("ep3_load32 %08x\n", addr);
70442 +    return (0);
70443 +}
70444 +
70445 +static void
70446 +ep3_store32 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val)
70447 +{
70448 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
70449 +    ELAN3_DEV   *dev = ctxt->Device;
70450 +    sdramaddr_t offset;
70451 +    E3_uint32  *ptr;
70452 +
70453 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70454 +       elan3_sdram_writel (dev, offset, val);
70455 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
70456 +       *ptr = val;
70457 +    else
70458 +       printk ("ep3_store32 %08x\n", addr);
70459 +}
70460 +
70461 +static E3_uint64
70462 +ep3_load64 (ELAN3_CTXT *ctxt, E3_Addr addr)
70463 +{
70464 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
70465 +    ELAN3_DEV   *dev = ctxt->Device;
70466 +    sdramaddr_t offset;
70467 +    E3_uint64  *ptr;
70468 +
70469 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70470 +       return (elan3_sdram_readq (dev, offset));
70471 +    if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
70472 +       return (*ptr);
70473 +
70474 +    printk ("ep3_load64 %08x\n", addr);
70475 +    return (0);
70476 +}
70477 +
70478 +static void
70479 +ep3_store64 (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val)
70480 +{
70481 +    EP3_RAIL   *rail  = (EP3_RAIL *) ctxt->Private;
70482 +    ELAN3_DEV   *dev = ctxt->Device;
70483 +    sdramaddr_t offset;
70484 +    E3_uint64  *ptr;
70485 +
70486 +    if ((offset = ep_elan2sdram (&rail->Generic, addr)) != 0)
70487 +       elan3_sdram_writeq (dev, offset, val);
70488 +    else if ((ptr = ep_elan2main (&rail->Generic, addr)) != 0)
70489 +       *ptr = val;
70490 +    else
70491 +       printk ("ep3_store64 %08x\n", addr);
70492 +}
70493 +
70494 +/*
70495 + * Local variables:
70496 + * c-file-style: "stroustrup"
70497 + * End:
70498 + */
70499 diff -urN clean/drivers/net/qsnet/ep/support_elan4.c linux-2.6.9/drivers/net/qsnet/ep/support_elan4.c
70500 --- clean/drivers/net/qsnet/ep/support_elan4.c  1969-12-31 19:00:00.000000000 -0500
70501 +++ linux-2.6.9/drivers/net/qsnet/ep/support_elan4.c    2005-08-09 05:57:14.000000000 -0400
70502 @@ -0,0 +1,1192 @@
70503 +/*
70504 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
70505 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
70506 + *
70507 + *    For licensing information please see the supplied COPYING file
70508 + *
70509 + */
70510 +
70511 +#ident "@(#)$Id: support_elan4.c,v 1.24.2.2 2005/08/09 09:57:14 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
70512 +/*      $Source: /cvs/master/quadrics/epmod/support_elan4.c,v $*/
70513 +
70514 +#include <qsnet/kernel.h>
70515 +#include <qsnet/kthread.h>
70516 +
70517 +#include <elan/kcomm.h>
70518 +
70519 +#include "kcomm_vp.h"
70520 +#include "kcomm_elan4.h"
70521 +#include "debug.h"
70522 +
70523 +#include <elan4/trtype.h>
70524 +#include <elan4/debug.h>
70525 +
70526 +void
70527 +ep4_register_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp, E4_uint64 cookie, void (*callback)(EP4_RAIL *r, void *arg), void *arg)
70528 +{
70529 +    unsigned long flags;
70530 +    
70531 +    cp->int_val      = cookie;
70532 +    cp->int_callback = callback;
70533 +    cp->int_arg      = arg;
70534 +       
70535 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
70536 +    list_add_tail (&cp->int_link, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]);
70537 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
70538 +}
70539 +
70540 +void
70541 +ep4_deregister_intcookie (EP4_RAIL *rail, EP4_INTCOOKIE *cp)
70542 +{
70543 +    unsigned long flags;
70544 +    
70545 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
70546 +    list_del (&cp->int_link);
70547 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
70548 +}
70549 +
70550 +
70551 +EP4_INTCOOKIE *
70552 +ep4_lookup_intcookie (EP4_RAIL *rail, E4_uint64 cookie)
70553 +{
70554 +    struct list_head *el;
70555 +    unsigned long flags;
70556 +
70557 +    spin_lock_irqsave (&rail->r_intcookie_lock, flags);
70558 +    list_for_each (el, &rail->r_intcookie_hash[EP4_INTCOOKIE_HASH(cookie)]) {
70559 +       EP4_INTCOOKIE *cp = list_entry (el, EP4_INTCOOKIE, int_link);
70560 +
70561 +       if (cp->int_val == cookie)
70562 +       {
70563 +           spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
70564 +           return cp;
70565 +       }
70566 +    }
70567 +    spin_unlock_irqrestore (&rail->r_intcookie_lock, flags);
70568 +    return NULL;
70569 +}
70570 +
70571 +E4_uint64
70572 +ep4_neterr_cookie (EP4_RAIL *rail, unsigned int node)
70573 +{
70574 +    E4_uint64      cookie;
70575 +    unsigned long  flags;
70576 +
70577 +    spin_lock_irqsave (&rail->r_cookie_lock, flags);
70578 +    cookie = rail->r_cookies[node];
70579 +
70580 +    rail->r_cookies[node] += EP4_COOKIE_INC;
70581 +    
70582 +    spin_unlock_irqrestore (&rail->r_cookie_lock, flags);
70583 +
70584 +    return cookie;
70585 +}
70586 +
70587 +void
70588 +ep4_eproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
70589 +{
70590 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
70591 +    ELAN4_EPROC_TRAP trap;
70592 +
70593 +    elan4_extract_eproc_trap (ctxt->ctxt_dev, status, &trap, 0);
70594 +
70595 +    if (epdebug & DBG_EPTRAP)
70596 +       elan4_display_eproc_trap (DBG_BUFFER, 0, "ep4_eproc_trap", &trap);
70597 +
70598 +    switch (EPROC_TrapType (status))
70599 +    {
70600 +    case EventProcNoFault:
70601 +       EPRINTF1 (DBG_EPTRAP, "%s: EventProcNoFault\n", rail->r_generic.Name);
70602 +       return;
70603 +
70604 +    default:
70605 +       printk ("%s: unhandled eproc trap %d\n", rail->r_generic.Name, EPROC_TrapType (status));
70606 +       elan4_display_eproc_trap (DBG_CONSOLE, 0, "ep4_eproc_trap", &trap);
70607 +    }
70608 +}
70609 +
70610 +void
70611 +ep4_cproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum)
70612 +{
70613 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
70614 +    ELAN4_CPROC_TRAP trap;
70615 +    struct list_head *el;
70616 +    register int      i;
70617 +
70618 +    elan4_extract_cproc_trap (ctxt->ctxt_dev, status, &trap, cqnum);
70619 +
70620 +    if (epdebug & DBG_EPTRAP)
70621 +       elan4_display_cproc_trap (DBG_BUFFER, 0, "ep4_cproc_trap", &trap);
70622 +       
70623 +    switch (CPROC_TrapType (status))
70624 +    {
70625 +    case CommandProcInterruptQueueOverflow:
70626 +       /*
70627 +        * Try and handle a bunch of elan main interrupts
70628 +        */
70629 +       for (i = 0; i <EP4_NUM_ECQ; i++) {
70630 +           list_for_each (el, &rail->r_ecq_list[i]) {
70631 +               EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
70632 +           
70633 +               if (elan4_cq2num (ecq->ecq_cq) == cqnum)
70634 +               {
70635 +                   printk ("%s: defer command queue %d after trap %x\n",
70636 +                           rail->r_generic.Name, cqnum, CPROC_TrapType (status));
70637 +       
70638 +                   elan4_queue_mainintop (ctxt->ctxt_dev, &ecq->ecq_intop);
70639 +                   return;
70640 +               }
70641 +           }
70642 +       }
70643 +       break;
70644 +
70645 +    case CommandProcDmaQueueOverflow:
70646 +    case CommandProcThreadQueueOverflow:
70647 +       for (i = 0; i <EP4_NUM_ECQ; i++) {
70648 +           list_for_each (el, &rail->r_ecq_list[i]) {
70649 +               EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
70650 +           
70651 +               if (elan4_cq2num (ecq->ecq_cq) == cqnum)
70652 +               {
70653 +                   printk ("%s: restart command queue %d after trap %x\n",
70654 +                           rail->r_generic.Name, cqnum, CPROC_TrapType (status));
70655 +
70656 +                   elan4_restartcq (ctxt->ctxt_dev, ecq->ecq_cq);
70657 +                   return;
70658 +               }
70659 +           }
70660 +       }
70661 +       break;
70662 +    }
70663 +
70664 +    printk ("%s: unhandled cproc trap %d for cqnum %d\n", rail->r_generic.Name, CPROC_TrapType (status), cqnum);
70665 +    elan4_display_cproc_trap (DBG_CONSOLE, 0, "ep4_cproc_trap", &trap);
70666 +}
70667 +
70668 +void
70669 +ep4_dproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
70670 +{
70671 +    EP4_RAIL        *rail = EP4_CTXT_TO_RAIL (ctxt);
70672 +    ELAN4_DPROC_TRAP trap;
70673 +
70674 +    elan4_extract_dproc_trap (ctxt->ctxt_dev, status, &trap, unit);
70675 +
70676 +    if (epdebug & DBG_EPTRAP)
70677 +       elan4_display_dproc_trap (DBG_BUFFER, 0, "ep4_dproc_trap", &trap);
70678 +
70679 +    if (! DPROC_PrefetcherFault (trap.tr_status))
70680 +    {
70681 +       switch (DPROC_TrapType (trap.tr_status))
70682 +       {
70683 +       case DmaProcFailCountError:
70684 +           goto retry_this_dma;
70685 +
70686 +       case DmaProcPacketAckError:
70687 +           goto retry_this_dma;
70688 +
70689 +       case DmaProcQueueOverflow:
70690 +           goto retry_this_dma;
70691 +       }
70692 +    }
70693 +
70694 +    printk ("%s: unhandled dproc trap\n", rail->r_generic.Name);
70695 +    elan4_display_dproc_trap (DBG_CONSOLE, 0, "ep4_dproc_trap", &trap);
70696 +    return;
70697 +
70698 + retry_this_dma:
70699 +    /*XXXX implement backoff .... */
70700 +
70701 +    ep4_queue_dma_retry (rail, &trap.tr_desc, EP_RETRY_LOW_PRI);
70702 +}
70703 +
70704 +void
70705 +ep4_tproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status)
70706 +{
70707 +    EP4_RAIL         *rail = EP4_CTXT_TO_RAIL (ctxt);
70708 +    ELAN4_TPROC_TRAP *trap = &rail->r_tproc_trap;
70709 +
70710 +    elan4_extract_tproc_trap (ctxt->ctxt_dev, status, trap);
70711 +
70712 +    if (epdebug & DBG_EPTRAP)
70713 +       elan4_display_tproc_trap (DBG_BUFFER, 0, "ep4_tproc_trap", trap);
70714 +       
70715 +    printk ("%s: unhandled tproc trap\n", rail->r_generic.Name);
70716 +    elan4_display_tproc_trap (DBG_CONSOLE, 0, "ep4_tproc_trap", trap);
70717 +}
70718 +
70719 +void
70720 +ep4_iproc_trap (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit)
70721 +{
70722 +    EP4_RAIL         *rail = EP4_CTXT_TO_RAIL (ctxt);
70723 +    ELAN4_IPROC_TRAP *trap = &rail->r_iproc_trap;
70724 +
70725 +    elan4_extract_iproc_trap (ctxt->ctxt_dev, status, trap, unit);
70726 +
70727 +    if (epdebug & DBG_EPTRAP)
70728 +       elan4_display_iproc_trap (DBG_BUFFER, 0, "ep4_iproc_trap", trap);
70729 +       
70730 +    elan4_inspect_iproc_trap (trap);
70731 +
70732 +    switch (IPROC_TrapValue (trap->tr_transactions[trap->tr_trappedTrans].IProcStatusCntxAndTrType))
70733 +    {
70734 +    case InputDmaQueueOverflow:
70735 +       ep4_queue_dma_retry (rail, (E4_DMA *) &trap->tr_dataBuffers[trap->tr_trappedTrans], EP_RETRY_LOW_PRI);
70736 +       return;
70737 +
70738 +    case InputEventEngineTrapped:
70739 +    {
70740 +       E4_IprocTrapHeader *hdrp = &trap->tr_transactions[trap->tr_trappedTrans];
70741 +       sdramaddr_t         inputq;
70742 +       E4_Addr             event;
70743 +
70744 +       /* XXXX: flow control on the command queue which we issue to is 
70745 +        * rather difficult, we don't want to have space for an event 
70746 +        * for each possible context, nor the mechanism to hold the 
70747 +        * context filter up until the event has been executed.  Given
70748 +        * that the event engine will be restarted by this same interrupt
70749 +        * and we're using high priority command queues, then we just use
70750 +        * a single small command queue for this.
70751 +        */
70752 +       switch (IPROC_TransactionType(hdrp->IProcStatusCntxAndTrType) & TR_OPCODE_MASK)
70753 +       {
70754 +       case TR_SETEVENT & TR_OPCODE_MASK:
70755 +           if (hdrp->TrAddr != 0)
70756 +               ep4_set_event_cmd (rail->r_event_ecq, hdrp->TrAddr);
70757 +           return;
70758 +
70759 +       case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:
70760 +           if ((inputq = ep_elan2sdram (&rail->r_generic, hdrp->TrAddr)) == 0)
70761 +               printk ("%s: TR_INPUT_Q_COMMIT at %llx is not sdram\n", rail->r_generic.Name, (long long)hdrp->TrAddr);
70762 +           else
70763 +           {
70764 +               if ((event = elan4_sdram_readq (rail->r_ctxt.ctxt_dev, inputq + offsetof (E4_InputQueue, q_event))) != 0)
70765 +                   ep4_set_event_cmd (rail->r_event_ecq, event);
70766 +               return;
70767 +           }
70768 +       }
70769 +       break;
70770 +    }
70771 +
70772 +    case InputEopErrorOnWaitForEop:
70773 +    case InputEopErrorTrap:
70774 +    case InputCrcErrorAfterPAckOk:
70775 +       if (! (trap->tr_flags & TR_FLAG_ACK_SENT) || (trap->tr_flags & TR_FLAG_EOP_BAD))
70776 +           return;
70777 +       
70778 +       if (EP4_CONTEXT_ISDATA (IPROC_NetworkContext (status)))
70779 +       {
70780 +           unsigned int nodeId = EP4_CONTEXT_TO_NODE (IPROC_NetworkContext (status));
70781 +
70782 +           if ((trap->tr_flags & (TR_FLAG_DMA_PACKET | TR_FLAG_BAD_TRANS)) || 
70783 +               ((trap->tr_flags & TR_FLAG_EOP_ERROR) && (trap->tr_identifyTrans == TR_TRANS_INVALID)))
70784 +           {
70785 +               EPRINTF2  (DBG_NETWORK_ERROR, "%s: network error on dma packet from node %d\n", rail->r_generic.Name, nodeId);
70786 +               printk ("%s: network error on dma packet from node %d\n", rail->r_generic.Name, nodeId);
70787 +
70788 +               ep_queue_network_error (&rail->r_generic, EP4_CONTEXT_TO_NODE(IPROC_NetworkContext (status)), EP_NODE_NETERR_DMA_PACKET, unit & 1, 0);
70789 +               return;
70790 +           }
70791 +           
70792 +           if (trap->tr_flags & TR_FLAG_EOP_ERROR)
70793 +           {
70794 +               E4_uint64        status = trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType;
70795 +               EP_NETERR_COOKIE cookie = 0;
70796 +
70797 +               switch (IPROC_TransactionType (status) & TR_OPCODE_MASK)
70798 +               {
70799 +               case TR_SETEVENT_IDENTIFY & TR_OPCODE_MASK:
70800 +                   if (IPROC_TrapValue(status) == InputNoFault)
70801 +                       cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
70802 +                   else
70803 +                       cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0];
70804 +                   EPRINTF3(DBG_NETWORK_ERROR, "%s: network error on setevent <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70805 +                   printk ("%s: network error on setevent <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70806 +                   break;
70807 +
70808 +               case TR_INPUT_Q_COMMIT & TR_OPCODE_MASK:
70809 +                   if (IPROC_TrapValue(status) == InputNoFault)
70810 +                       cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
70811 +                   else
70812 +                       cookie = trap->tr_dataBuffers[trap->tr_identifyTrans].Data[0];
70813 +                   EPRINTF3 (DBG_NETWORK_ERROR, "%s: network error on queue commit <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70814 +                   printk ("%s: network error on queue commit <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70815 +                   break;
70816 +                   
70817 +               case TR_REMOTEDMA & TR_OPCODE_MASK:
70818 +                   cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
70819 +                   EPRINTF3 (DBG_NETWORK_ERROR, "%s: network error on remote dma <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70820 +                   printk ("%s: network error on remote dma <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70821 +                   break;
70822 +
70823 +               case TR_IDENTIFY & TR_OPCODE_MASK:
70824 +                   cookie = trap->tr_transactions[trap->tr_identifyTrans].TrAddr;
70825 +                   EPRINTF3 (DBG_NETWORK_ERROR, "%s: network error on identify <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70826 +                   printk ("%s: network error on identify <%lld%s%s%s%s> from node %d\n", rail->r_generic.Name, EP4_COOKIE_STRING(cookie), nodeId);
70827 +                   break;
70828 +
70829 +               default:
70830 +                   panic ("%s: unknown identify transaction type %x for eop error from node %d\n", rail->r_generic.Name,
70831 +                           IPROC_TransactionType (trap->tr_transactions[trap->tr_identifyTrans].IProcStatusCntxAndTrType), nodeId);
70832 +                   break;
70833 +               }
70834 +
70835 +               ep_queue_network_error (&rail->r_generic, nodeId, EP_NODE_NETERR_ATOMIC_PACKET, unit & 1, cookie);
70836 +           }
70837 +       }
70838 +       return;
70839 +    }
70840 +
70841 +    printk ("%s: unhandled iproc trap\n", rail->r_generic.Name);
70842 +    elan4_display_iproc_trap (DBG_CONSOLE, 0, "ep4_iproc_trap", trap);
70843 +}
70844 +
70845 +void
70846 +ep4_interrupt (ELAN4_CTXT *ctxt, E4_uint64 cookie)
70847 +{
70848 +    EP4_RAIL      *rail = EP4_CTXT_TO_RAIL (ctxt);
70849 +    EP4_INTCOOKIE *cp  = ep4_lookup_intcookie (rail, cookie);
70850 +
70851 +    if (cp == NULL)
70852 +    {
70853 +       printk ("ep4_interrupt: cannot find event cookie for %016llx\n", (long long) cookie);
70854 +       return;
70855 +    }
70856 +
70857 +    cp->int_callback (rail, cp->int_arg);
70858 +}
70859 +
70860 +ELAN4_TRAP_OPS ep4_trap_ops = 
70861 +{
70862 +    ep4_eproc_trap,
70863 +    ep4_cproc_trap,
70864 +    ep4_dproc_trap,
70865 +    ep4_tproc_trap,
70866 +    ep4_iproc_trap,
70867 +    ep4_interrupt,
70868 +};
70869 +
70870 +void
70871 +ep4_flush_filters (EP_RAIL *r)
70872 +{
70873 +    /* nothing to do here as elan4_set_filter() flushes the context filter */
70874 +}
70875 +
70876 +struct flush_queues_desc
70877 +{
70878 +    EP4_RAIL      *rail;
70879 +    volatile int   done;
70880 +} ;
70881 +
70882 +static void
70883 +ep4_flush_queues_flushop (ELAN4_DEV *dev, void *arg, int qfull)
70884 +{
70885 +    struct flush_queues_desc *desc  = (struct flush_queues_desc *) arg;
70886 +    EP4_RAIL                 *rail  = desc->rail;
70887 +    E4_uint64                qptrs = read_reg64 (dev, DProcHighPriPtrs);
70888 +    E4_uint32                 qsize = E4_QueueSize (E4_QueueSizeValue (qptrs));
70889 +    E4_uint32                 qfptr = E4_QueueFrontPointer (qptrs);
70890 +    E4_uint32                 qbptr = E4_QueueBackPointer (qptrs);
70891 +    E4_DProcQueueEntry        qentry;
70892 +    unsigned long             flags;
70893 +
70894 +    while ((qfptr != qbptr) || qfull)
70895 +    {
70896 +       E4_uint64 typeSize = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_typeSize));
70897 +       
70898 +       if (DMA_Context (qentry.Desc.dma_typeSize) == rail->r_ctxt.ctxt_num)
70899 +       {
70900 +           E4_uint64     vp       = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_vproc));
70901 +           EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(vp)];
70902 +           
70903 +           EP4_ASSERT (rail, !EP_VP_ISDATA(vp) || (nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE));
70904 +           
70905 +           if (EP_VP_ISDATA(vp) && nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
70906 +           {
70907 +               /*
70908 +                * This is a DMA going to the node which is being removed, 
70909 +                * so move it onto the node dma list where it will get
70910 +                * handled later.
70911 +                */
70912 +               qentry.Desc.dma_typeSize = typeSize;
70913 +               qentry.Desc.dma_cookie   = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_cookie));
70914 +               qentry.Desc.dma_vproc    = vp;
70915 +               qentry.Desc.dma_srcAddr  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcAddr));
70916 +               qentry.Desc.dma_dstAddr  = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstAddr));
70917 +               qentry.Desc.dma_srcEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_srcEvent));
70918 +               qentry.Desc.dma_dstEvent = elan4_sdram_readq (dev, qfptr + offsetof (E4_DProcQueueEntry, Desc.dma_dstEvent));
70919 +               
70920 +               EPRINTF4 (DBG_RETRY, "ep4_flush_dmas: %016llx %016llx %016llx %016llx\n", (long long)qentry.Desc.dma_typeSize, 
70921 +                         (long long)qentry.Desc.dma_cookie,  (long long)qentry.Desc.dma_vproc,  (long long)qentry.Desc.dma_srcAddr);
70922 +               EPRINTF3 (DBG_RETRY, "                %016llx %016llx %016llx\n",  (long long)qentry.Desc.dma_dstAddr, 
70923 +                         (long long)qentry.Desc.dma_srcEvent,  (long long)qentry.Desc.dma_dstEvent);
70924 +               
70925 +               ep4_queue_dma_stalled (rail, &qentry.Desc);
70926 +               
70927 +               qentry.Desc.dma_typeSize = DMA_ShMemWrite | dev->dev_ctxt.ctxt_num;
70928 +               qentry.Desc.dma_cookie   = 0;
70929 +               qentry.Desc.dma_vproc    = 0;
70930 +               qentry.Desc.dma_srcAddr  = 0;
70931 +               qentry.Desc.dma_dstAddr  = 0;
70932 +               qentry.Desc.dma_srcEvent = 0;
70933 +               qentry.Desc.dma_dstEvent = 0;
70934 +               
70935 +               elan4_sdram_copyq_to_sdram (dev, &qentry, qfptr, sizeof (E4_DProcQueueEntry));
70936 +           }
70937 +       }
70938 +
70939 +       qfptr = (qfptr & ~(qsize-1)) | ((qfptr + sizeof (E4_DProcQueueEntry)) & (qsize-1));
70940 +       qfull = 0;
70941 +    }
70942 +
70943 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
70944 +    desc->done = 1;
70945 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
70946 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
70947 +}
70948 +
70949 +static void
70950 +ep4_flush_queues_haltop (ELAN4_DEV *dev, void *arg)
70951 +{
70952 +    struct flush_queues_desc *desc = (struct flush_queues_desc *) arg;
70953 +
70954 +    elan4_queue_dma_flushop (dev, &desc->rail->r_flushop, 1);
70955 +}
70956 +
70957 +void
70958 +ep4_flush_queues (EP_RAIL *r)
70959 +{
70960 +    EP4_RAIL *rail = (EP4_RAIL *) r;
70961 +    struct flush_queues_desc desc;
70962 +    struct list_head *el, *nel;
70963 +    unsigned long flags;
70964 +    int i;
70965 +
70966 +    /* initialise descriptor */
70967 +    desc.rail  = rail;
70968 +    desc.done  = 0;
70969 +
70970 +    /* First -  stall the dma retry thread, so that it will no longer restart
70971 +     *          any dma's from the retry list */
70972 +    ep_kthread_stall (&rail->r_retry_thread);
70973 +
70974 +    /* Second - flush through all command queues targeted by events, thread etc */
70975 +    ep4_flush_ecqs (rail);
70976 +
70977 +    /* Third - queue a halt operation to flush through all DMA's which are executing
70978 +     *         or on the run queues */
70979 +    kmutex_lock (&rail->r_haltop_mutex);
70980 +
70981 +    rail->r_haltop.op_mask      = INT_DProcHalted;
70982 +    rail->r_haltop.op_function  = ep4_flush_queues_haltop;
70983 +    rail->r_haltop.op_arg       = &desc;
70984 +
70985 +    rail->r_flushop.op_function = ep4_flush_queues_flushop;
70986 +    rail->r_flushop.op_arg      = &desc;
70987 +    
70988 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
70989 +
70990 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
70991 +    while (! desc.done)
70992 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
70993 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
70994 +    kmutex_unlock (&rail->r_haltop_mutex);
70995 +
70996 +    /* Fourth - run down the dma retry lists and move all entries to the cancelled
70997 +     *          list.  Any dma's which were on the run queues have already been
70998 +     *          moved there */
70999 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71000 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
71001 +    {
71002 +       list_for_each_safe (el,nel, &rail->r_dma_retrylist[i]) {
71003 +           EP4_DMA_RETRY *retry    = list_entry (el, EP4_DMA_RETRY, retry_link);
71004 +           EP_NODE_RAIL  *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(retry->retry_dma.dma_vproc)];
71005 +
71006 +           EP4_ASSERT (rail, nodeRail->State >= EP_NODE_CONNECTED && nodeRail->State <= EP_NODE_LOCAL_PASSIVATE);
71007 +
71008 +           if (nodeRail->State == EP_NODE_LOCAL_PASSIVATE)
71009 +           {
71010 +               list_del (&retry->retry_link);
71011 +               list_add_tail (&retry->retry_link, &nodeRail->StalledDmas);
71012 +           }
71013 +       }
71014 +    }
71015 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71016 +    
71017 +    /* Finally - allow the retry thread to run again */
71018 +    ep_kthread_resume (&rail->r_retry_thread);
71019 +}
71020 +
71021 +struct write_qdesc_desc
71022 +{
71023 +    EP4_RAIL      *rail;
71024 +    sdramaddr_t    qaddr;
71025 +    E4_InputQueue *qdesc;
71026 +    volatile int   done;
71027 +} ;
71028 +
71029 +static void
71030 +ep4_write_qdesc_haltop (ELAN4_DEV *dev, void *arg)
71031 +{
71032 +    struct write_qdesc_desc *desc = (struct write_qdesc_desc *) arg;
71033 +    EP4_RAIL                *rail = desc->rail;
71034 +    unsigned long            flags;
71035 +
71036 +    elan4_sdram_copyq_to_sdram (dev, desc->qdesc, desc->qaddr, sizeof (E4_InputQueue));
71037 +
71038 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
71039 +    desc->done = 1;
71040 +    kcondvar_wakeupall (&rail->r_haltop_sleep, &rail->r_haltop_lock);
71041 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
71042 +}
71043 +
71044 +void
71045 +ep4_write_qdesc (EP4_RAIL *rail, sdramaddr_t qaddr, E4_InputQueue *qdesc)
71046 +{
71047 +    struct write_qdesc_desc desc;
71048 +    unsigned long flags;
71049 +
71050 +    /* initialise descriptor */
71051 +    desc.rail  = rail;
71052 +    desc.qaddr = qaddr;
71053 +    desc.qdesc = qdesc;
71054 +    desc.done  = 0;
71055 +
71056 +    kmutex_lock (&rail->r_haltop_mutex);
71057 +
71058 +    rail->r_haltop.op_mask     = INT_DiscardingHighPri;
71059 +    rail->r_haltop.op_function = ep4_write_qdesc_haltop;
71060 +    rail->r_haltop.op_arg      = &desc;
71061 +    
71062 +    elan4_queue_haltop (rail->r_ctxt.ctxt_dev, &rail->r_haltop);
71063 +
71064 +    spin_lock_irqsave (&rail->r_haltop_lock, flags);
71065 +    while (! desc.done)
71066 +       kcondvar_wait (&rail->r_haltop_sleep, &rail->r_haltop_lock, &flags);
71067 +    spin_unlock_irqrestore (&rail->r_haltop_lock, flags);
71068 +    
71069 +    kmutex_unlock (&rail->r_haltop_mutex);
71070 +}
71071 +#define CQ_SIZE_NWORDS ((CQ_Size (ecq->ecq_cq->cq_size) >> 3) - 8)     /* available number of dwords (less enough to flush) */
71072 +EP4_ECQ *
71073 +ep4_alloc_ecq (EP4_RAIL *rail, unsigned cqsize)
71074 +{
71075 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
71076 +    EP4_ECQ *ecq;
71077 +    unsigned long pgoff, cqaddr;
71078 +
71079 +    /* no space available, so allocate a new entry */
71080 +    KMEM_ZALLOC (ecq, EP4_ECQ *, sizeof (EP4_ECQ), 1);
71081 +
71082 +    if (ecq == NULL)
71083 +       return 0;
71084 +
71085 +    if ((ecq->ecq_cq = elan4_alloccq (&rail->r_ctxt, cqsize, CQ_EnableAllBits, CQ_Priority)) == NULL)
71086 +    {
71087 +       KMEM_FREE (ecq, sizeof (EP4_ECQ));
71088 +       return 0;
71089 +    }
71090 +
71091 +    pgoff  = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
71092 +    cqaddr = (ecq->ecq_cq->cq_cqa->cqa_cqnum + ecq->ecq_cq->cq_idx + dev->dev_cqoffset) * CQ_CommandMappingSize;
71093 +
71094 +    ecq->ecq_addr  = ep_rmalloc (rail->r_ecq_rmap, PAGESIZE, 0) + pgoff;
71095 +    ecq->ecq_avail = CQ_SIZE_NWORDS;                   /* available number of dwords (less enough to flush) */
71096 +
71097 +    ecq->ecq_intop.op_function = (ELAN4_HALTFN *) elan4_restartcq;
71098 +    ecq->ecq_intop.op_arg      = ecq->ecq_cq;
71099 +
71100 +    ep4_cport_map (&rail->r_generic, ecq->ecq_addr - pgoff, cqaddr - pgoff, PAGESIZE, EP_PERM_WRITE);
71101 +
71102 +    spin_lock_init (&ecq->ecq_lock);
71103 +
71104 +    return ecq;
71105 +}
71106 +
71107 +void
71108 +ep4_free_ecq (EP4_RAIL *rail, EP4_ECQ *ecq)
71109 +{
71110 +    unsigned long pgoff = (ecq->ecq_cq->cq_mapping & (PAGE_SIZE-1));
71111 +
71112 +    spin_lock_destroy (&ecq->ecq_lock);
71113 +
71114 +    ep4_unmap (&rail->r_generic, ecq->ecq_addr - pgoff, PAGESIZE);
71115 +    ep_rmfree (rail->r_ecq_rmap, PAGESIZE, ecq->ecq_addr - pgoff);
71116 +
71117 +    elan4_freecq (&rail->r_ctxt, ecq->ecq_cq);
71118 +    
71119 +    KMEM_FREE (ecq, sizeof (EP4_ECQ));
71120 +}
71121 +
71122 +EP4_ECQ *
71123 +ep4_get_ecq (EP4_RAIL *rail, unsigned which, unsigned ndwords)
71124 +{
71125 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
71126 +    struct list_head *el;
71127 +    unsigned long flags;
71128 +    EP4_ECQ *ecq;
71129 +    
71130 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
71131 +    list_for_each (el, &rail->r_ecq_list[which]) {
71132 +       EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
71133 +
71134 +       if (ecq->ecq_avail >= ndwords)
71135 +       {
71136 +           ecq->ecq_avail -= ndwords;
71137 +
71138 +           spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71139 +
71140 +           return ecq;
71141 +       }
71142 +    }
71143 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71144 +
71145 +    if ((ecq = ep4_alloc_ecq (rail, EP4_ECQ_Size (which))) == NULL)
71146 +       return NULL;
71147 +
71148 +    if (which == EP4_ECQ_EVENT)
71149 +    {
71150 +       if ((ecq->ecq_event = ep_alloc_elan (&rail->r_generic, sizeof (E4_Event32), 0, &ecq->ecq_event_addr)) == 0)
71151 +       {
71152 +           ep4_free_ecq (rail, ecq);
71153 +           return NULL;
71154 +       }
71155 +       
71156 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
71157 +                           E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
71158 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr),
71159 +                           ecq->ecq_addr);
71160 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue),
71161 +                           SET_EVENT_CMD | (rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event)));
71162 +       
71163 +       if ((ecq->ecq_flushcq = ep4_get_ecq (rail, EP4_ECQ_SINGLE, 1)) == NULL)
71164 +       {
71165 +           ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
71166 +           ep4_free_ecq (rail, ecq);
71167 +           return NULL;
71168 +       }
71169 +    }
71170 +
71171 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
71172 +    list_add (&ecq->ecq_link, &rail->r_ecq_list[which]);
71173 +
71174 +    ecq->ecq_avail -= ndwords;
71175 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71176 +
71177 +    return ecq;
71178 +}
71179 +
71180 +void
71181 +ep4_put_ecq (EP4_RAIL *rail, EP4_ECQ *ecq, unsigned ndwords)
71182 +{
71183 +    unsigned long flags;
71184 +
71185 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
71186 +
71187 +    ecq->ecq_avail += ndwords;
71188 +    
71189 +    if (ecq->ecq_avail !=  CQ_SIZE_NWORDS) 
71190 +       spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71191 +    else
71192 +    {
71193 +       list_del (&ecq->ecq_link);
71194 +       spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71195 +       
71196 +       if (ecq->ecq_flushcq)
71197 +           ep4_put_ecq (rail, ecq->ecq_flushcq, 1);
71198 +       if (ecq->ecq_event_addr)
71199 +           ep_free_elan (&rail->r_generic, ecq->ecq_event_addr, sizeof (E4_Event32));
71200 +
71201 +       ep4_free_ecq (rail, ecq);
71202 +    }
71203 +}
71204 +
71205 +void
71206 +ep4_nop_cmd (EP4_ECQ *ecq, E4_uint64 tag)
71207 +{
71208 +    unsigned long flags;
71209 +
71210 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
71211 +    elan4_nop_cmd (ecq->ecq_cq, tag);
71212 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
71213 +    
71214 +}
71215 +
71216 +void
71217 +ep4_set_event_cmd (EP4_ECQ *ecq, E4_Addr event)
71218 +{
71219 +    unsigned long flags;
71220 +
71221 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
71222 +    elan4_set_event_cmd (ecq->ecq_cq, event);
71223 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
71224 +}
71225 +
71226 +void
71227 +ep4_wait_event_cmd (EP4_ECQ *ecq, E4_Addr event, E4_uint64 candt, E4_uint64 param0, E4_uint64 param1)
71228 +{
71229 +    unsigned long flags;
71230 +
71231 +    spin_lock_irqsave (&ecq->ecq_lock, flags);
71232 +    elan4_wait_event_cmd (ecq->ecq_cq, event, candt, param0, param1);
71233 +    spin_unlock_irqrestore (&ecq->ecq_lock, flags);
71234 +}
71235 +
71236 +void
71237 +ep4_flush_interrupt (EP4_RAIL *rail, void *arg)
71238 +{
71239 +    unsigned long flags;
71240 +
71241 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
71242 +    rail->r_flush_count = 0;
71243 +    kcondvar_wakeupone (&rail->r_flush_sleep, &rail->r_ecq_lock);
71244 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71245 +}
71246 +
71247 +void
71248 +ep4_flush_ecqs (EP4_RAIL *rail)
71249 +{
71250 +    ELAN4_DEV *dev = rail->r_ctxt.ctxt_dev;
71251 +    struct list_head *el;
71252 +    unsigned long flags;
71253 +    int i;
71254 +
71255 +    kmutex_lock (&rail->r_flush_mutex);
71256 +
71257 +    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
71258 +
71259 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
71260 +    /* first flush all the "event" queues */
71261 +    list_for_each (el, &rail->r_ecq_list[EP4_ECQ_EVENT]) {
71262 +       EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
71263 +
71264 +       elan4_sdram_writeq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType),
71265 +                           E4_EVENT_INIT_VALUE (-32, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0));
71266 +
71267 +       ep4_set_event_cmd (ecq->ecq_flushcq, ecq->ecq_event_addr);
71268 +
71269 +       rail->r_flush_count++;
71270 +    }
71271 +
71272 +    /* next issue the setevents to all the other queues */
71273 +    for (i = EP4_ECQ_ATOMIC; i <EP4_NUM_ECQ; i++)
71274 +    {
71275 +       list_for_each (el,&rail->r_ecq_list[i]) {
71276 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
71277 +
71278 +           ep4_set_event_cmd (ecq, rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event));
71279 +
71280 +           rail->r_flush_count++;
71281 +       }
71282 +    }
71283 +
71284 +    /* issue the waitevent command */
71285 +    ep4_wait_event_cmd (rail->r_flush_mcq,  rail->r_elan_addr + offsetof (EP4_RAIL_ELAN, r_flush_event),
71286 +                       E4_EVENT_INIT_VALUE (-32 * rail->r_flush_count, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0),
71287 +                       rail->r_flush_ecq->ecq_addr,
71288 +                       INTERRUPT_CMD | (rail->r_flush_intcookie.int_val << E4_MAIN_INT_SHIFT));
71289 +    
71290 +    while (rail->r_flush_count)
71291 +       if (kcondvar_timedwait (&rail->r_flush_sleep, &rail->r_ecq_lock, &flags, (lbolt + (HZ*10))) == -1)
71292 +            elan4_hardware_lock_check(dev, "flush_ecqs");
71293 +    
71294 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71295 +
71296 +    EP4_SDRAM_ASSERT (rail, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event), E4_EVENT_INIT_VALUE (0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG,0));
71297 +
71298 +    kmutex_unlock (&rail->r_flush_mutex);
71299 +}
71300 +
71301 +void
71302 +ep4_init_thread (EP4_RAIL *rail, E4_ThreadRegs *regs, sdramaddr_t stackTop, 
71303 +                EP_ADDR stackAddr, E4_Addr startpc, int nargs,...)
71304 +{
71305 +    sdramaddr_t   sp = stackTop - roundup (nargs * sizeof (E4_uint64), E4_STACK_ALIGN);
71306 +    int           i;
71307 +    va_list       ap;
71308 +    
71309 +    /*
71310 +     * the thread start code expects the following :
71311 +     *   %r1 = stack pointer
71312 +     *   %r6 = frame pointer
71313 +     *   %r2 = function to call
71314 +     *
71315 +     *   function args are stored on the stack above %sp
71316 +     */
71317 +
71318 +    va_start(ap, nargs);
71319 +    for (i = 0; i < nargs; i++)
71320 +       elan4_sdram_writeq (rail->r_ctxt.ctxt_dev, sp + (i * sizeof (E4_uint64)), va_arg (ap, E4_uint64));
71321 +    va_end (ap);
71322 +    
71323 +    regs->Registers[0] = ep_symbol (&rail->r_threadcode, ".thread_start");             /* %r0 - PC */
71324 +    regs->Registers[1] = stackAddr - (stackTop - sp);                                  /* %r1 - stack pointer */
71325 +    regs->Registers[2] = startpc;                                                      /* %r2 - start pc */
71326 +    regs->Registers[3] = 0;
71327 +    regs->Registers[4] = 0;
71328 +    regs->Registers[5] = 0;
71329 +    regs->Registers[6] = stackTop;                                                     /* %r6 - frame pointer */ 
71330 +}
71331 +
71332 +/* retransmission thread */
71333 +
71334 +void
71335 +ep4_add_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
71336 +{
71337 +    ep_kthread_stall (&rail->r_retry_thread);
71338 +    list_add_tail (&ops->op_link, &rail->r_retry_ops);
71339 +    ep_kthread_resume (&rail->r_retry_thread);
71340 +}
71341 +
71342 +void
71343 +ep4_remove_retry_ops (EP4_RAIL *rail, EP4_RETRY_OPS *ops)
71344 +{
71345 +    ep_kthread_stall (&rail->r_retry_thread);
71346 +    list_del (&ops->op_link);
71347 +    ep_kthread_resume (&rail->r_retry_thread);
71348 +}
71349 +
71350 +void
71351 +ep4_retry_thread (EP4_RAIL *rail)
71352 +{
71353 +    struct list_head *el;
71354 +
71355 +    kernel_thread_init ("ep4_retry");
71356 +    
71357 +    for (;;)
71358 +    {
71359 +       long nextRunTime = 0;
71360 +
71361 +       list_for_each (el, &rail->r_retry_ops) {
71362 +           EP4_RETRY_OPS *ops = list_entry (el, EP4_RETRY_OPS, op_link);
71363 +
71364 +           nextRunTime = ops->op_func (rail, ops->op_arg, nextRunTime);
71365 +       }
71366 +
71367 +       if (ep_kthread_sleep (&rail->r_retry_thread, nextRunTime) < 0)
71368 +           break;
71369 +    }
71370 +
71371 +    ep_kthread_stopped (&rail->r_retry_thread);
71372 +
71373 +    kernel_thread_exit();
71374 +}
71375 +
71376 +/* DMA retransmission */
71377 +static unsigned ep4_dma_retry_times[EP_NUM_RETRIES];
71378 +
71379 +static unsigned long
71380 +ep4_retry_dmas (EP4_RAIL *rail, void *arg, unsigned long nextRunTime)
71381 +{
71382 +    unsigned long yieldAt = lbolt + (hz/10);
71383 +    unsigned long flags;
71384 +    int           i;
71385 +
71386 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
71387 +    {
71388 +       while (! list_empty (&rail->r_dma_retrylist[i]))
71389 +       {
71390 +           EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
71391 +           
71392 +           if (! AFTER(lbolt, retry->retry_time))
71393 +               break;
71394 +
71395 +           if (ep_kthread_should_stall (&rail->r_retry_thread) || AFTER (lbolt, yieldAt))
71396 +               goto cant_do_more;
71397 +           
71398 +           EPRINTF3 (DBG_RETRY, "%s: ep4_retry_dmas: flowcnt %llx %llx\n", rail->r_generic.Name,  (long long)rail->r_dma_flowcnt,  (long long)rail->r_main->r_dma_flowcnt);
71399 +
71400 +           if ((rail->r_dma_flowcnt - rail->r_main->r_dma_flowcnt) > EP4_DMA_RETRY_FLOWCNT)
71401 +           {
71402 +               printk ("ep4_retry_dmas: flowcnt %llx %llx\n",  (long long)rail->r_dma_flowcnt, (long long)rail->r_main->r_dma_flowcnt);
71403 +
71404 +               goto cant_do_more;
71405 +           }
71406 +
71407 +           EPRINTF4 (DBG_RETRY, "%s: ep4_retry_dmas: %016llx %016llx %016llx\n", rail->r_generic.Name,
71408 +                     (long long)retry->retry_dma.dma_typeSize,  (long long)retry->retry_dma.dma_cookie,  (long long)retry->retry_dma.dma_vproc);
71409 +           EPRINTF5 (DBG_RETRY, "%s:                  %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
71410 +                     (long long)retry->retry_dma.dma_srcAddr,  (long long)retry->retry_dma.dma_dstAddr,  (long long)retry->retry_dma.dma_srcEvent, 
71411 +                     (long long)retry->retry_dma.dma_dstEvent);
71412 +
71413 +           elan4_run_dma_cmd (rail->r_dma_ecq->ecq_cq, &retry->retry_dma);
71414 +           elan4_write_dword_cmd (rail->r_dma_ecq->ecq_cq, rail->r_main_addr + offsetof (EP4_RAIL_MAIN, r_dma_flowcnt), ++rail->r_dma_flowcnt);
71415 +
71416 +           spin_lock_irqsave (&rail->r_dma_lock, flags);
71417 +           list_del (&retry->retry_link);
71418 +           list_add (&retry->retry_link, &rail->r_dma_freelist);
71419 +           spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71420 +       }
71421 +    }
71422 + cant_do_more:
71423 +
71424 +    /* re-compute the next retry time */
71425 +    for (i = EP_RETRY_BASE; i < EP_NUM_RETRIES; i++)
71426 +    {
71427 +       if (! list_empty (&rail->r_dma_retrylist[i]))
71428 +       {
71429 +           EP4_DMA_RETRY *retry = list_entry (rail->r_dma_retrylist[i].next, EP4_DMA_RETRY, retry_link);
71430 +
71431 +           SET_NEXT_RUN_TIME (nextRunTime, retry->retry_time);
71432 +       }
71433 +    }
71434 +
71435 +    return nextRunTime;
71436 +}
71437 +
71438 +void
71439 +ep4_initialise_dma_retries (EP4_RAIL *rail)
71440 +{
71441 +    int i;
71442 +
71443 +    spin_lock_init (&rail->r_dma_lock);
71444 +    
71445 +    for (i = 0; i < EP_NUM_RETRIES; i++)
71446 +       INIT_LIST_HEAD (&rail->r_dma_retrylist[i]);
71447 +    
71448 +    INIT_LIST_HEAD (&rail->r_dma_freelist);
71449 +    
71450 +    rail->r_dma_ecq = ep4_alloc_ecq (rail, EP4_DMA_RETRY_CQSIZE);
71451 +    
71452 +    rail->r_dma_allocated = 0;
71453 +    rail->r_dma_reserved  = 0;
71454 +
71455 +    ep4_dma_retry_times[EP_RETRY_HIGH_PRI] = EP_RETRY_HIGH_PRI_TIME;
71456 +
71457 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
71458 +       ep4_dma_retry_times[EP_RETRY_HIGH_PRI_RETRY+i] = EP_RETRY_HIGH_PRI_TIME << i;
71459 +    
71460 +    ep4_dma_retry_times[EP_RETRY_LOW_PRI] = EP_RETRY_LOW_PRI_TIME;
71461 +
71462 +    for (i =0 ; i < EP_NUM_BACKOFF; i++)
71463 +       ep4_dma_retry_times[EP_RETRY_LOW_PRI_RETRY+i] = EP_RETRY_LOW_PRI_TIME << i;
71464 +    
71465 +    ep4_dma_retry_times[EP_RETRY_ANONYMOUS] = EP_RETRY_ANONYMOUS_TIME;
71466 +    ep4_dma_retry_times[EP_RETRY_NETERR]    = EP_RETRY_NETERR_TIME;
71467 +
71468 +    rail->r_dma_ops.op_func = ep4_retry_dmas;
71469 +    rail->r_dma_ops.op_arg  = NULL;
71470 +
71471 +    ep4_add_retry_ops (rail, &rail->r_dma_ops);
71472 +}
71473 +
71474 +void
71475 +ep4_finalise_dma_retries (EP4_RAIL *rail)
71476 +{
71477 +    ep4_remove_retry_ops (rail, &rail->r_dma_ops);
71478 +
71479 +    /* Everyone should have given back their retry dma's by now */
71480 +    EP4_ASSERT (rail, rail->r_dma_reserved == 0);
71481 +
71482 +    while (! list_empty (&rail->r_dma_freelist))
71483 +    {
71484 +       EP4_DMA_RETRY *retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
71485 +
71486 +       list_del (&retry->retry_link);
71487 +
71488 +       KMEM_FREE (retry, sizeof (EP4_DMA_RETRY));
71489 +    }
71490 +
71491 +    ep4_free_ecq (rail, rail->r_dma_ecq);
71492 +
71493 +    spin_lock_destroy (&rail->r_dma_lock);
71494 +}
71495 +
71496 +int
71497 +ep4_reserve_dma_retries (EP4_RAIL *rail, unsigned int count, EP_ATTRIBUTE attr)
71498 +{
71499 +    EP4_DMA_RETRY *retry;
71500 +    unsigned int   remaining = count;
71501 +    unsigned long  flags;
71502 +
71503 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71504 +
71505 +    if (remaining <= (rail->r_dma_allocated - rail->r_dma_reserved))
71506 +    {
71507 +       rail->r_dma_reserved += remaining;
71508 +
71509 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71510 +
71511 +       return 0;
71512 +    }
71513 +    
71514 +    remaining -= (rail->r_dma_allocated - rail->r_dma_reserved);
71515 +
71516 +    rail->r_dma_reserved = rail->r_dma_allocated;
71517 +
71518 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71519 +
71520 +    while (remaining > 0)
71521 +    {
71522 +       KMEM_ALLOC (retry, EP4_DMA_RETRY *, sizeof (EP4_DMA_RETRY), !(attr & EP_NO_SLEEP));
71523 +
71524 +       if (retry == NULL)
71525 +           goto failed;
71526 +       
71527 +       remaining--;
71528 +
71529 +       spin_lock_irqsave (&rail->r_dma_lock, flags);
71530 +       list_add (&retry->retry_link, &rail->r_dma_freelist);
71531 +
71532 +       rail->r_dma_allocated++;
71533 +       rail->r_dma_reserved++;
71534 +       spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71535 +    }
71536 +
71537 +    return 0;
71538 +
71539 + failed:
71540 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71541 +    rail->r_dma_reserved -= (count - remaining);
71542 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71543 +
71544 +    return 1;
71545 +}
71546 +
71547 +void
71548 +ep4_release_dma_retries (EP4_RAIL *rail, unsigned int count)
71549 +{
71550 +    unsigned long flags;
71551 +
71552 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71553 +    rail->r_dma_reserved -= count;
71554 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71555 +}
71556 +
71557 +void
71558 +ep4_queue_dma_retry (EP4_RAIL *rail, E4_DMA *dma, int interval)
71559 +{
71560 +    EP4_DMA_RETRY *retry;
71561 +    unsigned long  flags;
71562 +    
71563 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71564 +
71565 +    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
71566 +    
71567 +    /* take an item off the free list */
71568 +    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
71569 +
71570 +    list_del (&retry->retry_link);
71571 +    
71572 +    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_retry: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
71573 +             (long long)dma->dma_typeSize,  (long long)dma->dma_cookie,  (long long)dma->dma_vproc, (long long)dma->dma_srcAddr);
71574 +    EPRINTF5 (DBG_RETRY, "%s:                      %016llx %016llx %016llx (%d)\n", rail->r_generic.Name,
71575 +             (long long)dma->dma_dstAddr,  (long long)dma->dma_srcEvent, (long long)dma->dma_dstEvent, interval);
71576 +
71577 +    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
71578 +    retry->retry_dma.dma_cookie   = dma->dma_cookie;
71579 +    retry->retry_dma.dma_vproc    = dma->dma_vproc;
71580 +    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
71581 +    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
71582 +    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
71583 +    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
71584 +
71585 +    retry->retry_time             = lbolt + ep4_dma_retry_times[interval];
71586 +
71587 +    /* chain onto the end of the appropriate retry list */
71588 +    list_add_tail (&retry->retry_link, &rail->r_dma_retrylist[interval]);
71589 +
71590 +    ep_kthread_schedule (&rail->r_retry_thread, retry->retry_time);
71591 +
71592 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71593 +}
71594 +
71595 +void
71596 +ep4_queue_dma_stalled (EP4_RAIL *rail, E4_DMA *dma)
71597 +{
71598 +    EP_NODE_RAIL  *nodeRail = &rail->r_generic.Nodes[EP_VP_TO_NODE(dma->dma_vproc)];
71599 +    EP4_DMA_RETRY *retry;
71600 +    unsigned long  flags;
71601 +    
71602 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71603 +
71604 +    EP4_ASSERT (rail, !list_empty (&rail->r_dma_freelist));
71605 +    
71606 +    /* take an item off the free list */
71607 +    retry = list_entry (rail->r_dma_freelist.next, EP4_DMA_RETRY, retry_link);
71608 +
71609 +    list_del (&retry->retry_link);
71610 +    
71611 +    EPRINTF5 (DBG_RETRY, "%s: ep4_queue_dma_stalled: %016llx %016llx %016llx %016llx\n", rail->r_generic.Name,
71612 +             (long long)dma->dma_typeSize, (long long)dma->dma_cookie, (long long)dma->dma_vproc, (long long)dma->dma_srcAddr);
71613 +    EPRINTF4 (DBG_RETRY, "%s:                        %016llx %016llx %016llx\n", rail->r_generic.Name,
71614 +             (long long)dma->dma_dstAddr, (long long)dma->dma_srcEvent, (long long)dma->dma_dstEvent);
71615 +
71616 +    retry->retry_dma.dma_typeSize = dma->dma_typeSize;
71617 +    retry->retry_dma.dma_cookie   = dma->dma_cookie;
71618 +    retry->retry_dma.dma_vproc    = dma->dma_vproc;
71619 +    retry->retry_dma.dma_srcAddr  = dma->dma_srcAddr;
71620 +    retry->retry_dma.dma_dstAddr  = dma->dma_dstAddr;
71621 +    retry->retry_dma.dma_srcEvent = dma->dma_srcEvent;
71622 +    retry->retry_dma.dma_dstEvent = dma->dma_dstEvent;
71623 +
71624 +    /* chain onto the node cancelled dma list */
71625 +    list_add_tail (&retry->retry_link, &nodeRail->StalledDmas);
71626 +
71627 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71628 +}
71629 +
71630 +void
71631 +ep4_free_stalled_dmas (EP4_RAIL *rail, unsigned int nodeId)
71632 +{
71633 +    EP_NODE_RAIL *nodeRail = &rail->r_generic.Nodes[nodeId];
71634 +    struct list_head *el, *nel;
71635 +    unsigned long flags;
71636 +
71637 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71638 +    list_for_each_safe (el, nel, &nodeRail->StalledDmas) {
71639 +       list_del (el);
71640 +       list_add (el, &rail->r_dma_freelist);
71641 +    }
71642 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71643 +}
71644 +
71645 +void
71646 +ep4_display_rail (EP4_RAIL *rail)
71647 +{
71648 +    ELAN4_DEV        *dev = rail->r_ctxt.ctxt_dev;
71649 +    struct list_head *el;
71650 +    register int      i;
71651 +    unsigned long     flags;
71652 +
71653 +    ep_debugf (DBG_DEBUG, "%s: vendorid=%x deviceid=%x\n", rail->r_generic.Name, 
71654 +              rail->r_generic.Devinfo.dev_vendor_id, rail->r_generic.Devinfo.dev_device_id);
71655 +
71656 +    spin_lock_irqsave (&rail->r_ecq_lock, flags);
71657 +    for (i = 0; i < EP4_NUM_ECQ; i++)
71658 +    {
71659 +       list_for_each (el, &rail->r_ecq_list[i]) {
71660 +           EP4_ECQ *ecq = list_entry (el, EP4_ECQ, ecq_link);
71661 +           
71662 +           if (i == EP4_ECQ_EVENT)
71663 +               ep_debugf (DBG_DEBUG, "   ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d event=%llx,%llx,%llx\n",
71664 +                          i, ecq, elan4_cq2num (ecq->ecq_cq), (long long)ecq->ecq_addr, ecq->ecq_avail,
71665 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_CountAndType)),
71666 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WriteValue)),
71667 +                          elan4_sdram_readq (dev, ecq->ecq_event + offsetof (E4_Event32, ev_WritePtr)));
71668 +
71669 +           else
71670 +               ep_debugf (DBG_DEBUG, "   ECQ[%d] ecq=%p cqnum=%d addr=%llx avail=%d\n",
71671 +                          i, ecq, elan4_cq2num (ecq->ecq_cq), (long long)ecq->ecq_addr, ecq->ecq_avail);
71672 +       }
71673 +    }
71674 +    spin_unlock_irqrestore (&rail->r_ecq_lock, flags);
71675 +
71676 +    ep_debugf (DBG_DEBUG, "   flush count=%ld mcq=%p ecq=%p event %llx.%llx.%llx\n", 
71677 +              rail->r_flush_count, rail->r_flush_mcq, rail->r_flush_ecq,
71678 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_CountAndType)),
71679 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WritePtr)),
71680 +              elan4_sdram_readq (dev, rail->r_elan + offsetof (EP4_RAIL_ELAN, r_flush_event.ev_WriteValue)));
71681 +    
71682 +    spin_lock_irqsave (&rail->r_dma_lock, flags);
71683 +    for (i = 0; i < EP_NUM_RETRIES; i++)
71684 +    {
71685 +       list_for_each (el, &rail->r_dma_retrylist[i]) {
71686 +           EP4_DMA_RETRY *retry = list_entry (el, EP4_DMA_RETRY, retry_link);
71687 +           
71688 +           ep_debugf (DBG_DEBUG, "    RETRY[%d] typeSize %llx cookie %llx vproc %llx events %llx %llx\n",
71689 +                      i, (long long)retry->retry_dma.dma_typeSize, (long long)retry->retry_dma.dma_cookie,
71690 +                      (long long)retry->retry_dma.dma_vproc, (long long)retry->retry_dma.dma_srcEvent, (long long)retry->retry_dma.dma_dstEvent);
71691 +       }
71692 +    }
71693 +    spin_unlock_irqrestore (&rail->r_dma_lock, flags);
71694 +}
71695 diff -urN clean/drivers/net/qsnet/ep/threadcode.c linux-2.6.9/drivers/net/qsnet/ep/threadcode.c
71696 --- clean/drivers/net/qsnet/ep/threadcode.c     1969-12-31 19:00:00.000000000 -0500
71697 +++ linux-2.6.9/drivers/net/qsnet/ep/threadcode.c       2003-10-07 09:22:38.000000000 -0400
71698 @@ -0,0 +1,146 @@
71699 +/*
71700 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
71701 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
71702 + *
71703 + *    For licensing information please see the supplied COPYING file
71704 + *
71705 + */
71706 +
71707 +#ident "@(#)$Id: threadcode.c,v 1.11 2003/10/07 13:22:38 david Exp $"
71708 +/*      $Source: /cvs/master/quadrics/epmod/threadcode.c,v $ */
71709 +
71710 +#include <qsnet/kernel.h>
71711 +
71712 +#include <elan/kcomm.h>
71713 +
71714 +EP_ADDR
71715 +ep_symbol (EP_CODE *code, char *name)
71716 +{
71717 +    EP_SYMBOL *s = code->symbols;
71718 +    
71719 +    while (s->name && strcmp (s->name, name))
71720 +       s++;
71721 +    
71722 +    return (s->name ? s->value : (EP_ADDR) 0);
71723 +}
71724 +
71725 +int
71726 +ep_loadcode (EP_RAIL *rail, EP_CODE *code)
71727 +{
71728 +    register int i;
71729 +
71730 +    EP_ADDR  _stext  = ep_symbol (code, "_stext");
71731 +    EP_ADDR  _etext  = ep_symbol (code, "_etext");
71732 +    EP_ADDR  _sdata  = ep_symbol (code, "_sdata");
71733 +    EP_ADDR  _edata  = ep_symbol (code, "_edata");
71734 +    EP_ADDR  _end    = ep_symbol (code, "_end");
71735 +    EP_ADDR  _rodata = roundup (_etext, sizeof (uint64_t));
71736 +
71737 +    if (_stext == (EP_ADDR) 0 || _etext == (EP_ADDR) 0 ||
71738 +       _sdata == (EP_ADDR) 0 || _edata == (EP_ADDR) 0 ||
71739 +       _end == (EP_ADDR) 0)
71740 +    {
71741 +       printk ("ep_loadcode: symbols not defined correctly for code at %p\n", code);
71742 +       return (EINVAL);
71743 +    }
71744 +
71745 +    /*
71746 +     * Include the rodata in the text segment
71747 +     */
71748 +    _etext = _rodata + code->rodata_size;
71749 +
71750 +    /*
71751 +     * If _etext is in the same page as _sdata,  then allocate a contiguous
71752 +     * chunk of memory and map it as read/write. otherwise allocate two chunks
71753 +     * and map the code in as read-only.
71754 +     */
71755 +    if ((_etext & PAGEMASK) == (_sdata & PAGEMASK))
71756 +    {
71757 +       code->ntext  = btopr (_end - (_stext & PAGEMASK));
71758 +       code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0);
71759 +
71760 +       if (code->pptext == (sdramaddr_t) 0)
71761 +           return (ENOMEM);
71762 +       
71763 +       code->_stext  = code->pptext + (_stext & PAGEOFFSET);
71764 +       code->_rodata = code->_stext + (_rodata - _stext);
71765 +       code->_sdata  = code->_stext + (_sdata - _stext);
71766 +    }
71767 +    else
71768 +    {
71769 +       code->ntext  = btopr (_etext - (_stext & PAGEMASK));
71770 +       code->ndata  = btopr (_end - (_sdata & PAGEMASK));
71771 +
71772 +       if (code->ntext)
71773 +       {
71774 +           code->pptext = ep_alloc_memory_elan (rail, _stext & PAGEMASK, ptob (code->ntext), EP_PERM_EXECUTE, 0);
71775 +
71776 +           if (code->pptext == (sdramaddr_t) 0)
71777 +               return (ENOMEM);
71778 +
71779 +           code->_stext  = code->pptext + (_stext & PAGEOFFSET);
71780 +           code->_rodata = code->_stext + (_rodata - _stext);
71781 +       }
71782 +       
71783 +       if (code->ndata)
71784 +       {
71785 +           code->ppdata = ep_alloc_memory_elan (rail, _sdata & PAGEMASK, ptob (code->ndata), EP_PERM_WRITE, 0);
71786 +
71787 +           if (code->ppdata == (sdramaddr_t) 0)
71788 +           {
71789 +               if (code->ntext) ep_free_memory_elan (rail, _sdata & PAGEMASK);
71790 +               code->ntext = 0;
71791 +
71792 +               return (ENOMEM);
71793 +           }
71794 +           
71795 +           code->_sdata = code->ppdata + (_sdata & PAGEOFFSET);
71796 +       }
71797 +    }
71798 +    
71799 +#ifdef __LITTLE_ENDIAN__
71800 +#  define Flip 3
71801 +#else
71802 +#  define Flip  0
71803 +#endif
71804 +
71805 +    /*
71806 +     * Now copy the text and rodata into the SDRAM
71807 +     * this is linked into the module to be byte 
71808 +     * copied to the SDRAM, since we want to copy
71809 +     * with word accesses we have to do the byte
71810 +     * assembly correctly.
71811 +     */
71812 +    for (i = 0; i < code->text_size; i++)
71813 +       rail->Operations.SdramWriteb (rail, code->_stext + i, code->text[i^Flip]);
71814 +
71815 +    for (i = 0; i < code->rodata_size; i++)
71816 +       rail->Operations.SdramWriteb (rail, code->_rodata + i, code->rodata[i^Flip]);
71817 +    
71818 +    /*
71819 +     * And the initialised data segment.
71820 +     */
71821 +    for (i = 0; i < code->data_size; i++)
71822 +       rail->Operations.SdramWriteb (rail, code->_sdata + i, code->data[i^Flip]);
71823 +
71824 +    return (ESUCCESS);
71825 +}
71826 +
71827 +void
71828 +ep_unloadcode (EP_RAIL *rail, EP_CODE *code)
71829 +{
71830 +    EP_ADDR  _stext = ep_symbol (code, "_stext");
71831 +    EP_ADDR  _sdata = ep_symbol (code, "_sdata");
71832 +
71833 +    if (code->pptext)
71834 +       ep_free_memory_elan (rail, _stext & PAGEMASK);
71835 +    if (code->ppdata)
71836 +       ep_free_memory_elan (rail, _sdata & PAGEMASK);
71837 +    code->pptext = code->ppdata = 0;
71838 +}
71839 +
71840 +/*
71841 + * Local variables:
71842 + * c-file-style: "stroustrup"
71843 + * End:
71844 + */
71845 diff -urN clean/drivers/net/qsnet/ep/threadcode_elan3.c linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3.c
71846 --- clean/drivers/net/qsnet/ep/threadcode_elan3.c       1969-12-31 19:00:00.000000000 -0500
71847 +++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3.c 2003-10-07 09:22:38.000000000 -0400
71848 @@ -0,0 +1,85 @@
71849 +/*
71850 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
71851 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
71852 + *
71853 + *    For licensing information please see the supplied COPYING file
71854 + *
71855 + */
71856 +
71857 +#ident "@(#)$Id: threadcode_elan3.c,v 1.11 2003/10/07 13:22:38 david Exp $"
71858 +/*      $Source: /cvs/master/quadrics/epmod/threadcode_elan3.c,v $ */
71859 +
71860 +#include <qsnet/kernel.h>
71861 +
71862 +#include <elan/kcomm.h>
71863 +
71864 +#include "kcomm_elan3.h"
71865 +#include "debug.h"
71866 +
71867 +#include <elan3/thread.h>
71868 +
71869 +E3_Addr
71870 +ep3_init_thread (ELAN3_DEV  *dev,
71871 +                E3_Addr     fn,                                /* Elan address of function */
71872 +                E3_Addr     addr,                              /* Elan address of stack */
71873 +                sdramaddr_t stack,                             /* sdram address of stack */
71874 +                int           stackSize,                       /* stack size (in bytes) */
71875 +                int           nargs,
71876 +                ...)
71877 +{
71878 +    sdramaddr_t  frame;
71879 +    sdramaddr_t  regs;
71880 +    sdramaddr_t  argsp;
71881 +    int                 i;
71882 +    va_list      ap;
71883 +
71884 +    /*
71885 +     * Align the stack pointer at the top of the stack and leave space for a stack frame
71886 +     */
71887 +    stack = ((stack + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame);
71888 +    addr  = ((addr  + stackSize) & ~(E3_STACK_ALIGN-1)) - sizeof (E3_Frame);
71889 +
71890 +    va_start (ap, nargs);
71891 +
71892 +    if (nargs > 6)
71893 +    {
71894 +       stack -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1));
71895 +       addr  -= (((nargs*sizeof (E3_uint32))+E3_STACK_ALIGN-1) & ~(E3_STACK_ALIGN-1));
71896 +    }
71897 +    
71898 +    frame  = stack;
71899 +    regs   = stack - sizeof (E3_OutsRegs);
71900 +
71901 +    /*
71902 +     * Initialise the registers, and stack frame.
71903 +     */
71904 +    elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[6]), fn);
71905 +    elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[7]), 0);
71906 +    
71907 +    if (nargs <= 6)
71908 +    {
71909 +       for (i = 0; i < nargs; i++)
71910 +           elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32));
71911 +    }
71912 +    else
71913 +    {
71914 +       for (i = 0; i < 6; i++)
71915 +           elan3_sdram_writel (dev, regs + offsetof (E3_OutsRegs, o[i]), va_arg (ap, E3_uint32));
71916 +       
71917 +       for (argsp = frame + offsetof (E3_Frame, fr_argx[0]); i < nargs; i++, argsp += sizeof (E3_uint32))
71918 +           elan3_sdram_writel (dev, argsp, va_arg (ap, int));
71919 +    }
71920 +
71921 +    elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savefp), 0);
71922 +    elan3_sdram_writel (dev, frame + offsetof (E3_Frame, fr_savepc), 0);
71923 +
71924 +    va_end (ap);
71925 +
71926 +    return (addr);
71927 +}
71928 +
71929 +/*
71930 + * Local variables:
71931 + * c-file-style: "stroustrup"
71932 + * End:
71933 + */
71934 diff -urN clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.c linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.c
71935 --- clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.c 1969-12-31 19:00:00.000000000 -0500
71936 +++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.c   2005-09-07 10:39:44.000000000 -0400
71937 @@ -0,0 +1,112 @@
71938 +/* --------------------------------------------------------*/
71939 +/* MACHINE GENERATED ELAN CODE                             */
71940 +#include <qsnet/kernel.h>
71941 +#include <elan/kcomm.h>
71942 +#include "kcomm_elan3.h"
71943 +static uint32_t threadcode_elan3_text[] = {
71944 +0x80a0239c, 0x00001082, 0x00e0a280, 0x47008002, 0x0020a380, 0x20600288, 0x20200286, 0x43008002, 
71945 +0x00000001, 0x0a006081, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
71946 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
71947 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
71948 +0x00000001, 0x00000001, 0xa800c613, 0xa300c609, 0x0020108a, 0x0080900b, 0x00006885, 0x0580a080, 
71949 +0x06008002, 0x02a0a080, 0x06008022, 0xffff0296, 0x04008010, 0xff3f0398, 0x1f008010, 0x00201090, 
71950 +0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x00c03f3f, 
71951 +0xf8e017be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, 
71952 +0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x00e0a280, 
71953 +0xbfffbf12, 0x0020a380, 0x03008012, 0x02201090, 0x03201090, 0x08e0c381, 0x80a0039c, 0xe0a0239c, 
71954 +0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x080010b8, 0x090010b0, 0x0a0010b2, 0x04000037, 0x402006b4, 
71955 +0x50200690, 0x01201092, 0x20a0239c, 0x00a0a3f0, 0x00c03f3f, 0x8ce117be, 0x04e08f80, 0x06008012, 
71956 +0x00000001, 0x00c01ff8, 0x0000b081, 0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, 
71957 +0x00a083f0, 0x20a0039c, 0x582006d0, 0x0020a280, 0x05008002, 0x0900a280, 0x10008002, 0x50200690, 
71958 +0xeaffbf30, 0x5c2006d4, 0x18001090, 0x19001092, 0x1b800294, 0x0a201096, 0x8affff7f, 0x05201098, 
71959 +0x446026d0, 0x302027f4, 0xdfffbf10, 0x50200690, 0xfdffbf10, 0x446026c0, 0x5c2006e0, 0x0020a480, 
71960 +0xf9ffbf06, 0x18001090, 0x19001092, 0x1b000494, 0x14201096, 0x7bffff7f, 0x0a201098, 0x0020a280, 
71961 +0xf4ffbf22, 0x486026e0, 0x00007081, 0x1600801c, 0x00000001, 0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 
71962 +0x40a0a3e0, 0x00c03f3f, 0x60e217be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 0x0000a081, 
71963 +0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 0x00a083c0, 
71964 +0x60a0039c, 0xff3f84a0, 0xe0ffbf1c, 0x18001090, 0xd5ffbf30, 0x60a003de, 0x80a083e0, 0xa0a083f0, 
71965 +0x08e0c381, 0xe0a0039c, 0x00a1239c, 0x60a023de, 0x80a0a3e0, 0xa0a0a3f0, 0x44a123d0, 0x090010b0, 
71966 +0x0a0010b6, 0x0b0010b8, 0x0c0010b4, 0x012010ba, 0xdca023fa, 0x142007d2, 0x082007d0, 0x084002b2, 
71967 +0x000027c0, 0xf42006d0, 0x0020a280, 0x15008032, 0xf42006d0, 0x18200790, 0xdca003d2, 0x20a0239c, 
71968 +0x00a0a3f0, 0x00c03f3f, 0x20e317be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ff8, 0x0000b081, 
71969 +0x06008010, 0x00a083f0, 0x14e007be, 0x00c01ff8, 0x0000b081, 0x00a083f0, 0x20a0039c, 0xf42006d0, 
71970 +0x0020a280, 0x0a008022, 0xdca023c0, 0x042007d0, 0x0840a680, 0x06008032, 0xdca023c0, 0x18001082, 
71971 +0x0220d091, 0xe1ffbf10, 0xf42006d0, 0x06008010, 0x190010a2, 0x042006d0, 0x00c026d0, 0x18001082, 
71972 +0x0020d091, 0x042006d0, 0x01200290, 0x042026d0, 0x000006d0, 0x0020a280, 0x04008002, 0x18001090, 
71973 +0x4f010040, 0x1b001092, 0xf02006e0, 0x0020a480, 0xf1ffbf02, 0x40b03611, 0x004004d2, 0x01201290, 
71974 +0x0840a280, 0x0e018012, 0x10001096, 0x046004d0, 0x01208a80, 0x33008002, 0xa0200484, 0x0c2610ba, 
71975 +0x000024fa, 0x00211090, 0x042024d0, 0x246004d0, 0x80200290, 0x082024d0, 0xec2004d0, 0x00210290, 
71976 +0x0c2024d0, 0x102024c4, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, 
71977 +0xc0ff3f3b, 0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, 
71978 +0x142024d0, 0x206004d0, 0x10210290, 0x182024d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, 
71979 +0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, 
71980 +0x08401292, 0x4000003b, 0x1d401292, 0x1c2024d2, 0x01201090, 0xa02024d0, 0x20200496, 0xa8200484, 
71981 +0x306004d0, 0x0020a280, 0x2b008012, 0x00201098, 0x0c2610ba, 0x00c022fa, 0x04e022c0, 0xc0200490, 
71982 +0x10e022d0, 0x186004d2, 0x02602a93, 0x098006d0, 0x0001003b, 0x1d000290, 0x098026d0, 0xc0ff3f3b, 
71983 +0x1d000a90, 0x44a103fa, 0x606007d2, 0x00680292, 0x09001290, 0x4000003b, 0x1d001290, 0x14e022d0, 
71984 +0x206004d0, 0x10210290, 0x18e022d0, 0x186004d0, 0x02202a91, 0x088006d2, 0x0001003b, 0x1d400292, 
71985 +0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x186004d0, 0x00280290, 0x80000015, 0x0a001290, 0x08401292, 
71986 +0x4000003b, 0x1d401292, 0x1ce022d2, 0x4f008010, 0x0020109a, 0x0c00109a, 0x306004d0, 0x0840a380, 
71987 +0x3b00801a, 0xe02004c6, 0x0c2610ba, 0x00c022fa, 0x01202b91, 0x0c000290, 0x02202a91, 0x08400490, 
71988 +0x382002d2, 0x04e022d2, 0x342002d0, 0x08e022d0, 0x0ce022c6, 0x10e022c4, 0x186004d0, 0x02202a91, 
71989 +0x088006d2, 0x0001003b, 0x1d400292, 0x088026d2, 0xc0ff3f3b, 0x1d400a92, 0x44a103fa, 0x606007d0, 
71990 +0x00280290, 0x08401292, 0x4000003b, 0x1d401292, 0x14e022d2, 0x206004d0, 0x10210290, 0x18e022d0, 
71991 +0x186004d0, 0x02202a91, 0x088006d4, 0x0001003b, 0x1d800294, 0x088026d4, 0xc0ff3f3b, 0x1d800a94, 
71992 +0x186004d0, 0x00280290, 0x80000013, 0x09001290, 0x08801294, 0x4000003b, 0x1d801294, 0x1ce022d4, 
71993 +0x01201090, 0x008020d0, 0x04e002d0, 0x08c00086, 0x0840039a, 0x01200398, 0x20e00296, 0x306004d0, 
71994 +0x0800a380, 0xc9ffbf0a, 0x08a00084, 0xc0200490, 0xf0ff22d0, 0xe42004d0, 0x0d00a280, 0x0b00801a, 
71995 +0x00201098, 0x04008010, 0x10001096, 0x01200398, 0x20e00296, 0x306004d0, 0x0800a380, 0xfcffbf2a, 
71996 +0x04e022c0, 0xfc3f109a, 0xe42024da, 0x10001082, 0x186004d0, 0x00280290, 0x08006081, 0x00000001, 
71997 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
71998 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
71999 +0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00201098, 
72000 +0x0c00109a, 0x142004fa, 0xec00823b, 0x3080d61b, 0x00006891, 0x0420a280, 0x3b008002, 0x0c00a280, 
72001 +0x04008002, 0x00000001, 0x0120d091, 0x36008030, 0x7c2006d0, 0x01200290, 0x7c2026d0, 0x782006d0, 
72002 +0x0020a280, 0x04008002, 0x78200690, 0x64000040, 0x40e00692, 0xf02004d0, 0x0020a280, 0x03008012, 
72003 +0xf02026d0, 0x80e026c0, 0x7c2006d0, 0x40e026d0, 0x046004d0, 0x04208a80, 0x13008002, 0x1100108a, 
72004 +0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, 0x406099e0, 
72005 +0x40a0b9e0, 0x806099e0, 0x80a0b9e0, 0xc06099e0, 0xc0a0b9e0, 0x00809be0, 0x0780039c, 0x0e008010, 
72006 +0xec2004d2, 0xec2004cc, 0x3fa00b8e, 0x40e0018e, 0x0780239c, 0x0080bbe0, 0x006099e0, 0x00a0b9e0, 
72007 +0x406099e0, 0x40a0b9e0, 0x00809be0, 0x0780039c, 0xec2004d2, 0xe42004d0, 0x886222d0, 0x042006d0, 
72008 +0x00c026d0, 0x000007d0, 0x01208a80, 0x05008012, 0x00000001, 0x142027f2, 0x06008010, 0xdca003fa, 
72009 +0x142027f2, 0xfe3f0a90, 0x000027d0, 0xdca003fa, 0x016007ba, 0xdca023fa, 0x0c2007d0, 0x0840a680, 
72010 +0x04008032, 0x082007d0, 0x03008010, 0x102007f2, 0x084006b2, 0x00007081, 0x1600801c, 0x00000001, 
72011 +0x60a0239c, 0x00a0a3c0, 0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x8ce017be, 0x04e08f80, 0x06008012, 
72012 +0x00000001, 0x00c01ffc, 0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 
72013 +0x40a083e0, 0x20a083f0, 0x00a083c0, 0x60a0039c, 0x042007d0, 0x0840a680, 0xb3febf12, 0x190010a2, 
72014 +0x8afebf10, 0xf42006d0, 0x60a003de, 0x80a083e0, 0xa0a083f0, 0x08e0c381, 0x00a1039c, 0x80a0239c, 
72015 +0x042002c4, 0x004022c4, 0x18008030, 0x00007081, 0x16008012, 0x00000001, 0x60a0239c, 0x00a0a3c0, 
72016 +0x20a0a3f0, 0x40a0a3e0, 0x02c03f3f, 0x24e117be, 0x04e08f80, 0x06008012, 0x00000001, 0x00c01ffc, 
72017 +0x0000a081, 0x06008010, 0x40a083e0, 0x14e007be, 0x00c01ffc, 0x0000a081, 0x40a083e0, 0x20a083f0, 
72018 +0x00a083c0, 0x60a0039c, 0x000002c4, 0x00a0a080, 0xe7ffbf12, 0x00000001, 0x042002c4, 0x01a00084, 
72019 +0x042022c4, 0x000002c4, 0x00a0a080, 0xddffbf12, 0x00000001, 0x08e0c381, 0x80a0039c, };
72020 +#define threadcode_elan3_text_size 0x97c
72021 +static uint32_t threadcode_elan3_data[] = {
72022 +0};
72023 +#define threadcode_elan3_data_size 0x0
72024 +static uint32_t threadcode_elan3_rodata[] = {
72025 +0};
72026 +#define threadcode_elan3_rodata_size 0x0
72027 +static EP_SYMBOL threadcode_elan3_symbols[] = {
72028 +    {"__bss_start", 0xff00297c},
72029 +    {"_edata", 0xff00297c},
72030 +    {"_end", 0xff002988},
72031 +    {"_etext", 0xff00097c},
72032 +    {"_sdata", 0xff00297c},
72033 +    {"_stext", 0xff000000},
72034 +    {"ep3_spinblock", 0xff0008dc},
72035 +    {"ep3comms_rcvr", 0xff0002a8},
72036 +    {"kcomm_probe", 0xff00013c},
72037 +    {"r", 0xff00297c},
72038 +    {"rail", 0xff002984},
72039 +    {"rm", 0xff002980},
72040 +    {0, 0}};
72041 +EP_CODE threadcode_elan3 = {
72042 +   (unsigned char *) threadcode_elan3_text,
72043 +   threadcode_elan3_text_size,
72044 +   (unsigned char *) threadcode_elan3_data,
72045 +   threadcode_elan3_data_size,
72046 +   (unsigned char *) threadcode_elan3_rodata,
72047 +   threadcode_elan3_rodata_size,
72048 +   threadcode_elan3_symbols,
72049 +};
72050 diff -urN clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis
72051 --- clean/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis  1969-12-31 19:00:00.000000000 -0500
72052 +++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan3_Linux.code.dis    2005-09-07 10:39:44.000000000 -0400
72053 @@ -0,0 +1,620 @@
72054 +
72055 +threadcode_elan3_Linux.code:     file format elf32-elan
72056 +
72057 +Disassembly of section .text:
72058 +
72059 +ff000000 <_stext>:
72060 +ff000000:      80 a0 23 9c     sub  %sp, 0x80, %sp
72061 +ff000004:      00 00 10 82     mov  %g0, %g1
72062 +ff000008:      00 e0 a2 80     cmp  %o3, 0
72063 +ff00000c:      47 00 80 02     be  ff000128 <_stext+0x128>
72064 +ff000010:      00 20 a3 80     cmp  %o4, 0
72065 +ff000014:      20 60 02 88     add  %o1, 0x20, %g4
72066 +ff000018:      20 20 02 86     add  %o0, 0x20, %g3
72067 +ff00001c:      43 00 80 02     be  ff000128 <_stext+0x128>
72068 +ff000020:      00 00 00 01     nop 
72069 +ff000024:      0a 00 60 81     open  %o2
72070 +ff000028:      00 00 00 01     nop 
72071 +ff00002c:      00 00 00 01     nop 
72072 +ff000030:      00 00 00 01     nop 
72073 +ff000034:      00 00 00 01     nop 
72074 +ff000038:      00 00 00 01     nop 
72075 +ff00003c:      00 00 00 01     nop 
72076 +ff000040:      00 00 00 01     nop 
72077 +ff000044:      00 00 00 01     nop 
72078 +ff000048:      00 00 00 01     nop 
72079 +ff00004c:      00 00 00 01     nop 
72080 +ff000050:      00 00 00 01     nop 
72081 +ff000054:      00 00 00 01     nop 
72082 +ff000058:      00 00 00 01     nop 
72083 +ff00005c:      00 00 00 01     nop 
72084 +ff000060:      00 00 00 01     nop 
72085 +ff000064:      00 00 00 01     nop 
72086 +ff000068:      00 00 00 01     nop 
72087 +ff00006c:      00 00 00 01     nop 
72088 +ff000070:      00 00 00 01     nop 
72089 +ff000074:      00 00 00 01     nop 
72090 +ff000078:      00 00 00 01     nop 
72091 +ff00007c:      00 00 00 01     nop 
72092 +ff000080:      00 00 00 01     nop 
72093 +ff000084:      00 00 00 01     nop 
72094 +ff000088:      a8 00 c6 13     sendtrans  0x3005<TRACEROUTE>, [ %o0 ], %o1
72095 +ff00008c:      a3 00 c6 09     sendtrans  0x3005<TRACEROUTE>, [ %g3 ], %g4
72096 +ff000090:      00 20 10 8a     clr  %g5
72097 +ff000094:      00 80 90 0b     sendtrans  0x8400<SETEVENT|SENDACK|LAST_TRANS>, %g0, %g5
72098 +ff000098:      00 00 68 85     close  %g2
72099 +ff00009c:      05 80 a0 80     cmp  %g2, %g5
72100 +ff0000a0:      06 00 80 02     be  ff0000b8 <_stext+0xb8>
72101 +ff0000a4:      02 a0 a0 80     cmp  %g2, 2
72102 +ff0000a8:      06 00 80 22     be,a   ff0000c0 <_stext+0xc0>
72103 +ff0000ac:      ff ff 02 96     add  %o3, -1, %o3
72104 +ff0000b0:      04 00 80 10     b  ff0000c0 <_stext+0xc0>
72105 +ff0000b4:      ff 3f 03 98     add  %o4, -1, %o4
72106 +ff0000b8:      1f 00 80 10     b  ff000134 <_stext+0x134>
72107 +ff0000bc:      00 20 10 90     clr  %o0
72108 +ff0000c0:      00 00 70 81     breaktest 
72109 +ff0000c4:      16 00 80 1c     bpos  ff00011c <_stext+0x11c>
72110 +ff0000c8:      00 00 00 01     nop 
72111 +ff0000cc:      60 a0 23 9c     sub  %sp, 0x60, %sp
72112 +ff0000d0:      00 a0 a3 c0     stblock32  %g0, [ %sp ]
72113 +ff0000d4:      20 a0 a3 f0     stblock32  %i0, [ %sp + 0x20 ]
72114 +ff0000d8:      40 a0 a3 e0     stblock32  %l0, [ %sp + 0x40 ]
72115 +ff0000dc:      00 c0 3f 3f     sethi  %hi(0xff000000), %i7
72116 +ff0000e0:      f8 e0 17 be     or  %i7, 0xf8, %i7      ! ff0000f8 <_stext+0xf8>
72117 +ff0000e4:      04 e0 8f 80     btst  4, %i7
72118 +ff0000e8:      06 00 80 12     bne  ff000100 <_stext+0x100>
72119 +ff0000ec:      00 00 00 01     nop 
72120 +ff0000f0:      00 c0 1f fc     ldd  [ %i7 ], %fp
72121 +ff0000f4:      00 00 a0 81     break 
72122 +ff0000f8:      06 00 80 10     b  ff000110 <_stext+0x110>
72123 +ff0000fc:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72124 +ff000100:      14 e0 07 be     add  %i7, 0x14, %i7
72125 +ff000104:      00 c0 1f fc     ldd  [ %i7 ], %fp
72126 +ff000108:      00 00 a0 81     break 
72127 +ff00010c:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72128 +ff000110:      20 a0 83 f0     ldblock32  [ %sp + 0x20 ], %i0
72129 +ff000114:      00 a0 83 c0     ldblock32  [ %sp ], %g0
72130 +ff000118:      60 a0 03 9c     add  %sp, 0x60, %sp
72131 +ff00011c:      00 e0 a2 80     cmp  %o3, 0
72132 +ff000120:      bf ff bf 12     bne  ff00001c <_stext+0x1c>
72133 +ff000124:      00 20 a3 80     cmp  %o4, 0
72134 +ff000128:      03 00 80 12     bne  ff000134 <_stext+0x134>
72135 +ff00012c:      02 20 10 90     mov  2, %o0
72136 +ff000130:      03 20 10 90     mov  3, %o0
72137 +ff000134:      08 e0 c3 81     retl 
72138 +ff000138:      80 a0 03 9c     add  %sp, 0x80, %sp
72139 +
72140 +ff00013c <kcomm_probe>:
72141 +ff00013c:      e0 a0 23 9c     sub  %sp, 0xe0, %sp
72142 +ff000140:      60 a0 23 de     st  %o7, [ %sp + 0x60 ]
72143 +ff000144:      80 a0 a3 e0     stblock32  %l0, [ %sp + 0x80 ]
72144 +ff000148:      a0 a0 a3 f0     stblock32  %i0, [ %sp + 0xa0 ]
72145 +ff00014c:      08 00 10 b8     mov  %o0, %i4
72146 +ff000150:      09 00 10 b0     mov  %o1, %i0
72147 +ff000154:      0a 00 10 b2     mov  %o2, %i1
72148 +ff000158:      04 00 00 37     sethi  %hi(0x1000), %i3
72149 +ff00015c:      40 20 06 b4     add  %i0, 0x40, %i2
72150 +ff000160:      50 20 06 90     add  %i0, 0x50, %o0
72151 +ff000164:      01 20 10 92     mov  1, %o1
72152 +ff000168:      20 a0 23 9c     sub  %sp, 0x20, %sp
72153 +ff00016c:      00 a0 a3 f0     stblock32  %i0, [ %sp ]
72154 +ff000170:      00 c0 3f 3f     sethi  %hi(0xff000000), %i7
72155 +ff000174:      8c e1 17 be     or  %i7, 0x18c, %i7     ! ff00018c <kcomm_probe+0x50>
72156 +ff000178:      04 e0 8f 80     btst  4, %i7
72157 +ff00017c:      06 00 80 12     bne  ff000194 <kcomm_probe+0x58>
72158 +ff000180:      00 00 00 01     nop 
72159 +ff000184:      00 c0 1f f8     ldd  [ %i7 ], %i4
72160 +ff000188:      00 00 b0 81     waitevent 
72161 +ff00018c:      06 00 80 10     b  ff0001a4 <kcomm_probe+0x68>
72162 +ff000190:      00 a0 83 f0     ldblock32  [ %sp ], %i0
72163 +ff000194:      14 e0 07 be     add  %i7, 0x14, %i7
72164 +ff000198:      00 c0 1f f8     ldd  [ %i7 ], %i4
72165 +ff00019c:      00 00 b0 81     waitevent 
72166 +ff0001a0:      00 a0 83 f0     ldblock32  [ %sp ], %i0
72167 +ff0001a4:      20 a0 03 9c     add  %sp, 0x20, %sp
72168 +ff0001a8:      58 20 06 d0     ld  [ %i0 + 0x58 ], %o0
72169 +ff0001ac:      00 20 a2 80     cmp  %o0, 0
72170 +ff0001b0:      05 00 80 02     be  ff0001c4 <kcomm_probe+0x88>
72171 +ff0001b4:      09 00 a2 80     cmp  %o0, %o1
72172 +ff0001b8:      10 00 80 02     be  ff0001f8 <kcomm_probe+0xbc>
72173 +ff0001bc:      50 20 06 90     add  %i0, 0x50, %o0
72174 +ff0001c0:      ea ff bf 30     b,a   ff000168 <kcomm_probe+0x2c>
72175 +ff0001c4:      5c 20 06 d4     ld  [ %i0 + 0x5c ], %o2
72176 +ff0001c8:      18 00 10 90     mov  %i0, %o0
72177 +ff0001cc:      19 00 10 92     mov  %i1, %o1
72178 +ff0001d0:      1b 80 02 94     add  %o2, %i3, %o2
72179 +ff0001d4:      0a 20 10 96     mov  0xa, %o3
72180 +ff0001d8:      8a ff ff 7f     call  ff000000 <_stext>
72181 +ff0001dc:      05 20 10 98     mov  5, %o4
72182 +ff0001e0:      44 60 26 d0     st  %o0, [ %i1 + 0x44 ]
72183 +ff0001e4:      30 20 27 f4     st  %i2, [ %i4 + 0x30 ]
72184 +ff0001e8:      df ff bf 10     b  ff000164 <kcomm_probe+0x28>
72185 +ff0001ec:      50 20 06 90     add  %i0, 0x50, %o0
72186 +ff0001f0:      fd ff bf 10     b  ff0001e4 <kcomm_probe+0xa8>
72187 +ff0001f4:      44 60 26 c0     clr  [ %i1 + 0x44 ]
72188 +ff0001f8:      5c 20 06 e0     ld  [ %i0 + 0x5c ], %l0
72189 +ff0001fc:      00 20 a4 80     cmp  %l0, 0
72190 +ff000200:      f9 ff bf 06     bl  ff0001e4 <kcomm_probe+0xa8>
72191 +ff000204:      18 00 10 90     mov  %i0, %o0
72192 +ff000208:      19 00 10 92     mov  %i1, %o1
72193 +ff00020c:      1b 00 04 94     add  %l0, %i3, %o2
72194 +ff000210:      14 20 10 96     mov  0x14, %o3
72195 +ff000214:      7b ff ff 7f     call  ff000000 <_stext>
72196 +ff000218:      0a 20 10 98     mov  0xa, %o4
72197 +ff00021c:      00 20 a2 80     cmp  %o0, 0
72198 +ff000220:      f4 ff bf 22     be,a   ff0001f0 <kcomm_probe+0xb4>
72199 +ff000224:      48 60 26 e0     st  %l0, [ %i1 + 0x48 ]
72200 +ff000228:      00 00 70 81     breaktest 
72201 +ff00022c:      16 00 80 1c     bpos  ff000284 <kcomm_probe+0x148>
72202 +ff000230:      00 00 00 01     nop 
72203 +ff000234:      60 a0 23 9c     sub  %sp, 0x60, %sp
72204 +ff000238:      00 a0 a3 c0     stblock32  %g0, [ %sp ]
72205 +ff00023c:      20 a0 a3 f0     stblock32  %i0, [ %sp + 0x20 ]
72206 +ff000240:      40 a0 a3 e0     stblock32  %l0, [ %sp + 0x40 ]
72207 +ff000244:      00 c0 3f 3f     sethi  %hi(0xff000000), %i7
72208 +ff000248:      60 e2 17 be     or  %i7, 0x260, %i7     ! ff000260 <kcomm_probe+0x124>
72209 +ff00024c:      04 e0 8f 80     btst  4, %i7
72210 +ff000250:      06 00 80 12     bne  ff000268 <kcomm_probe+0x12c>
72211 +ff000254:      00 00 00 01     nop 
72212 +ff000258:      00 c0 1f fc     ldd  [ %i7 ], %fp
72213 +ff00025c:      00 00 a0 81     break 
72214 +ff000260:      06 00 80 10     b  ff000278 <kcomm_probe+0x13c>
72215 +ff000264:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72216 +ff000268:      14 e0 07 be     add  %i7, 0x14, %i7
72217 +ff00026c:      00 c0 1f fc     ldd  [ %i7 ], %fp
72218 +ff000270:      00 00 a0 81     break 
72219 +ff000274:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72220 +ff000278:      20 a0 83 f0     ldblock32  [ %sp + 0x20 ], %i0
72221 +ff00027c:      00 a0 83 c0     ldblock32  [ %sp ], %g0
72222 +ff000280:      60 a0 03 9c     add  %sp, 0x60, %sp
72223 +ff000284:      ff 3f 84 a0     addcc  %l0, -1, %l0
72224 +ff000288:      e0 ff bf 1c     bpos  ff000208 <kcomm_probe+0xcc>
72225 +ff00028c:      18 00 10 90     mov  %i0, %o0
72226 +ff000290:      d5 ff bf 30     b,a   ff0001e4 <kcomm_probe+0xa8>
72227 +ff000294:      60 a0 03 de     ld  [ %sp + 0x60 ], %o7
72228 +ff000298:      80 a0 83 e0     ldblock32  [ %sp + 0x80 ], %l0
72229 +ff00029c:      a0 a0 83 f0     ldblock32  [ %sp + 0xa0 ], %i0
72230 +ff0002a0:      08 e0 c3 81     retl 
72231 +ff0002a4:      e0 a0 03 9c     add  %sp, 0xe0, %sp
72232 +
72233 +ff0002a8 <ep3comms_rcvr>:
72234 +ff0002a8:      00 a1 23 9c     sub  %sp, 0x100, %sp
72235 +ff0002ac:      60 a0 23 de     st  %o7, [ %sp + 0x60 ]
72236 +ff0002b0:      80 a0 a3 e0     stblock32  %l0, [ %sp + 0x80 ]
72237 +ff0002b4:      a0 a0 a3 f0     stblock32  %i0, [ %sp + 0xa0 ]
72238 +ff0002b8:      44 a1 23 d0     st  %o0, [ %sp + 0x144 ]
72239 +ff0002bc:      09 00 10 b0     mov  %o1, %i0
72240 +ff0002c0:      0a 00 10 b6     mov  %o2, %i3
72241 +ff0002c4:      0b 00 10 b8     mov  %o3, %i4
72242 +ff0002c8:      0c 00 10 b4     mov  %o4, %i2
72243 +ff0002cc:      01 20 10 ba     mov  1, %i5
72244 +ff0002d0:      dc a0 23 fa     st  %i5, [ %sp + 0xdc ]
72245 +ff0002d4:      14 20 07 d2     ld  [ %i4 + 0x14 ], %o1
72246 +ff0002d8:      08 20 07 d0     ld  [ %i4 + 8 ], %o0
72247 +ff0002dc:      08 40 02 b2     add  %o1, %o0, %i1
72248 +ff0002e0:      00 00 27 c0     clr  [ %i4 ]
72249 +ff0002e4:      f4 20 06 d0     ld  [ %i0 + 0xf4 ], %o0
72250 +ff0002e8:      00 20 a2 80     cmp  %o0, 0
72251 +ff0002ec:      15 00 80 32     bne,a   ff000340 <ep3comms_rcvr+0x98>
72252 +ff0002f0:      f4 20 06 d0     ld  [ %i0 + 0xf4 ], %o0
72253 +ff0002f4:      18 20 07 90     add  %i4, 0x18, %o0
72254 +ff0002f8:      dc a0 03 d2     ld  [ %sp + 0xdc ], %o1
72255 +ff0002fc:      20 a0 23 9c     sub  %sp, 0x20, %sp
72256 +ff000300:      00 a0 a3 f0     stblock32  %i0, [ %sp ]
72257 +ff000304:      00 c0 3f 3f     sethi  %hi(0xff000000), %i7
72258 +ff000308:      20 e3 17 be     or  %i7, 0x320, %i7     ! ff000320 <ep3comms_rcvr+0x78>
72259 +ff00030c:      04 e0 8f 80     btst  4, %i7
72260 +ff000310:      06 00 80 12     bne  ff000328 <ep3comms_rcvr+0x80>
72261 +ff000314:      00 00 00 01     nop 
72262 +ff000318:      00 c0 1f f8     ldd  [ %i7 ], %i4
72263 +ff00031c:      00 00 b0 81     waitevent 
72264 +ff000320:      06 00 80 10     b  ff000338 <ep3comms_rcvr+0x90>
72265 +ff000324:      00 a0 83 f0     ldblock32  [ %sp ], %i0
72266 +ff000328:      14 e0 07 be     add  %i7, 0x14, %i7
72267 +ff00032c:      00 c0 1f f8     ldd  [ %i7 ], %i4
72268 +ff000330:      00 00 b0 81     waitevent 
72269 +ff000334:      00 a0 83 f0     ldblock32  [ %sp ], %i0
72270 +ff000338:      20 a0 03 9c     add  %sp, 0x20, %sp
72271 +ff00033c:      f4 20 06 d0     ld  [ %i0 + 0xf4 ], %o0
72272 +ff000340:      00 20 a2 80     cmp  %o0, 0
72273 +ff000344:      0a 00 80 22     be,a   ff00036c <ep3comms_rcvr+0xc4>
72274 +ff000348:      dc a0 23 c0     clr  [ %sp + 0xdc ]
72275 +ff00034c:      04 20 07 d0     ld  [ %i4 + 4 ], %o0
72276 +ff000350:      08 40 a6 80     cmp  %i1, %o0
72277 +ff000354:      06 00 80 32     bne,a   ff00036c <ep3comms_rcvr+0xc4>
72278 +ff000358:      dc a0 23 c0     clr  [ %sp + 0xdc ]
72279 +ff00035c:      18 00 10 82     mov  %i0, %g1
72280 +ff000360:      02 20 d0 91     ta  2
72281 +ff000364:      e1 ff bf 10     b  ff0002e8 <ep3comms_rcvr+0x40>
72282 +ff000368:      f4 20 06 d0     ld  [ %i0 + 0xf4 ], %o0
72283 +ff00036c:      06 00 80 10     b  ff000384 <ep3comms_rcvr+0xdc>
72284 +ff000370:      19 00 10 a2     mov  %i1, %l1
72285 +ff000374:      04 20 06 d0     ld  [ %i0 + 4 ], %o0
72286 +ff000378:      00 c0 26 d0     st  %o0, [ %i3 ]
72287 +ff00037c:      18 00 10 82     mov  %i0, %g1
72288 +ff000380:      00 20 d0 91     ta  0
72289 +ff000384:      04 20 06 d0     ld  [ %i0 + 4 ], %o0
72290 +ff000388:      01 20 02 90     inc  %o0
72291 +ff00038c:      04 20 26 d0     st  %o0, [ %i0 + 4 ]
72292 +ff000390:      00 00 06 d0     ld  [ %i0 ], %o0
72293 +ff000394:      00 20 a2 80     cmp  %o0, 0
72294 +ff000398:      04 00 80 02     be  ff0003a8 <ep3comms_rcvr+0x100>
72295 +ff00039c:      18 00 10 90     mov  %i0, %o0
72296 +ff0003a0:      4f 01 00 40     call  ff0008dc <ep3_spinblock>
72297 +ff0003a4:      1b 00 10 92     mov  %i3, %o1
72298 +ff0003a8:      f0 20 06 e0     ld  [ %i0 + 0xf0 ], %l0
72299 +ff0003ac:      00 20 a4 80     cmp  %l0, 0
72300 +ff0003b0:      f1 ff bf 02     be  ff000374 <ep3comms_rcvr+0xcc>
72301 +ff0003b4:      40 b0 36 11     sethi  %hi(0xdac10000), %o0
72302 +ff0003b8:      00 40 04 d2     ld  [ %l1 ], %o1
72303 +ff0003bc:      01 20 12 90     or  %o0, 1, %o0
72304 +ff0003c0:      08 40 a2 80     cmp  %o1, %o0
72305 +ff0003c4:      0e 01 80 12     bne  ff0007fc <ep3comms_rcvr+0x554>
72306 +ff0003c8:      10 00 10 96     mov  %l0, %o3
72307 +ff0003cc:      04 60 04 d0     ld  [ %l1 + 4 ], %o0
72308 +ff0003d0:      01 20 8a 80     btst  1, %o0
72309 +ff0003d4:      33 00 80 02     be  ff0004a0 <ep3comms_rcvr+0x1f8>
72310 +ff0003d8:      a0 20 04 84     add  %l0, 0xa0, %g2
72311 +ff0003dc:      0c 26 10 ba     mov  0x60c, %i5
72312 +ff0003e0:      00 00 24 fa     st  %i5, [ %l0 ]
72313 +ff0003e4:      00 21 10 90     mov  0x100, %o0
72314 +ff0003e8:      04 20 24 d0     st  %o0, [ %l0 + 4 ]
72315 +ff0003ec:      24 60 04 d0     ld  [ %l1 + 0x24 ], %o0
72316 +ff0003f0:      80 20 02 90     add  %o0, 0x80, %o0
72317 +ff0003f4:      08 20 24 d0     st  %o0, [ %l0 + 8 ]
72318 +ff0003f8:      ec 20 04 d0     ld  [ %l0 + 0xec ], %o0
72319 +ff0003fc:      00 21 02 90     add  %o0, 0x100, %o0
72320 +ff000400:      0c 20 24 d0     st  %o0, [ %l0 + 0xc ]
72321 +ff000404:      10 20 24 c4     st  %g2, [ %l0 + 0x10 ]
72322 +ff000408:      18 60 04 d2     ld  [ %l1 + 0x18 ], %o1
72323 +ff00040c:      02 60 2a 93     sll  %o1, 2, %o1
72324 +ff000410:      09 80 06 d0     ld  [ %i2 + %o1 ], %o0
72325 +ff000414:      00 01 00 3b     sethi  %hi(0x40000), %i5
72326 +ff000418:      1d 00 02 90     add  %o0, %i5, %o0
72327 +ff00041c:      09 80 26 d0     st  %o0, [ %i2 + %o1 ]
72328 +ff000420:      c0 ff 3f 3b     sethi  %hi(0xffff0000), %i5
72329 +ff000424:      1d 00 0a 90     and  %o0, %i5, %o0
72330 +ff000428:      44 a1 03 fa     ld  [ %sp + 0x144 ], %i5
72331 +ff00042c:      60 60 07 d2     ld  [ %i5 + 0x60 ], %o1
72332 +ff000430:      00 68 02 92     add  %o1, 0x800, %o1
72333 +ff000434:      09 00 12 90     or  %o0, %o1, %o0
72334 +ff000438:      40 00 00 3b     sethi  %hi(0x10000), %i5
72335 +ff00043c:      1d 00 12 90     or  %o0, %i5, %o0
72336 +ff000440:      14 20 24 d0     st  %o0, [ %l0 + 0x14 ]
72337 +ff000444:      20 60 04 d0     ld  [ %l1 + 0x20 ], %o0
72338 +ff000448:      10 21 02 90     add  %o0, 0x110, %o0
72339 +ff00044c:      18 20 24 d0     st  %o0, [ %l0 + 0x18 ]
72340 +ff000450:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72341 +ff000454:      02 20 2a 91     sll  %o0, 2, %o0
72342 +ff000458:      08 80 06 d2     ld  [ %i2 + %o0 ], %o1
72343 +ff00045c:      00 01 00 3b     sethi  %hi(0x40000), %i5
72344 +ff000460:      1d 40 02 92     add  %o1, %i5, %o1
72345 +ff000464:      08 80 26 d2     st  %o1, [ %i2 + %o0 ]
72346 +ff000468:      c0 ff 3f 3b     sethi  %hi(0xffff0000), %i5
72347 +ff00046c:      1d 40 0a 92     and  %o1, %i5, %o1
72348 +ff000470:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72349 +ff000474:      00 28 02 90     add  %o0, 0x800, %o0
72350 +ff000478:      80 00 00 15     sethi  %hi(0x20000), %o2
72351 +ff00047c:      0a 00 12 90     or  %o0, %o2, %o0
72352 +ff000480:      08 40 12 92     or  %o1, %o0, %o1
72353 +ff000484:      40 00 00 3b     sethi  %hi(0x10000), %i5
72354 +ff000488:      1d 40 12 92     or  %o1, %i5, %o1
72355 +ff00048c:      1c 20 24 d2     st  %o1, [ %l0 + 0x1c ]
72356 +ff000490:      01 20 10 90     mov  1, %o0
72357 +ff000494:      a0 20 24 d0     st  %o0, [ %l0 + 0xa0 ]
72358 +ff000498:      20 20 04 96     add  %l0, 0x20, %o3
72359 +ff00049c:      a8 20 04 84     add  %l0, 0xa8, %g2
72360 +ff0004a0:      30 60 04 d0     ld  [ %l1 + 0x30 ], %o0
72361 +ff0004a4:      00 20 a2 80     cmp  %o0, 0
72362 +ff0004a8:      2b 00 80 12     bne  ff000554 <ep3comms_rcvr+0x2ac>
72363 +ff0004ac:      00 20 10 98     clr  %o4
72364 +ff0004b0:      0c 26 10 ba     mov  0x60c, %i5
72365 +ff0004b4:      00 c0 22 fa     st  %i5, [ %o3 ]
72366 +ff0004b8:      04 e0 22 c0     clr  [ %o3 + 4 ]
72367 +ff0004bc:      c0 20 04 90     add  %l0, 0xc0, %o0
72368 +ff0004c0:      10 e0 22 d0     st  %o0, [ %o3 + 0x10 ]
72369 +ff0004c4:      18 60 04 d2     ld  [ %l1 + 0x18 ], %o1
72370 +ff0004c8:      02 60 2a 93     sll  %o1, 2, %o1
72371 +ff0004cc:      09 80 06 d0     ld  [ %i2 + %o1 ], %o0
72372 +ff0004d0:      00 01 00 3b     sethi  %hi(0x40000), %i5
72373 +ff0004d4:      1d 00 02 90     add  %o0, %i5, %o0
72374 +ff0004d8:      09 80 26 d0     st  %o0, [ %i2 + %o1 ]
72375 +ff0004dc:      c0 ff 3f 3b     sethi  %hi(0xffff0000), %i5
72376 +ff0004e0:      1d 00 0a 90     and  %o0, %i5, %o0
72377 +ff0004e4:      44 a1 03 fa     ld  [ %sp + 0x144 ], %i5
72378 +ff0004e8:      60 60 07 d2     ld  [ %i5 + 0x60 ], %o1
72379 +ff0004ec:      00 68 02 92     add  %o1, 0x800, %o1
72380 +ff0004f0:      09 00 12 90     or  %o0, %o1, %o0
72381 +ff0004f4:      40 00 00 3b     sethi  %hi(0x10000), %i5
72382 +ff0004f8:      1d 00 12 90     or  %o0, %i5, %o0
72383 +ff0004fc:      14 e0 22 d0     st  %o0, [ %o3 + 0x14 ]
72384 +ff000500:      20 60 04 d0     ld  [ %l1 + 0x20 ], %o0
72385 +ff000504:      10 21 02 90     add  %o0, 0x110, %o0
72386 +ff000508:      18 e0 22 d0     st  %o0, [ %o3 + 0x18 ]
72387 +ff00050c:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72388 +ff000510:      02 20 2a 91     sll  %o0, 2, %o0
72389 +ff000514:      08 80 06 d2     ld  [ %i2 + %o0 ], %o1
72390 +ff000518:      00 01 00 3b     sethi  %hi(0x40000), %i5
72391 +ff00051c:      1d 40 02 92     add  %o1, %i5, %o1
72392 +ff000520:      08 80 26 d2     st  %o1, [ %i2 + %o0 ]
72393 +ff000524:      c0 ff 3f 3b     sethi  %hi(0xffff0000), %i5
72394 +ff000528:      1d 40 0a 92     and  %o1, %i5, %o1
72395 +ff00052c:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72396 +ff000530:      00 28 02 90     add  %o0, 0x800, %o0
72397 +ff000534:      80 00 00 15     sethi  %hi(0x20000), %o2
72398 +ff000538:      0a 00 12 90     or  %o0, %o2, %o0
72399 +ff00053c:      08 40 12 92     or  %o1, %o0, %o1
72400 +ff000540:      40 00 00 3b     sethi  %hi(0x10000), %i5
72401 +ff000544:      1d 40 12 92     or  %o1, %i5, %o1
72402 +ff000548:      1c e0 22 d2     st  %o1, [ %o3 + 0x1c ]
72403 +ff00054c:      4f 00 80 10     b  ff000688 <ep3comms_rcvr+0x3e0>
72404 +ff000550:      00 20 10 9a     clr  %o5
72405 +ff000554:      0c 00 10 9a     mov  %o4, %o5
72406 +ff000558:      30 60 04 d0     ld  [ %l1 + 0x30 ], %o0
72407 +ff00055c:      08 40 a3 80     cmp  %o5, %o0
72408 +ff000560:      3b 00 80 1a     bcc  ff00064c <ep3comms_rcvr+0x3a4>
72409 +ff000564:      e0 20 04 c6     ld  [ %l0 + 0xe0 ], %g3
72410 +ff000568:      0c 26 10 ba     mov  0x60c, %i5
72411 +ff00056c:      00 c0 22 fa     st  %i5, [ %o3 ]
72412 +ff000570:      01 20 2b 91     sll  %o4, 1, %o0
72413 +ff000574:      0c 00 02 90     add  %o0, %o4, %o0
72414 +ff000578:      02 20 2a 91     sll  %o0, 2, %o0
72415 +ff00057c:      08 40 04 90     add  %l1, %o0, %o0
72416 +ff000580:      38 20 02 d2     ld  [ %o0 + 0x38 ], %o1
72417 +ff000584:      04 e0 22 d2     st  %o1, [ %o3 + 4 ]
72418 +ff000588:      34 20 02 d0     ld  [ %o0 + 0x34 ], %o0
72419 +ff00058c:      08 e0 22 d0     st  %o0, [ %o3 + 8 ]
72420 +ff000590:      0c e0 22 c6     st  %g3, [ %o3 + 0xc ]
72421 +ff000594:      10 e0 22 c4     st  %g2, [ %o3 + 0x10 ]
72422 +ff000598:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72423 +ff00059c:      02 20 2a 91     sll  %o0, 2, %o0
72424 +ff0005a0:      08 80 06 d2     ld  [ %i2 + %o0 ], %o1
72425 +ff0005a4:      00 01 00 3b     sethi  %hi(0x40000), %i5
72426 +ff0005a8:      1d 40 02 92     add  %o1, %i5, %o1
72427 +ff0005ac:      08 80 26 d2     st  %o1, [ %i2 + %o0 ]
72428 +ff0005b0:      c0 ff 3f 3b     sethi  %hi(0xffff0000), %i5
72429 +ff0005b4:      1d 40 0a 92     and  %o1, %i5, %o1
72430 +ff0005b8:      44 a1 03 fa     ld  [ %sp + 0x144 ], %i5
72431 +ff0005bc:      60 60 07 d0     ld  [ %i5 + 0x60 ], %o0
72432 +ff0005c0:      00 28 02 90     add  %o0, 0x800, %o0
72433 +ff0005c4:      08 40 12 92     or  %o1, %o0, %o1
72434 +ff0005c8:      40 00 00 3b     sethi  %hi(0x10000), %i5
72435 +ff0005cc:      1d 40 12 92     or  %o1, %i5, %o1
72436 +ff0005d0:      14 e0 22 d2     st  %o1, [ %o3 + 0x14 ]
72437 +ff0005d4:      20 60 04 d0     ld  [ %l1 + 0x20 ], %o0
72438 +ff0005d8:      10 21 02 90     add  %o0, 0x110, %o0
72439 +ff0005dc:      18 e0 22 d0     st  %o0, [ %o3 + 0x18 ]
72440 +ff0005e0:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72441 +ff0005e4:      02 20 2a 91     sll  %o0, 2, %o0
72442 +ff0005e8:      08 80 06 d4     ld  [ %i2 + %o0 ], %o2
72443 +ff0005ec:      00 01 00 3b     sethi  %hi(0x40000), %i5
72444 +ff0005f0:      1d 80 02 94     add  %o2, %i5, %o2
72445 +ff0005f4:      08 80 26 d4     st  %o2, [ %i2 + %o0 ]
72446 +ff0005f8:      c0 ff 3f 3b     sethi  %hi(0xffff0000), %i5
72447 +ff0005fc:      1d 80 0a 94     and  %o2, %i5, %o2
72448 +ff000600:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72449 +ff000604:      00 28 02 90     add  %o0, 0x800, %o0
72450 +ff000608:      80 00 00 13     sethi  %hi(0x20000), %o1
72451 +ff00060c:      09 00 12 90     or  %o0, %o1, %o0
72452 +ff000610:      08 80 12 94     or  %o2, %o0, %o2
72453 +ff000614:      40 00 00 3b     sethi  %hi(0x10000), %i5
72454 +ff000618:      1d 80 12 94     or  %o2, %i5, %o2
72455 +ff00061c:      1c e0 22 d4     st  %o2, [ %o3 + 0x1c ]
72456 +ff000620:      01 20 10 90     mov  1, %o0
72457 +ff000624:      00 80 20 d0     st  %o0, [ %g2 ]
72458 +ff000628:      04 e0 02 d0     ld  [ %o3 + 4 ], %o0
72459 +ff00062c:      08 c0 00 86     add  %g3, %o0, %g3
72460 +ff000630:      08 40 03 9a     add  %o5, %o0, %o5
72461 +ff000634:      01 20 03 98     inc  %o4
72462 +ff000638:      20 e0 02 96     add  %o3, 0x20, %o3
72463 +ff00063c:      30 60 04 d0     ld  [ %l1 + 0x30 ], %o0
72464 +ff000640:      08 00 a3 80     cmp  %o4, %o0
72465 +ff000644:      c9 ff bf 0a     bcs  ff000568 <ep3comms_rcvr+0x2c0>
72466 +ff000648:      08 a0 00 84     add  %g2, 8, %g2
72467 +ff00064c:      c0 20 04 90     add  %l0, 0xc0, %o0
72468 +ff000650:      f0 ff 22 d0     st  %o0, [ %o3 + -16 ]
72469 +ff000654:      e4 20 04 d0     ld  [ %l0 + 0xe4 ], %o0
72470 +ff000658:      0d 00 a2 80     cmp  %o0, %o5
72471 +ff00065c:      0b 00 80 1a     bcc  ff000688 <ep3comms_rcvr+0x3e0>
72472 +ff000660:      00 20 10 98     clr  %o4
72473 +ff000664:      04 00 80 10     b  ff000674 <ep3comms_rcvr+0x3cc>
72474 +ff000668:      10 00 10 96     mov  %l0, %o3
72475 +ff00066c:      01 20 03 98     inc  %o4
72476 +ff000670:      20 e0 02 96     add  %o3, 0x20, %o3
72477 +ff000674:      30 60 04 d0     ld  [ %l1 + 0x30 ], %o0
72478 +ff000678:      08 00 a3 80     cmp  %o4, %o0
72479 +ff00067c:      fc ff bf 2a     bcs,a   ff00066c <ep3comms_rcvr+0x3c4>
72480 +ff000680:      04 e0 22 c0     clr  [ %o3 + 4 ]
72481 +ff000684:      fc 3f 10 9a     mov  -4, %o5
72482 +ff000688:      e4 20 24 da     st  %o5, [ %l0 + 0xe4 ]
72483 +ff00068c:      10 00 10 82     mov  %l0, %g1
72484 +ff000690:      18 60 04 d0     ld  [ %l1 + 0x18 ], %o0
72485 +ff000694:      00 28 02 90     add  %o0, 0x800, %o0
72486 +ff000698:      08 00 60 81     open  %o0
72487 +ff00069c:      00 00 00 01     nop 
72488 +ff0006a0:      00 00 00 01     nop 
72489 +ff0006a4:      00 00 00 01     nop 
72490 +ff0006a8:      00 00 00 01     nop 
72491 +ff0006ac:      00 00 00 01     nop 
72492 +ff0006b0:      00 00 00 01     nop 
72493 +ff0006b4:      00 00 00 01     nop 
72494 +ff0006b8:      00 00 00 01     nop 
72495 +ff0006bc:      00 00 00 01     nop 
72496 +ff0006c0:      00 00 00 01     nop 
72497 +ff0006c4:      00 00 00 01     nop 
72498 +ff0006c8:      00 00 00 01     nop 
72499 +ff0006cc:      00 00 00 01     nop 
72500 +ff0006d0:      00 00 00 01     nop 
72501 +ff0006d4:      00 00 00 01     nop 
72502 +ff0006d8:      00 00 00 01     nop 
72503 +ff0006dc:      00 00 00 01     nop 
72504 +ff0006e0:      00 00 00 01     nop 
72505 +ff0006e4:      00 00 00 01     nop 
72506 +ff0006e8:      00 00 00 01     nop 
72507 +ff0006ec:      00 00 00 01     nop 
72508 +ff0006f0:      00 00 00 01     nop 
72509 +ff0006f4:      00 00 00 01     nop 
72510 +ff0006f8:      00 00 00 01     nop 
72511 +ff0006fc:      00 20 10 98     clr  %o4        ! 0 <*ABS*>
72512 +ff000700:      0c 00 10 9a     mov  %o4, %o5
72513 +ff000704:      14 20 04 fa     ld  [ %l0 + 0x14 ], %i5
72514 +ff000708:      ec 00 82 3b     sendtrans  0x1007<THREADIDENTIFY>, %o4, %i5
72515 +ff00070c:      30 80 d6 1b     sendtrans  0xb401<REMOTEDMA|SENDACK|LAST_TRANS>, [ %l0 ], %o5
72516 +ff000710:      00 00 68 91     close  %o0
72517 +ff000714:      04 20 a2 80     cmp  %o0, 4
72518 +ff000718:      3b 00 80 02     be  ff000804 <ep3comms_rcvr+0x55c>
72519 +ff00071c:      0c 00 a2 80     cmp  %o0, %o4
72520 +ff000720:      04 00 80 02     be  ff000730 <ep3comms_rcvr+0x488>
72521 +ff000724:      00 00 00 01     nop 
72522 +ff000728:      01 20 d0 91     ta  1
72523 +ff00072c:      36 00 80 30     b,a   ff000804 <ep3comms_rcvr+0x55c>
72524 +ff000730:      7c 20 06 d0     ld  [ %i0 + 0x7c ], %o0
72525 +ff000734:      01 20 02 90     inc  %o0
72526 +ff000738:      7c 20 26 d0     st  %o0, [ %i0 + 0x7c ]
72527 +ff00073c:      78 20 06 d0     ld  [ %i0 + 0x78 ], %o0
72528 +ff000740:      00 20 a2 80     cmp  %o0, 0
72529 +ff000744:      04 00 80 02     be  ff000754 <ep3comms_rcvr+0x4ac>
72530 +ff000748:      78 20 06 90     add  %i0, 0x78, %o0
72531 +ff00074c:      64 00 00 40     call  ff0008dc <ep3_spinblock>
72532 +ff000750:      40 e0 06 92     add  %i3, 0x40, %o1
72533 +ff000754:      f0 20 04 d0     ld  [ %l0 + 0xf0 ], %o0
72534 +ff000758:      00 20 a2 80     cmp  %o0, 0
72535 +ff00075c:      03 00 80 12     bne  ff000768 <ep3comms_rcvr+0x4c0>
72536 +ff000760:      f0 20 26 d0     st  %o0, [ %i0 + 0xf0 ]
72537 +ff000764:      80 e0 26 c0     clr  [ %i3 + 0x80 ]
72538 +ff000768:      7c 20 06 d0     ld  [ %i0 + 0x7c ], %o0
72539 +ff00076c:      40 e0 26 d0     st  %o0, [ %i3 + 0x40 ]
72540 +ff000770:      04 60 04 d0     ld  [ %l1 + 4 ], %o0
72541 +ff000774:      04 20 8a 80     btst  4, %o0
72542 +ff000778:      13 00 80 02     be  ff0007c4 <ep3comms_rcvr+0x51c>
72543 +ff00077c:      11 00 10 8a     mov  %l1, %g5
72544 +ff000780:      ec 20 04 cc     ld  [ %l0 + 0xec ], %g6
72545 +ff000784:      3f a0 0b 8e     and  %sp, 0x3f, %g7
72546 +ff000788:      40 e0 01 8e     add  %g7, 0x40, %g7
72547 +ff00078c:      07 80 23 9c     sub  %sp, %g7, %sp
72548 +ff000790:      00 80 bb e0     stblock64  %l0, [ %sp ]
72549 +ff000794:      00 60 99 e0     ldblock64  [ %g5 ], %l0
72550 +ff000798:      00 a0 b9 e0     stblock64  %l0, [ %g6 ]
72551 +ff00079c:      40 60 99 e0     ldblock64  [ %g5 + 0x40 ], %l0
72552 +ff0007a0:      40 a0 b9 e0     stblock64  %l0, [ %g6 + 0x40 ]
72553 +ff0007a4:      80 60 99 e0     ldblock64  [ %g5 + 0x80 ], %l0
72554 +ff0007a8:      80 a0 b9 e0     stblock64  %l0, [ %g6 + 0x80 ]
72555 +ff0007ac:      c0 60 99 e0     ldblock64  [ %g5 + 0xc0 ], %l0
72556 +ff0007b0:      c0 a0 b9 e0     stblock64  %l0, [ %g6 + 0xc0 ]
72557 +ff0007b4:      00 80 9b e0     ldblock64  [ %sp ], %l0
72558 +ff0007b8:      07 80 03 9c     add  %sp, %g7, %sp
72559 +ff0007bc:      0e 00 80 10     b  ff0007f4 <ep3comms_rcvr+0x54c>
72560 +ff0007c0:      ec 20 04 d2     ld  [ %l0 + 0xec ], %o1
72561 +ff0007c4:      ec 20 04 cc     ld  [ %l0 + 0xec ], %g6
72562 +ff0007c8:      3f a0 0b 8e     and  %sp, 0x3f, %g7
72563 +ff0007cc:      40 e0 01 8e     add  %g7, 0x40, %g7
72564 +ff0007d0:      07 80 23 9c     sub  %sp, %g7, %sp
72565 +ff0007d4:      00 80 bb e0     stblock64  %l0, [ %sp ]
72566 +ff0007d8:      00 60 99 e0     ldblock64  [ %g5 ], %l0
72567 +ff0007dc:      00 a0 b9 e0     stblock64  %l0, [ %g6 ]
72568 +ff0007e0:      40 60 99 e0     ldblock64  [ %g5 + 0x40 ], %l0
72569 +ff0007e4:      40 a0 b9 e0     stblock64  %l0, [ %g6 + 0x40 ]
72570 +ff0007e8:      00 80 9b e0     ldblock64  [ %sp ], %l0
72571 +ff0007ec:      07 80 03 9c     add  %sp, %g7, %sp
72572 +ff0007f0:      ec 20 04 d2     ld  [ %l0 + 0xec ], %o1
72573 +ff0007f4:      e4 20 04 d0     ld  [ %l0 + 0xe4 ], %o0
72574 +ff0007f8:      88 62 22 d0     st  %o0, [ %o1 + 0x288 ]
72575 +ff0007fc:      04 20 06 d0     ld  [ %i0 + 4 ], %o0
72576 +ff000800:      00 c0 26 d0     st  %o0, [ %i3 ]
72577 +ff000804:      00 00 07 d0     ld  [ %i4 ], %o0
72578 +ff000808:      01 20 8a 80     btst  1, %o0
72579 +ff00080c:      05 00 80 12     bne  ff000820 <ep3comms_rcvr+0x578>
72580 +ff000810:      00 00 00 01     nop 
72581 +ff000814:      14 20 27 f2     st  %i1, [ %i4 + 0x14 ]
72582 +ff000818:      06 00 80 10     b  ff000830 <ep3comms_rcvr+0x588>
72583 +ff00081c:      dc a0 03 fa     ld  [ %sp + 0xdc ], %i5
72584 +ff000820:      14 20 27 f2     st  %i1, [ %i4 + 0x14 ]
72585 +ff000824:      fe 3f 0a 90     and  %o0, -2, %o0
72586 +ff000828:      00 00 27 d0     st  %o0, [ %i4 ]
72587 +ff00082c:      dc a0 03 fa     ld  [ %sp + 0xdc ], %i5
72588 +ff000830:      01 60 07 ba     inc  %i5
72589 +ff000834:      dc a0 23 fa     st  %i5, [ %sp + 0xdc ]
72590 +ff000838:      0c 20 07 d0     ld  [ %i4 + 0xc ], %o0
72591 +ff00083c:      08 40 a6 80     cmp  %i1, %o0
72592 +ff000840:      04 00 80 32     bne,a   ff000850 <ep3comms_rcvr+0x5a8>
72593 +ff000844:      08 20 07 d0     ld  [ %i4 + 8 ], %o0
72594 +ff000848:      03 00 80 10     b  ff000854 <ep3comms_rcvr+0x5ac>
72595 +ff00084c:      10 20 07 f2     ld  [ %i4 + 0x10 ], %i1
72596 +ff000850:      08 40 06 b2     add  %i1, %o0, %i1
72597 +ff000854:      00 00 70 81     breaktest 
72598 +ff000858:      16 00 80 1c     bpos  ff0008b0 <ep3comms_rcvr+0x608>
72599 +ff00085c:      00 00 00 01     nop 
72600 +ff000860:      60 a0 23 9c     sub  %sp, 0x60, %sp
72601 +ff000864:      00 a0 a3 c0     stblock32  %g0, [ %sp ]
72602 +ff000868:      20 a0 a3 f0     stblock32  %i0, [ %sp + 0x20 ]
72603 +ff00086c:      40 a0 a3 e0     stblock32  %l0, [ %sp + 0x40 ]
72604 +ff000870:      02 c0 3f 3f     sethi  %hi(0xff000800), %i7
72605 +ff000874:      8c e0 17 be     or  %i7, 0x8c, %i7      ! ff00088c <ep3comms_rcvr+0x5e4>
72606 +ff000878:      04 e0 8f 80     btst  4, %i7
72607 +ff00087c:      06 00 80 12     bne  ff000894 <ep3comms_rcvr+0x5ec>
72608 +ff000880:      00 00 00 01     nop 
72609 +ff000884:      00 c0 1f fc     ldd  [ %i7 ], %fp
72610 +ff000888:      00 00 a0 81     break 
72611 +ff00088c:      06 00 80 10     b  ff0008a4 <ep3comms_rcvr+0x5fc>
72612 +ff000890:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72613 +ff000894:      14 e0 07 be     add  %i7, 0x14, %i7
72614 +ff000898:      00 c0 1f fc     ldd  [ %i7 ], %fp
72615 +ff00089c:      00 00 a0 81     break 
72616 +ff0008a0:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72617 +ff0008a4:      20 a0 83 f0     ldblock32  [ %sp + 0x20 ], %i0
72618 +ff0008a8:      00 a0 83 c0     ldblock32  [ %sp ], %g0
72619 +ff0008ac:      60 a0 03 9c     add  %sp, 0x60, %sp
72620 +ff0008b0:      04 20 07 d0     ld  [ %i4 + 4 ], %o0
72621 +ff0008b4:      08 40 a6 80     cmp  %i1, %o0
72622 +ff0008b8:      b3 fe bf 12     bne  ff000384 <ep3comms_rcvr+0xdc>
72623 +ff0008bc:      19 00 10 a2     mov  %i1, %l1
72624 +ff0008c0:      8a fe bf 10     b  ff0002e8 <ep3comms_rcvr+0x40>
72625 +ff0008c4:      f4 20 06 d0     ld  [ %i0 + 0xf4 ], %o0
72626 +ff0008c8:      60 a0 03 de     ld  [ %sp + 0x60 ], %o7
72627 +ff0008cc:      80 a0 83 e0     ldblock32  [ %sp + 0x80 ], %l0
72628 +ff0008d0:      a0 a0 83 f0     ldblock32  [ %sp + 0xa0 ], %i0
72629 +ff0008d4:      08 e0 c3 81     retl 
72630 +ff0008d8:      00 a1 03 9c     add  %sp, 0x100, %sp
72631 +
72632 +ff0008dc <ep3_spinblock>:
72633 +ff0008dc:      80 a0 23 9c     sub  %sp, 0x80, %sp
72634 +ff0008e0:      04 20 02 c4     ld  [ %o0 + 4 ], %g2
72635 +ff0008e4:      00 40 22 c4     st  %g2, [ %o1 ]
72636 +ff0008e8:      18 00 80 30     b,a   ff000948 <ep3_spinblock+0x6c>
72637 +ff0008ec:      00 00 70 81     breaktest 
72638 +ff0008f0:      16 00 80 12     bne  ff000948 <ep3_spinblock+0x6c>
72639 +ff0008f4:      00 00 00 01     nop 
72640 +ff0008f8:      60 a0 23 9c     sub  %sp, 0x60, %sp
72641 +ff0008fc:      00 a0 a3 c0     stblock32  %g0, [ %sp ]
72642 +ff000900:      20 a0 a3 f0     stblock32  %i0, [ %sp + 0x20 ]
72643 +ff000904:      40 a0 a3 e0     stblock32  %l0, [ %sp + 0x40 ]
72644 +ff000908:      02 c0 3f 3f     sethi  %hi(0xff000800), %i7
72645 +ff00090c:      24 e1 17 be     or  %i7, 0x124, %i7     ! ff000924 <ep3_spinblock+0x48>
72646 +ff000910:      04 e0 8f 80     btst  4, %i7
72647 +ff000914:      06 00 80 12     bne  ff00092c <ep3_spinblock+0x50>
72648 +ff000918:      00 00 00 01     nop 
72649 +ff00091c:      00 c0 1f fc     ldd  [ %i7 ], %fp
72650 +ff000920:      00 00 a0 81     break 
72651 +ff000924:      06 00 80 10     b  ff00093c <ep3_spinblock+0x60>
72652 +ff000928:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72653 +ff00092c:      14 e0 07 be     add  %i7, 0x14, %i7
72654 +ff000930:      00 c0 1f fc     ldd  [ %i7 ], %fp
72655 +ff000934:      00 00 a0 81     break 
72656 +ff000938:      40 a0 83 e0     ldblock32  [ %sp + 0x40 ], %l0
72657 +ff00093c:      20 a0 83 f0     ldblock32  [ %sp + 0x20 ], %i0
72658 +ff000940:      00 a0 83 c0     ldblock32  [ %sp ], %g0
72659 +ff000944:      60 a0 03 9c     add  %sp, 0x60, %sp
72660 +ff000948:      00 00 02 c4     ld  [ %o0 ], %g2
72661 +ff00094c:      00 a0 a0 80     cmp  %g2, 0
72662 +ff000950:      e7 ff bf 12     bne  ff0008ec <ep3_spinblock+0x10>
72663 +ff000954:      00 00 00 01     nop 
72664 +ff000958:      04 20 02 c4     ld  [ %o0 + 4 ], %g2
72665 +ff00095c:      01 a0 00 84     inc  %g2
72666 +ff000960:      04 20 22 c4     st  %g2, [ %o0 + 4 ]
72667 +ff000964:      00 00 02 c4     ld  [ %o0 ], %g2
72668 +ff000968:      00 a0 a0 80     cmp  %g2, 0
72669 +ff00096c:      dd ff bf 12     bne  ff0008e0 <ep3_spinblock+0x4>
72670 +ff000970:      00 00 00 01     nop 
72671 +ff000974:      08 e0 c3 81     retl 
72672 +ff000978:      80 a0 03 9c     add  %sp, 0x80, %sp
72673 +Disassembly of section .data:
72674 diff -urN clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.c linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.c
72675 --- clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.c 1969-12-31 19:00:00.000000000 -0500
72676 +++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.c   2005-09-07 10:39:44.000000000 -0400
72677 @@ -0,0 +1,107 @@
72678 +/* --------------------------------------------------------*/
72679 +/* MACHINE GENERATED ELAN CODE                             */
72680 +#include <qsnet/kernel.h>
72681 +#include <elan/kcomm.h>
72682 +#include "kcomm_elan4.h"
72683 +static uint32_t threadcode_elan4_text[] = {
72684 +0x00a00087, 0xc04060cb, 0x00003080, 0x80001080, 0x02606180, 0x02004032, 0x807f60cb, 0x04606180, 
72685 +0x02004032, 0x407f60d3, 0x08606180, 0x02004032, 0x007f60db, 0x10606180, 0x02004032, 0xc07e60e3, 
72686 +0x20606180, 0x02004032, 0x807e60eb, 0x40606180, 0x02004032, 0x407e60f3, 0x80606180, 0x02004032, 
72687 +0x007e60fb, 0x40001180, 0xc3801080, 0xc07f60c3, 0x20002000, 0x20002000, 0x20002000, 0x20002000, 
72688 +0x407f8001, 0x6860c0c7, 0x4060e0d1, 0x00208490, 0x00208080, 0x00208080, 0x6060c0d4, 0x00208292, 
72689 +0x00608290, 0x00a08294, 0xff3f8088, 0x1c381293, 0xc00044c8, 0x13004291, 0xc000c5d1, 0xc00044c8, 
72690 +0x20381288, 0x0020b200, 0x0e004003, 0x01608408, 0x00001088, 0x04204288, 0x0020b200, 0x04004003, 
72691 +0x00208080, 0x74010040, 0x00a08488, 0xc00044c8, 0x20381288, 0x0020b200, 0xf6ff7f13, 0x01608408, 
72692 +0x10161282, 0x800094c2, 0xc00044c8, 0x20381288, 0x0020b200, 0xe7ff7f13, 0x00208080, 0x686040c7, 
72693 +0x406060d1, 0x606040d4, 0x08e00180, 0xc0608001, 0x00000001, 0x00000001, 0x00000001, 0x00000001, 
72694 +0xc07e8001, 0xc060c0c7, 0x4060e0d3, 0x00208490, 0x00208080, 0x00208080, 0x8060e0db, 0x00208698, 
72695 +0x00208080, 0x00208080, 0x4061c0c8, 0x00608295, 0x00a0829b, 0x5861c0cb, 0x6061c0cc, 0x6861c0cd, 
72696 +0x0120809c, 0x08e042d1, 0x1c00900a, 0x05b4128a, 0x606140c8, 0x586140cb, 0x58010040, 0x18e042c9, 
72697 +0x0020809c, 0x586140cd, 0xc04043c8, 0x0840b400, 0x30014003, 0xffff3f08, 0xe023829f, 0x20f4179f, 
72698 +0x10e3879f, 0xe023829e, 0x20b4179e, 0x03a3879e, 0x00a0879d, 0x00608493, 0x18608408, 0x800012c2, 
72699 +0x089a109a, 0x20b4169a, 0x20b8169a, 0x00a88609, 0x20741289, 0x01120008, 0x0a381288, 0x08408297, 
72700 +0x45208088, 0x06341288, 0x406140c9, 0xc84042c8, 0x00288218, 0x04608408, 0x800012c2, 0x089a1088, 
72701 +0x20341288, 0x20381288, 0x00208299, 0x20608408, 0x800012c2, 0x089a1089, 0x20741289, 0x20781289, 
72702 +0x30608408, 0x800012c2, 0x089a1094, 0x20341594, 0x20381594, 0x02604688, 0x0020b200, 0x03004012, 
72703 +0x80608216, 0x60608216, 0x90608509, 0x804012c8, 0x01208208, 0x804092c8, 0x046012c8, 0x043a1288, 
72704 +0x0020b200, 0x04004003, 0x686140c8, 0x7dffff7f, 0x00e0868a, 0x886045d0, 0x0020b400, 0x12004013, 
72705 +0x90608512, 0x808014c8, 0x80c096c8, 0x64010040, 0x00608588, 0x00208290, 0x808014c8, 0x01208208, 
72706 +0x808094c8, 0x04a014c8, 0x043a1288, 0x0020b200, 0x05004003, 0x686140c8, 0x00a08489, 0x69ffff7f, 
72707 +0x00e0868a, 0x80c014c2, 0x139a1089, 0x20741289, 0x20781289, 0x40b03608, 0x01208288, 0x0840b200, 
72708 +0x06004003, 0x90608508, 0x800012c8, 0x80c096c8, 0xbf004010, 0xa86045c8, 0xa02344c3, 0x01604688, 
72709 +0x0020b500, 0x03004013, 0x14008209, 0x01208209, 0x05208088, 0x09009221, 0x0774188a, 0x0a00840b, 
72710 +0x05741888, 0x0800840c, 0x406140cd, 0x29228088, 0x03341288, 0xc84043c9, 0x03b41688, 0xc84042cf, 
72711 +0x01604688, 0x0020b200, 0x1d004002, 0x0023830c, 0xca00c4d7, 0x40c40f09, 0x09608289, 0x08e0c2c9, 
72712 +0x0ae08388, 0x10e0c2c8, 0x81001008, 0x0a341288, 0x18e0c2c8, 0x1de08388, 0x20e0c2c8, 0x28e0c2d8, 
72713 +0x24e08408, 0x800012c2, 0x089a1088, 0x20341288, 0x20381288, 0x80208208, 0x30e0c2c8, 0x00e18008, 
72714 +0x38e0c2c8, 0x40e0c2d6, 0x48e0c2cc, 0xc000c3df, 0x20e0830f, 0x80e0820b, 0x2020830c, 0x0020b500, 
72715 +0x13004033, 0x0020808d, 0xc0c0c2d7, 0x40c40f0a, 0x09a0828a, 0x08e0c2ca, 0x0ae08388, 0x10e0c2c8, 
72716 +0x00040008, 0x18e0c2c8, 0x1de08388, 0x20e0c2c8, 0x28e0c2d8, 0x40e0c2d6, 0x48e0c2cc, 0xc000c3de, 
72717 +0x00208092, 0x4b004010, 0x20e0830f, 0xb8238408, 0x800012c2, 0x089a108e, 0x20b4138e, 0x20b8138e, 
72718 +0x00208092, 0x1480b400, 0x2d00401b, 0x40c40f08, 0x092082a3, 0x00040022, 0xffff3f08, 0xe02382a0, 
72719 +0x203418a0, 0x102388a0, 0x0d408309, 0x0d408209, 0x02741289, 0x09c08409, 0x3860820a, 0x808012c2, 
72720 +0x0a9a108a, 0x20b4128a, 0x20b8128a, 0xc0c0c2d7, 0x08e0c2e3, 0x0ae08388, 0x10e0c2c8, 0x20b41288, 
72721 +0x22008288, 0x18e0c2c8, 0x1de08388, 0x20e0c2c8, 0x28e0c2d8, 0x34608209, 0x804012c2, 0x099a1089, 
72722 +0x20741289, 0x20781289, 0x30e0c2c9, 0x38e0c2ce, 0x40e0c2d6, 0x48e0c2cc, 0xc000c3e0, 0x0a80830e, 
72723 +0x0a808412, 0x20e0830f, 0x80e0820b, 0x0160830d, 0x1440b300, 0xddff7f0b, 0x2020830c, 0xe03f830c, 
72724 +0xc000c3dd, 0xbc238408, 0x800012c2, 0x089a1088, 0x20341288, 0x20381288, 0x1200b200, 0x0e00401b, 
72725 +0x07741888, 0x0060888d, 0x0460b800, 0x08004019, 0x0800840b, 0x00040008, 0x18e0c2c8, 0x0160830d, 
72726 +0x0460b300, 0xfdff7f09, 0x80e0820b, 0xfc3f8092, 0x07741888, 0x08008408, 0x606140cb, 0xc00062e3, 
72727 +0x402062f3, 0xc0c0e2e3, 0xc0c0e2f3, 0x982244c8, 0x8860c5c8, 0x886045c8, 0x0020b200, 0x05004033, 
72728 +0xd82294c0, 0x88608508, 0x8060c5c8, 0xd82294c0, 0x04604688, 0x0020b200, 0x0c004002, 0xdc2294c0, 
72729 +0xc0c064e3, 0x40e064f3, 0xc0c0e0e3, 0x80e064e3, 0x40e0e0f3, 0xc0e064f3, 0x80e0e0e3, 0xc0e0e0f3, 
72730 +0x07004010, 0x88e28008, 0xc0c064e3, 0x40e064f3, 0xc0c0e0e3, 0x40e0e0f3, 0x88e28008, 0x08961482, 
72731 +0x800092c2, 0x406140cd, 0x29228088, 0x03341288, 0xc84043c9, 0x03b41688, 0xc840c2cf, 0x90608508, 
72732 +0x800012c8, 0x80c096c8, 0xa86045c8, 0x0840b400, 0x03004013, 0x00618411, 0xa06045d1, 0x986045c8, 
72733 +0x0020b200, 0x04004013, 0x0120871c, 0x586140c9, 0x0860c2d1, 0xfe21b700, 0x0f004035, 0x986045cb, 
72734 +0x00001088, 0x02204288, 0x0020b200, 0x05004003, 0x586140ca, 0x18000040, 0x606140c8, 0x586140ca, 
72735 +0xc08042c8, 0x0840b400, 0xdcfe7f13, 0x00608493, 0x986045cb, 0x00e0b200, 0xc5fe7f03, 0x1c00900a, 
72736 +0x606140c8, 0x60608509, 0x38000040, 0xe03f808a, 0x586140cb, 0x08e0c2d1, 0xbcfe7f10, 0x0120871c, 
72737 +0xc06040c7, 0x406060d3, 0x806060db, 0x08e00180, 0x40618001, 0x807f8001, 0xc040e0d3, 0x4060e0db, 
72738 +0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x00e08192, 0x02000040, 0x00608091, 0x14e08110, 
72739 +0x17208097, 0xc000f2d3, 0xc04060d3, 0x406060db, 0x08a00080, 0x80608001, 0x407f8001, 0x4060e0d3, 
72740 +0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x00e08192, 0x02000040, 0x00608091, 
72741 +0x40e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, 0x00208490, 0x00e08597, 0x00208080, 0x00208080, 
72742 +0x1f608290, 0x20b41291, 0x08638491, 0x00608092, 0x00208293, 0xc000f2d1, 0x406060d3, 0x806060db, 
72743 +0x08a00080, 0xc0608001, 0x407f8001, 0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 
72744 +0x00208080, 0x00e08192, 0x02000040, 0x00608091, 0x54e08110, 0xc040e0d1, 0x37208097, 0x3860c0d7, 
72745 +0x00208490, 0x00e08597, 0x00208080, 0x00208080, 0x1f608290, 0x20b41291, 0x08638491, 0x00608092, 
72746 +0x00208293, 0x0ef41294, 0x0d208594, 0x17208095, 0x17208096, 0x17208097, 0xc000f2d3, 0x406060d3, 
72747 +0x806060db, 0x08a00080, 0xc0608001, 0x01208097, 0xb0e3c0d7, 0x80a060d2, 0x98e28004, 0x98e2c0c0, 
72748 +0x80a0c0c4, 0xc080c4c3, 0x01e0b400, 0x06004002, 0x00a08490, 0x00e08097, 0x02208097, 0xb0e3c0d7, 
72749 +0xd8e2d0d0, 0xd8e2c0d0, 0x03208097, 0xb0e3c0d7, 0x00e08088, 0x0e004010, 0x00a060c3, 0x407f8001, 
72750 +0x4060e0d3, 0x8060e0db, 0x00208490, 0x00208698, 0x00208080, 0x00208080, 0x01208089, 0x8820c2c9, 
72751 +0x00608091, 0x00e08197, 0x0020f2d3, 0x406060d3, 0x806060db, 0x08e00180, 0xc0608001, };
72752 +#define threadcode_elan4_text_size 0x87c
72753 +static uint32_t threadcode_elan4_data[] = {
72754 +0};
72755 +#define threadcode_elan4_data_size 0x0
72756 +static uint32_t threadcode_elan4_rodata[] = {
72757 +0};
72758 +#define threadcode_elan4_rodata_size 0x0
72759 +static EP_SYMBOL threadcode_elan4_symbols[] = {
72760 +    {".thread_restart", 0x00000000f800000c},
72761 +    {".thread_start", 0x00000000f8000000},
72762 +    {"__bss_start", 0x00000000f810087c},
72763 +    {"_edata", 0x00000000f810087c},
72764 +    {"_end", 0x00000000f8100880},
72765 +    {"_etext", 0x00000000f800087c},
72766 +    {"_sdata", 0x00000000f810087c},
72767 +    {"_stext", 0x00000000f8000000},
72768 +    {"c_queue_rxd", 0x00000000f80007ec},
72769 +    {"c_reschedule", 0x00000000f80006b4},
72770 +    {"c_stall_thread", 0x00000000f800083c},
72771 +    {"c_waitevent", 0x00000000f80006f8},
72772 +    {"c_waitevent_interrupt", 0x00000000f8000768},
72773 +    {"ep4_spinblock", 0x00000000f8000080},
72774 +    {"ep4comms_rcvr", 0x00000000f8000140},
72775 +    {0, 0}};
72776 +EP_CODE threadcode_elan4 = {
72777 +   (unsigned char *) threadcode_elan4_text,
72778 +   threadcode_elan4_text_size,
72779 +   (unsigned char *) threadcode_elan4_data,
72780 +   threadcode_elan4_data_size,
72781 +   (unsigned char *) threadcode_elan4_rodata,
72782 +   threadcode_elan4_rodata_size,
72783 +   threadcode_elan4_symbols,
72784 +};
72785 diff -urN clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis
72786 --- clean/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis  1969-12-31 19:00:00.000000000 -0500
72787 +++ linux-2.6.9/drivers/net/qsnet/ep/threadcode_elan4_Linux.code.dis    2005-09-07 10:39:44.000000000 -0400
72788 @@ -0,0 +1,628 @@
72789 +
72790 +threadcode_elan4_Linux.code:     file format elf64-elan
72791 +
72792 +Disassembly of section .text:
72793 +
72794 +00000000f8000000 <_stext>:
72795 +    f8000000:  00 a0 00 87     call  %r2
72796 +    f8000004:  c0 40 60 cb     ld64  [ %sp ], %r8
72797 +    f8000008:  00 00 30 80     unimp 
72798 +
72799 +00000000f800000c <.thread_restart>:
72800 +    f800000c:  80 00 10 80     setflg 
72801 +    f8000010:  02 60 61 80     btst  2, %r5
72802 +    f8000014:  02 00 40 32     bne,a   f800001c <.thread_restart+0x10>
72803 +    f8000018:  80 7f 60 cb     ld64  [ %sp + -128 ], %r8
72804 +    f800001c:  04 60 61 80     btst  4, %r5
72805 +    f8000020:  02 00 40 32     bne,a   f8000028 <.thread_restart+0x1c>
72806 +    f8000024:  40 7f 60 d3     ld64  [ %sp + -192 ], %r16
72807 +    f8000028:  08 60 61 80     btst  8, %r5
72808 +    f800002c:  02 00 40 32     bne,a   f8000034 <.thread_restart+0x28>
72809 +    f8000030:  00 7f 60 db     ld64  [ %sp + -256 ], %r24
72810 +    f8000034:  10 60 61 80     btst  0x10, %r5
72811 +    f8000038:  02 00 40 32     bne,a   f8000040 <.thread_restart+0x34>
72812 +    f800003c:  c0 7e 60 e3     ld64  [ %sp + -320 ], %r32
72813 +    f8000040:  20 60 61 80     btst  0x20, %r5
72814 +    f8000044:  02 00 40 32     bne,a   f800004c <.thread_restart+0x40>
72815 +    f8000048:  80 7e 60 eb     ld64  [ %sp + -384 ], %r40
72816 +    f800004c:  40 60 61 80     btst  0x40, %r5
72817 +    f8000050:  02 00 40 32     bne,a   f8000058 <.thread_restart+0x4c>
72818 +    f8000054:  40 7e 60 f3     ld64  [ %sp + -448 ], %r48
72819 +    f8000058:  80 60 61 80     btst  0x80, %r5
72820 +    f800005c:  02 00 40 32     bne,a   f8000064 <.thread_restart+0x58>
72821 +    f8000060:  00 7e 60 fb     ld64  [ %sp + -512 ], %r56
72822 +    f8000064:  40 00 11 80     ldcc  %r4
72823 +    f8000068:  c3 80 10 80     rti  %r2, %r3
72824 +    f800006c:  c0 7f 60 c3     ld64  [ %sp + -64 ], %r0
72825 +    f8000070:  20 00 20 00     sethi  %hi(0x80008000), %r0
72826 +    f8000074:  20 00 20 00     sethi  %hi(0x80008000), %r0
72827 +    f8000078:  20 00 20 00     sethi  %hi(0x80008000), %r0
72828 +    f800007c:  20 00 20 00     sethi  %hi(0x80008000), %r0
72829 +
72830 +00000000f8000080 <ep4_spinblock>:
72831 +    f8000080:  40 7f 80 01     add  %sp, -192, %sp
72832 +    f8000084:  68 60 c0 c7     st8  %r7, [ %sp + 0x68 ]
72833 +    f8000088:  40 60 e0 d1     st32  %r16, [ %sp + 0x40 ]
72834 +    f800008c:  00 20 84 90     mov  %r16, %r16
72835 +    f8000090:  00 20 80 80     nop 
72836 +    f8000094:  00 20 80 80     nop 
72837 +    f8000098:  60 60 c0 d4     st8  %r20, [ %sp + 0x60 ]
72838 +    f800009c:  00 20 82 92     mov  %r8, %r18
72839 +    f80000a0:  00 60 82 90     mov  %r9, %r16
72840 +    f80000a4:  00 a0 82 94     mov  %r10, %r20
72841 +    f80000a8:  ff 3f 80 88     mov  -1, %r8
72842 +    f80000ac:  1c 38 12 93     srl8  %r8, 0x1c, %r19
72843 +
72844 +00000000f80000b0 <L2>:
72845 +    f80000b0:  c0 00 44 c8     ld8  [ %r16 ], %r8
72846 +    f80000b4:  13 00 42 91     and  %r8, %r19, %r17
72847 +    f80000b8:  c0 00 c5 d1     st8  %r17, [ %r20 ]
72848 +    f80000bc:  c0 00 44 c8     ld8  [ %r16 ], %r8
72849 +    f80000c0:  20 38 12 88     srl8  %r8, 0x20, %r8
72850 +    f80000c4:  00 20 b2 00     cmp  %r8, 0
72851 +    f80000c8:  0e 00 40 03     be  %xcc, f8000100 <L15>
72852 +    f80000cc:  01 60 84 08     add  %r17, 1, %r8
72853 +
72854 +00000000f80000d0 <L10>:
72855 +    f80000d0:  00 00 10 88     breaktest  %r8
72856 +    f80000d4:  04 20 42 88     and  %r8, 4, %r8
72857 +    f80000d8:  00 20 b2 00     cmp  %r8, 0
72858 +    f80000dc:  04 00 40 03     be  %xcc, f80000ec <L5>
72859 +    f80000e0:  00 20 80 80     nop 
72860 +    f80000e4:  74 01 00 40     call  f80006b4 <c_reschedule>
72861 +    f80000e8:  00 a0 84 88     mov  %r18, %r8
72862 +
72863 +00000000f80000ec <L5>:
72864 +    f80000ec:  c0 00 44 c8     ld8  [ %r16 ], %r8
72865 +    f80000f0:  20 38 12 88     srl8  %r8, 0x20, %r8
72866 +    f80000f4:  00 20 b2 00     cmp  %r8, 0
72867 +    f80000f8:  f6 ff 7f 13     bne  %xcc, f80000d0 <L10>
72868 +    f80000fc:  01 60 84 08     add  %r17, 1, %r8
72869 +
72870 +00000000f8000100 <L15>:
72871 +    f8000100:  10 16 12 82     sll8, byte   %r8, %r16, %r2
72872 +    f8000104:  80 00 94 c2     st4  %r2, [ %r16 ]
72873 +    f8000108:  c0 00 44 c8     ld8  [ %r16 ], %r8
72874 +    f800010c:  20 38 12 88     srl8  %r8, 0x20, %r8
72875 +    f8000110:  00 20 b2 00     cmp  %r8, 0
72876 +    f8000114:  e7 ff 7f 13     bne  %xcc, f80000b0 <L2>
72877 +    f8000118:  00 20 80 80     nop 
72878 +    f800011c:  68 60 40 c7     ld8  [ %sp + 0x68 ], %r7
72879 +    f8000120:  40 60 60 d1     ld32  [ %sp + 0x40 ], %r16
72880 +    f8000124:  60 60 40 d4     ld8  [ %sp + 0x60 ], %r20
72881 +    f8000128:  08 e0 01 80     retl 
72882 +    f800012c:  c0 60 80 01     add  %sp, 0xc0, %sp
72883 +    f8000130:  00 00 00 01     sethi  %hi(0), %sp
72884 +    f8000134:  00 00 00 01     sethi  %hi(0), %sp
72885 +    f8000138:  00 00 00 01     sethi  %hi(0), %sp
72886 +    f800013c:  00 00 00 01     sethi  %hi(0), %sp
72887 +
72888 +00000000f8000140 <ep4comms_rcvr>:
72889 +    f8000140:  c0 7e 80 01     add  %sp, -320, %sp
72890 +    f8000144:  c0 60 c0 c7     st8  %r7, [ %sp + 0xc0 ]
72891 +    f8000148:  40 60 e0 d3     st64  %r16, [ %sp + 0x40 ]
72892 +    f800014c:  00 20 84 90     mov  %r16, %r16
72893 +    f8000150:  00 20 80 80     nop 
72894 +    f8000154:  00 20 80 80     nop 
72895 +    f8000158:  80 60 e0 db     st64  %r24, [ %sp + 0x80 ]
72896 +    f800015c:  00 20 86 98     mov  %r24, %r24
72897 +    f8000160:  00 20 80 80     nop 
72898 +    f8000164:  00 20 80 80     nop 
72899 +    f8000168:  40 61 c0 c8     st8  %r8, [ %sp + 0x140 ]
72900 +    f800016c:  00 60 82 95     mov  %r9, %r21
72901 +    f8000170:  00 a0 82 9b     mov  %r10, %r27
72902 +    f8000174:  58 61 c0 cb     st8  %r11, [ %sp + 0x158 ]
72903 +    f8000178:  60 61 c0 cc     st8  %r12, [ %sp + 0x160 ]
72904 +    f800017c:  68 61 c0 cd     st8  %r13, [ %sp + 0x168 ]
72905 +    f8000180:  01 20 80 9c     mov  1, %r28
72906 +    f8000184:  08 e0 42 d1     ld8  [ %r11 + 8 ], %r17
72907 +
72908 +00000000f8000188 <L78>:
72909 +    f8000188:  1c 00 90 0a     neg  %r28, %r10
72910 +
72911 +00000000f800018c <L87>:
72912 +    f800018c:  05 b4 12 8a     sll8  %r10, 5, %r10
72913 +    f8000190:  60 61 40 c8     ld8  [ %sp + 0x160 ], %r8
72914 +    f8000194:  58 61 40 cb     ld8  [ %sp + 0x158 ], %r11
72915 +    f8000198:  58 01 00 40     call  f80006f8 <c_waitevent>
72916 +    f800019c:  18 e0 42 c9     ld8  [ %r11 + 0x18 ], %r9
72917 +    f80001a0:  00 20 80 9c     mov  %r0, %r28
72918 +    f80001a4:  58 61 40 cd     ld8  [ %sp + 0x158 ], %r13
72919 +    f80001a8:  c0 40 43 c8     ld8  [ %r13 ], %r8
72920 +    f80001ac:  08 40 b4 00     cmp  %r17, %r8
72921 +    f80001b0:  30 01 40 03     be  %xcc, f8000670 <L21>
72922 +    f80001b4:  ff ff 3f 08     sethi  %hi(0xfffffc00), %r8
72923 +    f80001b8:  e0 23 82 9f     or  %r8, 0x3e0, %r31
72924 +    f80001bc:  20 f4 17 9f     sll8  %r31, 0x20, %r31
72925 +    f80001c0:  10 e3 87 9f     or  %r31, 0x310, %r31
72926 +    f80001c4:  e0 23 82 9e     or  %r8, 0x3e0, %r30
72927 +    f80001c8:  20 b4 17 9e     sll8  %r30, 0x20, %r30
72928 +    f80001cc:  03 a3 87 9e     or  %r30, 0x303, %r30
72929 +    f80001d0:  00 a0 87 9d     mov  %r30, %r29
72930 +    f80001d4:  00 60 84 93     mov  %r17, %r19
72931 +
72932 +00000000f80001d8 <L86>:
72933 +    f80001d8:  18 60 84 08     add  %r17, 0x18, %r8
72934 +    f80001dc:  80 00 12 c2     ld4  [ %r8 ], %r2
72935 +    f80001e0:  08 9a 10 9a     srl8, byte   %r2, %r8, %r26
72936 +    f80001e4:  20 b4 16 9a     sll8  %r26, 0x20, %r26
72937 +    f80001e8:  20 b8 16 9a     srl8  %r26, 0x20, %r26
72938 +    f80001ec:  00 a8 86 09     add  %r26, 0x800, %r9
72939 +    f80001f0:  20 74 12 89     sll8  %r9, 0x20, %r9
72940 +    f80001f4:  01 12 00 08     sethi  %hi(0x480400), %r8
72941 +    f80001f8:  0a 38 12 88     srl8  %r8, 0xa, %r8
72942 +    f80001fc:  08 40 82 97     or  %r9, %r8, %r23
72943 +    f8000200:  45 20 80 88     mov  0x45, %r8
72944 +    f8000204:  06 34 12 88     sll8  %r8, 6, %r8
72945 +    f8000208:  40 61 40 c9     ld8  [ %sp + 0x140 ], %r9
72946 +    f800020c:  c8 40 42 c8     ld8  [ %r9 + %r8 ], %r8
72947 +    f8000210:  00 28 82 18     add  %r8, 0x800, %r24
72948 +    f8000214:  04 60 84 08     add  %r17, 4, %r8
72949 +    f8000218:  80 00 12 c2     ld4  [ %r8 ], %r2
72950 +    f800021c:  08 9a 10 88     srl8, byte   %r2, %r8, %r8
72951 +    f8000220:  20 34 12 88     sll8  %r8, 0x20, %r8
72952 +    f8000224:  20 38 12 88     srl8  %r8, 0x20, %r8
72953 +    f8000228:  00 20 82 99     mov  %r8, %r25
72954 +    f800022c:  20 60 84 08     add  %r17, 0x20, %r8
72955 +    f8000230:  80 00 12 c2     ld4  [ %r8 ], %r2
72956 +    f8000234:  08 9a 10 89     srl8, byte   %r2, %r8, %r9
72957 +    f8000238:  20 74 12 89     sll8  %r9, 0x20, %r9
72958 +    f800023c:  20 78 12 89     srl8  %r9, 0x20, %r9
72959 +    f8000240:  30 60 84 08     add  %r17, 0x30, %r8
72960 +    f8000244:  80 00 12 c2     ld4  [ %r8 ], %r2
72961 +    f8000248:  08 9a 10 94     srl8, byte   %r2, %r8, %r20
72962 +    f800024c:  20 34 15 94     sll8  %r20, 0x20, %r20
72963 +    f8000250:  20 38 15 94     srl8  %r20, 0x20, %r20
72964 +    f8000254:  02 60 46 88     and  %r25, 2, %r8
72965 +    f8000258:  00 20 b2 00     cmp  %r8, 0
72966 +    f800025c:  03 00 40 12     bne  f8000268 <L28>
72967 +    f8000260:  80 60 82 16     add  %r9, 0x80, %r22
72968 +    f8000264:  60 60 82 16     add  %r9, 0x60, %r22
72969 +
72970 +00000000f8000268 <L28>:
72971 +    f8000268:  90 60 85 09     add  %r21, 0x90, %r9
72972 +    f800026c:  80 40 12 c8     ld4  [ %r9 ], %r8
72973 +    f8000270:  01 20 82 08     inc  %r8
72974 +    f8000274:  80 40 92 c8     st4  %r8, [ %r9 ]
72975 +    f8000278:  04 60 12 c8     ld4  [ %r9 + 4 ], %r8
72976 +    f800027c:  04 3a 12 88     srl8, byte   %r8, 4, %r8
72977 +    f8000280:  00 20 b2 00     cmp  %r8, 0
72978 +    f8000284:  04 00 40 03     be  %xcc, f8000294 <L29>
72979 +    f8000288:  68 61 40 c8     ld8  [ %sp + 0x168 ], %r8
72980 +    f800028c:  7d ff ff 7f     call  f8000080 <ep4_spinblock>
72981 +    f8000290:  00 e0 86 8a     mov  %r27, %r10
72982 +
72983 +00000000f8000294 <L29>:
72984 +    f8000294:  88 60 45 d0     ld8  [ %r21 + 0x88 ], %r16
72985 +    f8000298:  00 20 b4 00     cmp  %r16, 0
72986 +    f800029c:  12 00 40 13     bne  %xcc, f80002e4 <L31>
72987 +    f80002a0:  90 60 85 12     add  %r21, 0x90, %r18
72988 +    f80002a4:  80 80 14 c8     ld4  [ %r18 ], %r8
72989 +    f80002a8:  80 c0 96 c8     st4  %r8, [ %r27 ]
72990 +    f80002ac:  64 01 00 40     call  f800083c <c_stall_thread>
72991 +    f80002b0:  00 60 85 88     mov  %r21, %r8
72992 +    f80002b4:  00 20 82 90     mov  %r8, %r16
72993 +    f80002b8:  80 80 14 c8     ld4  [ %r18 ], %r8
72994 +    f80002bc:  01 20 82 08     inc  %r8
72995 +    f80002c0:  80 80 94 c8     st4  %r8, [ %r18 ]
72996 +    f80002c4:  04 a0 14 c8     ld4  [ %r18 + 4 ], %r8
72997 +    f80002c8:  04 3a 12 88     srl8, byte   %r8, 4, %r8
72998 +    f80002cc:  00 20 b2 00     cmp  %r8, 0
72999 +    f80002d0:  05 00 40 03     be  %xcc, f80002e4 <L31>
73000 +    f80002d4:  68 61 40 c8     ld8  [ %sp + 0x168 ], %r8
73001 +    f80002d8:  00 a0 84 89     mov  %r18, %r9
73002 +    f80002dc:  69 ff ff 7f     call  f8000080 <ep4_spinblock>
73003 +    f80002e0:  00 e0 86 8a     mov  %r27, %r10
73004 +
73005 +00000000f80002e4 <L31>:
73006 +    f80002e4:  80 c0 14 c2     ld4  [ %r19 ], %r2
73007 +    f80002e8:  13 9a 10 89     srl8, byte   %r2, %r19, %r9
73008 +    f80002ec:  20 74 12 89     sll8  %r9, 0x20, %r9
73009 +    f80002f0:  20 78 12 89     srl8  %r9, 0x20, %r9
73010 +    f80002f4:  40 b0 36 08     sethi  %hi(0xdac10000), %r8
73011 +    f80002f8:  01 20 82 88     or  %r8, 1, %r8
73012 +    f80002fc:  08 40 b2 00     cmp  %r9, %r8
73013 +    f8000300:  06 00 40 03     be  %xcc, f8000318 <L35>
73014 +    f8000304:  90 60 85 08     add  %r21, 0x90, %r8
73015 +    f8000308:  80 00 12 c8     ld4  [ %r8 ], %r8
73016 +    f800030c:  80 c0 96 c8     st4  %r8, [ %r27 ]
73017 +    f8000310:  bf 00 40 10     b  f800060c <L79>
73018 +    f8000314:  a8 60 45 c8     ld8  [ %r21 + 0xa8 ], %r8
73019 +
73020 +00000000f8000318 <L35>:
73021 +    f8000318:  a0 23 44 c3     ld8  [ %r16 + 0x3a0 ], %r3
73022 +    f800031c:  01 60 46 88     and  %r25, 1, %r8
73023 +    f8000320:  00 20 b5 00     cmp  %r20, 0
73024 +    f8000324:  03 00 40 13     bne  %xcc, f8000330 <L40>
73025 +    f8000328:  14 00 82 09     add  %r8, %r20, %r9
73026 +    f800032c:  01 20 82 09     add  %r8, 1, %r9
73027 +
73028 +00000000f8000330 <L40>:
73029 +    f8000330:  05 20 80 88     mov  5, %r8
73030 +    f8000334:  09 00 92 21     sub  %r8, %r9, %r33
73031 +    f8000338:  07 74 18 8a     sll8  %r33, 7, %r10
73032 +    f800033c:  0a 00 84 0b     add  %r16, %r10, %r11
73033 +    f8000340:  05 74 18 88     sll8  %r33, 5, %r8
73034 +    f8000344:  08 00 84 0c     add  %r16, %r8, %r12
73035 +    f8000348:  40 61 40 cd     ld8  [ %sp + 0x140 ], %r13
73036 +    f800034c:  29 22 80 88     mov  0x229, %r8
73037 +    f8000350:  03 34 12 88     sll8  %r8, 3, %r8
73038 +    f8000354:  c8 40 43 c9     ld8  [ %r13 + %r8 ], %r9
73039 +    f8000358:  03 b4 16 88     sll8  %r26, 3, %r8
73040 +    f800035c:  c8 40 42 cf     ld8  [ %r9 + %r8 ], %r15
73041 +    f8000360:  01 60 46 88     and  %r25, 1, %r8
73042 +    f8000364:  00 20 b2 00     cmp  %r8, 0
73043 +    f8000368:  1d 00 40 02     be  f80003dc <L41>
73044 +    f800036c:  00 23 83 0c     add  %r12, 0x300, %r12
73045 +    f8000370:  ca 00 c4 d7     st8  %r23, [ %r16 + %r10 ]
73046 +    f8000374:  40 c4 0f 09     sethi  %hi(0x3f110000), %r9
73047 +    f8000378:  09 60 82 89     or  %r9, 9, %r9 ! 3f110009 <*ABS*+0x3f110009>
73048 +    f800037c:  08 e0 c2 c9     st8  %r9, [ %r11 + 8 ]
73049 +    f8000380:  0a e0 83 88     or  %r15, 0xa, %r8
73050 +    f8000384:  10 e0 c2 c8     st8  %r8, [ %r11 + 0x10 ]
73051 +    f8000388:  81 00 10 08     sethi  %hi(0x40020400), %r8
73052 +    f800038c:  0a 34 12 88     sll8  %r8, 0xa, %r8
73053 +    f8000390:  18 e0 c2 c8     st8  %r8, [ %r11 + 0x18 ]
73054 +    f8000394:  1d e0 83 88     or  %r15, 0x1d, %r8
73055 +    f8000398:  20 e0 c2 c8     st8  %r8, [ %r11 + 0x20 ]
73056 +    f800039c:  28 e0 c2 d8     st8  %r24, [ %r11 + 0x28 ]
73057 +    f80003a0:  24 e0 84 08     add  %r19, 0x24, %r8
73058 +    f80003a4:  80 00 12 c2     ld4  [ %r8 ], %r2
73059 +    f80003a8:  08 9a 10 88     srl8, byte   %r2, %r8, %r8
73060 +    f80003ac:  20 34 12 88     sll8  %r8, 0x20, %r8
73061 +    f80003b0:  20 38 12 88     srl8  %r8, 0x20, %r8
73062 +    f80003b4:  80 20 82 08     add  %r8, 0x80, %r8
73063 +    f80003b8:  30 e0 c2 c8     st8  %r8, [ %r11 + 0x30 ]
73064 +    f80003bc:  00 e1 80 08     add  %r3, 0x100, %r8
73065 +    f80003c0:  38 e0 c2 c8     st8  %r8, [ %r11 + 0x38 ]
73066 +    f80003c4:  40 e0 c2 d6     st8  %r22, [ %r11 + 0x40 ]
73067 +    f80003c8:  48 e0 c2 cc     st8  %r12, [ %r11 + 0x48 ]
73068 +    f80003cc:  c0 00 c3 df     st8  %r31, [ %r12 ]
73069 +    f80003d0:  20 e0 83 0f     add  %r15, 0x20, %r15
73070 +    f80003d4:  80 e0 82 0b     add  %r11, 0x80, %r11
73071 +    f80003d8:  20 20 83 0c     add  %r12, 0x20, %r12
73072 +
73073 +00000000f80003dc <L41>:
73074 +    f80003dc:  00 20 b5 00     cmp  %r20, 0
73075 +    f80003e0:  13 00 40 33     bne,a   %xcc, f800042c <L43>
73076 +    f80003e4:  00 20 80 8d     mov  %r0, %r13
73077 +    f80003e8:  c0 c0 c2 d7     st8  %r23, [ %r11 ]
73078 +    f80003ec:  40 c4 0f 0a     sethi  %hi(0x3f110000), %r10
73079 +    f80003f0:  09 a0 82 8a     or  %r10, 9, %r10
73080 +    f80003f4:  08 e0 c2 ca     st8  %r10, [ %r11 + 8 ]
73081 +    f80003f8:  0a e0 83 88     or  %r15, 0xa, %r8
73082 +    f80003fc:  10 e0 c2 c8     st8  %r8, [ %r11 + 0x10 ]
73083 +    f8000400:  00 04 00 08     sethi  %hi(0x100000), %r8
73084 +    f8000404:  18 e0 c2 c8     st8  %r8, [ %r11 + 0x18 ]
73085 +    f8000408:  1d e0 83 88     or  %r15, 0x1d, %r8
73086 +    f800040c:  20 e0 c2 c8     st8  %r8, [ %r11 + 0x20 ]
73087 +    f8000410:  28 e0 c2 d8     st8  %r24, [ %r11 + 0x28 ]
73088 +    f8000414:  40 e0 c2 d6     st8  %r22, [ %r11 + 0x40 ]
73089 +    f8000418:  48 e0 c2 cc     st8  %r12, [ %r11 + 0x48 ]
73090 +    f800041c:  c0 00 c3 de     st8  %r30, [ %r12 ]
73091 +    f8000420:  00 20 80 92     mov  %r0, %r18
73092 +    f8000424:  4b 00 40 10     b  f8000550 <L44>
73093 +    f8000428:  20 e0 83 0f     add  %r15, 0x20, %r15
73094 +
73095 +00000000f800042c <L43>:
73096 +    f800042c:  b8 23 84 08     add  %r16, 0x3b8, %r8
73097 +    f8000430:  80 00 12 c2     ld4  [ %r8 ], %r2
73098 +    f8000434:  08 9a 10 8e     srl8, byte   %r2, %r8, %r14
73099 +    f8000438:  20 b4 13 8e     sll8  %r14, 0x20, %r14
73100 +    f800043c:  20 b8 13 8e     srl8  %r14, 0x20, %r14
73101 +    f8000440:  00 20 80 92     mov  %r0, %r18
73102 +    f8000444:  14 80 b4 00     cmp  %r18, %r20
73103 +    f8000448:  2d 00 40 1b     bcc  %xcc, f80004fc <L75>
73104 +    f800044c:  40 c4 0f 08     sethi  %hi(0x3f110000), %r8
73105 +    f8000450:  09 20 82 a3     or  %r8, 9, %r35
73106 +    f8000454:  00 04 00 22     sethi  %hi(0x100000), %r34
73107 +    f8000458:  ff ff 3f 08     sethi  %hi(0xfffffc00), %r8
73108 +    f800045c:  e0 23 82 a0     or  %r8, 0x3e0, %r32
73109 +    f8000460:  20 34 18 a0     sll8  %r32, 0x20, %r32
73110 +    f8000464:  10 23 88 a0     or  %r32, 0x310, %r32
73111 +
73112 +00000000f8000468 <L52>:
73113 +    f8000468:  0d 40 83 09     add  %r13, %r13, %r9
73114 +    f800046c:  0d 40 82 09     add  %r9, %r13, %r9
73115 +    f8000470:  02 74 12 89     sll8  %r9, 2, %r9
73116 +    f8000474:  09 c0 84 09     add  %r19, %r9, %r9
73117 +    f8000478:  38 60 82 0a     add  %r9, 0x38, %r10
73118 +    f800047c:  80 80 12 c2     ld4  [ %r10 ], %r2
73119 +    f8000480:  0a 9a 10 8a     srl8, byte   %r2, %r10, %r10
73120 +    f8000484:  20 b4 12 8a     sll8  %r10, 0x20, %r10
73121 +    f8000488:  20 b8 12 8a     srl8  %r10, 0x20, %r10
73122 +    f800048c:  c0 c0 c2 d7     st8  %r23, [ %r11 ]
73123 +    f8000490:  08 e0 c2 e3     st8  %r35, [ %r11 + 8 ]
73124 +    f8000494:  0a e0 83 88     or  %r15, 0xa, %r8
73125 +    f8000498:  10 e0 c2 c8     st8  %r8, [ %r11 + 0x10 ]
73126 +    f800049c:  20 b4 12 88     sll8  %r10, 0x20, %r8
73127 +    f80004a0:  22 00 82 88     or  %r8, %r34, %r8
73128 +    f80004a4:  18 e0 c2 c8     st8  %r8, [ %r11 + 0x18 ]
73129 +    f80004a8:  1d e0 83 88     or  %r15, 0x1d, %r8
73130 +    f80004ac:  20 e0 c2 c8     st8  %r8, [ %r11 + 0x20 ]
73131 +    f80004b0:  28 e0 c2 d8     st8  %r24, [ %r11 + 0x28 ]
73132 +    f80004b4:  34 60 82 09     add  %r9, 0x34, %r9
73133 +    f80004b8:  80 40 12 c2     ld4  [ %r9 ], %r2
73134 +    f80004bc:  09 9a 10 89     srl8, byte   %r2, %r9, %r9
73135 +    f80004c0:  20 74 12 89     sll8  %r9, 0x20, %r9
73136 +    f80004c4:  20 78 12 89     srl8  %r9, 0x20, %r9
73137 +    f80004c8:  30 e0 c2 c9     st8  %r9, [ %r11 + 0x30 ]
73138 +    f80004cc:  38 e0 c2 ce     st8  %r14, [ %r11 + 0x38 ]
73139 +    f80004d0:  40 e0 c2 d6     st8  %r22, [ %r11 + 0x40 ]
73140 +    f80004d4:  48 e0 c2 cc     st8  %r12, [ %r11 + 0x48 ]
73141 +    f80004d8:  c0 00 c3 e0     st8  %r32, [ %r12 ]
73142 +    f80004dc:  0a 80 83 0e     add  %r14, %r10, %r14
73143 +    f80004e0:  0a 80 84 12     add  %r18, %r10, %r18
73144 +    f80004e4:  20 e0 83 0f     add  %r15, 0x20, %r15
73145 +    f80004e8:  80 e0 82 0b     add  %r11, 0x80, %r11
73146 +    f80004ec:  01 60 83 0d     inc  %r13
73147 +    f80004f0:  14 40 b3 00     cmp  %r13, %r20
73148 +    f80004f4:  dd ff 7f 0b     bcs  %xcc, f8000468 <L52>
73149 +    f80004f8:  20 20 83 0c     add  %r12, 0x20, %r12
73150 +
73151 +00000000f80004fc <L75>:
73152 +    f80004fc:  e0 3f 83 0c     add  %r12, -32, %r12
73153 +    f8000500:  c0 00 c3 dd     st8  %r29, [ %r12 ]
73154 +    f8000504:  bc 23 84 08     add  %r16, 0x3bc, %r8
73155 +    f8000508:  80 00 12 c2     ld4  [ %r8 ], %r2
73156 +    f800050c:  08 9a 10 88     srl8, byte   %r2, %r8, %r8
73157 +    f8000510:  20 34 12 88     sll8  %r8, 0x20, %r8
73158 +    f8000514:  20 38 12 88     srl8  %r8, 0x20, %r8
73159 +    f8000518:  12 00 b2 00     cmp  %r8, %r18
73160 +    f800051c:  0e 00 40 1b     bcc  %xcc, f8000554 <L81>
73161 +    f8000520:  07 74 18 88     sll8  %r33, 7, %r8
73162 +    f8000524:  00 60 88 8d     mov  %r33, %r13
73163 +    f8000528:  04 60 b8 00     cmp  %r33, 4
73164 +    f800052c:  08 00 40 19     bgu  %xcc, f800054c <L77>
73165 +    f8000530:  08 00 84 0b     add  %r16, %r8, %r11
73166 +    f8000534:  00 04 00 08     sethi  %hi(0x100000), %r8
73167 +
73168 +00000000f8000538 <L59>:
73169 +    f8000538:  18 e0 c2 c8     st8  %r8, [ %r11 + 0x18 ]
73170 +    f800053c:  01 60 83 0d     inc  %r13
73171 +    f8000540:  04 60 b3 00     cmp  %r13, 4
73172 +    f8000544:  fd ff 7f 09     bleu  %xcc, f8000538 <L59>
73173 +    f8000548:  80 e0 82 0b     add  %r11, 0x80, %r11
73174 +
73175 +00000000f800054c <L77>:
73176 +    f800054c:  fc 3f 80 92     mov  -4, %r18
73177 +
73178 +00000000f8000550 <L44>:
73179 +    f8000550:  07 74 18 88     sll8  %r33, 7, %r8
73180 +
73181 +00000000f8000554 <L81>:
73182 +    f8000554:  08 00 84 08     add  %r16, %r8, %r8
73183 +    f8000558:  60 61 40 cb     ld8  [ %sp + 0x160 ], %r11
73184 +    f800055c:  c0 00 62 e3     ld64  [ %r8 ], %r32
73185 +    f8000560:  40 20 62 f3     ld64  [ %r8 + 0x40 ], %r48
73186 +    f8000564:  c0 c0 e2 e3     st64  %r32, [ %r11 ]
73187 +    f8000568:  c0 c0 e2 f3     st64  %r48, [ %r11 ]
73188 +    f800056c:  98 22 44 c8     ld8  [ %r16 + 0x298 ], %r8
73189 +    f8000570:  88 60 c5 c8     st8  %r8, [ %r21 + 0x88 ]
73190 +    f8000574:  88 60 45 c8     ld8  [ %r21 + 0x88 ], %r8
73191 +    f8000578:  00 20 b2 00     cmp  %r8, 0
73192 +    f800057c:  05 00 40 33     bne,a   %xcc, f8000590 <L82>
73193 +    f8000580:  d8 22 94 c0     st4  %r0, [ %r16 + 0x2d8 ]
73194 +    f8000584:  88 60 85 08     add  %r21, 0x88, %r8
73195 +    f8000588:  80 60 c5 c8     st8  %r8, [ %r21 + 0x80 ]
73196 +    f800058c:  d8 22 94 c0     st4  %r0, [ %r16 + 0x2d8 ]
73197 +
73198 +00000000f8000590 <L82>:
73199 +    f8000590:  04 60 46 88     and  %r25, 4, %r8
73200 +    f8000594:  00 20 b2 00     cmp  %r8, 0
73201 +    f8000598:  0c 00 40 02     be  f80005c8 <L61>
73202 +    f800059c:  dc 22 94 c0     st4  %r0, [ %r16 + 0x2dc ]
73203 +    f80005a0:  c0 c0 64 e3     ld64  [ %r19 ], %r32
73204 +    f80005a4:  40 e0 64 f3     ld64  [ %r19 + 0x40 ], %r48
73205 +    f80005a8:  c0 c0 e0 e3     st64  %r32, [ %r3 ]
73206 +    f80005ac:  80 e0 64 e3     ld64  [ %r19 + 0x80 ], %r32
73207 +    f80005b0:  40 e0 e0 f3     st64  %r48, [ %r3 + 0x40 ]
73208 +    f80005b4:  c0 e0 64 f3     ld64  [ %r19 + 0xc0 ], %r48
73209 +    f80005b8:  80 e0 e0 e3     st64  %r32, [ %r3 + 0x80 ]
73210 +    f80005bc:  c0 e0 e0 f3     st64  %r48, [ %r3 + 0xc0 ]
73211 +    f80005c0:  07 00 40 10     b  f80005dc <L80>
73212 +    f80005c4:  88 e2 80 08     add  %r3, 0x288, %r8
73213 +
73214 +00000000f80005c8 <L61>:
73215 +    f80005c8:  c0 c0 64 e3     ld64  [ %r19 ], %r32
73216 +    f80005cc:  40 e0 64 f3     ld64  [ %r19 + 0x40 ], %r48
73217 +    f80005d0:  c0 c0 e0 e3     st64  %r32, [ %r3 ]
73218 +    f80005d4:  40 e0 e0 f3     st64  %r48, [ %r3 + 0x40 ]
73219 +    f80005d8:  88 e2 80 08     add  %r3, 0x288, %r8
73220 +
73221 +00000000f80005dc <L80>:
73222 +    f80005dc:  08 96 14 82     sll8, byte   %r18, %r8, %r2
73223 +    f80005e0:  80 00 92 c2     st4  %r2, [ %r8 ]
73224 +    f80005e4:  40 61 40 cd     ld8  [ %sp + 0x140 ], %r13
73225 +    f80005e8:  29 22 80 88     mov  0x229, %r8
73226 +    f80005ec:  03 34 12 88     sll8  %r8, 3, %r8
73227 +    f80005f0:  c8 40 43 c9     ld8  [ %r13 + %r8 ], %r9
73228 +    f80005f4:  03 b4 16 88     sll8  %r26, 3, %r8
73229 +    f80005f8:  c8 40 c2 cf     st8  %r15, [ %r9 + %r8 ]
73230 +    f80005fc:  90 60 85 08     add  %r21, 0x90, %r8
73231 +    f8000600:  80 00 12 c8     ld4  [ %r8 ], %r8
73232 +    f8000604:  80 c0 96 c8     st4  %r8, [ %r27 ]
73233 +
73234 +00000000f8000608 <L38>:
73235 +    f8000608:  a8 60 45 c8     ld8  [ %r21 + 0xa8 ], %r8
73236 +
73237 +00000000f800060c <L79>:
73238 +    f800060c:  08 40 b4 00     cmp  %r17, %r8
73239 +    f8000610:  03 00 40 13     bne  %xcc, f800061c <L66>
73240 +    f8000614:  00 61 84 11     add  %r17, 0x100, %r17
73241 +    f8000618:  a0 60 45 d1     ld8  [ %r21 + 0xa0 ], %r17
73242 +
73243 +00000000f800061c <L66>:
73244 +    f800061c:  98 60 45 c8     ld8  [ %r21 + 0x98 ], %r8
73245 +    f8000620:  00 20 b2 00     cmp  %r8, 0
73246 +    f8000624:  04 00 40 13     bne  %xcc, f8000634 <L83>
73247 +    f8000628:  01 20 87 1c     inc  %r28
73248 +    f800062c:  58 61 40 c9     ld8  [ %sp + 0x158 ], %r9
73249 +    f8000630:  08 60 c2 d1     st8  %r17, [ %r9 + 8 ]
73250 +
73251 +00000000f8000634 <L83>:
73252 +    f8000634:  fe 21 b7 00     cmp  %r28, 0x1fe
73253 +    f8000638:  0f 00 40 35     bg,a   %xcc, f8000674 <L84>
73254 +    f800063c:  98 60 45 cb     ld8  [ %r21 + 0x98 ], %r11
73255 +    f8000640:  00 00 10 88     breaktest  %r8
73256 +    f8000644:  02 20 42 88     and  %r8, 2, %r8
73257 +    f8000648:  00 20 b2 00     cmp  %r8, 0
73258 +    f800064c:  05 00 40 03     be  %xcc, f8000660 <L85>
73259 +    f8000650:  58 61 40 ca     ld8  [ %sp + 0x158 ], %r10
73260 +    f8000654:  18 00 00 40     call  f80006b4 <c_reschedule>
73261 +    f8000658:  60 61 40 c8     ld8  [ %sp + 0x160 ], %r8
73262 +    f800065c:  58 61 40 ca     ld8  [ %sp + 0x158 ], %r10
73263 +
73264 +00000000f8000660 <L85>:
73265 +    f8000660:  c0 80 42 c8     ld8  [ %r10 ], %r8
73266 +    f8000664:  08 40 b4 00     cmp  %r17, %r8
73267 +    f8000668:  dc fe 7f 13     bne  %xcc, f80001d8 <L86>
73268 +    f800066c:  00 60 84 93     mov  %r17, %r19
73269 +
73270 +00000000f8000670 <L21>:
73271 +    f8000670:  98 60 45 cb     ld8  [ %r21 + 0x98 ], %r11
73272 +
73273 +00000000f8000674 <L84>:
73274 +    f8000674:  00 e0 b2 00     cmp  %r11, 0
73275 +    f8000678:  c5 fe 7f 03     be  %xcc, f800018c <L87>
73276 +    f800067c:  1c 00 90 0a     neg  %r28, %r10
73277 +    f8000680:  60 61 40 c8     ld8  [ %sp + 0x160 ], %r8
73278 +    f8000684:  60 60 85 09     add  %r21, 0x60, %r9
73279 +    f8000688:  38 00 00 40     call  f8000768 <c_waitevent_interrupt>
73280 +    f800068c:  e0 3f 80 8a     mov  -32, %r10
73281 +    f8000690:  58 61 40 cb     ld8  [ %sp + 0x158 ], %r11
73282 +    f8000694:  08 e0 c2 d1     st8  %r17, [ %r11 + 8 ]
73283 +    f8000698:  bc fe 7f 10     b  f8000188 <L78>
73284 +    f800069c:  01 20 87 1c     inc  %r28
73285 +    f80006a0:  c0 60 40 c7     ld8  [ %sp + 0xc0 ], %r7
73286 +    f80006a4:  40 60 60 d3     ld64  [ %sp + 0x40 ], %r16
73287 +    f80006a8:  80 60 60 db     ld64  [ %sp + 0x80 ], %r24
73288 +    f80006ac:  08 e0 01 80     retl 
73289 +    f80006b0:  40 61 80 01     add  %sp, 0x140, %sp
73290 +
73291 +00000000f80006b4 <c_reschedule>:
73292 +    f80006b4:  80 7f 80 01     add  %sp, -128, %sp
73293 +    f80006b8:  c0 40 e0 d3     st64  %r16, [ %sp ]
73294 +    f80006bc:  40 60 e0 db     st64  %r24, [ %sp + 0x40 ]
73295 +    f80006c0:  00 20 84 90     mov  %r16, %r16
73296 +    f80006c4:  00 20 86 98     mov  %r24, %r24
73297 +    f80006c8:  00 20 80 80     nop 
73298 +    f80006cc:  00 20 80 80     nop 
73299 +    f80006d0:  00 e0 81 92     mov  %r7, %r18
73300 +    f80006d4:  02 00 00 40     call  f80006dc <c_reschedule+0x28>
73301 +    f80006d8:  00 60 80 91     mov  %sp, %r17
73302 +    f80006dc:  14 e0 81 10     add  %r7, 0x14, %r16
73303 +    f80006e0:  17 20 80 97     mov  0x17, %r23
73304 +    f80006e4:  c0 00 f2 d3     st64suspend  %r16, [ %r8 ]
73305 +    f80006e8:  c0 40 60 d3     ld64  [ %sp ], %r16
73306 +    f80006ec:  40 60 60 db     ld64  [ %sp + 0x40 ], %r24
73307 +    f80006f0:  08 a0 00 80     jmp  %r2 + 8
73308 +    f80006f4:  80 60 80 01     add  %sp, 0x80, %sp
73309 +
73310 +00000000f80006f8 <c_waitevent>:
73311 +    f80006f8:  40 7f 80 01     add  %sp, -192, %sp
73312 +    f80006fc:  40 60 e0 d3     st64  %r16, [ %sp + 0x40 ]
73313 +    f8000700:  80 60 e0 db     st64  %r24, [ %sp + 0x80 ]
73314 +    f8000704:  00 20 84 90     mov  %r16, %r16
73315 +    f8000708:  00 20 86 98     mov  %r24, %r24
73316 +    f800070c:  00 20 80 80     nop 
73317 +    f8000710:  00 20 80 80     nop 
73318 +    f8000714:  00 e0 81 92     mov  %r7, %r18
73319 +    f8000718:  02 00 00 40     call  f8000720 <c_waitevent+0x28>
73320 +    f800071c:  00 60 80 91     mov  %sp, %r17
73321 +    f8000720:  40 e0 81 10     add  %r7, 0x40, %r16
73322 +    f8000724:  c0 40 e0 d1     st32  %r16, [ %sp ]
73323 +    f8000728:  37 20 80 97     mov  0x37, %r23
73324 +    f800072c:  38 60 c0 d7     st8  %r23, [ %sp + 0x38 ]
73325 +    f8000730:  00 20 84 90     mov  %r16, %r16
73326 +    f8000734:  00 e0 85 97     mov  %r23, %r23
73327 +    f8000738:  00 20 80 80     nop 
73328 +    f800073c:  00 20 80 80     nop 
73329 +    f8000740:  1f 60 82 90     or  %r9, 0x1f, %r16
73330 +    f8000744:  20 b4 12 91     sll8  %r10, 0x20, %r17
73331 +    f8000748:  08 63 84 91     or  %r17, 0x308, %r17
73332 +    f800074c:  00 60 80 92     mov  %sp, %r18
73333 +    f8000750:  00 20 82 93     mov  %r8, %r19
73334 +    f8000754:  c0 00 f2 d1     st32suspend  %r16, [ %r8 ]
73335 +    f8000758:  40 60 60 d3     ld64  [ %sp + 0x40 ], %r16
73336 +    f800075c:  80 60 60 db     ld64  [ %sp + 0x80 ], %r24
73337 +    f8000760:  08 a0 00 80     jmp  %r2 + 8
73338 +    f8000764:  c0 60 80 01     add  %sp, 0xc0, %sp
73339 +
73340 +00000000f8000768 <c_waitevent_interrupt>:
73341 +    f8000768:  40 7f 80 01     add  %sp, -192, %sp
73342 +    f800076c:  40 60 e0 d3     st64  %r16, [ %sp + 0x40 ]
73343 +    f8000770:  80 60 e0 db     st64  %r24, [ %sp + 0x80 ]
73344 +    f8000774:  00 20 84 90     mov  %r16, %r16
73345 +    f8000778:  00 20 86 98     mov  %r24, %r24
73346 +    f800077c:  00 20 80 80     nop 
73347 +    f8000780:  00 20 80 80     nop 
73348 +    f8000784:  00 e0 81 92     mov  %r7, %r18
73349 +    f8000788:  02 00 00 40     call  f8000790 <c_waitevent_interrupt+0x28>
73350 +    f800078c:  00 60 80 91     mov  %sp, %r17
73351 +    f8000790:  54 e0 81 10     add  %r7, 0x54, %r16
73352 +    f8000794:  c0 40 e0 d1     st32  %r16, [ %sp ]
73353 +    f8000798:  37 20 80 97     mov  0x37, %r23
73354 +    f800079c:  38 60 c0 d7     st8  %r23, [ %sp + 0x38 ]
73355 +    f80007a0:  00 20 84 90     mov  %r16, %r16
73356 +    f80007a4:  00 e0 85 97     mov  %r23, %r23
73357 +    f80007a8:  00 20 80 80     nop 
73358 +    f80007ac:  00 20 80 80     nop 
73359 +    f80007b0:  1f 60 82 90     or  %r9, 0x1f, %r16
73360 +    f80007b4:  20 b4 12 91     sll8  %r10, 0x20, %r17
73361 +    f80007b8:  08 63 84 91     or  %r17, 0x308, %r17
73362 +    f80007bc:  00 60 80 92     mov  %sp, %r18
73363 +    f80007c0:  00 20 82 93     mov  %r8, %r19
73364 +    f80007c4:  0e f4 12 94     sll8  %r11, 0xe, %r20
73365 +    f80007c8:  0d 20 85 94     or  %r20, 0xd, %r20
73366 +    f80007cc:  17 20 80 95     mov  0x17, %r21
73367 +    f80007d0:  17 20 80 96     mov  0x17, %r22
73368 +    f80007d4:  17 20 80 97     mov  0x17, %r23
73369 +    f80007d8:  c0 00 f2 d3     st64suspend  %r16, [ %r8 ]
73370 +    f80007dc:  40 60 60 d3     ld64  [ %sp + 0x40 ], %r16
73371 +    f80007e0:  80 60 60 db     ld64  [ %sp + 0x80 ], %r24
73372 +    f80007e4:  08 a0 00 80     jmp  %r2 + 8
73373 +    f80007e8:  c0 60 80 01     add  %sp, 0xc0, %sp
73374 +
73375 +00000000f80007ec <c_queue_rxd>:
73376 +    f80007ec:  01 20 80 97     mov  1, %r23
73377 +    f80007f0:  b0 e3 c0 d7     st8  %r23, [ %r3 + 0x3b0 ]
73378 +    f80007f4:  80 a0 60 d2     ld16  [ %r2 + 0x80 ], %r18
73379 +    f80007f8:  98 e2 80 04     add  %r3, 0x298, %r4
73380 +    f80007fc:  98 e2 c0 c0     st8  %r0, [ %r3 + 0x298 ]
73381 +    f8000800:  80 a0 c0 c4     st8  %r4, [ %r2 + 0x80 ]
73382 +    f8000804:  c0 80 c4 c3     st8  %r3, [ %r18 ]
73383 +    f8000808:  01 e0 b4 00     cmp  %r19, 1
73384 +    f800080c:  06 00 40 02     be  f8000824 <c_queue_rxd+0x38>
73385 +    f8000810:  00 a0 84 90     mov  %r18, %r16
73386 +    f8000814:  00 e0 80 97     mov  %r3, %r23
73387 +    f8000818:  02 20 80 97     mov  2, %r23
73388 +    f800081c:  b0 e3 c0 d7     st8  %r23, [ %r3 + 0x3b0 ]
73389 +    f8000820:  d8 e2 d0 d0     st8suspend  %r16, [ %r3 + 0x2d8 ]
73390 +    f8000824:  d8 e2 c0 d0     st8  %r16, [ %r3 + 0x2d8 ]
73391 +    f8000828:  03 20 80 97     mov  3, %r23
73392 +    f800082c:  b0 e3 c0 d7     st8  %r23, [ %r3 + 0x3b0 ]
73393 +    f8000830:  00 e0 80 88     mov  %r3, %r8
73394 +    f8000834:  0e 00 40 10     b  f800086c <.epcomms_resume_thread>
73395 +    f8000838:  00 a0 60 c3     ld64  [ %r2 ], %r0
73396 +
73397 +00000000f800083c <c_stall_thread>:
73398 +    f800083c:  40 7f 80 01     add  %sp, -192, %sp
73399 +    f8000840:  40 60 e0 d3     st64  %r16, [ %sp + 0x40 ]
73400 +    f8000844:  80 60 e0 db     st64  %r24, [ %sp + 0x80 ]
73401 +    f8000848:  00 20 84 90     mov  %r16, %r16
73402 +    f800084c:  00 20 86 98     mov  %r24, %r24
73403 +    f8000850:  00 20 80 80     nop 
73404 +    f8000854:  00 20 80 80     nop 
73405 +    f8000858:  01 20 80 89     mov  1, %r9
73406 +    f800085c:  88 20 c2 c9     st8  %r9, [ %r8 + 0x88 ]
73407 +    f8000860:  00 60 80 91     mov  %sp, %r17
73408 +    f8000864:  00 e0 81 97     mov  %r7, %r23
73409 +    f8000868:  00 20 f2 d3     st64suspend  %r16, [ %r8 ]
73410 +
73411 +00000000f800086c <.epcomms_resume_thread>:
73412 +    f800086c:  40 60 60 d3     ld64  [ %sp + 0x40 ], %r16
73413 +    f8000870:  80 60 60 db     ld64  [ %sp + 0x80 ], %r24
73414 +    f8000874:  08 e0 01 80     retl 
73415 +    f8000878:  c0 60 80 01     add  %sp, 0xc0, %sp
73416 +Disassembly of section .data:
73417 diff -urN clean/drivers/net/qsnet/jtag/jtagdrv.c linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.c
73418 --- clean/drivers/net/qsnet/jtag/jtagdrv.c      1969-12-31 19:00:00.000000000 -0500
73419 +++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.c        2003-06-07 12:02:35.000000000 -0400
73420 @@ -0,0 +1,451 @@
73421 +/*
73422 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
73423 + *
73424 + *    For licensing information please see the supplied COPYING file
73425 + *
73426 + */
73427 +
73428 +#ident "@(#)$Id: jtagdrv.c,v 1.12 2003/06/07 16:02:35 david Exp $"
73429 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv.c,v $*/
73430 +
73431 +#include <qsnet/types.h>
73432 +
73433 +#include "jtagdrv.h"
73434 +#include <jtag/jtagio.h>
73435 +
73436 +int
73437 +jtagdrv_strobe_data (JTAG_DEV *dev, u_char data)
73438 +{
73439 +    u_char dsr;
73440 +
73441 +    PRINTF (DBG_ECPP, ("jtagdrv_strobe_data: %s %s %s -> ", (data & LPT_DATA_TRST) ? "TRST" : "trst", 
73442 +                      (data & LPT_DATA_TDI) ? "TDI" : "tdi", (data & LPT_DATA_TMS) ? "TMS" : "tms"));
73443 +
73444 +
73445 +    LPT_WRITE_DATA (dev, data); DELAY(5);                      /* Drive NEW values on data wires */
73446 +    LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(5);             /* Drive strobe low */
73447 +    LPT_READ_STAT  (dev, dsr); DELAY(5);                       /* Sample TDI from ring */
73448 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe high */
73449 +
73450 +    PRINTF (DBG_ECPP, ("%s\n", (dsr & LPT_STAT_PE) ? "TDO" : "tdo"));
73451 +
73452 +    return ((dsr & LPT_STAT_PE) ? 1 : 0);
73453 +}
73454 +
73455 +void
73456 +jtagdrv_select_ring (JTAG_DEV *dev, u_int ring)
73457 +{
73458 +    PRINTF (DBG_ECPP, ("jtagdrv_select_ring: ring=0x%x\n", ring));
73459 +
73460 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe and TCLK high */
73461 +    LPT_WRITE_DATA (dev, ring);        DELAY(5);                       /* Drive ring address */
73462 +    LPT_WRITE_CTRL (dev, LPT_CTRL_RCLK); DELAY(5);             /* Drive strobe low */
73463 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* Drive strobe high */
73464 +}
73465 +
73466 +void
73467 +jtagdrv_reset (JTAG_DEV *dev)
73468 +{
73469 +    register int i;
73470 +
73471 +    for (i = 0; i < 5; i++)
73472 +       jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                /* 5 clocks to Reset from any state */
73473 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
73474 +}
73475 +
73476 +void
73477 +jtagdrv_shift_ir (JTAG_DEV *dev, u_char *value, int nbits)
73478 +{
73479 +    register int i;
73480 +    register int bit;
73481 +
73482 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select DR-Scan */
73483 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select IR-Scan */
73484 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Capture-IR */
73485 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Shift-IR */
73486 +    
73487 +    for (i = 0; i < nbits; i++)
73488 +    {
73489 +       /* strobe through the instruction bits,  asserting TMS on the last bit */
73490 +
73491 +       if (i == (nbits-1))
73492 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
73493 +       else
73494 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
73495 +       
73496 +       if (bit)
73497 +           JTAG_SET_BIT(value, i);
73498 +       else
73499 +           JTAG_CLR_BIT(value, i);
73500 +    }
73501 +    
73502 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Update-IR */
73503 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
73504 +}
73505 +
73506 +
73507 +void
73508 +jtagdrv_shift_dr (JTAG_DEV *dev, u_char *value, int nbits)
73509 +{
73510 +    register int i;
73511 +    register int bit;
73512 +
73513 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Select DR-Scan */
73514 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Capture-DR */
73515 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Shift-DR */
73516 +    
73517 +    for (i = 0; i < nbits; i++)
73518 +    {
73519 +       /* strobe through the data bits,  asserting TMS on the last bit */
73520 +
73521 +       if (i == (nbits-1))
73522 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
73523 +       else
73524 +           bit = jtagdrv_strobe_data (dev, LPT_DATA_TRST | (JTAG_BIT(value, i) ? LPT_DATA_TDI : 0));
73525 +       
73526 +       if (bit)
73527 +           JTAG_SET_BIT(value, i);
73528 +       else
73529 +           JTAG_CLR_BIT(value, i);
73530 +    }
73531 +    
73532 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST | LPT_DATA_TMS);                   /* to Update-DR */
73533 +    jtagdrv_strobe_data (dev, LPT_DATA_TRST);                                  /* to Run-Test/Idle */
73534 +}
73535 +
73536 +static int
73537 +jtagdrv_i2c_start (JTAG_DEV *dev)
73538 +{
73539 +    u_char dsr;
73540 +    int i;
73541 +
73542 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_start\n"));
73543 +    
73544 +    /* Issue a stop sequence */
73545 +    LPT_WRITE_CTRL (dev,  LPT_CTRL_SCLK); DELAY(1);            /* SCLK low */
73546 +    LPT_WRITE_DATA (dev, 0); DELAY(5);                         /* SDA low */
73547 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* SCLK high */
73548 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);              /* SDA high */
73549 +    
73550 +    /* sample the line to see if we're idle */
73551 +    LPT_READ_STAT (dev, dsr);                                  /* sample SDA */
73552 +    if ((dsr & LPT_STAT_SDA) == 0)                             /* Cannot start if SDA already driven */
73553 +    {
73554 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_start: cannot start - sda driven low\n"));
73555 +
73556 +       for (i = 0; i < 16 ; i++)
73557 +       {
73558 +           LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5);      /* SCLK low */
73559 +           LPT_WRITE_CTRL (dev, 0); DELAY(5);                  /* SCLK high */
73560 +           LPT_READ_STAT  (dev, dsr);
73561 +           
73562 +           if (dsr & LPT_STAT_SDA)
73563 +           {
73564 +               PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - stopped after %d clocks\n", i));
73565 +               break;
73566 +           }
73567 +       }
73568 +
73569 +       if ((dsr & LPT_STAT_SDA) == 0)
73570 +       {
73571 +           PRINTF (DBG_ECPP, ("jtagdrv_i2c_start - cannot start - not idle\n"));
73572 +           return (0);
73573 +       }
73574 +
73575 +       /* seen SDA float high, so issue a stop sequence */
73576 +       LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);          /* SCLK low */
73577 +       LPT_WRITE_DATA (dev, 0); DELAY(5);                      /* SDA low */
73578 +       LPT_WRITE_CTRL (dev, 0); DELAY(5);                      /* SCLK high */
73579 +       LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);           /* SDA high */
73580 +    }
73581 +
73582 +    LPT_WRITE_DATA (dev, 0); DELAY(4);                         /* drive SDA low */
73583 +    return (1);
73584 +}
73585 +
73586 +static void
73587 +jtagdrv_i2c_stop (JTAG_DEV *dev)
73588 +{
73589 +    u_char dsr;
73590 +    int    i;
73591 +
73592 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop\n"));
73593 +
73594 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
73595 +    LPT_WRITE_DATA (dev, 0); DELAY(5);                         /* SDA low */
73596 +    LPT_WRITE_CTRL (dev, 0); DELAY(5);                         /* SCLK high */
73597 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA); DELAY(5);              /* SDA high */
73598 +
73599 +    /* 
73600 +     * bug fix for temperature sensor chip
73601 +     * if it's still driving SDA, then clock
73602 +     * it until it stops driving it 
73603 +     */
73604 +    LPT_READ_STAT (dev, dsr);
73605 +    if ((dsr & LPT_STAT_SDA) == 0)
73606 +    {
73607 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - slave not stodeved\n"));
73608 +       for (i = 0; i < 16 ; i++)
73609 +       {
73610 +           LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(5);      /* SCLK low */
73611 +           LPT_WRITE_CTRL (dev, 0); DELAY(5);                  /* SCLK high */
73612 +           LPT_READ_STAT  (dev, dsr);
73613 +           
73614 +           if (dsr & LPT_STAT_SDA)
73615 +               break;
73616 +       }
73617 +       PRINTF (DBG_ECPP, ("jtagdrv_i2c_stop - stodeved after %d clocks\n", i));
73618 +    }
73619 +}
73620 +
73621 +static int
73622 +jtagdrv_i2c_strobe (JTAG_DEV *dev, u_char data)
73623 +{
73624 +    u_char dsr;
73625 +    
73626 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_strobe : %s", (data & LPT_DATA_SDA) ? "SDA" : "sda"));
73627 +
73628 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
73629 +    LPT_WRITE_DATA (dev, data);        DELAY(5);                       /* write data */
73630 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
73631 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA */
73632 +
73633 +    PRINTF (DBG_ECPP, (" -> %s\n", (dsr & LPT_STAT_SDA) ? "SDA" : "sda"));
73634 +
73635 +    return ((dsr & LPT_STAT_SDA) ? 1 : 0);
73636 +}
73637 +
73638 +static int
73639 +jtagdrv_i2c_get_ack (JTAG_DEV *dev)
73640 +{
73641 +    u_char dsr;
73642 +
73643 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
73644 +    LPT_WRITE_DATA (dev, LPT_DATA_SDA);        DELAY(5);               /* SDA high */
73645 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
73646 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA */
73647 +
73648 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_get_ack -> %s\n", (dsr & LPT_STAT_SDA) ? "no ack" : "ack"));
73649 +    
73650 +    return ((dsr & LPT_STAT_SDA) ? 0 : 1);
73651 +}
73652 +
73653 +static int
73654 +jtagdrv_i2c_drive_ack (JTAG_DEV *dev, int nack)
73655 +{
73656 +    u_char dsr;
73657 +
73658 +    LPT_WRITE_CTRL (dev, LPT_CTRL_SCLK); DELAY(1);             /* SCLK low */
73659 +    LPT_WRITE_DATA (dev, nack ? LPT_DATA_SDA : 0); DELAY(5);   /* SDA low for ack, high for nack */
73660 +    LPT_WRITE_CTRL (dev, 0);                                   /* SCLK high */
73661 +    LPT_READ_STAT  (dev, dsr); DELAY(4);                       /* Sample SDA for ack */
73662 +
73663 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_drive_ack %d -> %s\n", nack, (dsr & LPT_STAT_SDA) ? "done" : "more"));
73664 +    
73665 +    return ((dsr & LPT_STAT_SDA) ? 1 : 0);
73666 +}
73667 +
73668 +static void
73669 +jtagdrv_i2c_shift_addr (JTAG_DEV *dev, u_int address, int readNotWrite)
73670 +{
73671 +    register int i;
73672 +
73673 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_addr: %x\n", address));
73674 +
73675 +    for (i = I2C_ADDR_LEN-1; i >= 0; i--)
73676 +       jtagdrv_i2c_strobe (dev, (address & (1 << i)) ? LPT_DATA_SDA : 0);
73677 +    
73678 +    jtagdrv_i2c_strobe (dev, readNotWrite ? LPT_DATA_SDA : 0);
73679 +}
73680 +
73681 +static u_char
73682 +jtagdrv_i2c_shift_data (JTAG_DEV *dev, u_char data)
73683 +{
73684 +    register int i;
73685 +    u_char val = 0;
73686 +    
73687 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : %02x\n", data));
73688 +
73689 +    for (i = I2C_DATA_LEN-1; i >= 0; i--)
73690 +       if (jtagdrv_i2c_strobe (dev, data & (1 << i) ? LPT_DATA_SDA : 0))
73691 +           val |= (1 << i);
73692 +
73693 +    PRINTF (DBG_ECPP, ("jtagdrv_i2c_shift_data : -> %02x\n", val));
73694 +
73695 +    return (val);
73696 +}
73697 +
73698 +int
73699 +jtagdrv_i2c_write (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
73700 +{
73701 +    register int i;
73702 +
73703 +    PRINTF (DBG_FN, ("jtagdrv_i2c_write: address=%x count=%d data=%02x\n", address, count, data[0]));
73704 +
73705 +    if (! jtagdrv_i2c_start (dev))
73706 +       return (I2C_OP_NOT_IDLE);
73707 +
73708 +    jtagdrv_i2c_shift_addr (dev, address, 0);
73709 +    
73710 +    if (! jtagdrv_i2c_get_ack (dev))
73711 +    {
73712 +       PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on address phase\n"));
73713 +
73714 +       jtagdrv_i2c_stop (dev);
73715 +       return (I2C_OP_NO_DEVICE);
73716 +    }
73717 +    
73718 +    for (i = 0; i < count; i++)
73719 +    {
73720 +       jtagdrv_i2c_shift_data (dev, data[i]);
73721 +       
73722 +       if (! jtagdrv_i2c_get_ack (dev))
73723 +       {
73724 +           PRINTF (DBG_FN, ("jtagdrv_i2c_write: no ack on data phase %d\n", i));
73725 +
73726 +           jtagdrv_i2c_stop (dev);
73727 +           return (I2C_OP_WRITE_TO_BIG);
73728 +       }
73729 +    }
73730 +
73731 +    jtagdrv_i2c_stop (dev);
73732 +    return (I2C_OP_SUCCESS);
73733 +}
73734 +
73735 +int
73736 +jtagdrv_i2c_read (JTAG_DEV *dev, u_int address, u_int count, u_char *data)
73737 +{
73738 +    register int i;
73739 +
73740 +    PRINTF (DBG_FN, ("jtagdrv_i2c_read: address=%x count=%d\n", address, count));
73741 +
73742 +    if (! jtagdrv_i2c_start (dev))
73743 +       return (I2C_OP_NOT_IDLE);
73744 +
73745 +    jtagdrv_i2c_shift_addr (dev, address, 1);
73746 +    
73747 +    if (! jtagdrv_i2c_get_ack (dev))
73748 +    {
73749 +       PRINTF (DBG_FN, ("jtagdrv_i2c_read: no ack on address phase\n"));
73750 +
73751 +       jtagdrv_i2c_stop (dev);
73752 +       return (I2C_OP_NO_DEVICE);
73753 +    }
73754 +    
73755 +    for (i = 0; i < count; i++)
73756 +    {
73757 +       data[i] = jtagdrv_i2c_shift_data (dev, 0xff);
73758 +
73759 +       jtagdrv_i2c_drive_ack (dev, (i == (count-1) ? 1 : 0));
73760 +    }
73761 +
73762 +    jtagdrv_i2c_stop (dev);
73763 +    
73764 +    return (I2C_OP_SUCCESS);
73765 +}
73766 +
73767 +int
73768 +jtagdrv_i2c_writereg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
73769 +{
73770 +    register int i;
73771 +
73772 +    PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: address=%x count=%d\n", address, count));
73773 +
73774 +    if (! jtagdrv_i2c_start (dev))
73775 +       return (I2C_OP_NOT_IDLE);
73776 +
73777 +    jtagdrv_i2c_shift_addr (dev, address, 0);
73778 +    
73779 +    if (! jtagdrv_i2c_get_ack (dev))
73780 +    {
73781 +       PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on address phase\n"));
73782 +
73783 +       jtagdrv_i2c_stop (dev);
73784 +       return (I2C_OP_NO_DEVICE);
73785 +    }
73786 +    
73787 +    jtagdrv_i2c_shift_data (dev, intaddress);
73788 +    
73789 +    if (! jtagdrv_i2c_get_ack (dev))
73790 +    {
73791 +       PRINTF (DBG_FN, ("jtagdrv_i2c_writereg: no ack on intaddress phase\n"));
73792 +       jtagdrv_i2c_stop (dev);
73793 +       return (I2C_OP_NO_DEVICE);
73794 +    }
73795 +    
73796 +    for (i = 0; i < count; i++)
73797 +    {
73798 +       jtagdrv_i2c_shift_data (dev, data[i]);
73799 +       if (! jtagdrv_i2c_get_ack (dev))
73800 +       {
73801 +           PRINTF (DBG_FN, ("jtagdrv_i2c_writedate: no ack on byte %d\n", i));
73802 +           jtagdrv_i2c_stop (dev);
73803 +           return (I2C_OP_WRITE_TO_BIG);
73804 +       }
73805 +    }
73806 +    
73807 +    jtagdrv_i2c_stop (dev);
73808 +    return (I2C_OP_SUCCESS);
73809 +}
73810 +
73811 +int
73812 +jtagdrv_i2c_readreg (JTAG_DEV *dev, u_int address, u_int intaddress, u_int count, u_char *data)
73813 +{
73814 +    PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: address=%x count=%d\n", address, count));
73815 +
73816 +    if (! jtagdrv_i2c_start (dev))
73817 +       return (I2C_OP_NOT_IDLE);
73818 +
73819 +    jtagdrv_i2c_shift_addr (dev, address, 0);
73820 +    
73821 +    if (! jtagdrv_i2c_get_ack (dev))
73822 +    {
73823 +       PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on address phase\n"));
73824 +
73825 +       jtagdrv_i2c_stop (dev);
73826 +       return (I2C_OP_NO_DEVICE);
73827 +    }
73828 +    
73829 +    jtagdrv_i2c_shift_data (dev, intaddress);
73830 +    
73831 +    if (! jtagdrv_i2c_get_ack (dev))
73832 +    {
73833 +       PRINTF (DBG_FN, ("jtagdrv_i2c_readreg: no ack on intaddress phase\n"));
73834 +       jtagdrv_i2c_stop (dev);
73835 +       return (I2C_OP_NO_DEVICE);
73836 +    }
73837 +
73838 +    jtagdrv_i2c_stop (dev);
73839 +
73840 +    return (jtagdrv_i2c_read (dev, address, count, data));
73841 +}
73842 +
73843 +void
73844 +jtagdrv_i2c_clock_shift (JTAG_DEV *dev, u_int t, u_int n, u_int m)
73845 +{
73846 +    int i;
73847 +
73848 +    for (i = 2; i >= 0; i--)
73849 +    {
73850 +       LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
73851 +       LPT_WRITE_DATA (dev, ((t & (1 << i)) ? LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1);     /* clock high | data */
73852 +    }
73853 +
73854 +    for (i = 1; i >= 0; i--)
73855 +    {
73856 +       LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
73857 +       LPT_WRITE_DATA (dev, ((n & (1 << i)) ? LPT_DATA_TDI : 0)| LPT_DATA_TMS); DELAY(1);      /* clock high | data */
73858 +    }    
73859 +
73860 +    for (i = 6; i >= 0; i--)
73861 +    {
73862 +       LPT_WRITE_DATA (dev, ((m & (1 << i)) ? LPT_DATA_TDI : 0)); DELAY(1);                    /* clock low  | data */
73863 +       LPT_WRITE_DATA (dev, ((m & (1 << i)) ? LPT_DATA_TDI : 0) | LPT_DATA_TMS); DELAY(1);     /* clock high | data */
73864 +    }    
73865 +
73866 +    LPT_WRITE_DATA (dev, 0); DELAY(1);                                                         /* clock low  | 0 */
73867 +
73868 +    LPT_WRITE_CTRL (dev, LPT_CTRL_TCLK); DELAY(1);                                             /* strobe low */
73869 +    LPT_WRITE_CTRL (dev, 0); DELAY(1);                                                         /* strobe low */
73870 +}
73871 +
73872 diff -urN clean/drivers/net/qsnet/jtag/jtagdrv.h linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.h
73873 --- clean/drivers/net/qsnet/jtag/jtagdrv.h      1969-12-31 19:00:00.000000000 -0500
73874 +++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv.h        2002-08-09 07:18:37.000000000 -0400
73875 @@ -0,0 +1,57 @@
73876 +/*
73877 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
73878 + *
73879 + *    For licensing information please see the supplied COPYING file
73880 + *
73881 + */
73882 +
73883 +#ifndef __JTAGDRV_COMMON_H
73884 +#define __JTAGDRV_COMMON_H
73885 +
73886 +#ident "@(#)$Id: jtagdrv.h,v 1.5 2002/08/09 11:18:37 addy Exp $"
73887 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv.h,v $*/
73888 +
73889 +#include <qsnet/config.h>
73890 +
73891 +/* include OS specific header file */
73892 +#if defined(LINUX)
73893 +#  include "jtagdrv_Linux.h"
73894 +#elif defined(DIGITAL_UNIX)
73895 +#  include "jtagdrv_OSF1.h"
73896 +#elif defined(QNX)
73897 +#  include "jtagdrv_QNX.h"
73898 +#else
73899 +#  error cannot determint os type
73900 +#endif
73901 +
73902 +extern int jtagdebug;
73903 +
73904 +#define DBG_CFG                (1 << 0)
73905 +#define DBG_OPEN       (1 << 1)
73906 +#define DBG_IOCTL      (1 << 2)
73907 +#define DBG_ECPP       (1 << 3)
73908 +#define DBG_FN         (1 << 4)
73909 +
73910 +#define DRIVER_NAME    "jtag"
73911 +
73912 +#if defined(LINUX)
73913 +#define PRINTF(n,X)    ((n) & jtagdebug ? (void) printk X : (void) 0)
73914 +#define PRINTMSG(fmt, arg...) printk(KERN_INFO DRIVER_NAME ": " fmt, ##arg)
73915 +#else
73916 +#define PRINTF(n,X)    ((n) & jtagdebug ? (void) printf X : (void) 0)
73917 +#define PRINTMSG(M, A) printf ("jtag: " M, A)
73918 +#endif
73919 +
73920 +extern void jtagdrv_select_ring (JTAG_DEV *pp, u_int ring);
73921 +extern void jtagdrv_reset (JTAG_DEV *pp);
73922 +extern void jtagdrv_shift_ir (JTAG_DEV *pp, u_char *value, int nbits);
73923 +extern void jtagdrv_shift_dr (JTAG_DEV *pp, u_char *value, int nbits);
73924 +
73925 +extern int  jtagdrv_i2c_write (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
73926 +extern int  jtagdrv_i2c_read (JTAG_DEV *pp, u_int address, u_int count, u_char *data);
73927 +extern int  jtagdrv_i2c_writereg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
73928 +extern int  jtagdrv_i2c_readreg (JTAG_DEV *pp, u_int address, u_int intaddress, u_int count, u_char *data);
73929 +extern void jtagdrv_i2c_clock_shift (JTAG_DEV *pp, u_int t, u_int n, u_int m);
73930 +
73931 +
73932 +#endif /* __JTAGDRV_COMMON_H */
73933 diff -urN clean/drivers/net/qsnet/jtag/jtagdrv_Linux.c linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.c
73934 --- clean/drivers/net/qsnet/jtag/jtagdrv_Linux.c        1969-12-31 19:00:00.000000000 -0500
73935 +++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.c  2005-09-07 10:35:03.000000000 -0400
73936 @@ -0,0 +1,326 @@
73937 +/*
73938 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
73939 + *
73940 + *    For licensing information please see the supplied COPYING file
73941 + *
73942 + */
73943 +
73944 +/*
73945 + * $Id: jtagdrv_Linux.c,v 1.19.2.3 2005/09/07 14:35:03 mike Exp $
73946 + * $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.c,v $
73947 + */
73948 +
73949 +#include "jtagdrv.h"
73950 +#include <linux/slab.h>
73951 +#include <jtag/jtagio.h>
73952 +
73953 +#include <qsnet/module.h>
73954 +#include <linux/ioport.h>
73955 +
73956 +MODULE_AUTHOR("Quadrics Ltd.");
73957 +MODULE_DESCRIPTION("JTAG Parallel port QsNet switch interface");
73958 +
73959 +MODULE_LICENSE("GPL");
73960 +
73961 +#define MAJOR_INSTANCE 0       /* 0 is dynamic assign of device major  */ 
73962 +#define MAX_JTAG_DEV   4
73963 +
73964 +int jtag_major = MAJOR_INSTANCE;
73965 +int jtagdebug  = 0;
73966 +module_param(jtag_major, uint, 0);
73967 +module_param(jtagdebug, uint, 0);
73968 +
73969 +JTAG_DEV       jtag_devs[MAX_JTAG_DEV];
73970 +
73971 +int io[MAX_JTAG_DEV]= { 0, };
73972 +MODULE_PARM(io, "1-4i");
73973 +
73974 +
73975 +/* The fops functions */
73976 +int jtag_open(struct inode *, struct file *);
73977 +int jtag_close(struct inode *, struct file *);
73978 +int jtag_ioctl(struct inode *, struct file *, unsigned int, unsigned long );
73979 +
73980 +struct file_operations jtag_fops = {
73981 +    ioctl:   jtag_ioctl,
73982 +    open:    jtag_open,
73983 +    release: jtag_close,
73984 +};
73985 +
73986 +int
73987 +jtag_probe(void)
73988 +{
73989 +       int i=0;        
73990 +       int default_io = 1;
73991 +       JTAG_DEV *dev;
73992 +       unsigned char value=0xff;
73993 +
73994 +
73995 +       /* see if there are any user supplied io addr */
73996 +       for ( i = 0; i < MAX_JTAG_DEV; i++) {
73997 +               if ( io[i] != 0x00)
73998 +                       default_io = 0;
73999 +               jtag_devs[i].base = io[i];
74000 +       }
74001 +       
74002 +       if ( default_io ) {
74003 +               jtag_devs[0].base = 0x3bc;
74004 +               jtag_devs[1].base = 0x378;
74005 +               jtag_devs[2].base = 0x278;
74006 +               jtag_devs[3].base = 0x268;
74007 +       }
74008 +
74009 +       for ( i = 0 ; i < MAX_JTAG_DEV; i++) {
74010 +               if ( jtag_devs[i].base == 0x3bc ) 
74011 +                       jtag_devs[i].region = 3;
74012 +               else
74013 +                       jtag_devs[i].region = 8;
74014 +               jtag_devs[i].present = 0;
74015 +       }       
74016 +
74017 +
74018 +       if( default_io )
74019 +       {
74020 +               for( i = 0 ; i < MAX_JTAG_DEV; i++) {
74021 +                       dev=&(jtag_devs[i]);
74022 +                       if(dev->base && request_region(dev->base, dev->region, "jtag")) {
74023 +                               LPT_WRITE(dev, 0,0);
74024 +                               LPT_READ(dev, 0,value);
74025 +                               if ( value != 0xff) {
74026 +                                       PRINTMSG("(%d , %d) present, io=0x%04lx\n",jtag_major,i,dev->base);
74027 +                       
74028 +                                       dev->present=1; 
74029 +                               }
74030 +                               else
74031 +                                   release_region(dev->base, dev->region);
74032 +                       }
74033 +                       else
74034 +                       {
74035 +                           PRINTMSG("failed to request_region (%d , %d), io=0x%04lx\n",jtag_major,i,dev->base);
74036 +                           return -1;
74037 +                       }
74038 +               }
74039 +               return 0;
74040 +       }     
74041 +       else /* Force the region to be present, this makes the PCI parallel cards work */
74042 +       {
74043 +               for( i = 0 ; i < MAX_JTAG_DEV; i++) 
74044 +               {
74045 +                        dev=&(jtag_devs[i]);
74046 +                        if(dev->base && request_region(dev->base, dev->region, "jtag") && (dev->base != 0)) 
74047 +                       {
74048 +                                PRINTMSG("(%d , %d) forced by user, io=0x%04lx\n",jtag_major,i,dev->base);
74049 +                                        dev->present=1;
74050 +                       }       
74051 +                        else   
74052 +                       {
74053 +                                if( dev->base != 0)
74054 +                                       release_region(dev->base, dev->region);
74055 +                       }
74056 +               }
74057 +                return 0;
74058 +       }
74059 +}
74060 +
74061 +int init_module(void)
74062 +{
74063 +       int result,i;
74064 +       result = register_chrdev(jtag_major, DRIVER_NAME, &jtag_fops);
74065 +       if (result < 0) {
74066 +               PRINTMSG("Couldn't register char device err == %d\n",jtag_major);
74067 +               return -1;
74068 +       }
74069 +
74070 +       if ( jtag_major == 0 ) 
74071 +               jtag_major = result;
74072 +
74073 +       for ( i = 0; i < MAX_JTAG_DEV; i++) {
74074 +               jtag_devs[i].base=io[i];        
74075 +       }
74076 +
74077 +       jtag_probe();
74078 +
74079 +       PRINTMSG("Registered character device, major == %d\n",jtag_major);      
74080 +       return 0;
74081 +}      
74082 +
74083 +void cleanup_module(void)
74084 +{
74085 +       int i=0;
74086 +
74087 +       for( i = 0; i < MAX_JTAG_DEV; i++) {
74088 +               if( jtag_devs[i].present)       
74089 +                       release_region(jtag_devs[i].base, jtag_devs[i].region);
74090 +       }
74091 +                       
74092 +       unregister_chrdev(jtag_major, DRIVER_NAME);
74093 +       PRINTMSG("Unloaded char device\n");
74094 +}
74095 +
74096 +
74097 +int
74098 +jtag_open (struct inode *inode, struct file *filp)
74099 +{
74100 +    int unit = MINOR(inode->i_rdev);
74101 +    JTAG_DEV *dev = &jtag_devs[unit];
74102 +    
74103 +    if (unit < 0 || unit > MAX_JTAG_DEV || !dev->present)
74104 +       return (-ENXIO);
74105 +
74106 +    /*
74107 +     * Only allow a single open at a time 
74108 +     */
74109 +    if (dev->open)
74110 +       return (-EBUSY);
74111 +    dev->open = 1;
74112 +    
74113 +    /*
74114 +     * Initialise the hardware registers
74115 +     */
74116 +   
74117 +    LPT_WRITE (dev, LPT_CTRL, 0);
74118 +    DELAY(50);
74119 +    LPT_WRITE (dev, LPT_CTRL, LPT_CTRL_INIT);
74120 +
74121 +    MOD_INC_USE_COUNT;
74122 +
74123 +    return (0);
74124 +}
74125 +
74126 +int
74127 +jtag_close(struct inode *inode, struct file *filp)
74128 +{
74129 +  
74130 +    int unit = MINOR(inode->i_rdev);
74131 +    JTAG_DEV *dev = &jtag_devs[unit];
74132 +    
74133 +    if (unit < 0 || unit > MAX_JTAG_DEV || !dev->present)
74134 +       return (-ENXIO);
74135 +    
74136 +    dev->open = 0;
74137 +
74138 +    MOD_DEC_USE_COUNT;
74139 +
74140 +    return (0);
74141 +}
74142 +
74143 +int
74144 +jtag_ioctl (struct inode *inode, struct file *filp, unsigned int io_cmd, unsigned long io_data)
74145 +{
74146 +    int                  unit = MINOR(inode->i_rdev);
74147 +    JTAG_DEV             *dev = &jtag_devs[unit];
74148 +    JTAG_RESET_ARGS      *resetargs;
74149 +    JTAG_SHIFT_ARGS      *shiftargs;
74150 +    I2C_ARGS            *i2cargs;
74151 +    I2C_CLOCK_SHIFT_ARGS *clockargs;
74152 +    u_char              *buf;
74153 +    int                          freq;
74154 +
74155 +    if (unit < 0 || unit > MAX_JTAG_DEV || !dev->present)
74156 +       return (-ENXIO);
74157 +    
74158 +    PRINTF (DBG_IOCTL, ("jtag_ioctl: device %d cmd=%x\n", unit, io_cmd));
74159 +
74160 +    switch (io_cmd)
74161 +    {
74162 +    case JTAG_RESET:
74163 +       resetargs = (JTAG_RESET_ARGS *) io_data;
74164 +
74165 +       if (! VALID_JTAG_RING (resetargs->ring))
74166 +           return (-EINVAL);
74167 +       
74168 +       jtagdrv_select_ring (dev, resetargs->ring);
74169 +       jtagdrv_reset (dev);
74170 +       return (0);
74171 +       
74172 +    case JTAG_SHIFT_IR:
74173 +    case JTAG_SHIFT_DR:
74174 +       shiftargs = (JTAG_SHIFT_ARGS *) io_data;
74175 +       
74176 +       if (! VALID_JTAG_RING (shiftargs->ring) || shiftargs->nbits > (JTAG_MAX_DATA_LEN*JTAG_MAX_CHIPS)) {
74177 +           return (-EFAULT);
74178 +               }
74179 +
74180 +       buf = (u_char *) kmalloc (JTAG_NBYTES(shiftargs->nbits), GFP_KERNEL);
74181 +
74182 +       if (buf == (u_char *) NULL)
74183 +           return (-ENOMEM);
74184 +       
74185 +       if (copy_from_user (buf, shiftargs->value, JTAG_NBYTES(shiftargs->nbits)))
74186 +       {
74187 +           kfree(buf);
74188 +           return (-EFAULT);
74189 +       }
74190 +
74191 +
74192 +       jtagdrv_select_ring (dev, shiftargs->ring);
74193 +
74194 +       if (io_cmd == JTAG_SHIFT_IR)
74195 +           jtagdrv_shift_ir (dev, buf, shiftargs->nbits);
74196 +       else
74197 +           jtagdrv_shift_dr (dev, buf, shiftargs->nbits);
74198 +       
74199 +       if (copy_to_user (shiftargs->value, buf, JTAG_NBYTES (shiftargs->nbits)))
74200 +       {
74201 +           kfree (buf);
74202 +           return (-EFAULT);
74203 +       }
74204 +
74205 +       kfree (buf);
74206 +       return (0);
74207 +
74208 +    case I2C_WRITE:
74209 +    case I2C_READ:
74210 +    case I2C_WRITEREG:
74211 +    case I2C_READREG:
74212 +       i2cargs = (I2C_ARGS *) io_data;
74213 +
74214 +       if (! VALID_I2C_RING(i2cargs->ring) || i2cargs->count > I2C_MAX_DATA_LEN)
74215 +           return (-EFAULT);
74216 +
74217 +       jtagdrv_select_ring (dev, RING_I2C_BIT | i2cargs->ring);
74218 +       switch (io_cmd)
74219 +       {
74220 +       case I2C_WRITE:
74221 +           i2cargs->ok = jtagdrv_i2c_write (dev, i2cargs->device, i2cargs->count, i2cargs->data);
74222 +           break;
74223 +
74224 +       case I2C_READ:
74225 +           i2cargs->ok = jtagdrv_i2c_read (dev, i2cargs->device, i2cargs->count, i2cargs->data);
74226 +           break;
74227 +
74228 +       case I2C_WRITEREG:
74229 +           i2cargs->ok = jtagdrv_i2c_writereg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data);
74230 +           break;
74231 +
74232 +       case I2C_READREG:
74233 +           i2cargs->ok = jtagdrv_i2c_readreg (dev, i2cargs->device, i2cargs->reg, i2cargs->count, i2cargs->data);
74234 +           break;
74235 +       }
74236 +       return (0);
74237 +
74238 +    case I2C_CLOCK_SHIFT:
74239 +       clockargs = (I2C_CLOCK_SHIFT_ARGS *) io_data;
74240 +
74241 +       freq = (10 * clockargs->m / (1 << (((clockargs->n + 1) & 3))));
74242 +       
74243 +       /* validate the value, and initialise the ring */
74244 +       if (clockargs->t != 0 || clockargs->n > 3 || clockargs->m > 127)
74245 +           return (-EINVAL);
74246 +       
74247 +       jtagdrv_select_ring (dev, RING_I2C_BIT | RING_CLOCK_SHIFT);
74248 +       jtagdrv_i2c_clock_shift (dev, clockargs->t, clockargs->n, clockargs->m);
74249 +       jtagdrv_select_ring (dev, 0);
74250 +       return (0);
74251 +
74252 +    default:
74253 +       return (-EINVAL);
74254 +    }
74255 +    return (-EINVAL);
74256 +}
74257 +
74258 +/*
74259 + * Local variables:
74260 + * c-file-style: "stroustrup"
74261 + * End:
74262 + */
74263 diff -urN clean/drivers/net/qsnet/jtag/jtagdrv_Linux.h linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.h
74264 --- clean/drivers/net/qsnet/jtag/jtagdrv_Linux.h        1969-12-31 19:00:00.000000000 -0500
74265 +++ linux-2.6.9/drivers/net/qsnet/jtag/jtagdrv_Linux.h  2002-08-09 07:18:37.000000000 -0400
74266 @@ -0,0 +1,174 @@
74267 +/*
74268 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
74269 + *
74270 + *    For licensing information please see the supplied COPYING file
74271 + *
74272 + */
74273 +
74274 +#ident "@(#)$Id: jtagdrv_Linux.h,v 1.3 2002/08/09 11:18:37 addy Exp $"
74275 +/*      $Source: /cvs/master/quadrics/jtagmod/jtagdrv_Linux.h,v $*/
74276 +
74277 +#ifndef __JTAGDRV_LINUX_H
74278 +#define __JTAGDRV_LINUX_H
74279 +
74280 +#include <qsnet/kernel.h>
74281 +#include <asm/io.h>
74282 +
74283 +typedef struct jtag_dev
74284 +{
74285 +    unsigned long      base;
74286 +    int                 region;
74287 +
74288 +    u_int              present:1;
74289 +    u_int              open:1;
74290 +} JTAG_DEV;
74291 +
74292 +/*
74293 +**
74294 +**                     Hardware Defines
74295 +**
74296 +*/
74297 +
74298 +/*
74299 + * Assume that bit 4 of the Control Register is set to 1 (by default) 
74300 + * to enable the printer port (CS3).
74301 + *
74302 + * The default base address is 3BC-3BF. 
74303 + */
74304 +
74305 +#define LPT0   0x3BC                   /* CSR Base Address - note this can
74306 +                                        * change depending on the setting
74307 +                                        * in the Control Register 0.
74308 +                                        *
74309 +                                        * LPT1 0x378
74310 +                                        * LPT2 0x278
74311 +                                        * LPT3 0x268
74312 +                                       */
74313 +
74314 +/*
74315 + *     Register offsets from the port base address
74316 + */
74317 +
74318 +#define LPT_REGISTER_0 0
74319 +#define LPT_REGISTER_1 1
74320 +#define LPT_REGISTER_2 2
74321 +#define LPT_REGISTER_3 0x400
74322 +#define LPT_REGISTER_4 0x401
74323 +#define LPT_REGISTER_5 0x402
74324 +
74325 +/*
74326 + *     Chip control registers
74327 + */
74328 +                                       /* Base address for Super I/O National*/
74329 +
74330 +#define SIO_BASE_ADDR  0x26e           /* Semiconductor PC87332VLJ combo-chip*/
74331 +#define CR4_REG                0x04            /* index 4, printer control reg 4 */
74332 +
74333 +#define LPT_EPP                0x01            /* Enable bit for epp */
74334 +#define LPT_ECP                0x04            /* Enable bit for ecp */
74335 +
74336 +/*
74337 + *     Registers for use with centronics, nibble and byte modes.
74338 + */
74339 +
74340 +#define LPT_DATA       LPT_REGISTER_0          /* line printer port data */
74341 +#define LPT_STAT       LPT_REGISTER_1          /* LPT port status        */
74342 +#define LPT_CTRL       LPT_REGISTER_2          /* LPT port control       */
74343 +
74344 +/*
74345 + *     Registers for use with ECP mode.
74346 + */ 
74347 +
74348 +#define LPT_DFIFO      LPT_REGISTER_3          /* r/w fifo register    */
74349 +#define LPT_CFGB       LPT_REGISTER_4          /* Configuration B      */
74350 +#define LPT_ECR                LPT_REGISTER_5          /* Extended control     */
74351 +
74352 +/*
74353 + * Bit assignments for ECR register.
74354 + */
74355 +
74356 +       /* Bits 0-4 */
74357 +
74358 +#define LPT_ECR_EMPTY  0x01            /* FIFO is empty */
74359 +#define LPT_ECR_FULL   0x02            /* FIFO is full */
74360 +#define LPT_ECR_SERV   0x04            /* Service bit */
74361 +#define LPT_ECR_DMA    0x08            /* DMA enable */
74362 +#define LPT_ECR_nINTR  0x10            /* Interrupt disable */
74363 +
74364 +       /*
74365 +        * Bits 5-7 are ECR modes.
74366 +        */
74367 +
74368 +#define LPT_ECR_PAR    0x20            /* Parallel port FIFO mode */
74369 +#define LPT_ECR_ECP    0x60            /* ECP mode */
74370 +#define LPT_ECR_CFG    0xE0            /* Configuration mode */
74371 +#define LPT_ECR_CLEAR  ~0xE0           /* Clear mode bits */
74372 +
74373 +/*
74374 + * Bit assignments for the parallel port STATUS register:
74375 + */
74376 +
74377 +#define LPT_STAT_BIT0  0X1     /* Reserved. Bit always set.            */
74378 +#define LPT_STAT_BIT1  0X2     /* Reserved. Bit always set.            */
74379 +#define LPT_STAT_IRQ   0x4     /* interrupt status bit                 */
74380 +#define LPT_STAT_ERROR 0x8     /* set to 0 to indicate error           */
74381 +#define LPT_STAT_SLCT  0x10    /* status of SLCT lead from printer     */
74382 +#define LPT_STAT_PE    0x20    /* set to 1 when out of paper           */
74383 +#define LPT_STAT_ACK   0x40    /* acknowledge - set to 0 when ready    */
74384 +#define LPT_STAT_nBUSY 0x80    /* busy status bit, 0=busy, 1=ready     */
74385 +
74386 +/*
74387 + * Bit assignments for the parallel port CONTROL register:
74388 + */
74389 +
74390 +#define LPT_CTRL_nSTROBE       0x1     /* Printer Strobe Control       */
74391 +#define LPT_CTRL_nAUTOFD       0x2     /* Auto Feed Control            */
74392 +#define LPT_CTRL_INIT          0x4     /* Initialize Printer Control   */
74393 +#define LPT_CTRL_nSLCTIN       0x8     /* 0=select printer, 1=not selected */
74394 +#define LPT_CTRL_IRQ           0x10    /* Interrupt Request Enable Control */
74395 +#define LPT_CTRL_DIR           0x20    /* Direction control            */
74396 +#define LPT_CTRL_BIT6          0X40    /* Reserved. Bit always set.    */
74397 +#define LPT_CTRL_BIT7          0X80    /* Reserved. Bit always set.    */
74398 +
74399 +
74400 +#define LPT_WRITE(dev, regname, value) do { outb(value, (dev)->base + regname); } while (0)
74401 +#define LPT_READ(dev, regname,value)   do { value = inb((dev)->base + regname); } while (0)
74402 +
74403 +
74404 +
74405 +/* Standard register access macros */
74406 +#define LPT_WRITE_CTRL(dev, value)     LPT_WRITE(dev, LPT_CTRL, LPT_CTRL_INIT | value)
74407 +#define LPT_WRITE_DATA(dev, value)     LPT_WRITE(dev, LPT_DATA, value)
74408 +#define LPT_READ_STAT(dev, value)      LPT_READ(dev, LPT_STAT, value)
74409 +
74410 +/*
74411 + * The jtag signals are connected to the parallel port as follows :
74412 + *
74413 + *  TRST       bit 0
74414 + *  TDI                bit 1
74415 + *  TMS                bit 2
74416 + *  TCLK       AFX
74417 + *  TDO                PE
74418 + */
74419 +#define LPT_DATA_TRST  1
74420 +#define LPT_DATA_TDI   2
74421 +#define LPT_DATA_TMS   4
74422 +#define LPT_CTRL_TCLK  LPT_CTRL_nAUTOFD
74423 +#define LPT_STAT_TDO   LPT_STAT_PE
74424 +
74425 +/*
74426 + * The I2C signals are connected as follows :
74427 + */
74428 +#define LPT_DATA_SDA   2
74429 +#define LPT_CTRL_SCLK  LPT_CTRL_nAUTOFD
74430 +#define LPT_STAT_SDA   LPT_STAT_PE
74431 +
74432 +/*
74433 + * The ring selection signals are as follows :
74434 + *  addr       bit 0-7
74435 + *  clock      nSLCTIN
74436 + */
74437 +#define LPT_CTRL_RCLK  LPT_CTRL_nSLCTIN
74438 +
74439 +
74440 +#endif /* __JTAGDRV_LINUX_H */
74441 diff -urN clean/drivers/net/qsnet/jtag/Makefile linux-2.6.9/drivers/net/qsnet/jtag/Makefile
74442 --- clean/drivers/net/qsnet/jtag/Makefile       1969-12-31 19:00:00.000000000 -0500
74443 +++ linux-2.6.9/drivers/net/qsnet/jtag/Makefile 2005-10-10 17:47:31.000000000 -0400
74444 @@ -0,0 +1,15 @@
74445 +#
74446 +# Makefile for Quadrics QsNet
74447 +#
74448 +# Copyright (c) 2002-2004 Quadrics Ltd
74449 +#
74450 +# File: drivers/net/qsnet/jtag/Makefile
74451 +#
74452 +
74453 +
74454 +#
74455 +
74456 +obj-$(CONFIG_JTAG)     += jtag.o
74457 +jtag-objs      := jtagdrv_Linux.o jtagdrv.o
74458 +
74459 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
74460 diff -urN clean/drivers/net/qsnet/jtag/Makefile.conf linux-2.6.9/drivers/net/qsnet/jtag/Makefile.conf
74461 --- clean/drivers/net/qsnet/jtag/Makefile.conf  1969-12-31 19:00:00.000000000 -0500
74462 +++ linux-2.6.9/drivers/net/qsnet/jtag/Makefile.conf    2005-09-07 10:39:49.000000000 -0400
74463 @@ -0,0 +1,10 @@
74464 +# Flags for generating QsNet Linux Kernel Makefiles
74465 +MODNAME                =       jtag.o
74466 +MODULENAME     =       jtag
74467 +KOBJFILES      =       jtagdrv_Linux.o jtagdrv.o
74468 +EXPORT_KOBJS   =       
74469 +CONFIG_NAME    =       CONFIG_JTAG
74470 +SGALFC         =       
74471 +# EXTRALINES START
74472 +
74473 +# EXTRALINES END
74474 diff -urN clean/drivers/net/qsnet/jtag/quadrics_version.h linux-2.6.9/drivers/net/qsnet/jtag/quadrics_version.h
74475 --- clean/drivers/net/qsnet/jtag/quadrics_version.h     1969-12-31 19:00:00.000000000 -0500
74476 +++ linux-2.6.9/drivers/net/qsnet/jtag/quadrics_version.h       2005-09-07 10:39:49.000000000 -0400
74477 @@ -0,0 +1 @@
74478 +#define QUADRICS_VERSION "5.11.3qsnet"
74479 diff -urN clean/drivers/net/qsnet/Kconfig linux-2.6.9/drivers/net/qsnet/Kconfig
74480 --- clean/drivers/net/qsnet/Kconfig     1969-12-31 19:00:00.000000000 -0500
74481 +++ linux-2.6.9/drivers/net/qsnet/Kconfig       2005-10-10 17:47:30.000000000 -0400
74482 @@ -0,0 +1,79 @@
74483 +#
74484 +# Kconfig for Quadrics QsNet
74485 +#
74486 +# Copyright (c) 2004 Quadrics Ltd
74487 +#
74488 +# File: driver/net/qsnet/Kconfig
74489 +#
74490 +
74491 +menu "Quadrics QsNet"
74492 +        depends on NETDEVICES
74493 +
74494 +config QSNET
74495 +        tristate "Quadrics QsNet support"
74496 +       default m
74497 +        depends on PCI
74498 +        ---help---
74499 +          Quadrics QsNet is a high bandwidth, ultra low latency cluster interconnect
74500 +          which provides both user and kernel programmers with secure, direct access
74501 +          to the Quadrics network.
74502 +
74503 +config ELAN3
74504 +        tristate "Elan 3 device driver"
74505 +       default m
74506 +        depends on QSNET
74507 +        ---help---
74508 +          This is the main device driver for the Quadrics QsNet (Elan3) PCI device.
74509 +          This is a high bandwidth, ultra low latency interconnect which provides
74510 +          both user and kernel programmers with secure, direct access to the
74511 +          Quadrics network.
74512 +
74513 +config ELAN4
74514 +        tristate "Elan 4 device driver"
74515 +       default m
74516 +        depends on QSNET
74517 +        ---help---
74518 +          This is the main device driver for the Quadrics QsNetII (Elan4) PCI-X device.
74519 +          This is a high bandwidth, ultra low latency interconnect which provides
74520 +          both user and kernel programmers with secure, direct access to the
74521 +          Quadrics network.
74522 +
74523 +config EP
74524 +        tristate "Elan Kernel Comms"
74525 +       default m
74526 +        depends on QSNET && (ELAN4 || ELAN3)
74527 +        ---help---
74528 +          This module implements the QsNet kernel communications layer. This
74529 +          is used to layer kernel level facilities on top of the basic Elan
74530 +          device drivers. These can be used to implement subsystems such as
74531 +          TCP/IP and remote filing systems over the QsNet interconnect.
74532 +
74533 +config EIP
74534 +        tristate "Elan IP device driver"
74535 +       default m
74536 +        depends on QSNET && EP && NET
74537 +        ---help---
74538 +        This is a network IP device driver for the Quadrics QsNet device.
74539 +        It allows the TCP/IP protocol to be run over the Quadrics interconnect.
74540 +
74541 +config RMS
74542 +        tristate "Resource Management System support"
74543 +       default m
74544 +        depends on QSNET
74545 +        ---help---
74546 +        This is a support module for the Quadrics RMS resource manager. It provides kernel
74547 +        services for monitoring and controlling user job execution, termination and cleanup.
74548 +
74549 +config JTAG
74550 +        tristate "Switch monitoring"
74551 +       default m
74552 +        depends on QSNET
74553 +        ---help---
74554 +          The jtag interface is used to allow processes to send and retrieve jtag
74555 +          information to a Quadrics QsNet Elite switch via the parallel port.
74556 +          The module requires a /dev/jtag[0-3] entry (usually there is only a /dev/jtag0)
74557 +          device and a particular device only allows one process at a time to access this
74558 +          resource.
74559 +          For more information about the JTAG (IEEE 1149.1) interface, please refer
74560 +          to the IEEE documentation at http://www.ieee.org/
74561 +endmenu
74562 diff -urN clean/drivers/net/qsnet/Makefile linux-2.6.9/drivers/net/qsnet/Makefile
74563 --- clean/drivers/net/qsnet/Makefile    1969-12-31 19:00:00.000000000 -0500
74564 +++ linux-2.6.9/drivers/net/qsnet/Makefile      2005-10-10 17:47:30.000000000 -0400
74565 @@ -0,0 +1,15 @@
74566 +#
74567 +# Makefile for Quadrics QsNet
74568 +#
74569 +# Copyright (c) 2002-2005 Quadrics Ltd.
74570 +#
74571 +# File: driver/net/qsnet/Makefile
74572 +#
74573 +
74574 +obj-$(CONFIG_QSNET)     += qsnet/ elan/
74575 +obj-$(CONFIG_ELAN3)     += elan3/
74576 +obj-$(CONFIG_ELAN4)     += elan4/
74577 +obj-$(CONFIG_EP)       += ep/ 
74578 +obj-$(CONFIG_EIP)       += eip/ 
74579 +obj-$(CONFIG_RMS)       += rms/ 
74580 +obj-$(CONFIG_JTAG)      += jtag/
74581 diff -urN clean/drivers/net/qsnet/qsnet/debug.c linux-2.6.9/drivers/net/qsnet/qsnet/debug.c
74582 --- clean/drivers/net/qsnet/qsnet/debug.c       1969-12-31 19:00:00.000000000 -0500
74583 +++ linux-2.6.9/drivers/net/qsnet/qsnet/debug.c 2005-03-23 06:04:54.000000000 -0500
74584 @@ -0,0 +1,583 @@
74585 +/*
74586 + *    Copyright (c) 2003 by Quadrics Ltd.
74587 + * 
74588 + *    For licensing information please see the supplied COPYING file
74589 + *
74590 + */
74591 +
74592 +#ident "@(#)$Id: debug.c,v 1.22 2005/03/23 11:04:54 david Exp $"
74593 +/*      $Source: /cvs/master/quadrics/qsnet/debug.c,v $ */
74594 +
74595 +#include <qsnet/kernel.h>
74596 +#include <qsnet/debug.h>
74597 +#include <qsnet/procfs_linux.h>
74598 +
74599 +caddr_t        qsnet_debug_buffer_ptr = NULL;
74600 +int           qsnet_debug_front      = 0;
74601 +int           qsnet_debug_back       = 0;
74602 +int            qsnet_debug_lost_lines = 0;
74603 +int           qsnet_debug_disabled   = 0;
74604 +
74605 +int           qsnet_debug_line_size  = 256;
74606 +int           qsnet_debug_num_lines  = 8192;
74607 +
74608 +int           qsnet_assfail_mode     = 1;                      /* default to BUG() */
74609 +
74610 +int            qsnet_debug_running    = 0;
74611 +int            kqsnet_debug_running   = 0;
74612 +
74613 +static spinlock_t qsnet_debug_lock;
74614 +static kcondvar_t qsnet_debug_wait;
74615 +static char       qsnet_debug_buffer_space[8192];
74616 +
74617 +#define QSNET_DEBUG_PREFIX_MAX_SIZE    32
74618 +#define QSNET_DEBUG_MAX_WORDWRAP       15
74619 +
74620 +/* must be larger than  QSNET_DEBUG_PREFIX_MAX_SIZE +  QSNET_DEBUG_MAX_WORDWRAP + 2 */
74621 +#if defined(DIGITAL_UNIX) 
74622 +#define QSNET_DEBUG_CONSOLE_WIDTH 80
74623 +#elif defined(LINUX)
74624 +#define QSNET_DEBUG_CONSOLE_WIDTH 128
74625 +#endif
74626 +
74627 +#define isspace(CH)    ((CH==' ') | (CH=='\t') | (CH=='\n'))
74628 +
74629 +#ifdef LINUX
74630 +#define ALLOC_DEBUG_BUFFER(ptr)                do { (ptr) = (void *)__get_free_pages (GFP_KERNEL, get_order (qsnet_debug_num_lines * qsnet_debug_line_size)); } while (0)
74631 +#define FREE_DEBUG_BUFFER(ptr)         free_pages ((unsigned long) ptr, get_order (qsnet_debug_num_lines * qsnet_debug_line_size))
74632 +#else
74633 +#define ALLOC_DEBUG_BUFFER(ptr)                KMEM_ALLOC (ptr, caddr_t, qsnet_debug_num_lines * qsnet_debug_line_size, 1)
74634 +#define FREE_DEBUG_BUFFER(ptr)         KMEM_FREE (ptr, qsnet_debug_num_lines * qsnet_debug_line_size)
74635 +#endif
74636 +
74637 +void
74638 +qsnet_debug_init ()
74639 +{
74640 +       spin_lock_init (&qsnet_debug_lock);
74641 +       kcondvar_init  (&qsnet_debug_wait);
74642 +
74643 +       qsnet_debug_front      = 0;
74644 +       qsnet_debug_back       = 0;
74645 +       qsnet_debug_lost_lines = 0;
74646 +
74647 +       if (qsnet_debug_line_size < (QSNET_DEBUG_PREFIX_MAX_SIZE + QSNET_DEBUG_MAX_WORDWRAP + 2))
74648 +               qsnet_debug_line_size = 256;
74649 +
74650 +       qsnet_debug_running    = 1;
74651 +
74652 +       qsnet_proc_register_int (qsnet_procfs_config, "assfail_mode", &qsnet_assfail_mode, 0);
74653 +}
74654 +
74655 +void
74656 +qsnet_debug_fini()
74657 +{
74658 +       if (!qsnet_debug_running) return;
74659 +
74660 +       remove_proc_entry ("assfail_mode", qsnet_procfs_config);
74661 +
74662 +       spin_lock_destroy (&qsnet_debug_lock);
74663 +       kcondvar_destroy  (&qsnet_debug_wait);
74664 +       
74665 +       if (qsnet_debug_buffer_ptr)
74666 +               FREE_DEBUG_BUFFER (qsnet_debug_buffer_ptr);
74667 +
74668 +       qsnet_debug_buffer_ptr     = NULL;
74669 +       qsnet_debug_lost_lines = 0;     
74670 +       qsnet_debug_running    = 0;     
74671 +}
74672 +
74673 +void
74674 +qsnet_debug_disable(int val)
74675 +{
74676 +       qsnet_debug_disabled = val;
74677 +}
74678 +
74679 +void
74680 +qsnet_debug_alloc()
74681 +{
74682 +       caddr_t ptr;
74683 +       unsigned long flags;
74684 +
74685 +       if (!qsnet_debug_running) return;
74686 +
74687 +       if (qsnet_debug_buffer_ptr == NULL)
74688 +       {
74689 +               ALLOC_DEBUG_BUFFER (ptr);
74690 +
74691 +               if (ptr != NULL)
74692 +               {
74693 +                       spin_lock_irqsave (&qsnet_debug_lock, flags);
74694 +                       if (qsnet_debug_buffer_ptr == NULL)
74695 +                       {
74696 +                               qsnet_debug_buffer_ptr = ptr;
74697 +                               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
74698 +                       }
74699 +                       else
74700 +                       {
74701 +                               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
74702 +
74703 +                               FREE_DEBUG_BUFFER (ptr);
74704 +                       }
74705 +               }
74706 +       }
74707 +       
74708 +}
74709 +
74710 +static void 
74711 +qsnet_prefix_debug(unsigned int mode, const char *prefix, char *buffer) 
74712 +{
74713 +       /* assumes caller has lock */
74714 +
74715 +       int  prefixlen = strlen(prefix);
74716 +       char pref[QSNET_DEBUG_PREFIX_MAX_SIZE];
74717 +       int  prefix_done = 0;
74718 +
74719 +       if (!qsnet_debug_running) return;
74720 +
74721 +       if (qsnet_debug_disabled)
74722 +               return;
74723 +
74724 +       if (prefixlen >= QSNET_DEBUG_PREFIX_MAX_SIZE) 
74725 +       {
74726 +               strncpy(pref,prefix,QSNET_DEBUG_PREFIX_MAX_SIZE -2);
74727 +               strcpy (&pref[QSNET_DEBUG_PREFIX_MAX_SIZE-5],"... ");
74728 +
74729 +               prefix = pref;
74730 +                prefixlen = strlen(prefix);
74731 +       }
74732 +
74733 +#ifdef CONFIG_MPSAS
74734 +       {
74735 +               char *p;
74736 +#define TRAP_PUTCHAR_B                 (0x17a - 256)
74737 +#define SAS_PUTCHAR(c)                 do {\
74738 +                       register int o0 asm ("o0") = (c);\
74739 +\
74740 +                       asm volatile ("ta %0; nop" \
74741 +                                     : /* no outputs */\
74742 +                                     : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\
74743 +                                     : /* clobbered */ "o0");\
74744 +\
74745 +                       if (o0 == '\n') {\
74746 +                               o0 = '\r';\
74747 +\
74748 +                               asm volatile ("ta %0; nop" \
74749 +                                             : /* no outputs */\
74750 +                                             : /* inputs */ "i" (TRAP_PUTCHAR_B), "r" (o0)\
74751 +                                             : /* clobbered */ "o0");\
74752 +                       }\
74753 +               } while(0)
74754 +
74755 +               for (p = prefix; *p; p++)
74756 +                       SAS_PUTCHAR (*p);
74757 +
74758 +               for (p = buffer; *p; p++)
74759 +                       SAS_PUTCHAR (*p);
74760 +       }
74761 +#else
74762 +       if (mode & QSNET_DEBUG_BUFFER)
74763 +       {
74764 +               if (qsnet_debug_buffer_ptr == NULL)
74765 +                       qsnet_debug_lost_lines++;
74766 +               else
74767 +               {                   
74768 +                       caddr_t base = &qsnet_debug_buffer_ptr[qsnet_debug_line_size * qsnet_debug_back];
74769 +                       caddr_t lim  = base + qsnet_debug_line_size - 2;
74770 +                       caddr_t p;
74771 +               
74772 +                       p = buffer; 
74773 +                       prefix_done = 0;
74774 +                       while (*p) 
74775 +                       {
74776 +                               /* sort out prefix */
74777 +                               if ( prefix_done++ ) 
74778 +                               {
74779 +                                       int i;
74780 +                                       for(i=0;i<prefixlen;i++)
74781 +                                               base[i] = ' ';
74782 +                                       /* memset(base,' ',prefixlen); */
74783 +                               }
74784 +                               else
74785 +                                       strcpy(base,prefix);
74786 +                               base += prefixlen; /* move the base on */
74787 +
74788 +                               /* copy data */
74789 +                               for ( ; *p && (base < lim); )
74790 +                                       *base++ = *p++;
74791 +
74792 +                               /* if line split then add \n */
74793 +                               if ((base == lim) && (*base != '\n'))
74794 +                               {
74795 +                                       char *ptr;
74796 +                                       int   count;
74797 +
74798 +                                       *base = '\n';
74799 +                                       /* we added a \n cos it was end of line put next char was \n */
74800 +                                       if (*p == '\n') 
74801 +                                               p++;
74802 +                                       else
74803 +                                       {
74804 +                                               /* lets see if we can back track and find a white space to break on */
74805 +                                               ptr = base-1;
74806 +                                               count = 1;
74807 +                                               while ( ( !isspace(*ptr) ) && ( count < QSNET_DEBUG_MAX_WORDWRAP ))
74808 +                                               {
74809 +                                                       count++;
74810 +                                                       ptr--;
74811 +                                               }
74812 +
74813 +                                               if ( isspace(*ptr) ) 
74814 +                                               {
74815 +                                                       /* found somewhere to wrap to */
74816 +                                                       p -= (count-1); /* need to loose the white space */
74817 +                                                       base = ptr;
74818 +                                                       *base = '\n';
74819 +                                               }
74820 +                                       }
74821 +                                       base++;
74822 +                               }
74823 +                               *base = '\0';
74824 +
74825 +                               /* move on pointers */
74826 +                               qsnet_debug_back = (++qsnet_debug_back == qsnet_debug_num_lines) ? 0 : qsnet_debug_back;            
74827 +                               if (qsnet_debug_back == qsnet_debug_front)
74828 +                               {
74829 +                                       qsnet_debug_lost_lines++;
74830 +                                       qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
74831 +                               }
74832 +                               base  = &qsnet_debug_buffer_ptr[qsnet_debug_line_size * qsnet_debug_back];
74833 +                               lim  =  base + qsnet_debug_line_size - 2;
74834 +                       }
74835 +                       kcondvar_wakeupone (&qsnet_debug_wait, &qsnet_debug_lock);
74836 +               }
74837 +       }
74838 +
74839 +       if (mode & QSNET_DEBUG_CONSOLE)
74840 +       {
74841 +               int     remaining = QSNET_DEBUG_CONSOLE_WIDTH - prefixlen;
74842 +               caddr_t p;
74843 +               char    line[QSNET_DEBUG_CONSOLE_WIDTH +2];
74844 +               int     len;
74845 +           
74846 +               strcpy (pref,prefix);
74847 +               prefix_done = 0;
74848 +
74849 +               p = buffer;
74850 +               while ( *p )
74851 +               {
74852 +                       /* use the prefix only once */
74853 +                       if  ( prefix_done++ > 0 ) 
74854 +                               {
74855 +                                       int i;
74856 +                                       for(i=0;i<prefixlen;i++)
74857 +                                               pref[i] = ' ';
74858 +                                       /* memset(perf,' ',prefixlen); */
74859 +                               }       
74860 +
74861 +                       len=strlen(p);
74862 +                       if (len > remaining) len = remaining;
74863 +                 
74864 +                       strncpy(line, p, len);
74865 +                       line[len] = 0;
74866 +                       p += len;
74867 +                   
74868 +                       /* word wrap */
74869 +                       if ((len == remaining) && *p && !isspace(*p))
74870 +                       {
74871 +                               /* lets see if we can back track and find a white space to break on */
74872 +                               char * ptr = &line[len-1];
74873 +                               int    count = 1;
74874 +
74875 +                               while ( ( !isspace(*ptr) ) && ( count < QSNET_DEBUG_MAX_WORDWRAP ))
74876 +                               {
74877 +                                       count++;
74878 +                                       ptr--;
74879 +                               }
74880 +
74881 +                               if ( isspace(*ptr) ) 
74882 +                               {
74883 +                                       /* found somewhere to wrap to */
74884 +                                       p -= (count-1); /* need to loose the white space */
74885 +                                       len -= count;
74886 +                               }               
74887 +                       }
74888 +
74889 +                       if (line[len-1] != '\n' ) 
74890 +                       {
74891 +                               line[len] = '\n';
74892 +                               line[len+1] = 0;
74893 +                       }
74894 +
74895 +                       /* we put a \n in so dont need another one next */
74896 +                       if ( *p == '\n')
74897 +                               p++;
74898 +
74899 +#if defined(DIGITAL_UNIX)
74900 +                       {
74901 +                               char *pr;
74902 +
74903 +                               for (pr = pref; *pr; pr++)
74904 +                                       cnputc (*pr);
74905 +
74906 +                               for (pr = line; *pr; pr++)
74907 +                                       cnputc (*pr); 
74908 +                       }
74909 +#elif defined(LINUX)
74910 +                       printk("%s%s",pref,line);
74911 +#endif
74912 +               }
74913 +       }
74914 +#endif /* CONFIG_MPSAS */
74915 +}
74916 +
74917 +void
74918 +qsnet_vdebugf (unsigned int mode, const char *prefix, const char *fmt, va_list ap)
74919 +{
74920 +       unsigned long flags;
74921 +
74922 +       if (!qsnet_debug_running) return;
74923 +
74924 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
74925 +
74926 +       qsnet_debug_buffer_space[0] = '\0';
74927 +
74928 +#if defined(DIGITAL_UNIX)
74929 +       prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap);
74930 +#elif defined(LINUX)
74931 +       vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap);
74932 +#endif
74933 +
74934 +       if (prefix == NULL)
74935 +               printk ("qsnet_vdebugf: prefix==NULL\n");
74936 +       else
74937 +               qsnet_prefix_debug(mode, prefix, qsnet_debug_buffer_space);
74938 +
74939 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
74940 +}
74941 +
74942 +void kqsnet_debugf(char *fmt,...)
74943 +{
74944 +       if ( kqsnet_debug_running ) {
74945 +               va_list ap;
74946 +               char string[20];
74947 +               
74948 +               sprintf (string, "mm=%p:", current->mm);
74949 +               va_start(ap, fmt);
74950 +               qsnet_vdebugf(QSNET_DEBUG_BUFFER, string, fmt, ap);
74951 +               va_end(ap);
74952 +       }       
74953 +}
74954 +void 
74955 +qsnet_debugf(unsigned int mode, const char *fmt,...)
74956 +{
74957 +       va_list       ap;
74958 +       unsigned long flags;
74959 +
74960 +       if (!qsnet_debug_running) return;
74961 +
74962 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
74963 +
74964 +       qsnet_debug_buffer_space[0] = '\0';
74965 +
74966 +       va_start (ap, fmt);
74967 +#if defined(DIGITAL_UNIX)
74968 +       prf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), NULL, fmt, ap);
74969 +#elif defined(LINUX)
74970 +       vsprintf (qsnet_debug_buffer_space+strlen(qsnet_debug_buffer_space), fmt, ap);
74971 +#endif
74972 +       va_end (ap);
74973 +
74974 +       qsnet_prefix_debug(mode, "", qsnet_debug_buffer_space); 
74975 +
74976 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
74977 +}
74978 +
74979 +int
74980 +qsnet_debug_buffer (caddr_t ubuffer, int len)
74981 +{
74982 +       caddr_t buffer, ptr, base;
74983 +       int     remain, len1;
74984 +       unsigned long flags;
74985 +       static  char qsnet_space[65536];
74986 +
74987 +       if (!qsnet_debug_running) return (0);
74988 +
74989 +       if (len < qsnet_debug_line_size)
74990 +               return (-1);
74991 +
74992 +       if (len > (qsnet_debug_line_size * qsnet_debug_num_lines))
74993 +               len = qsnet_debug_line_size * qsnet_debug_num_lines;
74994 +    
74995 +       if ( len > 65536 ) {
74996 +               KMEM_ZALLOC (buffer, caddr_t, len, 1);
74997 +       } else 
74998 +               buffer = qsnet_space;
74999 +
75000 +       if (buffer == NULL)
75001 +               return (-1);
75002 +
75003 +       if (qsnet_debug_buffer_ptr == NULL)
75004 +               qsnet_debug_alloc();
75005 +
75006 +       if (qsnet_debug_buffer_ptr == NULL)
75007 +       {
75008 +               if ( len > 65536 )
75009 +                       KMEM_FREE (buffer, len);
75010 +               return (-1);
75011 +       }
75012 +
75013 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
75014 +    
75015 +       while (!qsnet_debug_lost_lines && (qsnet_debug_back == qsnet_debug_front))
75016 +               if (kcondvar_waitsig (&qsnet_debug_wait, &qsnet_debug_lock, &flags) == 0)
75017 +                       break;
75018 +    
75019 +       ptr    = buffer;
75020 +       remain = len;
75021 +
75022 +       if (qsnet_debug_lost_lines)
75023 +       {
75024 +               qsnet_debug_lost_lines = 0;
75025 +               strcpy (ptr, "Debug Buffer has overflowed!!\n");
75026 +               len1 = strlen (ptr);
75027 +
75028 +               remain -= len1;
75029 +               ptr    += len1;
75030 +       }
75031 +
75032 +       while (qsnet_debug_front != qsnet_debug_back)
75033 +       {
75034 +               /* copy the line from DebugFront */
75035 +               base = &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size];
75036 +
75037 +               len1 = strlen (base);
75038 +
75039 +               if (len1 > remain)
75040 +                       break;
75041 +       
75042 +               bcopy (base, ptr, len1);
75043 +       
75044 +               ptr += len1;
75045 +               remain -= len1;
75046 +
75047 +               qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
75048 +       }
75049 +
75050 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
75051 +
75052 +       len1 = ptr - buffer;
75053 +
75054 +       if (len1 != 0 && copyout (buffer, ubuffer, len1))
75055 +               len1 = -1;
75056 +
75057 +       if ( len > 65536 )
75058 +               KMEM_FREE (buffer, len);
75059 +   
75060 +       return (len1);
75061 +}
75062 +
75063 +void
75064 +qsnet_debug_buffer_on() 
75065 +{
75066 +       if (qsnet_debug_buffer_ptr == NULL)
75067 +               qsnet_debug_alloc();
75068 +}
75069 +
75070 +void 
75071 +qsnet_debug_buffer_clear()
75072 +{
75073 +       unsigned long flags;
75074 +
75075 +       qsnet_debug_buffer_on();
75076 +       
75077 +       if (qsnet_debug_buffer_ptr != NULL){
75078 +               spin_lock_irqsave (&qsnet_debug_lock, flags);
75079 +               qsnet_debug_front      = 0;
75080 +               qsnet_debug_back       = 0;
75081 +               qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Clear","");
75082 +               spin_unlock_irqrestore (&qsnet_debug_lock, flags);      
75083 +       }
75084 +}
75085 +
75086 +void 
75087 +qsnet_debug_buffer_mark(char *str)
75088 +{
75089 +       unsigned long flags;    
75090 +
75091 +       qsnet_debug_buffer_on();
75092 +
75093 +       if (qsnet_debug_buffer_ptr != NULL) {
75094 +               spin_lock_irqsave (&qsnet_debug_lock, flags);
75095 +               qsnet_prefix_debug(QSNET_DEBUG_BUFFER,"Mark",str);
75096 +               spin_unlock_irqrestore (&qsnet_debug_lock, flags);
75097 +       }
75098 +}
75099 +int
75100 +qsnet_debug_dump ()
75101 +{
75102 +       unsigned long flags;
75103 +
75104 +       if (!qsnet_debug_running) return (0);
75105 +
75106 +       if (qsnet_debug_buffer_ptr == NULL)
75107 +               qsnet_debug_alloc();
75108 +
75109 +       if (qsnet_debug_buffer_ptr == NULL)
75110 +               return (-1);
75111 +
75112 +       spin_lock_irqsave (&qsnet_debug_lock, flags);
75113 +
75114 +       while (qsnet_debug_front != qsnet_debug_back)
75115 +       {
75116 +               printk ("%s", &qsnet_debug_buffer_ptr[qsnet_debug_front*qsnet_debug_line_size]);
75117 +
75118 +               qsnet_debug_front = (++qsnet_debug_front == qsnet_debug_num_lines) ? 0 : qsnet_debug_front;
75119 +       }
75120 +
75121 +       if (qsnet_debug_lost_lines)
75122 +               printk ("\n**** Debug buffer has lost %d lines\n****\n",qsnet_debug_lost_lines);
75123 +
75124 +       spin_unlock_irqrestore (&qsnet_debug_lock, flags);
75125 +
75126 +       return (0);
75127 +}
75128 +
75129 +int
75130 +qsnet_debug_kmem (void *handle)
75131 +{
75132 +       if (!qsnet_debug_running) return (0);
75133 +
75134 +#ifdef KMEM_DEBUG
75135 +       qsnet_kmem_display(handle);
75136 +#endif
75137 +       return (0);
75138 +}
75139 +
75140 +int
75141 +qsnet_assfail (char *ex, const char *func, char *file, int line)
75142 +{
75143 +       qsnet_debugf (QSNET_DEBUG_BUFFER, "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
75144 +
75145 +       printk (KERN_EMERG "qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
75146 +
75147 +       if (panicstr)
75148 +               return (0);
75149 +
75150 +       if (qsnet_assfail_mode & 1)                             /* return to BUG() */
75151 +               return 1;
75152 +
75153 +       if (qsnet_assfail_mode & 2)
75154 +               panic ("qsnet: assertion failure: %s, function: %s, file %s, line: %d\n", ex, func, file, line);
75155 +       if (qsnet_assfail_mode & 4)
75156 +               qsnet_debug_disable (1);
75157 +
75158 +       return 0;
75159 +
75160 +}
75161 +
75162 +
75163 +/*
75164 + * Local variables:
75165 + * c-file-style: "linux"
75166 + * End:
75167 + */
75168 diff -urN clean/drivers/net/qsnet/qsnet/i686_mmx.c linux-2.6.9/drivers/net/qsnet/qsnet/i686_mmx.c
75169 --- clean/drivers/net/qsnet/qsnet/i686_mmx.c    1969-12-31 19:00:00.000000000 -0500
75170 +++ linux-2.6.9/drivers/net/qsnet/qsnet/i686_mmx.c      2004-01-05 07:08:25.000000000 -0500
75171 @@ -0,0 +1,99 @@
75172 +/*
75173 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
75174 + * 
75175 + *    For licensing information please see the supplied COPYING file
75176 + *
75177 + */
75178 +
75179 +#ident "@(#)$Id: i686_mmx.c,v 1.11 2004/01/05 12:08:25 mike Exp $"
75180 +/*      $Source: /cvs/master/quadrics/qsnet/i686_mmx.c,v $*/
75181 +
75182 +#include <qsnet/kernel.h>
75183 +
75184 +#if defined(LINUX_I386)
75185 +
75186 +#include <linux/config.h>
75187 +#include <linux/sched.h>
75188 +#include <asm/processor.h>
75189 +#include <asm/i387.h>
75190 +
75191 +int mmx_disabled = 0;
75192 +
75193 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
75194 +/* These functions are lifted from arch/i386/kernel/i387.c
75195 + * and MUST be kept in step with the kernel (currently 2.4.17)
75196 + * alternatively we should export the kernel_fpu_begin() function
75197 + */
75198 +static inline void __save_init_fpu( struct task_struct *tsk )
75199 +{
75200 +       if ( cpu_has_fxsr ) {
75201 +               asm volatile( "fxsave %0 ; fnclex"
75202 +                             : "=m" (tsk->thread.i387.fxsave) );
75203 +       } else {
75204 +               asm volatile( "fnsave %0 ; fwait"
75205 +                             : "=m" (tsk->thread.i387.fsave) );
75206 +       }
75207 +       tsk->flags &= ~PF_USEDFPU;
75208 +}
75209 +#if defined(MODULE)
75210 +void kernel_fpu_begin(void)
75211 +{
75212 +       struct task_struct *tsk = current;
75213 +
75214 +       if (tsk->flags & PF_USEDFPU) {
75215 +               __save_init_fpu(tsk);
75216 +               return;
75217 +       }
75218 +       clts();
75219 +}
75220 +#endif
75221 +#endif
75222 +
75223 +extern inline int
75224 +mmx_preamble(void)
75225 +{
75226 +    if (mmx_disabled || in_interrupt())
75227 +       return (0);
75228 +
75229 +    kernel_fpu_begin();
75230 +
75231 +    return (1);
75232 +}
75233 +
75234 +extern inline void
75235 +mmx_postamble(void)
75236 +{
75237 +    kernel_fpu_end();
75238 +}
75239 +
75240 +extern u64
75241 +qsnet_readq (volatile u64 *ptr)
75242 +{
75243 +    u64 value;
75244 +
75245 +    if (! mmx_preamble())
75246 +       value = *ptr;
75247 +    else
75248 +    {
75249 +       asm volatile ("movq (%0), %%mm0\n"
75250 +                     "movq %%mm0, (%1)\n"
75251 +                     : : "r" (ptr), "r" (&value) : "memory");
75252 +       mmx_postamble();
75253 +    }
75254 +    return (value);
75255 +}
75256 +
75257 +void
75258 +qsnet_writeq(u64 value, volatile u64 *ptr)
75259 +{
75260 +    if (! mmx_preamble())
75261 +       *ptr = value;
75262 +    else
75263 +    {
75264 +       asm volatile ("movq (%0), %%mm0\n"
75265 +                     "movq %%mm0, (%1)\n"
75266 +                     : : "r" (&value), "r" (ptr) : "memory");
75267 +       mmx_postamble();
75268 +    }
75269 +}
75270 +#endif
75271 diff -urN clean/drivers/net/qsnet/qsnet/kernel_linux.c linux-2.6.9/drivers/net/qsnet/qsnet/kernel_linux.c
75272 --- clean/drivers/net/qsnet/qsnet/kernel_linux.c        1969-12-31 19:00:00.000000000 -0500
75273 +++ linux-2.6.9/drivers/net/qsnet/qsnet/kernel_linux.c  2005-09-07 10:35:03.000000000 -0400
75274 @@ -0,0 +1,902 @@
75275 +/*
75276 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
75277 + * 
75278 + *    For licensing information please see the supplied COPYING file
75279 + *
75280 + */
75281 +
75282 +#ident "@(#)$Id: kernel_linux.c,v 1.74.2.5 2005/09/07 14:35:03 mike Exp $"
75283 +/*      $Source: /cvs/master/quadrics/qsnet/kernel_linux.c,v $*/
75284 +
75285 +#include <qsnet/kernel.h>
75286 +#include <qsnet/ctrl_linux.h>
75287 +#include <qsnet/kpte.h>
75288 +
75289 +#include <linux/sysctl.h>
75290 +#include <linux/init.h>
75291 +#include <qsnet/module.h>
75292 +#include <linux/vmalloc.h>
75293 +
75294 +#include <qsnet/procfs_linux.h>
75295 +
75296 +#include <linux/smp.h>         /* for smp_call_function() prototype */
75297 +#include <linux/smp_lock.h>
75298 +#include <linux/mm.h>
75299 +
75300 +#include <linux/highmem.h>
75301 +
75302 +extern int mmx_disabled;
75303 +extern int qsnet_debug_line_size;
75304 +extern int qsnet_debug_num_lines;
75305 +
75306 +gid_t                 qsnet_procfs_gid;
75307 +struct proc_dir_entry *qsnet_procfs_root;
75308 +struct proc_dir_entry *qsnet_procfs_config;
75309 +
75310 +MODULE_AUTHOR("Quadrics Ltd.");
75311 +MODULE_DESCRIPTION("QsNet Kernel support code");
75312 +
75313 +MODULE_LICENSE("GPL");
75314 +
75315 +#if defined(LINUX_I386)
75316 +module_param(mmx_disabled, uint, 0);
75317 +#endif
75318 +
75319 +module_param(qsnet_debug_line_size, uint, 0);
75320 +module_param(qsnet_debug_num_lines, uint, 0);
75321 +
75322 +module_param(qsnet_procfs_gid, uint, 0);
75323 +
75324 +#ifdef KMEM_DEBUG
75325 +EXPORT_SYMBOL(qsnet_kmem_alloc_debug);
75326 +EXPORT_SYMBOL(qsnet_kmem_free_debug);
75327 +#else
75328 +EXPORT_SYMBOL(qsnet_kmem_alloc);
75329 +EXPORT_SYMBOL(qsnet_kmem_free);
75330 +#endif
75331 +
75332 +EXPORT_SYMBOL(qsnet_kmem_display);
75333 +EXPORT_SYMBOL(kmem_to_phys);
75334 +
75335 +EXPORT_SYMBOL(cpu_hold_all);
75336 +EXPORT_SYMBOL(cpu_release_all);
75337 +
75338 +#if defined(LINUX_I386)
75339 +EXPORT_SYMBOL(qsnet_readq);
75340 +EXPORT_SYMBOL(qsnet_writeq);
75341 +#endif
75342 +
75343 +/* debug.c */
75344 +EXPORT_SYMBOL(qsnet_debugf);
75345 +EXPORT_SYMBOL(kqsnet_debugf);
75346 +EXPORT_SYMBOL(qsnet_vdebugf);
75347 +EXPORT_SYMBOL(qsnet_debug_buffer);
75348 +EXPORT_SYMBOL(qsnet_debug_alloc);
75349 +EXPORT_SYMBOL(qsnet_debug_dump);
75350 +EXPORT_SYMBOL(qsnet_debug_kmem);
75351 +EXPORT_SYMBOL(qsnet_debug_disable);
75352 +
75353 +EXPORT_SYMBOL(qsnet_assfail);
75354 +
75355 +EXPORT_SYMBOL(qsnet_procfs_gid);
75356 +EXPORT_SYMBOL(qsnet_procfs_root);
75357 +
75358 +static int qsnet_open    (struct inode *ino, struct file *fp);
75359 +static int qsnet_release (struct inode *ino, struct file *fp);
75360 +static int qsnet_ioctl   (struct inode *ino, struct file *fp, unsigned int cmd, unsigned long arg);
75361 +
75362 +static struct file_operations qsnet_ioctl_fops = 
75363 +{
75364 +       ioctl:   qsnet_ioctl,
75365 +       open:    qsnet_open,
75366 +       release: qsnet_release,
75367 +};
75368 +
75369 +static int
75370 +qsnet_open (struct inode *inode, struct file *fp)
75371 +{
75372 +       MOD_INC_USE_COUNT;
75373 +       fp->private_data = NULL;
75374 +       return (0);
75375 +}
75376 +
75377 +static int
75378 +qsnet_release (struct inode *inode, struct file *fp)
75379 +{
75380 +       MOD_DEC_USE_COUNT;
75381 +       return (0);
75382 +}
75383 +
75384 +static int 
75385 +qsnet_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
75386 +{
75387 +       int res=0;
75388 +
75389 +       switch (cmd) 
75390 +       {
75391 +       case QSNETIO_DEBUG_KMEM:
75392 +       {
75393 +               QSNETIO_DEBUG_KMEM_STRUCT args;
75394 +
75395 +               if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_KMEM_STRUCT)))
75396 +                       return (-EFAULT);
75397 +
75398 +               /* doesnt use handle as a pointer */
75399 +               qsnet_kmem_display(args.handle);
75400 +               break;
75401 +       }
75402 +
75403 +       case QSNETIO_DEBUG_DUMP : 
75404 +       {
75405 +               res = qsnet_debug_dump();
75406 +               break;
75407 +       }
75408 +
75409 +       case QSNETIO_DEBUG_BUFFER :
75410 +       {
75411 +               QSNETIO_DEBUG_BUFFER_STRUCT args;
75412 +
75413 +               if (copy_from_user (&args, (void *) arg, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT)))
75414 +                       return (-EFAULT);
75415 +
75416 +               /* qsnet_debug_buffer uses copyout */
75417 +               if ((res = qsnet_debug_buffer (args.addr, args.len)) != -1)
75418 +               {
75419 +                       args.len = res;
75420 +                       if (copy_to_user ((void *) arg, &args, sizeof (QSNETIO_DEBUG_BUFFER_STRUCT)))
75421 +                               return (-EFAULT);
75422 +                       res = 0;
75423 +               }
75424 +               break;
75425 +       }
75426 +       default:
75427 +               res = EINVAL;
75428 +               break;
75429 +       }
75430 +
75431 +       return ((res == 0) ? 0 : -res);
75432 +}
75433 +
75434 +#ifdef KMEM_DEBUG
75435 +static int qsnet_kmem_open    (struct inode *ino, struct file *fp);
75436 +static int qsnet_kmem_release (struct inode *ino, struct file *fp);
75437 +static ssize_t qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos);
75438 +
75439 +static struct file_operations qsnet_kmem_fops = 
75440 +{
75441 +       open:    qsnet_kmem_open,
75442 +       release: qsnet_kmem_release,
75443 +       read:    qsnet_kmem_read,
75444 +};
75445 +
75446 +typedef struct qsnet_private_space
75447 +{
75448 +       char * space;
75449 +       int    size;
75450 +       struct qsnet_private_space *next;
75451 +} QSNET_PRIVATE_SPACE;
75452 +
75453 +typedef struct qsnet_private  
75454 +{
75455 +       QSNET_PRIVATE_SPACE *space_chain;
75456 +        QSNET_PRIVATE_SPACE *current_space;
75457 +       int                  current_pos;
75458 +
75459 +} QSNET_PRIVATE;
75460 +
75461 +#define QSNET_KMEM_DEBUG_LINE_SIZE ((int)512)
75462 +#define QSNET_PRIVATE_PAGE_SIZE    ((int)(4*1024))
75463 +
75464 +static int qsnet_kmem_fill(QSNET_PRIVATE *pd);
75465 +
75466 +void
75467 +destroy_chain(QSNET_PRIVATE * pd)
75468 +{
75469 +       QSNET_PRIVATE_SPACE *mem, *next;
75470 +       
75471 +       if (pd == NULL) return;
75472 +
75473 +       for(mem = pd->space_chain ; mem != NULL; )
75474 +       {
75475 +               next = mem->next; 
75476 +               if ( mem->space ) 
75477 +                       kfree ( mem->space);
75478 +               kfree(mem);
75479 +               mem = next;
75480 +       }
75481 +       kfree (pd);
75482 +}
75483 +
75484 +QSNET_PRIVATE *
75485 +make_chain(int len)
75486 +{
75487 +       QSNET_PRIVATE       * pd;
75488 +       QSNET_PRIVATE_SPACE * mem;
75489 +       int                   i;
75490 +
75491 +       /* make the private data block */
75492 +       if ((pd = kmalloc (sizeof (QSNET_PRIVATE), GFP_KERNEL)) == NULL)
75493 +               return NULL;
75494 +       pd->space_chain = NULL;
75495 +
75496 +       /* first make the holders */
75497 +       for(i=0;i<len;i++)
75498 +       {
75499 +               if ((mem = kmalloc (sizeof (QSNET_PRIVATE_SPACE), GFP_KERNEL)) == NULL)
75500 +               {
75501 +                       destroy_chain(pd);
75502 +                       return (NULL);
75503 +               }
75504 +               mem->next  = pd->space_chain;
75505 +               mem->size  = 0;
75506 +               mem->space = 0;
75507 +               pd->space_chain = mem;
75508 +
75509 +               /* now add the space */
75510 +               if ((mem->space = kmalloc (QSNET_PRIVATE_PAGE_SIZE, GFP_KERNEL)) == NULL)
75511 +               {
75512 +                       destroy_chain(pd);
75513 +                       return (NULL);
75514 +               }                       
75515 +
75516 +               mem->space[0] = 0;
75517 +
75518 +       }
75519 +
75520 +       pd->current_space = pd->space_chain;
75521 +       pd->current_pos   = 0;
75522 +
75523 +       return pd;
75524 +}
75525 +
75526 +static int
75527 +qsnet_kmem_open (struct inode *inode, struct file *fp)
75528 +{
75529 +       MOD_INC_USE_COUNT;
75530 +       fp->private_data = NULL;
75531 +       return (0);
75532 +}
75533 +
75534 +static int
75535 +qsnet_kmem_release (struct inode *inode, struct file *fp)
75536 +{
75537 +       if ( fp->private_data )
75538 +       {
75539 +               QSNET_PRIVATE * pd = (QSNET_PRIVATE *) fp->private_data;
75540 +
75541 +               /* free the space */
75542 +               if (pd->space_chain)
75543 +                       kfree (pd->space_chain);        
75544 +
75545 +               /* free struct */
75546 +               kfree (pd);
75547 +       }
75548 +       MOD_DEC_USE_COUNT;
75549 +       return (0);
75550 +}
75551 +
75552 +static ssize_t
75553 +qsnet_kmem_read (struct file *file, char *buf, size_t count, loff_t *ppos)
75554 +{
75555 +       QSNET_PRIVATE * pd = (QSNET_PRIVATE *) file->private_data;
75556 +       int             error;
75557 +       int             output_count;
75558 +       int             num_of_links=10;
75559 +
75560 +       /* make a buffer to output count bytes in */
75561 +       if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
75562 +               return (error);
75563 +
75564 +       if ( pd == NULL) 
75565 +       {
75566 +               /* first time */
75567 +
75568 +               /* ok we have to guess at how much space we are going to need  */
75569 +               /* if it fails we up the space and carry try again             */
75570 +               /* we have to do it this way as we cant get more memory whilst */
75571 +               /* holding the lock                                            */
75572 +               if ((pd = make_chain(num_of_links)) == NULL)
75573 +                       return (-ENOMEM);       
75574 +
75575 +               while ( qsnet_kmem_fill(pd) ) 
75576 +               {
75577 +                       destroy_chain(pd);
75578 +                       num_of_links += 10;
75579 +                       if ((pd = make_chain(num_of_links)) == NULL)
75580 +                               return (-ENOMEM);       
75581 +               }
75582 +
75583 +               /* we have the space and filled it */
75584 +               file->private_data = (void *)pd;        
75585 +       }
75586 +               
75587 +       /* output buffer */
75588 +       if ( pd->current_pos >= pd->current_space->size )
75589 +               return (0); /* finished */
75590 +
75591 +       output_count = pd->current_space->size - pd->current_pos;
75592 +       if ( output_count > count ) 
75593 +               output_count = count;
75594 +
75595 +       copy_to_user(buf, (pd->current_space->space + pd->current_pos), output_count);
75596 +
75597 +       pd->current_pos += output_count;
75598 +       ppos            += output_count;
75599 +
75600 +       /* just check to see if we have finished the current space */
75601 +       if ( pd->current_pos >= pd->current_space->size )
75602 +       {
75603 +               if ( pd->current_space->next )
75604 +               {
75605 +                       pd->current_space = pd->current_space->next;
75606 +                       pd->current_pos   = 0;
75607 +               }
75608 +       }       
75609 +
75610 +       return (output_count);
75611 +}
75612 +#endif /* KMEM_DEBUG */
75613 +
75614 +static int
75615 +proc_write_qsnetdebug(struct file *file, const char *buffer,
75616 +                     unsigned long count, void *data)
75617 +{
75618 +       char    tmpbuf[128];
75619 +       int     res;
75620 +       
75621 +       if (count > sizeof (tmpbuf)-1)
75622 +               return (-EINVAL);
75623 +       
75624 +       MOD_INC_USE_COUNT;
75625 +       
75626 +       if (copy_from_user (tmpbuf, buffer, count))
75627 +               res = -EFAULT;
75628 +       else 
75629 +       {
75630 +               tmpbuf[count] = '\0';   
75631 +               
75632 +               if (tmpbuf[count-1] == '\n')
75633 +                       tmpbuf[count-1] = '\0';
75634 +               
75635 +               if (! strcmp (tmpbuf, "on"))
75636 +                       qsnet_debug_buffer_on();
75637 +
75638 +               if (! strcmp (tmpbuf, "clear"))
75639 +                       qsnet_debug_buffer_clear();
75640 +
75641 +               if (! strncmp (tmpbuf, "mark",4))
75642 +                       qsnet_debug_buffer_mark( &tmpbuf[4] );
75643 +               
75644 +               res = count;
75645 +       }
75646 +       
75647 +       MOD_DEC_USE_COUNT;
75648 +       
75649 +       return (res);
75650 +}
75651 +
75652 +static int
75653 +proc_read_qsnetdebug(char *page, char **start, off_t off,
75654 +                    int count, int *eof, void *data)
75655 +{
75656 +       int len = sprintf (page, "echo command > /proc/qsnet/config/qsnetdebug\ncommand = on | off | clear | mark text\n");
75657 +       return (qsnet_proc_calc_metrics (page, start, off, count, eof, len));
75658 +}
75659 +
75660 +#include "quadrics_version.h"
75661 +extern int kqsnet_debug_running;
75662 +static char       quadrics_version[] = QUADRICS_VERSION;
75663 +
75664 +static int __init qsnet_init(void)
75665 +{
75666 +       struct proc_dir_entry *p;
75667 +
75668 +       printk ("qsnet Module (version %s)\n", quadrics_version);
75669 +       if ((qsnet_procfs_root = proc_mkdir ("qsnet", 0)) == NULL)
75670 +       {
75671 +               printk ("qsnet: failed to create /proc/qsnet \n");
75672 +               return (-ENXIO);
75673 +       }
75674 +       
75675 +       if ((p = create_proc_entry ("ioctl", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_root)) == NULL)
75676 +       {
75677 +               printk ("qsnet: failed to register /proc/qsnet/ioctl\n");
75678 +               return (-ENXIO);
75679 +       }
75680 +       p->proc_fops = &qsnet_ioctl_fops;
75681 +       p->owner     = THIS_MODULE;
75682 +       p->data      = NULL;
75683 +       p->gid       = qsnet_procfs_gid;
75684 +
75685 +       qsnet_proc_register_str (qsnet_procfs_root, "version", quadrics_version, S_IRUGO);
75686 +
75687 +       if ((qsnet_procfs_config = proc_mkdir ("config", qsnet_procfs_root)) == NULL)
75688 +       {
75689 +               printk ("qsnet: failed to create /proc/qsnet/config \n");
75690 +               return (-ENXIO);
75691 +       }
75692 +
75693 +#ifdef KMEM_DEBUG
75694 +       if ((p = create_proc_entry ("kmem_debug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL)
75695 +       {
75696 +               printk ("qsnet: failed to register /proc/qsnet/config/kmem_debug\n");
75697 +               return (-ENXIO);
75698 +       }
75699 +       p->proc_fops = &qsnet_kmem_fops;
75700 +       p->owner     = THIS_MODULE;
75701 +       p->data      = NULL;
75702 +       p->gid       = qsnet_procfs_gid;
75703 +#endif         
75704 +      
75705 +       qsnet_debug_init(); 
75706 +
75707 +       qsnet_proc_register_int (qsnet_procfs_config, "kqsnet_debug_running", &kqsnet_debug_running, 0);
75708 +
75709 +       if ((p = create_proc_entry ("qsnetdebug", S_IRUGO|S_IWUSR|S_IWGRP, qsnet_procfs_config)) == NULL)
75710 +       {
75711 +               printk ("qsnet: failed to register /proc/qsnet/config/qsnetdebug\n");
75712 +               return (-ENXIO);
75713 +       }
75714 +       p->read_proc  = proc_read_qsnetdebug;
75715 +       p->write_proc = proc_write_qsnetdebug;
75716 +       p->owner      = THIS_MODULE;
75717 +       p->data       = NULL;
75718 +       p->gid        = qsnet_procfs_gid;
75719 +       
75720 +       return (0);
75721 +}
75722 +
75723 +static void __exit qsnet_exit(void)
75724 +{
75725 +#ifdef KMEM_DEBUG
75726 +       qsnet_kmem_display(0);
75727 +#endif
75728 +       qsnet_debug_fini();
75729 +
75730 +       remove_proc_entry ("qsnetdebug",           qsnet_procfs_config);
75731 +       remove_proc_entry ("kqsnet_debug_running", qsnet_procfs_config);
75732 +#ifdef KMEM_DEBUG
75733 +       remove_proc_entry ("kmem_debug",           qsnet_procfs_config);
75734 +#endif
75735 +       remove_proc_entry ("config",               qsnet_procfs_root);
75736 +
75737 +       remove_proc_entry ("version", qsnet_procfs_root);
75738 +       remove_proc_entry ("ioctl",   qsnet_procfs_root);
75739 +
75740 +       remove_proc_entry ("qsnet", 0);
75741 +}
75742 +
75743 +/* Declare the module init and exit functions */
75744 +module_init(qsnet_init);
75745 +module_exit(qsnet_exit);
75746 +
75747 +#ifdef KMEM_DEBUG
75748 +/*
75749 + * Kernel memory allocation.  We maintain our own list of allocated mem
75750 + * segments so we can free them on module cleanup.
75751 + * 
75752 + * We use kmalloc for allocations less than one page in size; vmalloc for
75753 + * larger sizes.
75754 + */
75755 +
75756 +typedef struct {
75757 +       struct list_head list;
75758 +       void            *ptr;
75759 +       int             len;
75760 +       int             used_vmalloc;
75761 +       void            *owner;
75762 +       void            *caller;
75763 +       unsigned int     time;
75764 +       int              line;
75765 +       char             filename[20];
75766 +} kmalloc_t;
75767 +
75768 +static LIST_HEAD(kmalloc_head);
75769 +
75770 +static spinlock_t      kmalloc_lock = SPIN_LOCK_UNLOCKED;
75771 +
75772 +/*
75773 + * Kernel memory allocation.  We use kmalloc for allocations less 
75774 + * than one page in size; vmalloc for larger sizes.
75775 + */
75776 +
75777 +static int
75778 +qsnet_kmem_fill(QSNET_PRIVATE *pd)
75779 +{
75780 +       kmalloc_t *kp;
75781 +       struct list_head *lp;
75782 +       unsigned long flags;
75783 +       char str[QSNET_KMEM_DEBUG_LINE_SIZE];
75784 +       QSNET_PRIVATE_SPACE * current_space;
75785 +       int                   current_pos;
75786 +       int                   len;
75787 +       current_space = pd->space_chain;
75788 +       current_pos   = 0;
75789 +       
75790 +       
75791 +       current_space->space[0] = 0;    
75792 +       spin_lock_irqsave(&kmalloc_lock, flags);
75793 +       for (lp = kmalloc_head.next; lp != &kmalloc_head;  lp = lp->next) {
75794 +               kp = list_entry(lp, kmalloc_t, list);
75795 +               
75796 +               /* make the next line */
75797 +               sprintf(str,"%p %d %d %p %p %u %d %s\n",
75798 +                       kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->filename);
75799 +               len = strlen(str);
75800 +               
75801 +               /* does it fit on the current page */
75802 +               if ( (current_pos + len + 1) >=  QSNET_PRIVATE_PAGE_SIZE)
75803 +               {
75804 +                       /* move onto next page */
75805 +                       if ((current_space = current_space->next) == NULL)
75806 +                       {
75807 +                               /* run out of space !!!! */
75808 +                               spin_unlock_irqrestore(&kmalloc_lock, flags);
75809 +                               return (1);
75810 +                       }
75811 +                       current_space->space[0] = 0;    
75812 +                       current_pos = 0;
75813 +               }
75814 +               strcat( current_space->space + current_pos, str);
75815 +               current_pos += len;
75816 +
75817 +               /* remember how much we wrote to this page */
75818 +               current_space->size = current_pos;
75819 +
75820 +       }
75821 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
75822 +
75823 +       return (0);
75824 +}
75825 +
75826 +void * 
75827 +qsnet_kmem_alloc_debug(int len, int cansleep, int zerofill, char *file, int line)
75828 +{
75829 +       void *new;
75830 +       unsigned long flags;
75831 +       kmalloc_t *kp;
75832 +
75833 +       if (len < PAGE_SIZE || !cansleep)
75834 +               new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC);
75835 +       else
75836 +               new = vmalloc(len);
75837 +
75838 +       if (len >= PAGE_SIZE)
75839 +               ASSERT(PAGE_ALIGNED((uintptr_t) new));
75840 +
75841 +       if (new && zerofill)
75842 +               memset(new,0,len);
75843 +
75844 +       /* record allocation */
75845 +       kp = kmalloc(sizeof(kmalloc_t), cansleep ? GFP_KERNEL : GFP_ATOMIC);
75846 +       ASSERT(kp != NULL);
75847 +       kp->len = len;
75848 +       kp->ptr = new;
75849 +       kp->used_vmalloc = (len >= PAGE_SIZE || cansleep);
75850 +       kp->owner  = current;
75851 +       kp->caller = __builtin_return_address(0);
75852 +       kp->time = lbolt;
75853 +       kp->line = line;
75854 +       len = strlen(file);
75855 +
75856 +       if (len > 18) 
75857 +               strcpy(kp->filename,&file[len-18]);
75858 +       else
75859 +               strcpy(kp->filename,file);
75860 +
75861 +       spin_lock_irqsave(&kmalloc_lock, flags);
75862 +       list_add(&kp->list, &kmalloc_head);
75863 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
75864 +
75865 +       return new;
75866 +}
75867 +
75868 +void 
75869 +qsnet_kmem_free_debug(void *ptr, int len, char *file, int line)
75870 +{
75871 +       unsigned long flags;
75872 +       kmalloc_t *kp;
75873 +       struct list_head *lp;
75874 +
75875 +       spin_lock_irqsave(&kmalloc_lock, flags);
75876 +       for (lp = kmalloc_head.next; lp != &kmalloc_head; lp = lp->next) {
75877 +               kp = list_entry(lp, kmalloc_t, list);
75878 +               if (kp->ptr == ptr) {
75879 +                       if (kp->len != len)
75880 +                               printk("qsnet_kmem_free_debug(%p) ptr %p len %d mismatch: expected %d caller %p owner %p (%s:%d)\n",
75881 +                                      current, ptr, len, kp->len, __builtin_return_address(0), kp->caller, file, line);
75882 +                       list_del(lp);
75883 +                       kfree(kp); /* free off descriptor */
75884 +                       break;
75885 +               }
75886 +       }
75887 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
75888 +
75889 +       if (lp == &kmalloc_head) /* segment must be found */
75890 +       {
75891 +               printk( "qsnet_kmem_free_debug(%p) ptr %p len %d not found: caller %p (%s:%d)\n",
75892 +                       current, ptr, len, __builtin_return_address(0), file, line);
75893 +       }
75894 +
75895 +       if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) 
75896 +               vfree (ptr);
75897 +       else
75898 +               kfree (ptr);
75899 +}
75900 +
75901 +#else /* !KMEM_DEBUG */
75902 +
75903 +void * 
75904 +qsnet_kmem_alloc(int len, int cansleep, int zerofill)
75905 +{
75906 +       void *new;
75907 +
75908 +       if (len < PAGE_SIZE || !cansleep)
75909 +               new = kmalloc(len, cansleep ? GFP_KERNEL : GFP_ATOMIC);
75910 +       else
75911 +               new = vmalloc(len);
75912 +
75913 +       if (len >= PAGE_SIZE)
75914 +               ASSERT(PAGE_ALIGNED((unsigned long) new));
75915 +
75916 +       if (new && zerofill)
75917 +               memset(new,0,len);
75918 +
75919 +       return new;
75920 +}
75921 +
75922 +void 
75923 +qsnet_kmem_free(void *ptr, int len)
75924 +{
75925 +       if ((((unsigned long) ptr) >= VMALLOC_START && ((unsigned long) ptr) < VMALLOC_END)) 
75926 +               vfree (ptr);
75927 +       else
75928 +               kfree (ptr);
75929 +}
75930 +#endif /* !KMEM_DEBUG */
75931 +
75932 +void
75933 +qsnet_kmem_display(void *handle)
75934 +{
75935 +#ifdef KMEM_DEBUG
75936 +       kmalloc_t *kp;
75937 +       struct list_head *lp;
75938 +       unsigned long flags;
75939 +       int count = 0, totsize = 0;
75940 +
75941 +       spin_lock_irqsave(&kmalloc_lock, flags);
75942 +       for (lp = kmalloc_head.next; lp != &kmalloc_head;  lp = lp->next) {
75943 +               kp = list_entry(lp, kmalloc_t, list);
75944 +
75945 +               if (!handle || handle == kp->owner)
75946 +               {
75947 +                       printk("qsnet_kmem_display(%p): mem %p len %d unfreed caller %p (%p) \n",
75948 +                              handle, kp->ptr, kp->len, kp->caller, kp->owner);
75949 +                   
75950 +                       count++;
75951 +                       totsize += kp->len;
75952 +               }
75953 +       }
75954 +       spin_unlock_irqrestore(&kmalloc_lock, flags);
75955 +
75956 +       printk("qsnet_kmem_display(%p): %d bytes left in %d objects\n", handle, totsize, count);
75957 +#endif
75958 +}
75959 +
75960 +physaddr_t
75961 +kmem_to_phys(void *ptr)
75962 +{
75963 +       virtaddr_t virt = (virtaddr_t) ptr;
75964 +       physaddr_t phys;
75965 +       pte_t     *pte;
75966 +
75967 +       if ((virt >= VMALLOC_START && virt < VMALLOC_END))
75968 +       {
75969 +               pte = find_pte_kernel(virt);
75970 +               ASSERT(pte && !pte_none(*pte));
75971 +               phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1));
75972 +       }
75973 +#if defined(PKMAP_BASE)
75974 +       else if (virt >= PKMAP_BASE && virt < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE))
75975 +       {
75976 +               pte = find_pte_kernel(virt);
75977 +               ASSERT(pte && !pte_none(*pte));
75978 +               phys = pte_phys(*pte) + (virt & (PAGE_SIZE-1));
75979 +       }
75980 +#endif
75981 +#if defined(__ia64)
75982 +       else if (virt >= __IA64_UNCACHED_OFFSET && virt < PAGE_OFFSET)
75983 +       {
75984 +               /* ia64 non-cached KSEG */
75985 +               phys = ((physaddr_t) ptr - __IA64_UNCACHED_OFFSET);
75986 +       }
75987 +#endif
75988 +       else /* otherwise it's KSEG */
75989 +       {
75990 +               phys = __pa(virt);
75991 +       }
75992 +           
75993 +#if defined(CONFIG_ALPHA_GENERIC) || (defined(CONFIG_ALPHA_EV6) && !defined(USE_48_BIT_KSEG))
75994 +       /* 
75995 +        * with TS_BIAS as bit 40 - the tsunami pci space is mapped into
75996 +        * the kernel at 0xfffff500.00000000 however we need to convert
75997 +        * this to the true physical address 0x00000800.00000000.
75998 +        *
75999 +        * there is no need for PHYS_TWIDDLE since we knew we'd get a kernel
76000 +        * virtual address already and handled this with __pa().
76001 +        */
76002 +       if (phys & (1ul << 40)) {
76003 +               phys &= ~(1ul << 40);   /*   clear bit 40 (kseg I/O select) */
76004 +               phys |= (1ul << 43);    /*   set   bit 43 (phys I/O select) */
76005 +       }
76006 +#endif
76007 +       return phys;
76008 +}
76009 +
76010 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
76011 +
76012 +EXPORT_SYMBOL(pci_resource_size);
76013 +EXPORT_SYMBOL(pci_get_base_address);
76014 +EXPORT_SYMBOL(pci_base_to_kseg);
76015 +
76016 +
76017 +/*
76018 + * PCI stuff.  
76019 + *
76020 + * XXX pci_base_to_kseg() and pci_kseg_to_phys() are problematic
76021 + * in that they may not work on non-Tsunami (DS20, ES40, etc) 
76022 + * architectures, and may not work in non-zero PCI bus numbers.
76023 + */
76024 +
76025 +unsigned long 
76026 +pci_get_base_address(struct pci_dev *pdev, int index)
76027 +{
76028 +       unsigned long base;
76029 +
76030 +       ASSERT(index >= 0 && index <= 5);
76031 +       /* borrowed in part from drivers/scsi/sym53c8xx.c */
76032 +       base = pdev->base_address[index++];
76033 +
76034 +#if BITS_PER_LONG > 32
76035 +       if ((base & 0x7) == 0x4)
76036 +               base |= (((unsigned long)pdev->base_address[index]) << 32);
76037 +#endif
76038 +       return base;
76039 +}
76040 +
76041 +unsigned long 
76042 +pci_resource_size(struct pci_dev *pdev, int index)
76043 +{
76044 +       u32 addr, mask, size;
76045 +
76046 +       static u32 bar_addr[] = {
76047 +               PCI_BASE_ADDRESS_0, 
76048 +               PCI_BASE_ADDRESS_1, 
76049 +               PCI_BASE_ADDRESS_2,
76050 +               PCI_BASE_ADDRESS_3, 
76051 +               PCI_BASE_ADDRESS_4, 
76052 +               PCI_BASE_ADDRESS_5, 
76053 +       };
76054 +       ASSERT(index >= 0 && index <= 5);
76055 +
76056 +       /* algorithm from Rubini book */
76057 +       pci_read_config_dword (pdev,    bar_addr[index], &addr);
76058 +       pci_write_config_dword(pdev,    bar_addr[index], ~0);
76059 +       pci_read_config_dword (pdev,    bar_addr[index], &mask);
76060 +       pci_write_config_dword(pdev,    bar_addr[index], addr);
76061 +
76062 +       mask &= PCI_BASE_ADDRESS_MEM_MASK;
76063 +       size = ~mask + 1;
76064 +       return size;
76065 +}
76066 +
76067 +/*
76068 + * Convert BAR register value to KSEG address.
76069 + */
76070 +void *
76071 +pci_base_to_kseg(u64 baddr, int bus)
76072 +{
76073 +       u64 kseg;
76074 +
76075 +       /* XXX tsunami specific */
76076 +       baddr &= ~(u64)0x100000000;  /* mask out hose bit */
76077 +       kseg = TSUNAMI_MEM(bus) + baddr;
76078 +       return (void *)kseg; 
76079 +}
76080 +
76081 +#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,0) */
76082 +
76083 +/*
76084 + * Spin the other CPU's in an SMP system.
76085 + * smp_call_function() needed to be exported to modules.  It will be
76086 + * papered over in <linux/smp.h> if running on a non-SMP box.
76087 + */
76088 +static spinlock_t hold_lock = SPIN_LOCK_UNLOCKED;
76089 +
76090 +#if 0
76091 +static void cpu_hold(void *unused)
76092 +{
76093 +       spin_lock(&hold_lock);
76094 +       spin_unlock(&hold_lock);
76095 +}
76096 +#endif
76097 +
76098 +void cpu_hold_all(void)
76099 +{
76100 +       spin_lock(&hold_lock);
76101 +
76102 +#if 0
76103 +       {
76104 +               int res;
76105 +               int retries = 10; 
76106 +           
76107 +               /* XXXXX: cannot call smp_call_function() from interrupt context */
76108 +           
76109 +               do {
76110 +                       /* only request blocking retry if not in interrupt context */
76111 +                       res = smp_call_function(cpu_hold, NULL, !in_interrupt(), 0);
76112 +                       if (res)
76113 +                               mdelay(5);
76114 +               } while (res && retries--);
76115 +           
76116 +               if (res)
76117 +                       printk("cpu_hold_all: IPI timeout\n");
76118 +       }
76119 +#endif
76120 +}
76121 +
76122 +void cpu_release_all(void)
76123 +{
76124 +       spin_unlock(&hold_lock);
76125 +}
76126 +
76127 +void
76128 +qsnet_proc_character_fill (long mode, char *fmt, ...)
76129 +{
76130 +    int len;
76131 +    va_list ap;
76132 +    QSNET_PROC_PRIVATE *private = (QSNET_PROC_PRIVATE *)mode;
76133 +    
76134 +    /* is the buffer already full */
76135 +    if (private->pr_len >= private->pr_data_len) 
76136 +       return;
76137 +    
76138 +    /* attempt to fill up to the remaining space */
76139 +    va_start (ap, fmt);
76140 +    len = vsnprintf ( & private->pr_data[private->pr_len], (private->pr_data_len - private->pr_len), fmt, ap);
76141 +    va_end (ap);
76142 +    
76143 +    if (len < 0 ) 
76144 +    {
76145 +       /* we have reached the end of buffer and need to fail all future writes
76146 +        * the caller can check (pr_len >= pr_data_len) and recall with more space 
76147 +        */
76148 +       private->pr_len = private->pr_data_len;
76149 +       return;
76150 +    }
76151 +    
76152 +    /* move the length along */
76153 +    private->pr_len += len;   
76154 +}
76155 +
76156 +int
76157 +qsnet_proc_release (struct inode *inode, struct file *file)
76158 +{
76159 +    QSNET_PROC_PRIVATE *pr = (QSNET_PROC_PRIVATE *) file->private_data;
76160 +    
76161 +    if (pr->pr_data)
76162 +       KMEM_FREE (pr->pr_data, pr->pr_data_len);
76163 +    kfree (pr);
76164 +    
76165 +    MOD_DEC_USE_COUNT;
76166 +    return (0);
76167 +}
76168 +
76169 +EXPORT_SYMBOL(qsnet_proc_character_fill);
76170 +EXPORT_SYMBOL(qsnet_proc_release);
76171 +
76172 +/*
76173 + * Local variables:
76174 + * c-file-style: "linux"
76175 + * End:
76176 + */
76177 diff -urN clean/drivers/net/qsnet/qsnet/Makefile linux-2.6.9/drivers/net/qsnet/qsnet/Makefile
76178 --- clean/drivers/net/qsnet/qsnet/Makefile      1969-12-31 19:00:00.000000000 -0500
76179 +++ linux-2.6.9/drivers/net/qsnet/qsnet/Makefile        2005-10-10 17:47:31.000000000 -0400
76180 @@ -0,0 +1,15 @@
76181 +#
76182 +# Makefile for Quadrics QsNet
76183 +#
76184 +# Copyright (c) 2002-2004 Quadrics Ltd
76185 +#
76186 +# File: drivers/net/qsnet/qsnet/Makefile
76187 +#
76188 +
76189 +
76190 +#
76191 +
76192 +obj-$(CONFIG_QSNET)    += qsnet.o
76193 +qsnet-objs     := debug.o kernel_linux.o i686_mmx.o
76194 +
76195 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
76196 diff -urN clean/drivers/net/qsnet/qsnet/Makefile.conf linux-2.6.9/drivers/net/qsnet/qsnet/Makefile.conf
76197 --- clean/drivers/net/qsnet/qsnet/Makefile.conf 1969-12-31 19:00:00.000000000 -0500
76198 +++ linux-2.6.9/drivers/net/qsnet/qsnet/Makefile.conf   2005-09-07 10:39:34.000000000 -0400
76199 @@ -0,0 +1,10 @@
76200 +# Flags for generating QsNet Linux Kernel Makefiles
76201 +MODNAME                =       qsnet.o
76202 +MODULENAME     =       qsnet
76203 +KOBJFILES      =       debug.o kernel_linux.o i686_mmx.o
76204 +EXPORT_KOBJS   =       kernel_linux.o
76205 +CONFIG_NAME    =       CONFIG_QSNET
76206 +SGALFC         =       
76207 +# EXTRALINES START
76208 +
76209 +# EXTRALINES END
76210 diff -urN clean/drivers/net/qsnet/qsnet/qsnetkmem_linux.c linux-2.6.9/drivers/net/qsnet/qsnet/qsnetkmem_linux.c
76211 --- clean/drivers/net/qsnet/qsnet/qsnetkmem_linux.c     1969-12-31 19:00:00.000000000 -0500
76212 +++ linux-2.6.9/drivers/net/qsnet/qsnet/qsnetkmem_linux.c       2003-08-13 06:03:27.000000000 -0400
76213 @@ -0,0 +1,325 @@
76214 +/*
76215 + *    Copyright (c) 2003 by Quadrics Ltd.
76216 + * 
76217 + *    For licensing information please see the supplied COPYING file
76218 + *
76219 + */
76220 +
76221 +#ident "@(#)$Id: qsnetkmem_linux.c,v 1.3 2003/08/13 10:03:27 fabien Exp $"
76222 +/*      $Source: /cvs/master/quadrics/qsnet/qsnetkmem_linux.c,v $*/
76223 +
76224 +/* macro macros */
76225 +#define MACRO_BEGIN     do {
76226 +#define MACRO_END       } while (0)
76227 +#define offsetof(T,F) ((int )&(((T *)0)->F))
76228 +
76229 +#include <stdio.h>
76230 +#include <stdlib.h>
76231 +#include <ctype.h>
76232 +#include <sys/types.h>
76233 +#include <errno.h>
76234 +#include <unistd.h>
76235 +#include <string.h>
76236 +#include <qsnet/config.h>
76237 +#include <qsnet/list.h>
76238 +#include <qsnet/procfs_linux.h>
76239 +#include <signal.h>
76240 +#include <sys/wait.h>
76241 +
76242 +#define LIST_HEAD_INIT(name) { &(name), &(name) }
76243 +
76244 +#define LIST_HEAD(name) \
76245 +       struct list_head name = LIST_HEAD_INIT(name)
76246 +
76247 +typedef struct {
76248 +       struct list_head list;
76249 +       void            *ptr;
76250 +       int             len;
76251 +       int             used_vmalloc;
76252 +       void            *owner;
76253 +       void            *caller;
76254 +       unsigned int     time;
76255 +       int              mark;
76256 +       int              line;
76257 +       char             file[256];
76258 +       
76259 +} kmalloc_t;
76260 +
76261 +
76262 +static LIST_HEAD(current_kmem);
76263 +static LIST_HEAD(stored_kmem);
76264 +
76265 +void
76266 +count_kmem(struct list_head * list, long * count, long * size )
76267 +{
76268 +       long              c,s;
76269 +       struct list_head *tmp;
76270 +       kmalloc_t        *kmem_ptr = NULL;
76271 +
76272 +
76273 +       c = s = 0L;
76274 +
76275 +       list_for_each(tmp, list) {
76276 +               kmem_ptr = list_entry(tmp, kmalloc_t , list);
76277 +               c++;
76278 +               s += kmem_ptr->len;
76279 +       }       
76280 +
76281 +       *count = c;
76282 +       *size  = s;
76283 +}
76284 +
76285 +void
76286 +clear_kmem(struct list_head * list)
76287 +{
76288 +       struct list_head *tmp,*tmp2;
76289 +       kmalloc_t        *kmem_ptr = NULL;
76290 +
76291 +       list_for_each_safe(tmp, tmp2, list) {
76292 +               kmem_ptr = list_entry(tmp, kmalloc_t , list);
76293 +               list_del_init(&kmem_ptr->list);
76294 +               free( kmem_ptr );
76295 +       }
76296 +}
76297 +
76298 +void 
76299 +move_kmem(struct list_head * dest, struct list_head *src)
76300 +{
76301 +       struct list_head *tmp,*tmp2;
76302 +       kmalloc_t        *kp= NULL;
76303 +
76304 +       list_for_each_safe(tmp, tmp2, src) {
76305 +               kp = list_entry(tmp, kmalloc_t , list);
76306 +               list_del_init(&kp->list);
76307 +
76308 +/*
76309 +               printf("mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
76310 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
76311 +*/                 
76312 +
76313 +               list_add_tail(&kp->list, dest);
76314 +       }
76315 +}
76316 +
76317 +void
76318 +read_kmem(struct list_head * list)
76319 +{
76320 +       FILE      * fd;
76321 +       char        line[1024];
76322 +       int         line_size = 100;
76323 +       char      * rep;
76324 +       kmalloc_t * kp;
76325 +
76326 +       clear_kmem(list);
76327 +
76328 +       fd = fopen(QSNET_PROCFS_KMEM_DEBUG,"r");
76329 +       if ( fd == NULL) 
76330 +       {
76331 +               printf("No Kmem Debug\n");
76332 +               return;
76333 +       }
76334 +
76335 +       rep = fgets(line,line_size, fd);
76336 +
76337 +       while ( rep != NULL ) 
76338 +       {
76339 +               kp = malloc(sizeof(kmalloc_t));
76340 +
76341 +               sscanf(line,"%p %d %d %p %p %u %d %s\n",
76342 +                      &kp->ptr, &kp->len, &kp->used_vmalloc, &kp->caller, &kp->owner, &kp->time, &kp->line, &kp->file[0]);
76343 +
76344 +/*
76345 +               printf(">>%s<<\n",line);
76346 +               printf("%p %d %d %p %p %u %d %s\n",
76347 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->time, kp->line, kp->file);
76348 +*/
76349 +
76350 +               list_add_tail(&kp->list, list);
76351 +
76352 +               rep = fgets(line,line_size, fd);
76353 +       }
76354 +       fclose(fd);
76355 +}
76356 +
76357 +void
76358 +mark_kmem(struct list_head * list, int mark)
76359 +{
76360 +       struct list_head *tmp;
76361 +       kmalloc_t        *kp = NULL;
76362 +
76363 +       list_for_each(tmp, list) {
76364 +               kp = list_entry(tmp, kmalloc_t , list);
76365 +
76366 +               kp->mark = mark;
76367 +       }
76368 +}
76369 +
76370 +kmalloc_t *
76371 +find_kmem(kmalloc_t * value, struct list_head * list)
76372 +{
76373 +       struct list_head *tmp;
76374 +       kmalloc_t        *kp = NULL;
76375 +
76376 +       
76377 +       list_for_each(tmp, list) {
76378 +               kp = list_entry(tmp, kmalloc_t , list);
76379 +               if ( (kp->ptr == value->ptr)
76380 +                    && (kp->len == value->len)
76381 +                    && (kp->used_vmalloc  == value->used_vmalloc )
76382 +                    && (kp->owner  == value->owner )
76383 +                    && (kp->caller  == value->caller )
76384 +                    && (kp->time  == value->time )
76385 +                    && (kp->line  == value->line )
76386 +                    && !(strcmp(kp->file,value->file) ))
76387 +                       return kp;
76388 +       }       
76389 +       return NULL;
76390 +}
76391 +
76392 +void 
76393 +diff_kmem(struct list_head *curr, struct list_head *stored)
76394 +{
76395 +       struct list_head *tmp;
76396 +       kmalloc_t        *kp = NULL;
76397 +       long              c,s;
76398 +
76399 +       mark_kmem(stored,  0);
76400 +       mark_kmem(curr,    0);
76401 +       
76402 +       list_for_each(tmp, stored) {
76403 +               kp = list_entry(tmp, kmalloc_t , list);
76404 +               if (find_kmem( kp, curr) != NULL) 
76405 +                       kp->mark = 1;
76406 +       }
76407 +       
76408 +       list_for_each(tmp, curr) {
76409 +               kp = list_entry(tmp, kmalloc_t , list);
76410 +               if (find_kmem( kp, stored) != NULL) 
76411 +                       kp->mark = 1;
76412 +       }               
76413 +
76414 +       c=s=0L;
76415 +       list_for_each(tmp, stored) {
76416 +               kp = list_entry(tmp, kmalloc_t , list);
76417 +               if (kp->mark != 1)
76418 +               {
76419 +                       printf("-- mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
76420 +                              kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
76421 +                       c++;
76422 +                       s+= kp->len;
76423 +               }
76424 +       }
76425 +       printf("-- %4ld %10ld \n",c,s);
76426 +       
76427 +       c=s=0L;
76428 +       list_for_each(tmp, curr) {
76429 +               kp = list_entry(tmp, kmalloc_t , list);
76430 +               if (kp->mark != 1)
76431 +               {
76432 +                       printf("++ mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
76433 +                              kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
76434 +                       c++;
76435 +                       s+= kp->len;
76436 +               }
76437 +       }               
76438 +       printf("++ %4ld %10ld \n",c,s);
76439 +}
76440 +
76441 +
76442 +void
76443 +print_kmem(struct list_head * list)
76444 +{
76445 +       struct list_head *tmp;
76446 +       kmalloc_t        *kp = NULL;
76447 +
76448 +       list_for_each(tmp, list) {
76449 +               kp = list_entry(tmp, kmalloc_t , list);
76450 +
76451 +               printf("mem %p len %d (vm=%d)  caller %p owner %p (%s:%d)\n",
76452 +                      kp->ptr, kp->len, kp->used_vmalloc, kp->caller, kp->owner, kp->file, kp->line);
76453 +                   
76454 +       }
76455 +}
76456 +
76457 +void 
76458 +print_cmds()
76459 +{
76460 +       long c,s;
76461 +
76462 +       printf("q : quits \n");
76463 +       printf("r : read\n");
76464 +       printf("c : print current\n");
76465 +       printf("o : print stored\n");
76466 +       printf("s : store\n");
76467 +
76468 +       count_kmem(&current_kmem, &c, &s );
76469 +       printf("\ncurrent : %4ld %10ld\n", c , s);
76470
76471 +       count_kmem(&stored_kmem, &c, &s );
76472 +       printf("store   : %4ld %10ld\n", c , s);
76473
76474 +}
76475 +
76476 +int
76477 +main()
76478 +{
76479 +       char            line[128];
76480 +       int             line_size=127;
76481 +       int             len;
76482 +
76483 +
76484 +       while (1)
76485 +       {
76486 +               
76487 +               printf(">> ");
76488 +               fgets(line,line_size, stdin);
76489 +       
76490 +               
76491 +               len = strlen( line ) -1;
76492 +               if ( len ) 
76493 +               {
76494 +                       switch ( tolower(line[0]) ) 
76495 +                       {
76496 +                       case 'q':
76497 +                               exit(0);
76498 +
76499 +                       case 'r' :
76500 +                               read_kmem(&current_kmem);
76501 +                               break;
76502 +
76503 +                       case 'c' :
76504 +                               print_kmem(&current_kmem);
76505 +                               break;
76506 +
76507 +                       case 'o' :
76508 +                               print_kmem(&stored_kmem);
76509 +                               break;
76510 +
76511 +                       case 's' :
76512 +                               clear_kmem(&stored_kmem);
76513 +                               move_kmem(&stored_kmem, &current_kmem);
76514 +                               break;
76515 +
76516 +                       case 'd' :
76517 +                               diff_kmem(&current_kmem, &stored_kmem);
76518 +                               break;
76519 +
76520 +                       default:
76521 +                               print_cmds();   
76522 +                       }
76523 +
76524 +               
76525 +                       
76526 +               }
76527 +               else
76528 +                       print_cmds();
76529 +       }
76530 +
76531 +}
76532 +
76533 +
76534 +/*
76535 + * Local variables:
76536 + * c-file-style: "linux"
76537 + * End:
76538 + */
76539 diff -urN clean/drivers/net/qsnet/qsnet/quadrics_version.h linux-2.6.9/drivers/net/qsnet/qsnet/quadrics_version.h
76540 --- clean/drivers/net/qsnet/qsnet/quadrics_version.h    1969-12-31 19:00:00.000000000 -0500
76541 +++ linux-2.6.9/drivers/net/qsnet/qsnet/quadrics_version.h      2005-09-07 10:39:49.000000000 -0400
76542 @@ -0,0 +1 @@
76543 +#define QUADRICS_VERSION "5.11.3qsnet"
76544 diff -urN clean/drivers/net/qsnet/rms/Makefile linux-2.6.9/drivers/net/qsnet/rms/Makefile
76545 --- clean/drivers/net/qsnet/rms/Makefile        1969-12-31 19:00:00.000000000 -0500
76546 +++ linux-2.6.9/drivers/net/qsnet/rms/Makefile  2005-10-10 17:47:31.000000000 -0400
76547 @@ -0,0 +1,15 @@
76548 +#
76549 +# Makefile for Quadrics QsNet
76550 +#
76551 +# Copyright (c) 2002-2004 Quadrics Ltd
76552 +#
76553 +# File: drivers/net/qsnet/rms/Makefile
76554 +#
76555 +
76556 +
76557 +#
76558 +
76559 +obj-$(CONFIG_RMS)      += rms.o
76560 +rms-objs       := rms_kern.o rms_kern_Linux.o
76561 +
76562 +EXTRA_CFLAGS           +=  -DDEBUG -DDEBUG_PRINTF -DDEBUG_ASSERT
76563 diff -urN clean/drivers/net/qsnet/rms/Makefile.conf linux-2.6.9/drivers/net/qsnet/rms/Makefile.conf
76564 --- clean/drivers/net/qsnet/rms/Makefile.conf   1969-12-31 19:00:00.000000000 -0500
76565 +++ linux-2.6.9/drivers/net/qsnet/rms/Makefile.conf     2005-09-07 10:39:48.000000000 -0400
76566 @@ -0,0 +1,10 @@
76567 +# Flags for generating QsNet Linux Kernel Makefiles
76568 +MODNAME                =       rms.o
76569 +MODULENAME     =       rms
76570 +KOBJFILES      =       rms_kern.o rms_kern_Linux.o
76571 +EXPORT_KOBJS   =       
76572 +CONFIG_NAME    =       CONFIG_RMS
76573 +SGALFC         =       
76574 +# EXTRALINES START
76575 +
76576 +# EXTRALINES END
76577 diff -urN clean/drivers/net/qsnet/rms/quadrics_version.h linux-2.6.9/drivers/net/qsnet/rms/quadrics_version.h
76578 --- clean/drivers/net/qsnet/rms/quadrics_version.h      1969-12-31 19:00:00.000000000 -0500
76579 +++ linux-2.6.9/drivers/net/qsnet/rms/quadrics_version.h        2005-09-07 10:39:49.000000000 -0400
76580 @@ -0,0 +1 @@
76581 +#define QUADRICS_VERSION "5.11.3qsnet"
76582 diff -urN clean/drivers/net/qsnet/rms/rms_kern.c linux-2.6.9/drivers/net/qsnet/rms/rms_kern.c
76583 --- clean/drivers/net/qsnet/rms/rms_kern.c      1969-12-31 19:00:00.000000000 -0500
76584 +++ linux-2.6.9/drivers/net/qsnet/rms/rms_kern.c        2005-09-07 10:35:04.000000000 -0400
76585 @@ -0,0 +1,1484 @@
76586 +/*
76587 + * Copyright (c) 1996-2003 by Quadrics Supercomputers World Ltd.
76588 + * Copyright (c) 2004-2006 by Quadrics Ltd
76589 + *
76590 + * For licensing information please see the supplied COPYING file
76591 + *
76592 + * rms_kern.c:    RMS kernel module
76593 + *
76594 + * $Source: /cvs/master/quadrics/rmsmod/rms_kern.c,v $
76595 + */
76596 +
76597 +#ident "@(#)$Id: rms_kern.c,v 1.77.2.8 2005/09/07 14:35:04 mike Exp $"
76598 +
76599 +#include <stddef.h>
76600 +#include <qsnet/kernel.h>
76601 +#include <qsnet/autoconf.h>
76602 +#include <rms/rmscall.h>
76603 +
76604 +
76605 +/*
76606 + * extended stats added in version 5
76607 + * patch free kernel support added in version 6
76608 + */
76609 +#define RMS_MODVERSION 6
76610 +
76611 +#ifdef PROCESS_ACCT 
76612 +#define TIMEVAL_TO_MSEC(tv) ((tv)->tv_sec * 1000 + (tv)->tv_usec / 1000)
76613 +#define TIMEVAL_TO_CT(tv)   ((tv)->tv_sec * HZ + (tv)->tv_usec / (1000000L / HZ))
76614 +#endif
76615 +
76616 +#ifdef get_mm_counter
76617 +#define PROC_RSS(proc) ((proc)->mm ? get_mm_counter(proc->mm, rss) : 0)
76618 +#else
76619 +#ifdef RSS_ATOMIC
76620 +#define PROC_RSS(proc) ((proc)->mm ? atomic_read(&(proc)->mm->rss) : 0)
76621 +#else
76622 +#define PROC_RSS(proc) ((proc)->mm ? (proc)->mm->rss : 0)
76623 +#endif
76624 +#endif
76625 +
76626 +/*
76627 + * 2.6 kernels don't consistently export put_task_struct
76628 + */
76629 +
76630 +#ifdef free_task_struct
76631 +#undef NO_PUTGET_TASK
76632 +#else
76633 +#define NO_PUTGET_TASK
76634 +#endif
76635 +
76636 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
76637 +#      define  RMS_NCPUS()     smp_num_cpus
76638 +#else
76639 +#      define RMS_NCPUS()      num_online_cpus()
76640 +#endif
76641 +
76642 +#define CURUID()       CURPROC()->uid
76643 +#define p_pid          pid
76644 +
76645 +
76646 +/* care needed with conversion to millisecs on 32-bit Linux */
76647 +#ifdef LINUX_I386
76648 +#define CT_TO_MSEC(x)  ct_to_msec(x)
76649 +
76650 +uint64_t ct_to_msec(clock_t t)
76651 +{
76652 +    uint64_t msecs;
76653 +    if (t < 2000000)
76654 +    {
76655 +       t = (1000 * t)/HZ;
76656 +       msecs = t;
76657 +    }
76658 +    else
76659 +    {
76660 +       t = t / HZ;
76661 +       msecs = t * 1000;
76662 +    }
76663 +    return(msecs);
76664 +}
76665 +#else
76666 +#define CT_TO_MSEC(x)  (((x) * 1000)/HZ)
76667 +#endif
76668 +
76669 +#ifndef FALSE
76670 +#define FALSE          (0)
76671 +#define TRUE           (!FALSE)
76672 +#endif
76673 +
76674 +#include <linux/time.h>
76675 +#include <linux/proc_fs.h>
76676 +#ifndef NO_PTRACK
76677 +#include <linux/ptrack.h>
76678 +#endif
76679 +#include <qsnet/module.h>
76680 +#ifndef NO_SHM_CLEANUP
76681 +extern int shm_cleanup(void);
76682 +#endif
76683 +
76684 +struct cap_desc {
76685 +
76686 +    struct cap_desc *next;
76687 +    int              index;    /* index of capability in program */
76688 +    ELAN_CAPABILITY  cap;      /* elan capability */
76689 +
76690 +};
76691 +
76692 +struct proc_desc {
76693 +    
76694 +    struct proc_desc    *next;
76695 +    struct task_struct  *task;
76696 +    struct prg_desc     *program;      /* controlling program         */
76697 +    int                  mycap;                /* index of my capability      */
76698 +    int                  myctx;                /* context number for process  */
76699 +    int                  flags;
76700 +    int                  vp;           /* elan virtual process number */
76701 +    int                  pid;          /* process id                  */
76702 +
76703 +    /* last set of stats sampled */
76704 +    uint64_t             utime;
76705 +    uint64_t             stime;
76706 +    uint64_t             majflt;
76707 +    int                  maxrss;
76708 +
76709 +};
76710 +
76711 +struct prg_desc {
76712 +    
76713 +    struct prg_desc  *next;            
76714 +    int               id;      /* program id                          */
76715 +    int               flags;   /* program status flags                */
76716 +    uid_t             uid;     /* user id                             */
76717 +    int               ncpus;   /* number of cpus allocated to program */
76718 +    int               nprocs;  /* number of processes in program      */
76719 +    struct proc_desc *pdescs;  /* processes in this program           */
76720 +    int               ncaps;   /* number of capabilities              */
76721 +    struct cap_desc  *caps;    /* elan capabilities                   */
76722 +    char             *corepath;        /* core path for parallel program      */
76723 +    int               psid;    /* processor set id                    */
76724 +
76725 +    uint64_t       cutime;     /* user time accumulated by children   */
76726 +    uint64_t       cstime;     /* system time accumulated by children */
76727 +    uint64_t       start_time; /* time program created                */
76728 +    uint64_t       end_time;   /* time last process exited            */
76729 +    uint64_t       sched_time; /* last time job was scheduled         */
76730 +    uint64_t       accum_atime;        /* allocated time last deschedule      */
76731 +    uint64_t       memint;     /* accumulated memory integral         */
76732 +    uint64_t       ebytes;     /* data transferred by the Elan(s)     */
76733 +    uint64_t       exfers;     /* number of Elan data transfers       */
76734 +    uint64_t       majflt;     /* number of major faults              */
76735 +    int            maxrss;     /* maximum size to date                */
76736 +    
76737 +    struct proc_dir_entry *proc_entry;
76738 +
76739 +};
76740 +
76741 +static int rms_ptrack_callback (void *arg, int phase, struct task_struct *child);
76742 +
76743 +static void prgsignal(struct prg_desc *program, int signo);
76744 +static uint64_t gettime(void);
76745 +static void freeProgram(struct prg_desc *program);
76746 +
76747 +static struct prg_desc *programs = 0;
76748 +
76749 +kmutex_t rms_lock;
76750 +
76751 +#ifdef NO_PTRACK
76752 +int      ptrack_enabled = 0;
76753 +#else
76754 +int      ptrack_enabled = 1;
76755 +#endif
76756 +
76757 +int rms_init(void)
76758 +{
76759 +    kmutex_init (&rms_lock);
76760 +
76761 +    DBG(printk("rms: initialising ptrack %d\n", ptrack_enabled));
76762 +
76763 +    return(ESUCCESS);
76764 +}
76765 +
76766 +int rms_reconfigure(void)
76767 +{
76768 +    return(ESUCCESS);
76769 +}
76770 +
76771 +int rms_programs_registered(void)
76772 +{
76773 +    /*
76774 +    ** Called when trying to unload rms.mod will not succeed
76775 +    ** if programs registered
76776 +    */
76777
76778 +   struct prg_desc *program, **pp;
76779 +
76780 +   kmutex_lock(&rms_lock);
76781 +
76782 +   for (program = programs; program; program = program->next)
76783 +   {
76784 +       if (program->nprocs != 0)
76785 +       {
76786 +            kmutex_unlock(&rms_lock);
76787 +            return(EBUSY);
76788 +       }
76789 +   }
76790 +
76791 +   /*
76792 +   ** We have traversed the programs list and no processes registered
76793 +   ** Now free the memory
76794 +   */
76795 +       
76796 +    pp = &programs;
76797 +    while ((program = *pp) != NULL)
76798 +    {
76799 +        *pp = program->next;
76800 +        freeProgram(program);
76801 +    }
76802 +    kmutex_unlock(&rms_lock);
76803 +   
76804 +    return(ESUCCESS);
76805 +
76806 +}
76807 +
76808 +int rms_fini(void)
76809 +{
76810 +    /*
76811 +     * don't allow an unload if there are programs registered
76812 +     */
76813 +    if (rms_programs_registered())
76814 +        return(EBUSY);
76815 +
76816 +    kmutex_destroy (&rms_lock);
76817 +
76818 +    DBG(printk("rms: removed\n"));
76819 +
76820 +    return(ESUCCESS);
76821 +}
76822 +
76823 +extern struct proc_dir_entry *rms_procfs_programs;
76824 +
76825 +/*
76826 + * display one pid per line; if there isn't enough space
76827 + * for another pid then add "...\n" and stop
76828 + */
76829 +int pids_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
76830 +{
76831 +    struct prg_desc *program = (struct prg_desc *)data;
76832 +    struct proc_desc *pdesc;
76833 +    char *ptr = page;
76834 +    int bytes = 0, nb;
76835 +
76836 +    kmutex_lock(&rms_lock);
76837 +    
76838 +    for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
76839 +    {
76840 +       if (bytes > count - 15)
76841 +       {
76842 +           bytes += sprintf(ptr,"...\n");
76843 +           break;
76844 +       }
76845 +       nb = sprintf(ptr, "%d %d\n", pdesc->pid, pdesc->vp);
76846 +       bytes += nb;
76847 +       ptr += nb;
76848 +    }
76849 +    kmutex_unlock(&rms_lock);
76850 +    
76851 +    return(bytes);
76852 +}
76853 +
76854 +int status_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
76855 +{
76856 +    struct prg_desc *program = (struct prg_desc *)data;
76857 +    int bytes;
76858 +    if (program->flags & PRG_KILLED)
76859 +       bytes = sprintf(page, "killed\n");
76860 +    else if (program->flags & PRG_SUSPEND)
76861 +       bytes = sprintf(page, "suspended\n");
76862 +    else
76863 +       bytes = sprintf(page, "running\n");
76864 +    return(bytes);
76865 +}
76866 +
76867 +void rms_create_proc_entry(struct prg_desc *program)
76868 +{
76869 +    struct proc_dir_entry *p;
76870 +    char name[32];
76871 +
76872 +    if (rms_procfs_programs)
76873 +    {
76874 +       sprintf(name,"%d", program->id);
76875 +       if ((program->proc_entry = proc_mkdir(name, rms_procfs_programs)) != NULL)
76876 +       {
76877 +           if ((p = create_proc_entry ("pids", S_IRUGO, program->proc_entry)) != NULL)
76878 +           {
76879 +               p->owner = THIS_MODULE;
76880 +               p->data = program;
76881 +               p->read_proc = pids_callback;
76882 +           }
76883 +           if ((p = create_proc_entry ("status", S_IRUGO, program->proc_entry)) != NULL)
76884 +           {
76885 +               p->owner = THIS_MODULE;
76886 +               p->data = program;
76887 +               p->read_proc = status_callback;
76888 +           }
76889 +       }
76890 +    }
76891 +}
76892 +
76893 +void rms_remove_proc_entry(struct prg_desc *program)
76894 +{
76895 +    char name[32];
76896 +    if (rms_procfs_programs)
76897 +    {
76898 +       if (program->proc_entry)
76899 +       {
76900 +           remove_proc_entry ("pids", program->proc_entry);
76901 +           remove_proc_entry ("status", program->proc_entry);
76902 +       }
76903 +       sprintf(name,"%d", program->id);
76904 +       remove_proc_entry (name, rms_procfs_programs);
76905 +    }
76906 +}
76907 +
76908 +/*
76909 + * find a program from its index/pid
76910 + */
76911 +static struct prg_desc *findProgram(const int id)
76912 +{
76913 +    struct prg_desc *program;
76914 +    for (program = programs; program; program = program->next)
76915 +       if (program->id == id)
76916 +           return(program);
76917 +    return(0);
76918 +}
76919 +
76920 +static struct proc_desc *findProcess(const int pid)
76921 +{
76922 +    struct prg_desc *program;
76923 +    struct proc_desc *pdesc;
76924 +
76925 +    for (program = programs; program; program = program->next)
76926 +       for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
76927 +           if (pdesc->pid == pid)
76928 +               return(pdesc);
76929 +    
76930 +    return(0);
76931 +}
76932 +
76933 +static void freeProgram(struct prg_desc *program)
76934 +{
76935 +    struct proc_desc *pdesc;
76936 +    struct cap_desc *cdesc;
76937 +
76938 +    rms_remove_proc_entry(program);
76939 +
76940 +    while ((pdesc = program->pdescs) != NULL)
76941 +    {
76942 +       program->pdescs = pdesc->next;
76943 +       KMEM_FREE(pdesc, sizeof(struct proc_desc));
76944 +    }
76945 +
76946 +    while ((cdesc = program->caps) != NULL)
76947 +    {
76948 +       program->caps = cdesc->next;
76949 +       KMEM_FREE(cdesc, sizeof(struct cap_desc));
76950 +    }
76951 +
76952 +    if (program->corepath)
76953 +       KMEM_FREE(program->corepath, MAXCOREPATHLEN + 1);
76954 +
76955 +    KMEM_FREE(program, sizeof(struct prg_desc));
76956 +
76957 +    MOD_DEC_USE_COUNT;
76958 +}
76959 +
76960 +/*
76961 + * rms_prgcreate
76962 + *
76963 + * create a new program description
76964 + */
76965 +int rms_prgcreate(int id, uid_t uid, int cpus)
76966 +{
76967 +    struct prg_desc *program;
76968 +    struct proc_desc *pdesc;
76969 +    
76970 +    DBG(printk("rms_prgcreate :: program %d pid %d uid %d cpus %d\n", id, CURPROC()->p_pid, uid, cpus));
76971 +    
76972 +    /*
76973 +     * parallel programs are created as root by the rmsd as it forks the loader
76974 +     */
76975 +    if (CURUID())
76976 +       return(EACCES);
76977 +    
76978 +    /*
76979 +     * program ids must be unique
76980 +     */
76981 +    kmutex_lock(&rms_lock);
76982 +    program = findProgram(id);
76983 +    kmutex_unlock(&rms_lock);
76984 +    if (program)
76985 +       return(EINVAL);
76986 +
76987 +    /*
76988 +     * create a new program description
76989 +     */
76990 +    KMEM_ALLOC(program, struct prg_desc *, sizeof(struct prg_desc), TRUE);
76991 +    if (!program)
76992 +       return(ENOMEM);
76993 +
76994 +    program->id = id;
76995 +    program->flags = PRG_RUNNING;
76996 +    program->ncpus = cpus;
76997 +    program->nprocs = 1;
76998 +    program->uid = uid;
76999 +    program->ncaps = 0;
77000 +    program->caps = 0;
77001 +    program->corepath = 0;
77002 +    program->psid = 0;
77003 +    program->start_time = program->sched_time = gettime();
77004 +    program->end_time = 0;
77005 +    program->accum_atime = 0;
77006 +    program->cutime = 0;
77007 +    program->cstime = 0;
77008 +    program->maxrss = 0;
77009 +    program->memint = 0;
77010 +    program->majflt = 0;
77011 +    program->ebytes = 0;
77012 +    program->exfers = 0;
77013 +
77014 +    KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE);
77015 +    if (!pdesc)
77016 +    {
77017 +       KMEM_FREE (program,sizeof (struct prg_desc));
77018 +       return(ENOMEM);
77019 +    }
77020 +
77021 +    /* bump the reference count on the caller */
77022 +    pdesc->task = ptrack_enabled ? CURPROC() : NULL;
77023 +
77024 +    pdesc->next = 0;
77025 +    pdesc->mycap = ELAN_CAP_UNINITIALISED;
77026 +    pdesc->myctx = ELAN_CAP_UNINITIALISED;
77027 +    pdesc->vp = -1;            /* rmsloader */
77028 +    pdesc->program = program;
77029 +    pdesc->pid = CURPROC()->p_pid;
77030 +    program->pdescs = pdesc;
77031 +    
77032 +    rms_create_proc_entry(program);
77033 +    
77034 +    kmutex_lock(&rms_lock);
77035 +
77036 +#ifndef NO_PTRACK
77037 +
77038 +    if (ptrack_enabled)
77039 +    {
77040 +       DBG(printk("rms_prgcreate :: ptrack enabled - fork callback\n"));
77041 +       if (ptrack_register (rms_ptrack_callback, NULL) != 0)
77042 +       {
77043 +           kmutex_unlock(&rms_lock);
77044 +           KMEM_FREE(pdesc,sizeof(struct proc_desc));
77045 +           KMEM_FREE(program,sizeof(struct prg_desc));
77046 +           return(ENOMEM);
77047 +       }
77048 +    }
77049 +    else
77050 +    {
77051 +       DBG(printk("rms_prgcreate :: ptrack disabled - no fork callback\n"));
77052 +    }
77053 +#endif
77054 +
77055 +    program->next = programs;
77056 +    programs = program;
77057 +    
77058 +    MOD_INC_USE_COUNT;
77059 +    
77060 +    kmutex_unlock(&rms_lock);
77061 +    return(ESUCCESS);
77062 +}
77063 +
77064 +
77065 +/*
77066 + * can't rely on put_task_struct being exported
77067 + * so we need to make sure that a proc is valid
77068 + * before extracting the stats
77069 + */
77070 +
77071 +int getProcessStats(struct proc_desc *pdesc)
77072 +{
77073 +    struct task_struct *task = 0;
77074 +    
77075 +    if (ptrack_enabled)
77076 +       task = pdesc->task;
77077 +    else
77078 +    {
77079 +       read_lock(&tasklist_lock);
77080 +       task = find_task_by_pid(pdesc->pid);
77081 +    }
77082 +    
77083 +    if (task)
77084 +    {
77085 +#ifdef PROCESS_ACCT
77086 +       pdesc->utime = TIMEVAL_TO_MSEC(&task->utime);
77087 +       pdesc->stime = TIMEVAL_TO_MSEC(&task->stime);      
77088 +       
77089 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)                           
77090 +       pdesc->utime = CT_TO_MSEC(task->times.tms_utime);
77091 +       pdesc->stime = CT_TO_MSEC(task->times.tms_stime);
77092 +       
77093 +#else
77094 +       pdesc->utime = CT_TO_MSEC(task->utime);
77095 +       pdesc->stime = CT_TO_MSEC(task->stime);
77096 +#endif
77097 +       pdesc->majflt = task->maj_flt;
77098 +
77099 +       /*
77100 +        * the ptrack exit callbacks occur before exit_mm
77101 +        * but if ptrack is not present we can get called
77102 +        * with task->mm = 0
77103 +        */
77104 +       pdesc->maxrss = PROC_RSS(task) >> (20 - PAGE_SHIFT);
77105 +    }
77106 +    
77107 +    if (!ptrack_enabled)
77108 +       read_unlock(&tasklist_lock);
77109 +    
77110 +    return(task ? 0 : -1);
77111 +}
77112 +
77113 +
77114 +
77115 +
77116 +/*
77117 + * remove a process descriptor keeping track of the
77118 + * accumulated resource usage
77119 + */
77120 +
77121 +static void removeProcDesc(struct prg_desc *program, struct proc_desc *pdesc)
77122 +{
77123 +#ifndef NO_PTRACK
77124 +    struct proc_desc *p;
77125 +#endif
77126 +    int maxrss;
77127 +
77128 +    /*
77129 +     * keep track of the resources used by processes that have 
77130 +     * exited, if ptrack is enabled then we will be called 
77131 +     * as the process exits, otherwise we will have the last
77132 +     * sample
77133 +     */
77134 +    getProcessStats(pdesc);
77135 +    
77136 +    program->cutime += pdesc->utime;
77137 +    program->cstime += pdesc->stime;
77138 +    program->majflt += pdesc->majflt;
77139 +    maxrss = pdesc->maxrss;
77140 +
77141 +    /*
77142 +     * process specific shared memory cleanup requires the shm_cleanup
77143 +     * patch, otherwise the run time system is left to do the job with
77144 +     * a blunt axe 
77145 +     */
77146 +#ifndef NO_SHM_CLEANUP
77147 +    shm_cleanup();
77148 +#endif
77149 +    
77150 +    /* 
77151 +     * remove process from program 
77152 +     */
77153 +    KMEM_FREE(pdesc, sizeof(struct proc_desc));
77154 +    program->nprocs--;
77155 +    
77156 +    /*
77157 +     * update the memory high water mark for the program
77158 +     *
77159 +     * safe to access the task structures if we have incremented
77160 +     * their reference counts as they were added to the program 
77161 +     * however, the mm can be zero
77162 +     */
77163 +#ifndef NO_PTRACK
77164 +    for (p = program->pdescs; p; p = p->next)
77165 +       maxrss += PROC_RSS(p->task) >> (20 - PAGE_SHIFT);
77166 +    
77167 +    if (maxrss > program->maxrss)
77168 +       program->maxrss = maxrss;
77169 +#endif
77170 +    
77171 +    DBG(printk("rms_removproc :: program %d procs %d cutime %lld cstime %lld mem %d\n", 
77172 +              program->id, program->nprocs, 
77173 +              (long long)program->cutime, (long long)program->cstime, 
77174 +              program->maxrss));
77175 +    
77176 +    /*
77177 +     * final update to the program if this is the last process
77178 +     */
77179 +    if (program->nprocs == 0)
77180 +    {
77181 +       program->end_time = gettime();
77182 +       program->flags &= ~PRG_RUNNING;
77183 +       program->accum_atime += program->ncpus * (program->end_time - program->sched_time);
77184 +       DBG(printk("rms_removproc :: last process has gone\n"));
77185 +    }
77186 +}
77187 +
77188 +
77189 +/*
77190 + * rms_prgdestroy
77191 + *
77192 + * destroy a program description
77193 + */
77194 +int rms_prgdestroy(int id)
77195 +{
77196 +    struct prg_desc *program, **pp;
77197 +    struct proc_desc *pdesc;
77198 +
77199 +    int status = ESRCH;
77200 +
77201 +    /*
77202 +     * parallel programs are created and destroyed by the rmsd
77203 +     */
77204 +    if (CURUID())
77205 +       return(EACCES);
77206 +
77207 +    kmutex_lock(&rms_lock);
77208 +    
77209 +    pp = &programs;
77210 +    while ((program = *pp) != NULL)
77211 +    {
77212 +       if (program->id == id)
77213 +       {
77214 +           /*
77215 +            * with ptrack disabled there won't be any exit callbacks
77216 +            */
77217 +           if (!ptrack_enabled)
77218 +           {
77219 +               while ((pdesc = program->pdescs))
77220 +               {
77221 +                   program->pdescs = pdesc->next;
77222 +                   removeProcDesc(program, pdesc);
77223 +               }
77224 +           }
77225 +
77226 +           if (program->nprocs == 0)
77227 +           {
77228 +               DBG(printk("rms_prgdestro :: removing program %d\n", program->id));
77229 +               *pp = program->next;
77230 +               freeProgram(program);
77231 +               status = ESUCCESS;
77232 +           }
77233 +           else
77234 +           {
77235 +               DBG(printk("rms_prgdestro :: failed to remove program %d: %d\n", program->id, program->nprocs));
77236 +               status = ECHILD;
77237 +               pp = &program->next;
77238 +           }
77239 +       }
77240 +       else
77241 +           pp = &program->next;
77242 +    }
77243 +    
77244 +    kmutex_unlock(&rms_lock);
77245 +    return(status);
77246 +}
77247 +
77248 +/*
77249 + * rms_prgids
77250 + */
77251 +int rms_prgids(int maxids, int *prgids, int *nprgs)
77252 +{
77253 +    struct prg_desc *program;
77254 +    int count = 0, *buf, *bufp;
77255 +    int status = ESUCCESS;
77256 +
77257 +    if (maxids < 1)
77258 +        return(EINVAL);
77259 +
77260 +    kmutex_lock(&rms_lock);
77261 +
77262 +    for (program = programs; program; program = program->next)
77263 +        count++;
77264 +    count = MIN(count, maxids);
77265 +
77266 +    if (count > 0)
77267 +    {
77268 +        KMEM_ALLOC(buf, int *, count * sizeof(int), TRUE);
77269 +       if (buf)
77270 +       {                  
77271 +           for (program = programs, bufp=buf; bufp < buf + count; 
77272 +                program = program->next)
77273 +               *bufp++ = program->id;
77274 +       
77275 +           if (copyout(buf, prgids, sizeof(int) * count))
77276 +               status = EFAULT;
77277 +
77278 +           KMEM_FREE(buf, count * sizeof(int));
77279 +       }
77280 +       else
77281 +           status = ENOMEM;
77282 +    }
77283 +    
77284 +    if (copyout(&count, nprgs, sizeof(int)))
77285 +       status = EFAULT;
77286 +
77287 +    kmutex_unlock(&rms_lock);
77288 +    
77289 +    return(status);
77290 +}
77291 +
77292 +/*
77293 + * rms_prginfo
77294 + */
77295 +int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs)
77296 +{
77297 +    struct prg_desc *program;
77298 +    struct proc_desc *pdesc;
77299 +    pid_t *pidp, *buf;
77300 +    int status = ESUCCESS;
77301 +
77302 +    kmutex_lock(&rms_lock);
77303 +
77304 +    if ((program = findProgram(id)) != NULL)
77305 +    {
77306 +       if (program->nprocs > 0)
77307 +       {
77308 +           KMEM_ALLOC(buf, pid_t *, program->nprocs * sizeof(pid_t), TRUE);
77309 +           if (buf)
77310 +           {
77311 +               for (pidp = buf, pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
77312 +                   *pidp++ = pdesc->pid;
77313 +               
77314 +               if (copyout(buf, pids, sizeof(pid_t) * MIN(program->nprocs, maxpids)))
77315 +                   status = EFAULT;
77316 +               
77317 +               KMEM_FREE(buf, program->nprocs * sizeof(pid_t));
77318 +           }
77319 +           else
77320 +               status = ENOMEM;
77321 +       }
77322 +       
77323 +       if (copyout(&program->nprocs, nprocs, sizeof(int)))
77324 +           status = EFAULT;
77325 +    }
77326 +    else
77327 +       status = ESRCH;
77328 +
77329 +    kmutex_unlock(&rms_lock);
77330 +    
77331 +    return(status);
77332 +}
77333 +
77334 +/*
77335 + * Deliver a signal to all the processes in a program
77336 + */
77337 +static void prgsignal(struct prg_desc *program, int signo)
77338 +{
77339 +    struct proc_desc *pdesc;
77340 +    DBG(printk("rms_prgsignal :: program %d signal %d\n", program->id, signo));
77341 +    for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
77342 +       kill_proc(pdesc->pid, signo, 1);
77343 +}
77344 +
77345 +int rms_prgsignal(int id, int signo)
77346 +{
77347 +    struct prg_desc *program;
77348 +    int status = ESUCCESS;
77349 +    
77350 +    kmutex_lock(&rms_lock);
77351 +    
77352 +    if ((program = findProgram(id)) != NULL)
77353 +    {
77354 +       if (CURUID() == 0 || CURUID() == program->uid)
77355 +       {
77356 +           prgsignal(program, signo);
77357 +           if (signo == SIGKILL)
77358 +               program->flags |= PRG_KILLED;
77359 +       }
77360 +       else
77361 +           status = EACCES;
77362 +    }
77363 +    else
77364 +       status = ESRCH;
77365 +    
77366 +    kmutex_unlock(&rms_lock);
77367 +    
77368 +    return(status);
77369 +}
77370 +
77371 +int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap)
77372 +{
77373 +    struct prg_desc *program;
77374 +    struct cap_desc *cdesc;
77375 +    int status = ESUCCESS;
77376 +
77377 +    if (cap == NULL)
77378 +        return(EINVAL);
77379 +
77380 +    kmutex_lock(&rms_lock);
77381 +    if ((program = findProgram(id)) != NULL)
77382 +    {
77383 +       KMEM_ALLOC(cdesc, struct cap_desc *, sizeof(struct cap_desc), TRUE);
77384 +       if (cdesc)
77385 +       {
77386 +           cdesc->index = index;
77387 +           if (copyin(cap, &cdesc->cap, sizeof(ELAN_CAPABILITY)))
77388 +           {
77389 +               KMEM_FREE(cdesc, sizeof(struct cap_desc));
77390 +               status = EFAULT;
77391 +           }
77392 +           else
77393 +           {
77394 +               DBG(printk("rms_prgaddcap :: program %d index %d context %d<-->%d\n",
77395 +                          program->id, index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext));
77396 +               cdesc->next = program->caps;
77397 +               program->caps = cdesc;
77398 +               program->ncaps++;
77399 +           }
77400 +       }
77401 +       else
77402 +           status = ENOMEM;
77403 +    }
77404 +    else
77405 +       status = ESRCH;
77406 +
77407 +    kmutex_unlock(&rms_lock);
77408 +    return(status);
77409 +}
77410 +
77411 +static uint64_t gettime(void)
77412 +{
77413 +    uint64_t now;
77414 +
77415 +    struct timeval tv;
77416 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17)
77417 +    get_fast_time(&tv);
77418 +#else
77419 +    do_gettimeofday(&tv);
77420 +#endif
77421 +    now = tv.tv_sec * 1000 + tv.tv_usec / 1000;
77422 +    return(now);
77423 +}
77424 +
77425 +
77426 +/*
77427 + * new stats collection interface, 64-bit with addition of Elan stats
77428 + */
77429 +int rms_prggetstats(int id, prgstats_t *stats)
77430 +{
77431 +    struct prg_desc *program = 0;
77432 +    struct proc_desc *pdesc;
77433 +
77434 +    int maxrss, status = ESUCCESS;
77435 +    prgstats_t totals;
77436 +    uint64_t now = gettime();
77437 +
77438 +    kmutex_lock(&rms_lock);
77439 +    
77440 +    if (id < 0)
77441 +    {
77442 +       if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
77443 +           program = pdesc->program;
77444 +    }
77445 +    else
77446 +       program = findProgram(id);
77447 +
77448 +    if (program)
77449 +    {
77450 +       if (CURUID() == 0 || CURUID() == program->uid)
77451 +       {
77452 +           totals.flags = program->flags;
77453 +           totals.ncpus = program->ncpus;
77454 +           maxrss = 0;
77455 +           
77456 +           if (program->nprocs > 0)
77457 +               totals.etime = now - program->start_time;
77458 +           else
77459 +               totals.etime = program->end_time - program->start_time;
77460 +           
77461 +           totals.atime = program->accum_atime;
77462 +           if (program->flags & PRG_RUNNING)
77463 +               totals.atime += program->ncpus * (now - program->sched_time);
77464 +           
77465 +           totals.utime = program->cutime;
77466 +           totals.stime = program->cstime;
77467 +           totals.pageflts = program->majflt;
77468 +           totals.memint = program->memint;
77469 +           
77470 +           for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
77471 +           {
77472 +               getProcessStats(pdesc);
77473 +               totals.utime += pdesc->utime;
77474 +               totals.stime += pdesc->stime;
77475 +               totals.pageflts += pdesc->majflt;
77476 +               maxrss += pdesc->maxrss;
77477 +           }
77478 +           
77479 +           if (maxrss > program->maxrss)
77480 +               program->maxrss = maxrss;
77481 +           
77482 +           totals.mem = program->maxrss;
77483 +           totals.ebytes = program->ebytes;
77484 +           totals.exfers = program->exfers;
77485 +           
77486 +           DBG(printk("rms_prggetsta :: program %d utime %lld stime %lld mem %d flt %lld\n", 
77487 +                      program->id, (long long)totals.utime, (long long)totals.stime, 
77488 +                      totals.mem, (long long)totals.pageflts));
77489 +           
77490 +           if (copyout(&totals, stats, sizeof(prgstats_t)))
77491 +               status = EFAULT;
77492 +       }
77493 +       else
77494 +           status = EACCES;
77495 +    }
77496 +    else
77497 +       status = ESRCH;
77498 +    
77499 +    kmutex_unlock(&rms_lock);
77500 +    return(status);
77501 +}
77502 +
77503 +int rms_prgsuspend(int id)
77504 +{
77505 +    struct prg_desc *program;
77506 +    int status = ESUCCESS;
77507 +
77508 +    kmutex_lock(&rms_lock);
77509 +    
77510 +    if ((program = findProgram(id)) != NULL)
77511 +    {
77512 +       if (CURUID() == 0 || CURUID() == program->uid)
77513 +       {
77514 +           program->flags &= ~PRG_RUNNING;
77515 +           program->flags |=  PRG_SUSPEND;
77516 +           program->accum_atime += program->ncpus * (gettime() - program->sched_time);
77517 +
77518 +           /* suspend/resume just use signals for now */
77519 +           prgsignal(program, SIGSTOP);
77520 +       }
77521 +       else
77522 +           status = EACCES;
77523 +    }
77524 +    else
77525 +       status = ESRCH;
77526 +
77527 +    kmutex_unlock(&rms_lock);
77528 +    return(status);
77529 +}
77530 +
77531 +int rms_prgresume(int id)
77532 +{
77533 +    struct prg_desc *program;
77534 +    int status = ESUCCESS;
77535 +
77536 +    kmutex_lock(&rms_lock);
77537 +    
77538 +    if ((program = findProgram(id)) != NULL)
77539 +    {
77540 +       if (CURUID() == 0 || CURUID() == program->uid)
77541 +       {
77542 +           program->flags &= ~PRG_SUSPEND;
77543 +           program->flags |=  PRG_RUNNING;
77544 +           program->sched_time = gettime();
77545 +           prgsignal(program, SIGCONT);
77546 +       }
77547 +       else
77548 +           status = EACCES;
77549 +    }
77550 +    else
77551 +       status = ESRCH;
77552 +
77553 +    kmutex_unlock(&rms_lock);
77554 +    return(status);
77555 +}
77556 +
77557 +
77558 +int rms_ncaps(int *ncaps)
77559 +{
77560 +    struct proc_desc *pdesc;
77561 +    int status = ESUCCESS;
77562 +    
77563 +    kmutex_lock(&rms_lock);
77564 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
77565 +    {
77566 +       if (copyout(&pdesc->program->ncaps, ncaps, sizeof(int)))
77567 +           status = EFAULT;
77568 +    }
77569 +    else
77570 +       status = ESRCH;
77571 +
77572 +    kmutex_unlock(&rms_lock);
77573 +    return(status);
77574 +}
77575 +
77576 +int rms_getprgid(pid_t pid, int *id)
77577 +{
77578 +    struct proc_desc *pdesc;
77579 +    int status = ESUCCESS;
77580 +    
77581 +    if (pid == 0)
77582 +       pid = CURPROC()->p_pid;
77583 +    
77584 +    kmutex_lock(&rms_lock);
77585 +    if ((pdesc = findProcess(pid)) != NULL)
77586 +    {
77587 +       if (copyout(&pdesc->program->id, id, sizeof(int)))
77588 +           status = EFAULT;
77589 +    }
77590 +    else
77591 +       status = ESRCH;
77592 +
77593 +    kmutex_unlock(&rms_lock);
77594 +    return(status);
77595 +}
77596 +
77597 +int rms_setcap(int index, int ctx)
77598 +{
77599 +    struct proc_desc *pdesc;
77600 +    struct cap_desc *cdesc;
77601 +    int status = EINVAL;
77602 +    
77603 +    DBG(printk("rms_setcap    :: process %d cap %d ctx %d\n",CURPROC()->p_pid,index,ctx));
77604 +
77605 +    kmutex_lock(&rms_lock);
77606 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
77607 +    {
77608 +       for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next)
77609 +           if (cdesc->index == index && 0 <= ctx && ctx <= (cdesc->cap.cap_highcontext - cdesc->cap.cap_lowcontext + 1))
77610 +           {
77611 +               pdesc->mycap = index;
77612 +               pdesc->myctx = cdesc->cap.cap_lowcontext + ctx;
77613 +               status = ESUCCESS;
77614 +           }
77615 +    }
77616 +    else
77617 +       status = ESRCH;
77618 +
77619 +    kmutex_unlock(&rms_lock);
77620 +    return(status);
77621 +}
77622 +
77623 +
77624 +int rms_mycap(int *index)
77625 +{
77626 +    struct proc_desc *pdesc;
77627 +    int status = ESUCCESS;
77628 +    
77629 +    DBG(printk("rms_mycap :: process %d\n", CURPROC()->p_pid));
77630 +    
77631 +    kmutex_lock(&rms_lock);
77632 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
77633 +    {
77634 +       DBG(printk("rms_mycap :: found process %d mycap = %d\n", CURPROC()->p_pid, pdesc->mycap));
77635 +       if (copyout(&pdesc->mycap, index, sizeof(int)))
77636 +           status = EFAULT;
77637 +    }
77638 +    else
77639 +       status = ESRCH;
77640 +
77641 +    kmutex_unlock(&rms_lock);
77642 +    return(status);
77643 +}
77644 +
77645 +int rms_getcap(int index, ELAN_CAPABILITY *cap)
77646 +{
77647 +    struct proc_desc *pdesc;
77648 +    struct cap_desc *cdesc;
77649 +    int status = ESUCCESS;
77650 +    
77651 +    kmutex_lock(&rms_lock);
77652 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
77653 +    {
77654 +       for (cdesc = pdesc->program->caps; cdesc; cdesc = cdesc->next)
77655 +           if (cdesc->index == index)
77656 +               break;
77657 +       
77658 +       if (cdesc)
77659 +       {
77660 +           /* tell each process about its own context */
77661 +           cdesc->cap.cap_mycontext = pdesc->myctx;
77662 +           
77663 +           if (copyout(&cdesc->cap, cap, ELAN_CAP_SIZE(&cdesc->cap)))
77664 +               status = EFAULT;
77665 +           
77666 +           DBG(printk("rms_getcap    :: program %d index %d context %d<-->%d\n", pdesc->program->id, 
77667 +                      cdesc->index, cdesc->cap.cap_lowcontext, cdesc->cap.cap_highcontext));
77668 +       }
77669 +       else
77670 +           status = EINVAL;
77671 +    }
77672 +    else
77673 +       status = ESRCH;
77674 +    
77675 +    kmutex_unlock(&rms_lock);
77676 +    return(status);
77677 +}
77678 +
77679 +static void
77680 +addProcDesc (struct proc_desc *pdesc,
77681 +            struct prg_desc *program, 
77682 +            struct proc_desc *parent, 
77683 +            int               pid)
77684 +{
77685 +       pdesc->mycap = (parent ? parent->mycap : ELAN_CAP_UNINITIALISED);
77686 +       pdesc->myctx = (parent ? parent->myctx : ELAN_CAP_UNINITIALISED);
77687 +       pdesc->program = program;
77688 +       pdesc->vp = -1;              /* assigned by elaninitdone */
77689 +       pdesc->pid = pid;
77690 +
77691 +       pdesc->next = program->pdescs;
77692 +       program->pdescs = pdesc;
77693 +       program->nprocs++;
77694 +}
77695 +
77696 +static int
77697 +rms_fork_callback (struct task_struct *curproc, struct task_struct *child)
77698 +{
77699 +    struct prg_desc *program;
77700 +    struct proc_desc *parent;
77701 +    struct proc_desc *pdesc = NULL;
77702 +
77703 +    kmutex_lock(&rms_lock);
77704 +    
77705 +    DBG(printk("rms_fork_func :: phase is fork pid %d child %d\n", curproc->p_pid, child->p_pid));
77706 +
77707 +    /*
77708 +     * find the process that forked
77709 +     */
77710 +    if ((parent = findProcess(curproc->p_pid)) != NULL)
77711 +    {
77712 +       program = parent->program;
77713 +       
77714 +       DBG(printk("rms_fork_func :: program is %d flags %d\n", program->id, program->flags));
77715 +       
77716 +       /*
77717 +        * processes can be blocked in fork while prgsignal is in progress
77718 +        * so check to see if the PRG_KILLED flag is set
77719 +        */
77720 +       if (program->flags & PRG_KILLED)
77721 +           DBG(printk("rms_fork_func :: fork handler called after program killed\n"));
77722 +       else
77723 +       {
77724 +           /*
77725 +            * create a new process description and add to program
77726 +            */
77727 +           KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE);
77728 +           if (pdesc == NULL)
77729 +               printk("rms_creatproc :: memory allocation failed\n");
77730 +           else
77731 +           {
77732 +               addProcDesc (pdesc, program, parent, child->p_pid);
77733 +               
77734 +               pdesc->task = child;
77735 +               
77736 +           }
77737 +       }
77738 +    }
77739 +    else
77740 +       DBG(printk("rms_fork_func :: no program\n"));
77741 +    
77742 +    kmutex_unlock (&rms_lock);
77743 +    
77744 +    return(pdesc == NULL);
77745 +}
77746 +
77747 +static void
77748 +rms_exit_callback (struct task_struct *curproc)
77749 +{
77750 +    struct prg_desc *program;
77751 +    struct proc_desc *pdesc, **pdescp;
77752 +
77753 +    kmutex_lock(&rms_lock);
77754 +    
77755 +    DBG(printk("rms_exit_func :: process %d exiting\n", curproc->p_pid));
77756 +
77757 +    /*
77758 +     * find the process that exited and accumulate 
77759 +     * resource usage in its parent program
77760 +     */
77761 +    for (program = programs, pdesc = 0; program && !pdesc; program = program->next)
77762 +    {
77763 +       pdescp = &program->pdescs;
77764 +       while ((pdesc = *pdescp) != NULL)
77765 +       {
77766 +           if (pdesc->pid == curproc->p_pid)
77767 +           {
77768 +               *pdescp = pdesc->next;
77769 +               removeProcDesc(program, pdesc);
77770 +               break;
77771 +           }
77772 +           else
77773 +               pdescp = &pdesc->next;
77774 +       }
77775 +    }
77776 +    kmutex_unlock  (&rms_lock);
77777 +}
77778 +
77779 +#ifndef NO_PTRACK
77780 +
77781 +static int
77782 +rms_ptrack_callback (void *arg, int phase, struct task_struct *child)
77783 +{
77784 +    switch (phase)
77785 +    {
77786 +    case PTRACK_PHASE_CLONE:
77787 +       if (rms_fork_callback (current, child))
77788 +           return PTRACK_DENIED;
77789 +       else
77790 +           return PTRACK_INNHERIT;
77791 +
77792 +    case PTRACK_PHASE_CLONE_FAIL:
77793 +       DBG(printk("rms_fork_func :: fork failed pid %d child %d\n", current->p_pid, child->p_pid));
77794 +       rms_exit_callback(child);
77795 +       break;
77796 +
77797 +    case PTRACK_PHASE_EXIT:
77798 +       rms_exit_callback(current);
77799 +       break;
77800 +    }
77801 +    return PTRACK_FINISHED;
77802 +}
77803 +
77804 +#endif
77805 +
77806 +/*
77807 + * rms_elaninitdone - mark a process as having successfully completed elan initialisation
77808 + */
77809 +int rms_elaninitdone(int vp)
77810 +{
77811 +    int status = ESUCCESS;
77812 +    struct proc_desc *pdesc;
77813 +    
77814 +    DBG(printk("rms_elaninit  :: process %d vp %d\n", CURPROC()->p_pid, vp));
77815 +    
77816 +    kmutex_lock(&rms_lock);
77817 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
77818 +       pdesc->vp = vp;
77819 +    else
77820 +       status = ESRCH;
77821 +    kmutex_unlock(&rms_lock);
77822 +    return(status);
77823 +}
77824 +
77825 +
77826 +/*
77827 + * rms_prgelanpids - return the ids of processes that have completed elan initialisation
77828 + */
77829 +int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids)
77830 +{
77831 +    struct prg_desc *program;
77832 +    struct proc_desc *pdesc;
77833 +    pid_t *pidbuf;
77834 +    int status = ESUCCESS, count = 0, *vpbuf;
77835 +    
77836 +    DBG(printk("rms_elanpids  :: process %d id %d\n", CURPROC()->p_pid, id));
77837 +    
77838 +    kmutex_lock(&rms_lock);
77839 +    
77840 +    if ((program = findProgram(id)) != NULL)
77841 +    {
77842 +       if (program->nprocs > 0)
77843 +       {
77844 +           KMEM_ALLOC(pidbuf, pid_t *, program->nprocs * sizeof(pid_t), TRUE);
77845 +           KMEM_ALLOC(vpbuf, int *, program->nprocs * sizeof(int), TRUE);
77846 +           if (pidbuf && vpbuf)
77847 +           {
77848 +               for (pdesc = program->pdescs; pdesc; pdesc = pdesc->next)
77849 +                   if (pdesc->vp >= 0)
77850 +                   {
77851 +                       pidbuf[count] = pdesc->pid;
77852 +                       vpbuf[count] = pdesc->vp;
77853 +                       count++;
77854 +                   }
77855 +           
77856 +               if (count > 0 && (copyout(pidbuf, pids, sizeof(pid_t) * MIN(count, maxpids)) ||
77857 +                                 copyout(vpbuf, vps, sizeof(int) * MIN(count, maxpids))))
77858 +                   status = EFAULT;
77859 +               
77860 +               KMEM_FREE(pidbuf, program->nprocs * sizeof(pid_t));
77861 +               KMEM_FREE(vpbuf, program->nprocs * sizeof(int));
77862 +           }
77863 +           else
77864 +               status = ENOMEM;
77865 +       }
77866 +
77867 +       if (copyout(&count, npids, sizeof(int)))
77868 +           status = EFAULT;
77869 +    }
77870 +    else
77871 +       status = ESRCH;
77872 +
77873 +    kmutex_unlock(&rms_lock);
77874 +    
77875 +    return(status);
77876 +
77877 +}
77878 +
77879 +int rms_setpset(int psid)
77880 +{
77881 +    struct prg_desc *program;
77882 +    struct proc_desc *pdesc;
77883 +    int status = ESUCCESS;
77884 +
77885 +    if (CURUID())
77886 +       return(EACCES);
77887 +
77888 +    kmutex_lock(&rms_lock);
77889 +    
77890 +    if ((pdesc = findProcess(CURPROC()->p_pid)) != NULL)
77891 +    {
77892 +       program = pdesc->program;
77893 +       program->psid = psid;
77894 +    }
77895 +    else
77896 +       status = ESRCH;
77897 +
77898 +    kmutex_unlock(&rms_lock);
77899 +    return(status);
77900 +}
77901 +
77902 +
77903 +int rms_getpset(int id, int *psid)
77904 +{
77905 +    struct prg_desc *program;
77906 +    int status = ESUCCESS;
77907 +    
77908 +    kmutex_lock(&rms_lock);
77909 +    if ((program = findProgram(id)) != NULL)
77910 +    {
77911 +       if (copyout(&program->psid, psid, sizeof(int)))
77912 +           status = EFAULT;
77913 +    }
77914 +    else
77915 +       status = ESRCH;
77916 +    
77917 +    kmutex_unlock(&rms_lock);
77918 +    return(status);
77919 +}
77920 +
77921 +int
77922 +rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers)
77923 +{
77924 +    struct prg_desc *program;
77925 +    int status = ESUCCESS;
77926 +    
77927 +    DBG(printk("rms_setelanst :: process %d id %d\n", CURPROC()->p_pid, id));
77928 +
77929 +    kmutex_lock(&rms_lock);
77930 +    if ((program = findProgram(id)) != NULL)
77931 +    {
77932 +       if (CURUID() == 0 || CURUID() == program->uid)
77933 +       {
77934 +           program->ebytes = ebytes;
77935 +           program->exfers = exfers;
77936 +       }
77937 +       else
77938 +           status = EACCES;
77939 +    }
77940 +    else
77941 +       status = ESRCH;
77942 +    
77943 +    kmutex_unlock(&rms_lock);
77944 +    return(status);
77945 +}
77946 +
77947 +int
77948 +rms_modversion(void)
77949 +{
77950 +    return(RMS_MODVERSION);
77951 +}
77952 +
77953 +int
77954 +rms_addproc(int id, pid_t pid)
77955 +{
77956 +    struct prg_desc *program;
77957 +    struct task_struct *task;
77958 +    struct proc_desc *parent;
77959 +    struct proc_desc *pdesc;
77960 +    int status;
77961 +    
77962 +    DBG(printk("rms_addproc   :: program %d proc %d pid %d\n", id, CURPROC()->p_pid, pid));
77963 +    
77964 +    kmutex_lock(&rms_lock);
77965 +    if ((program = findProgram(id)) != NULL)
77966 +    {
77967 +       if (CURUID() == 0 || CURUID() == program->uid)
77968 +       {
77969 +           if (findProcess(pid))
77970 +               status = ESRCH;
77971 +           else
77972 +           {
77973 +               KMEM_ALLOC(pdesc, struct proc_desc *, sizeof(struct proc_desc), TRUE);
77974 +
77975 +               if (pdesc == NULL)
77976 +                   status = ENOMEM;
77977 +               else
77978 +               {
77979 +                   read_lock(&tasklist_lock);
77980 +
77981 +                   if ((task = find_task_by_pid(pid)) == NULL)
77982 +                       status = ESRCH;
77983 +                   else
77984 +                   {
77985 +#ifdef NO_NPTL
77986 +                       pid_t ppid = task->p_pptr->pid;
77987 +#else
77988 +                       pid_t ppid = task->parent->pid;
77989 +#endif
77990 +                       for (parent = program->pdescs; parent; parent = parent->next)
77991 +                           if (parent->pid == ppid)
77992 +                               break;
77993 +                   
77994 +                       addProcDesc (pdesc, program, parent, pid);
77995 +                       status = ESUCCESS;
77996 +                   }
77997 +
77998 +                   read_unlock (&tasklist_lock);
77999 +                   
78000 +                   if (status != ESUCCESS)
78001 +                       KMEM_FREE (pdesc, sizeof (struct proc_desc));
78002 +               }
78003 +           }
78004 +       }
78005 +       else
78006 +           status = EACCES;
78007 +    }
78008 +    else
78009 +       status = ESRCH;
78010 +    
78011 +    kmutex_unlock(&rms_lock);
78012 +    return(status);
78013 +}
78014 +
78015 +
78016 +int
78017 +rms_removeproc(int id, pid_t pid)
78018 +{
78019 +    struct prg_desc *program;
78020 +    struct proc_desc *pdesc, **pdescp;
78021 +    int status;
78022 +    
78023 +    DBG(printk("rms_removproc :: program %d proc %d pid %d\n", id, CURPROC()->p_pid, pid));
78024 +
78025 +    kmutex_lock(&rms_lock);
78026 +    if ((program = findProgram(id)) != NULL)
78027 +    {
78028 +       if (CURUID() == 0 || CURUID() == program->uid)
78029 +       {
78030 +           status = ESRCH;
78031 +           pdescp = &program->pdescs;
78032 +           while ((pdesc = *pdescp) != NULL)
78033 +           {
78034 +               if (pdesc->pid == pid)
78035 +               {
78036 +                   
78037 +                   *pdescp = pdesc->next;
78038 +                   removeProcDesc(program, pdesc);
78039 +                   status = ESUCCESS;
78040 +                   break;
78041 +               }
78042 +               else
78043 +                   pdescp = &pdesc->next;
78044 +           }
78045 +       }
78046 +       else
78047 +           status = EACCES;
78048 +    }
78049 +    else
78050 +       status = ESRCH;
78051 +    
78052 +    kmutex_unlock(&rms_lock);
78053 +    return(status);
78054 +}
78055 +
78056 +
78057 +
78058 +/*
78059 + * Local variables:
78060 + * c-file-style: "stroustrup"
78061 + * End:
78062 + */
78063 +
78064 +
78065 +
78066 +
78067 +
78068 +
78069 +
78070 diff -urN clean/drivers/net/qsnet/rms/rms_kern_Linux.c linux-2.6.9/drivers/net/qsnet/rms/rms_kern_Linux.c
78071 --- clean/drivers/net/qsnet/rms/rms_kern_Linux.c        1969-12-31 19:00:00.000000000 -0500
78072 +++ linux-2.6.9/drivers/net/qsnet/rms/rms_kern_Linux.c  2005-09-07 10:35:04.000000000 -0400
78073 @@ -0,0 +1,489 @@
78074 +/*
78075 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78076 + *
78077 + *    For licensing information please see the supplied COPYING file
78078 + *
78079 + */
78080 +
78081 +#ident "$Id: rms_kern_Linux.c,v 1.25.2.3 2005/09/07 14:35:04 mike Exp $"
78082 +/*      $Source: /cvs/master/quadrics/rmsmod/rms_kern_Linux.c,v $*/
78083 +
78084 +#include <qsnet/kernel.h>
78085 +#include <qsnet/autoconf.h>
78086 +
78087 +#include <linux/sysctl.h>
78088 +#include <linux/init.h>
78089 +#include <qsnet/module.h>
78090 +#include <linux/proc_fs.h>
78091 +#ifndef NO_PTRACK
78092 +#include <linux/ptrack.h>
78093 +#endif
78094 +
78095 +#include <rms/rmscall.h>
78096 +#include <rms/rmsio.h>
78097 +
78098 +extern int ptrack_enabled;
78099 +
78100 +MODULE_AUTHOR("Quadrics Ltd");
78101 +MODULE_DESCRIPTION("RMS support module");
78102 +MODULE_LICENSE("GPL");
78103 +
78104 +#ifndef NO_PTRACK
78105 +module_param(ptrack_enabled, uint, 0);
78106 +#endif
78107 +
78108 +int rms_debug = 0;
78109 +
78110 +ctl_table rms_table[] = {
78111 +    {
78112 +       .ctl_name = 1,
78113 +       .procname = "rms_debug",
78114 +       .data     = &rms_debug,
78115 +       .maxlen   = sizeof(int),
78116 +       .mode     = 0644,
78117 +       .child    = NULL,
78118 +       .proc_handler = &proc_dointvec,
78119 +    },
78120 +    {0}
78121 +};
78122 +
78123 +ctl_table rms_root_table[] = {
78124 +    {
78125 +       .ctl_name = CTL_DEBUG,
78126 +       .procname = "rms",
78127 +       .data     = NULL,
78128 +       .maxlen   = 0,
78129 +       .mode     = 0555,
78130 +       .child    = rms_table,
78131 +    },
78132 +    {0}
78133 +};
78134 +
78135 +static struct ctl_table_header *rms_sysctl_header;
78136 +
78137 +static int rms_open (struct inode *ino, struct file *fp);
78138 +static int rms_release (struct inode *ino, struct file *fp);
78139 +static int rms_ioctl (struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg);
78140 +
78141 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
78142 +static int
78143 +rms_ioctl32_cmds[] =
78144 +{
78145 +    RMSIO_GETPRGID32,
78146 +    RMSIO_GETCAP32
78147 +};
78148 +
78149 +static int      rms_ioctl32 (unsigned int fd, unsigned int cmd, 
78150 +                            unsigned long arg, struct file *file);
78151 +#endif
78152 +
78153 +static struct file_operations rms_fops =
78154 +{
78155 +    .owner   = THIS_MODULE,
78156 +    .ioctl   = rms_ioctl,
78157 +    .open    = rms_open,
78158 +    .release = rms_release,
78159 +};
78160 +
78161 +struct proc_dir_entry *rms_procfs_programs;
78162 +static struct proc_dir_entry *rms_procfs_root;
78163 +
78164 +int version_callback(char* page, char** start, off_t off, int count, int* eof, void* data)
78165 +{
78166 +    return(sprintf(page, "$Id: rms_kern_Linux.c,v 1.25.2.3 2005/09/07 14:35:04 mike Exp $\n"));
78167 +}
78168 +
78169 +int ptrack_callback(char *page, char **start, off_t off, int count, int *eof, void *data)
78170 +{
78171 +    if (ptrack_enabled)
78172 +       return(sprintf(page, "enabled\n"));
78173 +    else
78174 +       return(sprintf(page, "disabled\n"));
78175 +}
78176 +
78177 +
78178 +static int __init rms_start(void)
78179 +{
78180 +    struct proc_dir_entry *p;
78181 +    int res;
78182 +
78183 +    if ((rms_sysctl_header = register_sysctl_table(rms_root_table, 1)) == 0)
78184 +    {
78185 +       printk ("rmsmod: failed to register sysctl table\n");
78186 +       return (-ENXIO);
78187 +    }
78188 +    
78189 +    if ((rms_procfs_root = proc_mkdir("rms",  NULL)) == NULL ||
78190 +       (rms_procfs_programs = proc_mkdir("programs",  rms_procfs_root)) == NULL ||
78191 +       (p = create_proc_entry ("control", S_IRUGO, rms_procfs_root)) == NULL)
78192 +    {
78193 +       unregister_sysctl_table (rms_sysctl_header);
78194 +       printk ("rmsmod: failed to register /proc/rms\n");
78195 +       return (-ENXIO);
78196 +    }
78197 +    p->proc_fops = &rms_fops;
78198 +    p->owner     = THIS_MODULE;
78199 +    p->data      = NULL;
78200 +
78201 +    if ((p = create_proc_entry ("version", S_IRUGO, rms_procfs_root)) != NULL)
78202 +    {
78203 +       p->owner = THIS_MODULE;
78204 +       p->data = NULL;
78205 +       p->read_proc = version_callback;
78206 +    }
78207 +
78208 +#ifndef NO_PTRACK
78209 +    if ((p = create_proc_entry ("ptrack", S_IRUGO, rms_procfs_root)) != NULL)
78210 +    {
78211 +       p->owner = THIS_MODULE;
78212 +       p->data = NULL;
78213 +       p->read_proc = ptrack_callback;
78214 +    }
78215 +#endif
78216 +
78217 +    if ((res = rms_init()) != ESUCCESS)
78218 +    {
78219 +#ifndef NO_PTRACK
78220 +       remove_proc_entry ("ptrack", rms_procfs_root);
78221 +#endif
78222 +       remove_proc_entry ("version", rms_procfs_root);
78223 +       remove_proc_entry ("programs", rms_procfs_root);
78224 +       remove_proc_entry ("control", rms_procfs_root);
78225 +       remove_proc_entry ("rms", NULL);
78226 +       unregister_sysctl_table (rms_sysctl_header);
78227 +       return (-res);
78228 +    }
78229 +
78230 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
78231 +    lock_kernel();
78232 +    {
78233 +       extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
78234 +       register int i;
78235 +       for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++)
78236 +           register_ioctl32_conversion (rms_ioctl32_cmds[i], rms_ioctl32);
78237 +    }
78238 +    unlock_kernel();
78239 +#endif
78240 +    return (0);
78241 +}
78242 +
78243 +static void __exit rms_exit(void)
78244 +{
78245 +    rms_fini();
78246 +
78247 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
78248 +    lock_kernel();
78249 +    {
78250 +       extern void unregister_ioctl32_conversion(unsigned int cmd);
78251 +       register int i;
78252 +
78253 +       for (i = 0; i < sizeof (rms_ioctl32_cmds)/sizeof(rms_ioctl32_cmds[0]); i++)
78254 +           unregister_ioctl32_conversion (rms_ioctl32_cmds[i]);
78255 +    }
78256 +    unlock_kernel();
78257 +#endif
78258 +
78259 +#ifndef NO_PTRACK
78260 +    remove_proc_entry ("ptrack", rms_procfs_root);
78261 +#endif
78262 +    remove_proc_entry ("version", rms_procfs_root);
78263 +    remove_proc_entry ("programs", rms_procfs_root);
78264 +    remove_proc_entry ("control", rms_procfs_root);
78265 +    remove_proc_entry ("rms", NULL);
78266 +    unregister_sysctl_table(rms_sysctl_header);
78267 +}
78268 +
78269 +/* Declare the module init and exit functions */
78270 +module_init(rms_start);
78271 +module_exit(rms_exit);
78272 +
78273 +static int
78274 +rms_open (struct inode *inode, struct file *fp)
78275 +{
78276 +    MOD_INC_USE_COUNT;
78277 +    fp->private_data = NULL;
78278 +
78279 +    return (0);
78280 +}
78281 +
78282 +static int
78283 +rms_release (struct inode *inode, struct file *fp)
78284 +{
78285 +    MOD_DEC_USE_COUNT;
78286 +    return (0);
78287 +}
78288 +
78289 +static int 
78290 +rms_ioctl(struct inode *inode, struct file *fp, unsigned int cmd, unsigned long arg)
78291 +{
78292 +    int res;
78293 +
78294 +    /* printk ("rmsmod: ioctl %x\n", cmd); */
78295 +
78296 +    switch (cmd) 
78297 +    {
78298 +/* no corepath support in Linux yet */
78299 +#if 0
78300 +    case RMSIO_SETCOREPATH:
78301 +       res = rms_setcorepath((caddr_t)arg);
78302 +       break;
78303 +       
78304 +    case RMSIO_GETCOREPATH:
78305 +    {
78306 +       RMSIO_GETCOREPATH_STRUCT args;
78307 +
78308 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78309 +           return (-EFAULT);
78310 +
78311 +       res = rms_getcorepath(args.pid, args.corepath, args.maxlen);
78312 +       break;
78313 +    }
78314 +#endif
78315 +       
78316 +    case RMSIO_PRGCREATE:
78317 +    {
78318 +       RMSIO_PRGCREATE_STRUCT args;
78319 +
78320 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78321 +           return (-EFAULT);
78322 +
78323 +       res = rms_prgcreate(args.id, args.uid, args.cpus);
78324 +       break;
78325 +    }
78326 +
78327 +    case RMSIO_PRGDESTROY:
78328 +       res = rms_prgdestroy(arg);
78329 +       break;
78330 +       
78331 +    case RMSIO_PRGIDS:
78332 +    {
78333 +       RMSIO_PRGIDS_STRUCT args;
78334 +       
78335 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78336 +           return (-EFAULT);
78337 +
78338 +       res = rms_prgids(args.maxids, args.prgids, args.nprgs);
78339 +       break;
78340 +    }
78341 +
78342 +    case RMSIO_PRGINFO:
78343 +    {
78344 +       RMSIO_PRGINFO_STRUCT args;
78345 +       
78346 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78347 +           return (-EFAULT);
78348 +
78349 +       res = rms_prginfo(args.id, args.maxpids, args.pids, args.nprocs);
78350 +       break;
78351 +    }
78352 +       
78353 +    case RMSIO_PRGSIGNAL:
78354 +    {
78355 +       RMSIO_PRGSIGNAL_STRUCT args;
78356 +
78357 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78358 +           return (-EFAULT);
78359 +
78360 +       res = rms_prgsignal(args.id, args.signo);
78361 +       break;
78362 +    }
78363 +       
78364 +    case RMSIO_PRGADDCAP:
78365 +    {
78366 +       RMSIO_PRGADDCAP_STRUCT args;
78367 +
78368 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78369 +           return (-EFAULT);
78370 +
78371 +       res = rms_prgaddcap(args.id, args.index, args.cap);
78372 +       break;
78373 +    }
78374 +
78375 +    case RMSIO_SETCAP:
78376 +    {
78377 +       RMSIO_SETCAP_STRUCT args;
78378 +
78379 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78380 +           return (-EFAULT);
78381 +
78382 +       res = rms_setcap(args.index, args.ctx);
78383 +       break;
78384 +    }
78385 +       
78386 +    case RMSIO_NCAPS:
78387 +       res = rms_ncaps((int *)arg);
78388 +       break;
78389 +       
78390 +    case RMSIO_GETPRGID:
78391 +    {
78392 +       RMSIO_GETPRGID_STRUCT args;
78393 +
78394 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78395 +           return (-EFAULT);
78396 +
78397 +       res = rms_getprgid(args.pid, args.id);
78398 +       break;
78399 +    }
78400 +
78401 +    case RMSIO_GETMYCAP:
78402 +       res = rms_mycap((int *)arg);
78403 +       break;
78404 +       
78405 +    case RMSIO_GETCAP:
78406 +    {
78407 +       RMSIO_GETCAP_STRUCT args;
78408 +
78409 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78410 +           return (-EFAULT);
78411 +
78412 +       res = rms_getcap(args.index, args.cap);
78413 +       break;
78414 +    }
78415 +
78416 +    case RMSIO_PRGGETSTATS:
78417 +    {
78418 +       /* no longer supported */
78419 +       res = EINVAL;
78420 +       break;
78421 +    }
78422 +    
78423 +    case RMSIO_PRGGETSTATS2:
78424 +    {
78425 +       RMSIO_PRGGETSTATS2_STRUCT args;
78426 +
78427 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78428 +           return (-EFAULT);
78429 +
78430 +       res = rms_prggetstats(args.id, args.stats);
78431 +       break;
78432 +    }
78433 +
78434 +    case RMSIO_PRGSUSPEND:
78435 +       res = rms_prgsuspend(arg);
78436 +       break;
78437 +       
78438 +    case RMSIO_PRGRESUME:
78439 +       res = rms_prgresume(arg);
78440 +       break;
78441 +
78442 +    case RMSIO_ELANINITDONE:
78443 +       res = rms_elaninitdone(arg);
78444 +       break;
78445 +
78446 +    case RMSIO_PRGELANPIDS:
78447 +    {
78448 +       RMSIO_PRGELANPIDS_STRUCT args;
78449 +
78450 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78451 +           return (-EFAULT);
78452 +
78453 +       res = rms_prgelanpids(args.id, args.maxpids, args.vps, args.pids, args.npids);
78454 +       break;
78455 +    }
78456 +
78457 +    case RMSIO_SETELANSTATS:
78458 +    {
78459 +       RMSIO_SETELANSTATS_STRUCT args;
78460 +       elanstats_t estats;
78461 +
78462 +       if (copy_from_user(&args, (void *)arg, sizeof(args)) ||
78463 +           copy_from_user(&estats, (void *)args.estats, sizeof(estats)))
78464 +           return(-EFAULT);
78465 +       
78466 +       res = rms_setelanstats(args.id, estats.ebytes, estats.exfers);
78467 +       break;
78468 +    }
78469 +
78470 +    case RMSIO_MODVERSION:
78471 +    {
78472 +       RMSIO_MODVERSION_STRUCT args;
78473 +       int version = rms_modversion();
78474 +       
78475 +       if (copy_from_user (&args, (void *)arg, sizeof (args)))
78476 +           return (-EFAULT);
78477 +       
78478 +       if (copyout(&version, args.version, sizeof(int)))
78479 +           res = EFAULT;
78480 +       else
78481 +           res = ESUCCESS;
78482 +
78483 +       break;
78484 +    }
78485 +
78486 +    /* 
78487 +     * Patch free kernel support, proc entries added manually
78488 +     */
78489 +    case RMSIO_ADDPROC:
78490 +    {
78491 +       RMSIO_PROC_STRUCT args;
78492 +       
78493 +       if (copy_from_user (&args, (void *)arg, sizeof (args)))
78494 +           return (-EFAULT);
78495 +
78496 +       res = rms_addproc(args.id, args.pid);
78497 +       break;
78498 +    }
78499 +    case RMSIO_REMOVEPROC:
78500 +    {
78501 +       RMSIO_PROC_STRUCT args;
78502 +       
78503 +       if (copy_from_user (&args, (void *)arg, sizeof (args)))
78504 +           return (-EFAULT);
78505 +
78506 +       res = rms_removeproc(args.id, args.pid);
78507 +       break;
78508 +    }
78509 +
78510 +    default:
78511 +       res = EINVAL;
78512 +       break;
78513 +    }
78514 +    
78515 +    /* printk ("rmsmod: ioctl %x res %d\n", cmd, res); */
78516 +    
78517 +    return ((res == 0) ? 0 : -res);
78518 +}
78519 +
78520 +#if defined(CONFIG_PPC64) || defined(CONFIG_SPARC64) || defined(CONFIG_X86_64)
78521 +static int
78522 +rms_ioctl32 (unsigned int fd, unsigned int cmd, unsigned long arg, struct file *file)
78523 +{
78524 +    int res;
78525 +
78526 +    switch (cmd)
78527 +    {
78528 +    case RMSIO_GETPRGID32:
78529 +    {
78530 +       RMSIO_GETPRGID_STRUCT32 args;
78531 +
78532 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78533 +           return (-EFAULT);
78534 +
78535 +       res = rms_getprgid(args.pid, (int *)(unsigned long) args.idptr);
78536 +       break;
78537 +    }
78538 +       
78539 +    case RMSIO_GETCAP32:
78540 +    {
78541 +       RMSIO_GETCAP_STRUCT32 args;
78542 +
78543 +       if (copy_from_user (&args, (void *) arg, sizeof (args)))
78544 +           return (-EFAULT);
78545 +
78546 +       res = rms_getcap(args.index, (ELAN_CAPABILITY *)(unsigned long) args.capptr);
78547 +       break;
78548 +    }
78549 +
78550 +    default:
78551 +       return (sys_ioctl (fd, cmd, arg));
78552 +    }
78553 +
78554 +    return ((res == 0) ? 0 : -res);
78555 +}
78556 +#endif
78557 +
78558 +/*
78559 + * Local variables:
78560 + * c-file-style: "stroustrup"
78561 + * End:
78562 + */
78563 diff -urN clean/drivers/net/Kconfig linux-2.6.9/drivers/net/Kconfig
78564 --- clean/drivers/net/Kconfig    2005-10-10 18:50:31.000000000 -0400
78565 +++ linux-2.6.9/drivers/net/Kconfig 2005-10-10 18:50:34.000000000 -0400
78566 @@ -2271,6 +2271,8 @@
78567
78568  source "drivers/net/tokenring/Kconfig"
78569
78570 +source "drivers/net/qsnet/Kconfig"
78571 +
78572  source "drivers/net/wireless/Kconfig"
78573
78574  source "drivers/net/pcmcia/Kconfig"
78575 --- clean/drivers/net/Makefile   2005-10-10 18:59:11.000000000 -0400
78576 +++ linux-2.6.9/drivers/net/Makefile        2005-10-10 18:59:28.000000000 -0400
78577 @@ -197,3 +197,4 @@
78578
78579  obj-$(CONFIG_NETCONSOLE) += netconsole.o
78580  obj-$(CONFIG_NETDUMP) += netdump.o
78581 +obj-$(CONFIG_QSNET) += qsnet/
78582 diff -urN clean/fs/exec.c linux-2.6.9/fs/exec.c
78583 --- clean/fs/exec.c     2005-10-10 17:43:57.000000000 -0400
78584 +++ linux-2.6.9/fs/exec.c       2005-10-10 17:47:17.000000000 -0400
78585 @@ -54,6 +54,8 @@
78586  #include <linux/kmod.h>
78587  #endif
78588  
78589 +#include <linux/ptrack.h>
78590 +
78591  int core_uses_pid;
78592  char core_pattern[65] = "core";
78593  int suid_dumpable = 0;
78594 @@ -1175,6 +1177,9 @@
78595         if (retval < 0)
78596                 goto out;
78597  
78598 +       /* notify any ptrack callbacks of the process exec */
78599 +       ptrack_call_callbacks(PTRACK_PHASE_EXEC, NULL);
78600 +
78601         retval = search_binary_handler(bprm,regs);
78602         if (retval >= 0) {
78603                 free_arg_pages(bprm);
78604 diff -urN clean/fs/open.c linux-2.6.9/fs/open.c
78605 --- clean/fs/open.c     2005-10-10 17:43:57.000000000 -0400
78606 +++ linux-2.6.9/fs/open.c       2005-10-10 17:47:17.000000000 -0400
78607 @@ -1029,6 +1029,8 @@
78608         goto out;
78609  }
78610  
78611 +EXPORT_SYMBOL(sys_open);
78612 +
78613  #ifndef __alpha__
78614  
78615  /*
78616 diff -urN clean/fs/read_write.c linux-2.6.9/fs/read_write.c
78617 --- clean/fs/read_write.c       2005-05-13 13:39:11.000000000 -0400
78618 +++ linux-2.6.9/fs/read_write.c 2005-10-10 17:47:17.000000000 -0400
78619 @@ -145,6 +145,7 @@
78620  bad:
78621         return retval;
78622  }
78623 +EXPORT_SYMBOL(sys_lseek);
78624  
78625  #ifdef __ARCH_WANT_SYS_LLSEEK
78626  asmlinkage long sys_llseek(unsigned int fd, unsigned long offset_high,
78627 diff -urN clean/fs/select.c linux-2.6.9/fs/select.c
78628 --- clean/fs/select.c   2005-05-13 13:39:11.000000000 -0400
78629 +++ linux-2.6.9/fs/select.c     2005-10-10 17:47:17.000000000 -0400
78630 @@ -529,3 +529,4 @@
78631         poll_freewait(&table);
78632         return err;
78633  }
78634 +EXPORT_SYMBOL_GPL(sys_poll);
78635 diff -urN clean/include/elan/bitmap.h linux-2.6.9/include/elan/bitmap.h
78636 --- clean/include/elan/bitmap.h 1969-12-31 19:00:00.000000000 -0500
78637 +++ linux-2.6.9/include/elan/bitmap.h   2004-01-20 12:32:15.000000000 -0500
78638 @@ -0,0 +1,74 @@
78639 +/*
78640 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78641 + *
78642 + *    For licensing information please see the supplied COPYING file
78643 + *
78644 + */
78645 +
78646 +#ifndef __QSNET_BITMAP_H
78647 +#define __QSNET_BITMAP_H
78648 +
78649 +#ident "$Id: bitmap.h,v 1.5 2004/01/20 17:32:15 david Exp $"
78650 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/bitmap.h,v $ */
78651 +
78652 +typedef unsigned int                   bitmap_t;
78653 +
78654 +#define BT_NBIPUL                      32                      /* n bits per bitmap_t */
78655 +#define BT_ULSHIFT                     5                       /* log 2 BT_NBIPUL to extract word index */
78656 +#define BT_ULMASK                      0x1f                    /* to extract bit index */
78657 +
78658 +#define BT_WIM(bitmap,bitindex)                ((bitmap)[(bitindex) >> BT_ULSHIFT])            /* word in map */
78659 +#define BT_BIW(bitindex)               (1 << ((bitindex) & BT_ULMASK))         /* bit in word */
78660 +
78661 +/* BT_BITOUL -- n bits to n words */
78662 +#define BT_BITOUL(nbits)               (((nbits) + BT_NBIPUL -1) / BT_NBIPUL)
78663 +
78664 +#define BT_TEST(bitmap,bitindex)       ((BT_WIM((bitmap), (bitindex)) & BT_BIW(bitindex)) ? 1 : 0)
78665 +#define BT_SET(bitmap,bitindex)                do { BT_WIM((bitmap), (bitindex)) |= BT_BIW(bitindex); } while (0)
78666 +#define BT_CLEAR(bitmap,bitindex)      do { BT_WIM((bitmap), (bitindex)) &= ~BT_BIW(bitindex); } while (0)
78667 +
78668 +/* return first free bit in the bitmap, or -1 for failure */
78669 +extern int  bt_freebit (bitmap_t *bitmap, int nbits);
78670 +
78671 +/* return the index of the lowest set bit in the bitmap or -1 for failure */
78672 +extern int bt_lowbit (bitmap_t *bitmap, int nbits);
78673 +
78674 +/* return the index of the next set/clear bit in the bitmap or -1 for failure */
78675 +extern int bt_nextbit (bitmap_t *bitmap, int nbits, int last, int isset);
78676 +
78677 +/* copy/zero/fill/compare a bit map */
78678 +extern void bt_copy (bitmap_t *a, bitmap_t *b, int nbits);
78679 +extern void bt_zero (bitmap_t *a, int nbits);
78680 +extern void bt_fill (bitmap_t *a, int nbits);
78681 +extern int  bt_cmp (bitmap_t *a, bitmap_t *b, int nbits);
78682 +
78683 +/* intersect bitmap 'a' with bitmap 'b' and return in 'a' */
78684 +extern void bt_intersect (bitmap_t *a, bitmap_t *b, int nbits);
78685 +
78686 +/* remove/add bitmap 'b' from bitmap 'a' */
78687 +extern void bt_remove (bitmap_t *a, bitmap_t *b, int nbits);
78688 +extern void bt_add (bitmap_t *a, bitmap_t *b, int nbits);
78689 +
78690 +/* check whether bitmap 'a' spans bitmap 'b' */
78691 +extern int  bt_spans (bitmap_t *a, bitmap_t *b, int nbits);
78692 +
78693 +/* copy [base,base+nbits-1] from 'a' to 'b' */
78694 +extern void bt_subset (bitmap_t *a, bitmap_t *b, int base, int nbits);
78695 +
78696 +/* find bits clear in 'a' and set in 'b', put result in 'c' */
78697 +extern void bt_up (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits);
78698 +
78699 +/* find bits set in 'a' and clear in 'b', put result in 'c' */
78700 +extern void bt_down (bitmap_t *a, bitmap_t *b, bitmap_t *c, int nbits);
78701 +
78702 +/* return number of bits set in bitmap */
78703 +extern int  bt_nbits (bitmap_t *a, int nbits);
78704 +
78705 +
78706 +#endif /* __QSNET_BITMAP_H */
78707 +
78708 +/*
78709 + * Local variables:
78710 + * c-file-style: "linux"
78711 + * End:
78712 + */
78713 diff -urN clean/include/elan/capability.h linux-2.6.9/include/elan/capability.h
78714 --- clean/include/elan/capability.h     1969-12-31 19:00:00.000000000 -0500
78715 +++ linux-2.6.9/include/elan/capability.h       2005-05-17 05:52:53.000000000 -0400
78716 @@ -0,0 +1,198 @@
78717 +/*
78718 + *    Copyright (c) 2003 by Quadrics Limited.
78719 + * 
78720 + *    For licensing information please see the supplied COPYING file
78721 + *
78722 + */
78723 +
78724 +#ident "@(#)$Id: capability.h,v 1.18 2005/05/17 09:52:53 addy Exp $"
78725 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/capability.h,v $*/
78726 +
78727 +#ifndef __ELAN_CAPABILITY_H
78728 +#define __ELAN_CAPABILITY_H
78729 +
78730 +#include <elan/bitmap.h>
78731 +
78732 +/* Maximum number of rails */
78733 +#define ELAN_MAX_RAILS          (31)
78734 +/* Maximum number of virtual processes we support */
78735 +#define ELAN_MAX_VPS           (16384)
78736 +
78737 +/* Number of words in a bitmap capability */
78738 +#define ELAN_BITMAPSIZE                BT_BITOUL(ELAN_MAX_VPS)
78739 +
78740 +/* Guaranteed invalid values */
78741 +#define ELAN_INVALID_PROCESS   (0x7fffffff)            /* A GUARANTEED invalid process # */
78742 +#define ELAN_INVALID_NODE      (0xFFFF)
78743 +#define ELAN_INVALID_CONTEXT   (0xFFFF)
78744 +
78745 +/* Number of values in a user key */
78746 +#define ELAN_USERKEY_ENTRIES   4
78747 +
78748 +typedef void * ELAN_CAP_OWNER;
78749 +
78750 +/* 
78751 + * When used in userspace this is relative to the base of
78752 + * the capability but is an absolute location for kernel space.
78753 + */
78754 +typedef struct elan_location
78755 +{
78756 +       unsigned short loc_node;
78757 +       unsigned short loc_context;
78758 +} ELAN_LOCATION;
78759 +
78760 +typedef struct elan_userkey
78761 +{
78762 +       unsigned        key_values[ELAN_USERKEY_ENTRIES];
78763 +} ELAN_USERKEY;
78764 +
78765 +typedef struct elan_capability
78766 +{
78767 +       ELAN_USERKEY    cap_userkey;                            /* User defined protection */
78768 +
78769 +       int             cap_version;                            /* Version number */
78770 +       unsigned short  cap_type;                               /* Capability Type */
78771 +       unsigned short  cap_spare;                              /* spare was cap_elan_type */
78772 +
78773 +       int             cap_lowcontext;                         /* low context number in block */
78774 +       int             cap_highcontext;                        /* high context number in block */
78775 +       int             cap_mycontext;                          /* my context number */
78776 +    
78777 +       int             cap_lownode;                            /* low elan id of group */
78778 +       int             cap_highnode;                           /* high elan id of group */
78779 +
78780 +       unsigned int    cap_railmask;                           /* which rails this capability is valid for */
78781 +       
78782 +       bitmap_t        cap_bitmap[ELAN_BITMAPSIZE];            /* Bitmap of process to processor translation */
78783 +} ELAN_CAPABILITY;
78784 +
78785 +#define ELAN_CAP_UNINITIALISED         (-1)
78786 +
78787 +#define ELAN_CAP_VERSION_NUMBER                (0x00010002)
78788 +
78789 +#define ELAN_CAP_NUM_NODES(cap)                ((cap)->cap_highnode - (cap)->cap_lownode + 1)
78790 +#define ELAN_CAP_NUM_CONTEXTS(cap)     ((cap)->cap_highcontext - (cap)->cap_lowcontext + 1)
78791 +
78792 +/* using or defining our own MIN/MAX had conflicts with dunix so we define ELAN_ ones */
78793 +#define ELAN_MIN(a,b)  ((a) > (b) ? (b) : (a))
78794 +#define ELAN_MAX(a,b)  ((a) > (b) ? (a) : (b))
78795 +#define ELAN_CAP_BITMAPSIZE(cap)       (ELAN_MAX (ELAN_MIN (ELAN_CAP_NUM_NODES(cap) * ELAN_CAP_NUM_CONTEXTS(cap), ELAN_MAX_VPS), 0))
78796 +
78797 +#define ELAN_CAP_SIZE(cap)             (offsetof (ELAN_CAPABILITY, cap_bitmap[BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap))]))
78798 +#define ELAN_CAP_ENTRIES(cap)           (((cap)->cap_type & ELAN_CAP_TYPE_NO_BITMAP) ? ELAN_CAP_BITMAPSIZE((cap)) : bt_nbits((cap)->cap_bitmap, ELAN_CAP_BITMAPSIZE((cap))))
78799 +
78800 +#define ELAN_CAP_IS_RAIL_SET(cap,rail)  ((cap)->cap_railmask & (1<<rail))
78801 +
78802 +#define ELAN_CAP_KEY_MATCH(cap1,cap2)  ((cap1)->cap_userkey.key_values[0] == (cap2)->cap_userkey.key_values[0] && \
78803 +                                        (cap1)->cap_userkey.key_values[1] == (cap2)->cap_userkey.key_values[1] && \
78804 +                                        (cap1)->cap_userkey.key_values[2] == (cap2)->cap_userkey.key_values[2] && \
78805 +                                        (cap1)->cap_userkey.key_values[3] == (cap2)->cap_userkey.key_values[3])
78806 +
78807 +#define ELAN_CAP_TYPE_MATCH(cap1,cap2)  ((cap1)->cap_version           == (cap2)->cap_version           && \
78808 +                                        (cap1)->cap_type              == (cap2)->cap_type)
78809 +
78810 +#define ELAN_CAP_GEOM_MATCH(cap1,cap2) ((cap1)->cap_lowcontext        == (cap2)->cap_lowcontext        && \
78811 +                                        (cap1)->cap_highcontext       == (cap2)->cap_highcontext       && \
78812 +                                        (cap1)->cap_lownode           == (cap2)->cap_lownode           && \
78813 +                                        (cap1)->cap_highnode          == (cap2)->cap_highnode          && \
78814 +                                         (cap1)->cap_railmask          == (cap2)->cap_railmask          && \
78815 +                                        !bcmp (&(cap1)->cap_bitmap[0], &(cap2)->cap_bitmap[0],            \
78816 +                                               BT_BITOUL(ELAN_CAP_BITMAPSIZE(cap1)*sizeof(bitmap_t))))
78817 +
78818 +#define ELAN_CAP_MATCH(cap1,cap2)      (ELAN_CAP_KEY_MATCH (cap1, cap2)  && \
78819 +                                        ELAN_CAP_TYPE_MATCH (cap1, cap2) && \
78820 +                                        ELAN_CAP_GEOM_MATCH (cap1, cap2))
78821 +
78822 +#define ELAN_CAP_VALID_MYCONTEXT(cap)   (    ((cap)->cap_lowcontext  != ELAN_CAP_UNINITIALISED)     \
78823 +                                         && ((cap)->cap_mycontext   != ELAN_CAP_UNINITIALISED)     \
78824 +                                         && ((cap)->cap_highcontext != ELAN_CAP_UNINITIALISED)     \
78825 +                                         && ((cap)->cap_lowcontext <= (cap)->cap_mycontext)        \
78826 +                                         && ((cap)->cap_mycontext <= (cap)->cap_highcontext)) 
78827 +
78828 +/*
78829 + * Definitions for type 
78830 + */
78831 +#define ELAN_CAP_TYPE_BLOCK            1               /* Block distribution */
78832 +#define ELAN_CAP_TYPE_CYCLIC           2               /* Cyclic distribution */
78833 +#define ELAN_CAP_TYPE_KERNEL           3               /* Kernel capability */
78834 +
78835 +#define ELAN_CAP_TYPE_MASK             (0xFFF)         /* Mask for type */
78836 +
78837 +/* OR these bits in for extra features */
78838 +#define ELAN_CAP_TYPE_HWTEST           (1 << 12)       /* Hardware test capability type */
78839 +#define ELAN_CAP_TYPE_MULTI_RAIL       (1 << 13)       /* "new" multi rail capability */
78840 +#define ELAN_CAP_TYPE_NO_BITMAP                (1 << 14)       /* don't use bit map */
78841 +#define ELAN_CAP_TYPE_BROADCASTABLE    (1 << 15)       /* broadcastable */
78842 +
78843 +
78844 +extern void          elan_nullcap     (ELAN_CAPABILITY *cap);
78845 +extern char         *elan_capability_string (ELAN_CAPABILITY *cap, char *str);
78846 +extern ELAN_LOCATION elan_vp2location (unsigned process, ELAN_CAPABILITY *cap);
78847 +extern int           elan_location2vp (ELAN_LOCATION location, ELAN_CAPABILITY *cap);
78848 +extern int           elan_nvps        (ELAN_CAPABILITY *cap);
78849 +extern int           elan_nlocal      (int node, ELAN_CAPABILITY *cap);
78850 +extern int           elan_maxlocal    (ELAN_CAPABILITY *cap);
78851 +extern int           elan_localvps    (int node, ELAN_CAPABILITY *cap, int *vps, int size);
78852 +extern int           elan_nrails      (ELAN_CAPABILITY *cap);
78853 +extern int           elan_rails       (ELAN_CAPABILITY *cap, int *rails);
78854 +extern int           elan_cap_overlap (ELAN_CAPABILITY *cap1, ELAN_CAPABILITY *cap2);
78855 +
78856 +/*
78857 + * capability creation/access fns provide for running
78858 + * new libelan code on old OS releases
78859 + */
78860 +extern int elan_lowcontext(ELAN_CAPABILITY *cap);
78861 +extern int elan_mycontext(ELAN_CAPABILITY *cap);
78862 +extern int elan_highcontext(ELAN_CAPABILITY *cap);
78863 +extern int elan_lownode(ELAN_CAPABILITY *cap);
78864 +extern int elan_highnode(ELAN_CAPABILITY *cap);
78865 +extern int elan_captype(ELAN_CAPABILITY *cap);
78866 +extern int elan_railmask(ELAN_CAPABILITY *cap);
78867 +
78868 +extern int elan_getenvCap (ELAN_CAPABILITY *cap, int index);
78869 +extern ELAN_CAPABILITY *elan_createCapability(void);
78870 +extern ELAN_CAPABILITY *elan_copyCapability(ELAN_CAPABILITY *from, int ctxShift);
78871 +extern int elan_generateCapability(char *string);
78872 +extern int elan_getMachinesCap (char *filename, ELAN_CAPABILITY *cap);
78873 +
78874 +typedef struct elan_cap_struct
78875 +{
78876 +       ELAN_CAP_OWNER   owner;
78877 +       ELAN_CAPABILITY  cap;
78878 +
78879 +       int              attached; /* count of people attached */
78880 +       unsigned int     active;   /* ie not being destroyed   */
78881 +} ELAN_CAP_STRUCT;
78882 +
78883 +#if ! defined(__KERNEL__)
78884 +extern void          elan_get_random_key(ELAN_USERKEY *key);
78885 +extern int           elan_prefrails(ELAN_CAPABILITY *cap, int *pref, int nvp);
78886 +#endif
78887 +
78888 +#if defined(__KERNEL__)
78889 +/* capability.c */
78890 +extern int elan_validate_cap  (ELAN_CAPABILITY *cap);
78891 +extern int elan_validate_map  (ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
78892 +
78893 +extern int elan_create_cap  (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
78894 +extern int elan_destroy_cap (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
78895 +extern int elan_create_vp   (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
78896 +extern int elan_destroy_vp  (ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
78897 +
78898 +typedef        void (*ELAN_DESTROY_CB)(void *args, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
78899 +
78900 +extern int elan_attach_cap  (ELAN_CAPABILITY *cap, unsigned int rail, void *args, ELAN_DESTROY_CB callback);
78901 +extern int elan_detach_cap  (ELAN_CAPABILITY *cap, unsigned int rail);
78902 +
78903 +extern int elan_get_caps    (uint *number_of_results, uint array_size, ELAN_CAP_STRUCT *caps);
78904 +extern int elan_cap_dump    (void);
78905 +#endif /* __KERNEL__ */
78906 +
78907 +
78908 +#endif /* __ELAN_CAPABILITY_H */
78909 +
78910 +/*
78911 + * Local variables:
78912 + * c-file-style: "linux"
78913 + * End:
78914 + */
78915 diff -urN clean/include/elan/cm.h linux-2.6.9/include/elan/cm.h
78916 --- clean/include/elan/cm.h     1969-12-31 19:00:00.000000000 -0500
78917 +++ linux-2.6.9/include/elan/cm.h       2005-03-30 09:06:34.000000000 -0500
78918 @@ -0,0 +1,396 @@
78919 +/*
78920 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
78921 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
78922 + *
78923 + *    For licensing information please see the supplied COPYING file
78924 + *
78925 + */
78926 +
78927 +#ifndef __ELAN_CM_H
78928 +#define __ELAN_CM_H
78929 +
78930 +#ident "@(#)$Id: cm.h,v 1.16 2005/03/30 14:06:34 mike Exp $"
78931 +/*      $Source: /cvs/master/quadrics/epmod/cm.h,v $*/
78932 +
78933 +#include <elan/statemap.h>
78934 +
78935 +#if defined(DIGITAL_UNIX)
78936 +/*
78937 + * On Tru64 - SMP doesn't mean Symmetric - cpu 0 is a master cpu and is responsible
78938 + * for handling all PCI interrupts and "funneled" operations.  When a kernel thread
78939 + * is made runnable, the scheduler will choose which cpu it will run on at that time,
78940 + * and will only execute a higher priority thread from another cpu's run queue when 
78941 + * it becomes totally idle (apparently also including user processes).  Also the 
78942 + * assert_wait_mesg_timo function uses a per-cpu timeout - these can only get executed
78943 + * at "preemptable" places - so again have no guarantee on when they will execute if
78944 + * they happen to be queued on a "hogged" cpu. The combination of these mean that the Tru64
78945 + * is incapable of scheduling a high priority kernel  thread within a deterministic time
78946 + * of when it should have become runnable - wonderful.
78947 + *
78948 + * Hence the solution Compaq have proposed it to schedule a timeout onto all of the
78949 + * cpu's timeouts lists at the maximum frequency that we could want to execute code,
78950 + * then to handle the scheduling of work between these ourselves.  With a bit of luck
78951 + * ..... at least one cpu will be sufficiently unloaded to allow us to get a chance
78952 + * to do our important work.
78953 + *
78954 + * However ..... this still is not reliable, since timeouts under Tru64 are still 
78955 + * only run when the currently running kernel thread "co-operates" by calling one
78956 + * of a number of functions which is permitted to run the "lwc"s AND is not holding
78957 + * any spinlocks AND is running at IPL 0.   However Compaq are unable to provide
78958 + * any upper limit on the time between the "lwc"'s being run and so it is possible
78959 + * for all 4 cpus to not run them for an unbounded time.
78960 + *
78961 + * The solution proposed is to use the RM_TEMP_BACKDOOR hook which was added to 
78962 + * hardclock() to "solve" this problem for Memory Channel.  However, since it
78963 + * is called within the clock interrupt it is not permissible to acquire any
78964 + * spinlocks, nor to run for "too long".  This means that it is not possible to
78965 + * call the heartbeat algorithm from this hook.  
78966 + *
78967 + * Our solution to these limitations is to use the hook to cause an elan interrupt 
78968 + * to be delivered, by issuing a mis-aligned SetEvent command - this causes the device 
78969 + * to trap and ep_cprocTrap() can then run the heartbeat code.  However there is a lock 
78970 + * order violation between the elan_dev::IntrLock and ep_dev::Lock, so we have to 
78971 + * use a trylock and if we fail, then hope that when the interrupt is delievered again
78972 + * some time later we will succeed.
78973 + *
78974 + * However this only works if the kernel is able to respond to the Elan interrupt,
78975 + * so we panic inside the RM_TEMP_BACKDOOR hook if the SetEvent's interrupt has
78976 + * not been taken for more than an CM_TIMER_SCHEDULE_TIMEOUT interval.
78977 + *
78978 + * In fact this is exactly the mechanism that other operating systems use to
78979 + * execute timeouts, since the hardclock interrupt posts a low priority 
78980 + * "soft interrupt" which "pre-eempts" the currently running thread and then
78981 + * executes the timeouts. To block timeouts you use splsoftclock() the same as 
78982 + * in Tru64.
78983 + */
78984 +#define PER_CPU_TIMEOUT                        TRUE
78985 +#endif
78986 +
78987 +
78988 +#define CM_SGMTS_PER_LEVEL             8                       /* maximum nodes in each segment */
78989 +#define CM_MAX_LEVELS                  6                       /* maximum depth of tree */
78990 +
78991 +/* message buffers/dmas/events etc */
78992 +#define CM_NUM_NODE_MSG_BUFFERS                (CM_MAX_LEVELS * CM_SGMTS_PER_LEVEL) /* subordinates and leader */
78993 +#define CM_NUM_SPARE_MSG_BUFFERS       8                       /* spare msg buffers for non-connected nodes */
78994 +#define CM_NUM_MSG_BUFFERS             (CM_NUM_NODE_MSG_BUFFERS + CM_NUM_SPARE_MSG_BUFFERS)
78995 +
78996 +#define CM_INPUTQ_ENTRIES              128                     /* # entries in input queue */
78997 +
78998 +#define CM_PERIODIC_DISCOVER_INTERVAL  (5000)          /* 5s (infrequent resolution of established leader conflicts) */
78999 +#define CM_URGENT_DISCOVER_INTERVAL    (50)            /* 0.05s (more frequently than heartbeats 'cause they don't retry) */
79000 +#define CM_HEARTBEAT_INTERVAL          (125)           /* 0.125s */
79001 +#define CM_TIMER_SCHEDULE_TIMEOUT      (4000)          /* 4s     Maximum time before a timer that's scheduled to run gets to run (eg blocked in interrupt handlers etc) */
79002 +#define CM_THREAD_SCHEDULE_TIMEOUT     (30000)         /* 30s    Maximum time before a thread that's scheduled to run gets to run */
79003 +#define CM_THREAD_RUNNING_TIMEOUT      (30000)         /* 30s    Don't expect the manager thread to be running longer than this */
79004 +
79005 +#ifdef PER_CPU_TIMEOUT
79006 +#define CM_PERCPU_TIMEOUT_INTERVAL     (50)            /* 0.05s (must be less than all above intervals) */
79007 +#define CM_PACEMAKER_INTERVAL          (500)           /* 0.05s */
79008 +
79009 +#define CM_HEARTBEAT_OVERDUE           (250)           /* 0.25s Maximum time a timeout can be overdue before taking extreme action */
79010 +#endif
79011 +
79012 +#define CM_P2P_DMA_RETRIES             31
79013 +
79014 +/* We expect at least 1 point-to-point message in CM_P2P_MSG_RETRIES
79015 + * attempts to send one to be successfully received */
79016 +#define CM_P2P_MSG_RETRIES             8
79017 +
79018 +/* We expect at least 1 broadcast message in CM_BCAST_MSG_RETRIES attempts
79019 + * to send one to be successfully received. */
79020 +#define CM_BCAST_MSG_RETRIES           40
79021 +
79022 +/* Heartbeat timeout allows for a node stalling and still getting its
79023 + * heartbeat. The 2 is to allow for unsynchronised polling times. */
79024 +#define CM_HEARTBEAT_TIMEOUT           (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_P2P_MSG_RETRIES) * CM_HEARTBEAT_INTERVAL)
79025 +
79026 +/* Discover timeout must be > CM_HEARTBEAT_TIMEOUT to guarantee that people
79027 + * who don't see discovery are considered dead by their leader.  This
79028 + * ensures that by the time a node "discovers" it is a leader of a segment,
79029 + * the previous leader of that segment will have been deemed to be dead by
79030 + * the parent segment's leader */
79031 +#define CM_DISCOVER_TIMEOUT            (CM_TIMER_SCHEDULE_TIMEOUT + (2 + CM_BCAST_MSG_RETRIES) * CM_URGENT_DISCOVER_INTERVAL)
79032 +
79033 +#define CM_WAITING_TIMEOUT             (CM_DISCOVER_TIMEOUT * 100)
79034 +
79035 +/*
79036 + * Convert all timeouts specified in mS into "ticks"
79037 + */
79038 +#define MSEC2TICKS(MSEC)               (((MSEC)*HZ)/1000)
79039 +
79040 +
79041 +/* statemap entry */
79042 +typedef struct cm_state_entry
79043 +{
79044 +    int16_t           level;                   /* cluster level to apply to */
79045 +    int16_t          offset;                   /* from statemap_findchange() */
79046 +    uint16_t          seg[BT_NBIPUL/16];       /* ditto */
79047 +} CM_STATEMAP_ENTRY;
79048 +
79049 +/* offset is >= 0 for a change to apply and */
79050 +#define STATEMAP_NOMORECHANGES (-1)            /* end of a set of updates */
79051 +#define STATEMAP_RESET         (-2)            /* reset the target map */
79052 +#define STATEMAP_NOOP          (-3)            /* null token */
79053 +
79054 +/* CM message format */
79055 +typedef int8_t CM_SEQ;                         /* heartbeat sequence numbers; at least 2 bits, signed */
79056 +
79057 +/*
79058 + * The message header is received into the last 64 byte block of 
79059 + * the input queue and the Version *MUST* be the last word of the 
79060 + * block to ensure that we can see that the whole of the message
79061 + * has reached main memory after we've seen the input queue pointer
79062 + * have been updated.
79063 + */
79064 +typedef struct ep_cm_hdr
79065 +{
79066 +    uint32_t          Pad0;
79067 +    uint32_t          Pad1;
79068 +
79069 +    uint8_t           Type;
79070 +    uint8_t           Level;
79071 +    CM_SEQ            Seq;                     /* precision at least 2 bits each*/
79072 +    CM_SEQ            AckSeq;
79073 +    
79074 +    uint16_t          NumMaps;
79075 +    uint16_t          MachineId;
79076 +
79077 +    uint16_t          NodeId;
79078 +    uint16_t          Checksum;
79079 +
79080 +    uint32_t           Timestamp;
79081 +    uint32_t           ParamHash;
79082 +    uint32_t          Version;
79083 +} CM_HDR;
79084 +
79085 +#define CM_HDR_SIZE        sizeof (CM_HDR)
79086 +
79087 +typedef struct cm_msg
79088 +{
79089 +    union {
79090 +       CM_STATEMAP_ENTRY   Statemaps[1];               /* piggy-backed statemap updates start here */
79091 +       uint8_t             Space[EP_SYSTEMQ_MSG_MAX - CM_HDR_SIZE];
79092 +    } Payload;
79093 +    
79094 +    CM_HDR                 Hdr;
79095 +} CM_MSG;
79096 +
79097 +/* The maximum number of statemap entries that can fit within an EP_CM_MSG_BUFFER */
79098 +#define CM_MSG_MAXMAPS         (offsetof (CM_MSG, Hdr) / sizeof (CM_STATEMAP_ENTRY))
79099 +#define CM_MSG_MAP(mapno)      (CM_MSG_MAXMAPS - (mapno) - 1)
79100 +
79101 +/* The actual special message base & size, including 'nmaps' piggy-backed statemap entries */
79102 +#define CM_MSG_BASE(nmaps)     (nmaps == 0 ? offsetof (CM_MSG, Hdr) : offsetof (CM_MSG, Payload.Statemaps[CM_MSG_MAXMAPS - nmaps]))
79103 +#define CM_MSG_SIZE(nmaps)     (sizeof (CM_MSG) - CM_MSG_BASE(nmaps))
79104 +
79105 +#define CM_MSG_VERSION                         0xcad00005
79106 +#define CM_MSG_TYPE_RESOLVE_LEADER             0
79107 +#define CM_MSG_TYPE_DISCOVER_LEADER            1
79108 +#define CM_MSG_TYPE_NOTIFY                     2
79109 +#define CM_MSG_TYPE_DISCOVER_SUBORDINATE       3
79110 +#define CM_MSG_TYPE_IMCOMING                   4
79111 +#define CM_MSG_TYPE_HEARTBEAT                  5
79112 +#define CM_MSG_TYPE_REJOIN                     6
79113 +
79114 +/* CM machine segment */
79115 +typedef struct cm_sgmtMaps
79116 +{
79117 +    u_char       InputMapValid;                        /* Input map has been set */
79118 +    u_char       OutputMapValid;               /* Output map has been set */
79119 +    u_char       SentChanges;                  /* got an outstanding STATEMAP_NOMORECHANGES to send */
79120 +    statemap_t  *OutputMap;                    /* state to send */
79121 +    statemap_t  *InputMap;                     /* state received */
79122 +    statemap_t  *CurrentInputMap;              /* state being received */
79123 +} CM_SGMTMAPS;
79124 +
79125 +typedef struct cm_sgmt
79126 +{
79127 +   u_char       State;
79128 +   u_char       SendMaps;
79129 +   u_char       MsgAcked;
79130 +   CM_SEQ      MsgSeq;
79131 +   CM_SEQ      AckSeq;
79132 +   u_int       NodeId;
79133 +   long                UpdateTick;
79134 +   long                WaitingTick;
79135 +   uint32_t    Timestamp;
79136 +   CM_SGMTMAPS  Maps[CM_MAX_LEVELS];           /* Maps[i] == state for cluster level i */
79137 +   u_short      MsgNumber;                     /* msg buffer to use */
79138 +   u_short     NumMaps;                        /* # maps in message buffer */
79139 +   u_short      Level;
79140 +   u_short      Sgmt;
79141 +} CM_SGMT;
79142 +
79143 +#define CM_SGMT_ABSENT         0               /* no one there at all */
79144 +#define CM_SGMT_WAITING                1               /* waiting for subtree to connect */
79145 +#define CM_SGMT_COMING         2               /* expecting a subtree to reconnect */
79146 +#define CM_SGMT_PRESENT                3               /* connected */
79147 +
79148 +typedef struct cm_level
79149 +{
79150 +    int               SwitchLevel;
79151 +    u_int             MinNodeId;
79152 +    u_int              NumNodes;
79153 +    u_int              NumSegs;
79154 +    u_int              MySgmt;
79155 +   
79156 +    /* SubordinateMap[i] == OR of all subordinate maps on this level and down for cluster level i */
79157 +    u_char             SubordinateMapValid[CM_MAX_LEVELS];
79158 +    statemap_t        *SubordinateMap[CM_MAX_LEVELS];
79159 +
79160 +    /* maps/flags for this cluster level */
79161 +    u_int              Online:1;                               /* I've gone online (seen myself running) */
79162 +    u_int             Restarting:1;                            /* driving my own restart bit */
79163 +    u_char            OfflineReasons;                          /* forced offline by broadcast */
79164 +
79165 +    u_char             GlobalMapValid;
79166 +    u_char             SubTreeMapValid;
79167 +    u_long            Connected;
79168 +
79169 +    statemap_t        *LocalMap;               /* state bits I drive */
79170 +    statemap_t        *SubTreeMap;             /* OR of my and my subtree states */
79171 +    statemap_t        *GlobalMap;              /* OR of all node states */
79172 +    statemap_t        *LastGlobalMap;          /* last map I saw */
79173 +    statemap_t        *TmpMap;                 /* scratchpad */
79174 +
79175 +    CM_SGMT           Sgmts[CM_SGMTS_PER_LEVEL];
79176 +} CM_LEVEL;
79177 +
79178 +#define CM_ROLE_LEADER_CANDIDATE       0
79179 +#define CM_ROLE_LEADER                 1
79180 +#define CM_ROLE_SUBORDINATE            2
79181 +
79182 +/* global status bits */
79183 +#define CM_GSTATUS_STATUS_MASK         0x03    /* bits nodes drive to broadcast their status */
79184 +#define CM_GSTATUS_ABSENT              0x00    /* Off the network */
79185 +#define CM_GSTATUS_STARTING            0x01    /* I'm waiting for everyone to see me online */
79186 +#define CM_GSTATUS_RUNNING              0x03   /* up and running */
79187 +#define CM_GSTATUS_CLOSING             0x02    /* I'm waiting for everyone to see me offline */
79188 +
79189 +#define CM_GSTATUS_ACK_MASK            0x0c    /* bits node drive to ack other status */
79190 +#define CM_GSTATUS_MAY_START           0x04    /* Everyone thinks I may not start */
79191 +#define CM_GSTATUS_MAY_RUN             0x08    /* Everyone thinks I may not run */
79192 +
79193 +#define CM_GSTATUS_RESTART             0x10    /* Someone thinks I should restart */
79194 +#define CM_GSTATUS_BITS                        5
79195 +
79196 +#define CM_GSTATUS_BASE(node)          ((node) * CM_GSTATUS_BITS)
79197 +
79198 +#if defined(PER_CPU_TIMEOUT)
79199 +typedef struct cm_timeout_data
79200 +{
79201 +    long               ScheduledAt;                            /* lbolt timeout was scheduled to run at */
79202 +
79203 +    unsigned long       EarlyCount;                            /* # times run early than NextRun */
79204 +    unsigned long      MissedCount;                            /* # times run on time - but someone else was running it */
79205 +    unsigned long       WastedCount;                           /* # times we failed to get the spinlock */
79206 +    unsigned long      WorkCount;                              /* # times we're the one running */
79207 +
79208 +    unsigned long      WorstDelay;                             /* worst scheduling delay */
79209 +    unsigned long      BestDelay;                              /* best scheduling delay */
79210 +
79211 +    unsigned long      WorstLockDelay;                         /* worst delay before getting rail->Lock */
79212 +
79213 +    unsigned long      WorstHearbeatDelay;                     /* worst delay before calling DoHeartbeatWork */
79214 +} CM_TIMEOUT_DATA;
79215 +#endif
79216 +
79217 +typedef struct cm_rail
79218 +{
79219 +    EP_RAIL          *Rail;                                    /* rail we're associated with */
79220 +    struct list_head   Link;                                   /*   and linked on the CM_SUBSYS */
79221 +
79222 +    uint32_t          ParamHash;                               /* hash of critical parameters */
79223 +    uint32_t           Timestamp;
79224 +    long              DiscoverStartTick;                       /* when discovery started */
79225 +
79226 +    unsigned int       NodeId;                                 /* my node id */
79227 +    unsigned int       NumNodes;                               /*   and number of nodes */
79228 +    unsigned int       NumLevels;                              /* number of levels computed from machine size */
79229 +    int                       BroadcastLevel;
79230 +    long              BroadcastLevelTick;
79231 +    unsigned int       TopLevel;                               /* level at which I'm not a leader */
79232 +    unsigned char      Role;                                   /* state at TopLevel */
79233 +
79234 +    EP_INPUTQ        *PolledQueue;                             /* polled input queue */
79235 +    EP_INPUTQ        *IntrQueue;                               /* intr input queue */
79236 +    EP_OUTPUTQ       *MsgQueue;                                /* message  */
79237 +    unsigned int       NextSpareMsg;                           /* next "spare" message buffer to use */
79238 +
79239 +    EP_CM_RAIL_STATS   Stats;                                  /* statistics */
79240 +
79241 +    kmutex_t          Mutex;
79242 +    spinlock_t        Lock;
79243 +    
79244 +    long              NextHeartbeatTime;                       /* next time to check/send heartbeats */
79245 +    long              NextDiscoverTime;                        /* next time to progress discovery  */
79246 +    long              NextRunTime;                             /* the earlier of the above two or intr requires inputq poll*/
79247 +
79248 +    unsigned int       OfflineReasons;                         /* forced offline by procfs/manager thread stuck */
79249 +
79250 +#if defined(PER_CPU_TIMEOUT)
79251 +    spinlock_t        HeartbeatTimeoutsLock;                   /* spinlock to sequentialise per-cpu timeouts */
79252 +    long              HeartbeatTimeoutsStarted;                /* bitmap of which timeouts have started */
79253 +    long              HeartbeatTimeoutsStopped;                /* bitmap of which timeouts have stopped */
79254 +    long              HeartbeatTimeoutsShouldStop;             /* flag to indicate timeouts should stop */
79255 +    kcondvar_t        HeartbeatTimeoutsWait;                   /* place to sleep waiting for timeouts to stop */
79256 +    long              HeartbeatTimeoutRunning;                 /* someone is running the timeout - don't try for the lock */
79257 +
79258 +    long              HeartbeatTimeoutOverdue;                 /* heartbeat seen as overdue - interrupt requested */
79259 +
79260 +    CM_TIMEOUT_DATA   *HeartbeatTimeoutsData;                  /* per timeout data */
79261 +#else
79262 +    struct timer_list  HeartbeatTimer;                         /* timer for heartbeat/discovery */
79263 +#endif
79264 +
79265 +    CM_LEVEL           Levels[CM_MAX_LEVELS];
79266 +} CM_RAIL;
79267 +
79268 +/* OfflineReasons (both per-rail and  */
79269 +#define CM_OFFLINE_BROADCAST           (1 << 0)
79270 +#define CM_OFFLINE_PROCFS              (1 << 1)
79271 +#define CM_OFFLINE_MANAGER             (1 << 2)
79272 +
79273 +typedef struct cm_subsys
79274 +{
79275 +    EP_SUBSYS          Subsys;
79276 +    CM_RAIL            *Rails[EP_MAX_RAILS];
79277 +} CM_SUBSYS;
79278 +
79279 +extern int  MachineId;
79280 +
79281 +extern void cm_node_disconnected (EP_RAIL *rail, unsigned nodeId);
79282 +extern void cm_restart_node (EP_RAIL *rail, unsigned nodeId);
79283 +extern void cm_restart_comms (CM_RAIL *cmRail);
79284 +extern int  cm_init (EP_SYS *sys);
79285 +
79286 +extern void DisplayRail(EP_RAIL *rail);
79287 +extern void DisplaySegs (EP_RAIL *rail);
79288 +extern void DisplayStatus (EP_RAIL *rail);
79289 +
79290 +extern void DisplayNodeMaps  (DisplayInfo *di, CM_RAIL *cmRail);
79291 +extern void DisplayNodeSgmts (DisplayInfo *di, CM_RAIL *cmRail);
79292 +extern void DisplayRailDo    (DisplayInfo *di, EP_RAIL *rail);
79293 +
79294 +extern int    cm_read_cluster(EP_RAIL *rail,char *page);
79295 +extern void   cm_force_offline (EP_RAIL *rail, int offline, unsigned int reason);
79296 +
79297 +extern int    cm_svc_indicator_set      (EP_RAIL *rail, int svc_indicator);
79298 +extern int    cm_svc_indicator_clear    (EP_RAIL *rail, int svc_indicator);
79299 +extern int    cm_svc_indicator_is_set   (EP_RAIL *rail, int svc_indicator, int nodeId);
79300 +extern int    cm_svc_indicator_bitmap   (EP_RAIL *rail, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
79301 +
79302 +/* cm_procfs.c */
79303 +extern void   cm_procfs_init (CM_SUBSYS *subsys);
79304 +extern void   cm_procfs_fini (CM_SUBSYS *subsys);
79305 +extern void   cm_procfs_rail_init (CM_RAIL *rail);
79306 +extern void   cm_procfs_rail_fini (CM_RAIL *rail);
79307 +
79308 +/*
79309 + * Local variables:
79310 + * c-file-style: "stroustrup"
79311 + * End:
79312 + */
79313 +#endif /* __ELAN_CM_H */
79314 +
79315 diff -urN clean/include/elan/compat.h linux-2.6.9/include/elan/compat.h
79316 --- clean/include/elan/compat.h 1969-12-31 19:00:00.000000000 -0500
79317 +++ linux-2.6.9/include/elan/compat.h   2003-12-03 08:18:48.000000000 -0500
79318 @@ -0,0 +1,23 @@
79319 +/*
79320 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79321 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
79322 + *
79323 + *    For licensing information please see the supplied COPYING file
79324 + *
79325 + */
79326 +
79327 +#ident "@(#)$Id: compat.h,v 1.1 2003/12/03 13:18:48 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
79328 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/compat.h,v $*/
79329 +
79330 +#ifndef __ELAN_COMPAT_H
79331 +#define __ELAN_COMPAT_H
79332 +
79333 +#define ELANMOD_STATS_MAP      ELAN_STATS_MAP
79334 +
79335 +#endif  /* __ELAN_COMPAT_H */
79336 +
79337 +/*
79338 + * Local variables:
79339 + * c-file-style: "stroustrup"
79340 + * End:
79341 + */
79342 diff -urN clean/include/elan/device.h linux-2.6.9/include/elan/device.h
79343 --- clean/include/elan/device.h 1969-12-31 19:00:00.000000000 -0500
79344 +++ linux-2.6.9/include/elan/device.h   2003-09-24 09:55:37.000000000 -0400
79345 @@ -0,0 +1,62 @@
79346 +/*
79347 + *    Copyright (c) 2003 by Quadrics Limited.
79348 + * 
79349 + *    For licensing information please see the supplied COPYING file
79350 + *
79351 + */
79352 +
79353 +#ident "@(#)$Id: device.h,v 1.5 2003/09/24 13:55:37 david Exp $"
79354 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/device.h,v $*/
79355 +
79356 +#ifndef __ELAN_DEVICE_H
79357 +#define __ELAN_DEVICE_H
79358 +
79359 +/* non-kernel headings */
79360 +typedef unsigned int ELAN_DEV_IDX;
79361 +
79362 +#if defined(__KERNEL__)
79363 +
79364 +/* device callbacks */
79365 +#define ELAN_DEV_OPS_VERSION ((u_int)1)
79366 +
79367 +typedef struct elan_dev_ops
79368 +{
79369 +       /* dev info */
79370 +       int (*get_position)          (void *user_data, ELAN_POSITION *position);
79371 +       int (*set_position)          (void *user_data, unsigned short nodeId, unsigned short numNodes);
79372 +
79373 +       /* cap */
79374 +
79375 +       u_int  ops_version;
79376 +} ELAN_DEV_OPS;
79377 +
79378 +typedef struct elan_dev_struct
79379 +{
79380 +       struct list_head node;
79381 +
79382 +       ELAN_DEV_IDX     devidx;
79383 +       ELAN_DEVINFO    *devinfo;
79384 +       void            *user_data;
79385 +       ELAN_DEV_OPS *ops;
79386 +} ELAN_DEV_STRUCT;
79387 +
79388 +/* device.c */
79389 +extern ELAN_DEV_IDX         elan_dev_register   (ELAN_DEVINFO    *devinfo, 
79390 +                                                   ELAN_DEV_OPS *ops,
79391 +                                                   void            *userdata);
79392 +extern int                  elan_dev_deregister (ELAN_DEVINFO *devinfo);
79393 +
79394 +extern ELAN_DEV_STRUCT * elan_dev_find       (ELAN_DEV_IDX devidx);
79395 +
79396 +extern ELAN_DEV_STRUCT * elan_dev_find_byrail(unsigned short deviceid, unsigned rail);
79397 +extern int                  elan_dev_dump       (void);
79398 +
79399 +#endif /* __KERNEL__ */
79400 +
79401 +#endif /* __ELAN_DEVICE_H */
79402 +
79403 +/*
79404 + * Local variables:
79405 + * c-file-style: "linux"
79406 + * End:
79407 + */
79408 diff -urN clean/include/elan/devinfo.h linux-2.6.9/include/elan/devinfo.h
79409 --- clean/include/elan/devinfo.h        1969-12-31 19:00:00.000000000 -0500
79410 +++ linux-2.6.9/include/elan/devinfo.h  2005-02-01 07:35:53.000000000 -0500
79411 @@ -0,0 +1,92 @@
79412 +/*
79413 + *    Copyright (c) 2003 by Quadrics Limited.
79414 + * 
79415 + *    For licensing information please see the supplied COPYING file
79416 + *
79417 + */
79418 +
79419 +#ident "@(#)$Id: devinfo.h,v 1.16 2005/02/01 12:35:53 david Exp $"
79420 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/devinfo.h,v $*/
79421 +
79422 +#ifndef __ELAN_DEVINFO_H
79423 +#define __ELAN_DEVINFO_H
79424 +
79425 +#define ELAN_MAX_LEVELS                        8                       /* maximum number of levels in switch network */
79426 +
79427 +typedef struct elan_position
79428 +{
79429 +       unsigned        pos_mode;                               /* mode we're operating in */
79430 +       unsigned        pos_nodeid;                             /* port this device connected to */
79431 +       unsigned        pos_levels;                             /* number of levels to top switch */
79432 +       unsigned        pos_nodes;                              /* number of nodes in the machine */
79433 +       unsigned        pos_random_disabled;                    /* levels at which "random" routing is not possible */
79434 +       unsigned char   pos_arity[ELAN_MAX_LEVELS];             /* number of downlinks per switch level */
79435 +} ELAN_POSITION;
79436 +
79437 +#define ELAN4_PARAM_PCI_PADDING_FLAGS          0               /* A bit field, representing good places to burst across the pci                      */
79438 +#define ELAN4_PARAM_EVENT_COPY_WIN             1               /* The num of cmds when it becomes quicker to send via event copy than write directly */
79439 +#define ELAN4_PARAM_WRITE_COMBINING            2               /* If set the device supports bursts accesses across the pci bus                      */
79440 +#define ELAN4_PARAM_DRIVER_FEATURES            11              /* device driver features */
79441 +#define ELAN4_PARAM_COUNT                      12
79442 +
79443 +/* values for ELAN4_PARAM_DRIVER_FEATURES, dev_features */
79444 +#define ELAN4_FEATURE_PCI_MAP          (1 << 0)                        /* must use pci mapping functions */
79445 +#define ELAN4_FEATURE_64BIT_READ       (1 << 1)                        /* must perform 64 bit PIO reads */
79446 +#define ELAN4_FEATURE_PIN_DOWN         (1 << 2)                        /* must pin down pages */
79447 +#define ELAN4_FEATURE_NO_WRITE_COMBINE (1 << 3)                        /* don't allow write combinig at all */
79448 +#define ELAN4_FEATURE_NO_IOPROC                (1 << 4)                        /* unpatched kernel or disabled by procfs */
79449 +#define ELAN4_FEATURE_NO_IOPROC_UPDATE (1 << 5)                        /* don't do coproc update xlation loading */
79450 +#define ELAN4_FEATURE_NO_PAGEFAULT     (1 << 6)                        /* don't do pagefaulting */
79451 +#define ELAN4_FEATURE_NO_PREFETCH      (1 << 7)                        /* don't allow prefetching of elan sdram/cports */
79452 +
79453 +typedef struct elan_params
79454 +{
79455 +       unsigned        values[ELAN4_PARAM_COUNT];
79456 +} ELAN_PARAMS;
79457 +
79458 +/* values for pos_mode */
79459 +#define ELAN_POS_UNKNOWN               0                       /* network position unknown */
79460 +#define ELAN_POS_MODE_SWITCHED         1                       /* connected to a switch */
79461 +#define ELAN_POS_MODE_LOOPBACK         2                       /* loopback connector */
79462 +#define ELAN_POS_MODE_BACKTOBACK       3                       /* cabled back-to-back to another node */
79463 +
79464 +typedef struct elan_devinfo
79465 +{
79466 +       unsigned short  dev_vendor_id;                          /* pci vendor id */
79467 +       unsigned short  dev_device_id;                          /* pci device id */
79468 +       unsigned char   dev_revision_id;                        /* pci revision id */
79469 +       unsigned char   dev_instance;                           /* device instance number */
79470 +       unsigned char   dev_rail;                               /* device rail number */
79471 +
79472 +       unsigned short  dev_driver_version;                     /* device driver version */
79473 +       unsigned short  dev_params_mask;                        /* mask for valid entries in dev_params array */
79474 +       ELAN_PARAMS     dev_params;                             /* device parametization */
79475 +
79476 +       unsigned        dev_num_down_links_value;               /* hint to machine size */
79477 +} ELAN_DEVINFO;
79478 +
79479 +#define PCI_VENDOR_ID_QUADRICS         0x14fc
79480 +#define PCI_DEVICE_ID_ELAN3            0x0000
79481 +#define   PCI_REVISION_ID_ELAN3_REVA   0x0000
79482 +#define   PCI_REVISION_ID_ELAN3_REVB   0x0001
79483 +#define PCI_DEVICE_ID_ELAN4            0x0001
79484 +#define   PCI_REVISION_ID_ELAN4_REVA   0x0000
79485 +#define   PCI_REVISION_ID_ELAN4_REVB   0x0001
79486 +
79487 +#if defined(__KERNEL__)
79488 +/* devinfo.c */
79489 +#include <elan/capability.h>
79490 +#include <elan/device.h>
79491 +extern int elan_get_devinfo  (ELAN_DEV_IDX devidx, ELAN_DEVINFO  *devinfo);
79492 +extern int elan_get_position (ELAN_DEV_IDX devidx, ELAN_POSITION *position);
79493 +extern int elan_set_position (ELAN_DEV_IDX devidx, unsigned short nodeId, unsigned short numNodes);
79494 +#endif /* __KERNEL__ */
79495 +
79496 +
79497 +#endif /* __ELAN_DEVINFO_H */
79498 +
79499 +/*
79500 + * Local variables:
79501 + * c-file-style: "linux"
79502 + * End:
79503 + */
79504 diff -urN clean/include/elan/elanmoddebug.h linux-2.6.9/include/elan/elanmoddebug.h
79505 --- clean/include/elan/elanmoddebug.h   1969-12-31 19:00:00.000000000 -0500
79506 +++ linux-2.6.9/include/elan/elanmoddebug.h     2005-05-24 13:07:44.000000000 -0400
79507 @@ -0,0 +1,64 @@
79508 +/*
79509 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79510 + *
79511 + *    For licensing information please see the supplied COPYING file
79512 + *
79513 + */
79514 +
79515 +#ifndef _ELAN_DEBUG_H
79516 +#define _ELAN_DEBUG_H
79517 +
79518 +
79519 +#ident "$Id: elanmoddebug.h,v 1.6 2005/05/24 17:07:44 addy Exp $"
79520 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmoddebug.h,v $ */
79521 +
79522 +#if defined(__KERNEL__)
79523 +
79524 +/* 0 | QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE */
79525 +extern int elan_debug_mode; 
79526 +extern int elan_debug_mask;
79527 +
79528 +#define ELAN_DBG_VP            0x00000001
79529 +#define ELAN_DBG_CAP            0x00000002
79530 +#define ELAN_DBG_CTRL           0x00000004
79531 +#define ELAN_DBG_SYS_FN         0x00000008
79532 +#define ELAN_DBG_USERCOPY      0x00000010
79533 +#define ELAN_DBG_ALL           0xffffffff
79534 +
79535 +
79536 +#if defined(DEBUG_PRINTF)
79537 +#  define ELAN_DEBUG0(m,fmt)                   ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt)             : (void)0)
79538 +#  define ELAN_DEBUG1(m,fmt,a)                 ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a)           : (void)0)
79539 +#  define ELAN_DEBUG2(m,fmt,a,b)               ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b)         : (void)0)
79540 +#  define ELAN_DEBUG3(m,fmt,a,b,c)             ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c)       : (void)0)
79541 +#  define ELAN_DEBUG4(m,fmt,a,b,c,d)           ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d)     : (void)0)
79542 +#  define ELAN_DEBUG5(m,fmt,a,b,c,d,e)         ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e)   : (void)0)
79543 +#  define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f)       ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode,fmt,a,b,c,d,e,f) : (void)0)
79544 +#ifdef __GNUC__
79545 +#  define ELAN_DEBUG(m,args...)                        ((elan_debug_mask&(m)) ? qsnet_debugf(elan_debug_mode, ##args)         : (void)0)
79546 +#endif
79547 +
79548 +#else
79549 +
79550 +#  define ELAN_DEBUG0(m,fmt)                   (0)
79551 +#  define ELAN_DEBUG1(m,fmt,a)                 (0)
79552 +#  define ELAN_DEBUG2(m,fmt,a,b)               (0)
79553 +#  define ELAN_DEBUG3(m,fmt,a,b,c)             (0)
79554 +#  define ELAN_DEBUG4(m,fmt,a,b,c,d)           (0)
79555 +#  define ELAN_DEBUG5(m,fmt,a,b,c,d,e)         (0)
79556 +#  define ELAN_DEBUG6(m,fmt,a,b,c,d,e,f)       (0)
79557 +#ifdef __GNUC__
79558 +#  define ELAN_DEBUG(m,args...)
79559 +#endif
79560 +
79561 +#endif /* DEBUG_PRINTF */
79562 +
79563 +
79564 +#endif /* __KERNEL__ */
79565 +#endif /* _ELAN_DEBUG_H */
79566 +
79567 +/*
79568 + * Local variables:
79569 + * c-file-style: "linux"
79570 + * End:
79571 + */
79572 diff -urN clean/include/elan/elanmod.h linux-2.6.9/include/elan/elanmod.h
79573 --- clean/include/elan/elanmod.h        1969-12-31 19:00:00.000000000 -0500
79574 +++ linux-2.6.9/include/elan/elanmod.h  2005-05-26 12:14:21.000000000 -0400
79575 @@ -0,0 +1,83 @@
79576 +/*
79577 + *    Copyright (c) 2003 by Quadrics Limited.
79578 + * 
79579 + *    For licensing information please see the supplied COPYING file
79580 + *
79581 + */
79582 +
79583 +#ident "@(#)$Id: elanmod.h,v 1.13 2005/05/26 16:14:21 addy Exp $"
79584 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod.h,v $*/
79585 +
79586 +#ifndef __ELAN_MOD_H
79587 +#define __ELAN_MOD_H
79588 +
79589 +#include <elan/devinfo.h>
79590 +#include <elan/device.h>
79591 +#include <elan/capability.h>
79592 +#include <elan/stats.h>
79593 +
79594 +#if defined(__KERNEL__)
79595 +
79596 +#include <elan/elanmoddebug.h>
79597 +
79598 +/* Linux RW semaphores */
79599 +#include <asm/semaphore.h>
79600 +#include <linux/rwsem.h>
79601 +
79602 +#define ELANMOD_RWLOCK                 struct rw_semaphore
79603 +#define ELANMOD_RWLOCK_INIT(l)         init_rwsem(l)
79604 +#define ELANMOD_RWLOCK_DESTROY(l)      
79605 +#define ELANMOD_RWLOCK_READ(l)         down_read(l)
79606 +#define ELANMOD_RWLOCK_WRITE(l)                down_write(l)
79607 +#define ELANMOD_RWLOCK_READ_UNLOCK(l)  up_read(l)
79608 +#define ELANMOD_RWLOCK_WRITE_UNLOCK(l) up_write(l)
79609 +
79610 +extern ELANMOD_RWLOCK elan_rwlock;
79611 +
79612 +/* elan_general.c */
79613 +extern int elan_init(void);
79614 +extern int elan_fini(void);
79615 +
79616 +/* return codes, -ve => errno, +ve => success */
79617 +#define ELAN_CAP_OK  (0)
79618 +#define ELAN_CAP_RMS (1)
79619 +
79620 +#define ELAN_USER_ATTACH    (1)
79621 +#define ELAN_USER_DETACH    (2)
79622 +#define ELAN_USER_P2P       (3)
79623 +#define ELAN_USER_BROADCAST (4)
79624 +
79625 +extern int elanmod_classify_cap (ELAN_POSITION *position, ELAN_CAPABILITY *cap, unsigned use);
79626 +
79627 +#define ELAN_USER_BASE_CONTEXT_NUM     0x000                   /* first user allowable context */
79628 +#define ELAN_USER_TOP_CONTEXT_NUM      0x7FF                   /* last user allowable context */
79629 +
79630 +#define ELAN_RMS_BASE_CONTEXT_NUM      0x400                   /* reserved for RMS allocation */
79631 +#define ELAN_RMS_TOP_CONTEXT_NUM       0x7FF
79632 +
79633 +#define ELAN_USER_CONTEXT(ctx)         ((ctx) >= ELAN_USER_BASE_CONTEXT_NUM && \
79634 +                                        (ctx) <= ELAN_USER_TOP_CONTEXT_NUM)    
79635 +
79636 +#define ELAN_RMS_CONTEXT(ctx)          ((ctx) >= ELAN_RMS_BASE_CONTEXT_NUM && \
79637 +                                        (ctx) <= ELAN_RMS_TOP_CONTEXT_NUM)    
79638 +
79639 +
79640 +/* capability.c */
79641 +struct elan_cap_node_struct;
79642 +extern int elan_usercopy_attach (ELAN_CAPABILITY *cap, struct elan_cap_node_struct **node_ptr, void *handle, void *owner);
79643 +extern int elan_usercopy_detach (struct elan_cap_node_struct *cap_ptr, void *owner);
79644 +extern int elan_usercopy_handle (struct elan_cap_node_struct *cap_ptr, int ctxId, void **handlep);
79645 +
79646 +/* usercopy.c */
79647 +extern int elan_usercopy (void *remote, void *local, size_t len, int write,
79648 +                         int ctxId, struct elan_cap_node_struct *cap_ptr);
79649 +
79650 +#endif /* __KERNEL__ */
79651 +
79652 +#endif /* __ELAN_MOD_H */
79653 +
79654 +/*
79655 + * Local variables:
79656 + * c-file-style: "linux"
79657 + * End:
79658 + */
79659 diff -urN clean/include/elan/elanmod_linux.h linux-2.6.9/include/elan/elanmod_linux.h
79660 --- clean/include/elan/elanmod_linux.h  1969-12-31 19:00:00.000000000 -0500
79661 +++ linux-2.6.9/include/elan/elanmod_linux.h    2005-02-22 07:29:22.000000000 -0500
79662 @@ -0,0 +1,164 @@
79663 +/*
79664 + *    Copyright (c) 2003 by Quadrics Ltd.
79665 + * 
79666 + *    For licensing information please see the supplied COPYING file
79667 + *
79668 + */
79669 +
79670 +#ident "@(#)$Id: elanmod_linux.h,v 1.7 2005/02/22 12:29:22 addy Exp $"
79671 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/elanmod_linux.h,v $*/
79672 +
79673 +#ifndef __ELAN_MOD_LINUX_H
79674 +#define __ELAN_MOD_LINUX_H
79675 +
79676 +#define ELANCRTL_USER_BASE             0x40
79677 +
79678 +/* stats */
79679 +typedef struct elanctrl_stats_get_next_struct
79680 +{
79681 +       ELAN_STATS_IDX   statidx; 
79682 +       ELAN_STATS_IDX  *next_statidx; /* return value */
79683 +} ELANCTRL_STATS_GET_NEXT_STRUCT;
79684 +#define ELANCTRL_STATS_GET_NEXT   _IOR   ('e', ELANCRTL_USER_BASE + 0,  ELANCTRL_STATS_GET_NEXT_STRUCT)
79685 +
79686 +typedef struct elanctrl_stats_find_index_struct
79687 +{
79688 +       caddr_t          block_name;
79689 +       ELAN_STATS_IDX  *statidx; /* return value */
79690 +       uint        *num_entries; /* return value */
79691 +} ELANCTRL_STATS_FIND_INDEX_STRUCT;
79692 +#define ELANCTRL_STATS_FIND_INDEX   _IOR   ('e', ELANCRTL_USER_BASE + 1,  ELANCTRL_STATS_FIND_INDEX_STRUCT)
79693 +
79694 +typedef struct elanctrl_stats_get_block_info_struct
79695 +{
79696 +       ELAN_STATS_IDX  statidx; 
79697 +       caddr_t       block_name; /* return value */
79698 +       uint        *num_entries; /* return value */
79699 +} ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT;
79700 +#define ELANCTRL_STATS_GET_BLOCK_INFO   _IOR   ('e', ELANCRTL_USER_BASE + 2, ELANCTRL_STATS_GET_BLOCK_INFO_STRUCT)
79701 +
79702 +typedef struct elanctrl_stats_get_index_name_struct
79703 +{
79704 +       ELAN_STATS_IDX statidx; 
79705 +       uint           index;
79706 +       caddr_t        name; /* return value */
79707 +} ELANCTRL_STATS_GET_INDEX_NAME_STRUCT;
79708 +#define ELANCTRL_STATS_GET_INDEX_NAME   _IOR   ('e', ELANCRTL_USER_BASE + 3, ELANCTRL_STATS_GET_INDEX_NAME_STRUCT)
79709 +
79710 +typedef struct elanctrl_stats_clear_block_struct
79711 +{
79712 +       ELAN_STATS_IDX statidx; 
79713 +} ELANCTRL_STATS_CLEAR_BLOCK_STRUCT;
79714 +#define ELANCTRL_STATS_CLEAR_BLOCK   _IOR   ('e', ELANCRTL_USER_BASE + 4, ELANCTRL_STATS_CLEAR_BLOCK_STRUCT)
79715 +
79716 +typedef struct elanctrl_stats_get_block_struct
79717 +{
79718 +       ELAN_STATS_IDX statidx; 
79719 +       uint           entries;  
79720 +       ulong         *values; /* return values */
79721 +} ELANCTRL_STATS_GET_BLOCK_STRUCT;
79722 +#define ELANCTRL_STATS_GET_BLOCK        _IOR   ('e', ELANCRTL_USER_BASE + 5, ELANCTRL_STATS_GET_BLOCK_STRUCT)
79723 +
79724 +
79725 +typedef struct elanctrl_get_devinfo_struct
79726 +{
79727 +       ELAN_DEV_IDX  devidx; 
79728 +       ELAN_DEVINFO *devinfo; /* return values */
79729 +} ELANCTRL_GET_DEVINFO_STRUCT;
79730 +#define ELANCTRL_GET_DEVINFO        _IOR   ('e', ELANCRTL_USER_BASE + 6, ELANCTRL_GET_DEVINFO_STRUCT)
79731 +
79732 +typedef struct elanctrl_get_position_struct
79733 +{
79734 +       ELAN_DEV_IDX   devidx; 
79735 +       ELAN_POSITION *position; /* return values */
79736 +} ELANCTRL_GET_POSITION_STRUCT;
79737 +#define ELANCTRL_GET_POSITION        _IOR   ('e', ELANCRTL_USER_BASE + 7, ELANCTRL_GET_POSITION_STRUCT)
79738 +
79739 +typedef struct elanctrl_set_position_struct
79740 +{
79741 +       ELAN_DEV_IDX   devidx; 
79742 +       unsigned short nodeId;
79743 +       unsigned short numNodes;
79744 +} ELANCTRL_SET_POSITION_STRUCT;
79745 +#define ELANCTRL_SET_POSITION        _IOR   ('e', ELANCRTL_USER_BASE + 8, ELANCTRL_SET_POSITION_STRUCT)
79746 +
79747 +typedef struct elanctrl_create_cap_struct
79748 +{
79749 +       ELAN_CAPABILITY cap;
79750 +} ELANCTRL_CREATE_CAP_STRUCT;
79751 +#define ELANCTRL_CREATE_CAP             _IOW   ('e', ELANCRTL_USER_BASE + 9, ELANCTRL_CREATE_CAP_STRUCT)
79752 +
79753 +typedef struct elanctrl_destroy_cap_struct
79754 +{
79755 +       ELAN_CAPABILITY cap;
79756 +} ELANCTRL_DESTROY_CAP_STRUCT;
79757 +#define ELANCTRL_DESTROY_CAP             _IOW   ('e', ELANCRTL_USER_BASE + 10, ELANCTRL_DESTROY_CAP_STRUCT)
79758 +
79759 +typedef struct elanctrl_create_vp_struct
79760 +{
79761 +       ELAN_CAPABILITY cap;
79762 +       ELAN_CAPABILITY map;
79763 +} ELANCTRL_CREATE_VP_STRUCT;
79764 +#define ELANCTRL_CREATE_VP             _IOW   ('e', ELANCRTL_USER_BASE + 11, ELANCTRL_CREATE_VP_STRUCT)
79765 +
79766 +typedef struct elanctrl_destroy_vp_struct
79767 +{
79768 +       ELAN_CAPABILITY cap;
79769 +       ELAN_CAPABILITY map;
79770 +} ELANCTRL_DESTROY_VP_STRUCT;
79771 +#define ELANCTRL_DESTROY_VP          _IOW   ('e', ELANCRTL_USER_BASE + 12, ELANCTRL_DESTROY_VP_STRUCT)
79772 +
79773 +#define ELANCTRL_DEBUG_DUMP          _IO    ('e', ELANCRTL_USER_BASE + 13)
79774 +
79775 +typedef struct elanctrl_get_caps_struct
79776 +{
79777 +       uint            *number_of_results;
79778 +       uint             array_size;
79779 +       ELAN_CAP_STRUCT *caps;
79780 +} ELANCTRL_GET_CAPS_STRUCT;
79781 +#define ELANCTRL_GET_CAPS          _IOW   ('e', ELANCRTL_USER_BASE + 14, ELANCTRL_GET_CAPS_STRUCT)
79782 +
79783 +
79784 +typedef struct elanctrl_debug_buffer_struct
79785 +{
79786 +       caddr_t buffer;
79787 +       int     size;
79788 +} ELANCTRL_DEBUG_BUFFER_STRUCT;
79789 +#define ELANCTRL_DEBUG_BUFFER _IOW ('e', ELANCRTL_USER_BASE + 15, ELANCTRL_DEBUG_BUFFER_STRUCT)
79790 +
79791 +
79792 +/*
79793 + * Usercopy ioctl definitions
79794 + */
79795 +typedef struct elanctrl_usercopy_attach_struct
79796 +{
79797 +       ELAN_CAPABILITY cap;    /* process capability (for security checks) */
79798 +} ELANCTRL_USERCOPY_ATTACH_STRUCT;
79799 +#define ELANCTRL_USERCOPY_ATTACH      _IOR ('u', ELANCRTL_USER_BASE + 0, ELANCTRL_USERCOPY_ATTACH_STRUCT)
79800 +#define ELANCTRL_USERCOPY_DETACH      _IO  ('u', ELANCRTL_USER_BASE + 1)
79801 +
79802 +typedef struct elanctrl_usercopy_struct
79803 +{
79804 +       void *remote;           /* remote process buffer */
79805 +       void *local;            /* local process buffer */
79806 +       size_t len;
79807 +       int   write;            /* Direction */
79808 +
79809 +       int   ctxId;            /* remote process context id (0 .. nlocal-1) */
79810 +
79811 +} ELANCTRL_USERCOPY_STRUCT;
79812 +#define ELANCTRL_USERCOPY             _IOR ('u', ELANCRTL_USER_BASE + 2, ELANCTRL_USERCOPY_STRUCT)
79813 +
79814 +#define ELANMOD_PROCFS_IOCTL      "/proc/qsnet/elan/ioctl"
79815 +#define ELANMOD_PROCFS_USER_IOCTL "/proc/qsnet/elan/user"
79816 +#define ELANMOD_PROCFS_VERSION    "/proc/qsnet/elan/version"
79817 +#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask"
79818 +#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode"
79819 +
79820 +#endif /* __ELAN_MOD_LINUX_H */
79821 +
79822 +/*
79823 + * Local variables:
79824 + * c-file-style: "linux"
79825 + * End:
79826 + */
79827 diff -urN clean/include/elan/elanmod_subsystem.h linux-2.6.9/include/elan/elanmod_subsystem.h
79828 --- clean/include/elan/elanmod_subsystem.h      1969-12-31 19:00:00.000000000 -0500
79829 +++ linux-2.6.9/include/elan/elanmod_subsystem.h        2003-09-29 11:35:13.000000000 -0400
79830 @@ -0,0 +1,138 @@
79831 +/*
79832 + *    Copyright (c) 2003 by Quadrics Limited.
79833 + * 
79834 + *    For licensing information please see the supplied COPYING file
79835 + *
79836 + */
79837 +
79838 +#ifndef __ELAN_SUBSYSTEM_H
79839 +#define __ELAN_SUBSYSTEM_H
79840 +
79841 +#include <sys/types.h>
79842 +#include <sys/param.h>
79843 +
79844 +#if defined( __KERNEL__) 
79845 +int elan_configure(
79846 +    cfg_op_t op,
79847 +    caddr_t  indata,
79848 +    ulong    indata_size,
79849 +    caddr_t  outdata,
79850 +    ulong    outdata_size);
79851 +#endif
79852 +
79853 +#define ELAN_KMOD_CODE(x)      ((x)+CFG_OP_SUBSYS_MIN)
79854 +#define ELAN_MAX_KMOD_CODES 100
79855 +
79856 +#define ELAN_SUBSYS "elan"
79857 +
79858 +#define ELAN_STATS_GET_NEXT    0x01
79859 +typedef struct {
79860 +       ELAN_STATS_IDX statidx;
79861 +       ELAN_STATS_IDX *next_statidx;   
79862 +} elan_stats_get_next_struct;
79863 +
79864 +
79865 +#define ELAN_STATS_FIND_INDEX   0x02
79866 +typedef struct {
79867 +       caddr_t          block_name;
79868 +       ELAN_STATS_IDX  *statidx; /* return value */
79869 +       uint        *num_entries; /* return value */
79870 +} elan_stats_find_index_struct;
79871 +
79872 +#define ELAN_STATS_GET_BLOCK_INFO  0x03
79873 +typedef struct {
79874 +       ELAN_STATS_IDX  statidx; 
79875 +       caddr_t       block_name; /* return value */
79876 +       uint        *num_entries; /* return value */
79877 +} elan_stats_get_block_info_struct;
79878 +
79879 +#define ELAN_STATS_GET_INDEX_NAME  0x04
79880 +typedef struct {
79881 +       ELAN_STATS_IDX statidx; 
79882 +       uint           index;
79883 +       caddr_t        name; /* return value */
79884 +} elan_stats_get_index_name_struct;
79885 +
79886 +#define ELAN_STATS_CLEAR_BLOCK  0x05
79887 +typedef struct {
79888 +       ELAN_STATS_IDX statidx; 
79889 +} elan_stats_clear_block_struct;
79890 +
79891 +#define ELAN_STATS_GET_BLOCK     0x06
79892 +typedef struct 
79893 +{
79894 +       ELAN_STATS_IDX statidx; 
79895 +       uint           entries;  
79896 +       ulong         *values; /* return values */
79897 +} elan_stats_get_block_struct;
79898 +
79899 +#define ELAN_GET_DEVINFO     0x07
79900 +typedef struct 
79901 +{
79902 +       ELAN_DEV_IDX  devidx; 
79903 +       ELAN_DEVINFO *devinfo; /* return values */
79904 +} elan_get_devinfo_struct;
79905 +
79906 +#define ELAN_GET_POSITION  0x08
79907 +typedef struct {
79908 +       ELAN_DEV_IDX   devidx; 
79909 +       ELAN_POSITION *position; /* return values */
79910 +} elan_get_position_struct;
79911 +
79912 +#define ELAN_SET_POSITION   0x09
79913 +typedef struct {
79914 +       ELAN_DEV_IDX   devidx; 
79915 +       unsigned short nodeId;
79916 +       unsigned short numNodes;
79917 +} elan_set_position_struct;
79918 +
79919 +#define ELAN_CREATE_CAP  0x0a
79920 +typedef struct {
79921 +       ELAN_CAPABILITY cap;
79922 +} elan_create_cap_struct;
79923 +
79924 +#define ELAN_DESTROY_CAP    0x0b
79925 +typedef struct {
79926 +       ELAN_CAPABILITY cap;
79927 +} elan_destroy_cap_struct;
79928 +
79929 +#define ELAN_CREATE_VP   0x0c
79930 +typedef struct {
79931 +       ELAN_CAPABILITY cap;
79932 +       ELAN_CAPABILITY map;
79933 +} elan_create_vp_struct;
79934 +
79935 +#define ELAN_DESTROY_VP    0x0d
79936 +typedef struct {
79937 +       ELAN_CAPABILITY cap;
79938 +       ELAN_CAPABILITY map;
79939 +} elan_destroy_vp_struct;
79940 +
79941 +
79942 +#define ELAN_DEBUG_DUMP   0x0e
79943 +
79944 +#define ELAN_GET_CAPS    0x0f
79945 +typedef struct {
79946 +       uint            *number_of_results;
79947 +       uint             array_size;
79948 +       ELAN_CAP_STRUCT *caps;
79949 +} elan_get_caps_struct;
79950 +
79951 +#define ELAN_DEBUG_BUFFER 0x10
79952 +typedef struct {
79953 +       caddr_t addr;
79954 +       int     len;
79955 +} elan_debug_buffer_struct;
79956 +
79957 +#define ELANMOD_PROCFS_IOCTL      "/proc/qsnet/elan/ioctl"
79958 +#define ELANMOD_PROCFS_VERSION    "/proc/qsnet/elan/version"
79959 +#define ELANMOD_PROCFS_DEBUG_MASK "/proc/qsnet/elan/debug_mask"
79960 +#define ELANMOD_PROCFS_DEBUG_MODE "/proc/qsnet/elan/debug_mode"
79961 +
79962 +#endif /* __ELAN_SUBSYSTEM_H */
79963 +
79964 +/*
79965 + * Local variables:
79966 + * c-file-style: "linux"
79967 + * End:
79968 + */
79969 diff -urN clean/include/elan/epcomms.h linux-2.6.9/include/elan/epcomms.h
79970 --- clean/include/elan/epcomms.h        1969-12-31 19:00:00.000000000 -0500
79971 +++ linux-2.6.9/include/elan/epcomms.h  2004-11-12 05:55:03.000000000 -0500
79972 @@ -0,0 +1,635 @@
79973 +/*
79974 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
79975 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
79976 + *
79977 + *    For licensing information please see the supplied COPYING file
79978 + *
79979 + */
79980 +
79981 +#ifndef __ELAN_EPCOMMS_H
79982 +#define __ELAN_EPCOMMS_H
79983 +
79984 +#ident "$Id: epcomms.h,v 1.46 2004/11/12 10:55:03 mike Exp $"
79985 +/*      $Source: /cvs/master/quadrics/epmod/epcomms.h,v $ */
79986 +
79987 +#include <elan/kcomm.h>
79988 +#include <elan/bitmap.h>
79989 +
79990 +#define EPCOMMS_SUBSYS_NAME    "epcomms"
79991 +
79992 +/* message service numbers */
79993 +#define EP_MSG_SVC_EIP512              0x00                            /* Quadrics EIP services */
79994 +#define EP_MSG_SVC_EIP1K               0x01
79995 +#define EP_MSG_SVC_EIP2K               0x02
79996 +#define EP_MSG_SVC_EIP4K               0x03
79997 +#define EP_MSG_SVC_EIP8K               0x04
79998 +#define EP_MSG_SVC_EIP16K              0x05
79999 +#define EP_MSG_SVC_EIP32K              0x06
80000 +#define EP_MSG_SVC_EIP64K              0x07
80001 +#define EP_MSG_SVC_EIP128K             0x08
80002 +
80003 +#define EP_MSG_SVC_PFS                 0x09                            /* Quadrics PFS rpc service */
80004 +
80005 +#define EP_MSG_SVC_PORTALS_SMALL       0x10                            /* Lustre Portals */
80006 +#define EP_MSG_SVC_PORTALS_LARGE       0x11
80007 +
80008 +#define EP_MSG_NSVC                    0x40                            /* Max number of services */
80009 +
80010 +#define EP_MSGQ_ADDR(qnum)             (EP_EPCOMMS_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE)
80011 +
80012 +/*
80013 + * EP_ENVELOPE
80014 + *   Messages are sent by sending an envelope to the destination
80015 + *   describing the source buffers to transfer.  The receiving thread
80016 + *   then allocates a receive buffer and fetches the data by issuing
80017 + *   "get" dmas.
80018 + *
80019 + * NOTE:  envelopes are not explicitly converted to network byte order
80020 + *        since they are always transferred little endian as they are
80021 + *        copied to/from elan memory using word operations.
80022 + */
80023 +typedef struct ep_envelope
80024 +{
80025 +    uint32_t     Version;                                      /* Protocol version field */
80026 +
80027 +    EP_ATTRIBUTE  Attr;                                        /* Attributes */
80028 +
80029 +    EP_XID       Xid;                                          /* transaction id */
80030 +
80031 +    uint32_t     NodeId;                                       /* Source processor */
80032 +    uint32_t     Range;                                        /* range we're sending to (high << 16 | low) */
80033 +
80034 +    EP_ADDR      TxdRail;                                      /* address of per-rail txd */
80035 +    EP_NMD       TxdMain;                                      /* address of main memory portion of txd */
80036 +
80037 +    uint32_t      nFrags;                                      /* # fragments */
80038 +    EP_NMD       Frags[EP_MAXFRAG];                            /* network mapping handles of source data */
80039 +
80040 +    uint32_t      CheckSum;                                     /* holds the check sum value when active 
80041 +                                                                * must be after all members to be checksum'd
80042 +                                                                */
80043 +
80044 +    uint32_t     Pad[6];                                       /* Pad to 128 bytes */
80045 +} EP_ENVELOPE;
80046 +
80047 +#define EP_ENVELOPE_VERSION            0xdac10001
80048 +#define EP_ENVELOPE_SIZE               roundup (sizeof (EP_ENVELOPE), EP_BLK_SIZE)
80049 +
80050 +/*
80051 + * RPC payload - this small amount of data is transfered in
80052 + * the envelope for RPCs
80053 + */
80054 +typedef struct ep_payload
80055 +{
80056 +    uint32_t   Data[128/sizeof(uint32_t)];
80057 +} EP_PAYLOAD;
80058 +
80059 +#define EP_PAYLOAD_SIZE                        roundup (sizeof (EP_PAYLOAD), EP_BLK_SIZE)
80060 +
80061 +#define EP_INPUTQ_SIZE                 (EP_ENVELOPE_SIZE + EP_PAYLOAD_SIZE)
80062 +
80063 +/*
80064 + * EP_STATUSBLK
80065 + *   RPC completion transfers a status block to the client.
80066 + */
80067 +typedef struct ep_statusblk
80068 +{
80069 +    uint32_t   Data[128/sizeof(uint32_t)];
80070 +} EP_STATUSBLK;
80071 +
80072 +#define EP_STATUSBLK_SIZE              roundup (sizeof(EP_STATUSBLK), EP_BLK_SIZE)
80073 +
80074 +#define EP_RANGE(low,high)             ((high) << 16 | (low))
80075 +#define EP_RANGE_LOW(range)            ((range) & 0xFFFF)
80076 +#define EP_RANGE_HIGH(range)           (((range) >> 16) & 0xFFFF)
80077 +
80078 +/* return codes from functions, + 'res' parameter to txd callback, ep_rxd_status() */
80079 +typedef enum
80080 +{
80081 +    EP_SUCCESS         = 0,                                    /* message sent/received successfully */
80082 +    EP_RXD_PENDING     = -1,                                   /* rxd not completed by thread */
80083 +    EP_CONN_RESET      = -2,                                   /* virtual circuit reset */
80084 +    EP_NODE_DOWN       = -3,                                   /* node down - transmit not attempted */
80085 +    EP_MSG_TOO_BIG      = -4,                                  /* received message larger than buffer */
80086 +    EP_ENOMEM          = -5,                                   /* memory alloc failed */
80087 +    EP_EINVAL          = -6,                                   /* invalid parameters */
80088 +    EP_SHUTDOWN                = -7,                                   /* receiver is being shut down */
80089 +} EP_STATUS;
80090 +
80091 +/* forward declarations */
80092 +typedef struct ep_rxd          EP_RXD;
80093 +typedef struct ep_txd          EP_TXD;
80094 +typedef struct ep_rcvr_rail    EP_RCVR_RAIL;
80095 +typedef struct ep_rcvr         EP_RCVR;
80096 +typedef struct ep_xmtr_rail    EP_XMTR_RAIL;
80097 +typedef struct ep_xmtr         EP_XMTR;
80098 +typedef struct ep_comms_rail    EP_COMMS_RAIL;
80099 +typedef struct ep_comms_subsys  EP_COMMS_SUBSYS;
80100 +
80101 +typedef struct ep_rcvr_stats           EP_RCVR_STATS;
80102 +typedef struct ep_xmtr_stats           EP_XMTR_STATS;
80103 +typedef struct ep_rcvr_rail_stats      EP_RCVR_RAIL_STATS;
80104 +typedef struct ep_xmtr_rail_stats      EP_XMTR_RAIL_STATS;
80105 +
80106 +typedef void (EP_RXH)(EP_RXD *rxd);                            /* callback function from receive completion */
80107 +typedef void (EP_TXH)(EP_TXD *txd, void *arg, EP_STATUS res);  /* callback function from transmit completion  */
80108 +
80109 +/* Main memory portion shared descriptor */
80110 +typedef struct ep_rxd_main
80111 +{
80112 +    EP_ENVELOPE                Envelope;                               /* 128 byte aligned envelope */
80113 +    EP_PAYLOAD         Payload;                                /* 128 byte aligned payload */
80114 +    bitmap_t           Bitmap[BT_BITOUL(EP_MAX_NODES)];        /* broadcast bitmap */
80115 +    EP_STATUSBLK       StatusBlk;                              /* RPC status block to return */
80116 +    uint64_t           Next;                                   /* linked list when on active list (main address) */
80117 +    int32_t            Len;                                    /* Length of message received */
80118 +} EP_RXD_MAIN;
80119 +
80120 +#define EP_RXD_MAIN_SIZE       roundup (sizeof (EP_RXD_MAIN), EP_BLK_SIZE)
80121 +
80122 +/* Phases for message/rpc */
80123 +#ifndef __ELAN__
80124 +
80125 +/* Kernel memory portion of per-rail receive descriptor */
80126 +typedef struct ep_rxd_rail
80127 +{
80128 +    struct list_head    Link;                                  /* linked on freelist */
80129 +    EP_RCVR_RAIL       *RcvrRail;                              /* rcvr we're associated with */
80130 +    
80131 +    EP_RXD            *Rxd;                                    /* receive descriptor we're bound to */
80132 +} EP_RXD_RAIL;
80133 +
80134 +#define RXD_BOUND2RAIL(rxdRail,rcvrRail)       ((rxdRail) != NULL && ((EP_RXD_RAIL *) (rxdRail))->RcvrRail == (EP_RCVR_RAIL *) rcvrRail)
80135 +
80136 +struct ep_rxd
80137 +{
80138 +    struct list_head   Link;                                   /* linked on free/active list */
80139 +    EP_RCVR           *Rcvr;                                   /* owning receiver */
80140 +
80141 +    EP_RXD_MAIN               *RxdMain;                                /* shared main memory portion. */
80142 +    EP_NMD             NmdMain;                                /*  and network mapping descriptor */
80143 +
80144 +    EP_RXD_RAIL               *RxdRail;                                /* per-rail rxd we're bound to */
80145 +    
80146 +    EP_RXH            *Handler;                                /* completion function */
80147 +    void              *Arg;                                    /*    and argument */
80148 +
80149 +    unsigned int       State;                                  /* RXD status (active,stalled,failed) */
80150 +
80151 +    EP_NMD             Data;                                   /* network mapping descriptor for user buffer */
80152 +
80153 +    int                        nFrags;                                 /* network mapping descriptor for put/get/complete */
80154 +    EP_NMD             Local[EP_MAXFRAG];
80155 +    EP_NMD             Remote[EP_MAXFRAG];
80156 +
80157 +    long               NextRunTime;                            /* time to resend failover/map requests */
80158 +    EP_XID             MsgXid;                                 /*   and transaction id */
80159 +
80160 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
80161 +    struct list_head   CheckSumLink;                           /* linked on check sum list */
80162 +#endif
80163 +};
80164 +
80165 +#define EP_NUM_RXD_PER_BLOCK   16
80166 +
80167 +/* rxd->State */
80168 +#define EP_RXD_FREE            0
80169 +
80170 +#define EP_RXD_RECEIVE_UNBOUND 1
80171 +#define EP_RXD_RECEIVE_ACTIVE  2
80172 +
80173 +#define EP_RXD_PUT_ACTIVE      3
80174 +#define EP_RXD_PUT_STALLED     4
80175 +#define EP_RXD_GET_ACTIVE      5
80176 +#define EP_RXD_GET_STALLED     6
80177 +
80178 +#define EP_RXD_COMPLETE_ACTIVE 7
80179 +#define EP_RXD_COMPLETE_STALLED        8
80180 +
80181 +#define EP_RXD_RPC_IN_PROGRESS 9
80182 +#define EP_RXD_COMPLETED       10      
80183 +
80184 +#define EP_RXD_BEEN_ABORTED    11                              /* rxd was aborted while in a private state */
80185 +
80186 +typedef struct ep_rxd_block
80187 +{
80188 +    struct list_head   Link;
80189 +
80190 +    EP_NMD             NmdMain;
80191 +
80192 +    EP_RXD             Rxd[EP_NUM_RXD_PER_BLOCK];
80193 +} EP_RXD_BLOCK;
80194 +
80195 +struct ep_rcvr_rail_stats 
80196 +{
80197 +    EP_STATS_COUNT rx;
80198 +    EP_STATS_COUNT rx_len;
80199 +};
80200 +
80201 +struct ep_rcvr_rail
80202 +{
80203 +    EP_RCVR           *Rcvr;                                   /* associated receiver */
80204 +    EP_COMMS_RAIL      *CommsRail;                             /* comms rail */
80205 +
80206 +    struct proc_dir_entry *procfs_root;                         /* root of this rcvr_rail's procfs entry */
80207 +    EP_RCVR_RAIL_STATS     stats;                               /* generic rcvr_rail stats */
80208 +};
80209 +
80210 +struct ep_rcvr_stats
80211 +{
80212 +    EP_STATS_COUNT rx;
80213 +    EP_STATS_COUNT rx_len;
80214 +};
80215 +
80216 +struct ep_rcvr
80217 +{
80218 +    struct list_head  Link;                                    /* queued on subsystem */
80219 +    EP_COMMS_SUBSYS  *Subsys;                                  /* kernel comms subsystem */
80220 +    EP_SERVICE        Service;                                 /* service number */
80221 +
80222 +    unsigned int      InputQueueEntries;                       /* # entries on receive queue */
80223 +
80224 +    EP_RAILMASK              RailMask;                                 /* bitmap of which rails are available */
80225 +    EP_RCVR_RAIL     *Rails[EP_MAX_RAILS];
80226 +
80227 +    spinlock_t       Lock;                                     /* spinlock for rails/receive lists */
80228 +
80229 +    struct list_head  ActiveDescList;                          /* List of pending/active receive descriptors */
80230 +
80231 +    EP_XID_CACHE      XidCache;                                        /* XID cache (protected by Lock) */
80232 +
80233 +    struct list_head  FreeDescList;                            /* List of free receive descriptors */
80234 +    unsigned int      FreeDescCount;                           /*   and number on free list */
80235 +    unsigned int      TotalDescCount;                           /*   total number created */
80236 +    spinlock_t       FreeDescLock;                             /*   and lock for free list */
80237 +    kcondvar_t       FreeDescSleep;                            /*   with place to sleep for rx desc */
80238 +    int                      FreeDescWanted;                           /*   and flag */
80239 +    struct list_head  DescBlockList;
80240 +
80241 +    unsigned int      ForwardRxdCount;                         /* count of rxd's being forwarded */
80242 +    unsigned int      CleanupWaiting;                          /* waiting for cleanup */
80243 +    kcondvar_t       CleanupSleep;                             /*   and place to sleep */
80244 +
80245 +    struct proc_dir_entry *procfs_root;                         /* place where this rcvr's proc entry is */
80246 +    EP_RCVR_STATS          stats;                                    
80247 +};
80248 +
80249 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
80250 +#define EP_ENVELOPE_CHECK_SUM      (1<<31)
80251 +extern uint32_t ep_calc_check_sum (EP_SYS *sys, EP_ENVELOPE *env, EP_NMD *nmd, int nFrags);
80252 +#endif
80253 +
80254 +#endif /* ! __ELAN__ */
80255 +
80256 +typedef struct ep_txd_main
80257 +{
80258 +    EP_STATUSBLK      StatusBlk;                               /* RPC status block */
80259 +    bitmap_t          Bitmap[BT_BITOUL(EP_MAX_NODES)];         /* broadcast bitmap */
80260 +} EP_TXD_MAIN;
80261 +
80262 +#define EP_TXD_MAIN_SIZE       roundup (sizeof (EP_TXD_MAIN), EP_BLK_SIZE)
80263 +
80264 +#ifndef __ELAN__
80265 +typedef struct ep_txd_rail
80266 +{
80267 +    struct list_head  Link;                                    /* linked on freelist */
80268 +    EP_XMTR_RAIL     *XmtrRail;                                        /* xmtr we're associated with */
80269 +
80270 +    EP_TXD          *Txd;                                      /* txd we're bound to */
80271 +} EP_TXD_RAIL;
80272 +
80273 +#define TXD_BOUND2RAIL(txdRail,xmtrRail)       ((txdRail) != NULL && ((EP_TXD_RAIL *) (txdRail))->XmtrRail == (EP_XMTR_RAIL *) xmtrRail)
80274 +
80275 +struct ep_txd
80276 +{
80277 +    struct list_head  Link;                                    /* linked on free/active list */
80278 +    EP_XMTR         *Xmtr;                                     /* service we're associated with */
80279 +
80280 +    EP_TXD_MAIN             *TxdMain;                                  /* shared main memory portion */
80281 +    EP_NMD           NmdMain;                                  /*   and network mapping descriptor */
80282 +
80283 +    EP_TXD_RAIL      *TxdRail;                                 /* per-rail txd for this phase */
80284 +
80285 +    EP_TXH          *Handler;                                  /* completion function */
80286 +    void            *Arg;                                      /*    and argument */
80287 +    
80288 +    unsigned short    NodeId;                                  /* node transmit is to. */
80289 +    EP_SERVICE        Service;                                 /*    and service */
80290 +
80291 +    long              TimeStamp;                                 /* time we were created at, to find sends taking too long */
80292 +    long             RetryTime;
80293 +    EP_BACKOFF       Backoff;
80294 +
80295 +    EP_ENVELOPE              Envelope;                                 /* envelope for transmit */
80296 +    EP_PAYLOAD       Payload;                                  /* payload for transmit */
80297 +};
80298 +
80299 +#define EP_NUM_TXD_PER_BLOCK   16
80300 +
80301 +/* "phase" parameter to BindTxd */
80302 +#define EP_TXD_PHASE_ACTIVE            1
80303 +#define EP_TXD_PHASE_PASSIVE           2
80304 +
80305 +typedef struct ep_txd_block
80306 +{
80307 +    struct list_head   Link;
80308 +    EP_NMD             NmdMain;
80309 +    EP_TXD             Txd[EP_NUM_TXD_PER_BLOCK];              /* transmit descriptors */
80310 +} EP_TXD_BLOCK;
80311 +
80312 +struct ep_xmtr_rail_stats
80313 +{
80314 +    EP_STATS_COUNT tx;
80315 +    EP_STATS_COUNT tx_len;
80316 +};
80317 +
80318 +struct ep_xmtr_rail
80319 +{
80320 +    EP_COMMS_RAIL      *CommsRail;                             /* associated comms rail */
80321 +    EP_XMTR           *Xmtr;                                   /* associated transmitter */
80322 +
80323 +    struct proc_dir_entry *procfs_root;                         /* place where this xmtr's proc entry is */
80324 +
80325 +    EP_XMTR_RAIL_STATS     stats;
80326 +};
80327 +
80328 +struct ep_xmtr_stats
80329 +{
80330 +    EP_STATS_COUNT tx;
80331 +    EP_STATS_COUNT tx_len;
80332 +};
80333 +
80334 +struct ep_xmtr
80335 +{
80336 +    struct list_head  Link;                                    /* Linked on subsys */
80337 +    EP_COMMS_SUBSYS  *Subsys;                                  /* kernel comms subsystem */
80338 +
80339 +    EP_RAILMASK              RailMask;                                 /* bitmap of which rails are available */
80340 +    EP_XMTR_RAIL     *Rails[EP_MAX_RAILS];                     /* per-rail state */
80341 +
80342 +    spinlock_t       Lock;                                     /* lock for active descriptor list */
80343 +
80344 +    struct list_head  ActiveDescList;                          /* list of active transmit descriptors */
80345 +
80346 +    EP_XID_CACHE      XidCache;                                        /* XID cache (protected by Lock) */
80347 +
80348 +    struct list_head  FreeDescList;                            /* List of free receive descriptors */
80349 +    unsigned int      FreeDescCount;                           /*   and number on free list */
80350 +    unsigned int      TotalDescCount;
80351 +    spinlock_t       FreeDescLock;                             /*   and lock for free list */
80352 +    kcondvar_t       FreeDescSleep;                            /*   with place to sleep for rx desc */
80353 +    int                      FreeDescWanted;                           /*   and flag */
80354 +    struct list_head  DescBlockList;
80355 +
80356 +    struct proc_dir_entry *procfs_root;                         /* place where this rcvr's proc entry is */
80357 +    EP_XMTR_STATS          stats;   
80358 +};
80359 +
80360 +/* forward descriptor */
80361 +#define EP_TREE_ARITY          3
80362 +
80363 +typedef struct ep_fwd_desc
80364 +{
80365 +    struct list_head    Link;                                  /* linked on forward/free lists */
80366 +    EP_RXD            *Rxd;                                    /* rxd to forward */
80367 +    EP_NMD             Data;                                   /* nmd of subset of receive buffer */
80368 +    unsigned           NumChildren;                            /*   number of places we're forwarding */
80369 +    unsigned           Children[EP_TREE_ARITY];
80370 +} EP_FWD_DESC;
80371 +
80372 +typedef struct ep_comms_ops
80373 +{
80374 +    void            (*DelRail) (EP_COMMS_RAIL *rail);
80375 +    void            (*DisplayRail) (EP_COMMS_RAIL *rail);
80376 +
80377 +    struct {
80378 +       void         (*AddRail) (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
80379 +       void         (*DelRail) (EP_RCVR *rcvr, EP_COMMS_RAIL *rail);
80380 +
80381 +       long         (*Check) (EP_RCVR_RAIL *rcvrRail, long nextRunTime);
80382 +
80383 +       int          (*QueueRxd) (EP_RXD *rxd, EP_RCVR_RAIL *rcvrRail);
80384 +       void         (*RpcPut)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
80385 +       void         (*RpcGet)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
80386 +       void         (*RpcComplete)(EP_RXD *rxd, EP_NMD *local, EP_NMD *remote, unsigned nFrags);
80387 +
80388 +       EP_RXD      *(*StealRxd)(EP_RCVR_RAIL *rcvrRail);
80389 +
80390 +       void         (*DisplayRcvr) (DisplayInfo *di, EP_RCVR_RAIL *rcvrRail);
80391 +       void         (*DisplayRxd)  (DisplayInfo *di, EP_RXD_RAIL *rxdRail);
80392 +
80393 +       void         (*FillOutRailStats) (EP_RCVR_RAIL *rcvr_rail, char *str);
80394 +
80395 +    } Rcvr;
80396 +
80397 +    struct {
80398 +       void         (*AddRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail);
80399 +       void         (*DelRail) (EP_XMTR *xmtr, EP_COMMS_RAIL *rail);
80400 +
80401 +       long         (*Check) (EP_XMTR_RAIL *xmtrRail, long nextRunTime);
80402 +       
80403 +       int          (*BindTxd) (EP_TXD *txd, EP_XMTR_RAIL *xmtrRail, unsigned int phase);
80404 +       void         (*UnbindTxd) (EP_TXD *txd, unsigned int phase);
80405 +       int          (*PollTxd) (EP_XMTR_RAIL *xmtrRail, EP_TXD_RAIL *txdRail, int how);
80406 +
80407 +       void         (*DisplayXmtr) (DisplayInfo *di, EP_XMTR_RAIL *xmtrRail);
80408 +       void         (*DisplayTxd)  (DisplayInfo *di, EP_TXD_RAIL *txdRail);
80409 +
80410 +       int          (*CheckTxdState) (EP_TXD *txd);
80411 +
80412 +       void         (*FillOutRailStats) (EP_XMTR_RAIL *xmtr_rail, char *str);
80413 +
80414 +    } Xmtr;
80415 +} EP_COMMS_OPS;
80416 +
80417 +#define EP_RAIL_OP(commsRail, Which)   (commsRail)->Ops.Which
80418 +#define EP_RCVR_OP(rcvrRail, Which)    (rcvrRail)->CommsRail->Ops.Rcvr.Which
80419 +#define EP_XMTR_OP(xmtrRail, Which)    (xmtrRail)->CommsRail->Ops.Xmtr.Which
80420 +
80421 +/* "how" parameter to PollTxd */
80422 +#define POLL_TX_LIST           0
80423 +#define ENABLE_TX_CALLBACK     1
80424 +#define DISABLE_TX_CALLBACK    2
80425 +
80426 +struct ep_comms_rail
80427 +{
80428 +    struct list_head   Link;                                   /* Linked on subsys */
80429 +    EP_RAIL           *Rail;                                   /* kernel comms rail */
80430 +    EP_COMMS_SUBSYS    *Subsys;
80431 +    EP_COMMS_OPS        Ops;
80432 +
80433 +    EP_COMMS_RAIL_STATS Stats;                                 /* statistics */
80434 +};
80435 +
80436 +struct ep_comms_subsys
80437 +{
80438 +    EP_SUBSYS          Subsys;                                 /* is a kernel comms subsystem */
80439 +
80440 +    kmutex_t           Lock;                                   /* global lock */
80441 +
80442 +    EP_COMMS_STATS     Stats;                                  /* statistics */
80443 +
80444 +    struct list_head   Rails;                                  /* list of all rails */
80445 +
80446 +    struct list_head    Receivers;                             /* list of receivers */
80447 +    struct list_head   Transmitters;                           /* and transmitters */
80448 +
80449 +    /* forward/allocator thread */
80450 +    EP_KTHREAD         Thread;                                 /* place thread sleeps */
80451 +
80452 +    /* message passing "broadcast" forward lists */
80453 +    spinlock_t         ForwardDescLock;                        /* Lock for broadcast forwarding */
80454 +    struct list_head    ForwardDescList;                       /* List of rxd's to forward */
80455 +
80456 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
80457 +    spinlock_t         CheckSumDescLock;                       /* Lock for CheckSums */
80458 +    struct list_head    CheckSumDescList;                      /* List of rxd's to be CheckSumed */
80459 +#endif
80460 +
80461 +    EP_XMTR           *ForwardXmtr;                            /* and transmitter to forward with */
80462 +};
80463 +
80464 +/* epcomms.c subsystem initialisation */
80465 +extern unsigned int   epcomms_forward_limit;
80466 +
80467 +extern int           ep_comms_init (EP_SYS *sys);
80468 +extern void           ep_comms_display (EP_SYS *sys, char *how);
80469 +extern EP_RAILMASK    ep_rcvr_railmask (EP_SYS *epsys, EP_SERVICE service);
80470 +
80471 +/* epcomms_elan3.c */
80472 +extern EP_COMMS_RAIL *ep3comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail);
80473 +
80474 +/* epcomms_elan4.c */
80475 +extern EP_COMMS_RAIL *ep4comms_add_rail (EP_SUBSYS *s, EP_SYS *sys, EP_RAIL *rail);
80476 +
80477 +/* epcommsTx.c */
80478 +extern int            TxdShouldStabalise (EP_TXD_RAIL *txdRail, EP_RAIL *rail);
80479 +extern void          FreeTxd (EP_XMTR *xmtr, EP_TXD *txd);
80480 +
80481 +extern unsigned int   ep_txd_lowat;
80482 +extern long           ep_check_xmtr (EP_XMTR *xmtr, long nextRunTime);
80483 +extern void           ep_display_xmtr (DisplayInfo *di, EP_XMTR *xmtr);
80484 +extern void           ep_xmtr_flush_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail);
80485 +extern void           ep_xmtr_reloc_callback (EP_XMTR *xmtr, EP_XMTR_RAIL *xmtrRail);
80486 +
80487 +extern void           ep_xmtr_fillout_stats      (EP_XMTR      *xmtr,      char *str);
80488 +extern void           ep_xmtr_rail_fillout_stats (EP_XMTR_RAIL *xmtr_rail, char *str);
80489 +
80490 +extern void           ep_xmtr_txd_stat (EP_XMTR *xmtr, EP_TXD *txd);
80491 +
80492 +/* epcommsRx.c */
80493 +extern EP_RXD        *StealRxdFromOtherRail (EP_RCVR *rcvr);
80494 +
80495 +extern unsigned int   ep_rxd_lowat;
80496 +extern long          ep_check_rcvr (EP_RCVR *rcvr, long nextRunTime);
80497 +extern void           ep_rcvr_flush_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail);
80498 +extern void           ep_rcvr_reloc_callback (EP_RCVR *rcvr, EP_RCVR_RAIL *rcvrRail);
80499 +extern void           ep_display_rcvr (DisplayInfo *di, EP_RCVR *rcvr, int full);
80500 +
80501 +extern long           ep_forward_rxds (EP_COMMS_SUBSYS *subsys, long nextRunTime);
80502 +
80503 +extern void           ep_rcvr_fillout_stats      (EP_RCVR      *rcvr,      char *str);
80504 +extern void           ep_rcvr_rail_fillout_stats (EP_RCVR_RAIL *rcvr_rail, char *str);
80505 +
80506 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
80507 +extern void           ep_csum_rxds    (EP_COMMS_SUBSYS *subsys);
80508 +extern void           ep_rxd_queue_csum (EP_RXD *rxd);
80509 +#endif
80510 +
80511 +extern void           ep_rxd_received     (EP_RXD *rxd);
80512 +extern void           ep_rxd_received_now (EP_RXD *rxd);
80513 +
80514 +/* ep_procfs.c */
80515 +extern struct proc_dir_entry *ep_procfs_root;
80516 +
80517 +extern void ep_procfs_rcvr_xmtr_init(void);
80518 +extern void ep_procfs_rcvr_xmtr_fini(void);
80519 +
80520 +extern void ep_procfs_rcvr_add(EP_RCVR *rcvr);
80521 +extern void ep_procfs_rcvr_del(EP_RCVR *rcvr);
80522 +
80523 +extern void ep_procfs_rcvr_add_rail(EP_RCVR_RAIL *rcvrRail);
80524 +extern void ep_procfs_rcvr_del_rail(EP_RCVR_RAIL *rcvrRail);
80525 +
80526 +extern void ep_procfs_xmtr_add(EP_XMTR *xmtr);
80527 +extern void ep_procfs_xmtr_del(EP_XMTR *xmtr);
80528 +
80529 +extern void ep_procfs_xmtr_add_rail(EP_XMTR_RAIL *xmtrRail);
80530 +extern void ep_procfs_xmtr_del_rail(EP_XMTR_RAIL *xmtrRail);
80531 +
80532 +
80533 +/* Public Interface */
80534 +
80535 +
80536 +/* epcomms.c message xmtr functions */
80537 +extern EP_XMTR       *ep_alloc_xmtr (EP_SYS *sys);
80538 +extern void           ep_free_xmtr (EP_XMTR *xmtr);
80539 +
80540 +extern EP_STATUS      ep_transmit_message (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
80541 +                                          EP_TXH *handler, void *arg, EP_PAYLOAD *payload,
80542 +                                          EP_NMD *nmd, int nFrag);
80543 +extern EP_STATUS      ep_multicast_message (EP_XMTR *xmtr, unsigned int destLo, unsigned int destHi, bitmap_t *bitmap, 
80544 +                                           EP_SERVICE service, EP_ATTRIBUTE attr, EP_TXH *handler, void *arg, 
80545 +                                           EP_PAYLOAD *payload, EP_NMD *nmd, int nFrag);
80546 +extern EP_STATUS      ep_transmit_rpc (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
80547 +                                      EP_TXH *handler, void *arg, EP_PAYLOAD *payload,
80548 +                                      EP_NMD *nmd, int nFrag);
80549 +extern EP_STATUS      ep_multicast_forward (EP_XMTR *xmtr, unsigned int dest, EP_SERVICE service, EP_ATTRIBUTE attr, 
80550 +                                           EP_TXH *handler, void *arg, EP_ENVELOPE *env, EP_PAYLOAD *payload, 
80551 +                                           bitmap_t *bitmap, EP_NMD *nmd, int nFrags);
80552 +
80553 +/* epcomms.c functions for use with polled transmits */
80554 +extern int            ep_poll_transmits (EP_XMTR *xmtr);
80555 +extern int            ep_enable_txcallbacks (EP_XMTR *xmtr);
80556 +extern int            ep_disable_txcallbacks (EP_XMTR *xmtr);
80557 +
80558 +/* epcomms.c message rcvr functions */
80559 +extern EP_RCVR       *ep_alloc_rcvr (EP_SYS *sys, EP_SERVICE svc, unsigned int nenvelopes);
80560 +extern void          ep_free_rcvr (EP_RCVR *rcvr);
80561 +
80562 +extern EP_STATUS      ep_queue_receive (EP_RCVR *rcvr, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr);
80563 +extern void          ep_requeue_receive (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *nmd, EP_ATTRIBUTE attr);
80564 +extern EP_STATUS      ep_rpc_put (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags);
80565 +extern EP_STATUS      ep_rpc_get (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_NMD *from, EP_NMD *to, int nFrags);
80566 +extern EP_STATUS      ep_complete_rpc (EP_RXD *rxd, EP_RXH *handler, void *arg, EP_STATUSBLK *blk, 
80567 +                                      EP_NMD *from, EP_NMD *to, int nFrags);
80568 +extern void          ep_complete_receive (EP_RXD *rxd);
80569 +
80570 +/* railhints.c */
80571 +extern int            ep_xmtr_bcastrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails);
80572 +extern int            ep_xmtr_prefrail (EP_XMTR *xmtr, EP_RAILMASK allowedRails, unsigned nodeId);
80573 +extern EP_RAILMASK    ep_xmtr_availrails (EP_XMTR *xmtr);
80574 +extern EP_RAILMASK    ep_xmtr_noderails (EP_XMTR *xmtr, unsigned nodeId);
80575 +extern int            ep_rcvr_prefrail (EP_RCVR *rcvr, EP_RAILMASK allowedRails);
80576 +extern EP_RAILMASK    ep_rcvr_availrails (EP_RCVR *rcvr);
80577 +extern EP_RAILMASK    ep_rxd_railmask (EP_RXD *rxd);
80578 +
80579 +/* epcomms.c functions for accessing fields of rxds */
80580 +extern void          *ep_rxd_arg(EP_RXD *rxd);
80581 +extern int            ep_rxd_len(EP_RXD *rxd);
80582 +extern EP_STATUS      ep_rxd_status(EP_RXD *rxd);
80583 +extern int            ep_rxd_isrpc(EP_RXD *rxd);
80584 +extern EP_ENVELOPE   *ep_rxd_envelope(EP_RXD *rxd);
80585 +extern EP_PAYLOAD    *ep_rxd_payload(EP_RXD *rxd);
80586 +extern int            ep_rxd_node(EP_RXD *rxd);
80587 +extern EP_STATUSBLK  *ep_rxd_statusblk(EP_RXD *rxd);
80588 +
80589 +/* functions for accessing fields of txds */
80590 +extern int            ep_txd_node(EP_TXD *txd);
80591 +extern EP_STATUSBLK  *ep_txd_statusblk(EP_TXD *txd);
80592 +
80593 +/* functions for controlling how many processes are using module */
80594 +extern void              ep_mod_dec_usecount (void);
80595 +extern void              ep_mod_inc_usecount (void);
80596 +
80597 +extern EP_RAILMASK ep_xmtr_svc_indicator_railmask (EP_XMTR *xmtr, int svc_indicator, int nodeId);
80598 +extern int ep_xmtr_svc_indicator_bitmap (EP_XMTR *xmtr, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
80599 +
80600 +#endif /* ! __ELAN__ */
80601 +/*
80602 + * Local variables:
80603 + * c-file-style: "stroustrup"
80604 + * End:
80605 + */
80606 +#endif /* __ELAN_EPCOMMS_H */
80607 +
80608 diff -urN clean/include/elan/epsvc.h linux-2.6.9/include/elan/epsvc.h
80609 --- clean/include/elan/epsvc.h  1969-12-31 19:00:00.000000000 -0500
80610 +++ linux-2.6.9/include/elan/epsvc.h    2004-02-13 05:03:27.000000000 -0500
80611 @@ -0,0 +1,36 @@
80612 +/*
80613 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80614 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
80615 + *
80616 + *    For licensing information please see the supplied COPYING file
80617 + *
80618 + */
80619 +
80620 +#ifndef __ELAN_EPSVC_H
80621 +#define __ELAN_EPSVC_H
80622 +
80623 +#ident "@(#)$Id: epsvc.h,v 1.9 2004/02/13 10:03:27 david Exp $"
80624 +/*      $Source: /cvs/master/quadrics/epmod/epsvc.h,v $ */
80625 +
80626 +
80627 +#define EP_SVC_NUM_INDICATORS       8
80628 +#define EP_SVC_INDICATOR_MAX_NAME  32
80629 +
80630 +#define EP_SVC_EIP     0
80631 +#define EP_SVC_NAMES   {"eip", "1", "2", "3", "4", "5", "6", "7"};
80632 +
80633 +#if defined(__KERNEL__)
80634 +extern int         ep_svc_indicator_set      (EP_SYS *epsys, int svc_indicator);
80635 +extern int         ep_svc_indicator_clear    (EP_SYS *epsys, int svc_indicator);
80636 +extern int         ep_svc_indicator_is_set   (EP_SYS *epsys, int svc_indicator, int nodeId);
80637 +extern int         ep_svc_indicator_bitmap   (EP_SYS *epsys, int svc_indicator, bitmap_t * bitmap, int low, int nnodes);
80638 +extern EP_RAILMASK ep_svc_indicator_railmask (EP_SYS *epsys, int svc_indicator, int nodeId);
80639 +#endif
80640 +
80641 +#endif /* __ELAN_EPSVC_H */
80642 +
80643 +/*
80644 + * Local variables:
80645 + * c-file-style: "stroustrup"
80646 + * End:
80647 + */
80648 diff -urN clean/include/elan/kalloc.h linux-2.6.9/include/elan/kalloc.h
80649 --- clean/include/elan/kalloc.h 1969-12-31 19:00:00.000000000 -0500
80650 +++ linux-2.6.9/include/elan/kalloc.h   2004-05-19 06:23:59.000000000 -0400
80651 @@ -0,0 +1,108 @@
80652 +/*
80653 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80654 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
80655 + *
80656 + *    For licensing information please see the supplied COPYING file
80657 + *
80658 + */
80659 +
80660 +#ifndef __ELAN3_KALLOC_H
80661 +#define __ELAN3_KALLOC_H
80662 +
80663 +#ident "$Id: kalloc.h,v 1.11 2004/05/19 10:23:59 david Exp $"
80664 +/*      $Source: /cvs/master/quadrics/epmod/kalloc.h,v $ */
80665 +
80666 +#include <elan/rmap.h>
80667 +
80668 +/*
80669 + * Memory allocator
80670 + */
80671 +#define LN2_MIN_SIZE   6                                       /* 64 bytes */
80672 +#define LN2_MAX_SIZE   16                                      /* 64k bytes */
80673 +#define NUM_FREELISTS  (LN2_MAX_SIZE-LN2_MIN_SIZE + 1)
80674 +#define MIN_SIZE       (1 << LN2_MIN_SIZE)
80675 +#define MAX_SIZE       (1 << LN2_MAX_SIZE)
80676 +
80677 +#define HASHSHIFT      LN2_MAX_SIZE
80678 +#define NHASH          32
80679 +#define HASH(addr)     (((addr) >> HASHSHIFT) & (NHASH-1))
80680 +
80681 +typedef enum
80682 +{
80683 +    EP_ALLOC_TYPE_PRIVATE_SDRAM,
80684 +    EP_ALLOC_TYPE_PRIVATE_MAIN,
80685 +    EP_ALLOC_TYPE_SHARED_MAIN,
80686 +} EP_ALLOC_TYPE;
80687 +
80688 +typedef struct ep_pool
80689 +{
80690 +    EP_NMH               Handle;                               /* network mapping handle */
80691 +
80692 +    struct list_head     HashBase;                             /* linked on hash lists */
80693 +    struct list_head     HashTop;                              /* linked on hash lists */
80694 +
80695 +    struct list_head     Link[NUM_FREELISTS];                  /* linked on free lists */
80696 +    bitmap_t            *Bitmaps[NUM_FREELISTS];               /* bitmaps for each size */
80697 +
80698 +    union {
80699 +       sdramaddr_t     Sdram;
80700 +       unsigned long   Ptr;
80701 +    } Buffer;
80702 +} EP_POOL;
80703 +
80704 +typedef struct ep_alloc
80705 +{
80706 +    spinlock_t      Lock;
80707 +    
80708 +    EP_ALLOC_TYPE    Type;
80709 +    unsigned int     Perm;
80710 +
80711 +    EP_RMAP         *ResourceMap;
80712 +
80713 +    struct list_head HashBase[NHASH];
80714 +    struct list_head HashTop[NHASH];
80715 +    struct list_head Freelists[NUM_FREELISTS];
80716 +
80717 +    union {
80718 +       struct {
80719 +           EP_SYS             *System;
80720 +           struct list_head    Rails;
80721 +       } Shared;
80722 +       
80723 +       struct {
80724 +           EP_RAIL            *Rail;
80725 +       } Private;
80726 +    } Data;
80727 +} EP_ALLOC;
80728 +
80729 +extern void            ep_display_alloc (EP_ALLOC *alloc);
80730 +
80731 +extern void            ep_alloc_init (EP_RAIL *rail);
80732 +extern void            ep_alloc_fini (EP_RAIL *rail);
80733 +
80734 +extern sdramaddr_t     ep_alloc_memory_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size, unsigned int perm, EP_ATTRIBUTE attr);
80735 +extern void            ep_free_memory_elan (EP_RAIL *rail, EP_ADDR addr);
80736 +
80737 +extern sdramaddr_t     ep_alloc_elan (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addrp);
80738 +extern void            ep_free_elan (EP_RAIL *rail, EP_ADDR addr, unsigned size);
80739 +extern void           *ep_alloc_main (EP_RAIL *rail, unsigned size, EP_ATTRIBUTE attr, EP_ADDR *addr);
80740 +extern void            ep_free_main (EP_RAIL *rail, EP_ADDR addr, unsigned size);
80741 +
80742 +extern sdramaddr_t     ep_elan2sdram (EP_RAIL *rail, EP_ADDR addr);
80743 +extern void            *ep_elan2main (EP_RAIL *rail, EP_ADDR addr);
80744 +
80745 +extern void            ep_shared_alloc_init (EP_SYS *sys);
80746 +extern void            ep_shared_alloc_fini (EP_SYS *sys);
80747 +extern int             ep_shared_alloc_add_rail (EP_SYS *sys, EP_RAIL *rail);
80748 +extern void            ep_shared_alloc_remove_rail (EP_SYS *sys, EP_RAIL *rail);
80749 +
80750 +extern void           *ep_shared_alloc_main (EP_SYS *sys, unsigned size, EP_ATTRIBUTE attr, EP_NMD *nmd);
80751 +extern void            ep_shared_free_main (EP_SYS *sys, EP_NMD *nmd);
80752 +
80753 +#endif /* __ELAN_KALLOC_H */
80754 +
80755 +/*
80756 + * Local variables:
80757 + * c-file-style: "stroustrup"
80758 + * End:
80759 + */
80760 diff -urN clean/include/elan/kcomm.h linux-2.6.9/include/elan/kcomm.h
80761 --- clean/include/elan/kcomm.h  1969-12-31 19:00:00.000000000 -0500
80762 +++ linux-2.6.9/include/elan/kcomm.h    2005-04-05 12:36:28.000000000 -0400
80763 @@ -0,0 +1,831 @@
80764 +/*
80765 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
80766 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
80767 + *
80768 + *    For licensing information please see the supplied COPYING file
80769 + *
80770 + */
80771 +
80772 +#ifndef __ELAN_KCOMM_H
80773 +#define __ELAN_KCOMM_H
80774 +
80775 +#ident "$Id: kcomm.h,v 1.82 2005/04/05 16:36:28 mike Exp $"
80776 +/*      $Source: /cvs/master/quadrics/epmod/kcomm.h,v $*/
80777 +#define EP_KCOMM_MAJOR_VERSION         3
80778 +#define EP_KCOMM_MINOR_VERSION         1
80779 +
80780 +#define EP_PROTOCOL_VERSION            1                       /* CM/KCOMM protocol revision */
80781 +
80782 +#define EP_MAX_NODES                   2048                    /* Max nodes we support */
80783 +#define EP_MAX_RAILS                   16                      /* max number of rails (we use an unsigned short for bitmaps !) */
80784 +#define EP_MAXFRAG                     4                       /* max number of fragments */
80785 +
80786 +#define EP_BLK_SIZE                    64                      /* align objects for elan access */
80787 +
80788 +/* Elan virtual address address space */
80789 +#define EP_SYSTEM_QUEUE_BASE           0x00010000              /* Base address for system queues */
80790 +#define EP_MSGSYS_QUEUE_BASE           0x00020000              /* Base address for msgsys queues */
80791 +#define EP_EPCOMMS_QUEUE_BASE          0x00030000              /* Base address for message queues */
80792 +#define EP_DVMA_BASE                   0x10000000              /* elan address range for dvma mapping. */
80793 +#define EP_DVMA_TOP                    0xE0000000
80794 +
80795 +#define EP_SHARED_BASE                 0xE0000000              /* shared main/elan allocators */
80796 +#define EP_SHARED_TOP                  0xF0000000
80797 +
80798 +#define EP_PRIVATE_BASE                        0xF0000000              /* private main/elan allocators */
80799 +#define EP_PRIVATE_TOP                 0xF8000000
80800 +
80801 +#define EP_DVMA_RMAP_SIZE              1024                    /* size of resource map for dvma address space */
80802 +#define EP_SHARED_RMAP_SIZE            1024                    /* size of resource map for shared address space */
80803 +#define EP_PRIVATE_RMAP_SIZE           1024                    /* size of resource map for private address space */
80804 +
80805 +/* Input queue descriptors fit into 64 bytes */
80806 +#define EP_QUEUE_DESC_SIZE             64
80807 +
80808 +/* Timeouts for checking network position */
80809 +#define EP_POSITION_TIMEOUT            (4*HZ)          /* 4s   time to notice CheckNetworkPosition changes */
80810 +#define EP_WITHDRAW_TIMEOUT            (2*HZ)          /* 2s   time before withdrawing from unreachable nodes */
80811 +
80812 +/* Time to try again due to resource failure (e.g. malloc etc) */
80813 +#define RESOURCE_RETRY_TIME            (HZ/20)
80814 +
80815 +/* Time to retransmit message when send failed */
80816 +#define MSGBUSY_RETRY_TIME             (HZ/20)
80817 +
80818 +/* Time between retransmits of message network flush requests */
80819 +#define MESSAGE_RETRY_TIME             (HZ/5)
80820 +
80821 +/* time to hold the context filter up to ensure that the
80822 + * next packet of a dma is guaranteed to get nacked (8mS) */
80823 +#define NETWORK_ERROR_TIMEOUT          (1 + roundup (HZ * 8 / 1000, 1))
80824 +
80825 +/* Time between retransmits of message failover requests */
80826 +#define FAILOVER_RETRY_TIME            (HZ/5)
80827 +
80828 +/* compute earliest time */
80829 +#define SET_NEXT_RUN_TIME(nextRunTime, time) \
80830 +do { \
80831 +    if ((nextRunTime) == 0 || AFTER(nextRunTime, (time)))\
80832 +       (nextRunTime) = (time);\
80833 +} while (0)
80834 +
80835 +/* DMA retry backoff/priorities/issue rings */
80836 +#define EP_NUM_BACKOFF                 8
80837 +#define EP_RETRY_STABALISING            0
80838 +#define EP_RETRY_BASE                  1
80839 +
80840 +#define EP_RETRY_CRITICAL              EP_RETRY_BASE
80841 +#define EP_RETRY_HIGH_PRI              (EP_RETRY_CRITICAL + 1)
80842 +#define EP_RETRY_HIGH_PRI_TIME         (1)
80843 +#define EP_RETRY_HIGH_PRI_RETRY                (EP_RETRY_HIGH_PRI + 1)
80844 +#define EP_RETRY_HIGH_PRI_RETRY_TIME   (2)
80845 +#define EP_RETRY_LOW_PRI               (EP_RETRY_HIGH_PRI_RETRY + EP_NUM_BACKOFF)
80846 +#define EP_RETRY_LOW_PRI_TIME          (2)
80847 +#define EP_RETRY_LOW_PRI_RETRY         (EP_RETRY_LOW_PRI + 1)
80848 +#define EP_RETRY_LOW_PRI_RETRY_TIME    (4)
80849 +#define EP_RETRY_ANONYMOUS             (EP_RETRY_LOW_PRI_RETRY + EP_NUM_BACKOFF)
80850 +#define EP_RETRY_ANONYMOUS_TIME                (10)
80851 +#define EP_RETRY_NETERR                        (EP_RETRY_ANONYMOUS + EP_NUM_BACKOFF)
80852 +#define EP_RETRY_NETERR_TIME           (10)
80853 +#define EP_NUM_RETRIES                 (EP_RETRY_NETERR + 1)
80854 +
80855 +typedef unsigned short EP_SERVICE;
80856 +
80857 +/* EP_ATTRIBUTE 32 bits 
80858 + *
80859 + * 0-2
80860 + *   for initial call :-
80861 + *     0 (0x1) EP_NO_ALLOC                             used once
80862 + *     1 (0x2) EP_NO_SLEEP                             used once
80863 + *     2 (0x4) EP_NOT_MYSELF                           used once
80864 + *
80865 + *   when stored and transmitted :-
80866 + *     0 (0x1) EP_MULTICAST                            envelope
80867 + *     1 (0x2) EP_RPC                                  envelope
80868 + *     2 (0x4) EP_HAS_PAYLOAD                          envelope
80869 + *
80870 + * 3-11
80871 + *     3   (0x08) EP_PREFRAIL_SET                      preserved
80872 + *     4-7 (0xf0) Pref Rail
80873 + *     8  (0x100) EP_NO_INTERRUPT
80874 + *     9  (0x200) EP_NO_FAILOVER
80875 + *
80876 + *    10 (0x400) EP_INTERRUPT_ENABLED                  internal
80877 + *    11 (0x800) EP_TXD_STABALISING                    internal
80878 + *
80879 + * 12-13 Not Used.
80880 + * 
80881 + * 14-15 (0xC000) Data Type.                           passed in
80882 + *    00 none. 
80883 + *    01 Service Indicator.
80884 + *    10 TimeOut.
80885 + *    11 RailMask
80886 + *          
80887 + * 16-31 (0xffff0000)  Data.  Service Indicator, TimeOut, RailMask, Pref Rail.
80888 + *         
80889 +*/
80890 +
80891 +typedef uint32_t EP_ATTRIBUTE;
80892 +
80893 +#define EP_LOCAL_ATTR_MASK 0x07
80894 +#define EP_CLEAR_LOCAL_ATTR(ATTR)  ( (ATTR) & ~EP_LOCAL_ATTR_MASK )
80895 +
80896 +#define EP_NO_ALLOC      0x01  /* Don't call allocators if no free descriptors */
80897 +#define EP_NO_SLEEP      0x02  /* Don't sleep if no free descriptors */
80898 +#define EP_NOT_MYSELF    0x04  /* Don't send multicast to me */
80899 +
80900 +#define EP_MULTICAST         0x01      /* Message is a multicast */
80901 +#define EP_RPC               0x02      /* Wait for RPC reply */
80902 +#define EP_HAS_PAYLOAD_BIT   0x04      /* transfer payload */
80903 +
80904 +
80905 +#define EP_PREFRAIL_SET  0x08  /* preferred rail is set (otherwise pick one from the NMDs) */
80906 +
80907 +#define EP_PREFRAIL_SHIFT  (4)
80908 +#define EP_PREFRAIL_MASK   0xf0
80909 +#define EP_IS_PREFRAIL_SET(ATTR)      (((ATTR) &  EP_PREFRAIL_SET) != 0)
80910 +#define EP_CLEAR_PREFRAIL(ATTR)       (((ATTR) & ~EP_PREFRAIL_SET) & ~EP_PREFRAIL_MASK) 
80911 +#define EP_SET_PREFRAIL(ATTR,RAIL)    (EP_CLEAR_PREFRAIL(ATTR) | (((RAIL) <<  EP_PREFRAIL_SHIFT ) &  EP_PREFRAIL_MASK ) |  EP_PREFRAIL_SET)
80912 +
80913 +
80914 +#define EP_ATTR2PREFRAIL(ATTR)            (((ATTR) & EP_PREFRAIL_MASK) >> EP_PREFRAIL_SHIFT)
80915 +
80916 +
80917 +#define EP_INTERRUPT_ENABLED 0x400     /* event interrupt enabled on EP_NO_INTERRUPT */
80918 +#define EP_TXD_STABALISING   0x800      /* flag to indicate this is attempting to stabalise */
80919 +
80920 +#define EP_IS_MULTICAST(ATTR)                 (((ATTR) &  EP_MULTICAST) != 0)
80921 +#define EP_SET_MULTICAST(ATTR)                ( (ATTR) |  EP_MULTICAST)
80922 +#define EP_CLEAR_MULTICAST(ATTR)              ( (ATTR) & ~EP_MULTICAST)
80923 +
80924 +#define EP_IS_RPC(ATTR)                       (((ATTR) &  EP_RPC) != 0)
80925 +#define EP_SET_RPC(ATTR)                      ( (ATTR) |  EP_RPC)
80926 +#define EP_CLEAR_RPC(ATTR)                    ( (ATTR) & ~EP_RPC)
80927 +
80928 +#define EP_HAS_PAYLOAD(ATTR)                  (((ATTR) &  EP_HAS_PAYLOAD_BIT) != 0)
80929 +#define EP_SET_HAS_PAYLOAD(ATTR)              ( (ATTR) |  EP_HAS_PAYLOAD_BIT)
80930 +#define EP_CLEAR_HAS_PAYLOAD(ATTR)            ( (ATTR) & ~EP_HAS_PAYLOAD_BIT)
80931 +
80932 +#define EP_IS_INTERRUPT_ENABLED(ATTR)         (((ATTR) &  EP_INTERRUPT_ENABLED) != 0)
80933 +#define EP_SET_INTERRUPT_ENABLED(ATTR)        ( (ATTR) |  EP_INTERRUPT_ENABLED)
80934 +#define EP_CLEAR_INTERRUPT_ENABLED(ATTR)      ( (ATTR) & ~EP_INTERRUPT_ENABLED)
80935 +
80936 +#define EP_IS_TXD_STABALISING(ATTR)           (((ATTR) &  EP_TXD_STABALISING) != 0)
80937 +#define EP_SET_TXD_STABALISING(ATTR)          ( (ATTR) |  EP_TXD_STABALISING)
80938 +#define EP_CLEAR_TXD_STABALISING(ATTR)        ( (ATTR) & ~EP_TXD_STABALISING)
80939 +
80940 +#define EP_NO_INTERRUPT      0x100     /* Don't generate completion interrupt (tx) */
80941 +#define EP_NO_FAILOVER       0x200     /* don't attempt rail failover, just abort */
80942 +
80943 +#define EP_IS_NO_INTERRUPT(ATTR)    (((ATTR) &  EP_NO_INTERRUPT) != 0)
80944 +#define EP_SET_NO_INTERRUPT(ATTR)   ( (ATTR) |  EP_NO_INTERRUPT)
80945 +#define EP_CLEAR_NO_INTERRUPT(ATTR) ( (ATTR) & ~EP_NO_INTERRUPT)
80946 +
80947 +#define EP_IS_NO_FAILOVER(ATTR)    (((ATTR) &  EP_NO_FAILOVER) != 0)
80948 +#define EP_SET_NO_FAILOVER(ATTR)   ( (ATTR) |  EP_NO_FAILOVER)
80949 +#define EP_CLEAR_NO_FAILOVER(ATTR) ( (ATTR) & ~EP_NO_FAILOVER)
80950 +
80951 +#define EP_TYPE_MASK           0xC000
80952 +#define EP_TYPE_SVC_INDICATOR  0x4000
80953 +#define EP_TYPE_TIMEOUT        0x8000
80954 +#define EP_TYPE_RAILMASK       0xC000
80955 +
80956 +#define EP_ATTR2TYPE(ATTR)        ( (ATTR) & EP_TYPE_MASK )
80957 +
80958 +#define EP_IS_SVC_INDICATOR(ATTR) (EP_ATTR2TYPE(ATTR) == EP_TYPE_SVC_INDICATOR)
80959 +#define EP_IS_TIMEOUT(ATTR)       (EP_ATTR2TYPE(ATTR) == EP_TYPE_TIMEOUT)
80960 +#define EP_IS_RAILMASK(ATTR)      (EP_ATTR2TYPE(ATTR) == EP_TYPE_RAILMASK)
80961 +#define EP_IS_NO_TYPE(ATTR)       (EP_ATTR2TYPE(ATTR) == 0)
80962 +
80963 +#define EP_DATA_SHIFT          (16)
80964 +#define EP_DATA_MASK           0xffff0000
80965 +
80966 +#define EP_ATTR2DATA(ATTR)     (((ATTR) & EP_DATA_MASK) >> EP_DATA_SHIFT)
80967 +#define EP_DATA2ATTR(DATA)     (((DATA) <<  EP_DATA_SHIFT) & EP_DATA_MASK)
80968 +
80969 +#define EP_CLEAR_DATA(ATTR)    (((ATTR) & ~EP_TYPE_MASK) & ~EP_DATA_MASK)
80970 +#define EP_SET_DATA(ATTR,TYPE,DATA)   (EP_CLEAR_DATA(ATTR) | ((TYPE) & EP_TYPE_MASK) | (((DATA) <<  EP_DATA_SHIFT) & EP_DATA_MASK))
80971 +
80972 +#define EP_DEFAULT_TIMEOUT     (HZ*30)
80973 +
80974 +#if !defined(offsetof)
80975 +#define offsetof(s, m)         (unsigned long)(&(((s *)0)->m))
80976 +#endif
80977 +#if !defined(roundup)
80978 +#define roundup(x, y)          ((((x)+((y)-1))/(y))*(y))
80979 +#endif
80980 +
80981 +/* 
80982 + * Message transaction ID's - these are unique 64 bit
80983 + * numbers which include the initial rail number.
80984 + */
80985 +typedef struct ep_xid
80986 +{
80987 +    uint32_t   Generation;
80988 +    uint32_t   Handle;
80989 +    uint64_t   Unique;
80990 +} EP_XID;
80991 +
80992 +#define EP_INVALIDATE_XID(xid) ((xid).Generation = (xid).Handle = (xid).Unique = 0)
80993 +
80994 +#define EP_XID_INVALID(xid)    ((xid).Generation == 0 && (xid).Handle == 0 && (xid).Unique == 0)
80995 +#define EP_XIDS_MATCH(a,b)     ((a).Generation == (b).Generation && (a).Handle == (b).Handle && (a).Unique == (b).Unique)
80996 +
80997 +typedef struct ep_backoff
80998 +{
80999 +    unsigned char      type;
81000 +    unsigned char      indx;
81001 +    unsigned short     count;
81002 +} EP_BACKOFF;
81003 +
81004 +/* values for "type" */
81005 +#define EP_BACKOFF_FREE                0
81006 +#define EP_BACKOFF_ENVELOPE    1
81007 +#define EP_BACKOFF_FETCH       2
81008 +#define EP_BACKOFF_DATA                3
81009 +#define EP_BACKOFF_DONE                4
81010 +#define EP_BACKOFF_STABILISE   5
81011 +
81012 +#ifndef __ELAN__
81013 +
81014 +/* forward declaration of types */
81015 +typedef struct ep_rail EP_RAIL;
81016 +typedef struct ep_sys  EP_SYS;
81017 +
81018 +#include <elan/nmh.h>
81019 +#include <elan/kmap.h>
81020 +#include <elan/statemap.h>
81021 +#include <elan/kalloc.h>
81022 +#include <elan/kthread.h>
81023 +#include <elan/kcomm_stats.h>
81024 +#include <elan/devinfo.h>
81025 +
81026 +typedef struct ep_callback
81027 +{
81028 +    struct ep_callback *Next;
81029 +    void              (*Routine)(void *, statemap_t *);
81030 +    void              *Arg;
81031 +} EP_CALLBACK;
81032 +
81033 +#define EP_CB_FLUSH_FILTERING          0
81034 +#define EP_CB_FLUSH_FLUSHING           1
81035 +#define EP_CB_PASSIVATED               2
81036 +#define EP_CB_FAILOVER                 3
81037 +#define EP_CB_DISCONNECTING            4
81038 +#define EP_CB_DISCONNECTED             5
81039 +#define EP_CB_NODESET                  6
81040 +#define EP_CB_COUNT                    7
81041 +
81042 +#endif /* !defined(__ELAN__) */
81043 +
81044 +/* Small unreliable system message queues */
81045 +#define EP_SYSTEMQ_INTR                        0                       /* input queue for cluster membership generating an interrupt */
81046 +#define EP_SYSTEMQ_POLLED              1                       /* input queue for cluster membership polled on clock tick */
81047 +#define EP_SYSTEMQ_MANAGER             2                       /* input queue for manager messages */
81048 +#define EP_NUM_SYSTEMQ                 64
81049 +
81050 +#define EP_SYSTEMQ_ADDR(qnum)          (EP_SYSTEM_QUEUE_BASE + (qnum) * EP_QUEUE_DESC_SIZE)
81051 +#define EP_SYSTEMQ_DESC(base,qnum)     ((base) + (qnum) * EP_QUEUE_DESC_SIZE)
81052 +
81053 +#define EP_SYSTEMQ_MSG_ALIGN           64                      /* message sizes aligned to 64 byte boundaries */
81054 +#define EP_SYSTEMQ_MSG_MAX             (4*64)                  /* max message size */
81055 +
81056 +/* Special flag for Version field to indicate message not
81057 + * seen in main memory yet and time limit to poll for it */
81058 +#define EP_SYSTEMQ_UNRECEIVED                  0xdeadbabe
81059 +#define EP_SYSTEMQ_UNRECEIVED_TLIMIT           16384                   /* 1023 uS */
81060 +
81061 +#ifndef __ELAN__
81062 +
81063 +typedef void (EP_INPUTQ_HANDLER) (EP_RAIL *rail, void *arg, void *msg);
81064 +typedef void (EP_INPUTQ_CALLBACK) (EP_RAIL *rail, void *arg);
81065 +
81066 +typedef struct ep_inputq
81067 +{
81068 +    unsigned long      q_hidden;                               /* implementation hidden as ep3 or ep4 */
81069 +} EP_INPUTQ;
81070 +
81071 +typedef struct ep_outputq
81072 +{
81073 +    unsigned long      q_hidden;                               /* implementation hidden as ep3 or ep4 */
81074 +} EP_OUTPUTQ;
81075 +
81076 +/* returned values for ep_outputq_state */
81077 +#define EP_OUTPUTQ_BUSY                0
81078 +#define EP_OUTPUTQ_FAILED      1
81079 +#define EP_OUTPUTQ_FINISHED    2
81080 +
81081 +typedef struct ep_switch
81082 +{
81083 +    unsigned    present:1;
81084 +    unsigned   invalid:1;
81085 +    unsigned   link:3;
81086 +    unsigned   bcast:3;
81087 +    unsigned   lnr;
81088 +} EP_SWITCH;
81089 +
81090 +/*
81091 + * Network error fixup, flush, relocation messages
81092 + */
81093 +typedef struct ep_map_nmd_body
81094 +{
81095 +    uint32_t           nFrags;
81096 +    EP_RAILMASK                Railmask;
81097 +    EP_NMD             Nmd[EP_MAXFRAG];
81098 +} EP_MAP_NMD_BODY;
81099 +
81100 +typedef struct ep_failover_body
81101 +{
81102 +    EP_XID             Xid;
81103 +    EP_RAILMASK                Railmask;
81104 +} EP_FAILOVER_BODY;
81105 +
81106 +typedef struct ep_failover_txd
81107 +{
81108 +    EP_XID             Xid;
81109 +    uint32_t           Rail;
81110 +    EP_ADDR            TxdRail;
81111 +} EP_FAILOVER_TXD;
81112 +
81113 +typedef uint64_t EP_NETERR_COOKIE;
81114 +
81115 +#define EP_PANIC_STRLEN                31
81116 +
81117 +typedef struct ep_node_state
81118 +{
81119 +    unsigned char       State;
81120 +    unsigned char       NetworkErrorState;
81121 +    EP_RAILMASK         Railmask;
81122 +} EP_NODE_STATE;
81123 +
81124 +#define EP_MANAGER_MSG_SIZE            (2 * EP_SYSTEMQ_MSG_ALIGN)
81125 +
81126 +typedef struct ep_manager_msg_hdr
81127 +{
81128 +    EP_XID             Xid;                                    /* Message transaction id */
81129 +
81130 +    uint16_t           NodeId;                                 /* Originating node number */
81131 +    uint16_t           DestId;                                 /* destination node id */
81132 +
81133 +    uint16_t           Checksum;                               /* Message checksum */
81134 +    uint8_t            Rail;                                   /* Rail message associated with */
81135 +    uint8_t            Type;                                   /* Message type */
81136 +
81137 +    uint32_t           Pad;                                    /* pad to 32 bytes */
81138 +
81139 +    uint32_t           Version;                                /* Message Version */
81140 +} EP_MANAGER_MSG_HDR;
81141 +
81142 +typedef union ep_manager_msg_body
81143 +{
81144 +    unsigned char       Space[EP_MANAGER_MSG_SIZE - sizeof (EP_MANAGER_MSG_HDR)];
81145 +
81146 +    EP_NETERR_COOKIE   Cookies[2];                             /* EP_MSG_TYPE_NETERR */
81147 +    EP_MAP_NMD_BODY    MapNmd;                                 /* EP_MSG_TYPE_MAP_NMD */
81148 +    EP_FAILOVER_BODY   Failover;                               /* EP_MSG_TYPE_FAILOVER_REQUEST */
81149 +    EP_FAILOVER_TXD    FailoverTxd;                            /* EP_MSG_TYPE_FAILOVER_RESPONSE */
81150 +    unsigned char       PanicReason[EP_PANIC_STRLEN+1];                /* EP_MSG_TYPE_REMOTE_PANIC */
81151 +    EP_NODE_STATE       NodeState;                              /* EP_MSG_TYPE_GET_NODE_STATE_RESPONSE */   
81152 +    EP_SERVICE          Service;                                /* EP_MSG_TYPE_GET_NODE_STATE */
81153 +} EP_MANAGER_MSG_BODY;
81154 +
81155 +typedef struct ep_manager_msg
81156 +{
81157 +    EP_MANAGER_MSG_BODY Body;
81158 +    EP_MANAGER_MSG_HDR  Hdr;
81159 +} EP_MANAGER_MSG;
81160 +
81161 +#define EP_MANAGER_MSG_VERSION                         0xcad01000
81162 +#define EP_MANAGER_MSG_TYPE_REMOTE_PANIC               0x00
81163 +#define EP_MANAGER_MSG_TYPE_NETERR_REQUEST             0x01
81164 +#define EP_MANAGER_MSG_TYPE_NETERR_RESPONSE            0x02
81165 +#define EP_MANAGER_MSG_TYPE_FLUSH_REQUEST              0x03
81166 +#define EP_MANAGER_MSG_TYPE_FLUSH_RESPONSE             0x04
81167 +#define EP_MANAGER_MSG_TYPE_MAP_NMD_REQUEST            0x05
81168 +#define EP_MANAGER_MSG_TYPE_MAP_NMD_RESPONSE           0x06
81169 +#define EP_MANAGER_MSG_TYPE_FAILOVER_REQUEST           0x07
81170 +#define EP_MANAGER_MSG_TYPE_FAILOVER_RESPONSE          0x08
81171 +#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE              0x09
81172 +#define EP_MANAGER_MSG_TYPE_GET_NODE_STATE_RESPONSE     0x0a
81173 +
81174 +/* Message types which should only be sent when a rail is connected */
81175 +#define EP_MANAGER_MSG_TYPE_CONNECTED(type)            (((type) & 1) == 1)
81176 +
81177 +#define EP_MANAGER_OUTPUTQ_SLOTS       128                     /* # entries in outputq */
81178 +#define EP_MANAGER_INPUTQ_SLOTS                128                     /* # entries in inputq */
81179 +#define EP_MANAGER_OUTPUTQ_RETRIES     31                      /* # retries for manager messages */
81180 +
81181 +/* XID's are allocated from a cache, which doesn't
81182 + * require locking since it relies on the caller to
81183 + * manage the locking for us.
81184 + */
81185 +typedef struct ep_xid_cache
81186 +{
81187 +    struct list_head   Link;
81188 +
81189 +    uint32_t           Handle;                                 /* my XID cache handle */
81190 +    uint64_t           Current;                                /* range of XID.Unique we can allocate from */
81191 +    uint64_t           Last;
81192 +
81193 +    void             (*MessageHandler)(void *arg, EP_MANAGER_MSG *);
81194 +    void              *Arg;
81195 +} EP_XID_CACHE;
81196 +
81197 +#define EP_XID_CACHE_CHUNKS    (10000)
81198 +
81199 +typedef struct ep_node_rail
81200 +{
81201 +    struct list_head    Link;                                  /* can be linked on work lists */
81202 +
81203 +    unsigned char       State;                                 /* node connection state */
81204 +    unsigned char      NetworkErrorState;                      /* reasons for keeping the context filter up */
81205 +    unsigned char      MessageState;                           /* state of messages during passivate/relocate */
81206 +
81207 +    EP_XID             MsgXid;                                 /* neterr/flush transaction id */
81208 +    long               NextRunTime;                            /* time to drop context filter for destroyed dma packet, or to send next request */
81209 +    EP_NETERR_COOKIE   NetworkErrorCookies[2];                 /* identify cookie for destroyed atomic packet */
81210 +
81211 +    uint32_t           Cookie;                                 /* per-node network error cookie */
81212 +    spinlock_t         CookieLock;                             /* and spinlock for it. */
81213 +
81214 +    struct list_head    StalledDmas;                           /* list of stalled DMAs */
81215 +} EP_NODE_RAIL;
81216 +
81217 +#define EP_NODE_DISCONNECTED           0                       /* node is disconnected */
81218 +#define EP_NODE_CONNECTING             1                       /* awaiting connection */
81219 +#define EP_NODE_CONNECTED              2                       /* node is connected */
81220 +#define EP_NODE_LEAVING_CONNECTED      3                       /* node is starting to disconnect */
81221 +#define EP_NODE_LOCAL_PASSIVATE        4                       /* flushing context filter/run queues */
81222 +#define EP_NODE_REMOTE_PASSIVATE       5                       /* stalling for neterr flush */
81223 +#define EP_NODE_PASSIVATED             6                       /* relocating active/passive messages */
81224 +#define EP_NODE_DISCONNECTING          7                       /* entering disconnected - abort remaining comms */
81225 +#define EP_NODE_NUM_STATES             8
81226 +
81227 +#define EP_NODE_NETERR_ATOMIC_PACKET   (1 << 0)
81228 +#define EP_NODE_NETERR_DMA_PACKET      (1 << 1)
81229 +
81230 +#define EP_NODE_PASSIVE_MESSAGES       (1 << 0)
81231 +#define EP_NODE_ACTIVE_MESSAGES                (1 << 1)
81232 +
81233 +/*
81234 + * Kernel thread code is loaded as a table.
81235 + */
81236 +typedef struct ep_symbol
81237 +{
81238 +    char   *name;
81239 +    EP_ADDR value;
81240 +} EP_SYMBOL;
81241 +
81242 +typedef struct ep_code
81243 +{
81244 +    u_char        *text;
81245 +    u_int         text_size;
81246 +    u_char        *data;
81247 +    u_int         data_size;
81248 +    u_char        *rodata;
81249 +    u_int         rodata_size;
81250 +    EP_SYMBOL     *symbols;
81251 +    
81252 +    int                   ntext;
81253 +    sdramaddr_t    pptext;
81254 +    EP_ADDR       etext;
81255 +    sdramaddr_t   _stext;
81256 +    sdramaddr_t          _rodata;
81257 +
81258 +    int                   ndata;
81259 +    sdramaddr_t    ppdata;
81260 +    EP_ADDR       edata;
81261 +    sdramaddr_t   _sdata;
81262 +} EP_CODE;
81263 +
81264 +typedef struct ep_switchstate
81265 +{
81266 +    unsigned char       linkid;
81267 +    unsigned char       LNR;
81268 +    unsigned char       bcast;
81269 +    unsigned char       uplink;
81270 +} EP_SWITCHSTATE;
81271 +
81272 +typedef struct ep_rail_ops
81273 +{
81274 +    void       (*DestroyRail) (EP_RAIL *rail);
81275 +
81276 +    int        (*StartRail) (EP_RAIL *rail);
81277 +    void       (*StallRail) (EP_RAIL *rail);
81278 +    void       (*StopRail) (EP_RAIL *rail);
81279 +
81280 +    sdramaddr_t (*SdramAlloc) (EP_RAIL *rail, EP_ADDR addr, unsigned size);
81281 +    void        (*SdramFree) (EP_RAIL *rail, sdramaddr_t addr, unsigned size);
81282 +    void        (*SdramWriteb) (EP_RAIL *rail, sdramaddr_t addr, unsigned char val);
81283 +    
81284 +    void       (*KaddrMap) (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t kaddr, unsigned len, unsigned int perm, int ep_attr);
81285 +    void       (*SdramMap) (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned len, unsigned int perm, int ep_attr);
81286 +    void       (*Unmap) (EP_RAIL *rail, EP_ADDR eaddr, unsigned len);
81287 +
81288 +    void       *(*DvmaReserve) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages);
81289 +    void       (*DvmaRelease) (EP_RAIL *rail, EP_ADDR eaddr, unsigned npages, void *private);
81290 +    void       (*DvmaSetPte) (EP_RAIL *rail, void *private, unsigned index, physaddr_t phys, unsigned int perm);
81291 +    physaddr_t (*DvmaReadPte) (EP_RAIL *rail, void *private, unsigned index);
81292 +    void       (*DvmaUnload)(EP_RAIL *rail, void *private, unsigned index, unsigned npages);
81293 +    void       (*FlushTlb) (EP_RAIL *rail);
81294 +
81295 +    int        (*ProbeRoute) (EP_RAIL *r, int level, int sw, int nodeid, int *linkup, 
81296 +                              int *linkdown, int attempts, EP_SWITCH *lsw);
81297 +    void       (*PositionFound) (EP_RAIL *rail, ELAN_POSITION *pos);
81298 +    int                (*CheckPosition) (EP_RAIL *rail);
81299 +    void       (*NeterrFixup) (EP_RAIL *rail, unsigned int nodeId, EP_NETERR_COOKIE *cookies);
81300 +
81301 +    void       (*LoadSystemRoute) (EP_RAIL *rail, unsigned int vp, unsigned int lowNode, unsigned int highNode);
81302 +
81303 +    void       (*LoadNodeRoute) (EP_RAIL *rail, unsigned nodeId);
81304 +    void       (*UnloadNodeRoute) (EP_RAIL *rail, unsigned nodeId);
81305 +    void       (*LowerFilter) (EP_RAIL *rail, unsigned nodeId);
81306 +    void       (*RaiseFilter) (EP_RAIL *rail, unsigned nodeId);
81307 +    void       (*NodeDisconnected) (EP_RAIL *rail, unsigned nodeId);
81308 +
81309 +    void       (*FlushFilters) (EP_RAIL *rail);
81310 +    void       (*FlushQueues) (EP_RAIL *rail);
81311 +
81312 +
81313 +    EP_INPUTQ  *(*AllocInputQ) (EP_RAIL *rail, unsigned qnum, unsigned slotSize, unsigned slotCount,
81314 +                               void (*callback)(EP_RAIL *rail, void *arg), void *arg);
81315 +    void       (*FreeInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
81316 +    void       (*EnableInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
81317 +    void       (*DisableInputQ) (EP_RAIL *rail, EP_INPUTQ *q);
81318 +    int                (*PollInputQ) (EP_RAIL *rail, EP_INPUTQ *q, int maxCount, EP_INPUTQ_HANDLER *handler, void *arg);
81319 +
81320 +    EP_OUTPUTQ *(*AllocOutputQ) (EP_RAIL *rail, unsigned slotSize, unsigned slotCount);
81321 +    void       (*FreeOutputQ) (EP_RAIL *rail, EP_OUTPUTQ *outputq);
81322 +    void       *(*OutputQMsg) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum);
81323 +    int         (*OutputQState) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum);
81324 +    int                (*OutputQSend) (EP_RAIL *rail, EP_OUTPUTQ *outputq, unsigned slotNum, unsigned size,
81325 +                               unsigned vp, unsigned qnum, unsigned retries);
81326 +
81327 +    void        (*FillOutStats) (EP_RAIL *rail, char *str);
81328 +    void       (*Debug) (EP_RAIL *rail);
81329 +
81330 +} EP_RAIL_OPS;
81331 +
81332 +#define ep_alloc_inputq(rail,qnum,slotSize,slotCount,callback,arg) \
81333 +       (rail)->Operations.AllocInputQ(rail,qnum,slotSize,slotCount,callback,arg)
81334 +#define ep_free_inputq(rail,inputq) \
81335 +       (rail)->Operations.FreeInputQ(rail,inputq)
81336 +#define ep_enable_inputq(rail,inputq) \
81337 +       (rail)->Operations.EnableInputQ(rail,inputq)
81338 +#define ep_disable_inputq(rail,inputq) \
81339 +       (rail)->Operations.DisableInputQ(rail,inputq)
81340 +#define ep_poll_inputq(rail,inputq,maxCount,handler,arg) \
81341 +       (rail)->Operations.PollInputQ(rail,inputq,maxCount,handler,arg)
81342 +#define ep_alloc_outputq(rail,slotSize,slotCount)\
81343 +       (rail)->Operations.AllocOutputQ(rail,slotSize,slotCount)
81344 +#define ep_free_outputq(rail,outputq)\
81345 +       (rail)->Operations.FreeOutputQ(rail,outputq)
81346 +#define ep_outputq_msg(rail,outputq,slotNum)\
81347 +       (rail)->Operations.OutputQMsg(rail,outputq,slotNum)
81348 +#define ep_outputq_state(rail,outputq,slotNum)\
81349 +       (rail)->Operations.OutputQState(rail,outputq,slotNum)
81350 +#define ep_outputq_send(rail,outputq,slotNum,size,vp,qnum,retries)\
81351 +       (rail)->Operations.OutputQSend(rail,outputq,slotNum,size,vp,qnum,retries)
81352 +
81353 +struct ep_rail
81354 +{
81355 +    EP_SYS            *System;                                 /* "system" we've attached to */
81356 +
81357 +    unsigned char      Number;                                 /* Rail number */
81358 +    unsigned char       State;                                 /* Rail state */
81359 +    char               Name[32];                               /* Rail name */
81360 +
81361 +    struct list_head    ManagerLink;                           /* linked on ManagedRails list */
81362 +
81363 +    ELAN_DEVINFO       Devinfo;                                /* Device information for this rail */
81364 +    ELAN_POSITION       Position;                              /* Position on switch device is connected to */
81365 +
81366 +    EP_RAIL_OPS                Operations;                             /* device specific operations */
81367 +    EP_RAIL_STATS      Stats;                                  /* statistics */
81368 +
81369 +    EP_ALLOC            ElanAllocator;                         /* per-rail elan memory allocator */
81370 +    EP_ALLOC            MainAllocator;                         /* per-rail main memory allocator */
81371 +
81372 +    unsigned           TlbFlushRequired;                       /* lazy TLB flushing */
81373 +
81374 +    int                SwitchBroadcastLevel;                   /* current switch level ok for broadcast */
81375 +    unsigned long       SwitchBroadcastLevelTick;
81376 +
81377 +    int                        SwitchProbeLevel;                       /* result of last switch probe */
81378 +    EP_SWITCHSTATE      SwitchState[ELAN_MAX_LEVELS];
81379 +    EP_SWITCHSTATE      SwitchLast[ELAN_MAX_LEVELS];
81380 +    unsigned long       SwitchProbeTick[ELAN_MAX_LEVELS];
81381 +    
81382 +    /* Node disconnecting/connecting state */
81383 +    EP_CALLBACK        *CallbackList[EP_CB_COUNT];             /* List of callbacks */
81384 +    kmutex_t           CallbackLock;                           /*   and lock for it. */
81385 +    unsigned           CallbackStep;                           /*  step through UpdateConnectionState. */
81386 +
81387 +    /* back pointer for cluster membership */
81388 +    void              *ClusterRail;
81389 +
81390 +    /* Per node state for message passing */
81391 +    EP_NODE_RAIL       *Nodes;                                 /* array of per-node state */
81392 +    statemap_t         *NodeSet;                               /* per-rail statemap of connected nodes */
81393 +    statemap_t        *NodeChangeMap;                          /* statemap of nodes to being connected/disconnected */
81394 +    statemap_t        *NodeChangeTmp;                          /*   and temporary copies */
81395 +
81396 +    struct list_head    NetworkErrorList;                      /* list of nodes resolving network errors */
81397 +    struct list_head    LocalPassivateList;                    /* list of nodes in state LOCAL_PASSIVATE */
81398 +    struct list_head    RemotePassivateList;                   /* list of nodes waiting for remote network error flush */
81399 +    struct list_head    PassivatedList;                                /* list of nodes performing message relocation */
81400 +    struct list_head    DisconnectingList;                     /* list of nodes transitioning to disconnected */
81401 +
81402 +    EP_XID_CACHE       XidCache;                               /* XID cache for node messages (single threaded access) */
81403 +
81404 +    /* Manager messages */
81405 +    EP_INPUTQ         *ManagerInputQ;
81406 +    EP_OUTPUTQ        *ManagerOutputQ;
81407 +    unsigned           ManagerOutputQNextSlot;
81408 +    spinlock_t         ManagerOutputQLock;
81409 +
81410 +    /* /proc entries */
81411 +    struct proc_dir_entry *ProcDir;
81412 +    struct proc_dir_entry *SvcIndicatorDir;
81413 +    int                    CallbackRegistered;
81414 +};
81415 +
81416 +/* values for State */
81417 +#define EP_RAIL_STATE_UNINITIALISED    0                       /* device uninitialised */
81418 +#define EP_RAIL_STATE_STARTED          1                       /* device started but network position unknown */
81419 +#define EP_RAIL_STATE_RUNNING          2                       /* device started and position known */
81420 +#define EP_RAIL_STATE_INCOMPATIBLE     3                       /* device started, but position incompatible */
81421 +
81422 +typedef struct ep_rail_entry
81423 +{
81424 +    struct list_head   Link;
81425 +    EP_RAIL           *Rail;
81426 +} EP_RAIL_ENTRY;
81427 +
81428 +typedef struct ep_subsys
81429 +{
81430 +    EP_SYS            *Sys;
81431 +
81432 +    struct list_head   Link;                                   /* Linked on sys->Subsystems */
81433 +    char              *Name;                                   /* Name to lookup */
81434 +    
81435 +    void              (*Destroy)    (struct ep_subsys *subsys, EP_SYS *sys);
81436 +
81437 +    int                       (*AddRail)    (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
81438 +    void              (*RemoveRail) (struct ep_subsys *subsys, EP_SYS *sys, EP_RAIL *rail);
81439 +} EP_SUBSYS;
81440 +
81441 +typedef struct ep_node
81442 +{
81443 +    EP_RAILMASK                ConnectedRails;
81444 +} EP_NODE;
81445 +
81446 +struct ep_sys
81447 +{
81448 +    EP_RAIL         *Rails[EP_MAX_RAILS];                      /* array of all available devices */
81449 +
81450 +    kmutex_t        StartStopLock;                             /* lock for starting stopping rails */
81451 +
81452 +    ELAN_POSITION    Position;                                 /* primary node position */
81453 +
81454 +    EP_NMH_TABLE     MappingTable;                             /* Network mapping handle table */
81455 +
81456 +    EP_ALLOC        Allocator;                                 /* shared main memory allocator */
81457 +
81458 +    EP_DVMA_STATE    DvmaState;                                        /* dvma state */
81459 +
81460 +    kmutex_t        SubsysLock;                               /* lock on the Subsytems list */
81461 +    struct list_head Subsystems;                               /* list of subsystems */
81462 +
81463 +    /* device manager state */
81464 +    struct list_head ManagedRails;                             /* list of managed devices */
81465 +    EP_KTHREAD       ManagerThread;                            /* place for manager thread to sleep */
81466 +
81467 +    /* global node state */
81468 +    spinlock_t      NodeLock;                                  /* spinlock for node state (including per-device node state) */
81469 +    EP_NODE        *Nodes;                                     /* system wide node state */
81470 +    statemap_t      *NodeSet;                                  /* system wide nodeset */
81471 +    struct list_head NodesetCallbackList;                      /* list of "callbacks" */
81472 +
81473 +    /* Transaction Id */
81474 +    struct list_head XidCacheList;                             /* list of XID caches */
81475 +    uint32_t        XidGeneration;                             /* XID generation number (distinguishes reboots) */
81476 +    uint32_t        XidHandle;                                 /* XID handles (distinguishes XID caches) */
81477 +    uint64_t        XidNext;                                   /* next XID to prime cache */
81478 +    spinlock_t      XidLock;                                   /*   and it's spinlock  */
81479 +
81480 +    /* Shutdown/Panic */
81481 +    unsigned int     Shutdown;                                 /* node has shutdown/panic'd */
81482 +};
81483 +
81484 +#if defined(DEBUG_ASSERT)
81485 +extern int ep_assfail (EP_RAIL *rail, const char *string, const char *func, const char *file, const int line);
81486 +extern int sdram_assert;
81487 +extern int assfail_mode;
81488 +
81489 +#define EP_ASSERT(rail, EX)    do { \
81490 +    if (!(EX) && ep_assfail ((EP_RAIL *) (rail), #EX, __FUNCTION__, __FILE__, __LINE__)) { \
81491 +       BUG(); \
81492 +    } \
81493 +} while (0)
81494 +#define EP_ASSFAIL(rail,EX)    do { \
81495 +   if (ep_assfail ((EP_RAIL *) (rail), EX, __FUNCTION__, __FILE__, __LINE__)) { \
81496 +       BUG(); \
81497 +    } \
81498 +} while (0)
81499 +#define SDRAM_ASSERT(EX)       (sdram_assert ? (EX) : 1)
81500 +#else
81501 +#define EP_ASSERT(rail, EX)    ((void) 0)
81502 +#define EP_ASSFAIL(rail,str)   ((void) 0)
81503 +#define SDRAM_ASSERT(EX)       (1)
81504 +#endif
81505 +
81506 +/* conf_osdep.c */
81507 +extern EP_SYS    *ep_system(void);
81508 +extern void       ep_mod_dec_usecount (void);
81509 +extern void       ep_mod_inc_usecount (void);
81510 +
81511 +/* procfs_osdep.c */
81512 +extern struct proc_dir_entry *ep_procfs_root;
81513 +extern struct proc_dir_entry *ep_config_root;
81514 +
81515 +/* kcomm.c */
81516 +extern int        ep_sys_init (EP_SYS *sys);
81517 +extern void       ep_sys_fini (EP_SYS *sys);
81518 +extern void      ep_shutdown (EP_SYS *sys);
81519 +extern int        ep_init_rail (EP_SYS *sys, EP_RAIL *rail);
81520 +extern void       ep_destroy_rail (EP_RAIL *rail);
81521 +extern int        ep_start_rail (EP_RAIL *rail);
81522 +extern void       ep_stop_rail (EP_RAIL *rail);
81523 +
81524 +extern void       ep_connect_node (EP_RAIL *rail, int nodeId);
81525 +extern int        ep_disconnect_node (EP_RAIL *rail, int nodeId);
81526 +
81527 +extern EP_XID     ep_xid_cache_alloc (EP_SYS *sys, EP_XID_CACHE *cache);
81528 +extern void       ep_xid_cache_init (EP_SYS *sys, EP_XID_CACHE *cache);
81529 +extern void       ep_xid_cache_destroy (EP_SYS *sys, EP_XID_CACHE *cache);
81530 +
81531 +extern int        ep_send_message (EP_RAIL *rail, int nodeId, int type, EP_XID xid, EP_MANAGER_MSG_BODY *body);
81532 +
81533 +extern void       ep_panic_node (EP_SYS *sys, int nodeId, unsigned char *reason);
81534 +
81535 +extern void      ep_subsys_add (EP_SYS *sys, EP_SUBSYS *subsys);
81536 +extern void      ep_subsys_del (EP_SYS *sys, EP_SUBSYS *subsys);
81537 +extern EP_SUBSYS *ep_subsys_find (EP_SYS *sys, char *name);
81538 +
81539 +extern void       DisplayNodes (EP_RAIL *rail);
81540 +
81541 +extern void       ep_fillout_stats(EP_RAIL *rail, char *str);
81542 +
81543 +/* neterr.c */
81544 +extern void       ep_queue_network_error (EP_RAIL *rail, int nodeId, int what, int channel, EP_NETERR_COOKIE cookie);
81545 +
81546 +/* kcomm_elan3.c */
81547 +extern unsigned int ep3_create_rails (EP_SYS *sys, unsigned int disabled);
81548 +
81549 +/* kcomm_elan4.c */
81550 +extern unsigned int ep4_create_rails (EP_SYS *sys, unsigned int disabled);
81551 +
81552 +/* probenetwork.c */
81553 +extern int       ProbeNetwork (EP_RAIL *rail, ELAN_POSITION *pos);
81554 +extern void      CheckPosition (EP_RAIL *rail);
81555 +
81556 +extern uint16_t   CheckSum (char *msg, int nob);
81557 +
81558 +/* threadcode.c */
81559 +extern EP_ADDR    ep_symbol (EP_CODE *code, char *name);
81560 +extern int        ep_loadcode (EP_RAIL *rail, EP_CODE *code);
81561 +extern void       ep_unloadcode (EP_RAIL *rail, EP_CODE *code);
81562 +
81563 +/* Public interface */
81564 +/* debug.c */
81565 +extern int              ep_sprintf_bitmap (char *str, unsigned nbytes, bitmap_t *bitmap, int base, int count, int off);
81566 +extern void             ep_display_bitmap (char *prefix, char *tag, bitmap_t *bitmap, unsigned base, unsigned nbits);
81567 +
81568 +/* epcomms.c */
81569 +extern int              ep_waitfor_nodeid (EP_SYS *sys);
81570 +extern int              ep_nodeid (EP_SYS *sys);
81571 +extern int              ep_numnodes (EP_SYS *sys);
81572 +
81573 +/* railhints.c */
81574 +extern int              ep_pickRail(EP_RAILMASK railmask);
81575 +
81576 +/* support.c */
81577 +extern int              ep_register_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg);
81578 +extern void             ep_remove_nodeset_callback (EP_SYS *sys, void (*routine)(void *, statemap_t *), void *arg);
81579 +extern void             ep_call_nodeset_callbacks (EP_SYS *sys, statemap_t *map);
81580 +
81581 +extern int              ep_register_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg);
81582 +extern void             ep_remove_callback (EP_RAIL *rail, unsigned idx, void (*routine)(void *, statemap_t *), void *arg);
81583 +extern void             ep_call_callbacks (EP_RAIL *rail, unsigned idx, statemap_t *);
81584 +extern unsigned int     ep_backoff (EP_BACKOFF *backoff, int type);
81585 +
81586 +#endif /* !__ELAN__ */
81587 +
81588 +#endif /* __ELAN_KCOMM_H */
81589 +
81590 +/*
81591 + * Local variables:
81592 + * c-file-style: "stroustrup"
81593 + * End:
81594 + */
81595 diff -urN clean/include/elan/kcomm_stats.h linux-2.6.9/include/elan/kcomm_stats.h
81596 --- clean/include/elan/kcomm_stats.h    1969-12-31 19:00:00.000000000 -0500
81597 +++ linux-2.6.9/include/elan/kcomm_stats.h      2005-05-31 07:42:43.000000000 -0400
81598 @@ -0,0 +1,153 @@
81599 +/*
81600 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81601 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
81602 + *
81603 + *    For licensing information please see the supplied COPYING file
81604 + *
81605 + */
81606 +
81607 +#ifndef __EP_EPSTATS_H
81608 +#define __EP_EPSTATS_H
81609 +
81610 +#ident "$Id: kcomm_stats.h,v 1.5.2.1 2005/05/31 11:42:43 mike Exp $"
81611 +/*      $Source: /cvs/master/quadrics/epmod/kcomm_stats.h,v $ */
81612 +
81613 +#define EP_BUCKET_SLOTS                        8
81614 +
81615 +#define BucketStat(obj,stat,size)      ((size) < 128   ? (obj)->Stats.stat[0]++ : \
81616 +                                        (size) < 512   ? (obj)->Stats.stat[1]++ : \
81617 +                                        (size) < 1024  ? (obj)->Stats.stat[2]++ : \
81618 +                                        (size) < 8192  ? (obj)->Stats.stat[3]++ : \
81619 +                                        (size) < 16384 ? (obj)->Stats.stat[4]++ : \
81620 +                                        (size) < 32768 ? (obj)->Stats.stat[5]++ : \
81621 +                                        (size) < 65536 ? (obj)->Stats.stat[6]++ : \
81622 +                                        (obj)->Stats.stat[7]++)
81623 +#define IncrStat(obj,stat)             ((obj)->Stats.stat++)
81624 +
81625 +
81626 +#define EP3_NUM_DMA_FAIL               11      /* NOTE - the same as EP_NUM_RETRIES */
81627 +
81628 +#define ADD_STAT(STATS,STAT,VALUE) { unsigned long now = lbolt;\
81629 +   STATS.STAT.total += VALUE; \
81630 +   if  ( (long)( now - STATS.STAT.last_time) > HZ ){ \
81631 + STATS.STAT.last_per_sec = ( STATS.STAT.total - STATS.STAT.last_count)/ ( ( (long)( now - STATS.STAT.last_time) + (HZ/2)) / HZ);\
81632 + STATS.STAT.last_time = now; \
81633 + STATS.STAT.last_count = STATS.STAT.total; \
81634 +   }} \
81635 +
81636 +#define INC_STAT(STATS,STAT) ADD_STAT(STATS,STAT,1)
81637 +
81638 +#define GET_STAT_PER_SEC(STATS, STAT) (  (( lbolt - STATS.STAT.last_time ) < (HZ * 5)) ? STATS.STAT.last_per_sec : 0 )
81639 +#define GET_STAT_TOTAL(STATS, STAT) ( STATS.STAT.total )
81640 +
81641 +struct ep_stats_count 
81642 +{
81643 +    unsigned long total;
81644 +    unsigned long last_time;
81645 +    unsigned long last_count;
81646 +    unsigned long last_per_sec;
81647 +};
81648 +
81649 +typedef struct ep_stats_count          EP_STATS_COUNT;
81650 +
81651 +typedef struct ep3_rail_stats
81652 +{
81653 +    unsigned long      IssueDmaFail[EP3_NUM_DMA_FAIL];
81654 +
81655 +    unsigned long      DmaQueueLength[EP_BUCKET_SLOTS];
81656 +    unsigned long      CprocDmaQueueOverflow;
81657 +    unsigned long      DprocDmaQueueOverflow;
81658 +    unsigned long      IprocDmaQueueOverflow;
81659 +    unsigned long      CprocEventQueueOverflow;
81660 +    unsigned long      DprocEventQueueOverflow;
81661 +    unsigned long      IprocEventQueueOverflow;
81662 +
81663 +    unsigned long      QueueingPacketTrap;
81664 +    unsigned long      DmaIdentifyTrap;
81665 +    unsigned long      ThreadIdentifyTrap;
81666 +    unsigned long      DmaPacketTrap;
81667 +} EP3_RAIL_STATS;
81668 +
81669 +typedef struct ep4_rail_stats
81670 +{
81671 +    unsigned long       somestatsgohere;
81672 +} EP4_RAIL_STATS;
81673 +
81674 +typedef struct ep_rail_stats
81675 +{
81676 +    unsigned long      SendMessageFailed;
81677 +    unsigned long      NeterrAtomicPacket;
81678 +    unsigned long       NeterrDmaPacket;
81679 +
81680 +    EP_STATS_COUNT      rx;
81681 +    EP_STATS_COUNT      rx_len;
81682 +
81683 +    EP_STATS_COUNT      tx;
81684 +    EP_STATS_COUNT      tx_len;
81685 +
81686 +} EP_RAIL_STATS;
81687 +
81688 +typedef struct ep_cm_rail_stats
81689 +{
81690 +    /* cluster membership statistics */
81691 +    unsigned long      HeartbeatsSent;
81692 +    unsigned long      HeartbeatsRcvd;
81693 +    
81694 +    unsigned long      RetryHeartbeat;
81695 +    unsigned long      RejoinRequest;
81696 +    unsigned long      RejoinTooSlow;
81697 +    unsigned long      LaunchMessageFail;
81698 +    unsigned long      MapChangesSent;
81699 +
81700 +    /* Heartbeat scheduling stats */
81701 +    unsigned long      HeartbeatOverdue;
81702 +} EP_CM_RAIL_STATS;
81703 +
81704 +typedef struct ep_comms_rail_stats
81705 +{
81706 +    /* kernel comms large message statistics */
81707 +    unsigned long      TxEnveEvent;
81708 +    unsigned long      TxDataEvent;
81709 +    unsigned long      TxDoneEvent;
81710 +    unsigned long      RxDoneEvent;
81711 +    unsigned long      MulticastTxDone;
81712 +    unsigned long      QueueReceive;
81713 +
81714 +    unsigned long      TxEnveRetry;
81715 +    unsigned long      TxDataRetry;
81716 +    unsigned long      TxDoneRetry;
81717 +    unsigned long      RxThrdEvent;
81718 +    unsigned long      RxDataRetry;
81719 +    unsigned long      RxDoneRetry;
81720 +    unsigned long      StallThread;
81721 +    unsigned long      ThrdWaiting;
81722 +    unsigned long      CompleteEnvelope;
81723 +
81724 +    unsigned long      NoFreeTxds;
81725 +    unsigned long      NoFreeRxds;
81726 +
81727 +    unsigned long      LockRcvrTrapped;
81728 +} EP_COMMS_RAIL_STATS;
81729 +
81730 +typedef struct ep_comms_stats
81731 +{
81732 +    unsigned long      DataXmit[8];
81733 +    unsigned long      McastXmit[8];
81734 +    unsigned long      RPCXmit[8];
81735 +    unsigned long      RPCPut[8];
81736 +    unsigned long      RPCGet[8];
81737 +    unsigned long      CompleteRPC[8];
81738 +    unsigned long      RxData[8];
81739 +    unsigned long      RxMcast[8];
81740 +
81741 +    unsigned long      NoFreeTxds;
81742 +    unsigned long      NoFreeRxds;
81743 +} EP_COMMS_STATS;
81744 +
81745 +#endif /* __EP_EPSTATS_H */
81746 +
81747 +/*
81748 + * Local variables:
81749 + * c-file-style: "stroustrup"
81750 + * End:
81751 + */
81752 diff -urN clean/include/elan/kmap.h linux-2.6.9/include/elan/kmap.h
81753 --- clean/include/elan/kmap.h   1969-12-31 19:00:00.000000000 -0500
81754 +++ linux-2.6.9/include/elan/kmap.h     2004-12-14 05:19:23.000000000 -0500
81755 @@ -0,0 +1,68 @@
81756 +/*
81757 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81758 + *
81759 + *    For licensing information please see the supplied COPYING file
81760 + *
81761 + */
81762 +
81763 +#ifndef __ELAN_KMAP_H
81764 +#define __ELAN_KMAP_H
81765 +
81766 +#ident "$Id: kmap.h,v 1.4 2004/12/14 10:19:23 mike Exp $"
81767 +/*      $Source: /cvs/master/quadrics/epmod/kmap.h,v $ */
81768 +
81769 +#include <elan/rmap.h>
81770 +
81771 +extern void ep_perrail_kaddr_map (EP_RAIL *rail, EP_ADDR eaddr, virtaddr_t vaddr, unsigned long len, unsigned int perm, int ep_attr);
81772 +extern void ep_perrail_sdram_map (EP_RAIL *rail, EP_ADDR eaddr, sdramaddr_t saddr, unsigned long len, unsigned int perm, int ep_attr);
81773 +extern void ep_perrail_unmap (EP_RAIL *rail, EP_ADDR eaddr, unsigned long len);
81774 +extern void ep_perrail_dvma_sync (EP_RAIL *rail);
81775 +
81776 +typedef struct ep_dvma_nmh
81777 +{
81778 +    EP_NMH             dvma_nmh;
81779 +    
81780 +    struct list_head   dvma_link;                              /* chained on ep_dvma_state */
81781 +    unsigned           dvma_perm;                              /* permissions for region */
81782 +
81783 +    spinlock_t         dvma_lock;
81784 +    EP_RAILMASK                dvma_railmask;                          /* bitmap of rails */
81785 +    EP_RAIL           *dvma_rails[EP_MAX_RAILS];               /* assoicated rails */
81786 +    void              *dvma_private[EP_MAX_RAILS];             /* pointers to rail private data */
81787 +    unsigned int        dvma_attrs[1];                         /* bitmap of which rails pages are loaded NOTE - max 32 rails */
81788 +} EP_DVMA_NMH;
81789 +
81790 +/* values for dvma_perm */
81791 +#define EP_PERM_EXECUTE                0
81792 +#define EP_PERM_READ           1
81793 +#define EP_PERM_WRITE          2
81794 +#define EP_PERM_ALL            3
81795 +
81796 +typedef struct ep_dvma_state
81797 +{
81798 +    kmutex_t           dvma_lock;
81799 +    struct list_head    dvma_handles;
81800 +    struct list_head    dvma_rails;
81801 +    EP_RMAP           *dvma_rmap;
81802 +} EP_DVMA_STATE;
81803 +
81804 +extern void    ep_dvma_init (EP_SYS *sys);
81805 +extern void    ep_dvma_fini (EP_SYS *sys);
81806 +extern EP_NMH *ep_dvma_reserve (EP_SYS *sys, unsigned npages, unsigned perm);
81807 +extern void    ep_dvma_release (EP_SYS *sys, EP_NMH *nmh);
81808 +extern void    ep_dvma_load (EP_SYS *sys, void *map, caddr_t vaddr, unsigned len, 
81809 +                            EP_NMH *nmh, unsigned index, EP_RAILMASK *hints, EP_NMD *subset);
81810 +extern void    ep_dvma_unload (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd);
81811 +  
81812 +extern void    ep_dvma_remove_rail (EP_SYS *sys, EP_RAIL *rail);
81813 +extern int     ep_dvma_add_rail (EP_SYS *sys, EP_RAIL *rail);
81814 +
81815 +extern uint16_t rolling_check_sum (char *msg, int nob, uint16_t sum);
81816 +
81817 +#endif /* __ELAN_KMAP_H */
81818 +
81819 +/*
81820 + * Local variables:
81821 + * c-file-style: "stroustrup"
81822 + * End:
81823 + */
81824 diff -urN clean/include/elan/kmsg.h linux-2.6.9/include/elan/kmsg.h
81825 --- clean/include/elan/kmsg.h   1969-12-31 19:00:00.000000000 -0500
81826 +++ linux-2.6.9/include/elan/kmsg.h     2003-09-23 09:55:12.000000000 -0400
81827 @@ -0,0 +1,14 @@
81828 +/*
81829 + *    Copyright (c) 2003 by Quadrics Ltd.
81830 + *
81831 + *    For licensing information please see the supplied COPYING file
81832 + *
81833 + */
81834 +
81835 +#ifndef __ELAN_KMSG_H
81836 +#define __ELAN_KMSG_H
81837 +
81838 +#ident "@(#)$Id: kmsg.h,v 1.1 2003/09/23 13:55:12 david Exp $"
81839 +/*      $Source: /cvs/master/quadrics/epmod/kmsg.h,v $ */
81840 +
81841 +#endif /* __ELAN_KMSG_H */
81842 diff -urN clean/include/elan/kthread.h linux-2.6.9/include/elan/kthread.h
81843 --- clean/include/elan/kthread.h        1969-12-31 19:00:00.000000000 -0500
81844 +++ linux-2.6.9/include/elan/kthread.h  2004-05-06 10:24:08.000000000 -0400
81845 @@ -0,0 +1,53 @@
81846 +/*
81847 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81848 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
81849 + *
81850 + *    For licensing information please see the supplied COPYING file
81851 + *
81852 + */
81853 +
81854 +#ifndef __ELAN3_KTHREAD_H
81855 +#define __ELAN3_KTHREAD_H
81856 +
81857 +#ident "@(#)$Id: kthread.h,v 1.4 2004/05/06 14:24:08 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
81858 +/*      $Source: /cvs/master/quadrics/epmod/kthread.h,v $*/
81859 +
81860 +typedef struct ep_kthread
81861 +{
81862 +       kcondvar_t      wait;                                   /* place to sleep */
81863 +       spinlock_t      lock;                                   /* and lock */
81864 +       long            next_run;                               /* tick when thread should next run */
81865 +       long            running;                                /* tick when thread started to run */
81866 +       unsigned short  should_stall;
81867 +       unsigned char   state;
81868 +       unsigned int    started:1;
81869 +       unsigned int    should_stop:1;
81870 +       unsigned int    stopped:1;
81871 +} EP_KTHREAD;
81872 +
81873 +#define KT_STATE_SLEEPING              0
81874 +#define KT_STATE_SCHEDULED             1
81875 +#define KT_STATE_RUNNING               2
81876 +#define KT_STATE_STALLED               3
81877 +
81878 +#define AFTER(a, b)                    ((((long)(a)) - ((long)(b))) > 0)
81879 +#define BEFORE(a,b)                    ((((long)(a)) - ((long)(b))) < 0)
81880 +
81881 +extern void ep_kthread_init (EP_KTHREAD *kt);
81882 +extern void ep_kthread_destroy (EP_KTHREAD *kt);
81883 +extern void ep_kthread_started (EP_KTHREAD *kt);
81884 +extern void ep_kthread_stopped (EP_KTHREAD *kt);
81885 +extern int  ep_kthread_should_stall (EP_KTHREAD *kth);
81886 +extern int  ep_kthread_sleep (EP_KTHREAD *kth, long next_run);
81887 +extern void ep_kthread_schedule (EP_KTHREAD *kt, long when);
81888 +extern void ep_kthread_stall (EP_KTHREAD *kth);
81889 +extern void ep_kthread_resume (EP_KTHREAD *kt);
81890 +extern void ep_kthread_stop (EP_KTHREAD *kt);
81891 +extern int  ep_kthread_state (EP_KTHREAD *kt, long *time);
81892 +#endif /* __ELAN3_KTHREAD_H */
81893 +
81894 +/*
81895 + * Local variables:
81896 + * c-file-style: "linux"
81897 + * End:
81898 + */
81899 diff -urN clean/include/elan/nmh.h linux-2.6.9/include/elan/nmh.h
81900 --- clean/include/elan/nmh.h    1969-12-31 19:00:00.000000000 -0500
81901 +++ linux-2.6.9/include/elan/nmh.h      2004-01-06 05:29:55.000000000 -0500
81902 @@ -0,0 +1,95 @@
81903 +/*
81904 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
81905 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
81906 + *
81907 + *    For licensing information please see the supplied COPYING file
81908 + *
81909 + */
81910 +
81911 +#ifndef __ELAN3_NMH_H
81912 +#define __ELAN3_NMH_H
81913 +
81914 +#ident "@(#)$Id: nmh.h,v 1.7 2004/01/06 10:29:55 david Exp $"
81915 +/*      $Source: /cvs/master/quadrics/epmod/nmh.h,v $*/
81916 +
81917 +
81918 +/* Forward declarations */
81919 +typedef struct ep_nmd          EP_NMD;
81920 +typedef struct ep_nmh_ops      EP_NMH_OPS;
81921 +typedef struct ep_nmh          EP_NMH;
81922 +
81923 +/* Railmask held in 16 bit field (packs with nodeId into NMD */
81924 +typedef uint16_t               EP_RAILMASK;
81925 +
81926 +#define EP_RAIL2RAILMASK(rnum) (1 << (rnum))
81927 +#define EP_RAILMASK_ALL                0xffff
81928 +
81929 +/* kernel comms elan network address */
81930 +typedef uint32_t               EP_ADDR;
81931 +
81932 +/* network mapping descriptor - this is returned to the user from a map operation,
81933 + * and is what is passed to all communication functions */
81934 +struct ep_nmd
81935 +{
81936 +    EP_ADDR    nmd_addr;                                       /* base address */
81937 +    uint32_t   nmd_len;                                        /* size in bytes */
81938 +    uint32_t   nmd_attr;                                       /* nodeid << 16 | railmask */
81939 +};
81940 +
81941 +#define EP_NMD_ATTR(nodeid,railmask)   (((nodeid) << 16) | (railmask))
81942 +#define EP_NMD_NODEID(nmd)             ((nmd)->nmd_attr >> 16)
81943 +#define EP_NMD_RAILMASK(nmd)           ((nmd)->nmd_attr & EP_RAILMASK_ALL)
81944 +
81945 +#if !defined(__ELAN__)
81946 +
81947 +struct ep_nmh_ops
81948 +{
81949 +    int           (*op_map_rails) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, EP_RAILMASK mask);   /* add mappings to different rail(s) */
81950 +
81951 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
81952 +    uint16_t (*op_calc_check_sum) (EP_SYS *sys, EP_NMH *nmh, EP_NMD *nmd, uint16_t check_sum); /* calculates check sum              */
81953 +#endif
81954 +};
81955 +
81956 +struct ep_nmh
81957 +{
81958 +    EP_NMD          nmh_nmd;                                   /* public field */
81959 +    struct list_head nmh_link;                                 /* linked on hash table */
81960 +    EP_NMH_OPS     *nmh_ops;                                   /* operations to perform on object */
81961 +};
81962 +
81963 +#define EP_NMH_NUMHASH                 (32 - 11 + 1)           /* one hash table for each power of 2 above pagesize */
81964 +#define EP_NMH_HASHSIZE                        (64)                    /* max size of each hash table */
81965 +
81966 +typedef struct ep_nmh_table
81967 +{
81968 +    struct list_head *tbl_hash[EP_NMH_NUMHASH];
81969 +    unsigned         tbl_size[EP_NMH_NUMHASH];
81970 +} EP_NMH_TABLE;
81971 +
81972 +extern int         ep_nmh_init (EP_NMH_TABLE *tbl);
81973 +extern void        ep_nmh_fini (EP_NMH_TABLE *tbl);
81974 +
81975 +extern void        ep_nmh_insert (EP_NMH_TABLE *tbl, EP_NMH *nmd);
81976 +extern void        ep_nmh_remove (EP_NMH_TABLE *tbl, EP_NMH *nmd);
81977 +extern EP_NMH     *ep_nmh_find (EP_NMH_TABLE *tbl, EP_NMD *nmh);
81978 +
81979 +#if ! defined(CONFIG_EP_NO_CHECK_SUM)
81980 +extern uint32_t    ep_nmd_calc_data_check_sum(EP_SYS *sys, EP_NMD *nmd, int nFrags);
81981 +#endif
81982 +
81983 +/* Public interface */
81984 +extern EP_RAILMASK ep_nmd2railmask (EP_NMD *frags, int nFrags);
81985 +extern void        ep_nmd_subset (EP_NMD *subset, EP_NMD *nmd, unsigned off, unsigned len);
81986 +extern int        ep_nmd_merge (EP_NMD *merged, EP_NMD *a, EP_NMD *b);
81987 +extern int         ep_nmd_map_rails (EP_SYS *sys, EP_NMD *nmd, unsigned railmask);
81988 +
81989 +#endif /* __ELAN__ */
81990 +
81991 +#endif /* __ELAN3_NMH_H */
81992 +
81993 +/*
81994 + * Local variables:
81995 + * c-file-style: "stroustrup"
81996 + * End:
81997 + */
81998 diff -urN clean/include/elan/rmap.h linux-2.6.9/include/elan/rmap.h
81999 --- clean/include/elan/rmap.h   1969-12-31 19:00:00.000000000 -0500
82000 +++ linux-2.6.9/include/elan/rmap.h     2004-05-19 06:24:40.000000000 -0400
82001 @@ -0,0 +1,49 @@
82002 +/*
82003 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82004 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
82005 + *
82006 + *    For licensing information please see the supplied COPYING file
82007 + *
82008 + */
82009 +
82010 +#ifndef __ELAN_RMAP_H
82011 +#define __ELAN_RMAP_H
82012 +
82013 +#ident "$Id: rmap.h,v 1.8 2004/05/19 10:24:40 david Exp $"
82014 +/*      $Source: /cvs/master/quadrics/epmod/rmap.h,v $ */
82015 +
82016 +
82017 +typedef struct ep_rmap_entry 
82018 +{
82019 +    size_t     m_size;
82020 +    u_long     m_addr;
82021 +} EP_RMAP_ENTRY;
82022 +
82023 +typedef struct ep_rmap 
82024 +{
82025 +    spinlock_t      m_lock;
82026 +    kcondvar_t       m_wait;
82027 +    u_int           m_size;
82028 +    u_int           m_free;
82029 +    u_int           m_want;
82030 +    char            *m_name;
82031 +    EP_RMAP_ENTRY    m_map[1];
82032 +} EP_RMAP;
82033 +
82034 +extern void         ep_display_rmap (EP_RMAP *map);
82035 +
82036 +extern void          ep_rmapinit (EP_RMAP *rmap, char *name, u_int mapsize);
82037 +extern unsigned long ep_rmalloc (EP_RMAP *rmap, size_t size, int cansleep);
82038 +extern unsigned long ep_rmalloc_constrained (EP_RMAP *mp, size_t size, unsigned long alo, unsigned long ahi, unsigned long align, int cansleep);
82039 +extern void          ep_rmfree (EP_RMAP *rmap, size_t size, unsigned long addr);
82040 +extern unsigned long ep_rmget (EP_RMAP *rmap, size_t size, unsigned long addr);
82041 +extern EP_RMAP      *ep_rmallocmap (size_t size, char *name, int cansleep);
82042 +extern void          ep_rmfreemap (EP_RMAP *map);
82043 +
82044 +#endif /* __ELAN3_RMAP_H */
82045 +
82046 +/*
82047 + * Local variables:
82048 + * c-file-style: "stroustrup"
82049 + * End:
82050 + */
82051 diff -urN clean/include/elan/statemap.h linux-2.6.9/include/elan/statemap.h
82052 --- clean/include/elan/statemap.h       1969-12-31 19:00:00.000000000 -0500
82053 +++ linux-2.6.9/include/elan/statemap.h 2003-10-07 09:22:38.000000000 -0400
82054 @@ -0,0 +1,52 @@
82055 +/*
82056 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82057 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
82058 + *
82059 + *    For licensing information please see the supplied COPYING file
82060 + *
82061 + */
82062 +
82063 +#ifndef __ELAN_STATEMAP_H
82064 +#define __ELAN_STATEMAP_H
82065 +
82066 +#ident "$Id: statemap.h,v 1.8 2003/10/07 13:22:38 david Exp $"
82067 +/*      $Source: /cvs/master/quadrics/epmod/statemap.h,v $ */
82068 +
82069 +#include <elan/bitmap.h>
82070 +
82071 +/******************************** global state bitmap stuff **********************************/
82072 +typedef struct
82073 +{
82074 +   unsigned int size;
82075 +   unsigned int nob;
82076 +   unsigned int changemap_nob;
82077 +   unsigned int bitmap_nob;
82078 +   bitmap_t    *changemap0;
82079 +   bitmap_t    *changemap1;
82080 +   bitmap_t    *changemap2;
82081 +   bitmap_t    *bitmap;
82082 +} statemap_t;
82083 +
82084 +extern bitmap_t              statemap_getseg (statemap_t *map, unsigned int offset);
82085 +extern void           statemap_setseg (statemap_t *map, unsigned int offset, bitmap_t seg);
82086 +extern bitmap_t       statemap_getbits (statemap_t *map, unsigned int offset, int nbits);
82087 +extern void           statemap_setbits (statemap_t *map, unsigned int offset, bitmap_t bits, int nbits);
82088 +extern void           statemap_zero (statemap_t *map);
82089 +extern void           statemap_setmap (statemap_t *dst, statemap_t *src);
82090 +extern void           statemap_ormap (statemap_t *dst, statemap_t *src);
82091 +extern int           statemap_findchange (statemap_t *map, bitmap_t *newseg, int clearchange);
82092 +extern int            statemap_changed (statemap_t *map);
82093 +extern void           statemap_reset (statemap_t *map);
82094 +extern void           statemap_copy (statemap_t *dst, statemap_t *src);
82095 +extern void           statemap_clearchanges (statemap_t *map);
82096 +extern bitmap_t      *statemap_tobitmap (statemap_t *map);
82097 +extern statemap_t    *statemap_create (int size);
82098 +extern void           statemap_destroy (statemap_t *map);
82099 +
82100 +#endif /* __ELAN_STATEMAP_H */
82101 +
82102 +/*
82103 + * Local variables:
82104 + * c-file-style: "stroustrup"
82105 + * End:
82106 + */
82107 diff -urN clean/include/elan/stats.h linux-2.6.9/include/elan/stats.h
82108 --- clean/include/elan/stats.h  1969-12-31 19:00:00.000000000 -0500
82109 +++ linux-2.6.9/include/elan/stats.h    2003-09-24 09:55:37.000000000 -0400
82110 @@ -0,0 +1,85 @@
82111 +/*
82112 + *    Copyright (c) 2003 by Quadrics Limited.
82113 + * 
82114 + *    For licensing information please see the supplied COPYING file
82115 + *
82116 + */
82117 +
82118 +#ident "@(#)$Id: stats.h,v 1.5 2003/09/24 13:55:37 david Exp $"
82119 +/*      $Source: /cvs/master/quadrics/elanmod/modsrc/stats.h,v $*/
82120 +
82121 +#ifndef __ELAN_STATS_H
82122 +#define __ELAN_STATS_H
82123 +
82124 +
82125 +/* non-kernel headings */
82126 +#define ELAN_STATS_NAME_MAX_LEN ((uint)64)
82127 +typedef unsigned int ELAN_STATS_IDX;
82128 +
82129 +typedef struct elan_stats_map
82130 +{
82131 +       char entry_name[ELAN_STATS_NAME_MAX_LEN];
82132 +       int  index;
82133 +} ELAN_STATS_MAP;
82134 +
82135 +#if defined(__KERNEL__)
82136 +
82137 +/* stats callbacks */
82138 +#define ELAN_STATS_OPS_VERSION ((u_int)1)
82139 +typedef struct elan_stats_ops
82140 +{
82141 +       u_int  ops_version;
82142 +
82143 +       int (*elan_stats_get_name)    (void * arg, uint index,   caddr_t  name);
82144 +       int (*elan_stats_get_block)   (void * arg, uint entries, ulong   *values);
82145 +       int (*elan_stats_clear_block) (void * arg);
82146 +
82147 +} ELAN_STATS_OPS;
82148 +
82149 +typedef struct elan_stats_struct
82150 +{
82151 +       struct list_head   node;
82152 +
82153 +       ELAN_STATS_IDX     statidx;
82154 +       char               block_name[ELAN_STATS_NAME_MAX_LEN];
82155 +       uint               num_entries;
82156 +       ELAN_STATS_OPS *ops;
82157 +       void              *arg;
82158 +
82159 +} ELAN_STATS_STRUCT;
82160 +
82161 +/* stats.c */
82162 +extern int                   elan_stats_register   (ELAN_STATS_IDX    *statidx, 
82163 +                                                      char              *block_name, 
82164 +                                                      uint               num_entries,
82165 +                                                      ELAN_STATS_OPS *ops,
82166 +                                                      void              *arg);
82167 +
82168 +extern int                   elan_stats_deregister  (ELAN_STATS_IDX  statidx);
82169 +extern ELAN_STATS_STRUCT *elan_stats_find        (ELAN_STATS_IDX  statidx);
82170 +extern ELAN_STATS_STRUCT *elan_stats_find_by_name(caddr_t         block_name);
82171 +extern ELAN_STATS_STRUCT *elan_stats_find_next   (ELAN_STATS_IDX  statidx);
82172 +
82173 +
82174 +/* elan_stats.c */
82175 +extern int elan_stats_get_next_index (ELAN_STATS_IDX statidx, ELAN_STATS_IDX *next_statidx);
82176 +
82177 +extern int elan_stats_find_index     (caddr_t  block_name, ELAN_STATS_IDX *statidx, uint *num_entries);
82178 +
82179 +extern int elan_stats_get_block_info (ELAN_STATS_IDX statidx, caddr_t block_name, uint *num_entries);
82180 +
82181 +extern int elan_stats_get_index_name (ELAN_STATS_IDX statidx, uint index, caddr_t name);
82182 +
82183 +extern int elan_stats_get_block      (ELAN_STATS_IDX statidx, uint entries, ulong   *values);
82184 +
82185 +extern int elan_stats_clear_block    (ELAN_STATS_IDX statidx);
82186 +
82187 +#endif /* __KERNEL__ */
82188 +
82189 +#endif /* __ELAN_STATS_H */
82190 +
82191 +/*
82192 + * Local variables:
82193 + * c-file-style: "linux"
82194 + * End:
82195 + */
82196 diff -urN clean/include/elan3/compat.h linux-2.6.9/include/elan3/compat.h
82197 --- clean/include/elan3/compat.h        1969-12-31 19:00:00.000000000 -0500
82198 +++ linux-2.6.9/include/elan3/compat.h  2004-06-09 05:07:03.000000000 -0400
82199 @@ -0,0 +1,177 @@
82200 +/*
82201 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82202 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
82203 + *
82204 + *    For licensing information please see the supplied COPYING file
82205 + *
82206 + */
82207 +
82208 +#ident "@(#)$Id: compat.h,v 1.4 2004/06/09 09:07:03 mike Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
82209 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/compat.h,v $*/
82210 +
82211 +#ifndef __ELAN3_COMPAT_H
82212 +#define __ELAN3_COMPAT_H
82213 +
82214 +/* compatibility header to allow Eagle branch QSNETLIBS 
82215 + * to compile against head kernel */
82216 +
82217 +#define ELAN_EAGLE_COMPAT
82218 +
82219 +/* vmseg.h */
82220 +#define ELAN_FLAGSTATS                         ELAN3_FLAGSTATS
82221 +
82222 +/* uregs.h */
82223 +#define ELAN_STATS_NAME                                ELAN3_STATS_NAME
82224 +#define elan3_stats_names                      elan_stats_names
82225 +
82226 +/* spinlock.h */
82227 +#define ELAN_SPINLOCK                          ELAN3_SPINLOCK
82228 +#define ELAN_SPINLOCK_MAIN                     ELAN3_SPINLOCK_MAIN
82229 +#define ELAN_SPINLOCK_ELAN                     ELAN3_SPINLOCK_ELAN
82230 +#define ELAN_ME_SPINENTER                      ELAN3_ME_SPINENTER
82231 +#define ELAN_ME_FORCEENTER                     ELAN3_ME_FORCEENTER
82232 +#define ELAN_ME_SPINEXIT                       ELAN3_ME_SPINEXIT
82233 +#define ELAN_SPINENTER                         ELAN3_SPINENTER
82234 +#define ELAN_SPINEXIT                          ELAN3_SPINEXIT
82235 +#define elan3_me_spinblock                     elan_me_spinblock
82236 +#define elan3_spinenter                                elan_spinenter
82237 +
82238 +/* elanio.h */
82239 +#define ELANIO_CONTROL_PATHNAME                        ELAN3IO_CONTROL_PATHNAME
82240 +#define ELANIO_USER_PATHNAME                   ELAN3IO_USER_PATHNAME
82241 +#define ELANIO_SDRAM_PATHNAME                  ELAN3IO_SDRAM_PATHNAME
82242 +#define ELANIO_MAX_PATHNAMELEN                 ELAN3IO_MAX_PATHNAMELEN
82243 +
82244 +#define ELANIO_SET_BOUNDARY_SCAN               ELAN3IO_SET_BOUNDARY_SCAN
82245 +#define ELANIO_CLEAR_BOUNDARY_SCAN             ELAN3IO_CLEAR_BOUNDARY_SCAN
82246 +#define ELANIO_READ_LINKVAL                    ELAN3IO_READ_LINKVAL
82247 +#define ELANIO_WRITE_LINKVAL                   ELAN3IO_WRITE_LINKVAL
82248 +#define ELANIO_SET_DEBUG_STRUCT                        ELAN3IO_SET_DEBUG_STRUCT
82249 +#define ELANIO_SET_DEBUG                       ELAN3IO_SET_DEBUG
82250 +#define ELANIO_DEBUG_BUFFER_STRUCT             ELAN3IO_DEBUG_BUFFER_STRUCT
82251 +#define ELANIO_DEBUG_BUFFER                    ELAN3IO_DEBUG_BUFFER
82252 +#define ELANIO_NETERR_SERVER_STRUCT            ELAN3IO_NETERR_SERVER_STRUCT
82253 +#define ELANIO_NETERR_SERVER                   ELAN3IO_NETERR_SERVER
82254 +#define ELANIO_NETERR_FIXUP                    ELAN3IO_NETERR_FIXUP
82255 +
82256 +#define ELANIO_FREE                            ELAN3IO_FREE
82257 +#define ELANIO_ATTACH                          ELAN3IO_ATTACH
82258 +#define ELANIO_DETACH                          ELAN3IO_DETACH
82259 +#define ELANIO_ADDVP_STRUCT                    ELAN3IO_ADDVP_STRUCT
82260 +#define ELANIO_ADDVP                           ELAN3IO_ADDVP
82261 +#define ELANIO_REMOVEVP                                ELAN3IO_REMOVEVP
82262 +#define ELANIO_BCASTVP_STRUCT                  ELAN3IO_BCASTVP_STRUCT
82263 +#define ELANIO_BCASTVP                         ELAN3IO_BCASTVP
82264 +#define ELANIO_LOAD_ROUTE_STRUCT               ELAN3IO_LOAD_ROUTE_STRUCT
82265 +#define ELANIO_LOAD_ROUTE                      ELAN3IO_LOAD_ROUTE
82266 +#define ELANIO_PROCESS                         ELAN3IO_PROCESS
82267 +#define ELANIO_SETPERM_STRUCT                  ELAN3IO_SETPERM_STRUCT
82268 +#define ELANIO_SETPERM                         ELAN3IO_SETPERM
82269 +#define ELANIO_CLEARPERM_STRUCT                        ELAN3IO_CLEARPERM_STRUCT
82270 +#define ELANIO_CLEARPERM                       ELAN3IO_CLEARPERM
82271 +#define ELANIO_CHANGEPERM_STRUCT               ELAN3IO_CHANGEPERM_STRUCT
82272 +#define ELANIO_CHANGEPERM                      ELAN3IO_CHANGEPERM
82273 +#define ELANIO_HELPER_THREAD                   ELAN3IO_HELPER_THREAD
82274 +#define ELANIO_WAITCOMMAND                     ELAN3IO_WAITCOMMAND
82275 +#define ELANIO_BLOCK_INPUTTER                  ELAN3IO_BLOCK_INPUTTER
82276 +#define ELANIO_SET_FLAGS                       ELAN3IO_SET_FLAGS
82277 +#define ELANIO_WAITEVENT                       ELAN3IO_WAITEVENT
82278 +#define ELANIO_ALLOC_EVENTCOOKIE               ELAN3IO_ALLOC_EVENTCOOKIE
82279 +#define ELANIO_FREE_EVENTCOOKIE                        ELAN3IO_FREE_EVENTCOOKIE
82280 +#define ELANIO_ARM_EVENTCOOKIE                 ELAN3IO_ARM_EVENTCOOKIE
82281 +#define ELANIO_WAIT_EVENTCOOKIE                        ELAN3IO_WAIT_EVENTCOOKIE
82282 +#define ELANIO_SWAPSPACE                       ELAN3IO_SWAPSPACE
82283 +#define ELANIO_EXCEPTION_SPACE                 ELAN3IO_EXCEPTION_SPACE
82284 +#define ELANIO_GET_EXCEPTION                   ELAN3IO_GET_EXCEPTION
82285 +#define ELANIO_UNLOAD_STRUCT                   ELAN3IO_UNLOAD_STRUCT
82286 +#define ELANIO_UNLOAD                          ELAN3IO_UNLOAD
82287 +#define ELANIO_GET_ROUTE_STRUCT                        ELAN3IO_GET_ROUTE_STRUCT
82288 +#define ELANIO_GET_ROUTE                       ELAN3IO_GET_ROUTE
82289 +#define ELANIO_RESET_ROUTE_STRUCT              ELAN3IO_RESET_ROUTE_STRUCT
82290 +#define ELANIO_RESET_ROUTE                     ELAN3IO_RESET_ROUTE
82291 +#define ELANIO_CHECK_ROUTE_STRUCT              ELAN3IO_CHECK_ROUTE_STRUCT
82292 +#define ELANIO_CHECK_ROUTE                     ELAN3IO_CHECK_ROUTE
82293 +#define ELANIO_VP2NODEID_STRUCT                        ELAN3IO_VP2NODEID_STRUCT
82294 +#define ELANIO_VP2NODEID                       ELAN3IO_VP2NODEID
82295 +#define ELANIO_SET_SIGNAL                      ELAN3IO_SET_SIGNAL
82296 +#define ELANIO_PROCESS_2_LOCATION_STRUCT       ELAN3IO_PROCESS_2_LOCATION_STRUCT
82297 +#define ELANIO_PROCESS_2_LOCATION              ELAN3IO_PROCESS_2_LOCATION
82298 +#define ELANIO_GET_DEVINFO_STRUCT              ELAN3IO_GET_DEVINFO_STRUCT
82299 +#define ELANIO_GET_DEVINFO                     ELAN3IO_GET_DEVINFO
82300 +#define ELANIO_GET_POSITION_STRUCT             ELAN3IO_GET_POSITION_STRUCT
82301 +#define ELANIO_GET_POSITION                    ELAN3IO_GET_POSITION
82302 +#define ELANIO_STATS_STRUCT                    ELAN3IO_STATS_STRUCT
82303 +#define ELANIO_STATS                           ELAN3IO_STATS
82304 +#  define ELAN_SYS_STATS_DEVICE                        ELAN3_SYS_STATS_DEVICE
82305 +#  define ELAN_SYS_STATS_ELAN3MMU              ELAN3_SYS_STATS_MMU
82306 +
82307 +#define ELANIO_OFF_FLAG_PAGE                   ELAN3IO_OFF_FLAG_PAGE
82308 +#define ELANIO_OFF_UREG_PAGE                   ELAN3IO_OFF_UREG_PAGE
82309 +#define ELANIO_OFF_COMMAND_PAGE                        ELAN3IO_OFF_COMMAND_PAGE
82310 +
82311 +
82312 +/* elanvp.h */
82313 +#define ELAN_ROUTE_SUCCESS                     ELAN3_ROUTE_SUCCESS
82314 +#define ELAN_ROUTE_SYSCALL_FAILED              ELAN3_ROUTE_SYSCALL_FAILED
82315 +#define ELAN_ROUTE_INVALID                     ELAN3_ROUTE_INVALID
82316 +#define ELAN_ROUTE_TOO_LONG                    ELAN3_ROUTE_TOO_LONG
82317 +#define ELAN_ROUTE_LOAD_FAILED                 ELAN3_ROUTE_LOAD_FAILED
82318 +#define ELAN_ROUTE_PROC_RANGE                  ELAN3_ROUTE_PROC_RANGE
82319 +#define ELAN_ROUTE_INVALID_LEVEL               ELAN3_ROUTE_INVALID_LEVEL
82320 +#define ELAN_ROUTE_OCILATES                    ELAN3_ROUTE_OCILATES
82321 +#define ELAN_ROUTE_WRONG_DEST                  ELAN3_ROUTE_WRONG_DEST
82322 +#define ELAN_ROUTE_TURN_LEVEL                  ELAN3_ROUTE_TURN_LEVEL
82323 +#define ELAN_ROUTE_NODEID_UNKNOWN              ELAN3_ROUTE_NODEID_UNKNOWN
82324 +
82325 +/* elandev.h */
82326 +#define ELAN_STATS                             ELAN3_STATS
82327 +#define ELAN_STATS_VERSION                     ELAN3_STATS_VERSION
82328 +
82329 +/* perm.h */
82330 +#define ELAN_PERM_NOREMOTE                     ELAN3_PERM_NOREMOTE
82331 +#define ELAN_PERM_LOCAL_READ                   ELAN3_PERM_LOCAL_READ
82332 +#define ELAN_PERM_REMOTEALL                    ELAN3_PERM_REMOTEALL
82333 +
82334 +/* threadsyscall.h */
82335 +#define ELAN_ABORT_TRAPNUM                     ELAN3_ABORT_TRAPNUM
82336 +#define ELAN_ELANCALL_TRAPNUM                  ELAN3_ELANCALL_TRAPNUM
82337 +#define ELAN_SYSCALL_TRAPNUM                   ELAN3_SYSCALL_TRAPNUM
82338 +#define ELAN_SYS_close                         ELAN3_SYS_close
82339 +#define ELAN_SYS_getpid                                ELAN3_SYS_getpid
82340 +#define ELAN_SYS_ioctl                         ELAN3_SYS_ioctl
82341 +#define ELAN_SYS_kill                          ELAN3_SYS_kill
82342 +#define ELAN_SYS_lseek                         ELAN3_SYS_lseek
82343 +#define ELAN_SYS_mmap                          ELAN3_SYS_mmap
82344 +#define ELAN_SYS_munmap                                ELAN3_SYS_munmap
82345 +#define ELAN_SYS_open                          ELAN3_SYS_open
82346 +#define ELAN_SYS_poll                          ELAN3_SYS_poll
82347 +#define ELAN_SYS_read                          ELAN3_SYS_read
82348 +#define ELAN_SYS_write                         ELAN3_SYS_write
82349 +#define ELAN_T_SYSCALL_CODE                    ELAN3_T_SYSCALL_CODE
82350 +#define ELAN_T_SYSCALL_ERRNO                   ELAN3_T_SYSCALL_ERRNO
82351 +
82352 +/* elansyscall.h */
82353 +#define ELAN_SYS_FLAG_DMA_BADVP                        ELAN3_SYS_FLAG_DMA_BADVP
82354 +#define ELAN_SYS_FLAG_THREAD_BADVP             ELAN3_SYS_FLAG_THREAD_BADVP
82355 +#define ELAN_SYS_FLAG_DMAFAIL                  ELAN3_SYS_FLAG_DMAFAIL
82356 +#define ELAN_SYS_FLAG_NETERR                   ELAN3_SYS_FLAG_NETERR
82357 +
82358 +/* intrinsics.h */
82359 +#define elan_copy64w                           elan3_copy64w
82360 +#define elan_read64dw                          elan3_read64dw
82361 +#define elan_write64dw                         elan3_write64dw
82362 +
82363 +#ifndef ELAN_POLL_EVENT
82364 +#define ELAN_POLL_EVENT                                ELAN3_POLL_EVENT
82365 +#endif
82366 +#ifndef ELAN_WAIT_EVENT
82367 +#define ELAN_WAIT_EVENT                                ELAN3_WAIT_EVENT
82368 +#endif
82369 +
82370 +#endif /* __ELAN3_COMPAT_H */
82371 +/*
82372 + * Local variables:
82373 + * c-file-style: "stroustrup"
82374 + * End:
82375 + */
82376 +
82377 diff -urN clean/include/elan3/dma.h linux-2.6.9/include/elan3/dma.h
82378 --- clean/include/elan3/dma.h   1969-12-31 19:00:00.000000000 -0500
82379 +++ linux-2.6.9/include/elan3/dma.h     2002-08-21 08:43:27.000000000 -0400
82380 @@ -0,0 +1,213 @@
82381 +/*
82382 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82383 + *
82384 + *    For licensing information please see the supplied COPYING file
82385 + *
82386 + */
82387 +
82388 +#ifndef __ELAN3_DMA_H
82389 +#define __ELAN3_DMA_H
82390 +
82391 +#ident "$Id: dma.h,v 1.38 2002/08/21 12:43:27 david Exp $"
82392 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/dma.h,v $ */
82393 +
82394 +#include <elan3/e3types.h>
82395 +#include <elan3/events.h>
82396 +
82397 +/* Alignment for a DMA descriptor */
82398 +#define E3_DMA_ALIGN           (32)
82399 +
82400 +/* The maximum size a DMA can be (i.e. < 2GB) */
82401 +#define E3_MAX_DMA_SIZE                0x7fffffff
82402 +
82403 +/* This macro returns TRUE if a fixup for the ELAN_REVB_BUG_2 problem is required 
82404 + * i.e. if the DMA begins in the last 64-bytes of a page and its size causes it to enter the
82405 + * next page, hence causing the Elan to issue 2 (64-byte) block reads to different pages.
82406 + * See GNAT hw-elan3/3263
82407 + */
82408 +#define E3_DMA_REVB_BUG_2(SIZE, ADDR, PAGESIZE)        \
82409 +       ( (((int) (ADDR) & (PAGESIZE-64)) == (PAGESIZE-64)) && (-(((int) (ADDR) | ~(PAGESIZE-1))) < (SIZE)) )
82410 +
82411 +/* There is a point where a dma runs quicker from main memory than
82412 + * when running from sdram and having to copy all the data down
82413 + * first.
82414 + */
82415 +#define E3_DMA_SDRAM_CUTOFF    128
82416 +
82417 +typedef union _e3_DmaType
82418 +{
82419 +    E3_uint32 type;
82420 +    struct
82421 +    {
82422 +#if defined(__LITTLE_ENDIAN__)
82423 +       E3_uint32 dataType:2;   /* Bits 0 to 1   */
82424 +       E3_uint32 direction:3;  /* Bit  4 to 2   */
82425 +       E3_uint32 opCode:4;     /* Bits 5 to 8   */
82426 +       E3_uint32 failCount:6;  /* Bits 9 to 14  */
82427 +       E3_uint32 isRemote:1;   /* Bit  15       */
82428 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
82429 +       E3_uint32 :3;           /* Bits 29 to 31 */
82430 +#else
82431 +       E3_uint32 :3;           /* Bits 29 to 31 */
82432 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
82433 +       E3_uint32 isRemote:1;   /* Bit  15       */
82434 +       E3_uint32 failCount:6;  /* Bits 9 to 14  */
82435 +       E3_uint32 opCode:4;     /* Bits 5 to 8   */
82436 +       E3_uint32 direction:3;  /* Bit  4 to 2   */
82437 +       E3_uint32 dataType:2;   /* Bits 0 to 1   */
82438 +#endif
82439 +    } s;
82440 +} E3_DmaType;
82441 +
82442 +#define E3_DMA_CONTEXT_MASK    (ALL_CONTEXT_BITS << 16)
82443 +
82444 +#define E3_DMA_CONTEXT(type)   (((type) >> 16) & ALL_CONTEXT_BITS)
82445 +#define E3_DMA_ISREMOTE(type)  (((type) >> 15) & 1)
82446 +#define E3_DMA_FAILCOUNT(type) (((type) >> 9) & 0x3F)
82447 +#define E3_DMA_OPCODE(type)    (((type) >> 5) & 0xF)
82448 +#define E3_DMA_DIRECTION(type) (((type) >> 2) & 0x7)
82449 +#define EP_DMA_DATATYPE(type)  (((type) >> 0) & 0x3)
82450 +
82451 +#define E3_DMA_TYPE(dataType, direction, opCode, failCount) \
82452 +    (((dataType) & 0x3) | (((direction) & 7) << 2) | (((opCode) & 0xF) << 5) | (((failCount) & 0x3F) << 9))
82453 +
82454 +
82455 +typedef union _e3_CookieVProc
82456 +{
82457 +    E3_uint32 cookie_vproc;
82458 +    struct
82459 +    {
82460 +#if defined(__LITTLE_ENDIAN__)
82461 +       E3_uint32 vproc:16;                     /* Bit  15 to 0  */
82462 +       E3_uint32 cookie:16;                    /* Bits 31 to 16 */
82463 +#else
82464 +       E3_uint32 cookie:16;                    /* Bits 31 to 16 */
82465 +       E3_uint32 vproc:16;                     /* Bit  15 to 0  */
82466 +#endif
82467 +    } s;
82468 +} E3_CookieVProc;
82469 +
82470 +#define E3_DMA_COOKIE_PROC(Cookie, VProc)  (((VProc) & 0xffff) | (((Cookie) << 16)))
82471 +
82472 +#define DMA_COOKIE_MASK                (0xffff0000)
82473 +#define DMA_PROCESS_MASK       (0x0000ffff)
82474 +
82475 +/* We use the bottom bit of the cookie to
82476 + * distinguish main/thread generated cookies
82477 + */
82478 +#define DMA_COOKIE_THREAD      (0x01 << 16)
82479 +
82480 +/* We use the next bit of the cookie to
82481 + * distinguish locally/remotely generated cookies 
82482 + */
82483 +#define DMA_COOKIE_REMOTE      (0x02 << 16)
82484 +
82485 +/* Assign and increment cookie (NB: we have reserved the bottom two bits)
82486 + */
82487 +#define DMA_COOKIE(COOKIE, VPROC)       ((((COOKIE) += (0x4 << 16)) & DMA_COOKIE_MASK) | VPROC)
82488 +#define DMA_REMOTE_COOKIE(COOKIE, VPROC) ((((COOKIE) += (0x4 << 16)) & DMA_COOKIE_MASK) | DMA_COOKIE_REMOTE | VPROC)
82489 +
82490 +#define DMA_COOKIE_REFRESH(COOKIEVP, COOKIE)                           \
82491 +do {                                                                   \
82492 +       COOKIEVP &= ~DMA_COOKIE_MASK;           /* Clear cookie */      \
82493 +       COOKIEVP |= DMA_COOKIE(COOKIE,0);       /* Assign new cookie */ \
82494 +} while (0)
82495 +
82496 +typedef struct e3_dma
82497 +{
82498 +    E3_DmaType         dma_u;
82499 +    E3_uint32          dma_size;
82500 +    E3_Addr            dma_source;
82501 +    E3_Addr            dma_dest;
82502 +    E3_Addr            dma_destEvent;
82503 +    E3_CookieVProc     dma_destCookieProc;
82504 +    E3_Addr            dma_srcEvent;
82505 +    E3_CookieVProc     dma_srcCookieProc;
82506 +} E3_DMA;
82507 +
82508 +
82509 +/*
82510 + * Word-swapped version of DMA descriptor.
82511 + * This is used by the UltraSPARC code to format the descriptor
82512 + * in main memory before block-copying it down to Elan SDRAM.
82513 + * In the process it does a dword (64-bit) conversion and so swaps
82514 + * the word order on a double-word pair basis
82515 + */
82516 +typedef struct e3_dma_swapped
82517 +{
82518 +    E3_uint32          dma_size;
82519 +    E3_DmaType         dma_u;
82520 +    E3_Addr            dma_dest;
82521 +    E3_Addr            dma_source;
82522 +    E3_CookieVProc     dma_destCookieProc;
82523 +    E3_Addr            dma_destEvent;
82524 +    E3_CookieVProc     dma_srcCookieProc;
82525 +    E3_Addr            dma_srcEvent;
82526 +} E3_DMA_SWAPPED;
82527 +
82528 +/* Define a Main memory structure for DMA desc based on Endianess of machine */
82529 +#if defined(__LITTLE_ENDIAN__)
82530 +#define E3_DMA_MAIN E3_DMA
82531 +#else
82532 +#define E3_DMA_MAIN E3_DMA_SWAPPED
82533 +#endif
82534 +
82535 +#define dma_type        dma_u.type
82536 +#define dma_failCount    dma_u.s.failCount
82537 +#define dma_isRemote     dma_u.s.isRemote
82538 +#define dma_opCode       dma_u.s.opCode
82539 +#define dma_direction    dma_u.s.direction
82540 +#define dma_dataType     dma_u.s.dataType
82541 +#define dma_queueContext dma_u.s.Context
82542 +
82543 +#define dma_destCookieVProc   dma_destCookieProc.cookie_vproc
82544 +#define dma_destVProc        dma_destCookieProc.s.vproc
82545 +#define dma_destCookie       dma_destCookieProc.s.cookie
82546 +#define dma_srcCookieVProc    dma_srcCookieProc.cookie_vproc
82547 +#define dma_srcVProc         dma_srcCookieProc.s.vproc
82548 +#define dma_srcCookie        dma_srcCookieProc.s.cookie
82549 +
82550 +/*
82551 + * Values for dma_opCode
82552 + */
82553 +#define DMA_NORMAL                     0
82554 +#define DMA_QUEUED                     1
82555 +#define DMA_NORMAL_BROADCAST           2
82556 +#define DMA_QUEUED_BROADCAST           3
82557 +#define DMA_NORMAL_UNSAFE              4
82558 +#define DMA_QUEUED_UNSAFE              5
82559 +#define DMA_NORMAL_BROADCAST_UNSAFE    6
82560 +#define DMA_QUEUED_BROADCAST_UNSAFE    7
82561 +
82562 +/*
82563 + * Values for dma_direction
82564 + */
82565 +#define DMA_WRITE              0
82566 +#define DMA_READ_REQUEUE       1
82567 +#define DMA_READ               3
82568 +#define DMA_READ_BROADCAST     7
82569 +
82570 +/*
82571 + * Values for dma_dataType
82572 + */
82573 +#define DMA_BYTE               0
82574 +#define DMA_HALFWORD           1
82575 +#define DMA_WORD               2
82576 +#define DMA_DOUBLE             3
82577 +
82578 +/* OUT OF DATE ?
82579 +  #define DMA_OPCODE_SHIFT     3
82580 +  #define DMA_FAILCOUNT_SHIFT  9
82581 +*/
82582 +#define DMA_TYPE_ISREMOTE      (1 << 15)
82583 +#define DMA_TYPE_READ          (3 << 2)
82584 +#define DMA_TYPE_READ_REQUEUE  (1 << 2)
82585 +#define DMA_TYPE_DIRECTION_MASK        (3 << 2)
82586 +
82587 +#endif /* __ELAN3_DMA_H */
82588 +
82589 +/*
82590 + * Local variables:
82591 + * c-file-style: "stroustrup"
82592 + * End:
82593 + */
82594 diff -urN clean/include/elan3/e3types.h linux-2.6.9/include/elan3/e3types.h
82595 --- clean/include/elan3/e3types.h       1969-12-31 19:00:00.000000000 -0500
82596 +++ linux-2.6.9/include/elan3/e3types.h 2002-08-09 07:23:33.000000000 -0400
82597 @@ -0,0 +1,82 @@
82598 +/*
82599 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82600 + *
82601 + *    For licensing information please see the supplied COPYING file
82602 + *
82603 + */
82604 +
82605 +#ifndef __ELAN3_E3TYPES_H
82606 +#define __ELAN3_E3TYPES_H
82607 +
82608 +#ident "$Id: e3types.h,v 1.18 2002/08/09 11:23:33 addy Exp $"
82609 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/e3types.h,v $ */
82610 +
82611 +#include <qsnet/config.h>
82612 +/*
82613 + * "flip" values for correctly indexing into
82614 + * block data which was copied from the Elan
82615 + * using 64 bit accesses.
82616 + */
82617 +#if defined(__LITTLE_ENDIAN__)
82618 +#  define ByteEndianFlip  0
82619 +#  define ShortEndianFlip 0
82620 +#  define WordEndianFlip  0
82621 +#else
82622 +#  define ByteEndianFlip  7
82623 +#  define ShortEndianFlip 3
82624 +#  define WordEndianFlip  1
82625 +#endif
82626 +
82627 +
82628 +#ifndef _ASM
82629 +
82630 +typedef signed int        E3_int;
82631 +typedef unsigned int              E3_uint;
82632 +
82633 +typedef signed char       E3_int8;
82634 +typedef unsigned char     E3_uint8;
82635 +
82636 +typedef signed short      E3_int16;
82637 +typedef unsigned short            E3_uint16;
82638 +
82639 +typedef signed int        E3_int32;
82640 +typedef unsigned int              E3_uint32;
82641 +
82642 +#ifdef __ELAN3__
82643 +typedef signed long long   E3_int64;
82644 +typedef unsigned long long E3_uint64;
82645 +#ifdef _MAIN_LP64
82646 +/* NOTE: If the Main is 64-bit we declare the Elan thread's
82647 + * E3_uintptr to be 64-bits too
82648 + */
82649 +typedef unsigned long long E3_uintptr;
82650 +#else
82651 +typedef unsigned long      E3_uintptr;
82652 +#endif
82653 +
82654 +#else
82655 +
82656 +#ifdef _LP64
82657 +typedef signed long        E3_int64;
82658 +typedef unsigned long      E3_uint64;
82659 +typedef unsigned long      E3_uintptr;
82660 +#else /* _ILP32 */
82661 +typedef signed long long   E3_int64;
82662 +typedef unsigned long long E3_uint64;
82663 +typedef unsigned long      E3_uintptr;
82664 +#endif
82665 +
82666 +#endif /* __ELAN3__ */
82667 +
82668 +/* 32-bit Elan3 address */
82669 +typedef E3_uint32         E3_Addr;
82670 +
82671 +#endif /* _ASM */
82672 +
82673 +#endif /* __ELAN3_E3TYPES_H */
82674 +
82675 +/*
82676 + * Local variables:
82677 + * c-file-style: "stroustrup"
82678 + * End:
82679 + */
82680 diff -urN clean/include/elan3/elan3mmu.h linux-2.6.9/include/elan3/elan3mmu.h
82681 --- clean/include/elan3/elan3mmu.h      1969-12-31 19:00:00.000000000 -0500
82682 +++ linux-2.6.9/include/elan3/elan3mmu.h        2004-12-14 05:19:32.000000000 -0500
82683 @@ -0,0 +1,346 @@
82684 +/*
82685 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
82686 + *
82687 + *    For licensing information please see the supplied COPYING file
82688 + *
82689 + */
82690 +
82691 +#ifndef __ELAN3_ELAN3MMU_H
82692 +#define __ELAN3_ELAN3MMU_H
82693 +
82694 +#ident "$Id: elan3mmu.h,v 1.41 2004/12/14 10:19:32 mike Exp $"
82695 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu.h,v $*/
82696 +
82697 +
82698 +#include <elan3/pte.h>
82699 +
82700 +#ifdef __cplusplus
82701 +extern "C" {
82702 +#endif
82703 +
82704 +typedef struct elan3mmu_global_stats
82705 +{
82706 +    int                version;
82707 +    int                pteload;
82708 +    int                pteunload;
82709 +    int                ptereload;
82710 +
82711 +    int                streamable_alloc;
82712 +    int                streamable_free;
82713 +    int                streamable_alloc_failed;
82714 +
82715 +    int                num_ptbl_level[4]; /* number of level N  ptbls */
82716 +
82717 +    int                create_ptbl_failed;                     /* count of ptbl creation failure */
82718 +
82719 +    int         lX_alloc_l3;                           /* count of l3 ptbls used as lX */
82720 +    int         lX_freed_l3;                           /* count of lX ptbls freed as l3 */
82721 +
82722 +    int                l2_alloc_l3;                            /* count of l3 ptbls used as l2 */
82723 +    int                l2_freed_l3;                            /* count of l2 ptbls freed as l3 */
82724 +
82725 +    int                stolen_ptbls;                           /* count of l3 ptbls stolen */
82726 +} ELAN3MMU_GLOBAL_STATS;
82727 +
82728 +#define ELAN3MMU_STATS_VERSION         1
82729 +
82730 +#define ELAN3MMU_STAT(what)            (elan3mmu_global_stats.what++)
82731 +#define ELAN3MMU_SET_STAT(what,count)  (elan3mmu_global_stats.what = count)
82732 +
82733 +#ifdef __KERNEL__
82734 +
82735 +#define ELAN3_PT_SHIFT (ELAN3_L2_SHIFT + 2)
82736 +
82737 +typedef struct elan3_ptbl
82738 +{
82739 +    struct elan3_ptbl   *ptbl_parent;                          /* Parent page table, or next on freelist */
82740 +    struct elan3mmu     *ptbl_elan3mmu;                        /* elan3mmu we're allocated for */
82741 +    E3_Addr              ptbl_base;                            /* Virtual address we're mapping */
82742 +    u_char               ptbl_index;                           /* Index in ptbl group */
82743 +    u_char               ptbl_valid;                           /* Number of valid entries */
82744 +    u_char               ptbl_flags;                           /* Flags, defined below. */
82745 +    u_char               ptbl_spare;
82746 +} ELAN3_PTBL;
82747 +
82748 +#define ptbl_next      ptbl_parent                             /* Parent pointer is next pointer when on free list */
82749 +
82750 +#define PTBL_LEVEL_X            0x00
82751 +#define PTBL_LEVEL_1           0x01
82752 +#define PTBL_LEVEL_2           0x02
82753 +#define PTBL_LEVEL_3           0x03
82754 +#define PTBL_LEVEL_MASK                0x03
82755 +#define PTBL_LOCKED            0x04                            /* Page table is locked,  protects all fields */
82756 +#define PTBL_KEEP              0x08                            /* This ptbl is not to be stolen */
82757 +#define PTBL_ALLOCED           0x10                            /* This ptbl has been allocated, and is not free */
82758 +#define PTBL_GROUPED           0x20                            /* This ptbl is a member of a group of ptbls */
82759 +#define PTBL_KERNEL            0x80                            /* This ptbl is allocated for the kernel */
82760 +
82761 +#define PTBL_LEVEL(flags)      ((flags) & PTBL_LEVEL_MASK)
82762 +#define PTBL_IS_LOCKED(flags)  (((flags) & (PTBL_LOCKED|PTBL_ALLOCED)) == (PTBL_LOCKED|PTBL_ALLOCED))
82763 +
82764 +#if ELAN3_PAGE_SHIFT == 13
82765 +#  define PTBL_GROUP_SIZE      8192                            /* page table groups are 8k bytes */
82766 +#  define PTBLS_PER_GROUP_L1   8                               /* Number of level 1 tables in a group */
82767 +#  define PTBLS_PER_GROUP_L2   32                              /*   ... level 2 */
82768 +#  define PTBLS_PER_GROUP_L3   32                              /*   ... level 3 */
82769 +#  define PTBLS_PER_GROUP_LX   32                              /*   ... level X */
82770 +#  define PTBLS_PER_GROUP_MAX  32                              /*  max of l1,l2,l3,lX */
82771 +#else
82772 +#  define PTBL_GROUP_SIZE      4096                            /* page table groups are 4k bytes */
82773 +#  define PTBLS_PER_GROUP_L1   4                               /* Number of level 1 tables in a group */
82774 +#  define PTBLS_PER_GROUP_L2   16                              /*   ... level 2 */
82775 +#  define PTBLS_PER_GROUP_L3   8                               /*   ... level 3 */
82776 +#  define PTBLS_PER_GROUP_LX   16                              /*   ... level X */
82777 +#  define PTBLS_PER_GROUP_MAX  16                              /*  max of l1,l2,l3,lX */
82778 +#endif
82779 +
82780 +#define HMES_PER_GROUP         (PTBLS_PER_GROUP_L3*ELAN3_L3_ENTRIES)
82781 +
82782 +#if ELAN3_PAGE_SHIFT == 13
82783 +#  define PTBLS_PER_PTBL_L1    4                               /* 256 PTPs */
82784 +#  define PTBLS_PER_PTBL_L2    1                               /* 64 PTPs */
82785 +#  define PTBLS_PER_PTBL_L3    1                               /* 32 PTEs */
82786 +#else
82787 +#  define PTBLS_PER_PTBL_L1    4                               /* 256 PTPs */
82788 +#  define PTBLS_PER_PTBL_L2    1                               /* 64 PTPs */
82789 +#  define PTBLS_PER_PTBL_L3    2                               /* 64 PTEs */
82790 +#endif
82791 +
82792 +#define ELAN3_LX_ENTRIES     (32) 
82793 +#define PTBLS_PER_PTBL_LX   (1)        
82794 +
82795 +#define L1_VA_PER_PTBL (ELAN3_L1_SIZE*(ELAN3_L1_ENTRIES/PTBLS_PER_PTBL_L1))    /* 4 ptbl for L1 */
82796 +#define L2_VA_PER_PTBL (ELAN3_L2_SIZE*(ELAN3_L2_ENTRIES/PTBLS_PER_PTBL_L2))    /* 1 ptbl for L2 */
82797 +#define L3_VA_PER_PTBL (ELAN3_L3_SIZE*(ELAN3_L3_ENTRIES/PTBLS_PER_PTBL_L3))    /* 1 ptbl for L3 */
82798 +
82799 +typedef struct elan3_ptbl_gr
82800 +{
82801 +    struct elan3_ptbl_gr *pg_next;                             /* Next in list. */
82802 +    int                         pg_level;                              /* Level PG allocated for */
82803 +    sdramaddr_t                 pg_addr;                               /* sdram offset of ptes/ptps */    
82804 +    ELAN3_PTBL          pg_ptbls[PTBLS_PER_GROUP_MAX];         /* The actual page tables */
82805 +} ELAN3_PTBL_GR;
82806 +
82807 +
82808 +/*
82809 + * The elan3mmu structure is the mmu dependant hardware address translation
82810 + * structure linked to the address space structure to show the translatioms
82811 + * provided by the elan for an address sapce.
82812 + *
82813 + * We also have a doubly linked list of 'regions' which allow the 
82814 + * elan3mmu code to determine the access permissions for the elan 
82815 + * dependant on the virtual address that the translation is being
82816 + * loaded at.
82817 + */
82818 +
82819 +typedef struct elan3mmu_rgn
82820 +{
82821 +    struct elan3mmu_rgn *rgn_mnext;                            /* Doubly linked list of regions */
82822 +    struct elan3mmu_rgn *rgn_mprev;                            /*   sorted on main address */ 
82823 +    caddr_t             rgn_mbase;                             /* main address of base of region */
82824 +
82825 +    struct elan3mmu_rgn *rgn_enext;                            /* Doubly linked list of regions */
82826 +    struct elan3mmu_rgn *rgn_eprev;                            /*   sorted on elan address */
82827 +    E3_Addr             rgn_ebase;                             /* elan address of base of region */
82828 +
82829 +    u_int               rgn_len;                               /* length of region */
82830 +    u_int               rgn_perm;                              /* elan access permission */
82831 +} ELAN3MMU_RGN;
82832 +
82833 +typedef struct elan3mmu
82834 +{
82835 +    spinlock_t             elan3mmu_lock;                      /* spinlock lock for regions */
82836 +    ELAN3MMU_RGN           *elan3mmu_mrgns;                    /* Doubly linked list of memory regions */
82837 +    ELAN3MMU_RGN          *elan3mmu_mtail;                     /* Last memory region on list */
82838 +    ELAN3MMU_RGN          *elan3mmu_mrgnlast;                  /* Last region 'hit' */
82839 +
82840 +    ELAN3MMU_RGN           *elan3mmu_ergns;                    /* Doubly linked list of memory regions */
82841 +    ELAN3MMU_RGN          *elan3mmu_etail;                     /* Last memory region on list */
82842 +    ELAN3MMU_RGN          *elan3mmu_ergnlast;                  /* Last region 'hit' */
82843 +
82844 +    struct elan3_dev        *elan3mmu_dev;                     /* Elan device we're using. */
82845 +    struct elan3_ctxt     *elan3mmu_ctxt;                      /* Elan ctxt we're associated with */
82846 +
82847 +    sdramaddr_t             elan3mmu_ctp;                      /* Context table entry for our context */
82848 +    ELAN3_PTBL            *elan3mmu_l1ptbl;                    /* Level 1 Page table (first of 4) */
82849 +
82850 +    spinlock_t             elan3mmu_lXptbl_lock;               /* spinlock for level X table list */
82851 +    ELAN3_PTBL              *elan3mmu_lXptbl;                    /* Level X Page table list         */
82852 +
82853 +#ifdef LINUX
82854 +    struct mm_struct       *elan3mmu_coproc_mm;                        /* Linux mm we're mapping */
82855 +#endif
82856 +} ELAN3MMU;
82857 +
82858 +_NOTE(LOCK_ORDER(elan3mmu::elan3mmu_lock elan3_dev::IntrLock))
82859 +
82860 +_NOTE(MUTEX_PROTECTS_DATA(elan3mmu::elan3mmu_lock,
82861 +                         elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
82862 +                         elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
82863 +/* protected by dev->IntrLock for read by device driver */
82864 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_mrgns elan3mmu::elan3mmu_mtail
82865 +                                elan3mmu::elan3mmu_ergns elan3mmu::elan3mmu_etail))
82866 +
82867 +_NOTE(SCHEME_PROTECTS_DATA("only set to valid region", 
82868 +                          elan3mmu::elan3mmu_ergnlast elan3mmu::elan3mmu_mrgnlast))
82869 +
82870 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
82871 +                         elan3mmu::elan3mmu_l1ptbl 
82872 +                         elan3mmu::elan3mmu_ctp 
82873 +                         elan3mmu::elan3mmu_dev))
82874 +
82875 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3mmu::elan3mmu_l1ptbl
82876 +                                elan3mmu::elan3mmu_ctp 
82877 +                                elan3mmu::elan3mmu_dev))
82878 +
82879 +/*
82880 + * Macros for accessing ptes/ptbls/ptbl_grs
82881 + */
82882 +
82883 +#define OFFSETOF(object,member)        /* calculate offset of structure member */ \
82884 +       ((size_t) (&(((object *)0)->member)))
82885 +#define PTBL_TO_GR(ptbl)       /* convert ptbl to ptbl group */ \
82886 +       ((ELAN3_PTBL_GR *) ((caddr_t) ((ptbl) - (ptbl)->ptbl_index) - OFFSETOF(ELAN3_PTBL_GR,pg_ptbls[0])))
82887 +#define PTBL_TO_PTADDR(ptbl)   /* convert ptbl to a ptp pointing at it */ \
82888 +        (PTBL_TO_GR(ptbl)->pg_addr + ((ptbl)->ptbl_index<<ELAN3_PT_SHIFT))
82889 +#define PTE_TO_HME(ptbl,pte)   /* convert pte to corresponding hme */ \
82890 +        (PTBL_TO_GR(ptbl)->pg_hmes + ((pte) - (ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr))
82891 +#define HME_TO_PTE(ptebl,hme)  /* convert hme to corresponding pte */ \
82892 +        ((ELAN3_PTE *) PTBL_TO_GR(ptbl)->pg_vaddr + ((hme) - (PTBL_TO_GR(ptbl)->pg_hmes)))
82893 +
82894 +
82895 +/* Flags for lock_ptbl */
82896 +#define LK_PTBL_NOWAIT         0x1
82897 +#define LK_PTBL_FAILOK         0x2
82898 +
82899 +/* Return values for lock_ptbl */
82900 +#define LK_PTBL_OK             0x0
82901 +#define LK_PTBL_MISMATCH       0x1
82902 +#define LK_PTBL_FAILED         0x2
82903 +
82904 +/* Flags for elan3mmu_ptesync */
82905 +#define        NO_MLIST_LOCK   0
82906 +#define        MLIST_LOCKED    1
82907 +
82908 +/* Flags for elan3mmu_pteload */
82909 +#define PTE_LOAD               0x00
82910 +#define PTE_LOAD_LOCK          0x01                            /* translation should be locked */
82911 +#define PTE_LOAD_NOSYNC                0x02                            /* ref/mod bits should not be sync'ed to page */
82912 +#define PTE_NO_SLEEP            0x04                            /* true if we cant sleep */
82913 +#define PTE_NO_STEAL           0x08                            /* true if we don't want to steal ptbls */
82914 +
82915 +#define PTE_LOAD_ENDIAN_MASK   0x10                            /* mask for endian-ness */
82916 +#define PTE_LOAD_LITTLE_ENDIAN 0x00                            /* translation is to little-endian memory */
82917 +#define PTE_LOAD_BIG_ENDIAN    0x10                            /* translation is to big-endian memory */
82918 +
82919 +
82920 +/* Flags for elan3mmu_unload */
82921 +#define PTE_UNLOAD             0x00
82922 +#define PTE_UNLOAD_UNLOCK      0x01
82923 +#define PTE_UNLOAD_NOFLUSH     0x02
82924 +#define PTE_UNLOAD_NOSYNC      0x04
82925 +
82926 +extern int         elan3mmu_debug;
82927 +#ifdef DEBUG_PRINTF
82928 +#  define HAT_PRINTF0(n,msg)            ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg)             : (void) 0)
82929 +#  define HAT_PRINTF1(n,msg,a)          ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a)           : (void) 0)
82930 +#  define HAT_PRINTF2(n,msg,a,b)        ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b)         : (void) 0)
82931 +#  define HAT_PRINTF3(n,msg,a,b,c)      ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c)       : (void) 0)
82932 +#  define HAT_PRINTF4(n,msg,a,b,c,d)    ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d)     : (void) 0)
82933 +#  define HAT_PRINTF5(n,msg,a,b,c,d,e)  ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e)   : (void) 0)
82934 +#  define HAT_PRINTF6(n,msg,a,b,c,d,e,f) ((elan3mmu_debug & n) ? (void) elan3_debugf (NULL, DBG_HAT, msg,a,b,c,d,e,f) : (void) 0)
82935 +#  ifdef LINUX
82936 +#    define HAT_PRINTF(n,args...)        ((elan3mmu_debug & n) ? (void) elan3_debugf(NULL, DBG_HAT, ##args) : (void) 0)
82937 +#  endif
82938 +#else
82939 +#  define HAT_PRINTF0(n,msg)
82940 +#  define HAT_PRINTF1(n,msg,a)
82941 +#  define HAT_PRINTF2(n,msg,a,b)
82942 +#  define HAT_PRINTF3(n,msg,a,b,c)
82943 +#  define HAT_PRINTF4(n,msg,a,b,c,d)
82944 +#  define HAT_PRINTF5(n,msg,a,b,c,d,e)
82945 +#  define HAT_PRINTF6(n,msg,a,b,c,d,e,f)
82946 +#  ifdef LINUX
82947 +#    define HAT_PRINTF(n,args...)
82948 +#  endif
82949 +#endif
82950 +
82951 +/* elan3mmu_generic.c */
82952 +extern ELAN3MMU_GLOBAL_STATS elan3mmu_global_stats;
82953 +
82954 +extern void         elan3mmu_init (void);
82955 +extern void         elan3mmu_fini (void);
82956 +
82957 +extern ELAN3MMU           *elan3mmu_alloc (struct elan3_ctxt *ctxt);
82958 +extern void        elan3mmu_free (ELAN3MMU *elan3mmu);
82959 +
82960 +extern void          elan3mmu_set_context_filter (ELAN3_DEV *dev, int ctx, int disabled, E3_uint32 Pend, E3_uint32 *Maskp);
82961 +extern int          elan3mmu_attach (ELAN3_DEV *dev, int ctx, ELAN3MMU *elan3mmu, sdramaddr_t routeTable, E3_uint32 routeMask);
82962 +extern void         elan3mmu_detach (ELAN3_DEV *dev, int ctx);
82963 +
82964 +extern ELAN3MMU_RGN *elan3mmu_findrgn_elan (ELAN3MMU *elan3mmu, E3_Addr addr, int tail);
82965 +extern int           elan3mmu_addrgn_elan (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn);
82966 +extern ELAN3MMU_RGN *elan3mmu_removergn_elan (ELAN3MMU *elan3mmu, E3_Addr addr);
82967 +extern ELAN3MMU_RGN *elan3mmu_rgnat_elan (ELAN3MMU *elan3mmu, E3_Addr addr);
82968 +extern ELAN3MMU_RGN *elan3mmu_findrgn_main (ELAN3MMU *elan3mmu, caddr_t addr, int tail);
82969 +extern int           elan3mmu_addrgn_main (ELAN3MMU *elan3mmu, ELAN3MMU_RGN *nrgn);
82970 +extern ELAN3MMU_RGN *elan3mmu_removergn_main (ELAN3MMU *elan3mmu, caddr_t addr);
82971 +extern ELAN3MMU_RGN *elan3mmu_rgnat_main (ELAN3MMU *elan3mmu, caddr_t addr);
82972 +
82973 +extern int          elan3mmu_setperm (ELAN3MMU *elan3mmu, caddr_t maddr, E3_Addr eaddr, u_int len, u_int perm);
82974 +extern void         elan3mmu_clrperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len);
82975 +extern int          elan3mmu_checkperm (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int access);
82976 +extern caddr_t      elan3mmu_mainaddr (ELAN3MMU *elan3mmu, E3_Addr addr);
82977 +extern E3_Addr      elan3mmu_elanaddr (ELAN3MMU *elan3mmu, caddr_t addr);
82978 +
82979 +extern void          elan3mmu_expand (ELAN3MMU *elan3mmu, E3_Addr addr, int len, int level, int attr);
82980 +extern void          elan3mmu_reserve (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *);
82981 +extern void          elan3mmu_release (ELAN3MMU *elan3mmu, E3_Addr addr, u_int npages, sdramaddr_t *);
82982 +
82983 +extern void          elan3mmu_pteload (ELAN3MMU *elan3mmu, int level, E3_Addr addr, physaddr_t paddr, int perm, int attr);
82984 +extern void         elan3mmu_unload (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, int flags);
82985 +extern void         elan3mmu_sync (ELAN3MMU *elan3mmu, E3_Addr addr, u_int len, u_int clearflag);
82986 +extern void         elan3mmu_pteunload (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock);
82987 +extern void         elan3mmu_ptesync (ELAN3_PTBL *ptbl, sdramaddr_t pte, int flags, int got_mlist_lock);
82988 +extern sdramaddr_t   elan3mmu_ptp2pte (ELAN3MMU *elan3mmu, sdramaddr_t ptp, int level);
82989 +extern sdramaddr_t   elan3mmu_ptefind (ELAN3MMU *elan3mmu, E3_Addr, int *level, ELAN3_PTBL **pptbl, spinlock_t **plock, unsigned long *flags);
82990 +extern sdramaddr_t   elan3mmu_ptealloc (ELAN3MMU *elan3mmu, E3_Addr, int level, ELAN3_PTBL **pptbl, spinlock_t **plock, int attr, unsigned long *flags);
82991 +extern void         elan3mmu_l1inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l1ptbl, int flags);
82992 +extern int           elan3mmu_l2inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l2ptbl, int flags, E3_Addr addr, spinlock_t **pl2lock, unsigned long *lock_flags);
82993 +extern int           elan3mmu_l3inval (ELAN3MMU *elan3mmu, ELAN3_PTBL *l3ptbl, int flags, E3_Addr addr, spinlock_t **pl3lock, unsigned long *lock_flags);
82994 +
82995 +extern void          elan3mmu_free_l1ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
82996 +extern void          elan3mmu_free_l2ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
82997 +extern void          elan3mmu_free_l3ptbl (ELAN3_DEV *dev, ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
82998 +
82999 +extern int          elan3mmu_lock_this_ptbl (ELAN3_PTBL *ptbl, int flag, spinlock_t **plock, unsigned long *flags);
83000 +extern int           elan3mmu_lock_ptbl (ELAN3_PTBL *ptbl, u_int flag, ELAN3MMU *elan3mmu, E3_Addr va, int level, spinlock_t **plock, unsigned long *flags);
83001 +extern void         elan3mmu_unlock_ptbl (ELAN3_PTBL *ptbl, spinlock_t *lock, unsigned long flags);
83002 +
83003 +/* elan3mmu_osdep.c */
83004 +extern void         elan3mmu_init_osdep (void);
83005 +extern void         elan3mmu_fini_osdep (void);
83006 +extern void         elan3mmu_alloc_osdep (ELAN3MMU *elan3mmu);
83007 +extern void         elan3mmu_free_osdep (ELAN3MMU *elan3mmu);
83008 +extern ELAN3_PTE     elan3mmu_phys_to_pte (ELAN3_DEV *dev, physaddr_t paddr, int perm);
83009 +extern ELAN3_PTE     elan3mmu_kernel_invalid_pte (ELAN3MMU *elan3mmu);
83010 +
83011 +#if defined (DIGITAL_UNIX)
83012 +#  include <elan3/elan3mmu_dunix.h>
83013 +#elif defined (LINUX)
83014 +#  include <elan3/elan3mmu_linux.h>
83015 +#endif
83016 +
83017 +#endif /* __KERNEL__ */
83018 +
83019 +#ifdef __cplusplus
83020 +}
83021 +#endif
83022 +
83023 +#endif /* __ELAN3_ELAN3MMU_H */
83024 +
83025 +/*
83026 + * Local variables:
83027 + * c-file-style: "stroustrup"
83028 + * End:
83029 + */
83030 diff -urN clean/include/elan3/elan3mmu_linux.h linux-2.6.9/include/elan3/elan3mmu_linux.h
83031 --- clean/include/elan3/elan3mmu_linux.h        1969-12-31 19:00:00.000000000 -0500
83032 +++ linux-2.6.9/include/elan3/elan3mmu_linux.h  2003-09-24 09:57:24.000000000 -0400
83033 @@ -0,0 +1,39 @@
83034 +/*
83035 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83036 + *
83037 + *    For licensing information please see the supplied COPYING file
83038 + *
83039 + */
83040 +
83041 +#ifndef __ELAN3_MMU_LINUX_H
83042 +#define __ELAN3_MMU_LINUX_H
83043 +
83044 +#ident "$Id: elan3mmu_linux.h,v 1.12 2003/09/24 13:57:24 david Exp $"
83045 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3mmu_linux.h,v $*/
83046 +
83047 +/* XXX copy of elan3mmu_dunix.h */
83048 +
83049 +#define ALLOC_ELAN3MMU(ptr,cansleep)           KMEM_ALLOC(ptr, ELAN3MMU *, sizeof (ELAN3MMU), cansleep)
83050 +#define ALLOC_PTBL_GR(ptr,cansleep)            KMEM_ALLOC(ptr, ELAN3_PTBL_GR *, sizeof (ELAN3_PTBL_GR), cansleep)
83051 +#define ALLOC_ELAN3MMU_RGN(ptr,cansleep)       KMEM_ALLOC(ptr, ELAN3MMU_RGN *, sizeof (ELAN3MMU_RGN), cansleep)
83052 +#define ALLOC_HMENTS(ptr,cansleep)             KMEM_ALLOC((ptr,ELAN3_HMENT *, sizeof (ELAN3_HMENT), cansleep)
83053 +
83054 +#define FREE_ELAN3MMU(ptr)                     KMEM_FREE(ptr,sizeof (ELAN3MMU))
83055 +#define FREE_PTBL_GR(ptr)                      KMEM_FREE(ptr,sizeof (ELAN3_PTBL_GR))
83056 +#define FREE_ELAN3MMU_RGN(ptr)                 KMEM_FREE(ptr,sizeof (ELAN3MMU_RGN))
83057 +#define FREE_HMENTS(ptr)                       KMEM_FREE(ptr,sizeof (ELAN3_HMENT))
83058 +
83059 +extern void         elan3mmu_init_osdep(void);
83060 +extern void         elan3mmu_fini_osdep(void);
83061 +
83062 +extern void          elan3mmu_pte_range_unload (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
83063 +extern void          elan3mmu_pte_range_update (ELAN3MMU *elan3mmu, struct mm_struct *mm, caddr_t addr, unsigned long len);
83064 +extern void          elan3mmu_pte_ctxt_unload(ELAN3MMU *elan3mmu);
83065 +
83066 +#endif
83067 +
83068 +/*
83069 + * Local variables:
83070 + * c-file-style: "stroustrup"
83071 + * End:
83072 + */
83073 diff -urN clean/include/elan3/elan3ops.h linux-2.6.9/include/elan3/elan3ops.h
83074 --- clean/include/elan3/elan3ops.h      1969-12-31 19:00:00.000000000 -0500
83075 +++ linux-2.6.9/include/elan3/elan3ops.h        2003-09-24 09:57:24.000000000 -0400
83076 @@ -0,0 +1,42 @@
83077 +/*
83078 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83079 + *
83080 + *    For licensing information please see the supplied COPYING file
83081 + *
83082 + */
83083 +
83084 +/* $Id: elan3ops.h,v 1.3 2003/09/24 13:57:24 david Exp $ */
83085 +/* $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elan3ops.h,v $ */
83086 +
83087 +#ifndef _ELAN3_OPS_H
83088 +#define _ELAN3_OPS_H
83089 +
83090 +int get_position          (void *arg, ELAN_POSITION *position);
83091 +int set_position          (void *arg, unsigned short nodeId, unsigned short numNodes);
83092 +
83093 +int elan3mod_create_cap   (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
83094 +int elan3mod_destroy_cap  (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap);
83095 +
83096 +int elan3mod_create_vp    (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
83097 +int elan3mod_destroy_vp   (void *arg, ELAN_CAP_OWNER owner, ELAN_CAPABILITY *cap, ELAN_CAPABILITY *map);
83098 +
83099 +int elan3mod_attach_cap   (void *arg_ctxt, ELAN_CAPABILITY *cap);
83100 +int elan3mod_detach_cap   (void *arg_ctxt);
83101 +
83102 +extern ELAN_DEV_OPS elan3_dev_ops;
83103 +
83104 +int stats_get_index_name  (void *arg, uint index, caddr_t name);
83105 +int stats_get_block       (void *arg, uint entries, ulong *value);
83106 +int stats_clear_block     (void *arg);
83107 +
83108 +int elan3_register_dev_stats   (ELAN3_DEV * dev);
83109 +void elan3_deregister_dev_stats (ELAN3_DEV * dev);
83110 +
83111 +
83112 +#endif /* __ELAN3_OPS_H */
83113 +
83114 +/*
83115 + * Local variables:
83116 + * c-file-style: "linux"
83117 + * End:
83118 + */
83119 diff -urN clean/include/elan3/elanctxt.h linux-2.6.9/include/elan3/elanctxt.h
83120 --- clean/include/elan3/elanctxt.h      1969-12-31 19:00:00.000000000 -0500
83121 +++ linux-2.6.9/include/elan3/elanctxt.h        2003-09-24 09:57:24.000000000 -0400
83122 @@ -0,0 +1,856 @@
83123 +/*
83124 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83125 + *
83126 + *    For licensing information please see the supplied COPYING file
83127 + *
83128 + */
83129 +
83130 +#ifndef _ELAN3_ELANCTXT_H
83131 +#define _ELAN3_ELANCTXT_H
83132 +
83133 +#ident "$Id: elanctxt.h,v 1.81 2003/09/24 13:57:24 david Exp $"
83134 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanctxt.h,v $*/
83135 +
83136 +#ifdef __cplusplus
83137 +extern "C" {
83138 +#endif
83139 +
83140 +#include <elan3/elanregs.h>
83141 +#include <elan3/vmseg.h>
83142 +
83143 +#define BumpUserStat(ctxt, stat)       ((ctxt)->FlagPage->stat++)
83144 +
83145 +#if defined(__LITTLE_ENDIAN__)
83146 +
83147 +typedef union _CProcTrapBuf
83148 +{
83149 +    E3_uint64 Align64;
83150 +    struct
83151 +    {
83152 +       E3_uint32 Areg;
83153 +       E3_uint32 Breg;
83154 +    } r;
83155 +    struct
83156 +    {
83157 +       E3_uint32 Addr;
83158 +       E3_uint32 ContextType;
83159 +    } s;
83160 +} CProcTrapBuf_BE;
83161 +
83162 +typedef E3_EventInt        E3_EventInt_BE;
83163 +typedef E3_IprocTrapHeader E3_IprocTrapHeader_BE;
83164 +typedef E3_IprocTrapData   E3_IprocTrapData_BE;
83165 +typedef E3_FaultSave      E3_FaultSave_BE;
83166 +
83167 +typedef union
83168 +{
83169 +    E3_uint64  Align64;
83170 +    E3_DMA      s;
83171 +} E3_DMA_BE;
83172 +
83173 +typedef E3_ThreadQueue     E3_ThreadQueue_BE;
83174 +
83175 +#else
83176 +
83177 +/* "Big-Endian" data structures copied by 64 bit loads, these are 32 bit word flipped */
83178 +/* from the corresponding data structure. */
83179 +
83180 +typedef union _CProcTrapBuf
83181 +{
83182 +    E3_uint64 Align64;
83183 +    struct
83184 +    {
83185 +       E3_uint32 Breg;
83186 +       E3_uint32 Areg;
83187 +    } r;
83188 +    struct
83189 +    {
83190 +       E3_uint32 ContextType;
83191 +       E3_uint32 Addr;
83192 +    } s;
83193 +} CProcTrapBuf_BE;
83194 +
83195 +typedef union _E3_EventInt_BE
83196 +{
83197 +    E3_uint64    Align64;
83198 +    struct {
83199 +       E3_uint32 EventContext; /* Bits 16 to 28 */
83200 +       E3_uint32 IntCookie;
83201 +    } s;
83202 +} E3_EventInt_BE;
83203 +
83204 +typedef union _E3_IprocTrapHeader_BE
83205 +{
83206 +   E3_uint64            Align64;
83207 +
83208 +   struct
83209 +   {
83210 +      E3_uint32                 TrAddr;
83211 +      E3_TrTypeCntx     TrTypeCntx;
83212 +      union
83213 +      {
83214 +        E3_IProcStatus_Reg u_IProcStatus;
83215 +        E3_uint32          u_TrData1;
83216 +      } ipsotd;
83217 +      E3_uint32                 TrData0;
83218 +   } s;
83219 +} E3_IprocTrapHeader_BE;
83220 +
83221 +typedef E3_IprocTrapData E3_IprocTrapData_BE;
83222 +
83223 +typedef union _E3_FaultSave_be
83224 +{
83225 +    E3_uint64                  Align64;
83226 +    struct {
83227 +       volatile E3_uint32      FaultContext;
83228 +       E3_FaultStatusReg       FSR;
83229 +       volatile E3_uint32      EventAddress;
83230 +       volatile E3_uint32      FaultAddress;
83231 +    } s;
83232 +} E3_FaultSave_BE;
83233 +
83234 +typedef union _e3_dma_be
83235 +{
83236 +    E3_uint64          Align64;
83237 +    struct {
83238 +       E3_uint32       dma_size;
83239 +       E3_DmaType      dma_u;
83240 +       E3_Addr         dma_dest;
83241 +       E3_Addr         dma_source;
83242 +       E3_CookieVProc  dma_destCookieProc;
83243 +       E3_Addr         dma_destEvent;
83244 +       E3_CookieVProc  dma_srcCookieProc;
83245 +       E3_Addr         dma_srcEvent;
83246 +    } s;
83247 +} E3_DMA_BE;
83248 +
83249 +typedef union _E3_ThreadQueue_BE
83250 +{
83251 +   E3_uint64   Align64;
83252 +   struct
83253 +   {
83254 +       /* copied by 64 bit copy from elan to main */
83255 +       E3_uint32 :3;           /* Bits 29 to 31 */
83256 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
83257 +       E3_uint32 :16;          /* Bits 0  to 15 */
83258 +       E3_Addr  Thread;        /* Bits 32 to 63 */
83259 +   } s;
83260 +} E3_ThreadQueue_BE;
83261 +
83262 +#endif /* defined(LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__) */
83263 +
83264 +typedef struct neterr_msg
83265 +{
83266 +    E3_uint32          Rail;                                   /* Rail error received on */
83267 +    ELAN_CAPABILITY    SrcCapability;                          /* Capability of source of packet */
83268 +    ELAN_CAPABILITY    DstCapability;                          /* Capability of dest of packet */
83269 +
83270 +    E3_uint32          DstProcess;                             /* Virtual Process of dest of packet */
83271 +    E3_Addr            CookieAddr;                             /* Cookie Address (or NULL for DMA) */
83272 +    E3_uint32          CookieVProc;                            /* Cookie and VP (identifies DMA) */
83273 +    E3_uint32          NextCookie;                             /* Next Cookie value (for thread) */
83274 +    E3_uint32          WaitForEop;                             /* Wait for EOP transaction */
83275 +} NETERR_MSG;
83276 +
83277 +#ifdef __KERNEL__
83278 +
83279 +/*
83280 + * Associated with each input channel can be a network error
83281 + * resolver structure, which can be queued on the network 
83282 + * error resolver threads to perform RPCs to the other kernels
83283 + * when a network error occurs with an identify transaction
83284 + * included
83285 + */
83286 +typedef struct neterr_resolver
83287 +{
83288 +    struct neterr_resolver *Next;
83289 +
83290 +    spinlock_t             Lock;
83291 +
83292 +    struct elan3_ctxt       *Ctxt;
83293 +    ELAN_LOCATION          Location;
83294 +
83295 +    int                            Completed;
83296 +    int                            Status;
83297 +    long                   Timestamp;
83298 +
83299 +    NETERR_MSG             Message;
83300 +} NETERR_RESOLVER;
83301 +
83302 +
83303 +typedef struct neterr_fixup
83304 +{
83305 +    struct neterr_fixup           *Next;
83306 +
83307 +    kcondvar_t             Wait;
83308 +    int                            Completed;
83309 +    int                            Status;
83310 +
83311 +    NETERR_MSG             Message;
83312 +} NETERR_FIXUP;
83313 +
83314 +#endif /* __KERNEL__ */
83315 +
83316 +/* Each of the following structures must be padded to a whole */
83317 +/* number of 64 bit words since the kernel uses 64 bit load/stores */
83318 +/* to transfer the elan register state. */
83319 +typedef struct command_trap
83320 +{
83321 +    E3_Status_Reg      Status;                                 /* 4  bytes */
83322 +    E3_uint32          Pad;                                    /* 4  bytes */
83323 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
83324 +    CProcTrapBuf_BE            TrapBuf;                                /* 8  bytes */
83325 +} COMMAND_TRAP;
83326 +
83327 +typedef struct thread_trap
83328 +{
83329 +    E3_uint32          Registers[32];                          /* 128 bytes */
83330 +#define REG_GLOBALS    0
83331 +#define REG_OUTS       8
83332 +#define REG_LOCALS     16
83333 +#define REG_INS                24
83334 +
83335 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
83336 +    E3_FaultSave_BE     DataFaultSave;                         /* 16 bytes */
83337 +    E3_FaultSave_BE     InstFaultSave;                         /* 16 bytes */
83338 +    E3_FaultSave_BE     OpenFaultSave;                         /* 16 bytes */
83339 +    
83340 +    E3_Status_Reg      Status;                                 /* 4 bytes */
83341 +
83342 +    E3_Addr            pc;                                     /* 4 bytes */
83343 +    E3_Addr            npc;                                    /* 4 bytes */
83344 +    E3_Addr            StartPC;                                /* 4 bytes */
83345 +    E3_Addr            sp;                                     /* 4 bytes */
83346 +    E3_uint32          mi;                                     /* 4 bytes */
83347 +    E3_TrapBits                TrapBits;                               /* 4 bytes */
83348 +    E3_DirtyBits       DirtyBits;                              /* 4 bytes */
83349 +} THREAD_TRAP;
83350 +
83351 +typedef struct dma_trap
83352 +{
83353 +    E3_DMA_BE          Desc;                                   /* 32 bytes */
83354 +    E3_FaultSave_BE    FaultSave;                              /* 16 bytes */
83355 +    E3_FaultSave_BE    Data0;                                  /* 16 bytes */
83356 +    E3_FaultSave_BE    Data1;                                  /* 16 bytes */
83357 +    E3_FaultSave_BE    Data2;                                  /* 16 bytes */
83358 +    E3_FaultSave_BE    Data3;                                  /* 16 bytes */
83359 +    E3_Status_Reg      Status;                                 /* 4 bytes */
83360 +    E3_DmaInfo         PacketInfo;                             /* 4 bytes */
83361 +} DMA_TRAP;
83362 +
83363 +typedef struct input_trap
83364 +{
83365 +    E3_uint32             State;                               /* 4 bytes */
83366 +    E3_Status_Reg         Status;                              /* 4 bytes */
83367 +    E3_FaultSave_BE       FaultSave;                           /* 16 bytes */
83368 +    
83369 +    u_int                 NumTransactions;                     /* 4 bytes */
83370 +    u_int                 Overflow;                            /* 4 bytes */
83371 +    u_int                 AckSent;                             /* 4 bytes */
83372 +    u_int                 BadTransaction;                      /* 4 bytes */
83373 +
83374 +    E3_IprocTrapHeader_BE *TrappedTransaction;                 /* 4 bytes */
83375 +    E3_IprocTrapData_BE   *TrappedDataBuffer;                  /* 4 bytes */
83376 +    E3_IprocTrapHeader_BE *WaitForEopTransaction;              /* 4 bytes */
83377 +    E3_IprocTrapData_BE   *WaitForEopDataBuffer;               /* 4 bytes */
83378 +    E3_IprocTrapHeader_BE *DmaIdentifyTransaction;             /* 4 bytes */
83379 +    E3_IprocTrapHeader_BE *ThreadIdentifyTransaction;          /* 4 bytes */
83380 +    E3_Addr               LockQueuePointer;                    /* 4 bytes */
83381 +    E3_Addr               UnlockQueuePointer;                  /* 4 bytes */
83382 +
83383 +    E3_IprocTrapHeader_BE  Transactions[MAX_TRAPPED_TRANS];    /* n * 8 bytes */
83384 +    E3_IprocTrapData_BE           DataBuffers[MAX_TRAPPED_TRANS];      /* n * 64 bytes */
83385 +} INPUT_TRAP;
83386 +
83387 +typedef struct input_fault_save
83388 +{
83389 +    struct input_fault_save *Next;
83390 +    E3_Addr                 Addr;
83391 +    E3_uint32               Count;
83392 +} INPUT_FAULT_SAVE;
83393 +
83394 +#define NUM_INPUT_FAULT_SAVE   32
83395 +#define MIN_INPUT_FAULT_PAGES  8
83396 +#define MAX_INPUT_FAULT_PAGES  128
83397 +
83398 +typedef E3_uint32 EVENT_COOKIE;
83399 +
83400 +#ifdef __KERNEL__
83401 +
83402 +typedef struct event_cookie_entry
83403 +{
83404 +    struct event_cookie_entry *ent_next;
83405 +    struct event_cookie_entry *ent_prev;
83406 +
83407 +    spinlock_t                ent_lock;
83408 +    unsigned                  ent_ref;
83409 +
83410 +    EVENT_COOKIE              ent_cookie;
83411 +    EVENT_COOKIE              ent_fired;
83412 +    kcondvar_t                ent_wait;
83413 +} EVENT_COOKIE_ENTRY;
83414 +
83415 +typedef struct event_cookie_table
83416 +{
83417 +    struct event_cookie_table *tbl_next;
83418 +    struct event_cookie_table *tbl_prev;
83419 +
83420 +    unsigned long              tbl_task;
83421 +    unsigned long              tbl_handle;
83422 +
83423 +    spinlock_t                tbl_lock;
83424 +    unsigned                  tbl_ref;
83425 +    EVENT_COOKIE_ENTRY        *tbl_entries;
83426 +} EVENT_COOKIE_TABLE;
83427 +
83428 +#define NBYTES_PER_SMALL_ROUTE 8
83429 +#define NBYTES_PER_LARGE_ROUTE 16
83430 +
83431 +#define ROUTE_BLOCK_SIZE       ELAN3_PAGE_SIZE
83432 +#define NROUTES_PER_BLOCK      (ROUTE_BLOCK_SIZE/NBYTES_PER_LARGE_ROUTE)
83433 +
83434 +typedef struct elan3_routes
83435 +{
83436 +    struct elan3_routes                *Next;                                  /* Can be chained together */
83437 +
83438 +    sdramaddr_t                         Routes;                                /* sdram offset of route entries */
83439 +    bitmap_t                    Bitmap[BT_BITOUL(NROUTES_PER_BLOCK)];  /* Bitmap of which entries are used */
83440 +} ELAN3_ROUTES; 
83441 +
83442 +
83443 +typedef struct elan3_route_table
83444 +{
83445 +    spinlock_t          Lock;                          /* Route lock */
83446 +    sdramaddr_t                 Table;                         /* Kernel address for route table */
83447 +    u_int               Size;                          /* # entries in route table */
83448 +
83449 +    ELAN3_ROUTES       *LargeRoutes;                   /* Large routes */
83450 +} ELAN3_ROUTE_TABLE;
83451 +
83452 +typedef struct elan3_vpseg
83453 +{
83454 +    struct elan3_vpseg         *Next;
83455 +    int                                 Process;                       /* Virtual process */
83456 +    int                                 Entries;                       /*  and # processes */
83457 +    int                                 Type;                          /* Type of cookie */
83458 +
83459 +    union
83460 +    {
83461 +       
83462 +       ELAN_CAPABILITY Capability;                     /* Capability of remote segment */
83463 +#  define SegCapability                SegUnion.Capability
83464 +       struct {
83465 +           u_short             LowProc;                        /* Base process number */
83466 +           u_short             HighProc;                       /*   and high process number */
83467 +#  define SegLowProc           SegUnion.BROADCAST.LowProc
83468 +#  define SegHighProc          SegUnion.BROADCAST.HighProc
83469 +       } BROADCAST;
83470 +    } SegUnion;
83471 +} ELAN3_VPSEG;
83472 +
83473 +#define ELAN3_VPSEG_UNINT      0                               /* Unitialised */
83474 +#define ELAN3_VPSEG_P2P                1                               /* Point to Point */
83475 +#define ELAN3_VPSEG_BROADCAST  2                               /* Broadcast */
83476 +
83477 +#define NUM_LISTS      7                                       /* Number of "swap" lists */
83478 +
83479 +typedef struct elan3_ctxt
83480 +{
83481 +    struct elan3_ctxt    *Next;                                        /* can be queued on a task */
83482 +    struct elan3_ctxt    *Prev;
83483 +
83484 +    CtxtHandle          Handle;                                /* user handle */
83485 +    int                         RefCnt;                                /* reference count */
83486 +
83487 +    ELAN3MMU           *Elan3mmu;                              /* elan3mmu allocated for Elan translations */
83488 +
83489 +    struct elan3_ops     *Operations;                          /* User supplied helper functions */
83490 +    void               *Private;                               /* Users private pointer */
83491 +
83492 +    int                         Status;                                /* Status (guarded by dev_mutex) */
83493 +    int                         OthersState;                           /* State of halt queueing for dma/thread */
83494 +    int                         LwpCount;                              /* Number of lwp's running */
83495 +
83496 +    ELAN3_DEV          *Device;                                /* Elan device */
83497 +
83498 +    ELAN_CAPABILITY     Capability;                            /* Capability I've attached as */
83499 +    ELAN_POSITION       Position;                              /* Position when I was created */
83500 +    
83501 +    ELAN3_VPSEG                *VpSegs;                                /* List of virtual process segments */
83502 +    ELAN3_ROUTE_TABLE    *RouteTable;
83503 +
83504 +    krwlock_t           VpLock;                                /* Reader/writer lock for vp list */
83505 +    kmutex_t            SwapListsLock;                         /* mutex to lock swap lists */
83506 +    kmutex_t            CmdLock;                               /* mutex to lock trapped dma command */
83507 +    kmutex_t            CmdPortLock;                           /* mutex to load/unload commandport xlation */
83508 +
83509 +    kcondvar_t          Wait;                                  /* Condition variable to sleep on */
83510 +    kcondvar_t          CommandPortWait;                       /* Condition variable to wait for commandport */
83511 +    kcondvar_t          LwpWait;                               /* Condition variable to wait for lwps to stop */
83512 +    kcondvar_t          HaltWait;                              /* Condition variable to wait for halt */
83513 +    int                         Halted;                                /*  and flag for halt cv */
83514 +
83515 +    caddr_t             CommandPageMapping;                    /* user virtual address for command page mapping */
83516 +    ioaddr_t             CommandPage;                          /* Elan command port mapping page */
83517 +    DeviceMappingHandle  CommandPageHandle;                    /* DDI Handle */
83518 +    ioaddr_t            CommandPort;                           /* Elan command port */
83519 +    void               *CommandPortItem;                       /* Item we're re-issuing to commandport */
83520 +
83521 +    ELAN3_FLAGSTATS      *FlagPage;                            /* Page visible to user process */
83522 +
83523 +    COMMAND_TRAP       *CommandTraps;                          /* Command port traps */
83524 +    ELAN3_SPLIT_QUEUE     CommandTrapQ;
83525 +                                                                  
83526 +    CProcTrapBuf_BE    *Commands;                              /* Overflowed commands */
83527 +    ELAN3_QUEUE           CommandQ;
83528 +
83529 +    THREAD_TRAP                *ThreadTraps;                           /* Thread processor traps */
83530 +    ELAN3_QUEUE                 ThreadTrapQ;
83531 +    
83532 +    DMA_TRAP           *DmaTraps;                              /* Dma processor tra[ed */
83533 +    ELAN3_QUEUE                 DmaTrapQ;
83534 +
83535 +    INPUT_TRAP          Input0Trap;                            /* Inputter channel 0 trap */
83536 +    INPUT_TRAP          Input1Trap;                            /* Inputter channel 1 trap */
83537 +    NETERR_RESOLVER    *Input0Resolver;                        /* Inputter channel 0 network error resolver */
83538 +    NETERR_RESOLVER    *Input1Resolver;                        /* Inputter channel 1 network error resolver */
83539 +
83540 +    INPUT_FAULT_SAVE    InputFaults[NUM_INPUT_FAULT_SAVE];     /* stored writeblock addresses */
83541 +    INPUT_FAULT_SAVE    *InputFaultList;                       /* organized in list for LRU */
83542 +    spinlock_t          InputFaultLock;                        /* and lock for list */
83543 +
83544 +    kmutex_t            NetworkErrorLock;
83545 +    NETERR_FIXUP       *NetworkErrorFixups;
83546 +
83547 +    EVENT_COOKIE        *EventCookies;                         /* Event cookies. */
83548 +    ELAN3_QUEUE                 EventCookieQ;
83549 +
83550 +    E3_Addr            *SwapThreads;                           /* Swapped Thread Queue */
83551 +    ELAN3_QUEUE                 SwapThreadQ;
83552 +
83553 +    E3_DMA_BE          *SwapDmas;                              /* Swapped Dmas Queue */
83554 +    ELAN3_QUEUE                 SwapDmaQ;
83555 +
83556 +    int                         ItemCount[NUM_LISTS];                  /* Count of items on each swap list */
83557 +    int                         inhibit;                               /* if set lwp not to reload translations */
83558 +
83559 +    int                  Disabled;
83560 +} ELAN3_CTXT;
83561 +
83562 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock, 
83563 +                         elan3_ctxt::Status elan3_ctxt::OthersState
83564 +                         elan3_ctxt::CommandTrapQ elan3_ctxt::CommandQ elan3_ctxt::ThreadTrapQ elan3_ctxt::DmaTrapQ 
83565 +                         elan3_ctxt::Input0Trap elan3_ctxt::Input1Trap elan3_ctxt::EventCookieQ elan3_ctxt::SwapThreadQ 
83566 +                         elan3_ctxt::SwapDmaQ elan3_ctxt::CommandPortItem elan3_ctxt::LwpCount))
83567 +_NOTE(MUTEX_PROTECTS_DATA(elan3_ctxt::SwapListsLock, 
83568 +                         elan3_ctxt::ItemCount))
83569 +_NOTE(RWLOCK_PROTECTS_DATA(elan3_ctxt::VpLock, 
83570 +                          elan3_ctxt::VpSegs elan3_vpseg::Next elan3_vpseg::Process 
83571 +                          elan3_vpseg::Entries elan3_vpseg::Type))
83572 +
83573 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_ctxt::ItemCount elan3_ctxt::Status elan3_ctxt::CommandPortItem))
83574 +
83575 +_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock elan3_ctxt::CmdLock elan3_dev::IntrLock))
83576 +_NOTE(LOCK_ORDER(elan3_ctxt::SwapListsLock as::a_lock))                        /* implicit by pagefault */
83577 +
83578 +#define CTXT_DETACHED                          (1 << 0)                /* Context is detached. */
83579 +#define CTXT_NO_LWPS                           (1 << 1)                /* No lwp's to handle faults */
83580 +#define CTXT_EXITING                           (1 << 2)                /* User process is exiting */
83581 +
83582 +#define CTXT_SWAPPING_OUT                      (1 << 3)                /* Context is swapping out */
83583 +#define CTXT_SWAPPED_OUT                       (1 << 4)                /* Context is swapped out */
83584 +
83585 +#define CTXT_SWAP_FREE                         (1 << 5)                /* Swap buffer is free */
83586 +#define CTXT_SWAP_VALID                                (1 << 6)                /* Swap buffer has queue entries in it */
83587 +
83588 +#define CTXT_DMA_QUEUE_FULL                    (1 << 7)                /* Dma trap queue is full */
83589 +#define CTXT_THREAD_QUEUE_FULL                 (1 << 8)                /* Thread trap queue is full */
83590 +#define CTXT_EVENT_QUEUE_FULL                  (1 << 9)                /* Event interrupt queue is full */
83591 +#define CTXT_COMMAND_OVERFLOW_ERROR            (1 << 10)               /* Trap queue overflow */
83592 +
83593 +#define CTXT_SWAP_WANTED                       (1 << 11)               /* Some one wanted to swap */
83594 +#define CTXT_WAITING_SWAPIN                    (1 << 12)               /* Someone waiting on swapin */
83595 +
83596 +#define CTXT_WAITING_COMMAND                   (1 << 13)               /* swgelan waiting on command port */
83597 +#define CTXT_COMMAND_MAPPED_MAIN               (1 << 14)               /* segelan has mapped command port */
83598 +
83599 +#define CTXT_QUEUES_EMPTY                      (1 << 15)               /* dma/thread run queues are empty */
83600 +#define CTXT_QUEUES_EMPTYING                   (1 << 16)               /* dma/thread run queues are being emptied */
83601 +
83602 +#define CTXT_USER_FILTERING                    (1 << 17)               /* user requested context filter */
83603 +
83604 +#define CTXT_KERNEL                            (1 << 18)               /* context is a kernel context */
83605 +#define CTXT_COMMAND_MAPPED_ELAN               (1 << 19)               /* command port is mapped for elan */
83606 +#define CTXT_FIXUP_NETERR                      (1 << 20)               /* fixing up a network error */
83607 +
83608 +
83609 +#define CTXT_SWAPPED_REASONS           (CTXT_NO_LWPS   |               \
83610 +                                        CTXT_DETACHED  |               \
83611 +                                        CTXT_EXITING   |               \
83612 +                                        CTXT_FIXUP_NETERR)
83613 +
83614 +#define CTXT_OTHERS_REASONS            (CTXT_EVENT_QUEUE_FULL  |       \
83615 +                                        CTXT_DMA_QUEUE_FULL    |       \
83616 +                                        CTXT_THREAD_QUEUE_FULL |       \
83617 +                                        CTXT_COMMAND_OVERFLOW_ERROR |  \
83618 +                                        CTXT_SWAPPED_REASONS)
83619 +
83620 +#define CTXT_INPUTTER_REASONS          (CTXT_USER_FILTERING |          \
83621 +                                        CTXT_OTHERS_REASONS)
83622 +
83623 +#define CTXT_COMMAND_MAPPED            (CTXT_COMMAND_MAPPED_MAIN |     \
83624 +                                        CTXT_COMMAND_MAPPED_ELAN)
83625 +
83626 +#define CTXT_IS_KERNEL(ctxt)           ((ctxt)->Status & CTXT_KERNEL)
83627 +
83628 +/*
83629 + * State values for ctxt_inputterState/ctxt_commandportStats
83630 + */
83631 +#define CTXT_STATE_OK                  0
83632 +#define CTXT_STATE_TRAPPED             1               /* Inputter channel 0 trapped */
83633 +#define CTXT_STATE_RESOLVING           2               /* An LWP is resolving the trap */
83634 +#define CTXT_STATE_NEEDS_RESTART       3               /* Th trapped packet needs to be executed */
83635 +#define CTXT_STATE_NETWORK_ERROR       4               /* We're waiting on an RPC for the identify transaction */
83636 +#define CTXT_STATE_EXECUTING           5               /* An LWP is executing the trapped packet */
83637 +
83638 +/*
83639 + * State values for OthersState.
83640 + */
83641 +#define CTXT_OTHERS_RUNNING            0
83642 +#define CTXT_OTHERS_HALTING            1
83643 +#define CTXT_OTHERS_SWAPPING           2
83644 +#define CTXT_OTHERS_HALTING_MORE       3
83645 +#define CTXT_OTHERS_SWAPPING_MORE      4
83646 +#define CTXT_OTHERS_SWAPPED            5
83647 +
83648 +typedef struct elan3_ops
83649 +{
83650 +    u_int  Version;
83651 +
83652 +    int         (*Exception)   (ELAN3_CTXT *ctxt, int type, int proc, void *trap, va_list ap);
83653 +
83654 +    /* swap item list functions */
83655 +    int  (*GetWordItem)                (ELAN3_CTXT *ctxt, int list, void **itemp, E3_uint32 *valuep);
83656 +    int  (*GetBlockItem)       (ELAN3_CTXT *ctxt, int list, void **itemp, E3_Addr *valuep);
83657 +    void (*PutWordItem)                (ELAN3_CTXT *ctxt, int list, E3_Addr value);
83658 +    void (*PutBlockItem)       (ELAN3_CTXT *ctxt, int list, E3_uint32 *ptr);
83659 +    void (*PutbackItem)                (ELAN3_CTXT *ctxt, int list, void *item);
83660 +    void (*FreeWordItem)       (ELAN3_CTXT *ctxt, void *item);
83661 +    void (*FreeBlockItem)      (ELAN3_CTXT *ctxt, void *item);
83662 +    int  (*CountItems)         (ELAN3_CTXT *ctxt, int list);
83663 +
83664 +    /* event interrupt cookie */
83665 +    int  (*Event)              (ELAN3_CTXT *ctxt, E3_uint32 cookie, int flag);
83666 +
83667 +    /* swapin/swapout functions. */
83668 +    void (*Swapin)             (ELAN3_CTXT *ctxt);
83669 +    void (*Swapout)            (ELAN3_CTXT *ctxt);
83670 +
83671 +    /* Free of private data */
83672 +    void (*FreePrivate)                (ELAN3_CTXT *ctxt);
83673 +
83674 +    /* Fixup a network error */
83675 +    int  (*FixupNetworkError)  (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef);
83676 +
83677 +    /* Interrupt handler trap interface */
83678 +    int  (*DProcTrap)          (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
83679 +    int  (*TProcTrap)          (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
83680 +    int         (*IProcTrap)           (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, int chan);
83681 +    int         (*CProcTrap)           (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
83682 +    int  (*CProcReissue)        (ELAN3_CTXT *ctxt, CProcTrapBuf_BE *TrapBuf);
83683 +
83684 +    /* User memory access functions */
83685 +    int              (*StartFaultCheck)(ELAN3_CTXT *ctxt);
83686 +    void      (*EndFaultCheck)  (ELAN3_CTXT *ctxt);
83687 +
83688 +    E3_uint8  (*Load8)         (ELAN3_CTXT *ctxt, E3_Addr addr);
83689 +    void      (*Store8)                (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint8 val);
83690 +    E3_uint16 (*Load16)                (ELAN3_CTXT *ctxt, E3_Addr addr);
83691 +    void      (*Store16)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint16 val);
83692 +    E3_uint32 (*Load32)                (ELAN3_CTXT *ctxt, E3_Addr addr);
83693 +    void      (*Store32)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint32 val);
83694 +    E3_uint64 (*Load64)                (ELAN3_CTXT *ctxt, E3_Addr addr);
83695 +    void      (*Store64)       (ELAN3_CTXT *ctxt, E3_Addr addr, E3_uint64 val);
83696 +
83697 +} ELAN3_OPS;
83698 +
83699 +#define ELAN3_OPS_VERSION      0xdeef0001
83700 +
83701 +/*
83702 + * Flags for ops_event.
83703 + */
83704 +#define OP_INTR                        0               /* Called from interrupt handler */
83705 +#define OP_LWP                 1               /* Called from "lwp" */
83706 +
83707 +/*
83708 + * Return codes for "ops" functions.
83709 + */
83710 +#define OP_DEFER               0               /* Defer to next lower interrupt */
83711 +#define OP_IGNORE              1               /* No event hander, so ignore it */
83712 +#define OP_HANDLED             2               /* Handled event (resume thread) */
83713 +#define OP_FAILED              3               /* Failed */
83714 +
83715 +#define ELAN3_CALL_OP(ctxt,fn)                         ((ctxt)->Operations && (ctxt)->Operations->fn) ? (ctxt)->Operations->fn 
83716 +
83717 +#define ELAN3_OP_EXCEPTION(ctxt,type,proc,trap,ap)     (ELAN3_CALL_OP(ctxt,Exception)    (ctxt,type,proc,trap,ap)      : OP_IGNORE)
83718 +#define ELAN3_OP_GET_WORD_ITEM(ctxt,list,itemp,valuep) (ELAN3_CALL_OP(ctxt,GetWordItem)  (ctxt,list,itemp,valuep)      : 0)
83719 +#define ELAN3_OP_GET_BLOCK_ITEM(ctxt,list,itemp,valuep)        (ELAN3_CALL_OP(ctxt,GetBlockItem) (ctxt,list,itemp,valuep)      : 0)
83720 +#define ELAN3_OP_PUT_WORD_ITEM(ctxt,list,value)                (ELAN3_CALL_OP(ctxt,PutWordItem)  (ctxt,list,value)             : (void)0)
83721 +#define ELAN3_OP_PUT_BLOCK_ITEM(ctxt,list,ptr)         (ELAN3_CALL_OP(ctxt,PutBlockItem) (ctxt,list,ptr)               : (void)0)
83722 +#define ELAN3_OP_PUTBACK_ITEM(ctxt,list,item)          (ELAN3_CALL_OP(ctxt,PutbackItem)  (ctxt,list,item)              : (void)0)
83723 +#define ELAN3_OP_FREE_WORD_ITEM(ctxt,item)             (ELAN3_CALL_OP(ctxt,FreeWordItem) (ctxt,item)                   : (void)0)
83724 +#define ELAN3_OP_FREE_BLOCK_ITEM(ctxt,item)            (ELAN3_CALL_OP(ctxt,FreeBlockItem)(ctxt,item)                   : (void)0)
83725 +#define ELAN3_OP_COUNT_ITEMS(ctxt,list)                        (ELAN3_CALL_OP(ctxt,CountItems)(ctxt,list)                      : 0)
83726 +#define ELAN3_OP_EVENT(ctxt,cookie,flag)               (ELAN3_CALL_OP(ctxt,Event)(ctxt,cookie,flag)                    : OP_IGNORE)
83727 +#define ELAN3_OP_SWAPIN(ctxt)                          (ELAN3_CALL_OP(ctxt,Swapin)(ctxt)                               : (void)0)
83728 +#define ELAN3_OP_SWAPOUT(ctxt)                         (ELAN3_CALL_OP(ctxt,Swapout)(ctxt)                              : (void)0)
83729 +#define ELAN3_OP_FREE_PRIVATE(ctxt)                    (ELAN3_CALL_OP(ctxt,FreePrivate)(ctxt)                          : (void)0)
83730 +#define ELAN3_OP_FIXUP_NETWORK_ERROR(ctxt, nef)                (ELAN3_CALL_OP(ctxt,FixupNetworkError)(ctxt,nef)                        : OP_FAILED)
83731 +
83732 +#define ELAN3_OP_DPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,DProcTrap)(ctxt,trap)                       : OP_DEFER)
83733 +#define ELAN3_OP_TPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,TProcTrap)(ctxt,trap)                       : OP_DEFER)
83734 +#define ELAN3_OP_IPROC_TRAP(ctxt, trap, chan)          (ELAN3_CALL_OP(ctxt,IProcTrap)(ctxt,trap,chan)                  : OP_DEFER)
83735 +#define ELAN3_OP_CPROC_TRAP(ctxt, trap)                        (ELAN3_CALL_OP(ctxt,CProcTrap)(ctxt,trap)                       : OP_DEFER)
83736 +#define ELAN3_OP_CPROC_REISSUE(ctxt,tbuf)              (ELAN3_CALL_OP(ctxt,CProcReissue)(ctxt, tbuf)                   : OP_DEFER)
83737 +
83738 +#define ELAN3_OP_START_FAULT_CHECK(ctxt)               (ELAN3_CALL_OP(ctxt,StartFaultCheck)(ctxt)                      : 0)
83739 +#define ELAN3_OP_END_FAULT_CHECK(ctxt)                 (ELAN3_CALL_OP(ctxt,EndFaultCheck)(ctxt)                                : (void)0)
83740 +#define ELAN3_OP_LOAD8(ctxt,addr)                      (ELAN3_CALL_OP(ctxt,Load8)(ctxt,addr)                           : 0)
83741 +#define ELAN3_OP_STORE8(ctxt,addr,val)                 (ELAN3_CALL_OP(ctxt,Store8)(ctxt,addr,val)                      : (void)0)
83742 +#define ELAN3_OP_LOAD16(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load16)(ctxt,addr)                          : 0)
83743 +#define ELAN3_OP_STORE16(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store16)(ctxt,addr,val)                     : (void)0)
83744 +#define ELAN3_OP_LOAD32(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load32)(ctxt,addr)                          : 0)
83745 +#define ELAN3_OP_STORE32(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store32)(ctxt,addr,val)                     : (void)0)
83746 +#define ELAN3_OP_LOAD64(ctxt,addr)                     (ELAN3_CALL_OP(ctxt,Load64)(ctxt,addr)                          : 0)
83747 +#define ELAN3_OP_STORE64(ctxt,addr,val)                        (ELAN3_CALL_OP(ctxt,Store64)(ctxt,addr,val)                     : (void)0)
83748 +
83749 +#endif /* __KERNEL__ */
83750 +
83751 +/* "list" arguement to ops functions */
83752 +#define LIST_DMA_PTR           0
83753 +#define LIST_DMA_DESC          1
83754 +#define LIST_THREAD                    2
83755 +#define LIST_COMMAND           3
83756 +#define LIST_SETEVENT          4
83757 +#define LIST_FREE_WORD         5
83758 +#define LIST_FREE_BLOCK                6
83759 +
83760 +#define MAX_LISTS              7
83761 +
83762 +#if defined(__KERNEL__) && MAX_LISTS != NUM_LISTS
83763 +#  error Check NUM_LISTS == MAX_LISTS
83764 +#endif
83765 +
83766 +/*
83767 + * Values for the 'type' field to PostException().
83768 + */
83769 +#define EXCEPTION_INVALID_ADDR         1               /* FaultArea, res */
83770 +#define EXCEPTION_UNIMP_INSTR          2               /* instr */
83771 +#define EXCEPTION_INVALID_PROCESS      3               /* proc, res */
83772 +#define EXCEPTION_SIMULATION_FAILED    4               /* */
83773 +#define EXCEPTION_UNIMPLEMENTED                5               /* */
83774 +#define EXCEPTION_SWAP_FAULT           6               /* */
83775 +#define EXCEPTION_SWAP_FAILED          7               /* */
83776 +#define EXCEPTION_BAD_PACKET           8               /* */
83777 +#define EXCEPTION_FAULTED              9               /* addr */
83778 +#define EXCEPTION_QUEUE_OVERFLOW       10              /* FaultArea, TrapType */
83779 +#define EXCEPTION_COMMAND_OVERFLOW     11              /* count */
83780 +#define EXCEPTION_DMA_RETRY_FAIL       12              /* */
83781 +#define EXCEPTION_CHAINED_EVENT                13              /* EventAddr */
83782 +#define EXCEPTION_THREAD_KILLED                14              /* */
83783 +#define EXCEPTION_CANNOT_SAVE_THREAD   15
83784 +#define EXCEPTION_BAD_SYSCALL          16              /* */
83785 +#define EXCEPTION_DEBUG                        17
83786 +#define EXCEPTION_BAD_EVENT            18              /* */
83787 +#define EXCEPTION_NETWORK_ERROR                19              /* rvp */
83788 +#define EXCEPTION_BUS_ERROR            20
83789 +#define EXCEPTION_COOKIE_ERROR         21
83790 +#define EXCEPTION_PACKET_TIMEOUT       22
83791 +#define EXCEPTION_BAD_DMA              23              /* */
83792 +#define EXCEPTION_ENOMEM               24
83793 +
83794 +/*
83795 + * Values for the 'proc' field to ElanException().
83796 + */
83797 +#define COMMAND_PROC                   1
83798 +#define THREAD_PROC                    2
83799 +#define DMA_PROC                       3
83800 +#define INPUT_PROC                     4
83801 +#define EVENT_PROC                     5
83802 +
83803 +/* Flags to IssueDmaCommand */
83804 +#define ISSUE_COMMAND_FOR_CPROC                1
83805 +#define ISSUE_COMMAND_CANT_WAIT                2
83806 +
83807 +/* Return code from IssueDmaCommand.*/
83808 +#define ISSUE_COMMAND_OK               0
83809 +#define ISSUE_COMMAND_TRAPPED          1
83810 +#define ISSUE_COMMAND_RETRY            2
83811 +#define ISSUE_COMMAND_WAIT             3
83812 +
83813 +#ifdef __KERNEL__
83814 +
83815 +extern ELAN3_CTXT *elan3_alloc(ELAN3_DEV *dev, int kernel);
83816 +extern void       elan3_free      (ELAN3_CTXT *ctxt);
83817 +
83818 +extern int        elan3_attach    (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
83819 +extern int         elan3_doattach  (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
83820 +extern void       elan3_detach    (ELAN3_CTXT *ctxt);
83821 +extern void        elan3_dodetach  (ELAN3_CTXT *ctxt);
83822 +
83823 +extern int        elan3_addvp     (ELAN3_CTXT *ctxt, int process, ELAN_CAPABILITY *cap);
83824 +extern int        elan3_removevp  (ELAN3_CTXT *ctxt, int process);
83825 +extern int        elan3_addbcastvp(ELAN3_CTXT *ctxt, int process, int base, int count);
83826 +
83827 +extern int         elan3_process   (ELAN3_CTXT *ctxt);
83828 +
83829 +extern int        elan3_load_route (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits);
83830 +extern int        elan3_check_route(ELAN3_CTXT *ctxt, int process, E3_uint16 *flits, E3_uint32 *routeError);
83831 +
83832 +extern int        elan3_lwp       (ELAN3_CTXT *ctxt);
83833 +
83834 +extern void       elan3_swapin (ELAN3_CTXT *ctxt, int reason);
83835 +extern void       elan3_swapout (ELAN3_CTXT *ctxt, int reason);
83836 +extern int         elan3_pagefault (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSave, int npages);
83837 +extern void        elan3_block_inputter (ELAN3_CTXT *ctxt, int block);
83838 +
83839 +
83840 +extern E3_Addr     elan3_init_thread (ELAN3_DEV *dev, E3_Addr fn, E3_Addr addr, sdramaddr_t stack, int stackSize, int nargs, ...);
83841 +
83842 +extern void       SetInputterState (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
83843 +extern void       SetInputterStateForContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
83844 +extern void        UnloadCommandPageMapping (ELAN3_CTXT *ctxt);
83845 +extern void       StartSwapoutContext (ELAN3_CTXT *ctxt, E3_uint32 Pend, E3_uint32 *Maskp);
83846 +
83847 +extern int        HandleExceptions (ELAN3_CTXT *ctxt, unsigned long *flags);
83848 +extern int        RestartContext (ELAN3_CTXT *ctxt, unsigned long *flags);
83849 +extern int         CheckCommandQueueFlushed (ELAN3_CTXT *ctxt, E3_uint32 cflags, int how, unsigned long *flags);
83850 +extern int        IssueCommand (ELAN3_CTXT *ctxt, unsigned cmdoff, E3_Addr value, int flags);
83851 +extern int        IssueDmaCommand (ELAN3_CTXT *ctxt, E3_Addr value, void *item, int flags);
83852 +extern int         WaitForDmaCommand (ELAN3_CTXT *ctxt, void *item, int flags);
83853 +extern void       FixupEventTrap (ELAN3_CTXT *ctxt, int proc, void *trap, E3_uint32 TrapType, 
83854 +                                  E3_FaultSave_BE *FaultSaveArea, int flags);
83855 +extern int        SimulateBlockCopy (ELAN3_CTXT *ctxt, E3_Addr EventAddress);
83856 +extern void       ReissueEvent (ELAN3_CTXT *ctxt, E3_Addr addr,int flags);
83857 +extern int         SetEventsNeedRestart (ELAN3_CTXT *ctxt);
83858 +extern void        RestartSetEvents (ELAN3_CTXT *ctxt);
83859 +extern int        RunEventType (ELAN3_CTXT *ctxt, E3_FaultSave_BE *FaultSaveArea, E3_uint32 EventType);
83860 +extern void        WakeupLwp (ELAN3_DEV *dev, void *arg);
83861 +extern void       QueueEventInterrupt (ELAN3_CTXT *ctxt, E3_uint32 cookie);
83862 +extern int         WaitForCommandPort (ELAN3_CTXT *ctxt);
83863 +
83864 +extern int        ElanException (ELAN3_CTXT *ctxt, int type, int proc, void *trap, ...);
83865 +
83866 +/* context_osdep.c */
83867 +extern int        LoadElanTranslation (ELAN3_CTXT *ctxt, E3_Addr elanAddr, int len, int protFault, int writeable);
83868 +extern void       LoadCommandPortTranslation (ELAN3_CTXT *ctxt);
83869 +
83870 +#if defined(DIGITAL_UNIX)
83871 +/* seg_elan.c */
83872 +extern caddr_t    elan3_segelan3_create (ELAN3_CTXT *ctxt);
83873 +extern void       elan3_segelan3_destroy (ELAN3_CTXT *ctxt);
83874 +extern int         elan3_segelan3_map (ELAN3_CTXT *ctxt);
83875 +extern void        elan3_segelan3_unmap (ELAN3_CTXT *ctxt);
83876 +
83877 +/* seg_elanmem.c */
83878 +extern int        elan3_segelanmem_create (ELAN3_DEV *dev, unsigned object, unsigned off, vm_offset_t *addrp, int len);
83879 +#endif /* defined(DIGITAL_UNIX) */
83880 +
83881 +/* route_table.c */
83882 +extern ELAN3_ROUTE_TABLE *AllocateRouteTable (ELAN3_DEV *dev, int size);
83883 +extern void              FreeRouteTable  (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl);
83884 +extern int               LoadRoute       (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp, int ctxnum, int nflits, E3_uint16 *flits);
83885 +extern int               GetRoute        (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int process, E3_uint16 *flits);
83886 +extern void             InvalidateRoute (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
83887 +extern void             ValidateRoute   (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
83888 +extern void             ClearRoute      (ELAN3_DEV *dev, ELAN3_ROUTE_TABLE *tbl, int vp);
83889 +
83890 +extern int               GenerateRoute (ELAN_POSITION *pos, E3_uint16 *flits, int lowid, int highid, int timeout, int highPri);
83891 +extern int               GenerateProbeRoute (E3_uint16 *flits, int nodeid, int level, int *linkup, int *linkdown, int adaptive);
83892 +extern int               GenerateCheckRoute (ELAN_POSITION *pos, E3_uint16 *flits, int level, int adaptive);
83893 +
83894 +/* virtual_process.c */
83895 +extern ELAN_LOCATION  ProcessToLocation     (ELAN3_CTXT *ctxt, ELAN3_VPSEG *seg, int process, ELAN_CAPABILITY *cap);
83896 +extern int           ResolveVirtualProcess (ELAN3_CTXT *ctxt, int process);
83897 +extern caddr_t        CapabilityString      (ELAN_CAPABILITY *cap);
83898 +extern void           UnloadVirtualProcess  (ELAN3_CTXT *ctxt, ELAN_CAPABILITY *cap);
83899 +
83900 +extern int           elan3_get_route   (ELAN3_CTXT *ctxt, int process, E3_uint16 *flits);
83901 +extern int           elan3_reset_route (ELAN3_CTXT *ctxt, int process);
83902 +
83903 +/* cproc.c */
83904 +extern int       NextCProcTrap (ELAN3_CTXT *ctxt, COMMAND_TRAP *trap);
83905 +extern void      ResolveCProcTrap (ELAN3_CTXT *ctxt);
83906 +extern int       RestartCProcTrap (ELAN3_CTXT *ctxt);
83907 +
83908 +/* iproc.c */
83909 +extern void       InspectIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap);
83910 +extern void      ResolveIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvp);
83911 +extern int       RestartIProcTrap (ELAN3_CTXT *ctxt, INPUT_TRAP *trap);
83912 +extern char      *IProcTrapString (E3_IprocTrapHeader_BE *hdrp, E3_IprocTrapData *datap);
83913 +extern void       SimulateUnlockQueue (ELAN3_CTXT *ctxt, E3_Addr QueuePointer, int SentAck);
83914 +
83915 +/* tproc.c */
83916 +extern int       NextTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
83917 +extern void      ResolveTProcTrap (ELAN3_CTXT *ctxt, THREAD_TRAP *trap);
83918 +extern int       TProcNeedsRestart (ELAN3_CTXT *ctxt);
83919 +extern void      RestartTProcItems (ELAN3_CTXT *ctxt);
83920 +extern E3_Addr    SaveThreadToStack (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int SkipInstruction);
83921 +extern void       ReissueStackPointer (ELAN3_CTXT *ctxt, E3_Addr StackPointer);
83922 +
83923 +/* tprocinsts.c */
83924 +extern int        RollThreadToClose (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, E3_uint32 PAckVal);
83925 +
83926 +/* tproc_osdep.c */
83927 +extern int        ThreadSyscall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip);
83928 +extern int       ThreadElancall (ELAN3_CTXT *ctxt, THREAD_TRAP *trap, int *skip);
83929 +
83930 +/* dproc.c */
83931 +extern int       NextDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
83932 +extern void      ResolveDProcTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
83933 +extern int       DProcNeedsRestart (ELAN3_CTXT *ctxt);
83934 +extern void      RestartDProcItems (ELAN3_CTXT *ctxt);
83935 +extern void       RestartDmaDesc (ELAN3_CTXT *ctxt, E3_DMA_BE *desc);
83936 +extern void       RestartDmaTrap (ELAN3_CTXT *ctxt, DMA_TRAP *trap);
83937 +extern void      RestartDmaPtr (ELAN3_CTXT *ctxt, E3_Addr ptr);
83938 +
83939 +/* network_error.c */
83940 +extern void       InitialiseNetworkErrorResolver (void);
83941 +extern void       FinaliseNetworkErrorResolver (void);
83942 +extern int        QueueNetworkErrorResolver (ELAN3_CTXT *ctxt, INPUT_TRAP *trap, NETERR_RESOLVER **rvpp);
83943 +extern void      FreeNetworkErrorResolver (NETERR_RESOLVER *rvp);
83944 +extern void       CancelNetworkErrorResolver (NETERR_RESOLVER *rvp);
83945 +extern int       ExecuteNetworkErrorFixup (NETERR_MSG *msg);
83946 +extern void      CompleteNetworkErrorFixup (ELAN3_CTXT *ctxt, NETERR_FIXUP *nef, int status);
83947 +
83948 +extern int        AddNeterrServerSyscall (int elanId, void *configp, void *addrp, char *namep);
83949 +
83950 +/* eventcookie.c */
83951 +extern void                cookie_init(void);
83952 +extern void                cookie_fini(void);
83953 +extern EVENT_COOKIE_TABLE *cookie_alloc_table (unsigned long task, unsigned long handle);
83954 +extern void                cookie_free_table (EVENT_COOKIE_TABLE *tbl);
83955 +extern int                 cookie_alloc_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
83956 +extern int                 cookie_free_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
83957 +extern int                 cookie_fire_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
83958 +extern int                 cookie_wait_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
83959 +extern int                 cookie_arm_cookie (EVENT_COOKIE_TABLE *tbl, EVENT_COOKIE cookie);
83960 +
83961 +/* routecheck.c */
83962 +extern int elan3_route_check          (ELAN3_CTXT *ctxt, E3_uint16 *flits, int destNode);
83963 +extern int elan3_route_broadcast_check(ELAN3_CTXT *ctxt, E3_uint16 *flitsA, int lowNode, int highNode);
83964 +
83965 +
83966 +#endif /* __KERNEL__ */
83967 +
83968 +#ifdef __cplusplus
83969 +}
83970 +#endif
83971 +
83972 +#endif /* _ELAN3_ELANCTXT_H */
83973 +
83974 +/*
83975 + * Local variables:
83976 + * c-file-style: "stroustrup"
83977 + * End:
83978 + */
83979 diff -urN clean/include/elan3/elandebug.h linux-2.6.9/include/elan3/elandebug.h
83980 --- clean/include/elan3/elandebug.h     1969-12-31 19:00:00.000000000 -0500
83981 +++ linux-2.6.9/include/elan3/elandebug.h       2003-09-24 09:57:24.000000000 -0400
83982 @@ -0,0 +1,106 @@
83983 +/*
83984 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
83985 + *
83986 + *    For licensing information please see the supplied COPYING file
83987 + *
83988 + */
83989 +
83990 +#ifndef _ELAN3_ELANDEBUG_H
83991 +#define _ELAN3_ELANDEBUG_H
83992 +
83993 +#ident "$Id: elandebug.h,v 1.38 2003/09/24 13:57:24 david Exp $"
83994 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandebug.h,v $ */
83995 +
83996 +#if defined(__KERNEL__)
83997 +
83998 +extern u_int elan3_debug;
83999 +extern u_int elan3_debug_console;
84000 +extern u_int elan3_debug_buffer;
84001 +extern u_int elan3_debug_ignore_dev;
84002 +extern u_int elan3_debug_ignore_kcomm;
84003 +extern u_int elan3_debug_ignore_ctxt;
84004 +extern u_int elan3_debug_display_ctxt;
84005 +
84006 +#define DBG_CONFIG     0x00000001                      /* Module configuration */
84007 +#define DBG_HAT                0x00000002
84008 +#define DBG_FN         0x00000004
84009 +#define DBG_SEG                0x00000008
84010 +#define DBG_INTR       0x00000010
84011 +#define DBG_LWP                0x00000020
84012 +#define DBG_FAULT      0x00000040
84013 +#define DBG_EVENT      0x00000080
84014 +#define DBG_CPROC      0x00000100
84015 +#define DBG_TPROC      0x00000200
84016 +#define DBG_DPROC      0x00000400
84017 +#define DBG_IPROC      0x00000800
84018 +#define DBG_SWAP       0x00001000
84019 +#define DBG_CMD                0x00002000
84020 +#define DBG_VP         0x00004000
84021 +#define DBG_SYSCALL    0x00008000
84022 +#define DBG_BSCAN      0x00010000
84023 +#define DBG_LINKERR    0x00020000
84024 +#define DBG_NETERR     0x00040000
84025 +#define DBG_NETRPC     0x00080000
84026 +#define DBG_EVENTCOOKIE 0x00100000
84027 +#define DBG_SDRAM      0x00200000
84028 +
84029 +#define DBG_EP         0x10000000
84030 +#define DBG_EPCONSOLE  0x20000000
84031 +
84032 +#define DBG_EIP                0x40000000
84033 +#define DBG_EIPFAIL    0x80000000
84034 +
84035 +#define DBG_ALL                0xffffffff
84036 +
84037 +/* values to pass as "ctxt" rather than a "ctxt" pointer */
84038 +#define DBG_DEVICE     ((void *) 0)
84039 +#define DBG_KCOMM      ((void *) 1)
84040 +#define DBG_ICS                ((void *) 2)
84041 +#define DBG_USER       ((void *) 3)
84042 +#define DBG_NTYPES     64
84043 +
84044 +#if defined(DEBUG_PRINTF)
84045 +#  define DBG(m,fn)                            ((elan3_debug&(m)) ? (void)(fn) : (void)0)
84046 +#  define PRINTF0(ctxt,m,fmt)                  ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt)             : (void)0)
84047 +#  define PRINTF1(ctxt,m,fmt,a)                        ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a)           : (void)0)
84048 +#  define PRINTF2(ctxt,m,fmt,a,b)              ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b)         : (void)0)
84049 +#  define PRINTF3(ctxt,m,fmt,a,b,c)            ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c)       : (void)0)
84050 +#  define PRINTF4(ctxt,m,fmt,a,b,c,d)          ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d)     : (void)0)
84051 +#  define PRINTF5(ctxt,m,fmt,a,b,c,d,e)                ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e)   : (void)0)
84052 +#  define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f)      ((elan3_debug&(m)) ? elan3_debugf(ctxt,m,fmt,a,b,c,d,e,f) : (void)0)
84053 +#ifdef __GNUC__
84054 +#  define PRINTF(ctxt,m,args...)               ((elan3_debug&(m)) ? elan3_debugf(ctxt,m, ##args)         : (void)0)
84055 +#endif
84056 +
84057 +#else
84058 +
84059 +#  define DBG(m, fn)                           do { ; } while (0)
84060 +#  define PRINTF0(ctxt,m,fmt)                  do { ; } while (0)
84061 +#  define PRINTF1(ctxt,m,fmt,a)                        do { ; } while (0)
84062 +#  define PRINTF2(ctxt,m,fmt,a,b)              do { ; } while (0)
84063 +#  define PRINTF3(ctxt,m,fmt,a,b,c)            do { ; } while (0)
84064 +#  define PRINTF4(ctxt,m,fmt,a,b,c,d)          do { ; } while (0)
84065 +#  define PRINTF5(ctxt,m,fmt,a,b,c,d,e)                do { ; } while (0)
84066 +#  define PRINTF6(ctxt,m,fmt,a,b,c,d,e,f)      do { ; } while (0)
84067 +#ifdef __GNUC__
84068 +#  define PRINTF(ctxt,m,args...)               do { ; } while (0)
84069 +#endif
84070 +
84071 +#endif /* DEBUG_PRINTF */
84072 +
84073 +#ifdef __GNUC__
84074 +extern void       elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...)
84075 +    __attribute__ ((format (printf,3,4)));
84076 +#else
84077 +extern void       elan3_debugf (void *ctxt, unsigned int mode, char *fmt, ...);
84078 +#endif
84079 +
84080 +
84081 +#endif /* __KERNEL__ */
84082 +#endif /* _ELAN3_ELANDEBUG_H */
84083 +
84084 +/*
84085 + * Local variables:
84086 + * c-file-style: "stroustrup"
84087 + * End:
84088 + */
84089 diff -urN clean/include/elan3/elandev.h linux-2.6.9/include/elan3/elandev.h
84090 --- clean/include/elan3/elandev.h       1969-12-31 19:00:00.000000000 -0500
84091 +++ linux-2.6.9/include/elan3/elandev.h 2005-07-20 07:35:20.000000000 -0400
84092 @@ -0,0 +1,581 @@
84093 +/*
84094 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84095 + *
84096 + *    For licensing information please see the supplied COPYING file
84097 + *
84098 + */
84099 +
84100 +#ifndef __ELAN3_ELANDEV_H
84101 +#define __ELAN3_ELANDEV_H
84102 +
84103 +#ident "$Id: elandev.h,v 1.76.2.1 2005/07/20 11:35:20 mike Exp $"
84104 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev.h,v $ */
84105 +
84106 +#include <elan/bitmap.h>
84107 +#include <elan/devinfo.h>
84108 +#include <elan/stats.h>
84109 +
84110 +#if defined(DIGITAL_UNIX)
84111 +#  include <elan3/elandev_dunix.h>
84112 +#elif defined(LINUX)
84113 +#  include <elan3/elandev_linux.h>
84114 +#elif defined(SOLARIS)
84115 +#  include <elan3/elandev_solaris.h>
84116 +#endif
84117 +
84118 +#ifndef TRUE
84119 +#  define TRUE 1
84120 +#endif
84121 +#ifndef FALSE
84122 +#  define FALSE 0
84123 +#endif
84124 +
84125 +/*
84126 + * Elan base address registers defined as follows :
84127 + */
84128 +#define ELAN3_BAR_SDRAM                0
84129 +#define ELAN3_BAR_COMMAND_PORT 1
84130 +#define ELAN3_BAR_REGISTERS    2
84131 +#define ELAN3_BAR_EBUS         3
84132 +
84133 +/* Macro to generate 'offset' to mmap "mem" device */
84134 +#define OFF_TO_SPACE(off)      ((off) >> 28)
84135 +#define OFF_TO_OFFSET(off)     ((off) & 0x0FFFFFFF)
84136 +#define GEN_OFF(space,off)     (((space) << 28) | ((off) & 0x0FFFFFFF))
84137 +
84138 +#ifdef __KERNEL__
84139 +
84140 +/*
84141 + * Elan EBUS is configured as follows :
84142 + */
84143 +#define ELAN3_EBUS_ROM_OFFSET          0x000000                /* rom */
84144 +#define ELAN3_EBUS_INTPAL_OFFSET       0x180000                /* interrupt pal (write only) */
84145 +
84146 +#define ELAN3_EBUS_ROM_SIZE            0x100000
84147 +
84148 +/*
84149 + * Elan SDRAM is arranged as follows :
84150 + */
84151 +#define ELAN3_TANDQ_SIZE               0x0020000               /* Trap And Queue Size */
84152 +#define ELAN3_CONTEXT_SIZE             0x0010000               /* Context Table Size */
84153 +#define ELAN3_COMMAND_TRAP_SIZE                0x0010000               /* Command Port Trap Size */
84154 +
84155 +#ifdef MPSAS
84156 +#define ELAN3_LN2_NUM_CONTEXTS 8                               /* Support 256 contexts */
84157 +#else
84158 +#define ELAN3_LN2_NUM_CONTEXTS 12                              /* Support 4096 contexts */
84159 +#endif
84160 +#define ELAN3_NUM_CONTEXTS     (1 << ELAN3_LN2_NUM_CONTEXTS)   /* Entries in context table */
84161 +
84162 +#define ELAN3_SDRAM_NUM_BANKS  4                               /* Elan supports 4 Banks of Sdram */
84163 +#define ELAN3_SDRAM_BANK_SHIFT 26                              /* each of which can be 64 mbytes ? */
84164 +#define ELAN3_SDRAM_BANK_SIZE  (1 << ELAN3_SDRAM_BANK_SHIFT)
84165 +
84166 +#define ELAN3_MAX_CACHE_SIZE   (64 * 1024)                     /* Maximum cache size */
84167 +#define ELAN3_CACHE_SIZE       (64 * 4 * E3_CACHELINE_SIZE)    /* Elan3 has 8K cache */
84168 +
84169 +#ifndef offsetof
84170 +#define offsetof(s, m)         (size_t)(&(((s *)0)->m))
84171 +#endif
84172 +
84173 +/*
84174 + * circular queue and macros to access members.
84175 + */
84176 +typedef struct
84177 +{
84178 +    u_int      q_back;                 /* Next free space */
84179 +    u_int      q_front;                /* First object to remove */
84180 +    u_int      q_size;                 /* Size of queue */
84181 +    u_int      q_count;                /* Current number of entries */
84182 +    u_int      q_slop;                 /* FULL <=> (count+slop) == size */
84183 +} ELAN3_QUEUE;
84184 +
84185 +typedef struct 
84186 +{
84187 +    u_int      q_back;                 /* Next free space */
84188 +    u_int      q_middle;               /* Middle pointer */
84189 +    u_int      q_front;                /* First object to remove */
84190 +    u_int      q_size;                 /* Size of queue */
84191 +    u_int      q_count;                /* Current number of entries */
84192 +    u_int      q_slop;                 /* FULL <=> (count+slop) == size */
84193 +} ELAN3_SPLIT_QUEUE;
84194 +
84195 +#define ELAN3_QUEUE_INIT(q,num,slop)   ((q).q_size = (num), (q).q_slop = (slop)+1, (q).q_front = (q).q_back = 0, (q).q_count = 0)
84196 +#define ELAN3_QUEUE_FULL(q)            ((q).q_count == ((q).q_size - (q).q_slop))
84197 +#define ELAN3_QUEUE_REALLY_FULL(q)     ((q).q_count == (q).q_size - 1)
84198 +#define ELAN3_QUEUE_EMPTY(q)           ((q).q_count == 0)
84199 +#define ELAN3_QUEUE_FRONT_EMPTY(q)     ((q).q_front == (q).q_middle)
84200 +#define ELAN3_QUEUE_BACK_EMPTY(q)      ((q).q_middle == (q).q_back)
84201 +#define ELAN3_QUEUE_ADD(q)             ((q).q_back = ((q).q_back+1) % (q).q_size, (q).q_count++)
84202 +#define ELAN3_QUEUE_REMOVE(q)          ((q).q_front = ((q).q_front+1) % (q).q_size, (q).q_count--)
84203 +#define ELAN3_QUEUE_ADD_FRONT(q)               ((q).q_front = ((q).q_front-1) % (q).q_size, (q).q_count++)
84204 +#define ELAN3_QUEUE_CONSUME(q)         ((q).q_middle = ((q).q_middle+1) % (q).q_size)
84205 +#define ELAN3_QUEUE_FRONT(q,qArea)     (&(qArea)[(q).q_front])
84206 +#define ELAN3_QUEUE_MIDDLE(q,qArea)    (&(qArea)[(q).q_middle])
84207 +#define ELAN3_QUEUE_BACK(q,qArea)      (&(qArea)[(q).q_back])
84208 +
84209 +#define SDRAM_MIN_BLOCK_SHIFT  10
84210 +#define SDRAM_NUM_FREE_LISTS   17                              /* allows max 64Mb block */
84211 +#define SDRAM_MIN_BLOCK_SIZE   (1 << SDRAM_MIN_BLOCK_SHIFT)
84212 +#define SDRAM_MAX_BLOCK_SIZE   (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1))
84213 +#define SDRAM_FREELIST_TRIGGER 32
84214 +
84215 +typedef struct elan3_sdram_bank
84216 +{
84217 +    u_int              Size;                                   /* Size of bank of memory */
84218 +
84219 +    ioaddr_t           Mapping;                                /* Where mapped in the kernel */
84220 +    DeviceMappingHandle Handle;                                        /* and mapping handle */
84221 +
84222 +    struct elan3_ptbl_gr **PtblGroups;
84223 +    
84224 +    bitmap_t          *Bitmaps[SDRAM_NUM_FREE_LISTS];
84225 +} ELAN3_SDRAM_BANK;
84226 +
84227 +typedef struct elan3_haltop
84228 +{
84229 +    struct elan3_haltop          *Next;                                /* Chain to next in list. */
84230 +    E3_uint32             Mask;                                /* Interrupt mask to see before calling function */
84231 +    
84232 +    void                (*Function)(void *, void *);           /* Function to call */
84233 +    void                 *Arguement;                           /* Arguement to pass to function */
84234 +} ELAN3_HALTOP;
84235 +
84236 +#define HALTOP_BATCH   32
84237 +
84238 +#endif /* __KERNEL__ */
84239 +
84240 +typedef struct elan3_stats
84241 +{
84242 +    u_long     Version;                                        /* version field */
84243 +    u_long     Interrupts;                                     /* count of elan interrupts */
84244 +    u_long     TlbFlushes;                                     /* count of tlb flushes */
84245 +    u_long     InvalidContext;                                 /* count of traps with invalid context */
84246 +    u_long     ComQueueHalfFull;                               /* count of interrupts due to com queue being half full */
84247 +
84248 +    u_long     CProcTraps;                                     /* count of cproc traps */
84249 +    u_long     DProcTraps;                                     /* count of dproc traps */
84250 +    u_long     TProcTraps;                                     /* cound of tproc traps */
84251 +    u_long     IProcTraps;                                     /* count of iproc traps */
84252 +    u_long     EventInterrupts;                                /* count of event interrupts */
84253 +
84254 +    u_long     PageFaults;                                     /* count of elan page faults */
84255 +
84256 +    /* inputter related */
84257 +    u_long     EopBadAcks;                                     /* count of EOP_BAD_ACKs */
84258 +    u_long     EopResets;                                      /* count of EOP_ERROR_RESET */
84259 +    u_long      InputterBadLength;                             /* count of BadLength */
84260 +    u_long      InputterCRCDiscards;                           /* count of CRC_STATUS_DISCARD */
84261 +    u_long      InputterCRCErrors;                             /* count of CRC_STATUS_ERROR */
84262 +    u_long      InputterCRCBad;                                        /* count of CRC_STATUS_BAD */
84263 +    u_long     DmaNetworkErrors;                               /* count of errors in dma data */
84264 +    u_long     DmaIdentifyNetworkErrors;                       /* count of errors after dma identify */
84265 +    u_long     ThreadIdentifyNetworkErrors;                    /* count of errors after thread identify */
84266 +
84267 +    /* dma related */
84268 +    u_long     DmaRetries;                                     /* count of dma retries (due to retry fail count) */    
84269 +    u_long     DmaOutputTimeouts;                              /* count of dma output timeouts */
84270 +    u_long     DmaPacketAckErrors;                             /* count of dma packet ack errors */
84271 +
84272 +    /* thread related */
84273 +    u_long     ForcedTProcTraps;                               /* count of forced tproc traps */
84274 +    u_long     TrapForTooManyInsts;                            /* count of too many instruction traps */
84275 +    u_long     ThreadOutputTimeouts;                           /* count of thread output timeouts */
84276 +    u_long       ThreadPacketAckErrors;                                /* count of thread packet ack errors */
84277 +
84278 +    /* link related */
84279 +    u_long     LockError;                                      /* count of RegPtr->Exts.LinkErrorTypes:LS_LockError */
84280 +    u_long     DeskewError;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_DeskewError */
84281 +    u_long     PhaseError;                                     /* count of RegPtr->Exts.LinkErrorTypes:LS_PhaseError */
84282 +    u_long     DataError;                                      /* count of RegPtr->Exts.LinkErrorTypes:LS_DataError */
84283 +    u_long     FifoOvFlow0;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow0 */
84284 +    u_long     FifoOvFlow1;                                    /* count of RegPtr->Exts.LinkErrorTypes:LS_FifoOvFlow1 */
84285 +    u_long     LinkErrorValue;                                 /* link error value on data error */
84286 +
84287 +    /* memory related */
84288 +    u_long     CorrectableErrors;                              /* count of correctable ecc errors */
84289 +    u_long     UncorrectableErrors;                            /* count of uncorrectable ecc errors */
84290 +    u_long       MultipleErrors;                                       /* count of multiple ecc errors */
84291 +    u_long     SdramBytesFree;                                 /* count of sdram bytes free */
84292 +    
84293 +    /* Interrupt related */
84294 +    u_long     LongestInterrupt;                               /* length of longest interrupt in ticks */
84295 +
84296 +    u_long     EventPunts;                                     /* count of punts of event interrupts to thread */
84297 +    u_long     EventRescheds;                                  /* count of reschedules of event interrupt thread */
84298 +} ELAN3_STATS;
84299 +
84300 +#define ELAN3_STATS_VERSION    (ulong)2
84301 +#define ELAN3_NUM_STATS                (sizeof (ELAN3_STATS)/sizeof (u_long))
84302 +
84303 +#define ELAN3_STATS_DEV_FMT   "elan3_stats_dev_%d"
84304 +
84305 +#ifdef __KERNEL__
84306 +
84307 +#define BumpStat(dev,stat)     ((dev)->Stats.stat++)
84308 +
84309 +typedef struct elan3_level_ptbl_block
84310 +{
84311 +    spinlock_t             PtblLock;                           /* Page table freelist lock */
84312 +    int                            PtblTotal;                          /* Count of level N page tables allocated */
84313 +    int                            PtblFreeCount;                      /* Count of free level N page tables */
84314 +    struct elan3_ptbl     *PtblFreeList;                       /* Free level N page tables */
84315 +    struct elan3_ptbl_gr          *PtblGroupList;                      /* List of Groups of level N page tables */
84316 +} ELAN3_LEVEL_PTBL_BLOCK;
84317
84318 +typedef struct elan3_dev
84319 +{
84320 +    ELAN3_DEV_OSDEP        Osdep;                              /* OS specific entries */
84321 +    int                            Instance;                           /* Device number */
84322 +    ELAN_DEVINFO            Devinfo;                           
84323 +    ELAN_POSITION          Position;                           /* position in switch network (for user code) */
84324 +    ELAN_DEV_IDX           DeviceIdx;                          /* device index registered with elanmod */
84325 +
84326 +    int                            ThreadsShouldStop;                  /* flag that kernel threads should stop */
84327 +
84328 +    spinlock_t             IntrLock;
84329 +    spinlock_t             TlbLock;
84330 +    spinlock_t             CProcLock;
84331 +    kcondvar_t             IntrWait;                           /* place event interrupt thread sleeps */
84332 +    unsigned               EventInterruptThreadStarted:1;      /* event interrupt thread started */
84333 +    unsigned               EventInterruptThreadStopped:1;      /* event interrupt thread stopped */
84334 +    
84335 +    DeviceMappingHandle            RegHandle;                          /* DDI Handle */
84336 +    ioaddr_t               RegPtr;                             /* Elan Registers */
84337 +
84338 +    volatile E3_uint32     InterruptMask;                      /* copy of RegPtr->InterruptMask */
84339 +    volatile E3_uint32     Event_Int_Queue_FPtr;               /* copy of RegPtr->Event_Int_Queue_FPtr */
84340 +    volatile E3_uint32      SchCntReg;                         /* copy of RegPtr->SchCntReg */
84341 +    volatile E3_uint32      Cache_Control_Reg;                 /* true value for RegPtr->Cache_Control_Reg */
84342 +    
84343 +    ELAN3_SDRAM_BANK       SdramBanks[ELAN3_SDRAM_NUM_BANKS];  /* Elan sdram banks */
84344 +    spinlock_t             SdramLock;                          /* Sdram allocator */
84345 +    sdramaddr_t                    SdramFreeLists[SDRAM_NUM_FREE_LISTS];
84346 +    unsigned               SdramFreeCounts[SDRAM_NUM_FREE_LISTS];
84347 +               
84348 +    sdramaddr_t                    TAndQBase;                          /* Trap and Queue area */
84349 +    sdramaddr_t                    ContextTable;                       /* Elan Context Table */
84350 +    u_int                  ContextTableSize;                   /* # entries in context table */
84351 +
84352 +    struct elan3_ctxt      **CtxtTable;                         /* array of ctxt pointers or nulls */
84353 +
84354 +    sdramaddr_t                    CommandPortTraps[2];                /* Command port trap overflow */
84355 +    int                            CurrentCommandPortTrap;             /* Which overflow queue we're using */
84356 +    
84357 +    u_int                  HaltAllCount;                       /* Count of reasons to halt context 0 queues */
84358 +    u_int                  HaltNonContext0Count;               /* Count of reasons to halt non-context 0 queues */
84359 +    u_int                  HaltDmaDequeueCount;                /* Count of reasons to halt dma from dequeuing */
84360 +    u_int                  HaltThreadCount;                    /* Count of reasons to halt the thread processor */
84361 +    u_int                  FlushCommandCount;                  /* Count of reasons to flush command queues */
84362 +    u_int                  DiscardAllCount;                    /* Count of reasons to discard context 0 */
84363 +    u_int                  DiscardNonContext0Count;            /* Count of reasons to discard non context 0 */
84364 +
84365 +    struct thread_trap    *ThreadTrap;                         /* Thread Processor trap space */
84366 +    struct dma_trap       *DmaTrap;                            /* DMA Processor trap space */
84367 +
84368 +    spinlock_t             FreeHaltLock;                       /* Lock for haltop free list */
84369 +    ELAN3_HALTOP                  *FreeHaltOperations;                 /* Free list of haltops */
84370 +    u_int                  NumHaltOperations;                  /* Number of haltops allocated */
84371 +    u_int                  ReservedHaltOperations;             /* Number of haltops reserved */
84372 +
84373 +    ELAN3_HALTOP                  *HaltOperations;                     /* List of operations to call */
84374 +    ELAN3_HALTOP                 **HaltOperationsTailpp;               /* Pointer to last "next" pointer in list */
84375 +    E3_uint32              HaltOperationsMask;                 /* Or of all bits in list of operations */
84376 +
84377 +    physaddr_t             SdramPhysBase;                      /* Physical address of SDRAM */
84378 +    physaddr_t             SdramPhysMask;                      /* and mask of significant bits */ 
84379 +    
84380 +    physaddr_t             PciPhysBase;                        /* physical address of local PCI segment */
84381 +    physaddr_t             PciPhysMask;                        /* and mask of significant bits */
84382 +
84383 +    long                   ErrorTime;                          /* lbolt at last error (link,ecc etc) */
84384 +    long                   ErrorsPerTick;                      /* count of errors for this tick */
84385 +    timer_fn_t             ErrorTimeoutId;                     /* id of timeout when errors masked out */
84386 +    timer_fn_t             DmaPollTimeoutId;                   /* id of timeout to poll for "bad" dmas */
84387 +    int                            FilterHaltQueued;
84388 +
84389 +    /*
84390 +     * HAT layer specific entries.
84391 +     */
84392 +    ELAN3_LEVEL_PTBL_BLOCK   Level[4];
84393 +    spinlock_t             PtblGroupLock;                      /* Lock for Page Table group lists */
84394 +    struct elan3_ptbl_gr    *Level3PtblGroupHand;              /* Hand for ptbl stealing */
84395 +
84396 +    /*
84397 +     * Per-Context Information structures.
84398 +     */
84399 +    struct elan3_info     *Infos;                              /* List of "infos" for this device */
84400 +
84401 +    char                    LinkShutdown;                       /* link forced into reset by panic/shutdown/dump */
84402 +
84403 +    /*
84404 +     * Device statistics.
84405 +     */
84406 +    ELAN3_STATS                    Stats;
84407 +    ELAN_STATS_IDX          StatsIndex;
84408 +
84409 +    struct {
84410 +       E3_Regs            *RegPtr;
84411 +       char               *Sdram[ELAN3_SDRAM_NUM_BANKS];
84412 +    } PanicState;
84413 +} ELAN3_DEV;
84414 +
84415 +#define ELAN3_DEV_CTX_TABLE(dev,ctxtn) ( (dev)->CtxtTable[ (ctxtn) &  MAX_ROOT_CONTEXT_MASK] )
84416 +
84417 +/* macros for accessing dev->RegPtr.Tags/Sets. */
84418 +#define write_cache_tag(dev,what,val)  writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Tags.what)))
84419 +#define read_cache_tag(dev,what)       readq ((void *) (dev->RegPtr + offsetof (E3_Regs, Tags.what)))
84420 +#define write_cache_set(dev,what,val)  writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Sets.what)))
84421 +#define read_cache_set(dev,what)       readq ((void *) (dev->RegPtr + offsetof (E3_Regs, Sets.what)))
84422 +
84423 +/* macros for accessing dev->RegPtr.Regs. */
84424 +#define write_reg64(dev,what,val)      writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what)))
84425 +#define write_reg32(dev,what,val)      writel (val, (void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what)))
84426 +#define read_reg64(dev,what)           readq ((void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what)))
84427 +#define read_reg32(dev,what)           readl ((void *) (dev->RegPtr + offsetof (E3_Regs, Regs.what)))
84428 +
84429 +/* macros for accessing dev->RegPtr.uRegs. */
84430 +#define write_ureg64(dev,what,val)     writeq (val, (void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what)))
84431 +#define write_ureg32(dev,what,val)     writel (val, (void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what)))
84432 +#define read_ureg64(dev,what)          readq ((void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what)))
84433 +#define read_ureg32(dev,what)          readl ((void *) (dev->RegPtr + offsetof (E3_Regs, URegs.what)))
84434 +
84435 +/* macros for accessing dma descriptor/thread regs */
84436 +#define copy_dma_regs(dev, desc) \
84437 +MACRO_BEGIN \
84438 +    register int i;  \
84439 +    for (i = 0; i < sizeof (E3_DMA)/sizeof(E3_uint64); i++) \
84440 +       ((E3_uint64 *) desc)[i] = readq ((void *)(dev->RegPtr + offsetof (E3_Regs, Regs.Dma_Desc) + i*sizeof (E3_uint64))); \
84441 +MACRO_END
84442 +
84443 +#define copy_thread_regs(dev, regs) \
84444 +MACRO_BEGIN \
84445 +    register int i;  \
84446 +    for (i = 0; i < (32*sizeof (E3_uint32))/sizeof(E3_uint64); i++) \
84447 +       ((E3_uint64 *) regs)[i] = readq ((void *)(dev->RegPtr + offsetof (E3_Regs, Regs.Globals[0]) + i*sizeof (E3_uint64))); \
84448 +MACRO_END
84449 +
84450 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock, 
84451 +                         _E3_DataBusMap::Exts _E3_DataBusMap::Input_Context_Fil_Flush
84452 +                         elan3_dev::CurrentCommandPortTrap elan3_dev::HaltAllCount elan3_dev::HaltDmaDequeueCount
84453 +                         elan3_dev::FlushCommandCount elan3_dev::DiscardAllCount elan3_dev::DiscardNonContext0Count
84454 +                         elan3_dev::HaltOperations elan3_dev::HaltOperationsMask))
84455 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::TlbLock, 
84456 +                         _E3_DataBusMap::Cache_Control_Reg))
84457 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock, 
84458 +                         elan3_dev::Infos elan3_dev::InfoTable))
84459 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::FreeHaltLock, 
84460 +                         elan3_dev::FreeHaltOperations elan3_dev::NumHaltOperations elan3_dev::ReservedHaltOperations))
84461 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PageFreeListLock, 
84462 +                         elan3_dev::PageFreeList elan3_dev::PageFreeListSize))
84463 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level1PtblLock,
84464 +                         elan3_dev::Level1PtblTotal elan3_dev::Level1PtblFreeCount elan3_dev::Level1PtblFreeList))
84465 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level2PtblLock,
84466 +                         elan3_dev::Level2PtblTotal elan3_dev::Level2PtblFreeCount elan3_dev::Level2PtblFreeList))
84467 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::Level3PtblLock,
84468 +                         elan3_dev::Level3PtblTotal elan3_dev::Level3PtblFreeCount elan3_dev::Level3PtblFreeList))
84469 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::PtblGroupLock,
84470 +                         elan3_dev::Level1PtblGroupList elan3_dev::Level2PtblGroupList elan3_dev::Level3PtblGroupList))
84471 +
84472 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_dev::InfoTable elan3_dev::Level1PtblFreeList
84473 +                                elan3_dev::Level2PtblFreeList elan3_dev::Level3PtblFreeList))
84474 +
84475 +_NOTE(LOCK_ORDER(elan3_dev::InfoLock elan3_dev::IntrLock))
84476 +_NOTE(LOCK_ORDER(as::a_lock elan3_dev::InfoLock))
84477 +_NOTE(LOCK_ORDER(as::a_lock elan3_dev::IntrLock))
84478 +
84479 +#define SET_INT_MASK(dev,Mask)         MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev)->InterruptMask = (Mask)));  mmiob(); MACRO_END
84480 +#define ENABLE_INT_MASK(dev, bits)     MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask |= (bits)))); mmiob(); MACRO_END
84481 +#define DISABLE_INT_MASK(dev, bits)    MACRO_BEGIN write_reg32 (dev, Exts.InterruptMask, ((dev->InterruptMask &= ~(bits)))); mmiob(); MACRO_END
84482 +
84483 +#define INIT_SCHED_STATUS(dev, val) \
84484 +MACRO_BEGIN \
84485 +       (dev)->SchCntReg = (val); \
84486 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
84487 +       mmiob(); \
84488 +MACRO_END
84489 +
84490 +#define SET_SCHED_STATUS(dev, val) \
84491 +MACRO_BEGIN \
84492 +       ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \
84493 +       (dev)->SchCntReg |= (val); \
84494 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
84495 +        mmiob (); \
84496 +MACRO_END
84497 +
84498 +#define CLEAR_SCHED_STATUS(dev, val) \
84499 +MACRO_BEGIN \
84500 +       ASSERT (((val) & HaltStopAndExtTestMask) == (val)); \
84501 +       (dev)->SchCntReg &= ~(val); \
84502 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
84503 +        mmiob(); \
84504 +MACRO_END
84505 +
84506 +#define MODIFY_SCHED_STATUS(dev, SetBits, ClearBits) \
84507 +MACRO_BEGIN \
84508 +       ASSERT ((((SetBits)|(ClearBits)) & HaltStopAndExtTestMask) == ((SetBits)|(ClearBits))); \
84509 +       (dev)->SchCntReg = (((dev)->SchCntReg | (SetBits)) & ~(ClearBits)); \
84510 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
84511 +       mmiob(); \
84512 +MACRO_END
84513 +
84514 +#define PULSE_SCHED_STATUS(dev, RestartBits) \
84515 +MACRO_BEGIN \
84516 +       ASSERT (((RestartBits) & HaltStopAndExtTestMask) == 0); \
84517 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg | (RestartBits)); \
84518 +       mmiob(); \
84519 +MACRO_END
84520 +
84521 +#define SET_SCHED_LINK_VALUE(dev, enabled, val) \
84522 +MACRO_BEGIN \
84523 +       (dev)->SchCntReg = (((dev)->SchCntReg & HaltAndStopMask) | ((enabled) ? LinkBoundaryScan : 0) | LinkSetValue(val, 0)); \
84524 +       write_reg32 (dev, Exts.SchCntReg, (dev)->SchCntReg); \
84525 +       mmiob(); \
84526 +MACRO_END
84527 +
84528 +#ifdef DEBUG_ASSERT
84529 +#  define ELAN3_ASSERT(dev, EX)        ((void)((EX) || elan3_assfail(dev, #EX, __FILE__, __LINE__)))
84530 +#else
84531 +#  define ELAN3_ASSERT(dev, EX)
84532 +#endif
84533 +
84534 +/* elandev_generic.c */
84535 +extern int        InitialiseElan (ELAN3_DEV *dev, ioaddr_t CmdPort);
84536 +extern void       FinaliseElan (ELAN3_DEV *dev);
84537 +extern int        InterruptHandler (ELAN3_DEV *dev);
84538 +extern void       PollForDmaHungup (void *arg);
84539 +
84540 +extern int        SetLinkBoundaryScan (ELAN3_DEV *dev);
84541 +extern void       ClearLinkBoundaryScan (ELAN3_DEV *dev);
84542 +extern int        WriteBoundaryScanValue (ELAN3_DEV *dev, int value);
84543 +extern int        ReadBoundaryScanValue(ELAN3_DEV *dev, int link);
84544 +
84545 +extern int        ReadVitalProductData (ELAN3_DEV *dev, int *CasLatency);
84546 +
84547 +extern struct elan3_ptbl_gr *ElanGetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset);
84548 +extern void       ElanSetPtblGr (ELAN3_DEV *dev, sdramaddr_t offset, struct elan3_ptbl_gr *ptg);
84549 +
84550 +extern void       ElanFlushTlb (ELAN3_DEV *dev);
84551 +
84552 +extern void       SetSchedStatusRegister (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp);
84553 +extern void      FreeHaltOperation (ELAN3_DEV *dev, ELAN3_HALTOP *op);
84554 +extern int       ReserveHaltOperations (ELAN3_DEV *dev, int count, int cansleep);
84555 +extern void      ReleaseHaltOperations (ELAN3_DEV *dev, int count);
84556 +extern void      ProcessHaltOperations (ELAN3_DEV *dev, E3_uint32 Pend);
84557 +extern void      QueueHaltOperation (ELAN3_DEV *dev, E3_uint32 Pend, volatile E3_uint32 *Maskp,
84558 +                                     E3_uint32 ReqMask, void (*Function)(ELAN3_DEV *, void *), void *Arguement);
84559 +
84560 +extern int        ComputePosition (ELAN_POSITION *pos, unsigned NodeId, unsigned NumNodes, unsigned numDownLinksVal);
84561 +
84562 +extern caddr_t   MiToName (int mi);
84563 +extern void      ElanBusError (ELAN3_DEV *dev);
84564 +
84565 +extern void      TriggerLsa (ELAN3_DEV *dev);
84566 +
84567 +extern ELAN3_DEV  *elan3_device (int instance);
84568 +extern int       DeviceRegisterSize (ELAN3_DEV *dev, int rnumber, int *sizep);
84569 +extern int       MapDeviceRegister (ELAN3_DEV *dev, int rnumber, ioaddr_t *addrp, int offset, 
84570 +                                    int len, DeviceMappingHandle *handlep);
84571 +extern void       UnmapDeviceRegister (ELAN3_DEV *dev, DeviceMappingHandle *handlep);
84572 +
84573 +
84574 +/* sdram.c */
84575 +/* sdram accessing functions - define 4 different types for 8,16,32,64 bit accesses */
84576 +extern unsigned char      elan3_sdram_readb (ELAN3_DEV *dev, sdramaddr_t ptr);
84577 +extern unsigned short     elan3_sdram_readw (ELAN3_DEV *dev, sdramaddr_t ptr);
84578 +extern unsigned int       elan3_sdram_readl (ELAN3_DEV *dev, sdramaddr_t ptr);
84579 +extern unsigned long long elan3_sdram_readq (ELAN3_DEV *dev, sdramaddr_t ptr);
84580 +extern void               elan3_sdram_writeb (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned char val);
84581 +extern void               elan3_sdram_writew (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned short val);
84582 +extern void               elan3_sdram_writel (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned int val);
84583 +extern void               elan3_sdram_writeq (ELAN3_DEV *dev, sdramaddr_t ptr, unsigned long long val);
84584 +
84585 +extern void              elan3_sdram_zerob_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
84586 +extern void              elan3_sdram_zerow_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
84587 +extern void              elan3_sdram_zerol_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
84588 +extern void              elan3_sdram_zeroq_sdram (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
84589 +
84590 +extern void               elan3_sdram_copyb_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
84591 +extern void               elan3_sdram_copyw_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
84592 +extern void               elan3_sdram_copyl_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
84593 +extern void               elan3_sdram_copyq_from_sdram (ELAN3_DEV *dev, sdramaddr_t from, void *to, int nbytes);
84594 +extern void               elan3_sdram_copyb_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
84595 +extern void               elan3_sdram_copyw_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
84596 +extern void               elan3_sdram_copyl_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
84597 +extern void               elan3_sdram_copyq_to_sdram (ELAN3_DEV *dev, void *from, sdramaddr_t to, int nbytes);
84598 +
84599 +extern void              elan3_sdram_init (ELAN3_DEV *dev);
84600 +extern void               elan3_sdram_fini (ELAN3_DEV *dev);
84601 +extern void              elan3_sdram_add (ELAN3_DEV *dev, sdramaddr_t base, sdramaddr_t top);
84602 +extern sdramaddr_t        elan3_sdram_alloc (ELAN3_DEV *dev, int nbytes);
84603 +extern void               elan3_sdram_free (ELAN3_DEV *dev, sdramaddr_t ptr, int nbytes);
84604 +extern physaddr_t         elan3_sdram_to_phys (ELAN3_DEV *dev, sdramaddr_t addr);
84605 +
84606 +/* cproc.c */
84607 +extern void      HandleCProcTrap (ELAN3_DEV *dev, E3_uint32 Pend, E3_uint32 *Mask);
84608 +
84609 +/* iproc.c */
84610 +extern void      HandleIProcTrap (ELAN3_DEV *dev, int Channel, E3_uint32 Pend, sdramaddr_t FaultSaveOff, 
84611 +                                  sdramaddr_t TransactionsOff, sdramaddr_t DataOff);
84612 +
84613 +/* tproc.c */
84614 +extern int       HandleTProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits);
84615 +extern void      DeliverTProcTrap (ELAN3_DEV *dev, struct thread_trap *threadTrap, E3_uint32 Pend);
84616 +
84617 +/* dproc.c */
84618 +extern int       HandleDProcTrap (ELAN3_DEV *dev, E3_uint32 *RestartBits);
84619 +extern void      DeliverDProcTrap (ELAN3_DEV *dev, struct dma_trap *dmaTrap, E3_uint32 Pend);
84620 +
84621 +#if defined(LINUX)
84622 +/* procfs_linux.h */
84623 +extern struct proc_dir_entry *elan3_procfs_root;
84624 +extern struct proc_dir_entry *elan3_config_root;
84625 +
84626 +extern void elan3_procfs_init(void);
84627 +extern void elan3_procfs_fini(void);
84628 +extern void elan3_procfs_device_init (ELAN3_DEV *dev);
84629 +extern void elan3_procfs_device_fini (ELAN3_DEV *dev);
84630 +#endif /* defined(LINUX) */
84631 +
84632 +/* elan3_osdep.c */
84633 +extern int        BackToBackMaster;
84634 +extern int        BackToBackSlave;
84635 +
84636 +#define ELAN_REG_REC_MAX (100)
84637 +#define ELAN_REG_REC(REG)  {                                         \
84638 +elan_reg_rec_file [elan_reg_rec_index] = __FILE__;                   \
84639 +elan_reg_rec_line [elan_reg_rec_index] = __LINE__;                   \
84640 +elan_reg_rec_reg  [elan_reg_rec_index] = REG;                        \
84641 +elan_reg_rec_cpu  [elan_reg_rec_index] = smp_processor_id();         \
84642 +elan_reg_rec_lbolt[elan_reg_rec_index] = lbolt;                      \
84643 +elan_reg_rec_index = ((elan_reg_rec_index+1) % ELAN_REG_REC_MAX);}
84644 +
84645 +extern char *    elan_reg_rec_file [ELAN_REG_REC_MAX];
84646 +extern int       elan_reg_rec_line [ELAN_REG_REC_MAX];
84647 +extern long      elan_reg_rec_lbolt[ELAN_REG_REC_MAX];
84648 +extern int       elan_reg_rec_cpu  [ELAN_REG_REC_MAX];
84649 +extern E3_uint32 elan_reg_rec_reg  [ELAN_REG_REC_MAX];
84650 +extern int       elan_reg_rec_index;
84651
84652 +#endif /* __KERNEL__ */
84653 +
84654 +
84655 +#define ELAN3_PROCFS_ROOT          "/proc/qsnet/elan3"
84656 +#define ELAN3_PROCFS_VERSION       "/proc/qsnet/elan3/version"
84657 +#define ELAN3_PROCFS_DEBUG         "/proc/qsnet/elan3/config/elandebug"
84658 +#define ELAN3_PROCFS_DEBUG_CONSOLE "/proc/qsnet/elan3/config/elandebug_console"
84659 +#define ELAN3_PROCFS_DEBUG_BUFFER  "/proc/qsnet/elan3/config/elandebug_buffer"
84660 +#define ELAN3_PROCFS_MMU_DEBUG     "/proc/qsnet/elan3/config/elan3mmu_debug"
84661 +#define ELAN3_PROCFS_PUNT_LOOPS    "/proc/qsnet/elan3/config/eventint_punt_loops"
84662 +
84663 +#define ELAN3_PROCFS_DEVICE_STATS_FMT    "/proc/qsnet/elan3/device%d/stats"
84664 +#define ELAN3_PROCFS_DEVICE_POSITION_FMT "/proc/qsnet/elan3/device%d/position"
84665 +#define ELAN3_PROCFS_DEVICE_NODESET_FMT  "/proc/qsnet/elan3/device%d/nodeset"
84666 +
84667 +#endif /* __ELAN3_ELANDEV_H */
84668 +
84669 +/*
84670 + * Local variables:
84671 + * c-file-style: "stroustrup"
84672 + * End:
84673 + */
84674 diff -urN clean/include/elan3/elandev_linux.h linux-2.6.9/include/elan3/elandev_linux.h
84675 --- clean/include/elan3/elandev_linux.h 1969-12-31 19:00:00.000000000 -0500
84676 +++ linux-2.6.9/include/elan3/elandev_linux.h   2005-04-05 11:28:37.000000000 -0400
84677 @@ -0,0 +1,74 @@
84678 +/*
84679 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84680 + *
84681 + *    For licensing information please see the supplied COPYING file
84682 + *
84683 + */
84684 +
84685 +#ifndef __ELANDEV_LINUX_H
84686 +#define __ELANDEV_LINUX_H
84687 +
84688 +#ident "$Id: elandev_linux.h,v 1.14 2005/04/05 15:28:37 robin Exp $"
84689 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elandev_linux.h,v $*/
84690 +
84691 +#ifdef __KERNEL__
84692 +#include <linux/mm.h>
84693 +#include <linux/sched.h>
84694 +#include <linux/pci.h>
84695 +
84696 +#include <qsnet/autoconf.h>
84697 +
84698 +#if !defined(NO_COPROC)                                /* The older coproc kernel patch is applied */
84699 +#include <linux/coproc.h>
84700 +
84701 +#define ioproc_ops             coproc_ops_struct
84702 +#define ioproc_register_ops    register_coproc_ops
84703 +#define ioproc_unregister_ops  unregister_coproc_ops
84704 +
84705 +#define IOPROC_MM_STRUCT_ARG   1
84706 +#define IOPROC_PATCH_APPLIED   1
84707 +
84708 +#elif !defined(NO_IOPROC)                      /* The new ioproc kernel patch is applied */
84709 +#include <linux/ioproc.h>
84710 +
84711 +#define IOPROC_PATCH_APPLIED   1
84712 +#endif
84713 +#endif
84714 +
84715 +#define ELAN3_MAJOR              60
84716 +#define ELAN3_NAME               "elan3"
84717 +#define ELAN3_MAX_CONTROLLER     16                      /* limited to 4 bits */
84718
84719 +#define ELAN3_MINOR_DEVNUM(m)    ((m) & 0x0f)            /* card number */
84720 +#define ELAN3_MINOR_DEVFUN(m)    (((m) >> 4) & 0x0f)     /* function */
84721 +#define ELAN3_MINOR_CONTROL      0                       /* function values */
84722 +#define ELAN3_MINOR_MEM          1
84723 +#define ELAN3_MINOR_USER               2
84724
84725 +typedef void                   *DeviceMappingHandle;
84726 +
84727 +/* task and ctxt handle types */
84728 +typedef struct mm_struct       *TaskHandle;
84729 +typedef int                    CtxtHandle;
84730
84731 +#define ELAN3_MY_TASK_HANDLE() (current->mm)
84732 +#define KERNEL_TASK_HANDLE()   (get_kern_mm())
84733
84734 +/*
84735 + * OS-dependent component of ELAN3_DEV struct.
84736 + */
84737 +typedef struct elan3_dev_osdep
84738 +{
84739 +       struct pci_dev  *pci;                   /* PCI config data */
84740 +       int             ControlDeviceOpen;      /* flag to indicate control */
84741 +                                               /*   device open */
84742 +       struct proc_dir_entry *procdir;
84743 +} ELAN3_DEV_OSDEP;
84744 +
84745 +#endif /* __ELANDEV_LINUX_H */
84746 +
84747 +/*
84748 + * Local variables:
84749 + * c-file-style: "stroustrup"
84750 + * End:
84751 + */
84752 diff -urN clean/include/elan3/elanio.h linux-2.6.9/include/elan3/elanio.h
84753 --- clean/include/elan3/elanio.h        1969-12-31 19:00:00.000000000 -0500
84754 +++ linux-2.6.9/include/elan3/elanio.h  2003-12-08 10:40:26.000000000 -0500
84755 @@ -0,0 +1,226 @@
84756 +/*
84757 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84758 + *
84759 + *    For licensing information please see the supplied COPYING file
84760 + *
84761 + */
84762 +
84763 +#ifndef __ELAN3_ELAN3IO_H
84764 +#define __ELAN3_ELAN3IO_H
84765 +
84766 +#ident "$Id: elanio.h,v 1.19 2003/12/08 15:40:26 mike Exp $"
84767 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanio.h,v $*/
84768 +
84769 +#define ELAN3IO_CONTROL_PATHNAME       "/dev/elan3/control%d"
84770 +#define ELAN3IO_MEM_PATHNAME   "/dev/elan3/mem%d"
84771 +#define ELAN3IO_USER_PATHNAME  "/dev/elan3/user%d"
84772 +#define ELAN3IO_SDRAM_PATHNAME   "/dev/elan3/sdram%d"
84773 +#define ELAN3IO_MAX_PATHNAMELEN        32
84774 +
84775 +/* ioctls on /dev/elan3/control */
84776 +#define ELAN3IO_CONTROL_BASE           0
84777 +
84778 +#define ELAN3IO_SET_BOUNDARY_SCAN      _IO   ('e', ELAN3IO_CONTROL_BASE + 0)
84779 +#define ELAN3IO_CLEAR_BOUNDARY_SCAN    _IO   ('e', ELAN3IO_CONTROL_BASE + 1)
84780 +#define ELAN3IO_READ_LINKVAL           _IOWR ('e', ELAN3IO_CONTROL_BASE + 2, E3_uint32)
84781 +#define ELAN3IO_WRITE_LINKVAL          _IOWR ('e', ELAN3IO_CONTROL_BASE + 3, E3_uint32)
84782 +
84783 +typedef struct elanio_set_debug_struct
84784 +{
84785 +    char       what[32];
84786 +    u_long     value;
84787 +} ELAN3IO_SET_DEBUG_STRUCT;
84788 +#define ELAN3IO_SET_DEBUG              _IOW  ('e', ELAN3IO_CONTROL_BASE + 4, ELAN3IO_SET_DEBUG_STRUCT)
84789 +
84790 +typedef struct elanio_debug_buffer_struct
84791 +{
84792 +    caddr_t    addr;
84793 +    size_t     len;
84794 +} ELAN3IO_DEBUG_BUFFER_STRUCT;
84795 +#define ELAN3IO_DEBUG_BUFFER           _IOWR ('e', ELAN3IO_CONTROL_BASE + 5, ELAN3IO_DEBUG_BUFFER_STRUCT)
84796 +
84797 +typedef struct elanio_neterr_server_struct
84798 +{
84799 +    u_int              elanid;
84800 +    void              *addr;
84801 +    char              *name;
84802 +} ELAN3IO_NETERR_SERVER_STRUCT;
84803 +#define ELAN3IO_NETERR_SERVER          _IOW  ('e', ELAN3IO_CONTROL_BASE + 6, ELAN3IO_NETERR_SERVER_STRUCT)
84804 +#define ELAN3IO_NETERR_FIXUP           _IOWR ('e', ELAN3IO_CONTROL_BASE + 7, NETERR_MSG)
84805 +
84806 +typedef struct elanio_set_position_struct
84807 +{
84808 +    u_int              device;
84809 +    unsigned short      nodeId;
84810 +    unsigned short      numNodes;
84811 +} ELAN3IO_SET_POSITION_STRUCT;
84812 +#define ELAN3IO_SET_POSITION           _IOW ('e', ELAN3IO_CONTROL_BASE + 8, ELAN3IO_SET_POSITION_STRUCT)
84813 +
84814 +#if defined(LINUX)
84815 +
84816 +/* ioctls on /dev/elan3/sdram */
84817 +#define ELAN3IO_SDRAM_BASE             20
84818 +
84819 +/* ioctls on /dev/elan3/user */
84820 +#define ELAN3IO_USER_BASE              30
84821 +
84822 +#define ELAN3IO_FREE                   _IO  ('e', ELAN3IO_USER_BASE + 0)
84823 +
84824 +#define ELAN3IO_ATTACH                 _IOWR('e', ELAN3IO_USER_BASE + 1, ELAN_CAPABILITY)
84825 +#define ELAN3IO_DETACH                 _IO  ('e', ELAN3IO_USER_BASE + 2)
84826 +
84827 +typedef struct elanio_addvp_struct
84828 +{
84829 +    u_int              process;
84830 +    ELAN_CAPABILITY     capability;
84831 +} ELAN3IO_ADDVP_STRUCT;
84832 +#define ELAN3IO_ADDVP                  _IOWR('e', ELAN3IO_USER_BASE + 3, ELAN3IO_ADDVP_STRUCT)
84833 +#define ELAN3IO_REMOVEVP                       _IOW ('e', ELAN3IO_USER_BASE + 4, int)
84834 +
84835 +typedef struct elanio_bcastvp_struct
84836 +{
84837 +    u_int              process;
84838 +    u_int              lowvp;
84839 +    u_int              highvp;
84840 +} ELAN3IO_BCASTVP_STRUCT;
84841 +#define ELAN3IO_BCASTVP                        _IOW ('e', ELAN3IO_USER_BASE + 5, ELAN3IO_BCASTVP_STRUCT)
84842 +
84843 +typedef struct elanio_loadroute_struct
84844 +{
84845 +    u_int              process;
84846 +    E3_uint16          flits[MAX_FLITS];
84847 +} ELAN3IO_LOAD_ROUTE_STRUCT;
84848 +#define ELAN3IO_LOAD_ROUTE             _IOW ('e', ELAN3IO_USER_BASE + 6, ELAN3IO_LOAD_ROUTE_STRUCT)
84849 +
84850 +#define ELAN3IO_PROCESS                        _IO  ('e', ELAN3IO_USER_BASE + 7)
84851 +
84852 +typedef struct elanio_setperm_struct
84853 +{
84854 +    caddr_t            maddr;
84855 +    E3_Addr            eaddr;
84856 +    size_t             len;
84857 +    int                        perm;
84858 +} ELAN3IO_SETPERM_STRUCT;
84859 +#define ELAN3IO_SETPERM                        _IOW ('e', ELAN3IO_USER_BASE + 8, ELAN3IO_SETPERM_STRUCT)
84860 +
84861 +typedef struct elanio_clearperm_struct
84862 +{
84863 +    E3_Addr            eaddr;
84864 +    size_t             len;
84865 +} ELAN3IO_CLEARPERM_STRUCT;
84866 +#define ELAN3IO_CLEARPERM              _IOW ('e', ELAN3IO_USER_BASE + 9, ELAN3IO_CLEARPERM_STRUCT)
84867 +
84868 +typedef struct elanio_changeperm_struct
84869 +{
84870 +    E3_Addr            eaddr;
84871 +    size_t             len;
84872 +    int                        perm;
84873 +} ELAN3IO_CHANGEPERM_STRUCT;
84874 +#define ELAN3IO_CHANGEPERM             _IOW ('e', ELAN3IO_USER_BASE + 10, ELAN3IO_CHANGEPERM_STRUCT)
84875 +
84876 +
84877 +#define ELAN3IO_HELPER_THREAD          _IO  ('e', ELAN3IO_USER_BASE + 11)
84878 +#define ELAN3IO_WAITCOMMAND            _IO  ('e', ELAN3IO_USER_BASE + 12)
84879 +#define ELAN3IO_BLOCK_INPUTTER         _IOW ('e', ELAN3IO_USER_BASE + 13, int)
84880 +#define ELAN3IO_SET_FLAGS              _IOW ('e', ELAN3IO_USER_BASE + 14, int)
84881 +
84882 +#define ELAN3IO_WAITEVENT              _IOW ('e', ELAN3IO_USER_BASE + 15, E3_Event)
84883 +#define ELAN3IO_ALLOC_EVENTCOOKIE      _IOW ('e', ELAN3IO_USER_BASE + 16, EVENT_COOKIE)
84884 +#define ELAN3IO_FREE_EVENTCOOKIE               _IOW ('e', ELAN3IO_USER_BASE + 17, EVENT_COOKIE)
84885 +#define ELAN3IO_ARM_EVENTCOOKIE                _IOW ('e', ELAN3IO_USER_BASE + 18, EVENT_COOKIE)
84886 +#define ELAN3IO_WAIT_EVENTCOOKIE               _IOW ('e', ELAN3IO_USER_BASE + 19, EVENT_COOKIE)
84887 +
84888 +#define ELAN3IO_SWAPSPACE              _IOW ('e', ELAN3IO_USER_BASE + 20, SYS_SWAP_SPACE)
84889 +#define ELAN3IO_EXCEPTION_SPACE                _IOW ('e', ELAN3IO_USER_BASE + 21, SYS_EXCEPTION_SPACE)
84890 +#define ELAN3IO_GET_EXCEPTION          _IOR ('e', ELAN3IO_USER_BASE + 22, SYS_EXCEPTION)
84891 +
84892 +typedef struct elanio_unload_struct
84893 +{
84894 +    void       *addr;
84895 +    size_t      len;
84896 +} ELAN3IO_UNLOAD_STRUCT;
84897 +#define ELAN3IO_UNLOAD                 _IOW ('e', ELAN3IO_USER_BASE + 23, ELAN3IO_UNLOAD_STRUCT)
84898 +
84899 +
84900 +
84901 +typedef struct elanio_getroute_struct
84902 +{
84903 +    u_int              process;
84904 +    E3_uint16          flits[MAX_FLITS];
84905 +} ELAN3IO_GET_ROUTE_STRUCT;
84906 +#define ELAN3IO_GET_ROUTE              _IOW ('e', ELAN3IO_USER_BASE + 24, ELAN3IO_GET_ROUTE_STRUCT)
84907 +
84908 +typedef struct elanio_resetroute_struct
84909 +{
84910 +    u_int              process;
84911 +} ELAN3IO_RESET_ROUTE_STRUCT;
84912 +#define ELAN3IO_RESET_ROUTE            _IOW ('e', ELAN3IO_USER_BASE + 25, ELAN3IO_RESET_ROUTE_STRUCT)
84913 +
84914 +typedef struct elanio_checkroute_struct
84915 +{
84916 +    u_int              process;
84917 +    E3_uint32           routeError;
84918 +    E3_uint16          flits[MAX_FLITS];
84919 +} ELAN3IO_CHECK_ROUTE_STRUCT;
84920 +#define ELAN3IO_CHECK_ROUTE            _IOW ('e', ELAN3IO_USER_BASE + 26, ELAN3IO_CHECK_ROUTE_STRUCT)
84921 +
84922 +typedef struct elanio_vp2nodeId_struct
84923 +{
84924 +    u_int              process;
84925 +    unsigned short      nodeId;
84926 +    ELAN_CAPABILITY    cap;
84927 +} ELAN3IO_VP2NODEID_STRUCT;
84928 +#define ELAN3IO_VP2NODEID      _IOWR('e', ELAN3IO_USER_BASE + 27, ELAN3IO_VP2NODEID_STRUCT)
84929 +
84930 +#define ELAN3IO_SET_SIGNAL     _IOW ('e', ELAN3IO_USER_BASE + 28, int)
84931 +
84932 +typedef struct elanio_process_2_location_struct
84933 +{
84934 +    u_int              process;
84935 +    ELAN_LOCATION       loc;
84936 +} ELAN3IO_PROCESS_2_LOCATION_STRUCT;
84937 +#define ELAN3IO_PROCESS_2_LOCATION     _IOW ('e', ELAN3IO_USER_BASE + 29, ELAN3IO_PROCESS_2_LOCATION_STRUCT)
84938 +
84939 +
84940 +
84941 +/* ioctls on all device */
84942 +#define ELAN3IO_GENERIC_BASE           100
84943 +typedef struct elanio_get_devinfo_struct
84944 +{
84945 +    ELAN_DEVINFO *devinfo;
84946 +} ELAN3IO_GET_DEVINFO_STRUCT;
84947 +#define ELAN3IO_GET_DEVINFO            _IOR ('e', ELAN3IO_GENERIC_BASE + 0, ELAN_DEVINFO)
84948 +
84949 +typedef struct elanio_get_position_struct
84950 +{
84951 +    ELAN_POSITION *position;
84952 +} ELAN3IO_GET_POSITION_STRUCT;
84953 +#define ELAN3IO_GET_POSITION             _IOR ('e', ELAN3IO_GENERIC_BASE + 1, ELAN_POSITION)
84954 +
84955 +typedef struct elanio_stats_struct
84956 +{
84957 +    int                which;
84958 +    void       *ptr;
84959 +} ELAN3IO_STATS_STRUCT;
84960 +#define ELAN3IO_STATS                  _IOR ('e', ELAN3IO_GENERIC_BASE + 2, ELAN3IO_STATS_STRUCT)
84961 +#  define ELAN3_SYS_STATS_DEVICE       0
84962 +#  define ELAN3_SYS_STATS_MMU          1
84963 +
84964 +/* offsets on /dev/elan3/control */
84965 +
84966 +/* offsets on /dev/elan3/mem */
84967 +
84968 +/* page numbers on /dev/elan3/user */
84969 +#define ELAN3IO_OFF_COMMAND_PAGE               0
84970 +#define ELAN3IO_OFF_FLAG_PAGE          1
84971 +#define ELAN3IO_OFF_UREG_PAGE          2
84972 +
84973 +#endif /* LINUX */
84974 +
84975 +#endif /* __ELAN3_ELAN3IO_H */
84976 +
84977 +/*
84978 + * Local variables:
84979 + * c-file-style: "stroustrup"
84980 + * End:
84981 + */
84982 diff -urN clean/include/elan3/elanregs.h linux-2.6.9/include/elan3/elanregs.h
84983 --- clean/include/elan3/elanregs.h      1969-12-31 19:00:00.000000000 -0500
84984 +++ linux-2.6.9/include/elan3/elanregs.h        2004-04-22 08:27:21.000000000 -0400
84985 @@ -0,0 +1,1063 @@
84986 +/*
84987 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
84988 + *
84989 + *    For licensing information please see the supplied COPYING file
84990 + *
84991 + */
84992 +
84993 +/*
84994 + * Header file for internal slave mapping of the ELAN3 registers
84995 + */
84996 +
84997 +#ifndef _ELAN3_ELANREGS_H
84998 +#define _ELAN3_ELANREGS_H
84999 +
85000 +#ident "$Id: elanregs.h,v 1.87 2004/04/22 12:27:21 david Exp $"
85001 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanregs.h,v $*/
85002 +
85003 +#include <elan3/e3types.h>
85004 +#include <elan3/dma.h>
85005 +#include <elan3/elanuregs.h>
85006 +
85007 +#define MAX_ROOT_CONTEXT_MASK  0xfff
85008 +#define SYS_CONTEXT_BIT                0x1000
85009 +#define ALL_CONTEXT_BITS       (MAX_ROOT_CONTEXT_MASK | SYS_CONTEXT_BIT)
85010 +#define ROOT_TAB_OFFSET(Cntxt) (((Cntxt) & MAX_ROOT_CONTEXT_MASK) << 4)
85011 +#define CLEAR_SYS_BIT(Cntxt)   ((Cntxt) & ~SYS_CONTEXT_BIT)
85012 +
85013 +#define E3_CACHELINE_SIZE      (32)
85014 +#define E3_CACHE_SIZE          (8192)
85015 +
85016 +typedef volatile struct _E3_CacheSets
85017 +{
85018 +   E3_uint64   Set0[256];      /* 2k bytes per set */
85019 +   E3_uint64   Set1[256];      /* 2k bytes per set */
85020 +   E3_uint64   Set2[256];      /* 2k bytes per set */
85021 +   E3_uint64   Set3[256];      /* 2k bytes per set */
85022 +} E3_CacheSets;
85023 +
85024 +typedef union e3_cache_tag
85025 +{
85026 +   E3_uint64   Value;
85027 +   struct {
85028 +#if defined(__LITTLE_ENDIAN__)
85029 +       E3_uint32 pad2:8;               /* Undefined value when read */
85030 +       E3_uint32 LineError:1;          /* A line error has occured */
85031 +       E3_uint32 Modified:1;           /* Cache data is modified */
85032 +       E3_uint32 FillPending:1;                /* Pipelined fill occuring*/
85033 +       E3_uint32 AddrTag27to11:17;     /* Tag address bits 27 to 11 */
85034 +       E3_uint32 pad1:4;               /* Undefined value when read */
85035 +       E3_uint32 pad0;                 /* Undefined value when read */
85036 +#else
85037 +       E3_uint32 pad0;                 /* Undefined value when read */
85038 +       E3_uint32 pad1:4;               /* Undefined value when read */
85039 +       E3_uint32 AddrTag27to11:17;     /* Tag address bits 27 to 11 */
85040 +       E3_uint32 FillPending:1;                /* Pipelined fill occuring*/
85041 +       E3_uint32 Modified:1;           /* Cache data is modified */
85042 +       E3_uint32 LineError:1;          /* A line error has occured */
85043 +       E3_uint32 pad2:8;               /* Undefined value when read */
85044 +#endif
85045 +   } s;
85046 +} E3_CacheTag;
85047 +
85048 +#define E3_NumCacheLines       64
85049 +#define E3_NumCacheSets                4
85050 +
85051 +typedef volatile struct _E3_CacheTags
85052 +{
85053 +   E3_CacheTag Tags[E3_NumCacheLines][E3_NumCacheSets];        /* 2k bytes per set */
85054 +} E3_CacheTags;
85055 +
85056 +typedef union E3_IProcStatus_Reg
85057 +{
85058 +    E3_uint32 Status;
85059 +    struct
85060 +    {
85061 +#if defined(__LITTLE_ENDIAN__)
85062 +       E3_uint32 TrapType:8;           /* iprocs trap ucode address */
85063 +       E3_uint32 SuspendAddr:8;        /* iprocs suspend address */
85064 +       E3_uint32 EopType:2;            /* Type of Eop Received */
85065 +       E3_uint32 QueueingPacket:1;     /* receiving a queueing packet */
85066 +       E3_uint32 AckSent:1;            /* a packet ack has been sent */
85067 +       E3_uint32 Reject:1;             /* a packet nack has been sent */
85068 +       E3_uint32 CrcStatus:2;          /* Crc Status value */
85069 +       E3_uint32 BadLength:1;          /* Eop was received in a bad place */
85070 +       E3_uint32 Chan1:1;              /* This packet received on v chan1 */
85071 +       E3_uint32 First:1;              /* This is the first transaction in the packet */
85072 +       E3_uint32 Last:1;               /* This is the last transaction in the packet */
85073 +       E3_uint32 Unused:2;
85074 +       E3_uint32 WakeupFunction:3;     /* iprocs wakeup function */
85075 +#else
85076 +       E3_uint32 WakeupFunction:3;     /* iprocs wakeup function */
85077 +       E3_uint32 Unused:2;
85078 +       E3_uint32 Last:1;               /* This is the last transaction in the packet */
85079 +       E3_uint32 First:1;              /* This is the first transaction in the packet */
85080 +       E3_uint32 Chan1:1;              /* This packet received on v chan1 */
85081 +       E3_uint32 BadLength:1;          /* Eop was received in a bad place */
85082 +       E3_uint32 CrcStatus:2;          /* Crc Status value */
85083 +       E3_uint32 Reject:1;             /* a packet nack has been sent */
85084 +       E3_uint32 AckSent:1;            /* a packet ack has been sent */
85085 +       E3_uint32 QueueingPacket:1;     /* receiving a queueing packet */
85086 +       E3_uint32 EopType:2;            /* Type of Eop Received */
85087 +       E3_uint32 SuspendAddr:8;        /* iprocs suspend address */
85088 +       E3_uint32 TrapType:8;           /* iprocs trap ucode address */
85089 +#endif
85090 +    } s;
85091 +} E3_IProcStatus_Reg;
85092 +
85093 +#define CRC_STATUS_GOOD    (0 << 21)
85094 +#define CRC_STATUS_DISCARD (1 << 21)
85095 +#define CRC_STATUS_ERROR   (2 << 21)
85096 +#define CRC_STATUS_BAD     (3 << 21)
85097 +
85098 +#define CRC_MASK          (3 << 21)
85099 +
85100 +#define EOP_GOOD          (1 << 16)
85101 +#define EOP_BADACK        (2 << 16)
85102 +#define EOP_ERROR_RESET           (3 << 16)
85103 +
85104 +#define E3_IPS_LastTrans       (1 << 26)
85105 +#define E3_IPS_FirstTrans      (1 << 25)
85106 +#define E3_IPS_VChan1          (1 << 24)
85107 +#define E3_IPS_BadLength       (1 << 23)
85108 +#define E3_IPS_CrcMask         (3 << 21)
85109 +#define E3_IPS_Rejected                (1 << 20)
85110 +#define E3_IPS_AckSent         (1 << 19)
85111 +#define E3_IPS_QueueingPacket  (1 << 18)
85112 +#define E3_IPS_EopType         (3 << 16)
85113 +
85114 +typedef union E3_Status_Reg
85115 +{
85116 +    E3_uint32 Status;
85117 +    struct
85118 +    {
85119 +#if defined(__LITTLE_ENDIAN__)
85120 +       E3_uint32 TrapType:8;           /* procs trap ucode address */
85121 +       E3_uint32 SuspendAddr:8;        /* procs suspend address */
85122 +       E3_uint32 Context:13;           /* procs current context */
85123 +       E3_uint32 WakeupFunction:3;     /* procs wakeup function */
85124 +#else
85125 +       E3_uint32 WakeupFunction:3;     /* procs wakeup function */
85126 +       E3_uint32 Context:13;           /* procs current context */
85127 +       E3_uint32 SuspendAddr:8;        /* procs suspend address */
85128 +       E3_uint32 TrapType:8;           /* procs trap ucode address */
85129 +#endif
85130 +    } s;
85131 +} E3_Status_Reg;
85132 +
85133 +/* values for WakeupFunction */
85134 +#define SleepOneTick                   0
85135 +#define WakeupToSendTransOrEop         1
85136 +#define SleepOneTickThenRunnable       2
85137 +#define WakeupNever                    4
85138 +/* extra dma wakeup functions */
85139 +#define WakupeToSendTransOrEop         1
85140 +#define WakeupForPacketAck             3
85141 +#define WakeupToSendTrans              5
85142 +/* extra thread wakup function */
85143 +#define WakeupStopped                  3
85144 +/* extra cproc wakup function */
85145 +#define WakeupSetEvent                 3
85146 +
85147 +#define GET_STATUS_CONTEXT(Ptr)      ((Ptr.Status >> 16) & 0x1fff)
85148 +#define GET_STATUS_SUSPEND_ADDR(Ptr) ((Ptr.Status >> 8) & 0xff)
85149 +#define GET_STATUS_TRAPTYPE(Ptr)     ((E3_uint32)(Ptr.Status & 0xff))
85150 +
85151 +/*
85152 + * Interrupt register bits
85153 + */
85154 +#define INT_PciMemErr                  (1<<15) /* Pci memory access error */
85155 +#define INT_SDRamInt                   (1<<14) /* SDRam ECC interrupt */
85156 +#define INT_EventInterrupt             (1<<13) /* Event Interrupt */
85157 +#define INT_LinkError                  (1<<12) /* Link Error */
85158 +#define INT_ComQueue                   (1<<11) /* a comm queue half full */
85159 +#define INT_TProcHalted                        (1<<10) /* Tproc Halted */
85160 +#define INT_DProcHalted                        (1<<9) /* Dmas Halted */
85161 +#define INT_DiscardingNonSysCntx       (1<<8) /* Inputters Discarding Non-SysCntx */
85162 +#define INT_DiscardingSysCntx          (1<<7) /* Inputters Discarding SysCntx */
85163 +#define INT_TProc                      (1<<6) /* tproc interrupt */
85164 +#define INT_CProc                      (1<<5) /* cproc interrupt */
85165 +#define INT_DProc                      (1<<4) /* dproc interrupt */
85166 +#define INT_IProcCh1NonSysCntx         (1<<3) /* iproc non-SysCntx interrupt */
85167 +#define INT_IProcCh1SysCntx            (1<<2) /* iproc SysCntx interrupt */
85168 +#define INT_IProcCh0NonSysCntx         (1<<1) /* iproc non-SysCntx interrupt */
85169 +#define INT_IProcCh0SysCntx            (1<<0) /* iproc SysCntx interrupt */
85170 +
85171 +#define INT_Inputters          (INT_IProcCh0SysCntx | INT_IProcCh0NonSysCntx | INT_IProcCh1SysCntx | INT_IProcCh1NonSysCntx)
85172 +#define INT_Discarding         (INT_DiscardingSysCntx | INT_DiscardingNonSysCntx)
85173 +#define INT_Halted             (INT_DProcHalted | INT_TProcHalted)
85174 +#define INT_ErrorInterrupts    (INT_PciMemErr | INT_SDRamInt | INT_LinkError)
85175 +
85176 +/*
85177 + * Link state bits.
85178 + */
85179 +#define LS_LinkNotReady        (1 << 0) /* Link is in reset or recovering from an error */
85180 +#define LS_Locked      (1 << 1) /* Linkinput PLL is locked */
85181 +#define LS_LockError   (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
85182 +#define LS_DeskewError (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
85183 +#define LS_PhaseError  (1 << 4) /* Linkinput Phase alignment error. */
85184 +#define LS_DataError   (1 << 5) /* Received value was neither good data or a token. */
85185 +#define LS_FifoOvFlow0 (1 << 6) /* Channel 0 input fifo overflowed. */
85186 +#define LS_FifoOvFlow1 (1 << 7) /* Channel 1 input fifo overflowed. */
85187 +
85188 +/*
85189 + * Link State Constant defines, used for writing to LinkSetValue
85190 + */
85191 +
85192 +#define LRS_DataDel0           0x0
85193 +#define LRS_DataDel1           0x1
85194 +#define LRS_DataDel2           0x2
85195 +#define LRS_DataDel3           0x3
85196 +#define LRS_DataDel4           0x4
85197 +#define LRS_DataDel5           0x5
85198 +#define LRS_DataDel6           0x6
85199 +#define LRS_DataDel7           0x7
85200 +#define LRS_DataDel8           0x8
85201 +#define LRS_PllDelValue                0x9
85202 +#define LRS_ClockEven          0xA
85203 +#define LRS_ClockOdd           0xB
85204 +#define LRS_ErrorLSW           0xC
85205 +#define LRS_ErrorMSW           0xD
85206 +#define LRS_FinCoarseDeskew    0xE
85207 +#define LRS_LinkInValue                0xF
85208 +#define LRS_NumLinkDels         0x10
85209 +
85210 +#define LRS_Pllfast             0x40
85211
85212 +union Sched_Status
85213 +{
85214 +    E3_uint32 Status;
85215 +    struct
85216 +    {
85217 +#if defined(__LITTLE_ENDIAN__)
85218 +       E3_uint32 StopNonSysCntxs:1;
85219 +       E3_uint32 FlushCommandQueues:1;
85220 +       E3_uint32 HaltDmas:1;
85221 +       E3_uint32 HaltDmaDequeue:1;
85222 +       E3_uint32 HaltThread:1;
85223 +       E3_uint32 CProcStop:1;
85224 +       E3_uint32 DiscardSysCntxIn:1;
85225 +       E3_uint32 DiscardNonSysCntxIn:1;
85226 +       E3_uint32 RestartCh0SysCntx:1;
85227 +       E3_uint32 RestartCh0NonSysCntx:1;
85228 +       E3_uint32 RestartCh1SysCntx:1;
85229 +       E3_uint32 RestartCh1NonSysCntx:1;
85230 +       E3_uint32 RestartDProc:1;
85231 +       E3_uint32 RestartTProc:1;
85232 +       E3_uint32 RestartCProc:1;
85233 +       E3_uint32 ClearLinkErrorInt:1;
85234 +       E3_uint32 :3;
85235 +       E3_uint32 LinkSetValue:10; 
85236 +       E3_uint32 FixLinkDelays:1;
85237 +       E3_uint32 LinkBoundaryScan:1;
85238 +#else
85239 +       E3_uint32 LinkBoundaryScan:1;
85240 +       E3_uint32 FixLinkDelays:1;
85241 +       E3_uint32 LinkSetValue:10; 
85242 +       E3_uint32 :3;
85243 +       E3_uint32 ClearLinkErrorInt:1;
85244 +       E3_uint32 RestartCProc:1;
85245 +       E3_uint32 RestartTProc:1;
85246 +       E3_uint32 RestartDProc:1;
85247 +       E3_uint32 RestartCh1NonSysCntx:1;
85248 +       E3_uint32 RestartCh1SysCntx:1;
85249 +       E3_uint32 RestartCh0NonSysCntx:1;
85250 +       E3_uint32 RestartCh0SysCntx:1;
85251 +       E3_uint32 DiscardNonSysCntxIn:1;
85252 +       E3_uint32 DiscardSysCntxIn:1;
85253 +       E3_uint32 CProcStop:1;
85254 +       E3_uint32 HaltThread:1;
85255 +       E3_uint32 HaltDmaDequeue:1;
85256 +       E3_uint32 HaltDmas:1;
85257 +       E3_uint32 FlushCommandQueues:1;
85258 +       E3_uint32 StopNonSysCntxs:1;
85259 +#endif
85260 +    } s;
85261 +};
85262 +
85263 +#define LinkBoundaryScan       ((E3_uint32) 1<<31) /* Enables link boundary scan mode (bit 31) */
85264 +#define FixLinkDelays          ((E3_uint32) 1<<30) /* Fixes (freezes) the link delays (bit 30) */
85265 +#define LinkSetValue(Val, OldVal) ((E3_uint32) (((Val) & 0x3ff) << 20) | ((OldVal) & ((~0x3ff) << 20)))
85266 +
85267 +#define ClearLinkErrorInt      ((E3_uint32) 1<<16) /* Clears the link error interrupt */
85268 +#define RestartCProc           ((E3_uint32) 1<<15) /* Clears command proc interrupt */
85269 +#define RestartTProc           ((E3_uint32) 1<<14) /* Clears thread interrupt */
85270 +#define RestartDProc           ((E3_uint32) 1<<13) /* Clears dma0 interrupt */
85271 +#define RestartCh1NonSysCntx   ((E3_uint32) 1<<12) /* Clears interrupt */
85272 +#define RestartCh1SysCntx      ((E3_uint32) 1<<11) /* Clears interrupt */
85273 +#define RestartCh0NonSysCntx   ((E3_uint32) 1<<10) /* Clears interrupt */
85274 +#define RestartCh0SysCntx      ((E3_uint32) 1<<9) /* Clears interrupt */
85275 +#define CProcStopped           ((E3_uint32) 1<<9) /* Read value only */
85276 +
85277 +#define TraceSetEvents         ((E3_uint32) 1<<8)
85278 +#define DiscardNonSysCntxIn    ((E3_uint32) 1<<7)
85279 +#define DiscardSysCntxIn       ((E3_uint32) 1<<6)
85280 +#define CProcStop              ((E3_uint32) 1<<5) /* Will empty all the command port queues. */
85281 +#define HaltThread             ((E3_uint32) 1<<4) /* Will stop the thread proc and clear the tproc command queue */
85282 +#define HaltDmaDequeue         ((E3_uint32) 1<<3) /* Will stop the dmaers starting new dma's. */
85283 +#define HaltDmas               ((E3_uint32) 1<<2) /* Will stop the dmaers and clear the dma command queues */
85284 +#define FlushCommandQueues     ((E3_uint32) 1<<1) /* Causes the command ports to be flushed. */
85285 +#define StopNonSysCntxs                ((E3_uint32) 1<<0) /* Prevents a non-SysCntx from starting. */
85286 +
85287 +/* Initial value of schedule status register */
85288 +#define LinkResetToken         0x00F
85289 +
85290 +#define Sched_Initial_Value    (LinkBoundaryScan | (LinkResetToken << 20) | \
85291 +                                DiscardSysCntxIn | DiscardNonSysCntxIn | HaltThread | HaltDmas)
85292 +
85293 +#define StopDmaQueues       (HaltDmaDequeue | HaltDmas | \
85294 +                             DiscardNonSysCntxIn | DiscardSysCntxIn)
85295 +#define CheckDmaQueueStopped (INT_DiscardingNonSysCntx | INT_DiscardingSysCntx | INT_DProcHalted)
85296 +
85297 +#define HaltStopAndExtTestMask 0xfff001ff
85298 +#define HaltAndStopMask                0x000001ff
85299 +
85300 +
85301 +#define DmaComQueueNotEmpty    (1<<0)
85302 +#define ThreadComQueueNotEmpty (1<<1)
85303 +#define EventComQueueNotEmpty  (1<<2)
85304 +#define DmaComQueueHalfFull    (1<<3)
85305 +#define ThreadComQueueHalfFull (1<<4)
85306 +#define EventComQueueHalfFull  (1<<5)
85307 +#define DmaComQueueError       (1<<6)
85308 +#define ThreadComQueueError    (1<<7)
85309 +#define EventComQueueError     (1<<8)
85310 +
85311 +#define ComQueueNotEmpty       (DmaComQueueNotEmpty | ThreadComQueueNotEmpty | EventComQueueNotEmpty)
85312 +#define ComQueueError          (DmaComQueueError | ThreadComQueueError | EventComQueueError)
85313 +
85314 +typedef union _E3_DmaInfo
85315 +{
85316 +    E3_uint32  Value;
85317 +    struct
85318 +    {
85319 +#if defined(__LITTLE_ENDIAN__)
85320 +       E3_uint32 DmaOutputOpen:1;      /* The packet is currently open */
85321 +       E3_uint32 :7;
85322 +       E3_uint32 TimeSliceCount:2;     /* Time left to timeslice */
85323 +       E3_uint32 UseRemotePriv:1;      /* Set for remote read dmas */
85324 +       E3_uint32 DmaLastPacket:1;      /* Set for the last packet of a dma */
85325 +       E3_uint32 PacketAckValue:2;     /* Packet ack type. Valid if AckBufferValid set. */
85326 +       E3_uint32 PacketTimeout:1;      /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
85327 +       E3_uint32 AckBufferValid:1;     /* Packet ack is valid. */
85328 +       E3_uint32 :16;                  /* read as Zero */
85329 +#else
85330 +       E3_uint32 :16;                  /* read as Zero */
85331 +       E3_uint32 AckBufferValid:1;     /* Packet ack is valid. */
85332 +       E3_uint32 PacketTimeout:1;      /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
85333 +       E3_uint32 PacketAckValue:2;     /* Packet ack type. Valid if AckBufferValid set. */
85334 +       E3_uint32 DmaLastPacket:1;      /* Set for the last packet of a dma */
85335 +       E3_uint32 UseRemotePriv:1;      /* Set for remote read dmas */
85336 +       E3_uint32 TimeSliceCount:2;     /* Time left to timeslice */
85337 +       E3_uint32 :7;
85338 +       E3_uint32 DmaOutputOpen:1;      /* The packet is currently open */
85339 +#endif
85340 +    } s;
85341 +} E3_DmaInfo;
85342 +
85343 +typedef volatile struct _E3_DmaRds
85344 +{
85345 +   E3_uint32           DMA_Source4to0AndTwoReads;
85346 +   E3_uint32           pad13;
85347 +   E3_uint32           DMA_BytesToRead;
85348 +   E3_uint32           pad14;
85349 +   E3_uint32           DMA_MinusPacketSize;
85350 +   E3_uint32           pad15;
85351 +   E3_uint32           DMA_MaxMinusPacketSize;
85352 +   E3_uint32           pad16;
85353 +   E3_uint32           DMA_DmaOutputOpen;
85354 +   E3_uint32           pad16a;
85355 +   E3_DmaInfo          DMA_PacketInfo;
85356 +   E3_uint32           pad17[7];
85357 +   E3_uint32           IProcTrapBase;
85358 +   E3_uint32           pad18;
85359 +   E3_uint32           IProcBlockTrapBase;
85360 +   E3_uint32           pad19[11];
85361 +} E3_DmaRds;
85362 +   
85363 +typedef volatile struct _E3_DmaWrs
85364 +{
85365 +   E3_uint64           pad0;
85366 +   E3_uint64           LdAlignment;
85367 +   E3_uint64           ResetAckNLdBytesToWr;
85368 +   E3_uint64           SetAckNLdBytesToWr;
85369 +   E3_uint64           LdBytesToRd;
85370 +   E3_uint64           LdDmaType;
85371 +   E3_uint64           SendRoutes;
85372 +   E3_uint64           SendEop;
85373 +   E3_uint64           pad1[8];
85374 +} E3_DmaWrs;
85375 +
85376 +typedef volatile struct _E3_Exts
85377 +{
85378 +   E3_uint32           CurrContext;                            /* 0x12a00 */
85379 +   E3_uint32           pad0;
85380 +   E3_Status_Reg       DProcStatus;                            /* 0x12a08 */
85381 +   E3_uint32           pad1;
85382 +   E3_Status_Reg       CProcStatus;                            /* 0x12a10 */
85383 +   E3_uint32           pad2;
85384 +   E3_Status_Reg       TProcStatus;                            /* 0x12a18 */
85385 +   E3_uint32           pad3;
85386 +   E3_IProcStatus_Reg  IProcStatus;                            /* 0x12a20 */
85387 +   E3_uint32           pad4[3];
85388 +
85389 +   E3_uint32           IProcTypeContext;                       /* 0x12a30 */
85390 +   E3_uint32           pad5;
85391 +   E3_uint32           IProcTransAddr;                         /* 0x12a38 */
85392 +   E3_uint32           pad6;
85393 +   E3_uint32           IProcCurrTransData0;                    /* 0x12a40 */
85394 +   E3_uint32           pad7;
85395 +   E3_uint32           IProcCurrTransData1;                    /* 0x12a48 */
85396 +   E3_uint32           pad8;
85397 +
85398 +   E3_uint32           SchCntReg;                              /* 0x12a50 */
85399 +   E3_uint32           pad9;
85400 +   E3_uint32           InterruptReg;                           /* 0x12a58 */
85401 +   E3_uint32           pad10;
85402 +   E3_uint32           InterruptMask;                          /* 0x12a60 */
85403 +   E3_uint32           pad11;
85404 +   E3_uint32           LinkErrorTypes;                         /* 0x12a68 */
85405 +   E3_uint32           pad12[3];
85406 +   E3_uint32           LinkState;      /* a read here returns the DataDel value for the */
85407 +                                       /* link that has just been defined by a write to */
85408 +                                       /* Regs.Exts.SchCntReg.LinkSetValue */
85409 +   E3_uint32           pad13;
85410 +
85411 +   union                                                       /* 0x12a80 */
85412 +   {
85413 +      E3_DmaWrs                DmaWrs;
85414 +      E3_DmaRds                DmaRds;
85415 +   } Dmas;
85416 +} E3_Exts;
85417 +
85418 +typedef union com_port_entry
85419 +{
85420 +    E3_uint64  type;
85421 +    struct
85422 +    {
85423 +       E3_uint32 Address;              /* Command VAddr */
85424 +#if defined(__LITTLE_ENDIAN__)
85425 +       E3_uint32 Context0Issue:1;      /* Issue was for context 0 */
85426 +       E3_uint32 EventNotCommand:1;    /* Issue address bit 3 */
85427 +       E3_uint32 RemoteDesc:1;         /* Issue address bit 5 */
85428 +       E3_uint32 :13;                  /* read as Zero */
85429 +       E3_uint32 Context:12;           /* Command Context */
85430 +       E3_uint32 :4;                   /* read as Zero */
85431 +#else
85432 +       E3_uint32 :4;                   /* read as Zero */
85433 +       E3_uint32 Context:12;           /* Command Context */
85434 +       E3_uint32 :13;                  /* read as Zero */
85435 +       E3_uint32 RemoteDesc:1;         /* Issue address bit 5 */
85436 +       E3_uint32 EventNotCommand:1;    /* Issue address bit 3 */
85437 +       E3_uint32 Context0Issue:1;      /* Issue was for context 0 */
85438 +#endif
85439 +    } s;
85440 +} E3_ComPortEntry;
85441 +
85442 +/* control reg bits */
85443 +#define CONT_MMU_ENABLE                (1 << 0) /* bit 0 enables mmu */
85444 +#define CONT_ENABLE_8K_PAGES   (1 << 1) /* When set smallest page is 8k instead of 4k. */
85445 +#define CONT_EN_ALL_SETS       (1 << 2) /* enable cache */
85446 +#define CONT_CACHE_LEVEL0      (1 << 3) /* cache context table */
85447 +#define CONT_CACHE_LEVEL1      (1 << 4) /* cache up level 1 PTD/PTE */
85448 +#define CONT_CACHE_LEVEL2      (1 << 5) /* cache up level 2 PTD/PTE */
85449 +#define CONT_CACHE_LEVEL3      (1 << 6) /* cache up level 3 PTD/PTE */
85450 +#define CONT_CACHE_TRAPS       (1 << 7) /* cache up traps */
85451 +#define CONT_CACHE_LEV0_ROUTES (1 << 8) /* cache up small routes */
85452 +#define CONT_CACHE_LEV1_ROUTES (1 << 9) /* cache up large routes */
85453 +#define CONT_CACHE_ALL         (CONT_CACHE_LEVEL0 | CONT_CACHE_LEVEL1 | CONT_CACHE_LEVEL2 | \
85454 +                                CONT_CACHE_LEVEL3 | CONT_CACHE_TRAPS | \
85455 +                                CONT_CACHE_LEV0_ROUTES | CONT_CACHE_LEV1_ROUTES)
85456 +
85457 +#define CONT_SYNCHRONOUS       (1 << 10) /* PCI running sync */
85458 +#define CONT_SER               (1 << 11) /* Single bit output (Elan1 SER bit) */
85459 +#define CONT_SIR               (1 << 12) /* Writing 1 resets elan. */
85460 +
85461 +#define CONT_PSYCHO_MODE       (1 << 13) /* Enables all the perversion required by psycho */
85462 +#define CONT_ENABLE_ECC                (1 << 14) /* Enables error detecting on the ECC */
85463 +#define CONT_SDRAM_TESTING     (1 << 15) /* Switches to test mode for checking ECC data bits */
85464 +
85465 +/* defines SDRam CasLatency. Once set will not change again unless reset is reasserted. */
85466 +/* 1 = Cas Latency is 3, 0 = Cas Latency is 2 */
85467 +#define CAS_LATENCY_2          (0 << 16)
85468 +#define CAS_LATENCY_3          (1 << 16)
85469 +#define REFRESH_RATE_2US       (0 << 17) /* defines 2us SDRam Refresh rate. */
85470 +#define REFRESH_RATE_4US       (1 << 17) /* defines 4us SDRam Refresh rate. */
85471 +#define REFRESH_RATE_8US       (2 << 17) /* defines 8us SDRam Refresh rate. */
85472 +#define REFRESH_RATE_16US      (3 << 17) /* defines 16us SDRam Refresh rate. */
85473 +
85474 +#define CONT_PCI_ERR           (1 << 19) /* Read 1 if PCI Error */
85475 +#define CONT_CLEAR_PCI_ERROR   (1 << 19) /* Clears a PCI error. */
85476 +
85477 +/* Will cause the PCI error bit to become set. This is used to force the threads proc
85478 +   and the uProc to start to stall. */
85479 +#define CONT_SET_PCI_ERROR     (1 << 20)
85480 +
85481 +/* Writes SDram control reg when set. Also starts SDram memory system refreshing. */
85482 +#define SETUP_SDRAM            (1 << 21)
85483 +
85484 +/* Flushes the tlb */
85485 +#define MMU_FLUSH              (1 << 22)
85486 +/* and read back when it's finished */
85487 +#define MMU_FLUSHED            (1 << 0)
85488 +
85489 +/* Clears any ECC error detected by SDRam interface */
85490 +#define CLEAR_SDRAM_ERROR      (1 << 23)
85491 +
85492 +#define ECC_ADDR_MASK          0x0ffffff8
85493 +#define ECC_UE_MASK            0x1 
85494 +#define ECC_CE_MASK            0x2
85495 +#define ECC_ME_MASK            0x4 
85496 +#define ECC_SYN_MASK           0xff
85497 +
85498 +/* define page table entry bit fields */
85499 +#define TLB_PageSizeBits       (3 << 0)
85500 +#define TLB_ACCBits            (7 << 2)
85501 +#define TLB_LocalBit           (1 << 5)
85502 +#define TLB_PCI64BitTargetBit  (1 << 6)
85503 +#define TLB_PCIBigEndianBit    (1 << 7)
85504 +
85505 +#define TLB_ModifiedBit                (1 << 55)
85506 +#define TLB_ReferencedBit      (1 << 63)
85507 +
85508 +/* Used to read values from the tlb. */
85509 +#define TLB_TlbReadCntBitsSh   56
85510 +#define TLB_UseSelAddrSh       (1ULL << 60)
85511 +#define TLB_WriteTlbLine       (1ULL << 61)
85512 +
85513 +#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \
85514 +                             ((E3_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh))
85515 +
85516 +typedef union _E3_CacheContReg
85517 +{
85518 +    E3_uint32 ContReg;
85519 +    struct
85520 +    {
85521 +#if defined(__LITTLE_ENDIAN__)
85522 +       E3_uint32 MMU_Enable:1;         /* wr 1 to enable the MMU */
85523 +       E3_uint32 Set8kPages:1;         /* wr 1 smallest page is 8k. */
85524 +       E3_uint32 EnableAllSets:1;      /* wr 1 All the cache sets are enabled */
85525 +       E3_uint32 Cache_Level0:1;       /* wr 1 lev0 page tabs will be cached */
85526 +       E3_uint32 Cache_Level1:1;       /* wr 1 lev1 page tabs will be cached */
85527 +       E3_uint32 Cache_Level2:1;       /* wr 1 lev2 page tabs will be cached */
85528 +       E3_uint32 Cache_Level3:1;       /* wr 1 lev3 page tabs will be cached */
85529 +       E3_uint32 Cache_Traps:1;        /* wr 1 trap info will be cached */
85530 +       E3_uint32 Cache_Lev0_Routes:1;  /* wr 1 small routes will be cached */
85531 +       E3_uint32 Cache_Lev1_Routes:1;  /* wr 1 big routes will be cached */
85532 +       E3_uint32 PCI_Synchronous:1;    /* Pci and sys clocks are running synchronously*/
85533 +       E3_uint32 SER:1;                /* 1 bit output port */
85534 +       E3_uint32 SIR:1;                /* write 1 will reset elan */
85535 +       E3_uint32 PsychoMode:1;         /* Enables psycho perversion mode. */
85536 +       E3_uint32 CasLatency:1;         /* 1=cas latency=3, 0=cas latency=2 */
85537 +       E3_uint32 RefreshRate:2;        /* 0=2us, 1=4us, 2=8us, 3=16us */
85538 +       E3_uint32 Pci_Err:1;            /* pci error. Write 1 clears err */
85539 +       E3_uint32 Set_Pci_Error:1;      /* Will simulate a PCI error */
85540 +       E3_uint32 StartSDRam:1;         /* Starts the sdram subsystem */
85541 +       E3_uint32 FlushTlb:1;           /* Flush the contents of the tlb */
85542 +       E3_uint32 :11;
85543 +#else
85544 +       E3_uint32 :11;
85545 +       E3_uint32 FlushTlb:1;           /* Flush the contents of the tlb */
85546 +       E3_uint32 StartSDRam:1;         /* Starts the sdram subsystem */
85547 +       E3_uint32 Set_Pci_Error:1;      /* Will simulate a PCI error */
85548 +       E3_uint32 Pci_Err:1;            /* pci error. Write 1 clears err */
85549 +       E3_uint32 RefreshRate:2;        /* 0=2us, 1=4us, 2=8us, 3=16us */
85550 +       E3_uint32 CasLatency:1;         /* 1=cas latency=3, 0=cas latency=2 */
85551 +       E3_uint32 PsychoMode:1;         /* Enables psycho perversion mode. */
85552 +       E3_uint32 SIR:1;                /* write 1 will reset elan */
85553 +       E3_uint32 SER:1;                /* 1 bit output port */
85554 +       E3_uint32 PCI_Synchronous:1;    /* Pci and sys clocks are running synchronously*/
85555 +       E3_uint32 Cache_Lev1_Routes:1;  /* wr 1 big routes will be cached */
85556 +       E3_uint32 Cache_Lev0_Routes:1;  /* wr 1 small routes will be cached */
85557 +       E3_uint32 Cache_Traps:1;        /* wr 1 trap info will be cached */
85558 +       E3_uint32 Cache_Level3:1;       /* wr 1 lev3 page tabs will be cached */
85559 +       E3_uint32 Cache_Level2:1;       /* wr 1 lev2 page tabs will be cached */
85560 +       E3_uint32 Cache_Level1:1;       /* wr 1 lev1 page tabs will be cached */
85561 +       E3_uint32 Cache_Level0:1;       /* wr 1 lev0 page tabs will be cached */
85562 +       E3_uint32 EnableAllSets:1;      /* wr 1 All the cache sets are enabled */
85563 +       E3_uint32 Set8kPages:1;         /* wr 1 smallest page is 8k. */
85564 +       E3_uint32 MMU_Enable:1;         /* wr 1 to enable the MMU */
85565 +#endif
85566 +    } s;
85567 +} E3_CacheContReg;
85568 +
85569 +typedef union _E3_TrapBits
85570 +{
85571 +    volatile E3_uint32 Bits;
85572 +    struct
85573 +    {
85574 +#if defined(__LITTLE_ENDIAN__)
85575 +       E3_uint32 ForcedTProcTrap:1;     /* The threads proc has been halted */
85576 +       E3_uint32 InstAccessException:1; /* An instruction access exception */
85577 +       E3_uint32 Unimplemented:1;       /* Unimplemented instruction executed */
85578 +       E3_uint32 DataAccessException:1; /* A data access exception */  
85579 +
85580 +       E3_uint32 ThreadTimeout:1;       /* The threads outputer has timed out */
85581 +       E3_uint32 OpenException:1;       /* Invalid sequence of open, sendtr or close */
85582 +       E3_uint32 OpenRouteFetch:1;      /* Fault while fetching routes for previous open*/
85583 +       E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
85584 +       
85585 +       E3_uint32 PacketAckValue:2;      /* Packet ack type. Valid if AckBufferValid set. */
85586 +       E3_uint32 PacketTimeout:1;       /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
85587 +
85588 +       E3_uint32 AckBufferValid:1;      /* The PacketAckValue bits are valid */
85589 +       E3_uint32 OutputWasOpen:1;       /* The output was open when tproc trapped */
85590 +       E3_uint32 TProcDeschedule:2;     /* The reason the tproc stopped running. */
85591 +       E3_uint32 :17;
85592 +#else
85593 +       E3_uint32 :17;
85594 +       E3_uint32 TProcDeschedule:2;     /* The reason the tproc stopped running. */
85595 +       E3_uint32 OutputWasOpen:1;       /* The output was open when tproc trapped */
85596 +       E3_uint32 AckBufferValid:1;      /* The PacketAckValue bits are valid */
85597 +       
85598 +       E3_uint32 PacketTimeout:1;       /* Packet timeout. Sent an EopError. Valid if AckBufferValid set. */
85599 +       E3_uint32 PacketAckValue:2;      /* Packet ack type. Valid if AckBufferValid set. */
85600 +       
85601 +       E3_uint32 TrapForTooManyInsts:1; /* Thread has been executing for too long */
85602 +       E3_uint32 OpenRouteFetch:1;      /* Fault while fetching routes for previous open*/
85603 +       E3_uint32 OpenException:1;       /* Invalid sequence of open, sendtr or close */
85604 +       E3_uint32 ThreadTimeout:1;       /* The threads outputer has timed out */
85605 +
85606 +       E3_uint32 DataAccessException:1; /* A data access exception */
85607 +       E3_uint32 Unimplemented:1;       /* Unimplemented instruction executed */
85608 +       E3_uint32 InstAccessException:1; /* An instruction access exception */
85609 +       E3_uint32 ForcedTProcTrap:1;     /* The threads proc has been halted */
85610 +#endif
85611 +    } s;
85612 +} E3_TrapBits;
85613 +
85614 +typedef union _E3_DirtyBits
85615 +{
85616 +    volatile E3_uint32 Bits;
85617 +    struct
85618 +    {
85619 +#if defined(__LITTLE_ENDIAN__)
85620 +       E3_uint32 GlobalsDirty:8;
85621 +       E3_uint32 OutsDirty:8;          /* will always read as dirty. */
85622 +       E3_uint32 LocalsDirty:8;
85623 +       E3_uint32 InsDirty:8;
85624 +#else
85625 +       E3_uint32 InsDirty:8;
85626 +       E3_uint32 LocalsDirty:8;
85627 +       E3_uint32 OutsDirty:8;          /* will always read as dirty. */
85628 +       E3_uint32 GlobalsDirty:8;
85629 +#endif
85630 +    } s;
85631 +} E3_DirtyBits;
85632 +
85633 +#define E3_TProcDescheduleMask    0x6000
85634 +#define E3_TProcDescheduleWait    0x2000
85635 +#define E3_TProcDescheduleSuspend 0x4000
85636 +#define E3_TProcDescheduleBreak   0x6000
85637 +
85638 +#define E3_TrapBitsMask          0x7fff
85639 +
85640 +#define ThreadRestartFromTrapBit       1
85641 +#define ThreadReloadAllRegs            2
85642 +
85643 +#define E3_PAckOk      0
85644 +#define E3_PAckTestFail        1
85645 +#define E3_PAckDiscard 2
85646 +#define E3_PAckError   3
85647 +
85648 +typedef volatile struct _E3_DataBusMap
85649 +{
85650 +   E3_uint64            Dma_Alignment_Port[8];         /* 0x00002800 */
85651 +   E3_uint32            pad0[0x30];                    /* 0x00002840 */
85652 +
85653 +   E3_uint32            Input_Trans0_Data[0x10];       /* 0x00002900 */
85654 +   E3_uint32            Input_Trans1_Data[0x10];
85655 +   E3_uint32            Input_Trans2_Data[0x10];
85656 +   E3_uint32            Input_Trans3_Data[0x10];
85657 +
85658 +/* this is the start of the exts directly addressable from the ucode. */
85659 +   E3_Exts              Exts;                          /* 0x00002a00 */
85660 +
85661 +/* this is the start of the registers directly addressable from the ucode. */
85662 +   E3_DMA               Dma_Desc;                      /* 0x00002b00 */
85663 +
85664 +   E3_uint32            Dma_Last_Packet_Size;          /* 0x00002b20 */
85665 +   E3_uint32            Dma_This_Packet_Size;          /* 0x00002b24 */
85666 +   E3_uint32            Dma_Tmp_Source;                /* 0x00002b28 */
85667 +   E3_uint32            Dma_Tmp_Dest;                  /* 0x00002b2c */
85668 +
85669 +   E3_Addr              Thread_SP_Save_Ptr;    /* points to the thread desched save word. */
85670 +   E3_uint32            Dma_Desc_Size_InProg;          /* 0x00002b34 */
85671 +
85672 +   E3_uint32            Thread_Desc_SP;                /* 0x00002b38 */
85673 +   E3_uint32            Thread_Desc_Context;           /* 0x00002b3c */
85674 +
85675 +   E3_uint32            uCode_TMP[0x10];               /* 0x00002b40 */
85676 +
85677 +   E3_uint32            TProc_NonSysCntx_FPtr;         /* 0x00002b80 */
85678 +   E3_uint32            TProc_NonSysCntx_BPtr;         /* 0x00002b84 */
85679 +   E3_uint32            TProc_SysCntx_FPtr;            /* 0x00002b88 */
85680 +   E3_uint32            TProc_SysCntx_BPtr;            /* 0x00002b8c */
85681 +   E3_uint32            DProc_NonSysCntx_FPtr;         /* 0x00002b90 */
85682 +   E3_uint32            DProc_NonSysCntx_BPtr;         /* 0x00002b94 */
85683 +   E3_uint32            DProc_SysCntx_FPtr;            /* 0x00002b98 */
85684 +   E3_uint32            DProc_SysCntx_BPtr;            /* 0x00002b9c */
85685 +
85686 +   E3_uint32            Input_Trap_Base;               /* 0x00002ba0 */
85687 +   E3_uint32            Input_Queue_Offset;            /* 0x00002ba4 */
85688 +   E3_uint32            CProc_TrapSave_Addr;           /* 0x00002ba8 */
85689 +   E3_uint32            Input_Queue_Addr;              /* 0x00002bac */
85690 +   E3_uint32            uCode_TMP10;                   /* 0x00002bb0 */
85691 +   E3_uint32            uCode_TMP11;                   /* 0x00002bb4 */
85692 +   E3_uint32            Event_Trace_Ptr;               /* 0x00002bb8 */
85693 +   E3_uint32            Event_Trace_Mask;              /* 0x00002bbc */
85694 +
85695 +   E3_ComPortEntry      DmaComQueue[3];                /* 0x00002bc0 */
85696 +
85697 +   E3_uint32            Event_Int_Queue_FPtr;          /* 0x00002bd8 */
85698 +   E3_uint32            Event_Int_Queue_BPtr;          /* 0x00002bdc */
85699 +
85700 +   E3_ComPortEntry      ThreadComQueue[2];             /* 0x00002be0 */
85701 +   E3_ComPortEntry      SetEventComQueue[2];           /* 0x00002bf0 */
85702 +
85703 +   E3_uint32            pad1[96];                      /* 0x00002c00 */
85704 +   E3_uint32            ComQueueStatus;                /* 0x00002d80 */
85705 +   E3_uint32            pad2[31];                      /* 0x00002d84 */
85706 +
85707 +/* These are the internal registers of the threads proc. */
85708 +   E3_uint32            Globals[8];                    /* 0x00002e00 */
85709 +   E3_uint32            Outs[8];
85710 +   E3_uint32            Locals[8];
85711 +   E3_uint32            Ins[8];
85712 +
85713 +   E3_uint32            pad3[16];
85714 +
85715 +   E3_uint32            IBufferReg[4];
85716 +
85717 +   E3_uint32            ExecuteNPC;
85718 +   E3_uint32            ExecutePC;
85719 +
85720 +   E3_uint32            StartPC;
85721 +   E3_uint32            pad4;
85722 +
85723 +   E3_uint32            StartnPC;
85724 +   E3_uint32            pad5;
85725 +
85726 +   E3_TrapBits          TrapBits;
85727 +   E3_DirtyBits                 DirtyBits;
85728 +   E3_uint64            LoadDataReg;
85729 +   E3_uint64            StoreDataReg;
85730 +
85731 +   E3_uint32            ECC_STATUS0;
85732 +   E3_uint32            ECC_STATUS1;
85733 +   E3_uint32            pad6[0xe];
85734 +
85735 +/* Pci slave port regs */
85736 +   E3_uint32            PciSlaveReadCache[0x10];
85737 +
85738 +   E3_uint32            Fault_Base_Ptr;
85739 +   E3_uint32            pad7;
85740 +   E3_uint32            Context_Ptr;
85741 +   E3_uint32            pad8;
85742 +   E3_uint32            Input_Context_Filter;      /* write only, No data */
85743 +   E3_uint32            Input_Context_Fil_Flush;   /* write only, No data */
85744 +   E3_CacheContReg      Cache_Control_Reg;
85745 +   E3_uint32            pad9;
85746 +
85747 +   E3_uint64            Tlb_Line_Value;
85748 +   
85749 +   E3_uint32            Walk_Datareg1;
85750 +   E3_uint32            Walk_VAddr_Tab_Base;
85751 +   E3_uint32            Walk_Datareg;
85752 +   E3_uint32            Walk_ContextReg;
85753 +   E3_uint32            Walk_FaultAddr;
85754 +   E3_uint32            Walk_EventAddr;
85755 +
85756 +/* outputers output cont ext registers. */
85757 +   E3_uint64            Dma_Route_012345_Context;
85758 +   E3_uint64            pad10;
85759 +   E3_uint64            Dma_Route_01234567;
85760 +   E3_uint64            Dma_Route_89ABCDEF;
85761 +
85762 +   E3_uint64            Thread_Route_012345_Context;
85763 +   E3_uint64            pad11;
85764 +   E3_uint64            Thread_Route_01234567;
85765 +   E3_uint64            Thread_Route_89ABCDEF;
85766 +} E3_DataBusMap;
85767 +
85768 +typedef volatile struct _E3_Regs
85769 +{
85770 +   E3_CacheSets                  Sets;                         /* 0x00000000 */
85771 +   E3_CacheTags                  Tags;                         /* 0x00002000 */
85772 +   E3_DataBusMap         Regs;                         /* 0x00002800 */
85773 +   E3_uint32             pad1[0x400];
85774 +   E3_User_Regs          URegs;
85775 +} E3_Regs;
85776 +
85777 +#define MAX_TRAPPED_TRANS      16
85778 +#define TRANS_DATA_WORDS       16
85779 +#define TRANS_DATA_BYTES       64
85780 +
85781 +/*
85782 + * Event interrupt
85783 + */
85784 +typedef volatile union _E3_EventInt
85785 +{
85786 +   E3_uint64    ForceAlign;
85787 +   struct {
85788 +       E3_uint32 IntCookie;
85789 +       E3_uint32 EventContext; /* Bits 16 to 28 */
85790 +    } s;
85791 +} E3_EventInt;
85792 +
85793 +#define GET_EVENT_CONTEXT(Ptr) ((Ptr->s.EventContext >> 16) & MAX_ROOT_CONTEXT_MASK)
85794 +
85795 +typedef volatile union _E3_ThreadQueue
85796 +{
85797 +   E3_uint64   ForceAlign;
85798 +   struct
85799 +   {
85800 +       E3_Addr  Thread;
85801 +#if defined(__LITTLE_ENDIAN__)
85802 +       E3_uint32 :16;          /* Bits 0  to 15 */
85803 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
85804 +       E3_uint32 :3;           /* Bits 29 to 31 */
85805 +#else
85806 +       E3_uint32 :3;           /* Bits 29 to 31 */
85807 +       E3_uint32 Context:13;   /* Bits 16 to 28 */
85808 +       E3_uint32 :16;          /* Bits 0  to 15 */
85809 +#endif
85810 +   } s;
85811 +} E3_ThreadQueue;
85812 +
85813 +typedef volatile union _E3_FaultStatusReg
85814 +{
85815 +   E3_uint32 Status;
85816 +   struct
85817 +   {
85818 +#if defined(__LITTLE_ENDIAN__)
85819 +      E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */
85820 +      E3_uint32 AccSize:4;     /* Access size. See below for different types. Bits 3 to 6 */
85821 +      E3_uint32 WrAcc:1;       /* Access was a write. Bit 7 */
85822 +      E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
85823 +      E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
85824 +      E3_uint32 RdLine:1;      /* Access was a dma read line. Bit 11 */
85825 +      E3_uint32 RdMult:1;      /* Access was a dma read multiple. Bit 12 */
85826 +      E3_uint32 Walking:1;     /* The fault occurred when walking. Bit 13 */
85827 +      E3_uint32 Level:2;       /* Page table level when the fault occurred. Bits 14 to 15 */
85828 +      E3_uint32 ProtFault:1;   /* A protection fault occurred. Bit 16 */
85829 +      E3_uint32 FaultPte:2;    /* Page table type when the fault occurred. Bits 17 to 18 */
85830 +      E3_uint32 AlignmentErr:1;        /* Address alignment did not match the access size. Bit 19 */
85831 +      E3_uint32 VProcSizeErr:1;        /* VProc number is out of range. Bit 20 */
85832 +      E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
85833 +      E3_uint32 :10;           /* Bits 22 to 31 */
85834 +#else
85835 +      E3_uint32 :10;           /* Bits 22 to 31 */
85836 +      E3_uint32 WalkBadData:1; /* Memory CRC error during a walk. Bit 21 */
85837 +      E3_uint32 VProcSizeErr:1;        /* VProc number is out of range. Bit 20 */
85838 +      E3_uint32 AlignmentErr:1;        /* Address alignment did not match the access size. Bit 19 */
85839 +      E3_uint32 FaultPte:2;    /* Page table type when the fault occured. Bit 17 */
85840 +      E3_uint32 ProtFault:1;   /* A protection fault occured. Bit 16 */
85841 +      E3_uint32 Level:2;       /* Page table level when the fault occued. Bits 14 to 15 */
85842 +      E3_uint32 Walking:1;     /* The fault occued when walking. Bit 13 */
85843 +      E3_uint32 RdMult:1;      /* Access was a dma read multiple. Bit 12 */
85844 +      E3_uint32 RdLine:1;      /* Access was a dma read line. Bit 11 */
85845 +      E3_uint32 BlkDataType:2; /* Data size used for endian flips. Bits 9 to 10 */
85846 +      E3_uint32 NonAllocAcc:1; /* Access was a cache non allocate type. Bit 8 */
85847 +      E3_uint32 WrAcc:1;       /* Access was a write. Bit 7 */
85848 +      E3_uint32 AccSize:4;     /* Access size. See below for different types. Bits 3 to 6 */
85849 +      E3_uint32 AccTypePerm:3; /* Access permission. See below. Bits 0 to 2 */
85850 +#endif
85851 +   } s;
85852 +} E3_FaultStatusReg;
85853 +
85854 +typedef union _E3_FaultSave
85855 +{
85856 +   E3_uint64            ForceAlign;
85857 +   struct {
85858 +      E3_FaultStatusReg         FSR;
85859 +      volatile E3_uint32 FaultContext;
85860 +      volatile E3_uint32 FaultAddress;
85861 +      volatile E3_uint32 EventAddress;
85862 +   } s;
85863 +} E3_FaultSave;
85864 +
85865 +/* MMU fault status reg bit positions. */
85866 +#define FSR_WritePermBit       0       /* 1=Write access perm, 0=Read access perm */
85867 +#define FSR_RemotePermBit      1       /* 1=Remote access perm, 0=local access perm */
85868 +#define FSR_EventPermBit       2       /* 1=Event access perm, 0=data access perm */
85869 +#define FSR_Size0Bit           3
85870 +#define FSR_Size1Bit           4
85871 +#define FSR_Size2Bit           5
85872 +#define FSR_Size3Bit           6
85873 +#define FSR_WriteAccBit                7       /* 1=Write access, 0=Read access. */
85874 +#define FSR_NonAllocBit                8       /* 1=Do not fill cache with this data */
85875 +#define FSR_BlkDataTy0Bit      9
85876 +#define FSR_BlkDataTy1Bit      10
85877 +#define FSR_ReadLineBit                11
85878 +#define FSR_ReadMultipleBit    12
85879 +
85880 +#define FSR_PermMask           (0xf << FSR_WritePermBit)
85881 +#define FSR_SizeMask           (0xf << FSR_Size0Bit)
85882 +#define FSR_AccTypeMask                (3 << FSR_WriteAccBit)
85883 +#define FSR_BlkDataTyMask      (3 << FSR_BlkDataTy0Bit)
85884 +#define FSR_PciAccTyMask       (3 << FSR_ReadLineBit)
85885 +#define FSR_Walking            (0x1 << 13)
85886 +#define FSR_Level_Mask         (0x3 << 14)
85887 +#define FSR_ProtFault          (0x1 << 16)
85888 +#define FSR_FaultPTEType       (0x2 << 17)
85889 +#define FSR_AddrSizeError      (0x1 << 19)
85890 +#define FSR_VProcSizeError     (0x1 << 20)
85891 +#define FSR_WalkBadData                (0x1 << 21)
85892 +
85893 +#define FSR_PermRead           0
85894 +#define FSR_PermWrite          1
85895 +#define FSR_PermRemoteRead     2
85896 +#define FSR_PermRemoteWrite    3
85897 +#define FSR_PermEventRd                4
85898 +#define FSR_PermEventWr                5
85899 +#define FSR_PermRemoteEventRd  6
85900 +#define FSR_PermRemoteEventWr  7
85901 +
85902 +/* AT size values for each access type */
85903 +#define FSR_Word               (0x0 << FSR_Size0Bit)
85904 +#define FSR_DWord              (0x1 << FSR_Size0Bit)
85905 +#define FSR_QWord              (0x2 << FSR_Size0Bit)
85906 +#define FSR_Block32            (0x3 << FSR_Size0Bit)
85907 +#define FSR_ReservedBlock      (0x6 << FSR_Size0Bit)
85908 +#define FSR_Block64            (0x7 << FSR_Size0Bit)
85909 +#define FSR_GetCntxFilter      (0x8 << FSR_Size0Bit)
85910 +#define FSR_QueueDWord         (0x9 << FSR_Size0Bit)
85911 +#define FSR_RouteFetch         (0xa << FSR_Size0Bit)
85912 +#define FSR_QueueBlock         (0xb << FSR_Size0Bit)
85913 +#define FSR_Block32PartWrite   (0xe << FSR_Size0Bit)
85914 +#define FSR_Block64PartWrite   (0xf << FSR_Size0Bit)
85915 +
85916 +#define FSR_AllocRead          (0 << FSR_WriteAccBit)
85917 +#define FSR_AllocWrite         (1 << FSR_WriteAccBit)
85918 +#define FSR_NonAllocRd         (2 << FSR_WriteAccBit)
85919 +#define FSR_NonAllocWr         (3 << FSR_WriteAccBit)
85920 +
85921 +#define FSR_TypeByte           (0 << FSR_BlkDataTy0Bit)
85922 +#define FSR_TypeHWord          (1 << FSR_BlkDataTy0Bit)
85923 +#define FSR_TypeWord           (2 << FSR_BlkDataTy0Bit)
85924 +#define FSR_TypeDWord          (3 << FSR_BlkDataTy0Bit)
85925 +
85926 +typedef union E3_TrTypeCntx
85927 +{
85928 +   E3_uint32 TypeContext;
85929 +   struct
85930 +   {
85931 +#if defined(__LITTLE_ENDIAN__)
85932 +      E3_uint32 Type:16;               /* Transaction type field */
85933 +      E3_uint32 Context:13;            /* Transaction context */
85934 +      E3_uint32 TypeCntxInvalid:1;     /* Bit  29 */
85935 +      E3_uint32 StatusRegValid:1;      /* Bit  30 */
85936 +      E3_uint32 LastTrappedTrans:1;    /* Bit  31 */
85937 +#else
85938 +      E3_uint32 LastTrappedTrans:1;    /* Bit  31 */
85939 +      E3_uint32 StatusRegValid:1;      /* Bit  30 */
85940 +      E3_uint32 TypeCntxInvalid:1;     /* Bit  29 */
85941 +      E3_uint32 Context:13;            /* Transaction context */
85942 +      E3_uint32 Type:16;               /* Transaction type field */
85943 +#endif
85944 +   } s;
85945 +} E3_TrTypeCntx;
85946 +
85947 +#define GET_TRAP_TYPE(Ptr)    (Ptr.TypeContext & 0xfff)
85948 +#define GET_TRAP_CONTEXT(Ptr) ((Ptr.TypeContext >> 16) & 0x1fff)
85949 +
85950 +/* Words have been swapped for big endian access when fetched with dword access from elan.*/
85951 +typedef union _E3_IprocTrapHeader
85952 +{
85953 +   E3_uint64   forceAlign;
85954 +
85955 +   struct
85956 +   {
85957 +      E3_TrTypeCntx     TrTypeCntx;
85958 +      E3_uint32                 TrAddr;
85959 +      E3_uint32                 TrData0;
85960 +      union
85961 +      {
85962 +        E3_IProcStatus_Reg u_IProcStatus;
85963 +        E3_uint32          u_TrData1;
85964 +      } ipsotd;
85965 +   } s;
85966 +} E3_IprocTrapHeader;
85967 +
85968 +#define IProcTrapStatus ipsotd.u_IProcStatus
85969 +#define TrData1                ipsotd.u_TrData1
85970 +
85971 +typedef struct E3_IprocTrapData
85972 +{
85973 +   E3_uint32 TrData[TRANS_DATA_WORDS];
85974 +} E3_IprocTrapData;
85975 +
85976 +/*
85977 + * 64 kbytes of elan local memory. Must be aligned on a 64k boundary
85978 + */
85979 +#define E3_NonSysCntxQueueSize 0x400
85980 +#define E3_SysCntxQueueSize    0x100
85981 +
85982 +typedef struct _E3_TrapAndQueue
85983 +{
85984 +   E3_DMA              NonSysCntxDmaQueue[E3_NonSysCntxQueueSize];                     /* 0x000000 */
85985 +   E3_DMA              SysCntxDmaQueue[E3_SysCntxQueueSize];                           /* 0x008000 */
85986 +   E3_EventInt         EventIntQueue[E3_NonSysCntxQueueSize];                          /* 0x00A000 */
85987 +   E3_ThreadQueue      NonSysCntxThreadQueue[E3_NonSysCntxQueueSize];                  /* 0x00C000 */  
85988 +   E3_ThreadQueue      SysCntxThreadQueue[E3_SysCntxQueueSize];                        /* 0x00E000 */
85989 +   E3_FaultSave                IProcSysCntx;                                                   /* 0x00E800 */
85990 +   E3_Addr             Thread_SP_Save;                                                 /* 0x00E810 */
85991 +   E3_uint32           dummy0[3];                                                      /* 0x00E814 */
85992 +   E3_FaultSave                ThreadProcData;                                                 /* 0x00E820 */
85993 +   E3_FaultSave                ThreadProcInst;                                                 /* 0x00E830 */
85994 +   E3_FaultSave                dummy1[2];                                                      /* 0x00E840 */  
85995 +   E3_FaultSave                ThreadProcOpen;                                                 /* 0x00E860 */
85996 +   E3_FaultSave                dummy2;                                                         /* 0x00E870 */
85997 +   E3_FaultSave                IProcNonSysCntx;                                                /* 0x00E880 */
85998 +   E3_FaultSave                DProc;                                                          /* 0x00E890 */
85999 +   E3_FaultSave                CProc;                                                          /* 0x00E8A0 */
86000 +   E3_FaultSave                TProc;                                                          /* 0x00E8B0 */
86001 +   E3_FaultSave                DProcData0;                                                     /* 0x00E8C0 */
86002 +   E3_FaultSave                DProcData1;                                                     /* 0x00E8D0 */
86003 +   E3_FaultSave                DProcData2;                                                     /* 0x00E8E0 */
86004 +   E3_FaultSave                DProcData3;                                                     /* 0x00E8F0 */
86005 +   E3_uint32           dummy3[0xc0];                                                   /* 0x00E900 */
86006 +   E3_IprocTrapHeader  VCh0_C0_TrHead[MAX_TRAPPED_TRANS];
86007 +   E3_IprocTrapHeader  VCh0_NonC0_TrHead[MAX_TRAPPED_TRANS];
86008 +   E3_IprocTrapHeader  VCh1_C0_TrHead[MAX_TRAPPED_TRANS];
86009 +   E3_IprocTrapHeader  VCh1_NonC0_TrHead[MAX_TRAPPED_TRANS];
86010 +   E3_IprocTrapData    VCh0_C0_TrData[MAX_TRAPPED_TRANS];
86011 +   E3_IprocTrapData    VCh0_NonC0_TrData[MAX_TRAPPED_TRANS];
86012 +   E3_IprocTrapData    VCh1_C0_TrData[MAX_TRAPPED_TRANS];
86013 +   E3_IprocTrapData    VCh1_NonC0_TrData[MAX_TRAPPED_TRANS];
86014 +   E3_uint64           DmaOverflowQueueSpace[0x1000];
86015 +   E3_uint64           ThreadOverflowQueueSpace[0x800];
86016 +   E3_uint64           EventOverflowQueueSpace[0x800];
86017 +} E3_TrapAndQueue;
86018 +
86019 +
86020 +typedef struct _E3_ContextControlBlock 
86021 +{
86022 +   E3_uint32   rootPTP;
86023 +   E3_uint32   filter;
86024 +   E3_uint32   VPT_ptr;
86025 +   E3_uint32   VPT_mask;
86026 +} E3_ContextControlBlock;
86027 +
86028 +#define E3_CCB_CNTX0           (0x20000000)
86029 +#define E3_CCB_DISCARD_ALL     (0x40000000)
86030 +#define E3_CCB_ACKOK_ALL       (0x80000000)
86031 +#define E3_CCB_MASK            (0xc0000000)
86032 +
86033 +#define E3_NUM_CONTEXT_0       (0x20)
86034 +
86035 +/* Macros to manipulate event queue pointers */
86036 +/*     generate index in EventIntQueue */
86037 +#define E3_EVENT_INTQ_INDEX(fptr)      (((fptr) & 0x1fff) >> 3)
86038 +/*     generate next fptr */
86039 +#define E3_EVENT_INTQ_NEXT(fptr)       ((((fptr) + 8) & ~0x4000) | 0x2000)
86040 +
86041 +
86042 +#endif /* notdef _ELAN3_ELANREGS_H */
86043 +
86044 +/*
86045 + * Local variables:
86046 + * c-file-style: "stroustrup"
86047 + * End:
86048 + */
86049 diff -urN clean/include/elan3/elansyscall.h linux-2.6.9/include/elan3/elansyscall.h
86050 --- clean/include/elan3/elansyscall.h   1969-12-31 19:00:00.000000000 -0500
86051 +++ linux-2.6.9/include/elan3/elansyscall.h     2004-06-07 09:50:06.000000000 -0400
86052 @@ -0,0 +1,124 @@
86053 +/*
86054 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
86055 + *
86056 + *    For licensing information please see the supplied COPYING file
86057 + *
86058 + */
86059 +
86060 +#ifndef __ELAN3_ELANSYSCALL_H
86061 +#define __ELAN3_ELANSYSCALL_H
86062 +
86063 +#ident "$Id: elansyscall.h,v 1.34 2004/06/07 13:50:06 mike Exp $"
86064 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elansyscall.h,v $*/
86065 +
86066 +#ifdef __cplusplus
86067 +extern "C" {
86068 +#endif
86069 +
86070 +#ifndef _ASM
86071 +
86072 +typedef struct sys_word_item
86073 +{
86074 +    struct sys_word_item *Next;
86075 +    E3_uint32            Value;
86076 +} SYS_WORD_ITEM;
86077 +
86078 +typedef struct sys_block_item
86079 +{
86080 +    struct sys_block_item *Next;
86081 +    E3_uint32            *Pointer;
86082 +} SYS_BLOCK_ITEM;
86083 +
86084 +typedef struct sys_swap_space
86085 +{
86086 +    int                 Magic;
86087 +    void       *ItemListsHead[MAX_LISTS];
86088 +    void       **ItemListsTailp[MAX_LISTS];
86089 +} SYS_SWAP_SPACE;
86090 +
86091 +typedef struct sys_exception
86092 +{
86093 +    int                        Type;
86094 +    int                        Proc;
86095 +    u_long             Res;
86096 +    u_long             Value;
86097 +    E3_FaultSave_BE    FaultArea;
86098 +    
86099 +    union
86100 +    {
86101 +       DMA_TRAP        Dma;
86102 +       THREAD_TRAP     Thread;
86103 +       COMMAND_TRAP    Command;
86104 +       INPUT_TRAP      Input;
86105 +    }                  Union;
86106 +} SYS_EXCEPTION;
86107 +
86108 +typedef struct sys_exception_space
86109 +{
86110 +    struct sys_exception_space *Next;
86111 +    int                                Magic;
86112 +    int                                Front;
86113 +    int                                Back;
86114 +    int                                Count;
86115 +    int                                Overflow;
86116 +    SYS_EXCEPTION              Exceptions[1];
86117 +} SYS_EXCEPTION_SPACE;
86118 +
86119 +#ifdef __KERNEL__
86120 +
86121 +typedef struct sys_ctxt
86122 +{
86123 +    SYS_SWAP_SPACE      *Swap;
86124 +    SYS_EXCEPTION_SPACE *Exceptions;
86125 +    kmutex_t            Lock;
86126 +
86127 +    spinlock_t          WaitLock;
86128 +    kcondvar_t          NetworkErrorWait;
86129 +
86130 +    int                         Armed;
86131 +    int                         Backoff;
86132 +    long                Time;
86133 +
86134 +    u_long              Flags;
86135 +    int                  signal;
86136 +
86137 +    EVENT_COOKIE_TABLE  *Table;
86138 +} SYS_CTXT;
86139 +
86140 +extern SYS_CTXT *sys_init (ELAN3_CTXT *ctxt);
86141 +extern int       sys_waitevent (ELAN3_CTXT *ctxt, E3_Event *event);
86142 +extern void      sys_addException (SYS_CTXT *sctx, int type, int proc, caddr_t ptr, int size, 
86143 +                                  E3_FaultSave_BE *, u_long res, u_long value);
86144 +extern int       sys_getException (SYS_CTXT *sctx, SYS_EXCEPTION *ex);
86145 +
86146 +/* returns -ve error or ELAN_CAP_OK or ELAN_CAP_RMS */
86147 +/* use = ELAN_USER_ATTACH, ELAN_USER_P2P, ELAN_USER_BROADCAST */
86148 +extern int  elan3_validate_cap (ELAN3_DEV *dev, ELAN_CAPABILITY *cap ,int use);
86149 +
86150 +#endif /* __KERNEL__ */
86151 +
86152 +#endif /* _ASM */
86153 +
86154 +/* values for "Flags" */
86155 +#define ELAN3_SYS_FLAG_DMA_BADVP               1
86156 +#define ELAN3_SYS_FLAG_THREAD_BADVP    2
86157 +#define ELAN3_SYS_FLAG_DMAFAIL         4
86158 +#define ELAN3_SYS_FLAG_NETERR          8
86159 +
86160 +#define SYS_SWAP_MAGIC         0xB23C52DF
86161 +#define SYS_EXCEPTION_MAGIC    0xC34D63E0
86162 +
86163 +#define EXCEPTION_GLOBAL_STRING        "elan3_exceptions"
86164 +#define EXCEPTION_ABORT_STRING  "elan3_abortstring"
86165 +
86166 +#ifdef __cplusplus
86167 +}
86168 +#endif
86169 +
86170 +#endif /* __ELAN3_ELANSYSCALL_H */
86171 +
86172 +/*
86173 + * Local variables:
86174 + * c-file-style: "stroustrup"
86175 + * End:
86176 + */
86177 diff -urN clean/include/elan3/elanuregs.h linux-2.6.9/include/elan3/elanuregs.h
86178 --- clean/include/elan3/elanuregs.h     1969-12-31 19:00:00.000000000 -0500
86179 +++ linux-2.6.9/include/elan3/elanuregs.h       2003-09-24 09:57:24.000000000 -0400
86180 @@ -0,0 +1,295 @@
86181 +/*
86182 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
86183 + *
86184 + *    For licensing information please see the supplied COPYING file
86185 + *
86186 + */
86187 +
86188 +#ifndef __ELAN3_ELANUREGS_H
86189 +#define __ELAN3_ELANUREGS_H
86190 +
86191 +#ident "$Id: elanuregs.h,v 1.10 2003/09/24 13:57:24 david Exp $"
86192 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanuregs.h,v $*/
86193 +
86194 +#ifdef __cplusplus
86195 +extern "C" {
86196 +#endif
86197 +
86198 +/*
86199 + * Statistic control reg values
86200 + * Each 4-bit nibble of the control word specifies what statistic
86201 + * is to be recorded in each of the 8 statistic counters
86202 + */
86203 +
86204 +/* Count reg 0 */
86205 +#define STC_INPUT_TRANSACTIONS         0
86206 +#define STP_DMA_EOP_WAIT_ACK           1
86207 +#define STP_THREAD_RUNNING             2
86208 +#define STP_UCODE_WAIT_MEM             3
86209 +#define STC_CACHE_WRITE_BACKS          4
86210 +#define STC_PCI_SLAVE_READS            5
86211 +#define STC_REG0_UNUSED6               6
86212 +#define STP_REG0_UNUSED7               7
86213 +
86214 +#define STATS_REG0_NAMES {             \
86215 +         "STC_INPUT_TRANSACTIONS",     \
86216 +         "STP_DMA_EOP_WAIT_ACK",       \
86217 +         "STP_THREAD_RUNNING",         \
86218 +         "STP_UCODE_WAIT_MEM",         \
86219 +         "STC_CACHE_WRITE_BACKS",      \
86220 +         "STC_PCI_SLAVE_READS",        \
86221 +         "STC_REG0_UNUSED6",           \
86222 +         "STP_REG0_UNUSED7"            \
86223 +}
86224 +
86225 +/* Count reg 1 */
86226 +#define STC_INPUT_WRITE_BLOCKS         (0 << 4)
86227 +#define STP_DMA_DATA_TRANSMITTING      (1 << 4)
86228 +#define STP_THEAD_WAITING_INST         (2 << 4)
86229 +#define STC_REG1_UNUSED3               (3 << 4)
86230 +#define STP_FETCHING_ROUTES            (4 << 4)
86231 +#define STC_REG1_UNUSED5               (5 << 4)
86232 +#define STC_PCI_SLAVE_WRITES           (6 << 4)
86233 +#define STP_PCI_SLAVE_READ_WAITING     (7 << 4)
86234 +
86235 +#define STATS_REG1_NAMES {             \
86236 +      "STC_INPUT_WRITE_BLOCKS",                \
86237 +         "STP_DMA_DATA_TRANSMITTING",  \
86238 +         "STP_THEAD_WAITING_INST",     \
86239 +         "STC_REG1_UNUSED3",           \
86240 +         "STP_FETCHING_ROUTES",        \
86241 +         "STC_REG1_UNUSED5",           \
86242 +         "STC_PCI_SLAVE_WRITES",       \
86243 +         "STP_PCI_SLAVE_READ_WAITING"  \
86244 +}
86245 +
86246 +/* Count reg 2 */
86247 +#define STC_INPUT_PKTS                 (0 << 8)
86248 +#define STP_DMA_WAITING_MEM            (1 << 8)
86249 +#define STP_THREAD_WAIT_OPEN_PKT       (2 << 8)
86250 +#define STC_REG2_UNUSED3               (3 << 8)
86251 +#define STC_ROUTE_FETCHES              (4 << 8)
86252 +#define STC_CACHE_NON_ALLOC_MISSES     (5 << 8)
86253 +#define STC_REG2_UNUSED6               (6 << 8)
86254 +#define STP_PCI_SLAVE_WRITE_WAITING    (7 << 8)
86255 +
86256 +#define STATS_REG2_NAMES {             \
86257 +      "STC_INPUT_PKTS",                        \
86258 +         "STP_DMA_WAITING_MEM",        \
86259 +         "STP_THREAD_WAIT_OPEN_PKT",   \
86260 +         "STC_REG2_UNUSED3",           \
86261 +         "STC_ROUTE_FETCHES",          \
86262 +         "STC_CACHE_NON_ALLOC_MISSES", \
86263 +         "STC_REG2_UNUSED6",           \
86264 +         "STP_PCI_SLAVE_WRITE_WAITING" \
86265 +}
86266 +
86267 +/* Count reg 3 */
86268 +#define STC_INPUT_PKTS_REJECTED                (0 << 12)
86269 +#define STP_DMA_WAIT_NETWORK_BUSY      (1 << 12)
86270 +#define STP_THREAD_WAIT_PACK           (2 << 12)
86271 +#define STP_UCODE_BLOCKED_UCODE                (3 << 12)
86272 +#define STC_TLB_HITS                   (4 << 12)
86273 +#define STC_REG3_UNUSED5               (5 << 12)
86274 +#define STC_PCI_MASTER_READS           (6 << 12)
86275 +#define STP_PCI_MASTER_WRITE_WAITING   (7 << 12)
86276 +
86277 +#define STATS_REG3_NAMES {             \
86278 +      "STC_INPUT_PKTS_REJECTED",       \
86279 +         "STP_DMA_WAIT_NETWORK_BUSY",  \
86280 +         "STP_THREAD_WAIT_PACK",       \
86281 +         "STP_UCODE_BLOCKED_UCODE",    \
86282 +         "STC_TLB_HITS",               \
86283 +         "STC_REG3_UNUSED5",           \
86284 +         "STC_PCI_MASTER_READS",       \
86285 +         "STP_PCI_MASTER_WRITE_WAITING"\
86286 +}
86287 +
86288 +/* Count reg 4 */
86289 +#define STP_INPUT_DATA_TRANSMITTING    (0 << 16)
86290 +#define STC_DMA_NON_CTX0_PKTS          (1 << 16)
86291 +#define STP_THREAD_EOP_WAIT_ACK                (2 << 16)
86292 +#define STP_UCODE_DPROC_RUNNING                (3 << 16)
86293 +#define STC_TLB_MEM_WALKS              (4 << 16)
86294 +#define STC_REG4_UNUSED5               (5 << 16)
86295 +#define STC_PCI_MASTER_WRITES          (6 << 16)
86296 +#define STP_PCI_MASTER_READ_WAITING    (7 << 16)
86297 +
86298 +#define STATS_REG4_NAMES {             \
86299 +      "STP_INPUT_DATA_TRANSMITTING",   \
86300 +         "STC_DMA_NON_CTX0_PKTS",      \
86301 +         "STP_THREAD_EOP_WAIT_ACK",    \
86302 +         "STP_UCODE_DPROC_RUNNING",    \
86303 +         "STC_TLB_MEM_WALKS",          \
86304 +         "STC_REG4_UNUSED5",           \
86305 +         "STC_PCI_MASTER_WRITES",      \
86306 +         "STP_PCI_MASTER_READ_WAITING" \
86307 +}
86308 +
86309 +/* Count reg 5 */
86310 +#define STP_INPUT_WAITING_NETWORK_DATA (0 << 20)
86311 +#define STC_DMA_NON_CTX0_PKTS_REJECTED (1 << 20)
86312 +#define STP_THREAD_WAITING_DATA                (2 << 20)
86313 +#define STP_UCODE_CPROC_RUNNING                (3 << 20)
86314 +#define STP_THREAD_TRANSMITTING_DATA   (4 << 20)
86315 +#define STP_PCI_WAITING_MAIN           (5 << 20)
86316 +#define STC_REG5_UNUSED6               (6 << 20)
86317 +#define STC_REG5_UNUSED7               (7 << 20)
86318 +
86319 +#define STATS_REG5_NAMES {                     \
86320 +      "STP_INPUT_WAITING_NETWORK_DATA",                \
86321 +         "STC_DMA_NON_CTX0_PKTS_REJECTED",     \
86322 +         "STP_THREAD_WAITING_DATA",            \
86323 +         "STP_UCODE_CPROC_RUNNING",            \
86324 +         "STP_THREAD_TRANSMITTING_DATA",       \
86325 +         "STP_PCI_WAITING_MAIN",               \
86326 +         "STC_REG5_UNUSED6",                   \
86327 +         "STC_REG5_UNUSED7"                    \
86328 +}
86329 +
86330 +/* Count reg 6 */
86331 +#define STP_INPUT_WAITING_MEMORY       (0 << 24)
86332 +#define STC_DMA_CTX0_PKTS              (1 << 24)
86333 +#define STP_THREAD_WAITING_MEMORY      (2 << 24)
86334 +#define STP_UCODE_TPROC_RUNNING                (3 << 24)
86335 +#define STC_CACHE_HITS                 (4 << 24)
86336 +#define STP_PCI_WAITING_ELAN           (5 << 24)
86337 +#define STC_REG6_UNUSED4               (6 << 24)
86338 +#define STC_REG6_UNUSED7               (7 << 24)
86339 +
86340 +#define STATS_REG6_NAMES {             \
86341 +      "STP_INPUT_WAITING_MEMORY",      \
86342 +         "STC_DMA_CTX0_PKTS",          \
86343 +         "STP_THREAD_WAITING_MEMORY",  \
86344 +         "STP_UCODE_TPROC_RUNNING",    \
86345 +         "STC_CACHE_HITS",             \
86346 +         "STP_PCI_WAITING_ELAN",       \
86347 +         "STC_REG6_UNUSED4",           \
86348 +         "STC_REG6_UNUSED7"            \
86349 +}
86350 +
86351 +/* Count reg 7 */
86352 +#define STC_INPUT_CTX_FILTER_FILL      (0 << 28)       
86353 +#define STC_DMA_CTX0_PKTS_REJECTED     (1 << 28)
86354 +#define STP_THREAD_WAIT_NETWORK_BUSY   (2 << 28)
86355 +#define STP_UCODE_IPROC_RUNNING                (3 << 28)
86356 +#define STP_TLB_MEM_WALKING            (4 << 28)
86357 +#define STC_CACHE_ALLOC_MISSES         (5 << 28)
86358 +#define STP_PCI_DATA_TRANSFER          (6 << 28)
86359 +#define STC_REG7_UNUSED7               (7 << 28)
86360 +
86361 +#define STATS_REG7_NAMES {             \
86362 +      "STC_INPUT_CTX_FILTER_FILL",     \
86363 +         "STC_DMA_CTX0_PKTS_REJECTED", \
86364 +         "STP_THREAD_WAIT_NETWORK_BUSY",\
86365 +         "STP_UCODE_IPROC_RUNNING",    \
86366 +         "STP_TLB_MEM_WALKING",        \
86367 +         "STC_CACHE_ALLOC_MISSES",     \
86368 +         "STP_PCI_DATA_TRANSFER",      \
86369 +         "STC_REG7_UNUSED7"            \
86370 +}
86371 +
86372 +#define STATS_REG_NAMES { \
86373 +    STATS_REG0_NAMES, \
86374 +    STATS_REG1_NAMES, \
86375 +    STATS_REG2_NAMES, \
86376 +    STATS_REG3_NAMES, \
86377 +    STATS_REG4_NAMES, \
86378 +    STATS_REG5_NAMES, \
86379 +    STATS_REG6_NAMES, \
86380 +    STATS_REG7_NAMES, \
86381 +}
86382 +
86383 +extern const char *elan3_stats_names[8][8];
86384 +
86385 +#define ELAN3_STATS_NAME(COUNT, CONTROL) (elan3_stats_names[(COUNT)][(CONTROL) & 7])
86386 +
86387 +typedef volatile union e3_StatsControl
86388 +{
86389 +   E3_uint32 StatsControl;
86390 +   struct
86391 +   {
86392 +#if defined(__LITTLE_ENDIAN__)
86393 +      E3_uint32 StatCont0:4;
86394 +      E3_uint32 StatCont1:4;
86395 +      E3_uint32 StatCont2:4;
86396 +      E3_uint32 StatCont3:4;
86397 +      E3_uint32 StatCont4:4;
86398 +      E3_uint32 StatCont5:4;
86399 +      E3_uint32 StatCont6:4;
86400 +      E3_uint32 StatCont7:4;
86401 +#else
86402 +      E3_uint32 StatCont7:4;
86403 +      E3_uint32 StatCont6:4;
86404 +      E3_uint32 StatCont5:4;
86405 +      E3_uint32 StatCont4:4;
86406 +      E3_uint32 StatCont3:4;
86407 +      E3_uint32 StatCont2:4;
86408 +      E3_uint32 StatCont1:4;
86409 +      E3_uint32 StatCont0:4;
86410 +#endif
86411 +   } s;
86412 +} E3_StatsControl;
86413 +
86414 +typedef volatile union e3_StatsCount
86415 +{
86416 +   E3_uint64    ClockStat; 
86417 +   struct
86418 +   {
86419 +       E3_uint32 ClockLSW;     /* read only */
86420 +       E3_uint32 StatsCount;
86421 +   } s;
86422 +} E3_StatsCount;
86423 +
86424 +typedef volatile union e3_clock
86425 +{
86426 +   E3_uint64 NanoSecClock;
86427 +   struct
86428 +   {
86429 +      E3_uint32 ClockLSW;
86430 +      E3_uint32 ClockMSW;
86431 +   } s;
86432 +} E3_Clock;
86433 +#define E3_TIME( X ) ((X).NanoSecClock)
86434 +
86435 +typedef volatile struct _E3_User_Regs
86436 +{
86437 +   E3_StatsCount       StatCounts[8];
86438 +   E3_StatsCount       InstCount;
86439 +   E3_uint32           pad0;
86440 +   E3_StatsControl     StatCont;
86441 +   E3_Clock            Clock;
86442 +   E3_uint32           pad1[0x7ea];
86443 +} E3_User_Regs;
86444 +
86445 +typedef volatile struct _E3_CommandPort 
86446 +{
86447 +   E3_Addr             PutDma;         /* 0x000 */
86448 +   E3_uint32           Pad1;
86449 +   E3_Addr             GetDma;         /* 0x008 */
86450 +   E3_uint32           Pad2;
86451 +   E3_Addr             RunThread;      /* 0x010 */
86452 +   E3_uint32           Pad3[3];
86453 +   E3_Addr             WaitEvent0;     /* 0x020 */
86454 +   E3_uint32           Pad4;
86455 +   E3_Addr             WaitEvent1;     /* 0x028 */
86456 +   E3_uint32           Pad5;
86457 +   E3_Addr             SetEvent;       /* 0x030 */
86458 +   E3_uint32           Pad6[3];
86459 +   E3_uint32           Pad7[0x7f0];    /* Fill out to an 8K page */
86460 +} E3_CommandPort;
86461 +/* Should have the new structures for the top four pages of the elan3 space */
86462 +
86463 +#define E3_COMMANDPORT_SIZE    (sizeof (E3_CommandPort))
86464 +
86465 +#ifdef __cplusplus
86466 +}
86467 +#endif
86468 +
86469 +#endif /* __ELAN3_ELANUREGS_H */
86470 +
86471 +/*
86472 + * Local variables:
86473 + * c-file-style: "stroustrup"
86474 + * End:
86475 + */
86476 diff -urN clean/include/elan3/elanvp.h linux-2.6.9/include/elan3/elanvp.h
86477 --- clean/include/elan3/elanvp.h        1969-12-31 19:00:00.000000000 -0500
86478 +++ linux-2.6.9/include/elan3/elanvp.h  2004-06-18 05:28:06.000000000 -0400
86479 @@ -0,0 +1,165 @@
86480 +/*
86481 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
86482 + *
86483 + *    For licensing information please see the supplied COPYING file
86484 + *
86485 + */
86486 +
86487 +#ifndef _ELAN3_ELANVP_H
86488 +#define _ELAN3_ELANVP_H
86489 +
86490 +#ident "$Id: elanvp.h,v 1.45 2004/06/18 09:28:06 mike Exp $"
86491 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/elanvp.h,v $ */
86492 +
86493 +#include <elan3/e3types.h>
86494 +#include <elan/bitmap.h>
86495 +#include <elan/capability.h>
86496 +
86497 +#ifdef __cplusplus
86498 +extern "C" {
86499 +#endif
86500 +
86501 +/*
86502 + * Context number allocation.
86503 + * [0-31]      system contexts
86504 + * [32-63]     hardware test
86505 + * [64-1023]   available
86506 + * [1024-2047] RMS allocatable
86507 + * [2048-4095] kernel comms data contexts
86508 + */
86509 +#define ELAN3_KCOMM_CONTEXT_NUM                0x001                   /* old kernel comms context (system) */
86510 +#define ELAN3_CM_CONTEXT_NUM           0x002                   /* new cluster member ship comms context (system) */
86511 +#define ELAN3_MRF_CONTEXT_NUM          0x003                   /* multi-rail kernel comms context */
86512 +#define ELAN3_DMARING_BASE_CONTEXT_NUM 0x010                   /* 16 contexts for dma ring issue (system) */
86513 +#define ELAN3_DMARING_TOP_CONTEXT_NUM  0x01f
86514 +
86515 +#define ELAN3_HWTEST_BASE_CONTEXT_NUM  0x020                   /* reserved for hardware test */
86516 +#define ELAN3_HWTEST_TOP_CONTEXT_NUM   0x03f
86517 +
86518 +#define ELAN3_KCOMM_BASE_CONTEXT_NUM   0x800                   /* kernel comms data transfer contexts */
86519 +#define ELAN3_KCOMM_TOP_CONTEXT_NUM    0xfff
86520 +
86521 +#define ELAN3_HWTEST_CONTEXT(ctx)      ((ctx) >= ELAN3_HWTEST_BASE_CONTEXT_NUM && \
86522 +                                        (ctx) <= ELAN3_HWTEST_TOP_CONTEXT_NUM)    
86523 +
86524 +#define ELAN3_SYSTEM_CONTEXT(ctx)      (((ctx) & SYS_CONTEXT_BIT) != 0 || \
86525 +                                        (ctx) < E3_NUM_CONTEXT_0 || \
86526 +                                        (ctx) >= ELAN3_KCOMM_BASE_CONTEXT_NUM)
86527 +
86528 +/* Maximum number of virtual processes */
86529 +#define ELAN3_MAX_VPS          (16384)
86530 +
86531 +#define ELAN3_INVALID_PROCESS  (0x7fffffff)            /* A GUARANTEED invalid process # */
86532 +#define ELAN3_INVALID_NODE     (0xFFFF)
86533 +#define ELAN3_INVALID_CONTEXT  (0xFFFF)
86534 +
86535 +
86536 +
86537 +#if defined(__KERNEL__) && !defined(__ELAN3__)
86538 +
86539 +/*
86540 + * Contexts are accessible via Elan capabilities,
86541 + * for each context that can be "attached" to there
86542 + * is a ELAN3_CTXT_INFO structure created by its
86543 + * "owner".  This also "remembers" all remote 
86544 + * segments that have "blazed" a trail to it.
86545 + *
86546 + * If the "owner" goes away the soft info is 
86547 + * destroyed when it is no longer "attached" or 
86548 + * "referenced" by a remote segment.
86549 + *
86550 + * If the owner changes the capability, then 
86551 + * the soft info must be not "referenced" or 
86552 + * "attached" before a new process can "attach"
86553 + * to it.
86554 + */
86555 +
86556 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::InfoLock,
86557 +                         elan3_info::Next elan3_info::Prev elan3_info::Device elan3_info::Owner
86558 +                         elan3_info::Capability elan3_info::AttachedCapability elan3_info::Context))
86559 +_NOTE(MUTEX_PROTECTS_DATA(elan3_dev::IntrLock,
86560 +                         elan3_info::Nacking elan3_info::Disabled))
86561 +_NOTE(DATA_READABLE_WITHOUT_LOCK(elan3_info::Context elan3_info::Device elan3_info::Capability))
86562 +
86563 +#endif /* __KERNEL__ */
86564 +
86565 +#define LOW_ROUTE_PRIORITY     0
86566 +#define HIGH_ROUTE_PRIORITY    1
86567 +
86568 +#define DEFAULT_ROUTE_TIMEOUT  3
86569 +#define DEFAULT_ROUTE_PRIORITY LOW_ROUTE_PRIORITY
86570 +
86571 +
86572 +/* a small route is 4 flits (8 bytes), a big route  */
86573 +/* is 8 flits (16 bytes) - each packed route is 4 bits */
86574 +/* so giving us a maximum of 28 as flit0 does not contain */
86575 +/* packed routes */
86576 +#define MAX_FLITS              8
86577 +#define MAX_PACKED             28
86578 +
86579 +/* bit definitions for 64 bit route pointer */
86580 +#define ROUTE_VALID            (1ULL << 63)
86581 +#define ROUTE_PTR              (1ULL << 62)
86582 +#define ROUTE_CTXT_SHIFT       48
86583 +#define ROUTE_PTR_MASK         ((1ull << ROUTE_CTXT_SHIFT)-1)
86584 +#define ROUTE_GET_CTXT          ((VAL >> ROUTE_CTXT_SHIFT) & 0x3fff )
86585 +
86586 +#define SMALL_ROUTE(flits, context)    (((E3_uint64) (flits)[0] <<  0) | ((E3_uint64) (flits)[1] << 16) | \
86587 +                                        ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (context) << ROUTE_CTXT_SHIFT) | \
86588 +                                        ROUTE_VALID)
86589 +
86590 +#define BIG_ROUTE_PTR(paddr, context)  ((E3_uint64) (paddr) | ((E3_uint64) context << ROUTE_CTXT_SHIFT) | ROUTE_VALID | ROUTE_PTR)
86591 +
86592 +#define BIG_ROUTE0(flits)              (((E3_uint64) (flits)[0] <<  0) | ((E3_uint64) (flits)[1] << 16) | \
86593 +                                        ((E3_uint64) (flits)[2] << 32) | ((E3_uint64) (flits)[3] << 48))
86594 +#define BIG_ROUTE1(flits)              (((E3_uint64) (flits)[4] <<  0) | ((E3_uint64) (flits)[5] << 16) | \
86595 +                                        ((E3_uint64) (flits)[6] << 32) | ((E3_uint64) (flits)[7] << 48))
86596 +
86597 +
86598 +/* defines for first flit of a route */
86599 +#define FIRST_HIGH_PRI         (1 << 15)
86600 +#define FIRST_AGE(Val)         ((Val) << 11)
86601 +#define FIRST_TIMEOUT(Val)     ((Val) << 9)
86602 +#define FIRST_PACKED(X)                ((X) << 7)
86603 +#define FIRST_ROUTE(Val)       (Val)
86604 +#define FIRST_ADAPTIVE         (0x30)
86605 +#define FIRST_BCAST_TREE       (0x20)
86606 +#define FIRST_MYLINK           (0x10)
86607 +#define FIRST_BCAST(Top, Bot)  (0x40 | ((Top) << 3) | (Bot))
86608 +
86609 +/* defines for 3 bit packed entries for subsequent flits */
86610 +#define PACKED_ROUTE(Val)      (8 | (Val))
86611 +#define PACKED_ADAPTIVE                (3)
86612 +#define PACKED_BCAST_TREE      (2)
86613 +#define PACKED_MYLINK          (1)
86614 +#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
86615 +#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
86616 +
86617 +/* ---------------------------------------------------------- 
86618 + * elan3_route functions 
86619 + * return ELAN3_ROUTE_xxx codes
86620 + * ---------------------------------------------------------- */
86621 +
86622 +#define ELAN3_ROUTE_SUCCESS        (0x00)
86623 +#define ELAN3_ROUTE_SYSCALL_FAILED (0x01)
86624 +#define ELAN3_ROUTE_INVALID        (0x02)
86625 +#define ELAN3_ROUTE_TOO_LONG       (0x04)
86626 +#define ELAN3_ROUTE_LOAD_FAILED    (0x08)
86627 +#define ELAN3_ROUTE_PROC_RANGE     (0x0f)
86628 +#define ELAN3_ROUTE_INVALID_LEVEL  (0x10)
86629 +#define ELAN3_ROUTE_OCILATES       (0x20)
86630 +#define ELAN3_ROUTE_WRONG_DEST     (0x40)
86631 +#define ELAN3_ROUTE_TURN_LEVEL     (0x80)
86632 +#define ELAN3_ROUTE_NODEID_UNKNOWN (0xf0)
86633 +
86634 +#ifdef __cplusplus
86635 +}
86636 +#endif
86637 +
86638 +#endif /* _ELAN3_ELANVP_H */
86639 +
86640 +/*
86641 + * Local variables:
86642 + * c-file-style: "stroustrup"
86643 + * End:
86644 + */
86645 diff -urN clean/include/elan3/events.h linux-2.6.9/include/elan3/events.h
86646 --- clean/include/elan3/events.h        1969-12-31 19:00:00.000000000 -0500
86647 +++ linux-2.6.9/include/elan3/events.h  2003-09-24 09:57:24.000000000 -0400
86648 @@ -0,0 +1,183 @@
86649 +/*
86650 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
86651 + *
86652 + *    For licensing information please see the supplied COPYING file
86653 + *
86654 + */
86655 +
86656 +#ifndef _ELAN3_EVENTS_H
86657 +#define _ELAN3_EVENTS_H
86658 +
86659 +#ident "$Id: events.h,v 1.45 2003/09/24 13:57:24 david Exp $"
86660 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/events.h,v $*/
86661 +
86662 +/*
86663 + * Alignments for events, event queues and blockcopy blocks.
86664 + */
86665 +#define E3_EVENT_ALIGN                 (8)
86666 +#define E3_QUEUE_ALIGN         (32)
86667 +#define E3_BLK_ALIGN           (64)
86668 +#define E3_BLK_SIZE            (64)
86669 +#define E3_BLK_PATTERN                 (0xfeedface)
86670 +
86671 +#define E3_EVENT_FREE          ((0 << 4) | EV_WCOPY)
86672 +#define E3_EVENT_PENDING       ((1 << 4) | EV_WCOPY)
86673 +#define E3_EVENT_ACTIVE                ((2 << 4) | EV_WCOPY)
86674 +#define E3_EVENT_FIRED         ((3 << 4) | EV_WCOPY)
86675 +#define E3_EVENT_FAILED                ((4 << 4) | EV_WCOPY)
86676 +#define E3_EVENT_DONE          ((5 << 4) | EV_WCOPY)
86677 +#define E3_EVENT_PRIVATE       ((6 << 4) | EV_WCOPY)
86678 +
86679 +/*
86680 + * Event values and masks
86681 + *
86682 + * Block Copy event    xxxxxxxxxxxxxxxx1
86683 + * Chained event       30 bit ptr ....0x
86684 + * Event interrupt     29 bit cookie 01x
86685 + * Dma event           28 bit ptr   011x
86686 + * thread event                28 bit ptr   111x
86687 + */
86688 +#define EV_CLEAR               (0x00000000)
86689 +#define EV_TYPE_BCOPY          (0x00000001)
86690 +#define EV_TYPE_CHAIN          (0x00000000)
86691 +#define EV_TYPE_EVIRQ          (0x00000002)
86692 +#define EV_TYPE_DMA            (0x00000006)
86693 +#define EV_TYPE_THREAD         (0x0000000e)
86694 +
86695 +#define EV_TYPE_BCOPY_BYTE     (0)
86696 +#define EV_TYPE_BCOPY_HWORD    (1)
86697 +#define EV_TYPE_BCOPY_WORD     (2)
86698 +#define EV_TYPE_BCOPY_DWORD    (3)
86699 +
86700 +/*
86701 + * Data type is in the lowest two bits of the Dest pointer.
86702 + */
86703 +#define EV_BCOPY_DTYPE_MASK    (3)
86704 +#define EV_WCOPY               (1)     /* [DestWord] = Source */
86705 +#define EV_BCOPY               (0)     /* [DestBlock] = [SourceBlock] */
86706 +
86707 +#define EV_TYPE_MASK           (0x0000000e)
86708 +#define EV_TYPE_MASK_BCOPY     (0x00000001)
86709 +#define EV_TYPE_MASK_CHAIN     (0x00000002)
86710 +#define EV_TYPE_MASK_EVIRQ     (0x00000006)
86711 +#define EV_TYPE_MASK_DMA       (0x0000000e)
86712 +#define EV_TYPE_MASK_THREAD    (0x0000000e)
86713 +#define EV_TYPE_MASK2          (0x0000000f)
86714 +
86715 +/*
86716 + * Min/Max size for Elan queue entries 
86717 + */
86718 +#define E3_QUEUE_MIN   E3_BLK_SIZE
86719 +#define E3_QUEUE_MAX   (E3_BLK_SIZE * 5)
86720 +
86721 +/*
86722 + * Elan queue state bits
86723 + */
86724 +#define E3_QUEUE_FULL  (1<<0)
86725 +#define E3_QUEUE_LOCKED        (1<<8)
86726 +
86727 +#ifndef _ASM
86728 +
86729 +typedef union _E3_Event
86730 +{
86731 +   E3_uint64   ev_Int64;
86732 +   struct {
86733 +      volatile E3_int32        u_Count;
86734 +      E3_uint32                u_Type;
86735 +   } ev_u;
86736 +} E3_Event;
86737 +
86738 +typedef union _E3_BlockCopyEvent
86739 +{
86740 +   E3_uint64 ev_ForceAlign;
86741 +   struct E3_BlockCopyEvent_u {
86742 +      volatile E3_int32        u_Count;
86743 +      E3_uint32                u_Type;
86744 +      E3_Addr          u_Source;
86745 +      E3_Addr          u_Dest;   /* lowest bits are the data type for endian conversion */
86746 +   } ev_u;
86747 +} E3_BlockCopyEvent;
86748 +
86749 +#define ev_Type   ev_u.u_Type
86750 +#define ev_Count  ev_u.u_Count
86751 +#define ev_Source ev_u.u_Source
86752 +#define ev_Dest   ev_u.u_Dest
86753 +
86754 +typedef union _E3_WaitEvent0
86755 +{
86756 +   E3_uint64            we_ForceAlign;
86757 +   struct {
86758 +       E3_Addr         u_EventLoc;
86759 +       E3_int32        u_WaitCount;
86760 +   } we_u;
86761 +} E3_WaitEvent0;
86762 +#define we_EventLoc we_u.u_EventLoc
86763 +#define we_WaitCount we_u.u_WaitCount
86764 +
86765 +typedef union _E3_Event_Blk
86766 +{
86767 +    E3_uint8  eb_Bytes[E3_BLK_SIZE];
86768 +    E3_uint32 eb_Int32[E3_BLK_SIZE/sizeof (E3_uint32)];
86769 +    E3_uint64 eb_Int64[E3_BLK_SIZE/sizeof (E3_uint64)];
86770 +} E3_Event_Blk;
86771 +
86772 +/* We make eb_done the last word of the blk
86773 + * so that we can guarantee the rest of the blk is
86774 + * correct when this value is set.
86775 + * However, when the TPORT code copies the envelope
86776 + * info into the blk, it uses a dword endian type.
86777 + * Thus we must correct for this when initialising
86778 + * the pattern in the Elan SDRAM blk (eeb_done)
86779 + */
86780 +#define eb_done eb_Int32[15]
86781 +#define eeb_done eb_Int32[15^WordEndianFlip]
86782 +
86783 +#define EVENT_WORD_READY(WORD) (*((volatile E3_uint32 *) WORD) != 0)
86784 +#define EVENT_BLK_READY(BLK) (((volatile E3_Event_Blk *) (BLK))->eb_done != 0)
86785 +#define EVENT_READY(EVENT)   (((volatile E3_Event *) (EVENT))->ev_Count <= 0)
86786 +
86787 +#define ELAN3_WAIT_EVENT (0)
86788 +#define ELAN3_POLL_EVENT (-1)
86789 +
86790 +#define SETUP_EVENT_TYPE(ptr,typeval) (((unsigned long)(ptr)) | (typeval))
86791 +
86792 +#define E3_RESET_BCOPY_BLOCK(BLK)                                                      \
86793 +       do {                                                                            \
86794 +               (BLK)->eb_done = 0;                                                     \
86795 +       } while (0)
86796 +
86797 +typedef struct e3_queue
86798 +{
86799 +   volatile E3_uint32  q_state;        /* queue is full=bit0, queue is locked=bit8 */
86800 +   volatile E3_Addr    q_bptr;         /* block aligned ptr to current back item */
86801 +   E3_uint32           q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
86802 +   E3_Addr             q_top;          /* block aligned ptr to last queue item */
86803 +   E3_Addr             q_base;         /* block aligned ptr to first queue item */
86804 +   volatile E3_Addr    q_fptr;         /* block aligned ptr to current front item */
86805 +   E3_Event            q_event;        /* queue event */
86806 +} E3_Queue;
86807 +
86808 +typedef struct e3_blockcopy_queue
86809 +{
86810 +   volatile E3_uint32  q_state;        /* queue is full=bit0, queue is locked=bit8 */
86811 +   volatile E3_Addr    q_bptr;         /* block aligned ptr to current back item */
86812 +   E3_uint32           q_size;         /* size of queue item; 0x1 <= size <= (0x40 * 5) */
86813 +   E3_Addr             q_top;          /* block aligned ptr to last queue item */
86814 +   E3_Addr             q_base;         /* block aligned ptr to first queue item */
86815 +   volatile E3_Addr    q_fptr;         /* block aligned ptr to current front item */
86816 +   E3_BlockCopyEvent   q_event;        /* queue event */
86817 +   E3_uint32           q_pad[6];
86818 +} E3_BlockCopyQueue;
86819 +
86820 +#define E3_QUEUE_EVENT_OFFSET  24
86821 +#define QUEUE_FULL(Q)          ((Q)->q_state & E3_QUEUE_FULL)          
86822 +
86823 +#endif /* ! _ASM */
86824 +
86825 +#endif /* _ELAN3_EVENTS_H */
86826 +
86827 +/*
86828 + * Local variables:
86829 + * c-file-style: "stroustrup"
86830 + * End:
86831 + */
86832 diff -urN clean/include/elan3/intrinsics.h linux-2.6.9/include/elan3/intrinsics.h
86833 --- clean/include/elan3/intrinsics.h    1969-12-31 19:00:00.000000000 -0500
86834 +++ linux-2.6.9/include/elan3/intrinsics.h      2003-09-24 09:57:24.000000000 -0400
86835 @@ -0,0 +1,320 @@
86836 +/*
86837 + *    Copyright (c) 2003 by Quadrics Limited.
86838 + * 
86839 + *    For licensing information please see the supplied COPYING file
86840 + *
86841 + */
86842 +
86843 +#ifndef _ELAN3_INTRINSICS_H
86844 +#define _ELAN3_INTRINSICS_H
86845 +
86846 +#ident "$Id: intrinsics.h,v 1.35 2003/09/24 13:57:24 david Exp $"
86847 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/intrinsics.h,v $ */
86848 +
86849 +#include <elan3/e3types.h>
86850 +#include <elan3/events.h>
86851 +
86852 +/* 
86853 + * This file contains definitions of the macros for accessing the QSW
86854 + * specific instructions, as if they were functions.
86855 + * The results from the function 
86856 + */
86857 +
86858 +#define C_ACK_OK       0                       /* return from c_close() */
86859 +#define C_ACK_TESTFAIL 1                       /* return from c_close() */
86860 +#define C_ACK_DISCARD  2                       /* return from c_close() */
86861 +#define C_ACK_ERROR    3                       /* return from c_close() */
86862 +
86863 +/*
86864 + * Elan asi's for tproc block accesses
86865 + */
86866 +#define EASI_BYTE      0
86867 +#define EASI_HALF      1
86868 +#define EASI_WORD      2
86869 +#define EASI_DOUBLE    3
86870 +
86871 +#if defined(__ELAN3__) && !defined (_ASM)
86872 +
86873 +extern inline void c_abort(void) 
86874 +{
86875 +    asm volatile (".word 0x0000                ! die you thread you " : : );
86876 +}
86877 +
86878 +extern inline void c_suspend(void) 
86879 +{
86880 +    asm volatile (
86881 +       "set 1f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
86882 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
86883 +       "bne 1f                         ! RevB bug fix. jump to other alignment\n"
86884 +       "nop                            ! RevB bug fix. delay slot\n"
86885 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
86886 +       "suspend                        ! do the real suspend\n"
86887 +       "1: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
86888 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
86889 +       "suspend                        ! do the real suspend\n" : : );
86890 +}
86891 +
86892 +extern inline int c_close(void) 
86893 +{
86894 +    register int rc asm("o0");
86895 +
86896 +    asm volatile ("close %0" : "=r" (rc) : );
86897 +
86898 +    return (rc);
86899 +}
86900 +
86901 +extern inline int c_close_cookie(volatile E3_uint32 *cookiep, E3_uint32 next)
86902 +{
86903 +    register int rc asm("o0");
86904 +
86905 +    asm volatile ("close       %0              ! close the packet\n"
86906 +                 "bz,a         1f              ! ack received\n"
86907 +                 "st           %1, [%2]        ! update cookie on ack\n"
86908 +                 "1:                           ! label for not-ack\n"
86909 +                 : "=r" (rc) : "r" (next), "r" (cookiep));
86910 +
86911 +    return (rc);
86912 +}
86913 +
86914 +extern inline void c_break_busywait(void)
86915 +{
86916 +    asm volatile (
86917 +       "breaktest                      ! test to see if break necessary\n"
86918 +       "bpos 1f                        ! no other thread ready\n"
86919 +       "nop                            ! delay slot\n"
86920 +       "sub     %%sp,3*8*4,%%sp        ! Space to save the registers\n"
86921 +       "stblock %%g0,[%%sp+0]          ! save the globals\n"
86922 +       "stblock %%i0,[%%sp+8*4]        ! save the ins\n"
86923 +       "stblock %%l0,[%%sp+16*4]       ! save the locals\n"
86924 +       "set 2f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
86925 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
86926 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
86927 +       "nop                            ! RevB bug fix. delay slot\n"
86928 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
86929 +       "break                          ! do the real break\n"
86930 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
86931 +       " ldblock [%%sp+16*4],%%l0      ! RevB bug fix. restore locals in delay slot\n"
86932 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
86933 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
86934 +       "break                          ! do the real break\n"
86935 +       "ldblock [%%sp+16*4],%%l0       ! restore locals\n"
86936 +       "4: ldblock [%%sp+8*4], %%i0    ! restore ins\n"
86937 +       "ldblock [%%sp+0],%%g0          ! restore globals\n"
86938 +       "add     %%sp,3*8*4,%%sp        ! restore stack pointer\n"
86939 +       "1: " : : );
86940 +}
86941 +
86942 +extern inline void c_break(void)
86943 +{
86944 +    asm volatile (
86945 +       "breaktest                      ! test to see if break necessary\n"
86946 +       "bne 1f                         ! haven't exceeded our inst count yet\n"
86947 +       "nop                            ! delay slot\n"
86948 +       "sub     %%sp,3*8*4,%%sp        ! Space to save the registers\n"
86949 +       "stblock %%g0,[%%sp+0]          ! save the globals\n"
86950 +       "stblock %%i0,[%%sp+8*4]        ! save the ins\n"
86951 +       "stblock %%l0,[%%sp+16*4]       ! save the locals\n"
86952 +       "set 2f, %%i7                   ! RevB bug fix. get address of the wakeup inst\n"
86953 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
86954 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
86955 +       "nop                            ! RevB bug fix. delay slot\n"
86956 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
86957 +       "break                          ! do the real break\n"
86958 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
86959 +       " ldblock [%%sp+16*4],%%l0      ! RevB bug fix. restore locals in delay slot\n"
86960 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
86961 +       "ldd [%%i7],%%i6                ! RevB bug fix. data fetch of instructions\n"
86962 +       "break                          ! do the real break\n"
86963 +       "ldblock [%%sp+16*4],%%l0       ! restore locals\n"
86964 +       "4: ldblock [%%sp+8*4], %%i0    ! restore ins\n"
86965 +       "ldblock [%%sp+0],%%g0          ! restore globals\n"
86966 +       "add     %%sp,3*8*4,%%sp        ! restore stack pointer\n"
86967 +       "1: " : : );
86968 +}
86969 +
86970 +extern inline void c_open( const int arg ) 
86971 +{
86972 +    asm volatile ("open %0" : : "r" (arg) );
86973 +    asm volatile ("nop; nop; nop; nop");
86974 +    asm volatile ("nop; nop; nop; nop");
86975 +    asm volatile ("nop; nop; nop; nop");
86976 +    asm volatile ("nop; nop; nop; nop");
86977 +    asm volatile ("nop; nop; nop; nop");
86978 +    asm volatile ("nop; nop; nop; nop");
86979 +}
86980 +
86981 +extern inline void c_waitevent( volatile E3_Event *const ptr,
86982 +                               const int count) 
86983 +{
86984 +    register volatile E3_Event *a_unlikely asm("o0") = ptr;
86985 +    register int a_very_unlikely asm("o1") = count;
86986 +
86987 +    asm volatile (
86988 +        "sub     %%sp,1*8*4,%%sp       ! Space to save the registers\n"
86989 +        "stblock %%i0,[%%sp+0]         ! save the ins\n"
86990 +       "set    2f, %%i7                ! RevB bug fix. get address of the wakeup inst\n"
86991 +       "andcc %%i7,0x4,%%g0            ! RevB bug fix. check alignment\n"
86992 +       "bne 3f                         ! RevB bug fix. jump to other alignment\n"
86993 +       "nop                            ! RevB bug fix. delay slot\n"
86994 +       "ldd [%%i7],%%i4                ! RevB bug fix. data fetch of instructions\n"
86995 +        "waitevent                     ! do the business\n"
86996 +       "2: b 4f                        ! RevB bug fix. Branch over other alignment case\n"
86997 +        "  ldblock [%%sp+0],%%i0       ! RevB bug fix. restore ins in delay slot\n"
86998 +       "3: add %%i7,5*4,%%i7           ! RevB bug fix. Point i7 to first ldblock\n"
86999 +       "ldd [%%i7],%%i4                ! RevB bug fix. data fetch of instructions\n"
87000 +        "waitevent                     ! do the business\n"
87001 +        "ldblock [%%sp+0],%%i0         ! restore ins\n"
87002 +        "4: add     %%sp,1*8*4,%%sp    ! restore stack pointer\n"
87003 +        : /* no outputs */
87004 +        : /* inputs */ "r" (a_unlikely), "r" (a_very_unlikely)
87005 +        : /* clobbered */ "g0", "g1", "g2", "g3", "g4", "g5", "g6", "g7",
87006 +                         "l0", "l1", "l2", "l3", "l4", "l5", "l6", "l7" );
87007 +
87008 +}
87009 +
87010 +#define c_sendtrans0(type,dest)                        \
87011 +       asm volatile ("sendtrans %0, %%g0, %1" : : "i" (type), "r" (dest))
87012 +
87013 +#define c_sendtrans1(type,dest,arg)            \
87014 +       asm volatile ("sendtrans %0, %2, %1" : : "i" (type), "r" (dest), "r" (arg))
87015 +
87016 +#define c_sendtrans2(type,dest,arg1,arg2)      \
87017 +       do {                                    \
87018 +            register const unsigned long a_unlikely_1 asm("o4") = arg1;                        \
87019 +            register const unsigned long a_unlikely_2 asm("o5") = arg2;                        \
87020 +            asm volatile ("sendtrans %0, %2, %1"                                       \
87021 +                : : "i" (type), "r" (dest), "r" (a_unlikely_1), "r" (a_unlikely_2));   \
87022 +       } while(0)
87023 +
87024 +#define c_sendmem(type,dest,ptr)               \
87025 +       asm volatile ("sendtrans %0, [%2], %1" : : "i" (type), "r" (dest), "r" (ptr))
87026 +
87027 +/* Copy a single 64-byte block (src blk is read using a BYTE endian type) */
87028 +extern inline void elan3_copy64b(void *src, void *dst)
87029 +{
87030 +    /* Copy 64 bytes using ldblock/stblock
87031 +     * We save and restore the locals/ins because if we don't gcc
87032 +     * really makes a bad job of optimisising the rest of the thread code!
87033 +     *
87034 +     * We force the parameters in g5, g6 so that they aren't
87035 +     * trashed by the loadblk32 into the locals/ins
87036 +     */
87037 +    register void *tmp1 asm("g5") = src;
87038 +    register void *tmp2 asm("g6") = dst;
87039 +
87040 +    asm volatile (
87041 +       "and     %%sp,63,%%g7           ! Calculate stack alignment\n"
87042 +       "sub     %%sp,2*8*4,%%sp        ! Space to save the registers\n"
87043 +       "sub     %%sp,%%g7,%%sp         ! align stack\n" 
87044 +       "stblock64 %%l0,[%%sp]          ! save the locals and ins\n"
87045 +       "ldblock64a [%0]%2,%%l0         ! load 64-byte block into locals/ins\n"
87046 +       "stblock64a %%l0,[%1]%2         ! store 64-byte block from local/ins\n"
87047 +       "ldblock64 [%%sp],%%l0          ! restore locals and ins\n"
87048 +       "add     %%sp,%%g7, %%sp        ! undo alignment\n"
87049 +       "add     %%sp,2*8*4,%%sp        ! restore stack pointer\n"
87050 +       : /* outputs */
87051 +       : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_BYTE)
87052 +       : /* clobbered */ "g5", "g6", "g7" );
87053 +}
87054 +
87055 +/* Copy a single 64-byte block (src blk is read using a WORD endian type) */
87056 +extern inline void elan3_copy64w(void *src, void *dst)
87057 +{
87058 +    /* Copy 64 bytes using ldblock/stblock
87059 +     * We save and restore the locals/ins because if we don't gcc
87060 +     * really makes a bad job of optimisising the rest of the thread code!
87061 +     *
87062 +     * We force the parameters in g5, g6 so that they aren't
87063 +     * trashed by the loadblk32 into the locals/ins
87064 +     */
87065 +    register void *tmp1 asm("g5") = src;
87066 +    register void *tmp2 asm("g6") = dst;
87067 +
87068 +    asm volatile (
87069 +       "and     %%sp,63,%%g7           ! Calculate stack alignment\n"
87070 +       "sub     %%sp,2*8*4,%%sp        ! Space to save the registers\n"
87071 +       "sub     %%sp,%%g7,%%sp         ! align stack\n" 
87072 +       "stblock64 %%l0,[%%sp]          ! save the locals and ins\n"
87073 +       "ldblock64a [%0]%2,%%l0         ! load 64-byte block into locals/ins\n"
87074 +       "stblock64a %%l0,[%1]%2         ! store 64-byte block from local/ins\n"
87075 +       "ldblock64 [%%sp],%%l0          ! restore locals and ins\n"
87076 +       "add     %%sp,%%g7, %%sp        ! undo alignment\n"
87077 +       "add     %%sp,2*8*4,%%sp        ! restore stack pointer\n"
87078 +       : /* outputs */
87079 +       : /* inputs */ "r" (tmp1), "r" (tmp2), "n" (EASI_WORD)
87080 +       : /* clobbered */ "g5", "g6", "g7" );
87081 +}
87082 +
87083 +/* Read a 64-bit value with a WORD (32-bit) endian type */
87084 +extern inline E3_uint64 elan3_read64w( volatile E3_uint64 *const ptr )
87085 +{
87086 +    E3_uint64 result;
87087 +
87088 +    asm volatile (
87089 +       "ldblock8a [%1]%2, %0\n"
87090 +       : /* outputs */ "=r" (result)
87091 +       : /* inputs */ "r" (ptr), "n" (EASI_WORD) );
87092 +
87093 +    return( result );
87094 +}
87095 +
87096 +/* Read a 64-bit value with a DOUBLEWORD (64-bit) endian type */
87097 +extern inline E3_uint64 elan3_read64dw( volatile E3_uint64 *const ptr )
87098 +{
87099 +    E3_uint64 result;
87100 +
87101 +    asm volatile (
87102 +       "ldblock8a [%1]%2, %0\n"
87103 +       : /* outputs */ "=r" (result)
87104 +       : /* inputs */ "r" (ptr), "n" (EASI_DOUBLE) );
87105 +
87106 +    return( result );
87107 +}
87108 +
87109 +/* Write a 32-bit value with a WORD (32-bit) endian type */
87110 +extern inline void elan3_write64w( volatile E3_uint64 *const ptr, E3_uint64 value )
87111 +{
87112 +    asm volatile (
87113 +       "stblock8a %1, [%0]%2\n"
87114 +       : /* no outputs */
87115 +       : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_WORD) );
87116 +}
87117 +
87118 +/* Write a 64-bit value with a DOUBLEWORD (64-bit) endian type */
87119 +extern inline void elan3_write64dw( volatile E3_uint64 *const ptr, E3_uint64 value )
87120 +{
87121 +    asm volatile (
87122 +       "stblock8a %1, [%0]%2\n"
87123 +       : /* no outputs */
87124 +       : /* inputs */ "r" (ptr), "r" (value), "n" (EASI_DOUBLE) );
87125 +}
87126 +
87127 +extern inline E3_uint32 c_swap(volatile E3_uint32 *source, E3_uint32 result)
87128 +{
87129 +   asm volatile("swap [%1],%0\n"
87130 +               : "=r" (result)
87131 +               : "r" (source) ,"0" (result)
87132 +               : "memory");
87133 +   return result;
87134 +}
87135 +
87136 +extern inline E3_uint32 c_swap_save(volatile E3_uint32 *source, const E3_uint32 result)
87137 +{
87138 +   register E3_uint32 a_unlikely;
87139 +   asm volatile("" : "=r" (a_unlikely) : );
87140 +
87141 +   asm volatile("mov %2,%0; swap [%1],%0\n"
87142 +               : "=r" (a_unlikely)
87143 +               : "r" (source) ,"r" (result), "0" (a_unlikely)
87144 +               : "memory");
87145 +   return a_unlikely;
87146 +}
87147 +#endif /* (__ELAN3__) && !(_ASM) */
87148 +
87149 +#endif /* _ELAN3_INTRINSICS_H */
87150 +
87151 +/*
87152 + * Local variables:
87153 + * c-file-style: "stroustrup"
87154 + * End:
87155 + */
87156 diff -urN clean/include/elan3/minames.h linux-2.6.9/include/elan3/minames.h
87157 --- clean/include/elan3/minames.h       1969-12-31 19:00:00.000000000 -0500
87158 +++ linux-2.6.9/include/elan3/minames.h 2005-09-07 10:39:37.000000000 -0400
87159 @@ -0,0 +1,256 @@
87160 +{MI_WaitForRemoteDescRead,     "MI_WaitForRemoteDescRead"},
87161 +{MI_WaitForRemoteDescRead2,    "MI_WaitForRemoteDescRead2"},
87162 +{MI_WaitForRemoteDescRead2_seq1,       "MI_WaitForRemoteDescRead2_seq1"},
87163 +{MI_SendRemoteDmaRoutes,       "MI_SendRemoteDmaRoutes"},
87164 +{MI_IProcTrapped,      "MI_IProcTrapped"},
87165 +{MI_DProcTrapped,      "MI_DProcTrapped"},
87166 +{MI_CProcTrapped,      "MI_CProcTrapped"},
87167 +{MI_TProcTrapped,      "MI_TProcTrapped"},
87168 +{MI_TestWhichDmaQueue, "MI_TestWhichDmaQueue"},
87169 +{MI_TestWhichDmaQueue_seq1,    "MI_TestWhichDmaQueue_seq1"},
87170 +{MI_InputRemoteDmaUpdateBPtr,  "MI_InputRemoteDmaUpdateBPtr"},
87171 +{MI_FixupQueueContextAndRemoteBit,     "MI_FixupQueueContextAndRemoteBit"},
87172 +{MI_FixupQueueContextAndRemoteBit_seq1,        "MI_FixupQueueContextAndRemoteBit_seq1"},
87173 +{MI_FixupQueueContextAndRemoteBit_seq2,        "MI_FixupQueueContextAndRemoteBit_seq2"},
87174 +{MI_FixupQueueContextAndRemoteBit_seq3,        "MI_FixupQueueContextAndRemoteBit_seq3"},
87175 +{MI_FixupQueueContextAndRemoteBit_seq4,        "MI_FixupQueueContextAndRemoteBit_seq4"},
87176 +{MI_RunDmaCommand,     "MI_RunDmaCommand"},
87177 +{MI_DoSendRemoteDmaDesc,       "MI_DoSendRemoteDmaDesc"},
87178 +{MI_DequeueNonSysCntxDma,      "MI_DequeueNonSysCntxDma"},
87179 +{MI_WaitForRemoteDescRead1,    "MI_WaitForRemoteDescRead1"},
87180 +{MI_RemoteDmaCommand,  "MI_RemoteDmaCommand"},
87181 +{MI_WaitForRemoteRoutes,       "MI_WaitForRemoteRoutes"},
87182 +{MI_DequeueSysCntxDma, "MI_DequeueSysCntxDma"},
87183 +{MI_ExecuteDmaDescriptorForQueue,      "MI_ExecuteDmaDescriptorForQueue"},
87184 +{MI_ExecuteDmaDescriptor1,     "MI_ExecuteDmaDescriptor1"},
87185 +{MI_ExecuteDmaDescriptor1_seq1,        "MI_ExecuteDmaDescriptor1_seq1"},
87186 +{MI_ExecuteDmaDescriptor1_seq2,        "MI_ExecuteDmaDescriptor1_seq2"},
87187 +{MI_ExecuteDmaDescriptor1_seq3,        "MI_ExecuteDmaDescriptor1_seq3"},
87188 +{MI_GetNewSizeInProg,  "MI_GetNewSizeInProg"},
87189 +{MI_GetNewSizeInProg_seq1,     "MI_GetNewSizeInProg_seq1"},
87190 +{MI_FirstBlockRead,    "MI_FirstBlockRead"},
87191 +{MI_ExtraFirstBlockRead,       "MI_ExtraFirstBlockRead"},
87192 +{MI_UnimplementedError,        "MI_UnimplementedError"},
87193 +{MI_UpdateDescriptor,  "MI_UpdateDescriptor"},
87194 +{MI_UpdateDescriptor_seq1,     "MI_UpdateDescriptor_seq1"},
87195 +{MI_UpdateDescriptor_seq2,     "MI_UpdateDescriptor_seq2"},
87196 +{MI_UpdateDescriptor_seq3,     "MI_UpdateDescriptor_seq3"},
87197 +{MI_UpdateDescriptor_seq4,     "MI_UpdateDescriptor_seq4"},
87198 +{MI_UpdateDescriptor_seq5,     "MI_UpdateDescriptor_seq5"},
87199 +{MI_GetNextSizeInProg, "MI_GetNextSizeInProg"},
87200 +{MI_DoStopThisDma,     "MI_DoStopThisDma"},
87201 +{MI_DoStopThisDma_seq1,        "MI_DoStopThisDma_seq1"},
87202 +{MI_GenNewBytesToRead, "MI_GenNewBytesToRead"},
87203 +{MI_WaitForEventReadTy1,       "MI_WaitForEventReadTy1"},
87204 +{MI_WaitUpdateEvent,   "MI_WaitUpdateEvent"},
87205 +{MI_WaitUpdateEvent_seq1,      "MI_WaitUpdateEvent_seq1"},
87206 +{MI_DoSleepOneTickThenRunable, "MI_DoSleepOneTickThenRunable"},
87207 +{MI_RunEvent,  "MI_RunEvent"},
87208 +{MI_EnqueueThread,     "MI_EnqueueThread"},
87209 +{MI_CheckContext0,     "MI_CheckContext0"},
87210 +{MI_EnqueueDma,        "MI_EnqueueDma"},
87211 +{MI_CprocTrapping,     "MI_CprocTrapping"},
87212 +{MI_CprocTrapping_seq1,        "MI_CprocTrapping_seq1"},
87213 +{MI_WaitForRemoteRoutes1,      "MI_WaitForRemoteRoutes1"},
87214 +{MI_SetEventCommand,   "MI_SetEventCommand"},
87215 +{MI_DoSetEvent,        "MI_DoSetEvent"},
87216 +{MI_DoRemoteSetEventNowOrTrapQueueingDma,      "MI_DoRemoteSetEventNowOrTrapQueueingDma"},
87217 +{MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1, "MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1"},
87218 +{MI_SendRemoteDmaRoutes2,      "MI_SendRemoteDmaRoutes2"},
87219 +{MI_WaitForRemoteRoutes2,      "MI_WaitForRemoteRoutes2"},
87220 +{MI_WaitEventCommandTy0,       "MI_WaitEventCommandTy0"},
87221 +{MI_DequeueNonSysCntxDma2,     "MI_DequeueNonSysCntxDma2"},
87222 +{MI_WaitEventCommandTy1,       "MI_WaitEventCommandTy1"},
87223 +{MI_WaitEventCommandTy1_seq1,  "MI_WaitEventCommandTy1_seq1"},
87224 +{MI_DequeueNonSysCntxThread,   "MI_DequeueNonSysCntxThread"},
87225 +{MI_DequeueSysCntxDma1,        "MI_DequeueSysCntxDma1"},
87226 +{MI_DequeueSysCntxThread,      "MI_DequeueSysCntxThread"},
87227 +{MI_TestNonSysCntxDmaQueueEmpty,       "MI_TestNonSysCntxDmaQueueEmpty"},
87228 +{MI_TestNonSysCntxDmaQueueEmpty_seq1,  "MI_TestNonSysCntxDmaQueueEmpty_seq1"},
87229 +{MI_TestNonSysCntxDmaQueueEmpty_seq2,  "MI_TestNonSysCntxDmaQueueEmpty_seq2"},
87230 +{MI_RunThreadCommand,  "MI_RunThreadCommand"},
87231 +{MI_SetEventWaitForLastAcess,  "MI_SetEventWaitForLastAcess"},
87232 +{MI_SetEventReadWait,  "MI_SetEventReadWait"},
87233 +{MI_SetEventReadWait_seq1,     "MI_SetEventReadWait_seq1"},
87234 +{MI_TestEventType,     "MI_TestEventType"},
87235 +{MI_TestEventType_seq1,        "MI_TestEventType_seq1"},
87236 +{MI_TestEventBit2,     "MI_TestEventBit2"},
87237 +{MI_DmaDescOrBlockCopyOrChainedEvent,  "MI_DmaDescOrBlockCopyOrChainedEvent"},
87238 +{MI_RunThread, "MI_RunThread"},
87239 +{MI_RunThread1,        "MI_RunThread1"},
87240 +{MI_RunThread1_seq1,   "MI_RunThread1_seq1"},
87241 +{MI_IncDmaSysCntxBPtr, "MI_IncDmaSysCntxBPtr"},
87242 +{MI_IncDmaSysCntxBPtr_seq1,    "MI_IncDmaSysCntxBPtr_seq1"},
87243 +{MI_IncDmaSysCntxBPtr_seq2,    "MI_IncDmaSysCntxBPtr_seq2"},
87244 +{MI_WaitForCntxDmaDescRead,    "MI_WaitForCntxDmaDescRead"},
87245 +{MI_FillInContext,     "MI_FillInContext"},
87246 +{MI_FillInContext_seq1,        "MI_FillInContext_seq1"},
87247 +{MI_WriteNewDescToQueue,       "MI_WriteNewDescToQueue"},
87248 +{MI_WriteNewDescToQueue_seq1,  "MI_WriteNewDescToQueue_seq1"},
87249 +{MI_TestForQueueWrap,  "MI_TestForQueueWrap"},
87250 +{MI_TestForQueueWrap_seq1,     "MI_TestForQueueWrap_seq1"},
87251 +{MI_TestQueueIsFull,   "MI_TestQueueIsFull"},
87252 +{MI_TestQueueIsFull_seq1,      "MI_TestQueueIsFull_seq1"},
87253 +{MI_TestQueueIsFull_seq2,      "MI_TestQueueIsFull_seq2"},
87254 +{MI_CheckPsychoShitFixup,      "MI_CheckPsychoShitFixup"},
87255 +{MI_PsychoShitFixupForcedRead, "MI_PsychoShitFixupForcedRead"},
87256 +{MI_PrepareDMATimeSlice,       "MI_PrepareDMATimeSlice"},
87257 +{MI_PrepareDMATimeSlice_seq1,  "MI_PrepareDMATimeSlice_seq1"},
87258 +{MI_TProcRestartFromTrapOrTestEventBit2,       "MI_TProcRestartFromTrapOrTestEventBit2"},
87259 +{MI_TProcRestartFromTrapOrTestEventBit2_seq1,  "MI_TProcRestartFromTrapOrTestEventBit2_seq1"},
87260 +{MI_WaitForGlobalsRead,        "MI_WaitForGlobalsRead"},
87261 +{MI_WaitForNPCRead,    "MI_WaitForNPCRead"},
87262 +{MI_EventInterrupt,    "MI_EventInterrupt"},
87263 +{MI_EventInterrupt_seq1,       "MI_EventInterrupt_seq1"},
87264 +{MI_EventInterrupt_seq2,       "MI_EventInterrupt_seq2"},
87265 +{MI_EventInterrupt_seq3,       "MI_EventInterrupt_seq3"},
87266 +{MI_TestSysCntxDmaQueueEmpty,  "MI_TestSysCntxDmaQueueEmpty"},
87267 +{MI_TestSysCntxDmaQueueEmpty_seq1,     "MI_TestSysCntxDmaQueueEmpty_seq1"},
87268 +{MI_TestIfRemoteDesc,  "MI_TestIfRemoteDesc"},
87269 +{MI_DoDmaLocalSetEvent,        "MI_DoDmaLocalSetEvent"},
87270 +{MI_DoDmaLocalSetEvent_seq1,   "MI_DoDmaLocalSetEvent_seq1"},
87271 +{MI_DoDmaLocalSetEvent_seq2,   "MI_DoDmaLocalSetEvent_seq2"},
87272 +{MI_DmaLoop1,  "MI_DmaLoop1"},
87273 +{MI_ExitDmaLoop,       "MI_ExitDmaLoop"},
87274 +{MI_ExitDmaLoop_seq1,  "MI_ExitDmaLoop_seq1"},
87275 +{MI_RemoteDmaTestPAckType,     "MI_RemoteDmaTestPAckType"},
87276 +{MI_PacketDiscardOrTestFailRecIfCCis0, "MI_PacketDiscardOrTestFailRecIfCCis0"},
87277 +{MI_PacketDiscardOrTestFailRecIfCCis0_seq1,    "MI_PacketDiscardOrTestFailRecIfCCis0_seq1"},
87278 +{MI_TestNackFailIsZero2,       "MI_TestNackFailIsZero2"},
87279 +{MI_TestNackFailIsZero3,       "MI_TestNackFailIsZero3"},
87280 +{MI_DmaFailCountError, "MI_DmaFailCountError"},
87281 +{MI_TestDmaForSysCntx, "MI_TestDmaForSysCntx"},
87282 +{MI_TestDmaForSysCntx_seq1,    "MI_TestDmaForSysCntx_seq1"},
87283 +{MI_TestDmaForSysCntx_seq2,    "MI_TestDmaForSysCntx_seq2"},
87284 +{MI_TestAeqB2, "MI_TestAeqB2"},
87285 +{MI_TestAeqB2_seq1,    "MI_TestAeqB2_seq1"},
87286 +{MI_GetNextDmaDescriptor,      "MI_GetNextDmaDescriptor"},
87287 +{MI_DequeueSysCntxDma2,        "MI_DequeueSysCntxDma2"},
87288 +{MI_InputSetEvent,     "MI_InputSetEvent"},
87289 +{MI_PutBackSysCntxDma, "MI_PutBackSysCntxDma"},
87290 +{MI_PutBackSysCntxDma_seq1,    "MI_PutBackSysCntxDma_seq1"},
87291 +{MI_PutBackSysCntxDma_seq2,    "MI_PutBackSysCntxDma_seq2"},
87292 +{MI_InputRemoteDma,    "MI_InputRemoteDma"},
87293 +{MI_InputRemoteDma_seq1,       "MI_InputRemoteDma_seq1"},
87294 +{MI_WaitOneTickForWakeup1,     "MI_WaitOneTickForWakeup1"},
87295 +{MI_SendRemoteDmaDesc, "MI_SendRemoteDmaDesc"},
87296 +{MI_InputLockQueue,    "MI_InputLockQueue"},
87297 +{MI_CloseTheTrappedPacketIfCCis1,      "MI_CloseTheTrappedPacketIfCCis1"},
87298 +{MI_CloseTheTrappedPacketIfCCis1_seq1, "MI_CloseTheTrappedPacketIfCCis1_seq1"},
87299 +{MI_PostDmaInterrupt,  "MI_PostDmaInterrupt"},
87300 +{MI_InputUnLockQueue,  "MI_InputUnLockQueue"},
87301 +{MI_WaitForUnLockDescRead,     "MI_WaitForUnLockDescRead"},
87302 +{MI_SendEOPforRemoteDma,       "MI_SendEOPforRemoteDma"},
87303 +{MI_LookAtRemoteAck,   "MI_LookAtRemoteAck"},
87304 +{MI_InputWriteBlockQueue,      "MI_InputWriteBlockQueue"},
87305 +{MI_WaitForSpStore,    "MI_WaitForSpStore"},
87306 +{MI_TProcNext, "MI_TProcNext"},
87307 +{MI_TProcStoppedRunning,       "MI_TProcStoppedRunning"},
87308 +{MI_InputWriteBlock,   "MI_InputWriteBlock"},
87309 +{MI_RunDmaOrDeqNonSysCntxDma,  "MI_RunDmaOrDeqNonSysCntxDma"},
87310 +{MI_ExecuteDmaDescriptorForRun,        "MI_ExecuteDmaDescriptorForRun"},
87311 +{MI_ConfirmQueueLock,  "MI_ConfirmQueueLock"},
87312 +{MI_DmaInputIdentify,  "MI_DmaInputIdentify"},
87313 +{MI_TProcStoppedRunning2,      "MI_TProcStoppedRunning2"},
87314 +{MI_TProcStoppedRunning2_seq1, "MI_TProcStoppedRunning2_seq1"},
87315 +{MI_TProcStoppedRunning2_seq2, "MI_TProcStoppedRunning2_seq2"},
87316 +{MI_ThreadInputIdentify,       "MI_ThreadInputIdentify"},
87317 +{MI_InputIdWriteAddrAndType3,  "MI_InputIdWriteAddrAndType3"},
87318 +{MI_IProcTrappedWriteStatus,   "MI_IProcTrappedWriteStatus"},
87319 +{MI_FinishTrappingEop, "MI_FinishTrappingEop"},
87320 +{MI_InputTestTrans,    "MI_InputTestTrans"},
87321 +{MI_TestAeqB3, "MI_TestAeqB3"},
87322 +{MI_ThreadUpdateNonSysCntxBack,        "MI_ThreadUpdateNonSysCntxBack"},
87323 +{MI_ThreadQueueOverflow,       "MI_ThreadQueueOverflow"},
87324 +{MI_RunContext0Thread, "MI_RunContext0Thread"},
87325 +{MI_RunContext0Thread_seq1,    "MI_RunContext0Thread_seq1"},
87326 +{MI_RunContext0Thread_seq2,    "MI_RunContext0Thread_seq2"},
87327 +{MI_RunDmaDesc,        "MI_RunDmaDesc"},
87328 +{MI_RunDmaDesc_seq1,   "MI_RunDmaDesc_seq1"},
87329 +{MI_RunDmaDesc_seq2,   "MI_RunDmaDesc_seq2"},
87330 +{MI_TestAeqB,  "MI_TestAeqB"},
87331 +{MI_WaitForNonCntxDmaDescRead, "MI_WaitForNonCntxDmaDescRead"},
87332 +{MI_DmaQueueOverflow,  "MI_DmaQueueOverflow"},
87333 +{MI_BlockCopyEvent,    "MI_BlockCopyEvent"},
87334 +{MI_BlockCopyEventReadBlock,   "MI_BlockCopyEventReadBlock"},
87335 +{MI_BlockCopyWaitForReadData,  "MI_BlockCopyWaitForReadData"},
87336 +{MI_InputWriteWord,    "MI_InputWriteWord"},
87337 +{MI_TraceSetEvents,    "MI_TraceSetEvents"},
87338 +{MI_TraceSetEvents_seq1,       "MI_TraceSetEvents_seq1"},
87339 +{MI_TraceSetEvents_seq2,       "MI_TraceSetEvents_seq2"},
87340 +{MI_InputWriteDoubleWd,        "MI_InputWriteDoubleWd"},
87341 +{MI_SendLockTransIfCCis1,      "MI_SendLockTransIfCCis1"},
87342 +{MI_WaitForDmaRoutes1, "MI_WaitForDmaRoutes1"},
87343 +{MI_LoadDmaContext,    "MI_LoadDmaContext"},
87344 +{MI_InputTestAndSetWord,       "MI_InputTestAndSetWord"},
87345 +{MI_InputTestAndSetWord_seq1,  "MI_InputTestAndSetWord_seq1"},
87346 +{MI_GetDestEventValue, "MI_GetDestEventValue"},
87347 +{MI_SendDmaIdentify,   "MI_SendDmaIdentify"},
87348 +{MI_InputAtomicAddWord,        "MI_InputAtomicAddWord"},
87349 +{MI_LoadBFromTransD0,  "MI_LoadBFromTransD0"},
87350 +{MI_ConditionalWriteBackCCTrue,        "MI_ConditionalWriteBackCCTrue"},
87351 +{MI_WaitOneTickForWakeup,      "MI_WaitOneTickForWakeup"},
87352 +{MI_SendFinalUnlockTrans,      "MI_SendFinalUnlockTrans"},
87353 +{MI_SendDmaEOP,        "MI_SendDmaEOP"},
87354 +{MI_GenLastAddrForPsycho,      "MI_GenLastAddrForPsycho"},
87355 +{MI_FailedAckIfCCis0,  "MI_FailedAckIfCCis0"},
87356 +{MI_FailedAckIfCCis0_seq1,     "MI_FailedAckIfCCis0_seq1"},
87357 +{MI_WriteDmaSysCntxDesc,       "MI_WriteDmaSysCntxDesc"},
87358 +{MI_TimesliceDmaQueueOverflow, "MI_TimesliceDmaQueueOverflow"},
87359 +{MI_DequeueNonSysCntxThread1,  "MI_DequeueNonSysCntxThread1"},
87360 +{MI_DequeueNonSysCntxThread1_seq1,     "MI_DequeueNonSysCntxThread1_seq1"},
87361 +{MI_TestThreadQueueEmpty,      "MI_TestThreadQueueEmpty"},
87362 +{MI_ClearThreadQueueIfCC,      "MI_ClearThreadQueueIfCC"},
87363 +{MI_DequeueSysCntxThread1,     "MI_DequeueSysCntxThread1"},
87364 +{MI_DequeueSysCntxThread1_seq1,        "MI_DequeueSysCntxThread1_seq1"},
87365 +{MI_TProcStartUpGeneric,       "MI_TProcStartUpGeneric"},
87366 +{MI_WaitForPCload2,    "MI_WaitForPCload2"},
87367 +{MI_WaitForNPCWrite,   "MI_WaitForNPCWrite"},
87368 +{MI_WaitForEventWaitAddr,      "MI_WaitForEventWaitAddr"},
87369 +{MI_WaitForWaitEventAccess,    "MI_WaitForWaitEventAccess"},
87370 +{MI_WaitForWaitEventAccess_seq1,       "MI_WaitForWaitEventAccess_seq1"},
87371 +{MI_WaitForWaitEventDesc,      "MI_WaitForWaitEventDesc"},
87372 +{MI_WaitForEventReadTy0,       "MI_WaitForEventReadTy0"},
87373 +{MI_SendCondTestFail,  "MI_SendCondTestFail"},
87374 +{MI_InputMoveToNextTrans,      "MI_InputMoveToNextTrans"},
87375 +{MI_ThreadUpdateSysCntxBack,   "MI_ThreadUpdateSysCntxBack"},
87376 +{MI_FinishedSetEvent,  "MI_FinishedSetEvent"},
87377 +{MI_EventIntUpdateBPtr,        "MI_EventIntUpdateBPtr"},
87378 +{MI_EventQueueOverflow,        "MI_EventQueueOverflow"},
87379 +{MI_MaskLowerSource,   "MI_MaskLowerSource"},
87380 +{MI_DmaLoop,   "MI_DmaLoop"},
87381 +{MI_SendNullSetEvent,  "MI_SendNullSetEvent"},
87382 +{MI_SendFinalSetEvent, "MI_SendFinalSetEvent"},
87383 +{MI_TestNackFailIsZero1,       "MI_TestNackFailIsZero1"},
87384 +{MI_DmaPacketTimedOutOrPacketError,    "MI_DmaPacketTimedOutOrPacketError"},
87385 +{MI_NextPacketIsLast,  "MI_NextPacketIsLast"},
87386 +{MI_TestForZeroLengthDma,      "MI_TestForZeroLengthDma"},
87387 +{MI_WaitForPCload,     "MI_WaitForPCload"},
87388 +{MI_ReadInIns, "MI_ReadInIns"},
87389 +{MI_WaitForInsRead,    "MI_WaitForInsRead"},
87390 +{MI_WaitForLocals,     "MI_WaitForLocals"},
87391 +{MI_WaitForOutsWrite,  "MI_WaitForOutsWrite"},
87392 +{MI_WaitForWaitEvWrBack,       "MI_WaitForWaitEvWrBack"},
87393 +{MI_WaitForLockRead,   "MI_WaitForLockRead"},
87394 +{MI_TestQueueLock,     "MI_TestQueueLock"},
87395 +{MI_InputIdWriteAddrAndType,   "MI_InputIdWriteAddrAndType"},
87396 +{MI_InputIdWriteAddrAndType2,  "MI_InputIdWriteAddrAndType2"},
87397 +{MI_ThreadInputIdentify2,      "MI_ThreadInputIdentify2"},
87398 +{MI_WriteIntoTrapArea0,        "MI_WriteIntoTrapArea0"},
87399 +{MI_GenQueueBlockWrAddr,       "MI_GenQueueBlockWrAddr"},
87400 +{MI_InputDiscardFreeLock,      "MI_InputDiscardFreeLock"},
87401 +{MI_WriteIntoTrapArea1,        "MI_WriteIntoTrapArea1"},
87402 +{MI_WriteIntoTrapArea2,        "MI_WriteIntoTrapArea2"},
87403 +{MI_ResetBPtrToBase,   "MI_ResetBPtrToBase"},
87404 +{MI_InputDoTrap,       "MI_InputDoTrap"},
87405 +{MI_RemoteDmaCntxt0Update,     "MI_RemoteDmaCntxt0Update"},
87406 +{MI_ClearQueueLock,    "MI_ClearQueueLock"},
87407 +{MI_IProcTrappedBlockWriteData,        "MI_IProcTrappedBlockWriteData"},
87408 +{MI_FillContextFilter, "MI_FillContextFilter"},
87409 +{MI_IProcTrapped4,     "MI_IProcTrapped4"},
87410 +{MI_RunSysCntxDma,     "MI_RunSysCntxDma"},
87411 +{MI_ChainedEventError, "MI_ChainedEventError"},
87412 +{MI_InputTrappingEOP,  "MI_InputTrappingEOP"},
87413 +{MI_CheckForRunIfZero, "MI_CheckForRunIfZero"},
87414 +{MI_TestForBreakOrSuspend,     "MI_TestForBreakOrSuspend"},
87415 +{MI_SwapForRunable,    "MI_SwapForRunable"},
87416 diff -urN clean/include/elan3/neterr_rpc.h linux-2.6.9/include/elan3/neterr_rpc.h
87417 --- clean/include/elan3/neterr_rpc.h    1969-12-31 19:00:00.000000000 -0500
87418 +++ linux-2.6.9/include/elan3/neterr_rpc.h      2003-06-26 12:05:22.000000000 -0400
87419 @@ -0,0 +1,68 @@
87420 +/*
87421 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87422 + *
87423 + *    For licensing information please see the supplied COPYING file
87424 + *
87425 + */
87426 +
87427 +#ifndef __ELAN3_NETERR_RPC_H
87428 +#define __ELAN3_NETERR_RPC_H
87429 +
87430 +#ident "$Id: neterr_rpc.h,v 1.20 2003/06/26 16:05:22 fabien Exp $"
87431 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/neterr_rpc.h,v $*/
87432 +
87433 +#define NETERR_SERVICE "neterr-srv"
87434 +#define NETERR_PROGRAM ((u_long) 170002)
87435 +#define NETERR_VERSION ((u_long) 1)
87436 +
87437 +#define NETERR_NULL_RPC                0
87438 +#define NETERR_FIXUP_RPC       1
87439 +
87440 +/* network error rpc timeout */
87441 +#define NETERR_RPC_TIMEOUT     5
87442 +
87443 +/*
87444 + * XDR functions for Tru64 and Linux in userspace. 
87445 + *  NB Linux kernelspace xdr routines are in network_error.
87446 + *     and *must* be kept consistent.
87447 + */
87448 +#if defined(DIGITAL_UNIX) || !defined(__KERNEL__)
87449 +bool_t
87450 +xdr_capability (XDR *xdrs, void *arg)
87451 +{
87452 +    ELAN_CAPABILITY *cap = (ELAN_CAPABILITY *) arg;
87453 +
87454 +    return (xdr_opaque (xdrs, (caddr_t) &cap->cap_userkey, sizeof (cap->cap_userkey)) &&
87455 +           xdr_int (xdrs, &cap->cap_version) &&
87456 +           xdr_u_short (xdrs, &cap->cap_type) &&
87457 +           xdr_int (xdrs, &cap->cap_lowcontext) &&
87458 +           xdr_int (xdrs, &cap->cap_highcontext) &&
87459 +           xdr_int (xdrs, &cap->cap_mycontext) &&
87460 +           xdr_int (xdrs, &cap->cap_lownode) &&
87461 +           xdr_int (xdrs, &cap->cap_highnode) &&
87462 +           xdr_u_int (xdrs, &cap->cap_railmask) &&
87463 +           xdr_opaque (xdrs, (caddr_t) &cap->cap_bitmap[0], sizeof (cap->cap_bitmap)));
87464 +}
87465 +
87466 +bool_t
87467 +xdr_neterr_msg (XDR *xdrs, void *req)
87468 +{
87469 +    NETERR_MSG *msg = (NETERR_MSG *) req;
87470 +
87471 +    return (xdr_u_int (xdrs, &msg->Rail) &&
87472 +           xdr_capability (xdrs, &msg->SrcCapability) &&
87473 +           xdr_capability (xdrs, &msg->DstCapability) &&
87474 +           xdr_u_int (xdrs, &msg->DstProcess) &&
87475 +           xdr_u_int (xdrs, &msg->CookieAddr) &&
87476 +           xdr_u_int (xdrs, &msg->CookieVProc) &&
87477 +           xdr_u_int (xdrs, &msg->NextCookie) &&
87478 +           xdr_u_int (xdrs, &msg->WaitForEop));
87479 +}
87480 +#endif /* INCLUDE_XDR_INLINE */
87481 +
87482 +/*
87483 + * Local variables:
87484 + * c-file-style: "stroustrup"
87485 + * End:
87486 + */
87487 +#endif /* __ELAN3_NETERR_RPC_H */
87488 diff -urN clean/include/elan3/perm.h linux-2.6.9/include/elan3/perm.h
87489 --- clean/include/elan3/perm.h  1969-12-31 19:00:00.000000000 -0500
87490 +++ linux-2.6.9/include/elan3/perm.h    2003-09-24 09:57:24.000000000 -0400
87491 @@ -0,0 +1,29 @@
87492 +/*
87493 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87494 + *
87495 + *    For licensing information please see the supplied COPYING file
87496 + *
87497 + */
87498 +
87499 +#ifndef __ELAN3_PERM_H
87500 +#define __ELAN3_PERM_H
87501 +
87502 +#ident "$Id: perm.h,v 1.7 2003/09/24 13:57:24 david Exp $"
87503 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/perm.h,v $*/
87504 +
87505 +#define ELAN3_PERM_NULL                0x00
87506 +#define ELAN3_PERM_LOCAL_READ  0x04
87507 +#define ELAN3_PERM_READ                0x08
87508 +#define ELAN3_PERM_NOREMOTE    0x0c
87509 +#define ELAN3_PERM_REMOTEREAD  0x10
87510 +#define ELAN3_PERM_REMOTEWRITE 0x14
87511 +#define ELAN3_PERM_REMOTEEVENT 0x18
87512 +#define ELAN3_PERM_REMOTEALL   0x1c
87513 +
87514 +#endif /* __ELAN3_PERM_H */
87515 +
87516 +/*
87517 + * Local variables:
87518 + * c-file-style: "stroustrup"
87519 + * End:
87520 + */
87521 diff -urN clean/include/elan3/pte.h linux-2.6.9/include/elan3/pte.h
87522 --- clean/include/elan3/pte.h   1969-12-31 19:00:00.000000000 -0500
87523 +++ linux-2.6.9/include/elan3/pte.h     2003-09-24 09:57:24.000000000 -0400
87524 @@ -0,0 +1,139 @@
87525 +/*
87526 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87527 + *
87528 + *    For licensing information please see the supplied COPYING file
87529 + *
87530 + */
87531 +
87532 +#ifndef __ELAN3_PTE_H
87533 +#define __ELAN3_PTE_H
87534 +
87535 +#ident "$Id: pte.h,v 1.26 2003/09/24 13:57:24 david Exp $"
87536 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/pte.h,v $*/
87537 +
87538 +#ifdef __cplusplus
87539 +extern          "C"
87540 +{
87541 +#endif
87542 +
87543 +#include <elan3/e3types.h>
87544 +#include <elan3/perm.h>
87545 +
87546 +typedef E3_uint64 ELAN3_PTE;
87547 +typedef E3_uint32 ELAN3_PTP;
87548 +
87549 +#define ELAN3_PTE_SIZE         (8)
87550 +#define ELAN3_PTP_SIZE         (4)
87551 +
87552 +#define ELAN3_PTE_REF          ((E3_uint64) 1 << 63)           /* 63      - referenced bit */
87553 +#define ELAN3_PTE_MOD          ((E3_uint64) 1 << 55)           /* 55      - modified bit */
87554 +#define ELAN3_RM_MASK          (ELAN3_PTE_REF | ELAN3_PTE_MOD)
87555 +
87556 +#define ELAN3_PTE_PFN_MASK     0x0000fffffffff000ull           /* [12:48] - Physical address */
87557 +
87558 +#define ELAN3_PTE_BIG_ENDIAN   0x80                            /* 7       - big endian */
87559 +#define ELAN3_PTE_64_BIT               0x40                            /* 6       - 64 bit pci address */
87560 +#define ELAN3_PTE_LOCAL                0x20                            /* 5       - local sdram */
87561 +
87562 +#define ELAN3_PTE_PERM_MASK    0x1c                            /* [2:4]   - Permissions */
87563 +#define ELAN3_PTE_PERM_SHIFT      2
87564 +
87565 +#define ELAN3_ET_MASK          0x3
87566 +#define ELAN3_ET_INVALID               0x0                                     /* [0:1] */
87567 +#define ELAN3_ET_PTP           0x1
87568 +#define ELAN3_ET_PTE           0x2
87569 +
87570 +#define ELAN3_INVALID_PTP      ((ELAN3_PTP) 0)
87571 +#define ELAN3_INVALID_PTE      ((ELAN3_PTE) 0)
87572 +
87573 +#define ELAN3_PTP_TYPE(ptp)    ((ptp) & ELAN3_ET_MASK)
87574 +#define ELAN3_PTE_TYPE(pte)    ((pte) & ELAN3_ET_MASK)
87575 +#define ELAN3_PTE_PERM(pte)    ((pte) & ELAN3_PTE_PERM_MASK)
87576 +#define ELAN3_PTE_VALID(pte)   (((pte) & ELAN3_ET_MASK) == ELAN3_ET_PTE)
87577 +#define ELAN3_PTE_ISREF(pte)   ((pte) & ELAN3_PTE_REF)
87578 +#define ELAN3_PTE_ISMOD(pte)   ((pte) & ELAN3_PTE_MOD)
87579 +#define ELAN3_PTE_WRITEABLE(pte)       (ELAN3_PERM_WRITEABLE(ELAN3_PTE_PERM(pte)))
87580 +
87581 +#define ELAN3_PERM_WRITEABLE(perm)     ((perm) == ELAN3_PERM_NOREMOTE || (perm) > ELAN3_PERM_REMOTEREAD)
87582 +#define ELAN3_PERM_REMOTE(perm)                ((perm) > ELAN3_PERM_NOREMOTE)
87583 +
87584 +#define ELAN3_PERM_READONLY(perm)      ((perm) == ELAN3_PERM_NOREMOTE ? ELAN3_PERM_LOCAL_READ : \
87585 +                                        (perm) > ELAN3_PERM_REMOTEREAD ? ELAN3_PERM_READ : (perm))
87586 +#if PAGE_SHIFT == 12
87587 +#  define ELAN3_PAGE_SHIFT     12
87588 +#else
87589 +#  define ELAN3_PAGE_SHIFT     13
87590 +#endif
87591 +
87592 +#define ELAN3_PAGE_SIZE                (1 << ELAN3_PAGE_SHIFT)
87593 +#define ELAN3_PAGE_OFFSET      (ELAN3_PAGE_SIZE-1)
87594 +#define ELAN3_PAGE_MASK                (~ELAN3_PAGE_OFFSET)
87595 +
87596 +#if ELAN3_PAGE_SHIFT == 13
87597 +#  define ELAN3_L3_SHIFT               5
87598 +#else
87599 +#  define ELAN3_L3_SHIFT               6
87600 +#endif
87601 +#define ELAN3_L2_SHIFT         6
87602 +#define ELAN3_L1_SHIFT         8
87603 +
87604 +/* Number of entries in a given level ptbl */
87605 +#define ELAN3_L3_ENTRIES               (1 << ELAN3_L3_SHIFT)
87606 +#define ELAN3_L2_ENTRIES               (1 << ELAN3_L2_SHIFT)
87607 +#define ELAN3_L1_ENTRIES               (1 << ELAN3_L1_SHIFT)
87608 +
87609 +/* Virtual address spanned by each entry */
87610 +#define ELAN3_L3_SIZE          (1 << (ELAN3_PAGE_SHIFT))
87611 +#define ELAN3_L2_SIZE          (1 << (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
87612 +#define ELAN3_L1_SIZE          (1 << (ELAN3_L3_SHIFT+ELAN3_L2_SHIFT+ELAN3_PAGE_SHIFT))
87613 +
87614 +/* Virtual address size of page table */
87615 +#define ELAN3_L1_PTSIZE          (ELAN3_L1_ENTRIES * ELAN3_L1_SIZE)
87616 +#define ELAN3_L3_PTSIZE                (ELAN3_L3_ENTRIES * ELAN3_L3_SIZE)
87617 +#define ELAN3_L2_PTSIZE                (ELAN3_L2_ENTRIES * ELAN3_L2_SIZE)
87618 +
87619 +/* Mask for offset into page table */
87620 +#define ELAN3_L1_PTOFFSET        ((ELAN3_L1_SIZE*ELAN3_L1_ENTRIES)-1)
87621 +#define ELAN3_L3_PTOFFSET      ((ELAN3_L3_SIZE*ELAN3_L3_ENTRIES)-1)
87622 +#define ELAN3_L2_PTOFFSET      ((ELAN3_L2_SIZE*ELAN3_L2_ENTRIES)-1)
87623 +
87624 +#define ELAN3_L1_INDEX(addr)   (((E3_Addr) (addr) & 0xFF000000) >> (ELAN3_L2_SHIFT+ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
87625 +#define ELAN3_L2_INDEX(addr)   (((E3_Addr) (addr) & 0x00FD0000) >> (ELAN3_L3_SHIFT+ELAN3_PAGE_SHIFT))
87626 +#define ELAN3_L3_INDEX(addr)   (((E3_Addr) (addr) & 0x0003F000) >> ELAN3_PAGE_SHIFT)
87627 +
87628 +#define        ELAN3_L1_BASE(addr)     (((E3_Addr)(addr)) & 0x00000000)
87629 +#define        ELAN3_L2_BASE(addr)     (((E3_Addr)(addr)) & 0xFF000000)
87630 +#define        ELAN3_L3_BASE(addr)     (((E3_Addr)(addr)) & 0xFFFC0000)
87631 +
87632 +/* Convert a page table pointer entry to the PT */
87633 +#define PTP_TO_PT_PADDR(ptp)   ((E3_Addr)(ptp & 0xFFFFFFFC))
87634 +
87635 +#ifdef __KERNEL__
87636 +/*
87637 + * incompatible access for permission macro.
87638 + */
87639 +extern  u_char  elan3mmu_permissionTable[8];
87640 +#define ELAN3_INCOMPAT_ACCESS(perm,access) (! (elan3mmu_permissionTable[(perm)>>ELAN3_PTE_PERM_SHIFT] & (1 << (access))))
87641 +
87642 +#define elan3_readptp(dev, ptp)                (elan3_sdram_readl (dev, ptp))
87643 +#define elan3_writeptp(dev, ptp, value)        (elan3_sdram_writel (dev, ptp, value))
87644 +#define elan3_readpte(dev, pte)                (elan3_sdram_readq (dev, pte))
87645 +#define elan3_writepte(dev,pte, value) (elan3_sdram_writeq (dev, pte, value))
87646 +
87647 +#define elan3_invalidatepte(dev, pte)  (elan3_sdram_writel (dev, pte, 0))
87648 +#define elan3_modifypte(dev,pte,new)   (elan3_sdram_writel (dev, pte, (int) (new)))
87649 +#define elan3_clrref(dev,pte)          (elan3_sdram_writeb (dev, pte + 7)
87650 +
87651 +#endif /* __KERNEL__ */
87652 +
87653 +#ifdef __cplusplus
87654 +}
87655 +#endif
87656 +
87657 +#endif /* __ELAN3_PTE_H */
87658 +
87659 +/*
87660 + * Local variables:
87661 + * c-file-style: "stroustrup"
87662 + * End:
87663 + */
87664 diff -urN clean/include/elan3/spinlock.h linux-2.6.9/include/elan3/spinlock.h
87665 --- clean/include/elan3/spinlock.h      1969-12-31 19:00:00.000000000 -0500
87666 +++ linux-2.6.9/include/elan3/spinlock.h        2003-09-24 09:57:24.000000000 -0400
87667 @@ -0,0 +1,195 @@
87668 +/*
87669 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87670 + *
87671 + *    For licensing information please see the supplied COPYING file
87672 + *
87673 + */
87674 +
87675 +#ifndef _ELAN3_SPINLOCK_
87676 +#define _ELAN3_SPINLOCK_
87677 +
87678 +#ident "$Id: spinlock.h,v 1.31 2003/09/24 13:57:24 david Exp $"
87679 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/spinlock.h,v $*/
87680 +
87681 +/*
87682 + * This spinlock is designed for main/elan processor interactions.
87683 + * The lock is split over Elan/Main memory in such a way that
87684 + * we don't end up busy-polling over the PCI.
87685 + * In the Elan memory we have two words; one is a sequence number
87686 + * and the other is a lock word for main.
87687 + * In main memory we have a copy of the sequence number which main polls when it is
87688 + * waiting for the Elan to drop the lock. Main polls this word until it becomes
87689 + * equal to the sequence number it sampled.
87690 + * The Elan drops the lock by writing the current sequence number to main memory.
87691 + * It is coded to always give priority to the Elan thread, and so when both go for the
87692 + * lock, main will back off first.
87693 + *
87694 + * 18/3/98
87695 + * This has been extended to avoid a starvation case where both the main and thread claim the
87696 + * lock and so both backoff (thread does a break). So now, main attempts to claim the
87697 + * lock by writing 'mainLock' then samples the 'sl_seq' and if it has the lock
87698 + * it sets 'mainGotLock'. The thread will now see the 'sl_mainLock' set, but will only
87699 + * backoff with a c_break_busywait() if 'mainGotLock' is set too.
87700 + */
87701 +typedef struct elan3_spinlock_elan {
87702 +    union {
87703 +       volatile E3_uint64      mainLocks;              /* main writes this dble word */
87704 +       struct {
87705 +           volatile E3_uint32  mainLock;               /* main wants a lock */
87706 +           volatile E3_uint32  mainGotLock;            /* main has the lock */
87707 +       } s;
87708 +    } sl_u;
87709 +    volatile E3_uint32         sl_seq;                 /* thread owns this word */
87710 +    volatile E3_uint32         sl_mainWait;            /* performance counter */
87711 +    volatile E3_uint32         sl_elanWait;            /* performance counter */
87712 +    volatile E3_uint32         sl_elanBusyWait;        /* performance counter */
87713 +    /* NOTE: The lock/seq words must be within the same 32-byte Elan cache-line */
87714 +    E3_uint64                   sl_pad[5];             /* pad to 64-bytes */
87715 +} ELAN3_SPINLOCK_ELAN;
87716 +
87717 +#define sl_mainLocks sl_u.mainLocks
87718 +#define sl_mainLock  sl_u.s.mainLock
87719 +#define sl_mainGotLock sl_u.s.mainGotLock
87720 +
87721 +#define SL_MAIN_RECESSIVE      1
87722 +#define SL_MAIN_DOMINANT       2
87723 +
87724 +/* Declare this as a main memory cache block for efficiency */
87725 +typedef union elan3_spinlock_main {
87726 +    volatile E3_uint32         sl_seq;                 /* copy of seq number updated by Elan */
87727 +    volatile E3_uint32         sl_Int32[E3_BLK_SIZE/sizeof (E3_uint32)];
87728 +} ELAN3_SPINLOCK_MAIN;
87729 +
87730 +/* Main/Main or Elan/Elan lock word */
87731 +typedef volatile int   ELAN3_SPINLOCK;
87732 +
87733 +#ifdef __ELAN3__
87734 +
87735 +/* Main/Elan interlock */
87736 +
87737 +#define ELAN3_ME_SPINENTER(SLE,SL) do {\
87738 +                       asm volatile ("! elan3_spinlock store barrier");\
87739 +                       (SLE)->sl_seq++; \
87740 +                       if ((SLE)->sl_mainLock) \
87741 +                         elan3_me_spinblock(SLE, SL);\
87742 +                       asm volatile ("! elan3_spinlock store barrier");\
87743 +               } while (0)
87744 +#define ELAN3_ME_SPINEXIT(SLE,SL) do {\
87745 +                       asm volatile ("! elan3_spinlock store barrier");\
87746 +                       (SL)->sl_seq = (SLE)->sl_seq;\
87747 +                       asm volatile ("! elan3_spinlock store barrier");\
87748 +               } while (0)
87749 +
87750 +
87751 +/* Elan/Elan interlock */
87752 +#define ELAN3_SPINENTER(L)     do {\
87753 +                          asm volatile ("! store barrier");\
87754 +                          if (c_swap ((L), 1)) elan3_spinenter(L);\
87755 +                          asm volatile ("! store barrier");\
87756 +                       } while (0)
87757 +#define ELAN3_SPINEXIT(L)      do {\
87758 +                          asm volatile ("! store barrier");\
87759 +                          c_swap((L), 0);\
87760 +                          asm volatile ("! store barrier");\
87761 +                       } while (0)
87762 +
87763 +extern void elan3_me_spinblock (ELAN3_SPINLOCK_ELAN *sle, ELAN3_SPINLOCK_MAIN *sl);
87764 +extern void elan3_spinenter (ELAN3_SPINLOCK *l);
87765 +
87766 +#else                     
87767 +
87768 +/* Main/Elan interlock */
87769 +#ifdef DEBUG
87770 +#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\
87771 +                       register E3_int32 maxLoops = 0x7fffffff;        \
87772 +                       register E3_uint32 seq;\
87773 +                       elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
87774 +                       MEMBAR_STORELOAD(); \
87775 +                       seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
87776 +                       while (seq != (SL)->sl_seq) {\
87777 +                           elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \
87778 +                           while ((SL)->sl_seq == (seq-1) && maxLoops--) ; \
87779 +                           if (maxLoops < 0) { \
87780 +                               printf("Failed to get ME lock %lx/%lx seq %d sle_seq %d sl_seq %d\n", \
87781 +                                      SL, SLE, seq, \
87782 +                                      elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)), \
87783 +                                      (SL)->sl_seq); \
87784 +                           } \
87785 +                           elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
87786 +                           MEMBAR_STORELOAD(); \
87787 +                           seq = elan3_read32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
87788 +                       }\
87789 +                       elan3_write32_sdram(SDRAM, (SLE) + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
87790 +                       MEMBAR_LOADLOAD();\
87791 +               } while (0)
87792 +#else
87793 +#define ELAN3_ME_SPINENTER(SDRAM,SLE,SL) do {\
87794 +                       register E3_uint32 seq;\
87795 +                       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
87796 +                       MEMBAR_STORELOAD(); \
87797 +                       seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
87798 +                       while (seq != (SL)->sl_seq) {\
87799 +                           elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), 0); \
87800 +                           while ((SL)->sl_seq == (seq-1)) ; \
87801 +                           elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
87802 +                           MEMBAR_STORELOAD(); \
87803 +                           seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
87804 +                       }\
87805 +                       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
87806 +                       MEMBAR_LOADLOAD();\
87807 +               } while (0)
87808 +#endif
87809 +#define ELAN3_ME_FORCEENTER(SDRAM,SLE,SL) do { \
87810 +       register E3_uint32 seq; \
87811 +       MEMBAR_STORELOAD(); \
87812 +       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_DOMINANT); \
87813 +       MEMBAR_STORELOAD(); \
87814 +       seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
87815 +       while (seq != (SL)->sl_seq) \
87816 +       { \
87817 +               /* NOTE: we MUST call elan3_usecspin here for kernel comms */\
87818 +               while ((SL)->sl_seq == (seq)-1) \
87819 +                       elan3_usecspin (1); \
87820 +               seq = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
87821 +       } \
87822 +       elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
87823 +       MEMBAR_LOADLOAD(); \
87824 +} while (0)
87825 +
87826 +#define ELAN3_ME_TRYENTER(SDRAM,SLE,SL,SEQ) do { \
87827 +    elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLock), SL_MAIN_RECESSIVE); \
87828 +    MEMBAR_STORELOAD(); \
87829 +    SEQ = elan3_read32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_seq)); \
87830 +} while (0)
87831 +
87832 +#define ELAN3_ME_CHECKENTER(SDRAM,SLE,SL,SEQ) do { \
87833 +    if ((SEQ) == ((SL)->sl_seq)) { \
87834 +        elan3_write32_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainGotLock), 1); \
87835 +        MEMBAR_LOADLOAD();\
87836 +    } \
87837 +    else ELAN3_ME_SPINENTER(SLE,SL); \
87838 +} while (0)
87839 +       
87840 +#define ELAN3_ME_SPINEXIT(SDRAM,SLE,SL) do {\
87841 +                       MEMBAR_STORESTORE(); \
87842 +                       elan3_write64_sdram(SDRAM, SLE + offsetof(ELAN3_SPINLOCK_ELAN, sl_mainLocks), 0); \
87843 +                       MEMBAR_STORESTORE(); \
87844 +               } while (0)
87845 +
87846 +
87847 +/* Main/Main */
87848 +#define ELAN3_SPINENTER(L)     do {\
87849 +                          while (c_swap ((L), 1)) ; \
87850 +                       } while (0)
87851 +#define ELAN3_SPINEXIT(L)      do {\
87852 +                          c_swap((L), 0);\
87853 +                       } while (0)
87854 +#endif /* _ELAN3_ */
87855 +
87856 +#endif /* _ELAN3_SPINLOCK_H */
87857 +
87858 +/*
87859 + * Local variables:
87860 + * c-file-style: "stroustrup"
87861 + * End:
87862 + */
87863 diff -urN clean/include/elan3/thread.h linux-2.6.9/include/elan3/thread.h
87864 --- clean/include/elan3/thread.h        1969-12-31 19:00:00.000000000 -0500
87865 +++ linux-2.6.9/include/elan3/thread.h  2002-08-09 07:23:34.000000000 -0400
87866 @@ -0,0 +1,137 @@
87867 +/*
87868 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
87869 + *
87870 + *    For licensing information please see the supplied COPYING file
87871 + *
87872 + */
87873 +
87874 +#ifndef _ELAN3_THREAD_H
87875 +#define _ELAN3_THREAD_H
87876 +
87877 +#ident "$Id: thread.h,v 1.17 2002/08/09 11:23:34 addy Exp $"
87878 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/thread.h,v $*/
87879 +
87880 +/* Alignment for a stack frame */
87881 +#define E3_STACK_ALIGN         (64)
87882 +
87883 +typedef struct _E3_Frame {
87884 +    E3_uint32         fr_local[8];             /* saved locals (not used) */
87885 +    E3_uint32         fr_arg[6];               /* saved arguements o0 -> o5 */
87886 +    E3_Addr           fr_savefp;               /* saved frame pointer o6 */
87887 +    E3_Addr           fr_savepc;               /* saved program counter o7 */
87888 +    E3_Addr           fr_stret;                /* stuct return addr */
87889 +    E3_uint32         fr_argd[6];              /* arg dump area */
87890 +    E3_uint32         fr_argx[1];              /* array of args past the sixth */
87891 +} E3_Frame;
87892 +
87893 +typedef struct _E3_Stack {
87894 +    E3_uint32          Locals[8];
87895 +    E3_uint32          Ins[8];
87896 +    E3_uint32          Globals[8];
87897 +    E3_uint32          Outs[8];
87898 +} E3_Stack;
87899 +
87900 +typedef struct _E3_OutsRegs {
87901 +   E3_uint32   o[8];                           /* o6 == pc, o7 == fptr */
87902 +} E3_OutsRegs;
87903 +
87904 +/*
87905 + * "Magic" value for stack pointer to be ignored.
87906 + */
87907 +#define VanishingStackPointer  0x42
87908 +
87909 +
87910 +/*
87911 + * When the Elan traps the N & Z CC bits are held in the NPC
87912 + * and the V & C bits are in the PC
87913 + */
87914 +#define PSR_C_BIT      (1)
87915 +#define PSR_V_BIT      (2)
87916 +#define PSR_Z_BIT      (1)
87917 +#define PSR_N_BIT      (2)
87918 +#define CC_MASK                (3)
87919 +#define PC_MASK        (~3)
87920 +#define SP_MASK                (~3)
87921 +
87922 +/*
87923 + * Threads processor Opcodes.
87924 + */
87925 +#define OPCODE_MASK            (0xC1F80000)
87926 +#define OPCODE_IMM             (1 << 13)
87927 +
87928 +#define OPCODE_CLASS(instr)    ((instr) & 0xC0000000)
87929 +#define OPCODE_CLASS_0         0x00000000
87930 +#define OPCODE_CLASS_1         0x40000000
87931 +#define OPCODE_CLASS_2         0x80000000
87932 +#define OPCODE_CLASS_3         0xC0000000
87933 +
87934 +#define OPCODE_CPOP            0x81B00000
87935 +#define OPCODE_Ticc            0x81D00000
87936 +
87937 +#define OPCODE_FCODE_SHIFT     19
87938 +#define OPCODE_FCODE_MASK      0x1f
87939 +#define OPCODE_NOT_ALUOP       0x01000000
87940 +
87941 +#define OPCODE_SLL             0x81280000
87942 +#define OPCODE_SRL             0x81300000
87943 +#define OPCODE_SRA             0x81380000
87944 +
87945 +#define OPCODE_OPEN            0x81600000
87946 +#define OPCODE_CLOSE           0x81680000
87947 +#define OPCODE_BREAKTEST       0x81700000
87948 +
87949 +#define OPCODE_BREAK           0x81a00000
87950 +#define OPCODE_SUSPEND         0x81a80000
87951 +#define OPCODE_WAIT            0x81b00000
87952 +
87953 +#define OPCODE_JMPL            0x81c00000
87954 +
87955 +#define OPCODE_LD              0xC0000000
87956 +#define OPCODE_LDD             0xC0180000
87957 +
87958 +#define OPCODE_LDBLOCK16       0xC0900000
87959 +#define OPCODE_LDBLOCK32       0xC0800000
87960 +#define OPCODE_LDBLOCK64       0xC0980000
87961 +
87962 +#define OPCODE_ST              0xC0200000
87963 +#define OPCODE_STD             0xC0380000
87964 +
87965 +#define OPCODE_SWAP            0xC0780000
87966 +
87967 +#define OPCODE_STBLOCK16       0xC0b00000
87968 +#define OPCODE_STBLOCK32       0xC0a00000
87969 +#define OPCODE_STBLOCK64       0xC0b80000
87970 +
87971 +#define OPCODE_CLASS0_MASK     0xC1C00000
87972 +#define OPCODE_SETHI           0x01000000
87973 +#define OPCODE_BICC            0x00800000
87974 +#define OPCODE_SENDREG         0x01800000
87975 +#define OPCODE_SENDMEM         0x01c00000
87976 +
87977 +#define OPCODE_BICC_BN         0x00000000
87978 +#define OPCODE_BICC_BE         0x02000000
87979 +#define OPCODE_BICC_BLE                0x04000000
87980 +#define OPCODE_BICC_BL         0x06000000
87981 +#define OPCODE_BICC_BLEU       0x08000000
87982 +#define OPCODE_BICC_BCS                0x0A000000
87983 +#define OPCODE_BICC_BNEG       0x0C000000
87984 +#define OPCODE_BICC_BVS                0x0E000000
87985 +
87986 +#define OPCODE_BICC_MASK       0x0E000000
87987 +#define OPCODE_BICC_ANNUL      0x20000000
87988 +
87989 +#define INSTR_RS2(instr)       (((instr) >>  0) & 0x1F)
87990 +#define INSTR_RS1(instr)       (((instr) >> 14) & 0x1F)
87991 +#define INSTR_RD(instr)                (((instr) >> 25) & 0x1F)
87992 +#define INSTR_IMM(instr)       (((instr) & 0x1000) ? ((instr) & 0xFFF) | 0xFFFFF000 : (instr) & 0xFFF)
87993 +
87994 +#define Ticc_COND(instr)       INSTR_RD(instr)
87995 +#define Ticc_TA                        8
87996 +
87997 +#endif /* _ELAN3_THREAD_H */
87998 +
87999 +/*
88000 + * Local variables:
88001 + * c-file-style: "stroustrup"
88002 + * End:
88003 + */
88004 diff -urN clean/include/elan3/threadlinkage.h linux-2.6.9/include/elan3/threadlinkage.h
88005 --- clean/include/elan3/threadlinkage.h 1969-12-31 19:00:00.000000000 -0500
88006 +++ linux-2.6.9/include/elan3/threadlinkage.h   2002-08-09 07:23:34.000000000 -0400
88007 @@ -0,0 +1,103 @@
88008 +/*
88009 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88010 + *
88011 + *    For licensing information please see the supplied COPYING file
88012 + *
88013 + */
88014 +
88015 +#ifndef __ELAN3_THREADLINKAGE_H
88016 +#define        __ELAN3_THREADLINKAGE_H
88017 +
88018 +#ident "$Id: threadlinkage.h,v 1.6 2002/08/09 11:23:34 addy Exp $"
88019 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadlinkage.h,v $*/
88020 +
88021 +#ifdef __cplusplus
88022 +extern "C" {
88023 +#endif
88024 +
88025 +#if defined(_ASM) || defined(__LANGUAGE_ASSEMBLY__)
88026 +
88027 +/*
88028 + * Macro to define weak symbol aliases. These are similar to the ANSI-C
88029 + *     #pragma weak name = _name
88030 + * except a compiler can determine type. The assembler must be told. Hence,
88031 + * the second parameter must be the type of the symbol (i.e.: function,...)
88032 + */
88033 +#define        ANSI_PRAGMA_WEAK(sym, stype)    \
88034 +       .weak   sym; \
88035 +       .type sym, #stype; \
88036 +/* CSTYLED */ \
88037 +sym    = _/**/sym
88038 +
88039 +/*
88040 + * ENTRY provides the standard procedure entry code
88041 + */
88042 +#define        ENTRY(x) \
88043 +       .section        ".text"; \
88044 +       .align  4; \
88045 +       .global x; \
88046 +x:
88047 +
88048 +/*
88049 + * ENTRY2 is identical to ENTRY but provides two labels for the entry point.
88050 + */
88051 +#define        ENTRY2(x, y) \
88052 +       .section        ".text"; \
88053 +       .align  4; \
88054 +       .global x, y; \
88055 +/* CSTYLED */ \
88056 +x:     ; \
88057 +y:
88058 +
88059 +
88060 +/*
88061 + * ALTENTRY provides for additional entry points.
88062 + */
88063 +#define        ALTENTRY(x) \
88064 +       .global x; \
88065 +x:
88066 +
88067 +/*
88068 + * DGDEF and DGDEF2 provide global data declarations.
88069 + *
88070 + * DGDEF provides a word aligned word of storage.
88071 + *
88072 + * DGDEF2 allocates "sz" bytes of storage with **NO** alignment.  This
88073 + * implies this macro is best used for byte arrays.
88074 + *
88075 + * DGDEF3 allocates "sz" bytes of storage with "algn" alignment.
88076 + */
88077 +#define        DGDEF2(name, sz) \
88078 +       .section        ".data"; \
88079 +       .global name; \
88080 +       .size   name, sz; \
88081 +name:
88082 +
88083 +#define        DGDEF3(name, sz, algn) \
88084 +       .section        ".data"; \
88085 +       .align  algn; \
88086 +       .global name; \
88087 +       .size   name, sz; \
88088 +name:
88089 +
88090 +#define        DGDEF(name)     DGDEF3(name, 4, 4)
88091 +
88092 +/*
88093 + * SET_SIZE trails a function and set the size for the ELF symbol table.
88094 + */
88095 +#define        SET_SIZE(x) \
88096 +       .size   x, (.-x)
88097 +
88098 +#endif /* _ASM || __LANGUAGE_ASSEMBLY__ */
88099 +
88100 +#ifdef __cplusplus
88101 +}
88102 +#endif
88103 +
88104 +#endif /* __ELAN3_THREADLINKAGE_H */
88105 +
88106 +/*
88107 + * Local variables:
88108 + * c-file-style: "stroustrup"
88109 + * End:
88110 + */
88111 diff -urN clean/include/elan3/threadsyscall.h linux-2.6.9/include/elan3/threadsyscall.h
88112 --- clean/include/elan3/threadsyscall.h 1969-12-31 19:00:00.000000000 -0500
88113 +++ linux-2.6.9/include/elan3/threadsyscall.h   2003-09-24 09:57:24.000000000 -0400
88114 @@ -0,0 +1,64 @@
88115 +/*
88116 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88117 + *
88118 + *    For licensing information please see the supplied COPYING file
88119 + *
88120 + */
88121 +
88122 +#ifndef __ELAN3_SYSCALL_H
88123 +#define __ELAN3_SYSCALL_H
88124 +
88125 +#ident "$Id: threadsyscall.h,v 1.12 2003/09/24 13:57:24 david Exp $"
88126 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/threadsyscall.h,v $*/
88127 +
88128 +/* 
88129 + * This file contains the system calls supported from the Elan.
88130 + */
88131 +#define ELAN3_DEBUG_TRAPNUM    5       /* thread debugging trap */
88132 +#define ELAN3_ABORT_TRAPNUM    6       /* bad abort trap */
88133 +#define ELAN3_ELANCALL_TRAPNUM 7       /* elansyscall trap */
88134 +#define ELAN3_SYSCALL_TRAPNUM  8       /* new syscall trap */
88135 +
88136 +#define ELAN3_T_SYSCALL_CODE   0       /* offsets in struct elan3_t_syscall */
88137 +#define ELAN3_T_SYSCALL_ERRNO  4
88138 +
88139 +#define ELAN3_SYS_open         1
88140 +#define ELAN3_SYS_close                2
88141 +#define ELAN3_SYS_write                3
88142 +#define ELAN3_SYS_read         4
88143 +#define ELAN3_SYS_poll         5
88144 +#define ELAN3_SYS_ioctl                6
88145 +#define ELAN3_SYS_lseek                7
88146 +#define ELAN3_SYS_mmap         8
88147 +#define ELAN3_SYS_munmap       9
88148 +#define ELAN3_SYS_kill         10
88149 +#define ELAN3_SYS_getpid       11
88150 +
88151 +#if !defined(SYS_getpid) && defined(__NR_getxpid) 
88152 +#define SYS_getpid __NR_getxpid                /* for linux */
88153 +#endif
88154 +
88155 +#if !defined(_ASM) && !defined(__LANGUAGE_ASSEMBLY__)
88156 +
88157 +extern int     elan3_t_open (const char *, int, ...);
88158 +extern ssize_t elan3_t_write (int, const void *, unsigned);
88159 +extern ssize_t elan3_t_read(int, void *, unsigned);
88160 +extern int     elan3_t_ioctl(int, int, ...);
88161 +extern int     elan3_t_close(int);
88162 +extern off_t   elan3_t_lseek(int filedes, off_t offset, int whence);
88163 +
88164 +extern caddr_t elan3_t_mmap(caddr_t, size_t, int, int, int, off_t);
88165 +extern int     elan3_t_munmap(caddr_t, size_t);
88166 +
88167 +extern int     elan3_t_getpid(void);
88168 +extern void    elan3_t_abort(char *str);
88169 +
88170 +#endif /* !_ASM && ! __LANGUAGE_ASSEMBLY__ */
88171 +
88172 +#endif /* __ELAN3_SYSCALL_H */
88173 +
88174 +/*
88175 + * Local variables:
88176 + * c-file-style: "stroustrup"
88177 + * End:
88178 + */
88179 diff -urN clean/include/elan3/trtype.h linux-2.6.9/include/elan3/trtype.h
88180 --- clean/include/elan3/trtype.h        1969-12-31 19:00:00.000000000 -0500
88181 +++ linux-2.6.9/include/elan3/trtype.h  2002-08-09 07:23:34.000000000 -0400
88182 @@ -0,0 +1,116 @@
88183 +/*
88184 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88185 + *
88186 + *    For licensing information please see the supplied COPYING file
88187 + *
88188 + */
88189 +
88190 +#ifndef _ELAN3_TRTYPE_H
88191 +#define _ELAN3_TRTYPE_H
88192 +
88193 +#ident "$Id: trtype.h,v 1.13 2002/08/09 11:23:34 addy Exp $"
88194 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/trtype.h,v $ */
88195 +
88196 +/*<15> ackNow  */
88197 +#define TR_SENDACK     (1 << 15)
88198 +
88199 +#define TR_SIZE_SHIFT  12
88200 +#define TR_SIZE_MASK   7
88201 +
88202 +/*<14:12> Size 0, 1, 2, 4, 8, 16, 32, 64  Double Words
88203 +          Bit 14 is forced to zero currently so that only size 0, 1, 2, 4 are
88204 +         allowed    */
88205 +
88206 +#define TR_SIZE0       (0 << TR_SIZE_SHIFT)
88207 +#define TR_SIZE1       (1 << TR_SIZE_SHIFT)
88208 +#define TR_SIZE2       (2 << TR_SIZE_SHIFT)
88209 +#define TR_SIZE4       (3 << TR_SIZE_SHIFT)
88210 +#define TR_SIZE8       (4 << TR_SIZE_SHIFT)
88211 +
88212 +#define TR_64_BIT_ADDR (1 << 11)
88213 +#define TR_LAST_TRANS  (1 << 10)
88214 +
88215 +#define TR_WRITEBLOCK_BIT      (1 << 9)
88216 +#define TR_WRITEBLOCK          (TR_WRITEBLOCK_BIT | TR_SIZE8)
88217 +
88218 +
88219 +#define TR_WRITEBLOCK_SIZE     64
88220 +
88221 +/*
88222 + * write-block
88223 + */
88224 +/*     WriteBlock      <8:7>   Data type
88225 +                       <6:0>   Part write size */
88226 +#define TR_TYPE_SHIFT  7
88227 +#define TR_TYPE_MASK   ((1 << 2) - 1)
88228 +
88229 +#define TR_TYPE_BYTE   0
88230 +#define TR_TYPE_SHORT  1
88231 +#define TR_TYPE_WORD   2
88232 +#define TR_TYPE_DWORD  3
88233 +
88234 +#define TR_PARTSIZE_MASK ((1 << 7) -1)
88235 +
88236 +#define TR_WAIT_FOR_EOP        (1 << 8)
88237 +
88238 +/*
88239 + * trace-route format 
88240 + */
88241 +#define TR_TRACEROUTE0_CHANID(val)             ((val) & 1)                     /* 0     Chan Id */
88242 +#define TR_TRACEROUTE0_LINKID(val)             (((val) >> 1) & 7)              /* 1:3   Link Id */
88243 +#define TR_TRACEROUTE0_REVID(val)              (((val) >> 4) & 7)              /* 4:6   Revision ID */
88244 +#define TR_TRACEROUTE0_BCAST_TOP_PIN(val)      (((val) >> 7) & 1)              /* 7     Broadcast Top Pin (REV B) */
88245 +#define TR_TRACEROUTE0_LNR(val)                        ((val) >> 8)                    /* 8:15  Global Link Not Ready */
88246 +
88247 +#define TR_TRACEROUTE1_PRIO(val)               ((val & 0xF))                   /* 0:3   Arrival Priority (REV A) */
88248 +#define TR_TRACEROUTE1_AGE(val)                        (((val) >> 4) & 0xF)            /* 4:7   Priority Held(Age) (REV A) */
88249 +#define TR_TRACEROUTE1_ROUTE_SELECTED(val)     ((val) & 0xFF)                  /* 0:7   Arrival age (REV B) */
88250 +#define TR_TRACEROUTE1_BCAST_TOP(val)          (((val) >> 8) & 7)              /* 8:10  Broadcast Top */
88251 +#define TR_TRACEROUTE1_ADAPT(val)              (((val) >> 12) & 3)             /* 12:13 This Adaptive Value (REV A) */
88252 +#define TR_TRACEROUTE1_BCAST_BOT(val)          (((val) >> 12) & 7)             /* 12:14 Broadcast Bottom (REV B) */
88253 +
88254 +#define TR_TRACEROUTE2_ARRIVAL_AGE(val)                ((val) & 0xF)                   /* 0:3   Arrival Age (REV B) */
88255 +#define TR_TRACEROUTE2_CURR_AGE(val)           (((val) >> 4) & 0xF)            /* 4:7   Current Age (REV B) */
88256 +#define TR_TRACEROUTE2_BUSY(val)               (((val) >> 8) & 0xFF)           /* 8:15  Busy (REV B) */
88257 +
88258 +#define TR_TRACEROUTE_SIZE     32
88259 +#define TR_TRACEROUTE_ENTRIES  (TR_TRACEROUTE_SIZE/2)
88260 +
88261 +/*
88262 + * non-write block
88263 + */
88264 +#define TR_OPCODE_MASK         (((1 << 8) - 1) |                       \
88265 +                                (TR_SIZE_MASK << TR_SIZE_SHIFT) |      \
88266 +                                TR_WRITEBLOCK_BIT)
88267 +
88268 +#define TR_NOP_TRANS           (0x0 | TR_SIZE0)
88269 +#define TR_SETEVENT            (0x0 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS)
88270 +#define TR_REMOTEDMA           (0x1 | TR_SIZE4 | TR_SENDACK | TR_LAST_TRANS)
88271 +#define TR_LOCKQUEUE           (0x2 | TR_SIZE0)
88272 +#define TR_UNLOCKQUEUE         (0x3 | TR_SIZE0 | TR_SENDACK | TR_LAST_TRANS)
88273 +
88274 +#define TR_SENDDISCARD         (0x4 | TR_SIZE0)
88275 +#define TR_TRACEROUTE          (0x5 | TR_SIZE4)
88276 +
88277 +#define TR_DMAIDENTIFY         (0x6 | TR_SIZE0)
88278 +#define TR_THREADIDENTIFY      (0x7 | TR_SIZE1)
88279 +
88280 +#define TR_GTE                 (0x8 | TR_SIZE1)
88281 +#define TR_LT                  (0x9 | TR_SIZE1)
88282 +#define TR_EQ                  (0xA | TR_SIZE1)
88283 +#define TR_NEQ                 (0xB | TR_SIZE1)
88284 +
88285 +#define TR_WRITEWORD           (0xC | TR_SIZE1)
88286 +#define TR_WRITEDOUBLEWORD     (0xD | TR_SIZE1)
88287 +#define TR_TESTANDWRITE        (0xE | TR_SIZE1)
88288 +#define TR_ATOMICADDWORD       (0xF | TR_SIZE1 | TR_SENDACK | TR_LAST_TRANS)
88289 +#define TR_OPCODE_TYPE_MASK    0xff
88290 +
88291 +
88292 +#endif /* notdef _ELAN3_TRTYPE_H */
88293 +
88294 +/*
88295 + * Local variables:
88296 + * c-file-style: "stroustrup"
88297 + * End:
88298 + */
88299 diff -urN clean/include/elan3/urom_addrs.h linux-2.6.9/include/elan3/urom_addrs.h
88300 --- clean/include/elan3/urom_addrs.h    1969-12-31 19:00:00.000000000 -0500
88301 +++ linux-2.6.9/include/elan3/urom_addrs.h      2002-07-12 10:28:21.000000000 -0400
88302 @@ -0,0 +1,262 @@
88303 +#define MI_WaitForRemoteDescRead 0x0
88304 +#define MI_WaitForRemoteDescRead2 0x1
88305 +#define MI_WaitForRemoteDescRead2_seq1 0x2
88306 +#define MI_SendRemoteDmaRoutes 0x3
88307 +#define MI_IProcTrapped 0x4
88308 +#define MI_DProcTrapped 0x5
88309 +#define MI_CProcTrapped 0x6
88310 +#define MI_TProcTrapped 0x7
88311 +#define MI_TestWhichDmaQueue 0x8
88312 +#define MI_TestWhichDmaQueue_seq1 0x9
88313 +#define MI_InputRemoteDmaUpdateBPtr 0xa
88314 +#define MI_FixupQueueContextAndRemoteBit 0xb
88315 +#define MI_FixupQueueContextAndRemoteBit_seq1 0xc
88316 +#define MI_FixupQueueContextAndRemoteBit_seq2 0xd
88317 +#define MI_FixupQueueContextAndRemoteBit_seq3 0xe
88318 +#define MI_FixupQueueContextAndRemoteBit_seq4 0xf
88319 +#define MI_RunDmaCommand 0x10
88320 +#define MI_DoSendRemoteDmaDesc 0x11
88321 +#define MI_DequeueNonSysCntxDma 0x12
88322 +#define MI_WaitForRemoteDescRead1 0x13
88323 +#define MI_RemoteDmaCommand 0x14
88324 +#define MI_WaitForRemoteRoutes 0x15
88325 +#define MI_DequeueSysCntxDma 0x16
88326 +#define MI_ExecuteDmaDescriptorForQueue 0x17
88327 +#define MI_ExecuteDmaDescriptor1 0x18
88328 +#define MI_ExecuteDmaDescriptor1_seq1 0x19
88329 +#define MI_ExecuteDmaDescriptor1_seq2 0x1a
88330 +#define MI_ExecuteDmaDescriptor1_seq3 0x1b
88331 +#define MI_GetNewSizeInProg 0x1c
88332 +#define MI_GetNewSizeInProg_seq1 0x1d
88333 +#define MI_FirstBlockRead 0x1e
88334 +#define MI_ExtraFirstBlockRead 0x1f
88335 +#define MI_UnimplementedError 0x20
88336 +#define MI_UpdateDescriptor 0x21
88337 +#define MI_UpdateDescriptor_seq1 0x22
88338 +#define MI_UpdateDescriptor_seq2 0x23
88339 +#define MI_UpdateDescriptor_seq3 0x24
88340 +#define MI_UpdateDescriptor_seq4 0x25
88341 +#define MI_UpdateDescriptor_seq5 0x26
88342 +#define MI_GetNextSizeInProg 0x27
88343 +#define MI_DoStopThisDma 0x28
88344 +#define MI_DoStopThisDma_seq1 0x29
88345 +#define MI_GenNewBytesToRead 0x2a
88346 +#define MI_WaitForEventReadTy1 0x2b
88347 +#define MI_WaitUpdateEvent 0x2c
88348 +#define MI_WaitUpdateEvent_seq1 0x2d
88349 +#define MI_DoSleepOneTickThenRunable 0x2e
88350 +#define MI_RunEvent 0x2f
88351 +#define MI_EnqueueThread 0x30
88352 +#define MI_CheckContext0 0x31
88353 +#define MI_EnqueueDma 0x32
88354 +#define MI_CprocTrapping 0x33
88355 +#define MI_CprocTrapping_seq1 0x34
88356 +#define MI_WaitForRemoteRoutes1 0x35
88357 +#define MI_SetEventCommand 0x36
88358 +#define MI_DoSetEvent 0x37
88359 +#define MI_DoRemoteSetEventNowOrTrapQueueingDma 0x38
88360 +#define MI_DoRemoteSetEventNowOrTrapQueueingDma_seq1 0x39
88361 +#define MI_SendRemoteDmaRoutes2 0x3a
88362 +#define MI_WaitForRemoteRoutes2 0x3b
88363 +#define MI_WaitEventCommandTy0 0x3c
88364 +#define MI_DequeueNonSysCntxDma2 0x3d
88365 +#define MI_WaitEventCommandTy1 0x3e
88366 +#define MI_WaitEventCommandTy1_seq1 0x3f
88367 +#define MI_DequeueNonSysCntxThread 0x40
88368 +#define MI_DequeueSysCntxDma1 0x41
88369 +#define MI_DequeueSysCntxThread 0x42
88370 +#define MI_TestNonSysCntxDmaQueueEmpty 0x43
88371 +#define MI_TestNonSysCntxDmaQueueEmpty_seq1 0x44
88372 +#define MI_TestNonSysCntxDmaQueueEmpty_seq2 0x45
88373 +#define MI_RunThreadCommand 0x46
88374 +#define MI_SetEventWaitForLastAcess 0x47
88375 +#define MI_SetEventReadWait 0x48
88376 +#define MI_SetEventReadWait_seq1 0x49
88377 +#define MI_TestEventType 0x4a
88378 +#define MI_TestEventType_seq1 0x4b
88379 +#define MI_TestEventBit2 0x4c
88380 +#define MI_DmaDescOrBlockCopyOrChainedEvent 0x4d
88381 +#define MI_RunThread 0x4e
88382 +#define MI_RunThread1 0x4f
88383 +#define MI_RunThread1_seq1 0x50
88384 +#define MI_IncDmaSysCntxBPtr 0x51
88385 +#define MI_IncDmaSysCntxBPtr_seq1 0x52
88386 +#define MI_IncDmaSysCntxBPtr_seq2 0x53
88387 +#define MI_WaitForCntxDmaDescRead 0x54
88388 +#define MI_FillInContext 0x55
88389 +#define MI_FillInContext_seq1 0x56
88390 +#define MI_WriteNewDescToQueue 0x57
88391 +#define MI_WriteNewDescToQueue_seq1 0x58
88392 +#define MI_TestForQueueWrap 0x59
88393 +#define MI_TestForQueueWrap_seq1 0x5a
88394 +#define MI_TestQueueIsFull 0x5b
88395 +#define MI_TestQueueIsFull_seq1 0x5c
88396 +#define MI_TestQueueIsFull_seq2 0x5d
88397 +#define MI_CheckPsychoShitFixup 0x5e
88398 +#define MI_PsychoShitFixupForcedRead 0x5f
88399 +#define MI_PrepareDMATimeSlice 0x60
88400 +#define MI_PrepareDMATimeSlice_seq1 0x61
88401 +#define MI_TProcRestartFromTrapOrTestEventBit2 0x62
88402 +#define MI_TProcRestartFromTrapOrTestEventBit2_seq1 0x63
88403 +#define MI_WaitForGlobalsRead 0x64
88404 +#define MI_WaitForNPCRead 0x65
88405 +#define MI_EventInterrupt 0x66
88406 +#define MI_EventInterrupt_seq1 0x67
88407 +#define MI_EventInterrupt_seq2 0x68
88408 +#define MI_EventInterrupt_seq3 0x69
88409 +#define MI_TestSysCntxDmaQueueEmpty 0x6a
88410 +#define MI_TestSysCntxDmaQueueEmpty_seq1 0x6b
88411 +#define MI_TestIfRemoteDesc 0x6c
88412 +#define MI_DoDmaLocalSetEvent 0x6d
88413 +#define MI_DoDmaLocalSetEvent_seq1 0x6e
88414 +#define MI_DoDmaLocalSetEvent_seq2 0x6f
88415 +#define MI_DmaLoop1 0x70
88416 +#define MI_ExitDmaLoop 0x71
88417 +#define MI_ExitDmaLoop_seq1 0x72
88418 +#define MI_RemoteDmaTestPAckType 0x73
88419 +#define MI_PacketDiscardOrTestFailRecIfCCis0 0x74
88420 +#define MI_PacketDiscardOrTestFailRecIfCCis0_seq1 0x75
88421 +#define MI_TestNackFailIsZero2 0x76
88422 +#define MI_TestNackFailIsZero3 0x77
88423 +#define MI_DmaFailCountError 0x78
88424 +#define MI_TestDmaForSysCntx 0x79
88425 +#define MI_TestDmaForSysCntx_seq1 0x7a
88426 +#define MI_TestDmaForSysCntx_seq2 0x7b
88427 +#define MI_TestAeqB2 0x7c
88428 +#define MI_TestAeqB2_seq1 0x7d
88429 +#define MI_GetNextDmaDescriptor 0x7e
88430 +#define MI_DequeueSysCntxDma2 0x7f
88431 +#define MI_InputSetEvent 0x80
88432 +#define MI_PutBackSysCntxDma 0x81
88433 +#define MI_PutBackSysCntxDma_seq1 0x82
88434 +#define MI_PutBackSysCntxDma_seq2 0x83
88435 +#define MI_InputRemoteDma 0x84
88436 +#define MI_InputRemoteDma_seq1 0x85
88437 +#define MI_WaitOneTickForWakeup1 0x86
88438 +#define MI_SendRemoteDmaDesc 0x87
88439 +#define MI_InputLockQueue 0x88
88440 +#define MI_CloseTheTrappedPacketIfCCis1 0x89
88441 +#define MI_CloseTheTrappedPacketIfCCis1_seq1 0x8a
88442 +#define MI_PostDmaInterrupt 0x8b
88443 +#define MI_InputUnLockQueue 0x8c
88444 +#define MI_WaitForUnLockDescRead 0x8d
88445 +#define MI_SendEOPforRemoteDma 0x8e
88446 +#define MI_LookAtRemoteAck 0x8f
88447 +#define MI_InputWriteBlockQueue 0x90
88448 +#define MI_WaitForSpStore 0x91
88449 +#define MI_TProcNext 0x92
88450 +#define MI_TProcStoppedRunning 0x93
88451 +#define MI_InputWriteBlock 0x94
88452 +#define MI_RunDmaOrDeqNonSysCntxDma 0x95
88453 +#define MI_ExecuteDmaDescriptorForRun 0x96
88454 +#define MI_ConfirmQueueLock 0x97
88455 +#define MI_DmaInputIdentify 0x98
88456 +#define MI_TProcStoppedRunning2 0x99
88457 +#define MI_TProcStoppedRunning2_seq1 0x9a
88458 +#define MI_TProcStoppedRunning2_seq2 0x9b
88459 +#define MI_ThreadInputIdentify 0x9c
88460 +#define MI_InputIdWriteAddrAndType3 0x9d
88461 +#define MI_IProcTrappedWriteStatus 0x9e
88462 +#define MI_FinishTrappingEop 0x9f
88463 +#define MI_InputTestTrans 0xa0
88464 +#define MI_TestAeqB3 0xa1
88465 +#define MI_ThreadUpdateNonSysCntxBack 0xa2
88466 +#define MI_ThreadQueueOverflow 0xa3
88467 +#define MI_RunContext0Thread 0xa4
88468 +#define MI_RunContext0Thread_seq1 0xa5
88469 +#define MI_RunContext0Thread_seq2 0xa6
88470 +#define MI_RunDmaDesc 0xa7
88471 +#define MI_RunDmaDesc_seq1 0xa8
88472 +#define MI_RunDmaDesc_seq2 0xa9
88473 +#define MI_TestAeqB 0xaa
88474 +#define MI_WaitForNonCntxDmaDescRead 0xab
88475 +#define MI_DmaQueueOverflow 0xac
88476 +#define MI_BlockCopyEvent 0xad
88477 +#define MI_BlockCopyEventReadBlock 0xae
88478 +#define MI_BlockCopyWaitForReadData 0xaf
88479 +#define MI_InputWriteWord 0xb0
88480 +#define MI_TraceSetEvents 0xb1
88481 +#define MI_TraceSetEvents_seq1 0xb2
88482 +#define MI_TraceSetEvents_seq2 0xb3
88483 +#define MI_InputWriteDoubleWd 0xb4
88484 +#define MI_SendLockTransIfCCis1 0xb5
88485 +#define MI_WaitForDmaRoutes1 0xb6
88486 +#define MI_LoadDmaContext 0xb7
88487 +#define MI_InputTestAndSetWord 0xb8
88488 +#define MI_InputTestAndSetWord_seq1 0xb9
88489 +#define MI_GetDestEventValue 0xba
88490 +#define MI_SendDmaIdentify 0xbb
88491 +#define MI_InputAtomicAddWord 0xbc
88492 +#define MI_LoadBFromTransD0 0xbd
88493 +#define MI_ConditionalWriteBackCCTrue 0xbe
88494 +#define MI_WaitOneTickForWakeup 0xbf
88495 +#define MI_SendFinalUnlockTrans 0xc0
88496 +#define MI_SendDmaEOP 0xc1
88497 +#define MI_GenLastAddrForPsycho 0xc2
88498 +#define MI_FailedAckIfCCis0 0xc3
88499 +#define MI_FailedAckIfCCis0_seq1 0xc4
88500 +#define MI_WriteDmaSysCntxDesc 0xc5
88501 +#define MI_TimesliceDmaQueueOverflow 0xc6
88502 +#define MI_DequeueNonSysCntxThread1 0xc7
88503 +#define MI_DequeueNonSysCntxThread1_seq1 0xc8
88504 +#define MI_TestThreadQueueEmpty 0xc9
88505 +#define MI_ClearThreadQueueIfCC 0xca
88506 +#define MI_DequeueSysCntxThread1 0xcb
88507 +#define MI_DequeueSysCntxThread1_seq1 0xcc
88508 +#define MI_TProcStartUpGeneric 0xcd
88509 +#define MI_WaitForPCload2 0xce
88510 +#define MI_WaitForNPCWrite 0xcf
88511 +#define MI_WaitForEventWaitAddr 0xd0
88512 +#define MI_WaitForWaitEventAccess 0xd1
88513 +#define MI_WaitForWaitEventAccess_seq1 0xd2
88514 +#define MI_WaitForWaitEventDesc 0xd3
88515 +#define MI_WaitForEventReadTy0 0xd4
88516 +#define MI_SendCondTestFail 0xd5
88517 +#define MI_InputMoveToNextTrans 0xd6
88518 +#define MI_ThreadUpdateSysCntxBack 0xd7
88519 +#define MI_FinishedSetEvent 0xd8
88520 +#define MI_EventIntUpdateBPtr 0xd9
88521 +#define MI_EventQueueOverflow 0xda
88522 +#define MI_MaskLowerSource 0xdb
88523 +#define MI_DmaLoop 0xdc
88524 +#define MI_SendNullSetEvent 0xdd
88525 +#define MI_SendFinalSetEvent 0xde
88526 +#define MI_TestNackFailIsZero1 0xdf
88527 +#define MI_DmaPacketTimedOutOrPacketError 0xe0
88528 +#define MI_NextPacketIsLast 0xe1
88529 +#define MI_TestForZeroLengthDma 0xe2
88530 +#define MI_WaitForPCload 0xe3
88531 +#define MI_ReadInIns 0xe4
88532 +#define MI_WaitForInsRead 0xe5
88533 +#define MI_WaitForLocals 0xe6
88534 +#define MI_WaitForOutsWrite 0xe7
88535 +#define MI_WaitForWaitEvWrBack 0xe8
88536 +#define MI_WaitForLockRead 0xe9
88537 +#define MI_TestQueueLock 0xea
88538 +#define MI_InputIdWriteAddrAndType 0xeb
88539 +#define MI_InputIdWriteAddrAndType2 0xec
88540 +#define MI_ThreadInputIdentify2 0xed
88541 +#define MI_WriteIntoTrapArea0 0xee
88542 +#define MI_GenQueueBlockWrAddr 0xef
88543 +#define MI_InputDiscardFreeLock 0xf0
88544 +#define MI_WriteIntoTrapArea1 0xf1
88545 +#define MI_WriteIntoTrapArea2 0xf2
88546 +#define MI_ResetBPtrToBase 0xf3
88547 +#define MI_InputDoTrap 0xf4
88548 +#define MI_RemoteDmaCntxt0Update 0xf5
88549 +#define MI_ClearQueueLock 0xf6
88550 +#define MI_IProcTrappedBlockWriteData 0xf7
88551 +#define MI_FillContextFilter 0xf8
88552 +#define MI_IProcTrapped4 0xf9
88553 +#define MI_RunSysCntxDma 0xfa
88554 +#define MI_ChainedEventError 0xfb
88555 +#define MI_InputTrappingEOP 0xfc
88556 +#define MI_CheckForRunIfZero 0xfd
88557 +#define MI_TestForBreakOrSuspend 0xfe
88558 +#define MI_SwapForRunable 0xff
88559 +
88560 +/*
88561 + * Local variables:
88562 + * c-file-style: "stroustrup"
88563 + * End:
88564 + */
88565 diff -urN clean/include/elan3/vmseg.h linux-2.6.9/include/elan3/vmseg.h
88566 --- clean/include/elan3/vmseg.h 1969-12-31 19:00:00.000000000 -0500
88567 +++ linux-2.6.9/include/elan3/vmseg.h   2003-09-24 09:57:24.000000000 -0400
88568 @@ -0,0 +1,75 @@
88569 +/*
88570 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88571 + *
88572 + *    For licensing information please see the supplied COPYING file
88573 + *
88574 + */
88575 +
88576 +#ifndef _VM_SEG_ELAN3_H
88577 +#define _VM_SEG_ELAN3_H
88578 +
88579 +#ident "$Id: vmseg.h,v 1.20 2003/09/24 13:57:24 david Exp $"
88580 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vmseg.h,v $*/
88581 +
88582 +#include <elan3/elanuregs.h>
88583 +
88584 +/*
88585 + * This segment maps Elan registers,  it is fixed size and has 8K 
88586 + * pages split up as follows
88587 + *
88588 + *    ----------------------------------------
88589 + *    |    Performance Counters (read-only)  |
88590 + *    ----------------------------------------
88591 + *    |    Flag Page (read-only)            |
88592 + *    ----------------------------------------
88593 + *    |    Command Port                             |
88594 + *    ----------------------------------------
88595 + */
88596 +typedef volatile struct elan3_flagstats 
88597 +{
88598 +    u_int      CommandFlag;
88599 +    u_int      PageFaults;
88600 +    u_int      CProcTraps;
88601 +    u_int      DProcTraps;
88602 +    u_int      TProcTraps;
88603 +    u_int      IProcTraps;
88604 +    u_int      EopBadAcks;
88605 +    u_int      EopResets;
88606 +    u_int      DmaNetworkErrors;
88607 +    u_int      DmaIdentifyNetworkErrors;
88608 +    u_int      ThreadIdentifyNetworkErrors;
88609 +    u_int      DmaRetries;
88610 +    u_int      ThreadSystemCalls;
88611 +    u_int      ThreadElanCalls;
88612 +    u_int      LoadVirtualProcess;
88613 +} ELAN3_FLAGSTATS;
88614 +
88615 +#ifdef DIGITAL_UNIX
88616 +typedef volatile union elan3_flagpage
88617 +{
88618 +    u_char        Padding[8192];
88619 +    ELAN3_FLAGSTATS Stats;
88620 +} ELAN3_FLAGPAGE;
88621 +
88622 +typedef volatile struct elan3_vmseg
88623 +{
88624 +    E3_CommandPort CommandPort;
88625 +    ELAN3_FLAGPAGE  FlagPage;
88626 +    E3_User_Regs   UserRegs;
88627 +} ELAN3_VMSEG;
88628 +
88629 +#define SEGELAN3_SIZE   (sizeof (ELAN3_VMSEG))
88630 +
88631 +#define SEGELAN3_COMMAND_PORT  0
88632 +#define SEGELAN3_FLAG_PAGE     1
88633 +#define SEGELAN3_PERF_COUNTERS 2
88634 +
88635 +#endif /* DIGITAL_UNIX */
88636 +
88637 +#endif /* _VM_SEG_ELAN3_H */
88638 +
88639 +/*
88640 + * Local variables:
88641 + * c-file-style: "stroustrup"
88642 + * End:
88643 + */
88644 diff -urN clean/include/elan3/vpd.h linux-2.6.9/include/elan3/vpd.h
88645 --- clean/include/elan3/vpd.h   1969-12-31 19:00:00.000000000 -0500
88646 +++ linux-2.6.9/include/elan3/vpd.h     2002-08-09 07:23:34.000000000 -0400
88647 @@ -0,0 +1,47 @@
88648 +/*
88649 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88650 + *
88651 + *    For licensing information please see the supplied COPYING file
88652 + *
88653 + */
88654 +
88655 +#ident "$Id: vpd.h,v 1.5 2002/08/09 11:23:34 addy Exp $"
88656 +/*      $Source: /cvs/master/quadrics/elan3mod/elan3/elan3/vpd.h,v $*/
88657 +
88658 +#ifndef __ELAN3_VPD_H
88659 +#define __ELAN3_VPD_H
88660 +
88661 +#define LARGE_RESOURCE_BIT                     0x80
88662 +
88663 +#define SMALL_RESOURCE_COMPATIBLE_DEVICE_ID    0x3
88664 +#define SMALL_RESOURCE_VENDOR_DEFINED          0xE
88665 +#define SMALL_RESOURCE_END_TAG                 0xF
88666 +
88667 +#define LARGE_RESOURCE_STRING                  0x2
88668 +#define LARGE_RESOURCE_VENDOR_DEFINED          0x4
88669 +#define LARGE_RESOURCE_VITAL_PRODUCT_DATA      0x10
88670 +
88671 +#define VPD_PART_NUMBER                        "PN"
88672 +#define VPD_FRU_PART_NUMBER            "FN"
88673 +#define VPD_EC_LEVEL                   "EC"
88674 +#define VPD_MANUFACTURE_ID             "MN"
88675 +#define VPD_SERIAL_NUMBER              "SN"
88676 +
88677 +#define VPD_LOAD_ID                    "LI"
88678 +#define VPD_ROM_LEVEL                  "RL"
88679 +#define VPD_ALTERABLE_ROM_LEVEL                "RM"
88680 +#define VPD_NETWORK_ADDRESS            "NA"
88681 +#define VPD_DEVICE_DRIVER_LEVEL                "DD"
88682 +#define VPD_DIAGNOSTIC_LEVEL           "DG"
88683 +#define VPD_LOADABLE_MICROCODE_LEVEL   "LL"
88684 +#define VPD_VENDOR_ID                  "VI"
88685 +#define VPD_FUNCTION_NUMBER            "FU"
88686 +#define VPD_SUBSYSTEM_VENDOR_ID                "SI"
88687 +
88688 +#endif /* __ELAN3_VPD_H */
88689 +
88690 +/*
88691 + * Local variables:
88692 + * c-file-style: "stroustrup"
88693 + * End:
88694 + */
88695 diff -urN clean/include/elan4/commands.h linux-2.6.9/include/elan4/commands.h
88696 --- clean/include/elan4/commands.h      1969-12-31 19:00:00.000000000 -0500
88697 +++ linux-2.6.9/include/elan4/commands.h        2004-06-16 11:45:02.000000000 -0400
88698 @@ -0,0 +1,247 @@
88699 +/*
88700 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
88701 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
88702 + *
88703 + *    For licensing information please see the supplied COPYING file
88704 + *
88705 + */
88706 +
88707 +#ifndef __ELAN4_COMMANDS_H
88708 +#define __ELAN4_COMMANDS_H
88709 +
88710 +#ident "$Id: commands.h,v 1.29 2004/06/16 15:45:02 addy Exp $"
88711 +/*      $Source: /cvs/master/quadrics/elan4hdr/commands.h,v $*/
88712 +
88713 +/*
88714 + * This header file describes the command format for the Elan 4
88715 + *    See CommandFormat.doc
88716 + */
88717 +
88718 +/*
88719 + * Number of channels in traced elanlib_trace.c
88720 + */
88721 +#define TRACE_MAX_CHANNELS     2
88722 +
88723 +/*
88724 + * Define encoding for the commands issued into the command queues
88725 + */
88726 +#define RUN_THREAD_CMD       0x00
88727 +#define OPEN_STEN_PKT_CMD    0x01
88728 +#define WRITE_DWORD_CMD      0x02
88729 +#define ADD_DWORD_CMD        0x03
88730 +#define COPY64_CMD           0x05
88731 +#define GUARD_CMD            0x06
88732 +#define SET_EVENT_CMD        0x07
88733 +#define SEND_TRANS_CMD       0x09
88734 +#define INTERRUPT_CMD        0x0d
88735 +#define RUN_DMA_CMD          0x0e
88736 +#define SET_EVENTN_CMD       0x0f
88737 +#define NOP_CMD                     0x17
88738 +#define MAKE_EXT_CLEAN_CMD   0x37
88739 +#define WAIT_EVENT_CMD       0x1f
88740 +
88741 +/*
88742 + * Define the portion of the data word the user is NOT
88743 + * allowed to use. This varies with Command type
88744 + */
88745 +#define RUN_THREAD_CMD_MASK       0x03
88746 +#define OPEN_STEN_PKT_CMD_MASK    0x0f
88747 +#define WRITE_DWORD_CMD_MASK      0x07
88748 +#define ADD_DWORD_CMD_MASK        0x07
88749 +#define COPY64_CMD_MASK           0x0f
88750 +#define GUARD_CMD_MASK            0x0f
88751 +#define SET_EVENT_CMD_MASK        0x1f
88752 +#define SEND_TRANS_CMD_MASK       0x1f
88753 +#define INTERRUPT_CMD_MASK        0x0f
88754 +#define RUN_DMA_CMD_MASK          0x0f
88755 +#define SET_EVENTN_CMD_MASK       0x1f
88756 +#define NOP_CMD_MASK             0x3f
88757 +#define MAKE_EXT_CLEAN_MASK      0x3f
88758 +#define WAIT_EVENT_CMD_MASK       0x1f
88759 +
88760 +#define COPY64_DATA_TYPE_SHIFT    0x4
88761 +#define COPY64_DTYPE_BYTE        (0 << COPY64_DATA_TYPE_SHIFT)
88762 +#define COPY64_DTYPE_SHORT       (1 << COPY64_DATA_TYPE_SHIFT)
88763 +#define COPY64_DTYPE_WORD        (2 << COPY64_DATA_TYPE_SHIFT)
88764 +#define COPY64_DTYPE_LONG        (3 << COPY64_DATA_TYPE_SHIFT)
88765 +
88766 +/*
88767 + * SET_EVENTN - word 1 has following form
88768 + * [63:5]      Event Address
88769 + * [4:0]       Part Set Value.
88770 + */
88771 +#define SET_EVENT_PART_SET_MASK      0x1f
88772 +
88773 +/* OPEN_STEN_PKT_CMD 
88774 + *   [63:32]   Vproc
88775 + *   [31]      Use Test
88776 + *   [30:28]   unused
88777 + *   [27:21]   Test Acceptable PAck code
88778 + *   [20:16]   Test Ack Channel Number
88779 + *   [15:9]    Acceptable PAck code
88780 + *   [8:4]     Ack Channel Number (1 bit on Elan4)
88781 + *   [3:0]     Command type
88782 + */
88783 +/* Acceptable PAck code */
88784 +#define PACK_OK                        (1 << 0)
88785 +#define PACK_TESTFAIL          (1 << 1)
88786 +#define PACK_DISCARD           (1 << 2)
88787 +#define RESTART_COUNT_ZERO     (1 << 3)
88788 +#define PACK_ERROR             (1 << 7)
88789 +#define PACK_TIMEOUT           (1 << 8)
88790 +
88791 +/*
88792 + *#ifndef USE_DIRTY_COMMANDS
88793 + *#define USE_DIRTY_COMMANDS
88794 + *#endif
88795 + */
88796 +#ifdef USE_DIRTY_COMMANDS
88797 +#define OPEN_PACKET_USED_MASK    0x00000000780f00e0ULL
88798 +#define SEND_TRANS_USED_MASK     0xffffffff0000fff0ULL
88799 +#define COPY64_WRITE_USED_MASK   0x000000000000000fULL
88800 +#define MAIN_INT_USED_MASK       0x0000000000003ff0ULL
88801 +#define GUARD_USED_MASK          0xfffffe007000fde0ULL
88802 +#define DMA_TYPESIZE_USED_MASK   0x000000000000fff0ULL
88803 +#define SETEVENTN_USED_MASK      0xffffffffffffffe0ULL
88804 +#define NOP_USED_MASK            0xffffffffffffffc0ULL
88805 +#define EXT_CLEAN_USED_MASK      0xffffffffffffffc0ULL
88806 +#define WAIT_CNT_TYPE_USED_MASK  0x00000000fffff800ULL
88807 +#else
88808 +#define OPEN_PACKET_USED_MASK    0x0ULL
88809 +#define SEND_TRANS_USED_MASK     0x0ULL
88810 +#define COPY64_WRITE_USED_MASK   0x0ULL
88811 +#define MAIN_INT_USED_MASK       0x0ULL
88812 +#define GUARD_USED_MASK          0x0ULL
88813 +#define DMA_TYPESIZE_USED_MASK   0x0ULL
88814 +#define SETEVENTN_USED_MASK      0x0ULL
88815 +#define NOP_USED_MASK            0x0ULL
88816 +#define EXT_CLEAN_USED_MASK      0x0ULL
88817 +#define WAIT_CNT_TYPE_USED_MASK  0x0ULL
88818 +#endif
88819 +
88820 +#define OPEN_PACKET(chan, code, vproc) \
88821 +       ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | OPEN_STEN_PKT_CMD)
88822 +
88823 +#define OPEN_PACKET_TEST(chan, code, vproc, tchan, tcode) \
88824 +       ((((chan) & 1) << 4) | (((code) & 0x7f) << 9) | ((E4_uint64)(vproc) << 32) | \
88825 +        (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | (((E4_uint64) 1) << 31) | OPEN_STEN_PKT_CMD)
88826 +
88827 +/*
88828 + * GUARD_CMD
88829 + *   [63:41]   unused
88830 + *   [40]      Reset Restart Fail Count        // only performed if the Guard executes the next command.
88831 + *   [39:32]   New Restart Fail Count value
88832 + *   [31]      Use Test
88833 + *   [30:28]   unused
88834 + *   [27:21]   Test Acceptable PAck code
88835 + *   [20:16]   Test Ack Channel Number
88836 + *   [15:9]    unused
88837 + *   [8:4]     Ack Channel Number
88838 + *   [3:0]     Command type
88839 + */
88840 +/* GUARD_CHANNEL(chan)
88841 + */
88842 +#define GUARD_ALL_CHANNELS     ((1 << 9) | GUARD_CMD)
88843 +#define GUARD_CHANNEL(chan)    ((((chan) & 1) << 4) | GUARD_CMD)
88844 +#define GUARD_TEST(chan,code)  ((1ull << 31) | (((code) & 0x7f) << 21) | (((chan) & 1) << 16))
88845 +#define GUARD_RESET(count)     ((1ull << 40) | ((((E4_uint64) count) & 0xff) << 32))
88846 +
88847 +#define GUARD_CHANNEL_TEST(chan,tchan,tcode) \
88848 +       ((((chan) & 1) << 4) | (((tchan) & 1) << 16) | (((tcode) & 0x7f) << 21) | \
88849 +        (((E4_uint64) 1) << 31) | GUARD_CMD)
88850 +
88851 +/*
88852 + * SEND_TRANS_CMD
88853 + * [63:32]     unused
88854 + * [31:16]     transaction type
88855 + * [15:4]      unused
88856 + * [3:0]       Command type
88857 + */
88858 +#define SEND_TRANS(TransType)  (((TransType) << 16) | SEND_TRANS_CMD)
88859 +
88860 +/*
88861 + * Command port trace debug levels
88862 + */
88863 +#define TRACE_CMD_BUFFER       0x01
88864 +#define TRACE_CMD_TYPE         0x02
88865 +#define TRACE_CHANNEL_OPENS    0x04
88866 +#define TRACE_GUARDED_ATOMICS  0x08
88867 +#define TRACE_CMD_TIMEOUT      0x10
88868 +
88869 +/*
88870 + * Commands that should be preceded by a GUARD_CMD.
88871 + */
88872 +#define IS_ATOMIC_CMD(cmd)                                                             \
88873 +   ((cmd) == RUN_THREAD_CMD || (cmd) == ADD_DWORD_CMD || (cmd) == INTERRUPT_CMD ||     \
88874 +    (cmd) == RUN_DMA_CMD    || (cmd) == SET_EVENT_CMD || (cmd) == SET_EVENTN_CMD ||    \
88875 +    (cmd) == WAIT_EVENT_CMD)
88876 +
88877 +#ifndef _ASM
88878 +
88879 +/*
88880 + * These structures are used to build event copy command streams. They are intended to be included
88881 + * in a larger structure to form a self documenting command sequence that can be easily copied and manipulated.
88882 + */
88883 +
88884 +typedef struct e4_runthreadcmd
88885 +{
88886 +   E4_Addr     PC;
88887 +   E4_uint64   r[6];
88888 +} E4_RunThreadCmd;
88889 +
88890 +typedef E4_uint64 E4_OpenCmd;
88891 +
88892 +typedef struct e4_writecmd
88893 +{
88894 +   E4_Addr     WriteAddr;
88895 +   E4_uint64   WriteValue;
88896 +} E4_WriteCmd;
88897 +
88898 +typedef struct e4_addcmd
88899 +{
88900 +   E4_Addr     AddAddr;
88901 +   E4_uint64   AddValue;
88902 +} E4_AddCmd;
88903 +
88904 +typedef struct e4_copycmd
88905 +{
88906 +   E4_Addr     SrcAddr;
88907 +   E4_Addr     DstAddr;
88908 +} E4_CopyCmd;
88909 +
88910 +typedef E4_uint64 E4_GaurdCmd;
88911 +typedef E4_uint64 E4_SetEventCmd;
88912 +
88913 +/*
88914 + * The data to this command must be declared as a vector after the use of this.
88915 + */
88916 +typedef struct e4_sendtranscmd
88917 +{
88918 +   E4_Addr     Type;
88919 +   E4_Addr     Addr;
88920 +} E4_SendTransCmd;
88921 +
88922 +typedef E4_uint64 E4_IntCmd;
88923 +
88924 +/* The normal Dma struct can be used here. */
88925 +
88926 +typedef struct e4_seteventncmd
88927 +{
88928 +   E4_Addr     Event;
88929 +   E4_Addr     SetCount;
88930 +} E4_SetEventNCmd;
88931 +
88932 +typedef E4_uint64 E4_NopCmd;
88933 +typedef E4_uint64 E4_MakeExtCleanCmd;
88934 +
88935 +typedef struct e4_waitcmd
88936 +{
88937 +   E4_Addr     ev_Event;
88938 +   E4_Addr     ev_CountType;
88939 +   E4_Addr     ev_Params[2];
88940 +} E4_WaitCmd;
88941 +
88942 +#endif /* _ASM */
88943 +
88944 +#endif /* __ELAN4_COMMANDS_H  */
88945 +
88946 diff -urN clean/include/elan4/debug.h linux-2.6.9/include/elan4/debug.h
88947 --- clean/include/elan4/debug.h 1969-12-31 19:00:00.000000000 -0500
88948 +++ linux-2.6.9/include/elan4/debug.h   2005-03-23 06:06:15.000000000 -0500
88949 @@ -0,0 +1,112 @@
88950 +/*
88951 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
88952 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
88953 + * 
88954 + *    For licensing information please see the supplied COPYING file
88955 + *
88956 + */
88957 +
88958 +#ifndef _ELAN4_DEBUG_H
88959 +#define _ELAN4_DEBUG_H
88960 +
88961 +#ident "$Id: debug.h,v 1.21 2005/03/23 11:06:15 david Exp $"
88962 +/*      $Source: /cvs/master/quadrics/elan4mod/debug.h,v $ */
88963 +
88964 +/* values for "type" field - note a "ctxt" is permissible */
88965 +/* and BUFFER/CONSOLE are for explicit calls to elan4_debugf() */
88966 +#define DBG_DEVICE     ((void *) 0)
88967 +#define DBG_USER       ((void *) 1)
88968 +
88969 +#define DBG_BUFFER     ((void *) 62)
88970 +#define DBG_CONSOLE    ((void *) 63)
88971 +#define DBG_NTYPES     64
88972 +
88973 +/* values for "mode" field */
88974 +#define DBG_CONFIG     0x00000001
88975 +#define DBG_INTR       0x00000002
88976 +#define DBG_MAININT    0x00000004
88977 +#define DBG_SDRAM      0x00000008
88978 +#define DBG_MMU                0x00000010
88979 +#define DBG_REGISTER   0x00000020
88980 +#define DBG_CQ         0x00000040
88981 +#define DBG_NETWORK_CTX        0x00000080
88982 +
88983 +#define DBG_FLUSH      0x00000100
88984 +#define DBG_FILE       0x00000200
88985 +#define DBG_CONTROL    0x00000400
88986 +#define DBG_MEM                0x00000800
88987 +
88988 +#define DBG_PERM       0x00001000
88989 +#define DBG_FAULT      0x00002000
88990 +#define DBG_SWAP       0x00004000
88991 +#define DBG_TRAP       0x00008000
88992 +#define DBG_DDCQ       0x00010000
88993 +#define DBG_VP         0x00020000
88994 +#define DBG_RESTART    0x00040000
88995 +#define DBG_RESUME     0x00080000
88996 +#define DBG_CPROC      0x00100000
88997 +#define DBG_DPROC      0x00200000
88998 +#define DBG_EPROC      0x00400000
88999 +#define DBG_IPROC      0x00800000
89000 +#define DBG_TPROC      0x01000000
89001 +#define DBG_IOPROC     0x02000000
89002 +#define DBG_ROUTE      0x04000000
89003 +#define DBG_NETERR     0x08000000
89004 +
89005 +#define DBG_ALL                0x7FFFFFFF
89006 +
89007 +
89008 +#ifdef DEBUG_PRINTF
89009 +
89010 +#  define PRINTF0(type,m,fmt)                  ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt)                  : (void)0)
89011 +#  define PRINTF1(type,m,fmt,a)                        ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a)                : (void)0)
89012 +#  define PRINTF2(type,m,fmt,a,b)              ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b)              : (void)0)
89013 +#  define PRINTF3(type,m,fmt,a,b,c)            ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c)            : (void)0)
89014 +#  define PRINTF4(type,m,fmt,a,b,c,d)          ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d)          : (void)0)
89015 +#  define PRINTF5(type,m,fmt,a,b,c,d,e)                ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e)        : (void)0)
89016 +#  define PRINTF6(type,m,fmt,a,b,c,d,e,f)      ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f)      : (void)0)
89017 +#  define PRINTF7(type,m,fmt,a,b,c,d,e,f,g)    ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g)    : (void)0)
89018 +#  define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h)  ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h)  : (void)0)
89019 +#  define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i)        ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m,fmt,a,b,c,d,e,f,g,h,i): (void)0)
89020 +#ifdef __GNUC__
89021 +#  define PRINTF(type,m,args...)               ((elan4_debug&(m) || (type) == DBG_CONSOLE) ? elan4_debugf(type,m, ##args)              : (void)0)
89022 +#endif
89023 +#  define DBGCMD(type,m,cmd)                   ((elan4_debug&(m) || (type) == DBG_CONSOLE)  ? (void) (cmd) : (void) 0)
89024 +
89025 +#else
89026 +
89027 +#  define PRINTF0(type,m,fmt)                  (0)
89028 +#  define PRINTF1(type,m,fmt,a)                        (0)
89029 +#  define PRINTF2(type,m,fmt,a,b)              (0)
89030 +#  define PRINTF3(type,m,fmt,a,b,c)            (0)
89031 +#  define PRINTF4(type,m,fmt,a,b,c,d)          (0)
89032 +#  define PRINTF5(type,m,fmt,a,b,c,d,e)                (0)
89033 +#  define PRINTF6(type,m,fmt,a,b,c,d,e,f)      (0)
89034 +#  define PRINTF7(type,m,fmt,a,b,c,d,e,f,g)    (0)
89035 +#  define PRINTF8(type,m,fmt,a,b,c,d,e,f,g,h)  (0)
89036 +#  define PRINTF9(type,m,fmt,a,b,c,d,e,f,g,h,i)        (0)
89037 +#ifdef __GNUC__
89038 +#  define PRINTF(type,m,args...)
89039 +#endif
89040 +#  define DBGCMD(type,m,cmd)                   ((void) 0)
89041 +
89042 +#endif /* DEBUG_PRINTF */
89043 +
89044 +extern unsigned   elan4_debug;
89045 +extern unsigned   elan4_debug_toconsole;
89046 +extern unsigned   elan4_debug_tobuffer;
89047 +extern unsigned   elan4_debug_display_ctxt;
89048 +extern unsigned   elan4_debug_ignore_ctxt;
89049 +extern unsigned   elan4_debug_ignore_type;
89050 +
89051 +extern void      elan4_debug_init(void);
89052 +extern void      elan4_debug_fini(void);
89053 +extern void       elan4_debugf (void *type, int mode, char *fmt, ...);
89054 +
89055 +
89056 +/*
89057 + * Local variables:
89058 + * c-file-style: "stroustrup"
89059 + * End:
89060 + */
89061 +#endif /* _ELAN4_DEBUG_H */
89062 diff -urN clean/include/elan4/device.h linux-2.6.9/include/elan4/device.h
89063 --- clean/include/elan4/device.h        1969-12-31 19:00:00.000000000 -0500
89064 +++ linux-2.6.9/include/elan4/device.h  2005-08-09 05:57:04.000000000 -0400
89065 @@ -0,0 +1,888 @@
89066 +/*
89067 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
89068 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89069 + * 
89070 + *    For licensing information please see the supplied COPYING file
89071 + *
89072 + */
89073 +
89074 +#ifndef __ELAN4_ELANDEV_H
89075 +#define __ELAN4_ELANDEV_H
89076 +
89077 +#ident "$Id: device.h,v 1.92.2.2 2005/08/09 09:57:04 mike Exp $"
89078 +/*      $Source: /cvs/master/quadrics/elan4mod/device.h,v $ */
89079 +
89080 +#include <elan/devinfo.h>
89081 +#include <elan/capability.h>
89082 +
89083 +#include <elan4/pci.h>
89084 +#include <elan4/sdram.h>
89085 +#include <elan4/dma.h>
89086 +#include <elan4/events.h>
89087 +#include <elan4/registers.h>
89088 +
89089 +#include <elan4/mmu.h>
89090 +#include <elan4/trap.h>
89091 +#include <elan4/stats.h>
89092 +#include <elan4/neterr.h>
89093 +
89094 +#ifdef CONFIG_MPSAS
89095 +#include <elan4/mpsas.h>
89096 +#endif
89097 +
89098 +#if defined(LINUX)
89099 +#include <elan4/device_Linux.h>
89100 +#elif defined(TRU64UNIX)
89101 +#include <elan4/device_OSF1.h>
89102 +#elif defined(SOLARIS)
89103 +#include <elan4/device_SunOS.h>
89104 +#endif
89105 +
89106 +/*
89107 + * Network context number allocation.
89108 + * [0]          neterr fixup system context
89109 + * [1]          kernel comms system context
89110 + * [2048-4095] kernel comms data contexts
89111 + */
89112 +#define ELAN4_NETERR_CONTEXT_NUM       0x00                    /* network error fixup context number */
89113 +#define ELAN4_KCOMM_CONTEXT_NUM                0x01                    /* kernel comms context number */
89114 +#define ELAN4_KCOMM_BASE_CONTEXT_NUM   0x800                   /* kernel comms data transfer contexts */
89115 +#define ELAN4_KCOMM_TOP_CONTEXT_NUM    0xfff
89116 +
89117 +#define ELAN4_SYSTEM_CONTEXT(ctx)  ((ctx) >= ELAN4_KCOMM_BASE_CONTEXT_NUM)
89118 +
89119 +typedef void (ELAN4_HALTFN)(struct elan4_dev *dev, void *arg);
89120 +
89121 +typedef struct elan4_haltop
89122 +{
89123 +    struct list_head    op_link;                               /* chain on a list */
89124 +    E4_uint32          op_mask;                                /* Interrupt mask to see before calling function */
89125 +    
89126 +    ELAN4_HALTFN       *op_function;                           /* function to call */
89127 +    void              *op_arg;                                 /* argument to pass to function */
89128 +} ELAN4_HALTOP;
89129 +
89130 +typedef void (ELAN4_DMA_FLUSHFN)(struct elan4_dev *dev, void *arg, int qfull);
89131 +
89132 +typedef struct elan4_dma_flushop
89133 +{
89134 +    struct list_head    op_link;                               /* chain on a list */
89135 +    ELAN4_DMA_FLUSHFN  *op_function;                           /* function to call */
89136 +    void              *op_arg;                                 /* argument to pass to function */
89137 +} ELAN4_DMA_FLUSHOP;
89138 +
89139 +typedef void (ELAN4_INTFN)(struct elan4_dev *dev, void *arg);
89140 +
89141 +typedef struct elan4_intop
89142 +{
89143 +    struct list_head    op_link;                               /* chain on a list */
89144 +    ELAN4_INTFN        *op_function;                           /* function to call */
89145 +    void              *op_arg;                                 /* argument to pass to function */
89146 +    E4_uint64          op_cookie;                              /* and main interrupt cookie */
89147 +} ELAN4_INTOP;
89148 +
89149 +typedef struct elan4_eccerrs
89150 +{
89151 +    E4_uint64        EccStatus;
89152 +    E4_uint64        ConfigReg;
89153 +    E4_uint32        ErrorCount;
89154 +} ELAN4_ECCERRS;
89155 +
89156 +#define SDRAM_MIN_BLOCK_SHIFT  10
89157 +#define SDRAM_NUM_FREE_LISTS   19                                      /* allows max 256 Mb block */
89158 +#define SDRAM_MIN_BLOCK_SIZE   (1 << SDRAM_MIN_BLOCK_SHIFT)
89159 +#define SDRAM_MAX_BLOCK_SIZE   (SDRAM_MIN_BLOCK_SIZE << (SDRAM_NUM_FREE_LISTS-1))
89160 +
89161 +#if PAGE_SHIFT < 13
89162 +#define SDRAM_PAGE_SIZE                8192
89163 +#define SDRAM_PGOFF_OFFSET     1
89164 +#define SDRAM_PGOFF_MASK       (~SDRAM_PGOFF_OFFSET)
89165 +#else
89166 +#define SDRAM_PAGE_SIZE                PAGE_SIZE
89167 +#define SDRAM_PGOFF_OFFSET     0
89168 +#define SDRAM_PGOFF_MASK       (~SDRAM_PGOFF_OFFSET)
89169 +#endif
89170 +
89171 +typedef struct elan4_sdram
89172 +{
89173 +    sdramaddr_t        b_base;                                 /* offset in sdram bar */
89174 +    unsigned           b_size;                                 /* size of bank */
89175 +    ioaddr_t           b_ioaddr;                               /* ioaddr where mapped into the kernel */
89176 +    ELAN4_MAP_HANDLE   b_handle;                               /*    and mapping handle */
89177 +    bitmap_t          *b_bitmaps[SDRAM_NUM_FREE_LISTS];        /* buddy allocator bitmaps */
89178 +} ELAN4_SDRAM_BANK;
89179 +
89180 +/* command queue */
89181 +typedef struct elan4_cq 
89182 +{
89183 +    struct elan4_cqa    *cq_cqa;                                       /* command queue allocator this belongs to */
89184 +    unsigned            cq_idx;                                        /*  and which command queue this is */
89185 +
89186 +    sdramaddr_t                 cq_space;                                      /* sdram backing up command queue */
89187 +    unsigned            cq_size;                                       /* size value */
89188 +    unsigned            cq_perm;                                       /* permissions */
89189 +    ioaddr_t            cq_mapping;                                    /* mapping of command queue page */
89190 +    ELAN4_MAP_HANDLE    cq_handle;                                     /*    and mapping handle */
89191 +} ELAN4_CQ;
89192 +
89193 +/* cqtype flags to elan4_alloccq() */
89194 +#define CQ_Priority    (1 << 0)
89195 +#define CQ_Reorder     (1 << 1)
89196 +
89197 +/* command queues are allocated in chunks,so that all the
89198 + * command ports are in a single system page */
89199 +#define ELAN4_CQ_PER_CQA       MAX(1, (PAGESIZE/CQ_CommandMappingSize))
89200 +
89201 +/* maximum number of command queues per context */
89202 +#define ELAN4_MAX_CQA          (256 / ELAN4_CQ_PER_CQA)
89203 +
89204 +typedef struct elan4_cqa
89205 +{
89206 +    struct list_head   cqa_link;                                       /* linked together */
89207 +    bitmap_t           cqa_bitmap[BT_BITOUL(ELAN4_CQ_PER_CQA)];        /* bitmap of which are free */
89208 +    unsigned int        cqa_type;                                      /* allocation type */
89209 +    unsigned int       cqa_cqnum;                                      /* base cq number */
89210 +    unsigned int       cqa_ref;                                        /* "mappings" to a queue */
89211 +    unsigned int       cqa_idx;                                        /* index number */
89212 +    ELAN4_CQ           cqa_cq[ELAN4_CQ_PER_CQA];                       /* command queue entries */
89213 +} ELAN4_CQA;
89214 +
89215 +#define elan4_cq2num(cq)       ((cq)->cq_cqa->cqa_cqnum + (cq)->cq_idx)
89216 +#define elan4_cq2idx(cq)       ((cq)->cq_cqa->cqa_idx * ELAN4_CQ_PER_CQA + (cq)->cq_idx)
89217 +
89218 +#ifdef ELAN4_LARGE_PAGE_SUPPORT
89219 +#define NUM_HASH_TABLES                2
89220 +#else
89221 +#define NUM_HASH_TABLES                1
89222 +#endif
89223 +
89224 +typedef struct elan4_ctxt_trans_index
89225 +{
89226 +    int        tbl;
89227 +    int        index;
89228 +} ELAN4_CTXT_TRANS_INDEX;
89229 +
89230 +#define ELAN4_CTXT_MAX_SHUFFLE (10)
89231 +
89232 +typedef struct elan4_ctxt
89233 +{
89234 +    struct elan4_dev      *ctxt_dev;                                   /* device we're associated with */
89235 +    struct list_head       ctxt_link;                                  /* chained on device */
89236 +    
89237 +    struct elan4_trap_ops *ctxt_ops;                                   /* client specific operations */
89238 +
89239 +    unsigned int          ctxt_features;                               /* device features this context is using */
89240 +    signed int            ctxt_num;                                    /* local context number */
89241 +
89242 +    struct list_head      ctxt_cqalist;                                /* link list of command queue allocators */
89243 +    bitmap_t              ctxt_cqamap[BT_BITOUL(ELAN4_MAX_CQA)];       /*   bitmap for allocating cqa_idx */
89244 +
89245 +    ELAN4_HASH_ENTRY     **ctxt_mmuhash[NUM_HASH_TABLES];              /* software hash tables */
89246 +    spinlock_t            ctxt_mmulock;                                /*   and spinlock. */
89247 +
89248 +    struct proc_dir_entry *procdir;
89249 +    ELAN4_CTXT_TRANS_INDEX trans_index[NUM_HASH_TABLES];                /* place to stash info for /proc/qsnet/elan4/deviceN/ctxt/N/translations_N */
89250 +
89251 +    int                    shuffle_needed[NUM_HASH_TABLES];                  /* true when there are entries in shuffle array */
89252 +    int                    shuffle[NUM_HASH_TABLES][ELAN4_CTXT_MAX_SHUFFLE]; /* hashidx's that need shuffling or -1 = none. if all set then shuffle ALL hashidx's */
89253 +} ELAN4_CTXT;
89254 +
89255 +typedef struct elan4_trap_ops
89256 +{
89257 +    void       (*op_eproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status);
89258 +    void       (*op_cproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned cqnum);
89259 +    void       (*op_dproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit);
89260 +    void       (*op_tproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status);
89261 +    void       (*op_iproc_trap) (ELAN4_CTXT *ctxt, E4_uint64 status, unsigned unit);
89262 +    void       (*op_interrupt)  (ELAN4_CTXT *ctxt, E4_uint64 cookie);
89263 +    void       (*op_neterrmsg)  (ELAN4_CTXT *ctxt, ELAN4_NETERR_MSG *msg);
89264 +    void       (*op_need_shuffle) (ELAN4_CTXT *ctxt, int tbl, int index);
89265 +} ELAN4_TRAP_OPS;
89266 +
89267 +typedef struct elan4_route_table
89268 +{
89269 +    spinlock_t  tbl_lock;
89270 +    unsigned   tbl_size;
89271 +    sdramaddr_t tbl_entries;
89272 +} ELAN4_ROUTE_TABLE;
89273 +
89274 +#define DEV_STASH_ROUTE_COUNT 20
89275 +
89276 +typedef struct elan4_route_ringbuf {
89277 +    int start;
89278 +    int end;
89279 +    E4_VirtualProcessEntry routes[DEV_STASH_ROUTE_COUNT]; 
89280 +} ELAN4_ROUTE_RINGBUF;
89281 +
89282 +#define elan4_ringbuf_init(ringbuf) memset(&ringbuf, 0, sizeof(ELAN4_ROUTE_RINGBUF));
89283 +
89284 +typedef struct elan4_trans_index
89285 +{
89286 +    int tbl;
89287 +} ELAN4_TRANS_INDEX;
89288 +
89289 +#define ELAN4_TRANS_STATS_NUM_BUCKETS 7
89290 +typedef struct elan4_trans_stats
89291 +{
89292 +    int tbl;
89293 +    int buckets[ELAN4_TRANS_STATS_NUM_BUCKETS];
89294 +} ELAN4_TRANS_STATS;
89295 +
89296 +typedef struct elan4_dev
89297 +{
89298 +    ELAN4_CTXT          dev_ctxt;                                      /* context for device operations */
89299 +
89300 +    ELAN4_DEV_OSDEP     dev_osdep;                                     /* OS specific entries */
89301 +
89302 +    int                         dev_instance;                                  /* device number */
89303 +    ELAN_DEVINFO        dev_devinfo;                                   /* device information (revision etc */
89304 +    ELAN_POSITION       dev_position;                                  /* position connected to switch */
89305 +    ELAN_DEV_IDX        dev_idx;                                       /* device idx registered with elanmod */
89306 +
89307 +    kmutex_t            dev_lock;                                      /* lock for device state/references */
89308 +    unsigned            dev_state;                                     /* device state */
89309 +    unsigned            dev_references;                                /*  # references */
89310 +
89311 +    ioaddr_t            dev_regs;                                      /* Mapping of device registers */
89312 +    ELAN4_MAP_HANDLE    dev_regs_handle;
89313 +    ioaddr_t            dev_rom;                                       /* Mapping of rom */
89314 +    ELAN4_MAP_HANDLE    dev_rom_handle;
89315 +    ioaddr_t            dev_i2c;                                       /* Mapping of I2C registers */
89316 +    ELAN4_MAP_HANDLE    dev_i2c_handle;
89317 +    
89318 +    E4_uint64           dev_sdram_cfg;                                 /* SDRAM config value (from ROM) */
89319 +    E4_uint64           dev_sdram_initial_ecc_val;                     /* power on ECC register value */
89320 +    int                         dev_sdram_numbanks;                            /* # banks of sdram */
89321 +    ELAN4_SDRAM_BANK    dev_sdram_banks[SDRAM_MAX_BANKS];              /* Mapping of sdram banks */
89322 +    spinlock_t          dev_sdram_lock;                                /* spinlock for buddy allocator */
89323 +    sdramaddr_t                 dev_sdram_freelists[SDRAM_NUM_FREE_LISTS];
89324 +    unsigned            dev_sdram_freecounts[SDRAM_NUM_FREE_LISTS];
89325 +
89326 +    physaddr_t          dev_regs_phys;                                 /* physaddr of registers */
89327 +    physaddr_t          dev_sdram_phys;                                /*  and of sdram */
89328 +
89329 +    sdramaddr_t                 dev_cacheflush_space;                          /* sdram reserved for cache flush operation */
89330 +
89331 +    sdramaddr_t                 dev_faultarea;                                 /* fault areas for each unit */
89332 +    sdramaddr_t                 dev_inputtraparea;                             /* trap area for trapped transactions */
89333 +    sdramaddr_t                 dev_ctxtable;                                  /* context table (E4_ContextControlBlock) */
89334 +    int                         dev_ctxtableshift;                             /* and size (in bits) */
89335 +
89336 +    E4_uint32           dev_syscontrol;                                /* copy of system control register */
89337 +    spinlock_t          dev_syscontrol_lock;                           /*   spinlock to sequentialise modifications */
89338 +    unsigned            dev_direct_map_pci_writes;                     /*   # counts for CONT_DIRECT_MAP_PCI_WRITES */
89339 +
89340 +    volatile E4_uint32  dev_intmask;                                   /* copy of interrupt mask register */
89341 +    spinlock_t          dev_intmask_lock;                              /*   spinlock to sequentialise modifications */
89342 +
89343 +    /* i2c section */
89344 +    spinlock_t          dev_i2c_lock;                                  /* spinlock for i2c operations */
89345 +    unsigned int         dev_i2c_led_disabled;                         /* count of reasons led auto update disabled */
89346 +
89347 +    /* mmu section */
89348 +    unsigned            dev_pagesizeval[NUM_HASH_TABLES];              /* page size value */
89349 +    unsigned            dev_pageshift[NUM_HASH_TABLES];                        /* pageshift in bits. */
89350 +    unsigned            dev_hashsize[NUM_HASH_TABLES];                 /* # entries in mmu hash table */
89351 +    sdramaddr_t                 dev_hashtable[NUM_HASH_TABLES];                /* mmu hash table */
89352 +    ELAN4_HASH_ENTRY   *dev_mmuhash[NUM_HASH_TABLES];                  /*   and software shadow */
89353 +    ELAN4_HASH_ENTRY    *dev_mmufree_count;                            /*   space holder - will use to indicate if there is a free slot in chain */
89354 +    ELAN4_HASH_ENTRY    *dev_mmufreelist;                              /*   and free blocks */
89355 +    spinlock_t           dev_mmulock;
89356 +    E4_uint16           dev_topaddr[4];                                /* top address values */
89357 +    unsigned char       dev_topaddrvalid;
89358 +    unsigned char       dev_topaddrmode;
89359 +    unsigned char       dev_pteval;                                    /* allow setting of relaxed order/dont snoop attributes */
89360 +
89361 +    unsigned            dev_rsvd_hashmask[NUM_HASH_TABLES];
89362 +    unsigned            dev_rsvd_hashval[NUM_HASH_TABLES];
89363 +
89364 +    ELAN4_TRANS_INDEX    trans_index[NUM_HASH_TABLES];                  /* place to stash info for /proc/qsnet/elan4/deviceN/translations_N */
89365 +    ELAN4_TRANS_STATS    trans_stats[NUM_HASH_TABLES];                  /* place to stash info for /proc/qsnet/elan4/deviceN/stats/translations_N */
89366 +    /* run queues */
89367 +    sdramaddr_t                 dev_comqlowpri;                                /* CProc low & high pri run queues */
89368 +    sdramaddr_t                 dev_comqhighpri;
89369 +
89370 +    sdramaddr_t                 dev_dmaqlowpri;                                /* DProc,TProc,Interrupt queues */
89371 +    sdramaddr_t                 dev_dmaqhighpri;
89372 +    sdramaddr_t                 dev_threadqlowpri;
89373 +    sdramaddr_t                 dev_threadqhighpri;
89374 +    sdramaddr_t                 dev_interruptq;
89375 +
89376 +    E4_uint32           dev_interruptq_nfptr;                          /* cache next main interrupt fptr */
89377 +    struct list_head     dev_interruptq_list;                          /*   list of operations to call when space in interruptq*/
89378 +
89379 +    /* command queue section */
89380 +    sdramaddr_t                 dev_cqaddr;                                    /* SDRAM address of command queues */
89381 +    unsigned            dev_cqoffset;                                  /* offset for command queue alignment constraints */
89382 +    unsigned            dev_cqcount;                                   /* number of command queue descriptors */
89383 +    bitmap_t           *dev_cqamap;                                    /* bitmap for allocation */
89384 +    spinlock_t          dev_cqlock;                                    /* spinlock to protect bitmap */
89385 +    unsigned            dev_cqreorder;                                 /* offset for first re-ordering queue with mtrr */
89386 +
89387 +    /* halt operation section */
89388 +    struct list_head     dev_haltop_list;                              /* list of operations to call when units halted */
89389 +    E4_uint32           dev_haltop_mask;                               /* mask of which ones to halt */
89390 +    E4_uint32           dev_haltop_active;                             /* mask of which haltops are executing */
89391 +    spinlock_t          dev_haltop_lock;                               /*    and their spinlock */
89392 +    struct timer_list    dev_haltop_timer;                              /* timer looking for haltop locked in list */
89393 +
89394 +    struct {
89395 +       struct list_head list;                                          /* list of halt operations for DMAs */
89396 +       ELAN4_CQ        *cq;                                            /*   and command queue's */
89397 +       ELAN4_INTOP      intop;                                         /*   and main interrupt op */
89398 +       E4_uint64        status;                                        /*   status register (when waiting for intop)*/
89399 +    }                   dev_dma_flushop[2];
89400 +
89401 +    unsigned            dev_halt_all_count;                            /* count of reasons to halt all units */
89402 +    unsigned            dev_halt_lowpri_count;                         /* count of reasons to halt lowpri queues */
89403 +    unsigned            dev_halt_cproc_count;                          /* count of reasons to halt command processor */
89404 +    unsigned            dev_halt_dproc_count;                          /* count of reasons to halt dma processor */
89405 +    unsigned            dev_halt_tproc_count;                          /* count of reasons to halt thread processor */
89406 +    unsigned            dev_discard_all_count;                         /* count of reasons to discard all packets */
89407 +    unsigned            dev_discard_lowpri_count;                      /* count of reasons to discard non-system packets */
89408 +    unsigned            dev_discard_highpri_count;                     /* count of reasons to discard system packets */
89409 +
89410 +    E4_uint32           dev_schedstatus;                               /* copy of schedule status register */
89411 +
89412 +    /* local context allocation section */
89413 +    spinlock_t          dev_ctxlock;                                   /* spinlock to protect bitmap */
89414 +    bitmap_t           *dev_ctxmap;                                    /* bitmap for local context allocation */
89415 +
89416 +    spinlock_t          dev_ctxt_lock;                                 /* spinlock to protect context list */
89417 +    struct list_head     dev_ctxt_list;                                        /* linked list of contexts */
89418 +
89419 +    /* locks to sequentialise interrupt handling */
89420 +    spinlock_t          dev_trap_lock;                                 /* spinlock while handling a trap */
89421 +    spinlock_t          dev_requeue_lock;                              /* spinlock sequentialising cproc requeue */
89422 +
89423 +    /* error rate interrupt section */
89424 +    long                dev_error_time;                                /* lbolt at start of sampling period */
89425 +    unsigned            dev_errors_per_period;                         /* errors so far this sampling period */
89426 +    timer_fn_t          dev_error_timeoutid;                           /* timeout to re-enable error interrupts */
89427 +    timer_fn_t          dev_linkerr_timeoutid;                         /* timeout to clear link error led */
89428 +
89429 +    /* kernel threads */
89430 +    unsigned            dev_stop_threads:1;                            /* kernel threads should exit */
89431 +
89432 +    /* main interrupt thread */
89433 +    kcondvar_t          dev_mainint_wait;                              /* place for mainevent interrupt thread to sleep */
89434 +    spinlock_t          dev_mainint_lock;                              /*   and it's spinlock */
89435 +    unsigned            dev_mainint_started:1;
89436 +    unsigned            dev_mainint_stopped:1;
89437 +
89438 +    /* device context - this is used to flush insert cache/instruction cache/dmas & threads */
89439 +    ELAN4_CPROC_TRAP     dev_cproc_trap;                               /* space to extract cproc trap into */
89440 +
89441 +    struct list_head     dev_intop_list;                               /* list of main interrupt operations */
89442 +    spinlock_t          dev_intop_lock;                                /*   and spinlock */
89443 +    E4_uint64           dev_intop_cookie;                              /*   and next cookie to use */
89444 +
89445 +    spinlock_t          dev_flush_lock;                                /* spinlock for flushing */
89446 +    kcondvar_t          dev_flush_wait;                                /*  and place to sleep */
89447 +
89448 +    ELAN4_CQ           *dev_flush_cq[COMMAND_INSERTER_CACHE_ENTRIES];  /* command queues to flush the insert cache */
89449 +    ELAN4_INTOP          dev_flush_op[COMMAND_INSERTER_CACHE_ENTRIES]; /* and a main interrupt operation for each one */
89450 +    unsigned            dev_flush_finished;                            /* flush command finished */
89451 +
89452 +    ELAN4_HALTOP        dev_iflush_haltop;                             /* halt operation for icache flush */
89453 +    unsigned            dev_iflush_queued:1;                           /* icache haltop queued */
89454 +
89455 +    ELAN4_ROUTE_TABLE   *dev_routetable;                               /* virtual process table (for dma queue flush)*/
89456 +    sdramaddr_t          dev_sdrampages[2];                            /* pages of sdram to hold suspend code sequence */
89457 +    E4_Addr             dev_tproc_suspend;                             /*  st8suspend instruction */
89458 +    E4_Addr             dev_tproc_space;                               /*     and target memory */
89459 +
89460 +    sdramaddr_t                 dev_neterr_inputq;                             /* network error input queue descriptor & event */
89461 +    sdramaddr_t                 dev_neterr_slots;                              /* network error message slots */
89462 +    ELAN4_CQ           *dev_neterr_msgcq;                              /* command queue for sending messages */
89463 +    ELAN4_CQ           *dev_neterr_intcq;                              /* command queue for message received interrupt */
89464 +    ELAN4_INTOP                 dev_neterr_intop;                              /*   and it's main interrupt operation */
89465 +    E4_uint64           dev_neterr_queued;                             /* # message queued in msgcq */
89466 +    spinlock_t           dev_neterr_lock;                              /*   and spinlock .... */
89467 +
89468 +    ELAN4_DEV_STATS     dev_stats;                                     /* device statistics */
89469 +    ELAN4_ECCERRS       dev_sdramerrs[30];                             /* last few sdram errors for procfs */
89470 +
89471 +    unsigned int       *dev_ack_errors;                                /* Map of source of dproc ack errors */
89472 +    ELAN4_ROUTE_RINGBUF  dev_ack_error_routes;
89473 +    unsigned int        *dev_dproc_timeout;                             /* Ditto dproc timeout errors */
89474 +    ELAN4_ROUTE_RINGBUF  dev_dproc_timeout_routes;
89475 +    unsigned int        *dev_cproc_timeout;                             /* Ditto cproc timeout errors */
89476 +    ELAN4_ROUTE_RINGBUF  dev_cproc_timeout_routes;
89477 +
89478 +    unsigned            dev_linkerr_signalled;                         /* linkerror signalled to switch controller */
89479 +
89480 +    struct list_head     dev_hc_list;                                   /* list of the allocated hash_chunks */
89481 +
89482 +    ELAN4_IPROC_TRAP     dev_iproc_trap;                               /* space for iproc trap */
89483 +} ELAN4_DEV;
89484 +
89485 +/* values for dev_state */
89486 +#define ELAN4_STATE_STOPPED            (1 << 0)                        /* device initialised but not started */
89487 +#define ELAN4_STATE_STARTING           (1 << 1)                        /* device in process of starting */
89488 +#define ELAN4_STATE_STARTED            (1 << 2)                        /* device started */
89489 +#define ELAN4_STATE_STOPPING           (1 << 3)                        /* device in process of stopping */
89490 +
89491 +extern __inline__ unsigned long long
89492 +__elan4_readq (ELAN4_DEV *dev, ioaddr_t addr)
89493 +{
89494 +#if defined(__i386)
89495 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
89496 +    {
89497 +       uint64_t save[2];
89498 +       uint64_t rval;
89499 +       unsigned long flags, cr0;
89500 +       
89501 +       local_irq_save (flags);
89502 +
89503 +       /* Save FPU state */
89504 +        asm volatile("mov %%cr0,%0 ; clts\n" : "=r" (cr0));
89505 +       
89506 +       /* GNAT 7726: Save 128-bit xmm0 register value */
89507 +       asm volatile ("movupd %%xmm0,%0\n" : "=m" (save[0]));
89508 +       
89509 +       /* Do a 64-bit PCI read */
89510 +       asm volatile ("sfence\n"
89511 +                     "movq (%1), %%xmm0\n"
89512 +                     "movq %%xmm0, %0\n"
89513 +                     "sfence\n"
89514 +                     : "=m" (rval) : "r" (addr) : "memory");
89515 +       
89516 +       /* GNAT 7726: Restore 128-bit xmm0 register value */
89517 +       asm volatile("movupd %0,%%xmm0\n" : : "m" (save[0]));
89518 +       
89519 +       /* Restore FPU state */
89520 +       asm volatile("mov %0,%%cr0\n" : : "r" (cr0));
89521 +       
89522 +       local_irq_restore(flags);
89523 +       
89524 +       return rval;
89525 +    }
89526 +#endif
89527 +    return readq ((void *)addr);
89528 +}
89529 +
89530 +extern __inline__ unsigned int
89531 +__elan4_readl (ELAN4_DEV *dev, ioaddr_t addr)
89532 +{
89533 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
89534 +    {
89535 +       uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7));
89536 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffffffff);
89537 +    }
89538 +    return readl ((void *)addr);
89539 +}
89540 +
89541 +extern __inline__ unsigned int
89542 +__elan4_readw (ELAN4_DEV *dev, ioaddr_t addr)
89543 +{
89544 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
89545 +    {
89546 +       uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7));
89547 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xffff);
89548 +    }
89549 +    return readw ((void *)addr);
89550 +}
89551 +
89552 +extern __inline__ unsigned int
89553 +__elan4_readb (ELAN4_DEV *dev, ioaddr_t addr)
89554 +{
89555 +    if (dev->dev_devinfo.dev_params.values[ELAN4_PARAM_DRIVER_FEATURES] & ELAN4_FEATURE_64BIT_READ)
89556 +    {
89557 +       uint64_t val = __elan4_readq (dev, ((unsigned long) addr & ~7));
89558 +       return ((val >> (((unsigned long) addr & 7) << 3)) & 0xff);
89559 +    }
89560 +    return readb ((void *)addr);
89561 +}
89562 +
89563 +/* macros for accessing dev->dev_regs.Tags. */
89564 +#define write_tag(dev,what,val)                writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Tags.what)))
89565 +#define read_tag(dev,what)             __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, Tags.what))
89566 +
89567 +/* macros for accessing dev->dev_regs.Regs. */
89568 +#define write_reg64(dev,what,val)      writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Regs.what)))
89569 +#define write_reg32(dev,what,val)      writel (val, (void *) (dev->dev_regs + offsetof (E4_Registers, Regs.what)))
89570 +#define read_reg64(dev,what)           __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, Regs.what))
89571 +#define read_reg32(dev,what)           __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, Regs.what))
89572 +
89573 +/* macros for accessing dev->dev_regs.uRegs. */
89574 +#define write_ureg64(dev,what,val)     writeq (val, (void *) (dev->dev_regs + offsetof (E4_Registers, uRegs.what)))
89575 +#define write_ureg32(dev,what,val)     writel (val, (void *) (dev->dev_regs + offsetof (E4_Registers, uRegs.what)))
89576 +#define read_ureg64(dev,what)          __elan4_readq (dev, dev->dev_regs + offsetof (E4_Registers, uRegs.what))
89577 +#define read_ureg32(dev,what)          __elan4_readl (dev, dev->dev_regs + offsetof (E4_Registers, uRegs.what))
89578 +
89579 +/* macros for accessing dev->dev_i2c */
89580 +#define write_i2c(dev,what,val)                writeb (val, (void *) (dev->dev_i2c + offsetof (E4_I2C, what)))
89581 +#define read_i2c(dev,what)             __elan4_readb (dev, dev->dev_i2c + offsetof (E4_I2C, what))
89582 +
89583 +/* macros for accessing dev->dev_rom */
89584 +#define read_ebus_rom(dev,off)         __elan4_readb (dev, dev->dev_rom + off)
89585 +
89586 +/* PIO flush operations - ensure writes to registers/sdram are ordered */
89587 +#ifdef CONFIG_IA64_SGI_SN2
89588 +#define pioflush_reg(dev)              read_reg32(dev,InterruptReg)
89589 +#define pioflush_sdram(dev)            elan4_sdram_readl(dev, 0)
89590 +#else
89591 +#define pioflush_reg(dev)              mb()
89592 +#define pioflush_sdram(dev)            mb()
89593 +#endif
89594 +
89595 +/* macros for manipulating the interrupt mask register */
89596 +#define SET_INT_MASK(dev,value)        \
89597 +do { \
89598 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask = (value)); \
89599 +    pioflush_reg(dev);\
89600 +} while (0)
89601 +
89602 +#define CHANGE_INT_MASK(dev, value) \
89603 +do { \
89604 +    if ((dev)->dev_intmask != (value)) \
89605 +    {\
89606 +       write_reg32 (dev, InterruptMask, (dev)->dev_intmask = (value));\
89607 +       pioflush_reg(dev);\
89608 +    }\
89609 +} while (0)
89610 +
89611 +#define ENABLE_INT_MASK(dev,value) \
89612 +do { \
89613 +    unsigned long flags; \
89614 + \
89615 +    spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \
89616 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask |= (value)); \
89617 +    pioflush_reg(dev);\
89618 +    spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \
89619 +} while (0)
89620 +
89621 +#define DISABLE_INT_MASK(dev,value) \
89622 +do { \
89623 +    unsigned long flags; \
89624 + \
89625 +    spin_lock_irqsave (&(dev)->dev_intmask_lock, flags); \
89626 +    write_reg32(dev, InterruptMask, (dev)->dev_intmask &= ~(value)); \
89627 +    pioflush_reg(dev);\
89628 +    spin_unlock_irqrestore (&(dev)->dev_intmask_lock, flags); \
89629 +} while (0)
89630 +
89631 +#define SET_SYSCONTROL(dev,what,value) \
89632 +do { \
89633 +    unsigned long flags; \
89634 +\
89635 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
89636 +    if ((dev)->what++ == 0) \
89637 +        write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol |= (value)); \
89638 +    pioflush_reg(dev);\
89639 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
89640 +} while (0)
89641 +
89642 +#define CLEAR_SYSCONTROL(dev,what,value) \
89643 +do { \
89644 +    unsigned long flags; \
89645 +\
89646 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
89647 +    if (--(dev)->what == 0)\
89648 +       write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol &= ~(value)); \
89649 +    pioflush_reg (dev); \
89650 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
89651 +} while (0)
89652 +
89653 +#define PULSE_SYSCONTROL(dev,value) \
89654 +do { \
89655 +    unsigned long flags; \
89656 +\
89657 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
89658 +    write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol | (value)); \
89659 +    pioflush_reg (dev); \
89660 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
89661 +} while (0)
89662 +
89663 +#define CHANGE_SYSCONTROL(dev,add,sub) \
89664 +do { \
89665 +    unsigned long flags; \
89666 +\
89667 +    spin_lock_irqsave (&(dev)->dev_syscontrol_lock, flags); \
89668 +    dev->dev_syscontrol |= (add);\
89669 +    dev->dev_syscontrol &= ~(sub);\
89670 +    write_reg64 (dev, SysControlReg, (dev)->dev_syscontrol);\
89671 +    pioflush_reg (dev); \
89672 +    spin_unlock_irqrestore (&(dev)->dev_syscontrol_lock, flags); \
89673 +} while (0)
89674 +
89675 +#define SET_SCHED_STATUS(dev, value)\
89676 +do {\
89677 +    write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\
89678 +    pioflush_reg (dev);\
89679 +} while (0)
89680 +
89681 +#define CHANGE_SCHED_STATUS(dev, value)\
89682 +do {\
89683 +    if ((dev)->dev_schedstatus != (value))\
89684 +    {\
89685 +       write_reg32 (dev, SchedStatus.Status, (dev)->dev_schedstatus = (value));\
89686 +       pioflush_reg (dev);\
89687 +    }\
89688 +} while (0)
89689 +
89690 +#define PULSE_SCHED_RESTART(dev,value)\
89691 +do {\
89692 +    write_reg32 (dev, SchedStatus.Restart, value);\
89693 +    pioflush_reg (dev);\
89694 +} while (0)
89695 +
89696 +/* device context elan address space */
89697 +#define DEVICE_TPROC_SUSPEND_ADDR              (0x1000000000000000ull)
89698 +#define DEVICE_TPROC_SPACE_ADDR                        (0x1000000000000000ull + SDRAM_PAGE_SIZE)
89699 +#if defined(__LITTLE_ENDIAN__)
89700 +#  define DEVICE_TPROC_SUSPEND_INSTR           0xd3f040c0 /* st64suspend %r16, [%r1] */
89701 +#else
89702 +#  define DEVICE_TPROC_SUSPEND_INSTR           0xc040f0d3 /* st64suspend %r16, [%r1] */
89703 +#endif
89704 +
89705 +#define DEVICE_NETERR_INPUTQ_ADDR              (0x2000000000000000ull)
89706 +#define DEVICE_NETERR_INTCQ_ADDR               (0x2000000000000000ull + SDRAM_PAGE_SIZE)
89707 +#define DEVICE_NETERR_SLOTS_ADDR               (0x2000000000000000ull + SDRAM_PAGE_SIZE*2)
89708 +
89709 +/*
89710 + * Interrupt operation cookie space
89711 + * [50:48]     type
89712 + * [47:0]      value
89713 + */
89714 +#define INTOP_PERSISTENT                       (0x1000000000000ull)
89715 +#define INTOP_ONESHOT                          (0x2000000000000ull)
89716 +#define INTOP_TYPE_MASK                                (0x3000000000000ull)
89717 +#define INTOP_VALUE_MASK                       (0x0ffffffffffffull)
89718 +
89719 +/* functions for accessing sdram - sdram.c */
89720 +extern unsigned char      elan4_sdram_readb (ELAN4_DEV *dev, sdramaddr_t ptr);
89721 +extern unsigned short     elan4_sdram_readw (ELAN4_DEV *dev, sdramaddr_t ptr);
89722 +extern unsigned int       elan4_sdram_readl (ELAN4_DEV *dev, sdramaddr_t ptr);
89723 +extern unsigned long long elan4_sdram_readq (ELAN4_DEV *dev, sdramaddr_t ptr);
89724 +extern void               elan4_sdram_writeb (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned char val);
89725 +extern void               elan4_sdram_writew (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned short val);
89726 +extern void               elan4_sdram_writel (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned int val);
89727 +extern void               elan4_sdram_writeq (ELAN4_DEV *dev, sdramaddr_t ptr, unsigned long long val);
89728 +
89729 +extern void              elan4_sdram_zerob_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
89730 +extern void              elan4_sdram_zerow_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
89731 +extern void              elan4_sdram_zerol_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
89732 +extern void              elan4_sdram_zeroq_sdram (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
89733 +
89734 +extern void               elan4_sdram_copyb_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
89735 +extern void               elan4_sdram_copyw_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
89736 +extern void               elan4_sdram_copyl_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
89737 +extern void               elan4_sdram_copyq_from_sdram (ELAN4_DEV *dev, sdramaddr_t from, void *to, int nbytes);
89738 +extern void               elan4_sdram_copyb_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
89739 +extern void               elan4_sdram_copyw_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
89740 +extern void               elan4_sdram_copyl_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
89741 +extern void               elan4_sdram_copyq_to_sdram (ELAN4_DEV *dev, void *from, sdramaddr_t to, int nbytes);
89742 +
89743 +/* device.c - configuration */
89744 +extern unsigned int elan4_hash_0_size_val;
89745 +extern unsigned int elan4_hash_1_size_val;
89746 +extern unsigned int elan4_ctxt_table_shift;
89747 +extern unsigned int elan4_ln2_max_cqs;
89748 +extern unsigned int elan4_dmaq_highpri_size;
89749 +extern unsigned int elan4_threadq_highpri_size;
89750 +extern unsigned int elan4_dmaq_lowpri_size;
89751 +extern unsigned int elan4_threadq_lowpri_size;
89752 +extern unsigned int elan4_interruptq_size;
89753 +extern unsigned int elan4_mainint_punt_loops;
89754 +extern unsigned int elan4_mainint_resched_ticks;
89755 +extern unsigned int elan4_linkport_lock;
89756 +extern unsigned int elan4_eccerr_recheck;
89757 +
89758 +/* device.c */
89759 +extern void               elan4_set_schedstatus (ELAN4_DEV *dev, E4_uint32 intreg);
89760 +extern void               elan4_queue_haltop (ELAN4_DEV *dev, ELAN4_HALTOP *op);
89761 +extern void              elan4_queue_intop (ELAN4_DEV *dev, ELAN4_CQ *cq, ELAN4_INTOP *op);
89762 +extern void              elan4_register_intop (ELAN4_DEV *dev, ELAN4_INTOP *op);
89763 +extern void              elan4_deregister_intop (ELAN4_DEV *dev, ELAN4_INTOP *op);
89764 +extern void              elan4_queue_dma_flushop (ELAN4_DEV *dev, ELAN4_DMA_FLUSHOP *op, int hipri);
89765 +extern void              elan4_queue_mainintop (ELAN4_DEV *dev, ELAN4_INTOP *op);
89766 +
89767 +extern int                elan4_1msi0 (ELAN4_DEV *dev);
89768 +
89769 +extern int                elan4_insertctxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt, ELAN4_TRAP_OPS *ops);
89770 +extern void               elan4_removectxt (ELAN4_DEV *dev, ELAN4_CTXT *ctxt);
89771 +extern ELAN4_CTXT        *elan4_localctxt (ELAN4_DEV *dev, unsigned num);
89772 +extern ELAN4_CTXT        *elan4_networkctxt (ELAN4_DEV *dev, unsigned num);
89773 +
89774 +extern int                elan4_attach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum);
89775 +extern void               elan4_detach_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum);
89776 +extern void              elan4_set_filter (ELAN4_CTXT *ctxt, unsigned int ctxnum, E4_uint32 state);
89777 +extern void              elan4_set_routetable (ELAN4_CTXT *ctxt, ELAN4_ROUTE_TABLE *tbl);
89778 +
89779 +extern ELAN4_CQA *        elan4_getcqa (ELAN4_CTXT *ctxt, unsigned int idx);
89780 +extern void               elan4_putcqa (ELAN4_CTXT *ctxt, unsigned int idx);
89781 +extern ELAN4_CQ          *elan4_alloccq (ELAN4_CTXT *ctxt, unsigned cqsize, unsigned cqperm, unsigned cqtype);
89782 +extern void               elan4_freecq (ELAN4_CTXT *ctxt, ELAN4_CQ *cq);
89783 +extern void               elan4_restartcq (ELAN4_DEV *dev, ELAN4_CQ *cq);
89784 +extern void               elan4_flushcq (ELAN4_DEV *dev, ELAN4_CQ *cq);
89785 +extern void               elan4_updatecq (ELAN4_DEV *dev, ELAN4_CQ *cq, unsigned perm, unsigned restart);
89786 +
89787 +extern void              elan4_flush_icache (ELAN4_CTXT *ctxt);
89788 +extern void              elan4_flush_icache_halted (ELAN4_CTXT *ctxt);
89789 +
89790 +extern int                elan4_initialise_device (ELAN4_DEV *dev);
89791 +extern void               elan4_finalise_device (ELAN4_DEV *dev);
89792 +extern int                elan4_start_device (ELAN4_DEV *dev);
89793 +extern void               elan4_stop_device (ELAN4_DEV *dev);
89794 +
89795 +extern int               elan4_compute_position (ELAN_POSITION *pos, unsigned nodeid, unsigned numnodes, unsigned aritiyval);
89796 +extern int               elan4_get_position (ELAN4_DEV *dev, ELAN_POSITION *pos);
89797 +extern int               elan4_set_position (ELAN4_DEV *dev, ELAN_POSITION *pos);
89798 +extern void              elan4_get_params   (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short *mask);
89799 +extern void              elan4_set_params   (ELAN4_DEV *dev, ELAN_PARAMS *params, unsigned short mask);
89800 +
89801 +
89802 +extern int                elan4_read_vpd(ELAN4_DEV *dev, unsigned char *tag, unsigned char *result) ;
89803 +
89804 +extern void               proc_insertctxt(ELAN4_DEV *dev,ELAN4_CTXT *ctxt);
89805 +extern void               proc_removectxt(ELAN4_DEV *dev,ELAN4_CTXT *ctxt);
89806 +
89807 +extern int                elan4_route2str (E4_VirtualProcessEntry *route, char *routeStr);
89808 +extern void               elan4_hardware_lock_check(ELAN4_DEV *dev, char *from);
89809 +
89810 +/* device_osdep.c */
89811 +extern unsigned int      elan4_pll_cfg;
89812 +extern int               elan4_pll_div;
89813 +extern int               elan4_mod45disable;
89814 +extern int                assfail_mode;
89815 +
89816 +extern int                elan4_pciinit (ELAN4_DEV *dev);
89817 +extern void               elan4_pcifini (ELAN4_DEV *dev);
89818 +extern void              elan4_updatepll (ELAN4_DEV *dev, unsigned int val);
89819 +extern void               elan4_pcierror (ELAN4_DEV *dev);
89820 +
89821 +extern ELAN4_DEV        *elan4_reference_device (int instance, int state);
89822 +extern void              elan4_dereference_device (ELAN4_DEV *dev);
89823 +
89824 +extern ioaddr_t           elan4_map_device (ELAN4_DEV *dev, unsigned bar, unsigned off, unsigned size, ELAN4_MAP_HANDLE *handlep);
89825 +extern void               elan4_unmap_device (ELAN4_DEV *dev, ioaddr_t ptr, unsigned size, ELAN4_MAP_HANDLE *handlep);
89826 +extern unsigned long      elan4_resource_len (ELAN4_DEV *dev, unsigned bar);
89827 +
89828 +extern void               elan4_configure_writecombining (ELAN4_DEV *dev);
89829 +extern void              elan4_unconfigure_writecombining (ELAN4_DEV *dev);
89830 +
89831 +/* i2c.c */
89832 +extern int               i2c_disable_auto_led_update (ELAN4_DEV *dev);
89833 +extern void              i2c_enable_auto_led_update (ELAN4_DEV *dev);
89834 +extern int               i2c_write (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
89835 +extern int               i2c_read (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
89836 +extern int               i2c_writereg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data);
89837 +extern int               i2c_readreg (ELAN4_DEV *dev, unsigned int addr, unsigned int reg, unsigned int count, unsigned char *data);
89838 +extern int               i2c_read_rom (ELAN4_DEV *dev, unsigned int addr, unsigned int count, unsigned char *data);
89839 +
89840 +#if defined(__linux__)
89841 +/* procfs_Linux.c */
89842 +extern void              elan4_procfs_device_init (ELAN4_DEV *dev);
89843 +extern void              elan4_procfs_device_fini (ELAN4_DEV *dev);
89844 +extern void              elan4_procfs_init(void);
89845 +extern void              elan4_procfs_fini(void);
89846 +
89847 +extern struct proc_dir_entry *elan4_procfs_root;
89848 +extern struct proc_dir_entry *elan4_config_root;
89849 +#endif
89850 +
89851 +/* sdram.c */
89852 +extern void              elan4_sdram_init (ELAN4_DEV *dev);
89853 +extern void               elan4_sdram_fini (ELAN4_DEV *dev);
89854 +extern void               elan4_sdram_setup_delay_lines (ELAN4_DEV *dev, int factor);
89855 +extern int                elan4_sdram_init_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
89856 +extern void               elan4_sdram_fini_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
89857 +extern void              elan4_sdram_add_bank (ELAN4_DEV *dev, ELAN4_SDRAM_BANK *bank);
89858 +extern sdramaddr_t        elan4_sdram_alloc (ELAN4_DEV *dev, int nbytes);
89859 +extern void               elan4_sdram_free (ELAN4_DEV *dev, sdramaddr_t ptr, int nbytes);
89860 +extern void               elan4_sdram_flushcache (ELAN4_DEV *dev, sdramaddr_t base, int nbytes);
89861 +extern char              *elan4_sdramerr2str (ELAN4_DEV *dev, E4_uint64 status, E4_uint64 ConfigReg, char *str);
89862 +
89863 +/* traps.c */
89864 +extern void               elan4_display_eproc_trap (void *type, int mode, char *str, ELAN4_EPROC_TRAP *trap);
89865 +extern void               elan4_display_cproc_trap (void *type, int mode, char *str, ELAN4_CPROC_TRAP *trap);
89866 +extern void               elan4_display_dproc_trap (void *type, int mode, char *str, ELAN4_DPROC_TRAP *trap);
89867 +extern void               elan4_display_tproc_trap (void *type, int mode, char *str, ELAN4_TPROC_TRAP *trap);
89868 +extern void               elan4_display_iproc_trap (void *type, int mode, char *str, ELAN4_IPROC_TRAP *trap);
89869 +
89870 +
89871 +extern void               elan4_extract_eproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_EPROC_TRAP *trap, int iswaitevent);
89872 +extern void               elan4_extract_cproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_CPROC_TRAP *trap, unsigned cqnum);
89873 +extern void               elan4_extract_dproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_DPROC_TRAP *trap, unsigned unit);
89874 +extern void               elan4_extract_tproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_TPROC_TRAP *trap);
89875 +extern void               elan4_extract_iproc_trap (ELAN4_DEV *dev, E4_uint64 status, ELAN4_IPROC_TRAP *trap, unsigned unit);
89876 +extern void elan4_ringbuf_store(ELAN4_ROUTE_RINGBUF *ringbuf, E4_VirtualProcessEntry *route, ELAN4_DEV *dev);
89877 +extern int                cproc_open_extract_vp (ELAN4_DEV *dev, ELAN4_CQ *cq, int chan);
89878 +
89879 +extern void               elan4_inspect_iproc_trap (ELAN4_IPROC_TRAP *trap);
89880 +extern E4_uint64          elan4_trapped_open_command (ELAN4_DEV *dev, ELAN4_CQ *cq);
89881 +
89882 +/* mmu.c */
89883 +extern void               elan4mmu_flush_tlb (ELAN4_DEV *dev);
89884 +extern ELAN4_HASH_ENTRY  *elan4mmu_ptealloc (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, unsigned int *tagidxp);
89885 +extern int                elan4mmu_pteload (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, int type, E4_uint64 pte);
89886 +extern int                elan4mmu_pteload_page (ELAN4_CTXT *ctxt, int tbl, E4_Addr vaddr, struct page *page, int perm);
89887 +extern void               elan4mmu_pteunload (ELAN4_CTXT *ctxt, ELAN4_HASH_ENTRY *he, unsigned int tagidx, unsigned int pteidx);
89888 +extern void               elan4mmu_unload_range (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned long len);
89889 +extern void               elan4mmu_invalidate_ctxt (ELAN4_CTXT *ctxt);
89890 +
89891 +extern ELAN4_HASH_CACHE  *elan4mmu_reserve (ELAN4_CTXT *ctxt, int tbl, E4_Addr start, unsigned int npages, int cansleep);
89892 +extern void               elan4mmu_release (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc);
89893 +extern void               elan4mmu_set_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx, E4_uint64 newpte);
89894 +extern E4_uint64          elan4mmu_get_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx);
89895 +extern void               elan4mmu_clear_pte (ELAN4_CTXT *ctxt, ELAN4_HASH_CACHE *hc, unsigned int idx);
89896 +
89897 +extern int                elan4mmu_display_mmuhash(ELAN4_DEV *dev, int tlb, int *index, char *page, int count);
89898 +extern int                elan4mmu_display_ctxt_mmuhash(ELAN4_CTXT *ctxt, int tlb, int *index, char *page, int count);
89899 +extern int                elan4mmu_display_bucket_mmuhash(ELAN4_DEV *dev, int tlb, int *buckets, int nBuckets, char *page, int count);
89900 +extern void               elan4mmu_do_shuffle(ELAN4_CTXT *ctxt, int tbl);
89901 +extern void               elan4mmu_set_shuffle(ELAN4_CTXT *ctxt, int tbl, int hashidx);
89902 +
89903 +/* mmu_osdep.c */
89904 +extern int                elan4mmu_sdram_aliascheck (ELAN4_CTXT *ctxt, E4_Addr addr, sdramaddr_t phys);
89905 +extern int                elan4mmu_alloc_topaddr (ELAN4_DEV *dev, physaddr_t paddr, unsigned type);
89906 +extern E4_uint64          elan4mmu_phys2pte (ELAN4_DEV *dev, physaddr_t phys, unsigned perm);
89907 +extern physaddr_t        elan4mmu_pte2phys (ELAN4_DEV *dev, E4_uint64 pte);
89908 +
89909 +/* neterr.c */
89910 +extern int                elan4_neterr_init (ELAN4_DEV *dev);
89911 +extern void               elan4_neterr_destroy (ELAN4_DEV *dev);
89912 +extern int                elan4_neterr_sendmsg (ELAN4_DEV *dev, unsigned int nodeid, unsigned int retries, ELAN4_NETERR_MSG *msg);
89913 +extern int                elan4_neterr_iproc_trap (ELAN4_DEV *dev, ELAN4_IPROC_TRAP *trap);
89914 +
89915 +/* routetable.c */
89916 +extern ELAN4_ROUTE_TABLE *elan4_alloc_routetable (ELAN4_DEV *dev, unsigned size);
89917 +extern void               elan4_free_routetable (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl);
89918 +extern void               elan4_write_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry);
89919 +extern void               elan4_read_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp, E4_VirtualProcessEntry *entry);
89920 +extern void               elan4_invalidate_route (ELAN4_DEV *dev, ELAN4_ROUTE_TABLE *tbl, unsigned vp);
89921 +extern int                elan4_generate_route (ELAN_POSITION *pos, E4_VirtualProcessEntry *route, unsigned ctxnum,
89922 +                                               unsigned lowid, unsigned highid, unsigned options);
89923 +extern int               elan4_check_route (ELAN_POSITION *pos, ELAN_LOCATION location, E4_VirtualProcessEntry *route, unsigned flags);
89924 +
89925 +/* user.c */
89926 +extern int        __categorise_command (E4_uint64 command, int *cmdSize);
89927 +extern int        __whole_command (sdramaddr_t *commandPtr, sdramaddr_t insertPtr, unsigned int cqSize, unsigned int cmdSize);
89928 +
89929 +/* debug.c */
89930 +extern int        elan4_assfail (ELAN4_CTXT *ctxt, const char *ex, const char *func, const char *file, const int line);
89931 +extern int        elan4_debug_trigger (ELAN4_CTXT *ctxt, const char *func, const char *file, const int line, const char *fmt, ...);
89932 +
89933 +#if defined(DEBUG_ASSERT)
89934 +#define ELAN4_ASSERT(ctxt,EXPR)                        do { \
89935 +    if (!(EXPR) && elan4_assfail (ctxt, #EXPR, __FUNCTION__, __FILE__, __LINE__)) { \
89936 +       BUG(); \
89937 +    } \
89938 +} while (0)
89939 +#else
89940 +#define ELAN4_ASSERT(ctxt,EXPR)                        ((void) 0)
89941 +#endif
89942 +
89943 +#define ELAN4_DEBUG_TRIGGER(ctxt,fmt, args...) do {\
89944 +    if (elan4_debug_trigger (ctxt, __FUNCTION__, __FILE__, __LINE__, fmt, ##args)) \
89945 +       BUG();\
89946 +} while (0)
89947 +
89948 +/*
89949 + * Local variables:
89950 + * c-file-style: "stroustrup"
89951 + * End:
89952 + */
89953 +#endif /* __ELAN4_ELANDEV_H */
89954 diff -urN clean/include/elan4/device_Linux.h linux-2.6.9/include/elan4/device_Linux.h
89955 --- clean/include/elan4/device_Linux.h  1969-12-31 19:00:00.000000000 -0500
89956 +++ linux-2.6.9/include/elan4/device_Linux.h    2005-04-05 11:29:28.000000000 -0400
89957 @@ -0,0 +1,118 @@
89958 +/*
89959 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
89960 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
89961 + * 
89962 + *    For licensing information please see the supplied COPYING file
89963 + *
89964 + */
89965 +
89966 +#ifndef __ELAN4_ELANDEV_LINUX_H
89967 +#define __ELAN4_ELANDEV_LINUX_H
89968 +
89969 +#ident "$Id: device_Linux.h,v 1.26 2005/04/05 15:29:28 robin Exp $"
89970 +/*      $Source: /cvs/master/quadrics/elan4mod/device_Linux.h,v $*/
89971 +
89972 +#include <qsnet/autoconf.h>
89973 +
89974 +#if !defined(NO_COPROC)                                /* The older coproc kernel patch is applied */
89975 +#include <linux/coproc.h>
89976 +
89977 +#define ioproc_ops             coproc_ops_struct
89978 +#define ioproc_register_ops    register_coproc_ops
89979 +#define ioproc_unregister_ops  unregister_coproc_ops
89980 +
89981 +#define IOPROC_MM_STRUCT_ARG   1
89982 +#define IOPROC_PATCH_APPLIED   1
89983 +
89984 +#elif !defined(NO_IOPROC)                      /* The new ioproc kernel patch is applied */
89985 +#include <linux/ioproc.h>
89986 +
89987 +#define IOPROC_PATCH_APPLIED   1
89988 +#endif
89989 +
89990 +
89991 +#if defined(MPSAS)
89992 +#include <elan4/mpsas.h>
89993 +#endif
89994 +
89995 +#if defined(CONFIG_DEVFS_FS)
89996 +#include <linux/devfs_fs_kernel.h>
89997 +#endif
89998 +
89999 +#define ELAN4_MAJOR              61
90000 +#define ELAN4_NAME               "elan4"
90001 +#define ELAN4_MAX_CONTROLLER     16           /* limited to 4 bits */
90002 +
90003 +/* OS dependant component of ELAN4_DEV struct */
90004 +typedef struct elan4_dev_osdep
90005 +{
90006 +    struct pci_dev       *pdev;                        /* PCI config data */
90007 +
90008 +    struct proc_dir_entry *procdir;
90009 +    struct proc_dir_entry *configdir;
90010 +    struct proc_dir_entry *statsdir;
90011 +    struct proc_dir_entry *ctxtdir;
90012 +
90013 +#if defined(CONFIG_DEVFS_FS)
90014 +    devfs_handle_t devfs_control;
90015 +    devfs_handle_t devfs_sdram;
90016 +    devfs_handle_t devfs_user;
90017 +#endif
90018 +
90019 +#if defined(CONFIG_MTRR)
90020 +    int                           sdram_mtrr;
90021 +    int                           regs_mtrr;
90022 +#endif
90023 +} ELAN4_DEV_OSDEP;
90024 +
90025 +/* /dev/elan/rmsX */
90026 +
90027 +/* /dev/elan4/controlX */
90028 +typedef struct control_private
90029 +{
90030 +    struct elan4_dev   *pr_dev;
90031 +    unsigned           pr_boundary_scan;
90032 +} CONTROL_PRIVATE;
90033 +
90034 +/* /dev/elan4/sdramX */
90035 +typedef struct mem_page
90036 +{
90037 +    struct mem_page *pg_next;
90038 +    sdramaddr_t      pg_addr;
90039 +    unsigned long    pg_pgoff;
90040 +    unsigned        pg_ref;
90041 +} MEM_PAGE;
90042 +
90043 +#define MEM_HASH_SIZE  32
90044 +#define MEM_HASH(pgoff)        ((pgoff) & (MEM_HASH_SIZE-1))
90045 +
90046 +typedef struct mem_private
90047 +{
90048 +    struct elan4_dev *pr_dev;
90049 +    MEM_PAGE         *pr_pages[MEM_HASH_SIZE];
90050 +    spinlock_t        pr_lock;
90051 +} MEM_PRIVATE;
90052 +
90053 +/* /dev/elan4/userX */
90054 +typedef struct user_private
90055 +{
90056 +    atomic_t         pr_ref;
90057 +    struct user_ctxt *pr_uctx;
90058 +    struct mm_struct *pr_mm;
90059 +
90060 +#if defined(IOPROC_PATCH_APPLIED)
90061 +    struct ioproc_ops pr_ioproc;
90062 +#endif
90063 +} USER_PRIVATE;
90064 +
90065 +/* No mapping handles on linux */
90066 +typedef void *ELAN4_MAP_HANDLE;
90067 +
90068 +#define ELAN4_TASK_HANDLE()    ((unsigned long) current->mm)
90069 +
90070 +/*
90071 + * Local variables:
90072 + * c-file-style: "stroustrup"
90073 + * End:
90074 + */
90075 +#endif /* __ELAN4_ELANDEV_LINUX_H */
90076 diff -urN clean/include/elan4/dma.h linux-2.6.9/include/elan4/dma.h
90077 --- clean/include/elan4/dma.h   1969-12-31 19:00:00.000000000 -0500
90078 +++ linux-2.6.9/include/elan4/dma.h     2003-09-04 08:39:17.000000000 -0400
90079 @@ -0,0 +1,82 @@
90080 +/*
90081 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90082 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90083 + *
90084 + *    For licensing information please see the supplied COPYING file
90085 + *
90086 + */
90087 +
90088 +#ifndef __ELAN4_DMA_H
90089 +#define __ELAN4_DMA_H
90090 +
90091 +#ident "$Id: dma.h,v 1.16 2003/09/04 12:39:17 david Exp $"
90092 +/*      $Source: /cvs/master/quadrics/elan4hdr/dma.h,v $*/
90093 +
90094 +#include <elan4/types.h>
90095 +
90096 +/* Alignment for a DMA descriptor */
90097 +#define E4_DMA_ALIGN           (64)
90098 +
90099 +/* Maximum size of a single DMA ((1 << 31)-1) */
90100 +#define E4_MAX_DMA_SIZE                (0x7fffffff)
90101 +
90102 +/* 
90103 + * dma_typeSize
90104 + *
90105 + * [63:32]     Size
90106 + * [31]                unused
90107 + * [30]                IsRemote
90108 + * [29]                QueueWrite
90109 + * [28]                ShmemWrite
90110 + * [27:26]     DataType
90111 + * [25]                Broadcast
90112 + * [24]                AlignPackets
90113 + * [23:16]     FailCount
90114 + * [15:14]     unused
90115 + * [13:0]      Context
90116 + */
90117 +
90118 +#define DMA_FailCount(val)     (((val) & 0xff) << 16)
90119 +#define DMA_AlignPackets       (1 << 24)
90120 +#define DMA_Broadcast          (1 << 25)
90121 +#define DMA_ShMemWrite         (1 << 28)
90122 +#define DMA_QueueWrite         (1 << 29)
90123 +#define DMA_IsRemote           (1 << 30)
90124 +#define DMA_Context(val)       ((unsigned) (val) & 0x3fff)
90125 +#define DMA_ContextMask                0x3fffull
90126 +#define Dma_TypeSizeMask       0xfffffffffff00000ull
90127 +
90128 +#define DMA_DataTypeByte       (E4_DATATYPE_BYTE  << 26)
90129 +#define DMA_DataTypeShort      (E4_DATATYPE_SHORT << 26)
90130 +#define DMA_DataTypeWord       (E4_DATATYPE_WORD  << 26)
90131 +#define DMA_DataTypeLong       (E4_DATATYPE_DWORD << 26)
90132 +
90133 +#define E4_DMA_TYPE_SIZE(size, dataType, flags, failCount)     \
90134 +    ((((E4_uint64)(size)) << 32) |  ((dataType) & DMA_DataTypeLong) | \
90135 +     (flags) | DMA_FailCount(failCount))
90136 +
90137 +typedef volatile struct e4_dma
90138 +{
90139 +    E4_uint64          dma_typeSize;
90140 +    E4_uint64          dma_cookie;
90141 +    E4_uint64          dma_vproc;
90142 +    E4_Addr            dma_srcAddr;
90143 +    E4_Addr            dma_dstAddr;
90144 +    E4_Addr            dma_srcEvent;
90145 +    E4_Addr            dma_dstEvent;
90146 +} E4_DMA;
90147 +
90148 +/* Same as above but padded to 64-bytes */
90149 +typedef volatile struct e4_dma64
90150 +{
90151 +    E4_uint64          dma_typeSize;
90152 +    E4_uint64          dma_cookie;
90153 +    E4_uint64          dma_vproc;
90154 +    E4_Addr            dma_srcAddr;
90155 +    E4_Addr            dma_dstAddr;
90156 +    E4_Addr            dma_srcEvent;
90157 +    E4_Addr            dma_dstEvent;
90158 +    E4_Addr            dma_pad;
90159 +} E4_DMA64;
90160 +
90161 +#endif /* __ELAN4_DMA_H */
90162 diff -urN clean/include/elan4/events.h linux-2.6.9/include/elan4/events.h
90163 --- clean/include/elan4/events.h        1969-12-31 19:00:00.000000000 -0500
90164 +++ linux-2.6.9/include/elan4/events.h  2004-06-23 07:07:18.000000000 -0400
90165 @@ -0,0 +1,179 @@
90166 +/*
90167 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90168 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90169 + *
90170 + *    For licensing information please see the supplied COPYING file
90171 + *
90172 + */
90173 +
90174 +#ifndef __ELAN4_EVENTS_H
90175 +#define __ELAN4_EVENTS_H
90176 +
90177 +#ident "$Id: events.h,v 1.22 2004/06/23 11:07:18 addy Exp $"
90178 +/*      $Source: /cvs/master/quadrics/elan4hdr/events.h,v $*/
90179 +
90180 +#define E4_EVENT_ALIGN         32
90181 +#define E4_EVENTBLOCK_SIZE     64
90182 +
90183 +#ifndef _ASM
90184 +/*
90185 + * Event locations must be aligned to a 32 byte boundary. It is very much more efficent to place
90186 + * them in elan local memory but is not essential.
90187 + */
90188 +typedef struct _E4_Event
90189 +{
90190 +    volatile E4_uint64 ev_CountAndType;
90191 +    E4_uint64          ev_Params[2];
90192 +} E4_Event;
90193 +
90194 +/* Same as above but padded to correct Event alignment */
90195 +typedef struct _E4_Event32
90196 +{
90197 +    volatile E4_uint64 ev_CountAndType;
90198 +    E4_uint64          ev_Params[2];
90199 +    E4_uint64          ev_pad;
90200 +} E4_Event32;
90201 +
90202 +/*
90203 + * An E4_EVENTBLOCK_SIZE aligned block of Main or Elan memory
90204 + */
90205 +typedef union _E4_Event_Blk
90206 +{
90207 +    /* Padded to 64-bytes in case a cache-line write is more efficient */
90208 +    volatile E4_uint8  eb_unit8[E4_EVENTBLOCK_SIZE];
90209 +    volatile E4_uint32 eb_uint32[E4_EVENTBLOCK_SIZE/sizeof(E4_uint32)];
90210 +    volatile E4_uint64 eb_uint64[E4_EVENTBLOCK_SIZE/sizeof(E4_uint64)];
90211 +} E4_Event_Blk;
90212 +#define eb_done       eb_uint32[14]
90213 +#define eb_done_dword eb_uint64[7]
90214 +
90215 +#endif /* ! _ASM */
90216 +
90217 +/*
90218 + * ev_CountAndType
90219 + *  [63:32]   Count
90220 + *  [10]      CopyType
90221 + *  [9:8]     DataType
90222 + *  [7:0]     CopySize
90223 + */
90224 +#define E4_EVENT_TYPE_MASK     0x00000000ffffffffull
90225 +#define E4_EVENT_COUNT_MASK    0xffffffff00000000ull
90226 +#define E4_EVENT_COUNT_SHIFT   32
90227 +#define E4_EVENT_COPY_TYPE_MASK        (1 << 10)
90228 +#define E4_EVENT_DATA_TYPE_MASK        (3 << 8)
90229 +#define E4_EVENT_COPY_SIZE_MASK        (0xff)
90230 +
90231 +/* CopyType */
90232 +#define E4_EVENT_COPY          (0 << 10)
90233 +#define E4_EVENT_WRITE         (1 << 10)
90234 +
90235 +/* DataType */
90236 +#define E4_EVENT_DTYPE_BYTE    (0 << 8)
90237 +#define E4_EVENT_DTYPE_SHORT   (1 << 8)
90238 +#define E4_EVENT_DTYPE_WORD    (2 << 8)
90239 +#define E4_EVENT_DTYPE_LONG    (3 << 8)
90240 +
90241 +#define EVENT_COUNT(EventPtr)  ((E4_int32)(elan4_load64 (&(EventPtr)->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT))
90242 +#define EVENT_TYPE(EventPtr)   ((E4_uint32)(elan4_load64 (&(EventPtr)->ev_CountAndType) & E4_EVENT_TYPE_MASK))
90243 +
90244 +#define E4_WAITEVENT_COUNT_TYPE_VALUE(Count, EventType, DataType, CopySize) \
90245 +       (((E4_uint64)(Count) << E4_EVENT_COUNT_SHIFT) | (EventType) | (DataType) | (CopySize))
90246 +
90247 +#define E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize)     \
90248 +       ((EventType) | (DataType) | (CopySize))
90249 +
90250 +#define E4_EVENT_INIT_VALUE(InitialCount, EventType, DataType, CopySize)       \
90251 +       (((E4_uint64)(InitialCount) << E4_EVENT_COUNT_SHIFT) | E4_EVENT_TYPE_VALUE(EventType, DataType, CopySize))
90252 +
90253 +#define ev_CopySource  ev_Params[0]
90254 +#define ev_CopyDest    ev_Params[1]
90255 +#define ev_WritePtr    ev_Params[0]
90256 +#define ev_WriteValue  ev_Params[1]
90257 +
90258 +#define EVENT_BLK_READY(BLK) ((BLK)->eb_done != 0)
90259 +#define EVENT_READY(EVENT)   ((E4_int32)((((volatile E4_Event *) (EVENT))->ev_CountAndType) >> E4_EVENT_COUNT_SHIFT) >= 0)
90260 +
90261 +#define ELAN_WAIT_EVENT (0)
90262 +#define ELAN_POLL_EVENT (-1)
90263 +
90264 +#define E4_BLK_PATTERN ((E4_uint32)0xfeedface)
90265 +
90266 +#define E4_INIT_COPY_EVENT(EVENT, BLK_ELAN, BLK, SIZE)                                                         \
90267 +       do {                                                                                            \
90268 +          elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, SIZE), &(EVENT)->ev_CountAndType); \
90269 +           elan4_store64 ((BLK_ELAN), &(EVENT)->ev_CopySource); \
90270 +          elan4_store64 ((BLK), &(EVENT)->ev_CopyDest); \
90271 +       } while (0)
90272 +
90273 +#define E4_INIT_WRITE_EVENT(EVENT, DWORD)                                                              \
90274 +       do {                                                                                            \
90275 +           elan4_store64 (E4_EVENT_INIT_VALUE(0, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType);  \
90276 +           elan4_store64 ((DWORD), &(EVENT)->ev_WritePtr); \
90277 +           elan4_store64 ((E4_Addr) (E4_BLK_PATTERN), &(EVENT)->ev_WriteValue); \
90278 +       } while (0)
90279 +
90280 +#define E4_RESET_BLK_EVENT(BLK)                                        \
90281 +       do {                                                            \
90282 +               (BLK)->eb_done = (0);                                   \
90283 +       } while (0)
90284 +
90285 +#define E4_PRIME_BLK_EVENT(EVENT, COUNT)                               \
90286 +       do {                                                            \
90287 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, 8), &(EVENT)->ev_CountAndType);\
90288 +       } while (0)
90289 +
90290 +#define E4_PRIME_COPY_EVENT(EVENT, SIZE, COUNT)                                \
90291 +       do {                                                            \
90292 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_COPY, E4_EVENT_DTYPE_LONG, (SIZE >> 3)), &(EVENT)->ev_CountAndType);\
90293 +       } while (0)
90294 +
90295 +#define E4_PRIME_WRITE_EVENT(EVENT, COUNT)                                     \
90296 +       do {                                                                    \
90297 +          elan4_store64 (E4_EVENT_INIT_VALUE(COUNT, E4_EVENT_WRITE, E4_EVENT_DTYPE_LONG, 0), &(EVENT)->ev_CountAndType);\
90298 +       } while (0)
90299 +
90300 +#ifndef _ASM
90301 +
90302 +#define E4_INPUTQ_ALIGN                        32      /* Descriptor must be 32-byte aligned */
90303 +
90304 +typedef struct _E4_InputQueue
90305 +{
90306 +   volatile E4_Addr    q_bptr;         /* 64 bit aligned ptr to current back item */
90307 +   E4_Addr             q_fptr;         /* 64 bit aligned ptr to current front item */
90308 +   E4_uint64           q_control;      /* this defines the last item, item size, and offset back to the first item. */
90309 +   E4_Addr             q_event;        /* queue event */
90310 +} E4_InputQueue;
90311 +
90312 +#define E4_INPUTQ_LASTITEM_MASK        0x00000000ffffffffULL
90313 +#define E4_INPUTQ_ITEMSIZE_MASK                0x000000ff00000000ULL
90314 +#define E4_INPUTQ_LASTITEM_OFFSET_MASK 0xffffff0000000000ULL
90315 +#define E4_INPUTQ_LASTITEM_SHIFT       0
90316 +#define E4_INPUTQ_ITEMSIZE_SHIFT       32
90317 +#define E4_INPUTQ_LASTITEM_OFFSET_SHIFT        40
90318 +
90319 +/*
90320 + * Macro to initialise the InputQueue control word given the FirstItem, LastItem & ItemSize
90321 + * FirstItem and LastItem are 64 bit double word aligned elan addresses.
90322 + */
90323 +#define E4_InputQueueControl(FirstItem, LastItem, ItemSizeInBytes)\
90324 +   (((((E4_uint64)(LastItem)))                                                      & E4_INPUTQ_LASTITEM_MASK) |\
90325 +    ((((E4_uint64)(ItemSizeInBytes))        << (E4_INPUTQ_ITEMSIZE_SHIFT-3))        & E4_INPUTQ_ITEMSIZE_MASK)  |\
90326 +    ((((E4_uint64)((FirstItem)-(LastItem))) << (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3)) & E4_INPUTQ_LASTITEM_OFFSET_MASK))    
90327 +
90328 +/* 
90329 + * LastItemOffset is a sign extended -ve quantity with LastItemOffset[26:3] == q_control[63:40]
90330 + * we sign extend this by setting LastItemOffset[63:27] to be #one.
90331 + */
90332 +#define E4_InputQueueLastItemOffset(control)  ((((E4_int64) -1) << (64 - (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))) | \
90333 +                                              ((E4_int64) (((control) & E4_INPUTQ_LASTITEM_OFFSET_MASK) >> (E4_INPUTQ_LASTITEM_OFFSET_SHIFT-3))))
90334 +#define E4_InputQueueItemSize(control)       (((control) & E4_INPUTQ_ITEMSIZE_MASK) >> (E4_INPUTQ_ITEMSIZE_SHIFT-3))
90335 +
90336 +/*
90337 + * Macro to increment the InputQ front pointer taking into account wrap 
90338 + */
90339 +#define E4_InputQueueFptrIncrement(Q, FirstItem, LastItem, ItemSizeInBytes) \
90340 +       ((Q)->q_fptr = ( ((Q)->q_fptr == (LastItem)) ? (FirstItem) : ((Q)->q_fptr + (ItemSizeInBytes))) )
90341 +
90342 +#endif /* _ASM */
90343 +
90344 +#endif /* __ELAN4_EVENTS_H */
90345 diff -urN clean/include/elan4/i2c.h linux-2.6.9/include/elan4/i2c.h
90346 --- clean/include/elan4/i2c.h   1969-12-31 19:00:00.000000000 -0500
90347 +++ linux-2.6.9/include/elan4/i2c.h     2003-12-02 11:11:22.000000000 -0500
90348 @@ -0,0 +1,47 @@
90349 +/*
90350 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90351 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90352 + *
90353 + *    For licensing information please see the supplied COPYING file
90354 + *
90355 + */
90356 +
90357 +#ifndef _ELAN4_I2C_H
90358 +#define _ELAN4_I2C_H
90359 +
90360 +#ident "@(#)$Id: i2c.h,v 1.10 2003/12/02 16:11:22 lee Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
90361 +/*      $Source: /cvs/master/quadrics/elan4hdr/i2c.h,v $*/
90362 +
90363 +/* I2C address space - bits[7:1] */
90364 +#define I2C_LED_I2C_ADDR                       0x20
90365 +#define I2C_TEMP_ADDR                          0x48
90366 +#define I2C_EEPROM_ADDR                                0x50
90367 +
90368 +#define I2C_WRITE_ADDR(addr)                   ((addr) << 1 | 0)
90369 +#define I2C_READ_ADDR(addr)                    ((addr) << 1 | 1)
90370 +
90371 +/* I2C EEPROM appears as 8 I2C 256 byte devices */
90372 +#define I2C_24LC16B_BLOCKSIZE                  (256)
90373 +#define I2C_24LC16B_BLOCKADDR(addr)            ((addr) >> 8)
90374 +#define I2C_24LC16B_BLOCKOFFSET(addr)          ((addr) & 0xff)
90375 +
90376 +#define I2C_ELAN_EEPROM_PCI_BASEADDR           0       /* PCI config starts at addr 0 in the EEPROM */
90377 +#define I2C_ELAN_EEPROM_VPD_BASEADDR           256     /* VPD data start                            */
90378 +#define I2C_ELAN_EEPROM_PCI_SIZE               256     /* PCI data max size                         */
90379 +#define I2C_ELAN_EEPROM_VPD_SIZE               256     /* VPD data max size                         */
90380 +
90381 +#define I2C_ELAN_EEPROM_SIZE                   2048
90382 +
90383 +#define I2C_ELAN_EEPROM_DEVICE_ID              0xA0
90384 +#define I2C_ELAN_EEPROM_FAIL_LIMIT              8
90385 +
90386 +#define I2C_ELAN_EEPROM_ADDR_BLOCKSIZE_SHIFT   0x8
90387 +#define I2C_ELAN_EEPROM_ADDR_BLOCK_MASK                0x7
90388 +#define I2C_ELAN_EEPROM_ADDR_BLOCK_SHIFT       0x1
90389 +
90390 +/*
90391 + * Local variables:
90392 + * c-file-style: "stroustrup"
90393 + * End:
90394 + */
90395 +#endif /* _ELAN4_I2C_H */
90396 diff -urN clean/include/elan4/intcookie.h linux-2.6.9/include/elan4/intcookie.h
90397 --- clean/include/elan4/intcookie.h     1969-12-31 19:00:00.000000000 -0500
90398 +++ linux-2.6.9/include/elan4/intcookie.h       2004-08-09 10:02:37.000000000 -0400
90399 @@ -0,0 +1,62 @@
90400 +/*
90401 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
90402 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90403 + * 
90404 + *    For licensing information please see the supplied COPYING file
90405 + *
90406 + */
90407 +
90408 +#ident "@(#)$Id: intcookie.h,v 1.10 2004/08/09 14:02:37 daniel Exp $"
90409 +/*      $Source: /cvs/master/quadrics/elan4mod/intcookie.h,v $*/
90410 +
90411 +#ifndef __ELAN4_INTCOOKIE_H
90412 +#define __ELAN4_INTCOOKIE_H
90413 +
90414 +typedef E4_uint64 ELAN4_INTCOOKIE;
90415 +
90416 +#ifdef __KERNEL__
90417 +
90418 +typedef struct intcookie_entry
90419 +{
90420 +    struct intcookie_entry    *ent_next;
90421 +    struct intcookie_entry    *ent_prev;
90422 +
90423 +    spinlock_t                ent_lock;
90424 +    unsigned                  ent_ref;
90425 +
90426 +    ELAN4_INTCOOKIE           ent_cookie;
90427 +    ELAN4_INTCOOKIE           ent_fired;
90428 +    kcondvar_t                ent_wait;
90429 +} INTCOOKIE_ENTRY;
90430 +
90431 +typedef struct intcookie_table
90432 +{
90433 +    struct intcookie_table    *tbl_next;
90434 +    struct intcookie_table    *tbl_prev;
90435 +
90436 +    ELAN_CAPABILITY           *tbl_cap;
90437 +
90438 +    spinlock_t                tbl_lock;
90439 +    unsigned                  tbl_ref;
90440 +    INTCOOKIE_ENTRY           *tbl_entries;
90441 +} INTCOOKIE_TABLE;
90442 +
90443 +extern void                intcookie_init(void);
90444 +extern void                intcookie_fini(void);
90445 +extern INTCOOKIE_TABLE    *intcookie_alloc_table (ELAN_CAPABILITY *cap);
90446 +extern void                intcookie_free_table (INTCOOKIE_TABLE *tbl);
90447 +extern int                 intcookie_alloc (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
90448 +extern int                 intcookie_free (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
90449 +extern int                 intcookie_fire (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
90450 +extern int                 intcookie_fire_cap (ELAN_CAPABILITY *cap, ELAN4_INTCOOKIE cookie);
90451 +extern int                 intcookie_wait (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
90452 +extern int                 intcookie_arm (INTCOOKIE_TABLE *tbl, ELAN4_INTCOOKIE cookie);
90453 +
90454 +#endif /* __KERNEL */
90455 +
90456 +/*
90457 + * Local variables:
90458 + * c-file-style: "stroustrup"
90459 + * End:
90460 + */
90461 +#endif /* __ELAN4_INTCOOKIE_H */
90462 diff -urN clean/include/elan4/ioctl.h linux-2.6.9/include/elan4/ioctl.h
90463 --- clean/include/elan4/ioctl.h 1969-12-31 19:00:00.000000000 -0500
90464 +++ linux-2.6.9/include/elan4/ioctl.h   2005-01-10 12:45:50.000000000 -0500
90465 @@ -0,0 +1,320 @@
90466 +/*
90467 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
90468 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90469 + * 
90470 + *    For licensing information please see the supplied COPYING file
90471 + *
90472 + */
90473 +
90474 +#ifndef __ELAN4_IOCTL_H
90475 +#define __ELAN4_IOCTL_H
90476 +
90477 +#ident "@(#)$Id: ioctl.h,v 1.29 2005/01/10 17:45:50 duncant Exp $"
90478 +/*      $Source: /cvs/master/quadrics/elan4mod/ioctl.h,v $*/
90479 +
90480 +#include <elan/devinfo.h>
90481 +#include <elan/capability.h>
90482 +
90483 +#include <elan4/dma.h>
90484 +#include <elan4/neterr.h>
90485 +#include <elan4/registers.h>
90486 +#include <elan4/intcookie.h>
90487 +
90488 +#define ELAN4IO_CONTROL_PATHNAME       "/dev/elan4/control%d"
90489 +#define ELAN4IO_USER_PATHNAME          "/dev/elan4/user%d"
90490 +#define ELAN4IO_SDRAM_PATHNAME         "/dev/elan4/sdram%d"
90491 +#define ELAN4IO_MAX_PATHNAMELEN        32
90492 +
90493 +/*
90494 + * NOTE - ioctl values 0->0x1f are defined for 
90495 + *        generic/control usage.
90496 + */
90497 +
90498 +/* Macro to generate 'offset' to mmap "control" device */
90499 +#define OFF_TO_BAR(off)                (((off) >> 28) & 0xF)
90500 +#define OFF_TO_OFFSET(off)     ((off) & 0x0FFFFFFF)
90501 +#define GEN_OFF(bar,off)       (((bar) << 28) | ((off) & 0x0FFFFFFF))
90502 +
90503 +/* Definiations for generic ioctls */
90504 +#define ELAN4IO_GENERIC_BASE           0x00
90505 +
90506 +typedef struct elan4io_stats_struct
90507 +{
90508 +    int                       which;
90509 +    unsigned long long ptr;                                    /* always pass pointer as 64 bit */
90510 +} ELAN4IO_STATS_STRUCT;
90511 +
90512 +#define ELAN4IO_STATS                  _IOR ('e', ELAN4IO_GENERIC_BASE + 0, ELAN4IO_STATS_STRUCT)
90513 +#define ELAN4IO_DEVINFO                        _IOR ('e', ELAN4IO_GENERIC_BASE + 1, ELAN_DEVINFO)
90514 +#define ELAN4IO_POSITION               _IOR ('e', ELAN4IO_GENERIC_BASE + 2, ELAN_POSITION)
90515 +
90516 +
90517 +/* 
90518 + * Definitions for /dev/elan4/controlX
90519 + */
90520 +#define ELAN4IO_CONTROL_BASE           0x20
90521 +
90522 +#define ELAN4IO_GET_POSITION           _IOR ('e', ELAN4IO_CONTROL_BASE + 0, ELAN_POSITION)
90523 +#define ELAN4IO_SET_POSITION           _IOW ('e', ELAN4IO_CONTROL_BASE + 1, ELAN_POSITION)
90524 +#define ELAN4IO_DEBUG_SNAPSHOT         _IOW ('e', ELAN4IO_CONTROL_BASE + 2, )
90525 +
90526 +typedef struct elan4io_params_mask_struct
90527 +{
90528 +    unsigned short     p_mask;
90529 +    ELAN_PARAMS                p_params;
90530 +} ELAN4IO_PARAMS_STRUCT;
90531 +#define ELAN4IO_GET_PARAMS             _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN4IO_PARAMS_STRUCT)
90532 +#define ELAN4IO_SET_PARAMS             _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN4IO_PARAMS_STRUCT)
90533 +
90534 +/* old versions - implicit p_mask == 3 */
90535 +#define ELAN4IO_OLD_GET_PARAMS         _IOR ('e', ELAN4IO_CONTROL_BASE + 3, ELAN_PARAMS)
90536 +#define ELAN4IO_OLD_SET_PARAMS         _IOW ('e', ELAN4IO_CONTROL_BASE + 4, ELAN_PARAMS)
90537 +
90538 +/*
90539 + * Definitions for /dev/elan4/userX
90540 + */
90541 +#define ELAN4IO_USER_BASE              0x40
90542 +
90543 +#define ELAN4IO_FREE                   _IO   ('e', ELAN4IO_USER_BASE + 0)
90544 +#define ELAN4IO_ATTACH                 _IOWR ('e', ELAN4IO_USER_BASE + 1, ELAN_CAPABILITY)
90545 +#define ELAN4IO_DETACH                 _IOWR ('e', ELAN4IO_USER_BASE + 2, ELAN_CAPABILITY)
90546 +#define ELAN4IO_BLOCK_INPUTTER         _IO   ('e', ELAN4IO_USER_BASE + 3)
90547 +
90548 +typedef struct elan4io_add_p2pvp_struct 
90549 +{
90550 +    unsigned        vp_process;
90551 +    ELAN_CAPABILITY vp_capability;
90552 +} ELAN4IO_ADD_P2PVP_STRUCT;
90553 +
90554 +#define ELAN4IO_ADD_P2PVP              _IOW  ('e', ELAN4IO_USER_BASE + 4, ELAN4IO_ADD_P2PVP_STRUCT)
90555 +
90556 +typedef struct elan4io_add_bcastvp_struct
90557 +{
90558 +    unsigned int       vp_process;
90559 +    unsigned int       vp_lowvp;
90560 +    unsigned int       vp_highvp;
90561 +} ELAN4IO_ADD_BCASTVP_STRUCT;
90562 +
90563 +#define ELAN4IO_ADD_BCASTVP            _IOW  ('e', ELAN4IO_USER_BASE + 5, ELAN4IO_ADD_BCASTVP_STRUCT)
90564 +
90565 +#define ELAN4IO_REMOVEVP               _IO   ('e', ELAN4IO_USER_BASE + 6)
90566 +
90567 +typedef struct elan4io_route_struct
90568 +{
90569 +    unsigned int          rt_process;
90570 +    unsigned int          rt_error;
90571 +    E4_VirtualProcessEntry rt_route;
90572 +} ELAN4IO_ROUTE_STRUCT;
90573 +
90574 +#define ELAN4IO_SET_ROUTE              _IOW  ('e', ELAN4IO_USER_BASE + 7, ELAN4IO_ROUTE_STRUCT)
90575 +#define ELAN4IO_RESET_ROUTE            _IOW  ('e', ELAN4IO_USER_BASE + 9, ELAN4IO_ROUTE_STRUCT)
90576 +#define ELAN4IO_GET_ROUTE              _IOWR ('e', ELAN4IO_USER_BASE + 8, ELAN4IO_ROUTE_STRUCT)
90577 +#define ELAN4IO_CHECK_ROUTE            _IOWR ('e', ELAN4IO_USER_BASE + 10, ELAN4IO_ROUTE_STRUCT)
90578 +
90579 +typedef struct elan4io_alloc_cq_struct
90580 +{
90581 +    unsigned int cq_size;                                      /* input: size of queue */
90582 +    unsigned int cq_perm;                                      /* input: requested permissions */
90583 +    unsigned int cq_type;                                      /* input: queue type */
90584 +    unsigned int cq_indx;                                      /* output: queue number */
90585 +} ELAN4IO_ALLOCCQ_STRUCT;
90586 +
90587 +#define ELAN4IO_ALLOCCQ                        _IOWR ('e', ELAN4IO_USER_BASE + 11, ELAN4IO_ALLOCCQ_STRUCT)
90588 +#define ELAN4IO_FREECQ                 _IOWR ('e', ELAN4IO_USER_BASE + 12, unsigned)
90589 +
90590 +#define ELAN4IO_CQ_TYPE_REORDER                1                       /* revb reordering command queue */
90591 +
90592 +typedef struct elan4io_perm_struct
90593 +{
90594 +    E4_Addr            ps_eaddr;
90595 +    E4_uint64          ps_len;
90596 +    unsigned long      ps_maddr;
90597 +    unsigned int       ps_perm;
90598 +} ELAN4IO_PERM_STRUCT;
90599 +
90600 +typedef struct elan4io_perm_struct32
90601 +{
90602 +    E4_Addr            ps_eaddr;
90603 +    E4_uint64          ps_len;
90604 +    unsigned int       ps_maddr;
90605 +    unsigned int       ps_perm;
90606 +} ELAN4IO_PERM_STRUCT32;
90607 +
90608 +#define ELAN4IO_SETPERM                        _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT)
90609 +#define ELAN4IO_SETPERM32              _IOWR ('e', ELAN4IO_USER_BASE + 13, ELAN4IO_PERM_STRUCT32)
90610 +#define ELAN4IO_CLRPERM                        _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT)
90611 +#define ELAN4IO_CLRPERM32              _IOWR ('e', ELAN4IO_USER_BASE + 14, ELAN4IO_PERM_STRUCT32)
90612 +
90613 +typedef struct elan4io_trapsig_struct
90614 +{
90615 +    int                ts_signo;
90616 +} ELAN4IO_TRAPSIG_STRUCT;
90617 +#define ELAN4IO_TRAPSIG                        _IOW  ('e', ELAN4IO_USER_BASE + 15, ELAN4IO_TRAPSIG_STRUCT)
90618 +
90619 +typedef struct elan4io_traphandler_struct
90620 +{
90621 +    unsigned int       th_nticks;                              /* number of ticks to sleep for next trap */
90622 +    unsigned int       th_proc;                                        /* elan processor involved */
90623 +    unsigned long      th_trapp;                               /* space to store trap */
90624 +} ELAN4IO_TRAPHANDLER_STRUCT;
90625 +
90626 +typedef struct elan4io_traphandler_struct32
90627 +{
90628 +    unsigned int       th_nticks;                              /* number of ticks to sleep for next trap */
90629 +    unsigned int       th_proc;                                        /* elan processor involved */
90630 +    unsigned int       th_trapp;                               /* space to store trap */
90631 +} ELAN4IO_TRAPHANDLER_STRUCT32;
90632 +
90633 +#define ELAN4IO_TRAPHANDLER            _IOW  ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT)
90634 +#define ELAN4IO_TRAPHANDLER32          _IOW  ('e', ELAN4IO_USER_BASE + 16, ELAN4IO_TRAPHANDLER_STRUCT32)
90635 +
90636 +typedef struct elan4io_required_mappings_struct
90637 +{
90638 +    E4_Addr    rm_upage_addr;                                  /* elan address of user page */
90639 +    E4_Addr    rm_trestart_addr;                               /* elan address of tproc restart trampoline */
90640 +} ELAN4IO_REQUIRED_MAPPINGS_STRUCT;
90641 +#define ELAN4IO_REQUIRED_MAPPINGS      _IOW  ('e', ELAN4IO_USER_BASE + 17, ELAN4IO_REQUIRED_MAPPINGS_STRUCT)
90642 +
90643 +typedef struct elan4io_resume_eproc_trap_struct
90644 +{
90645 +    E4_Addr             rs_addr;
90646 +} ELAN4IO_RESUME_EPROC_TRAP_STRUCT;
90647 +#define ELAN4IO_RESUME_EPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 18, ELAN4IO_RESUME_EPROC_TRAP_STRUCT)
90648 +
90649 +typedef struct elan4io_resume_cproc_trap_struct
90650 +{
90651 +    unsigned int       rs_indx;
90652 +} ELAN4IO_RESUME_CPROC_TRAP_STRUCT;
90653 +#define ELAN4IO_RESUME_CPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 19, ELAN4IO_RESUME_CPROC_TRAP_STRUCT)
90654 +
90655 +typedef struct elan4io_resume_dproc_trap_struct
90656 +{
90657 +    E4_DMA             rs_desc;
90658 +} ELAN4IO_RESUME_DPROC_TRAP_STRUCT;
90659 +#define ELAN4IO_RESUME_DPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 20, ELAN4IO_RESUME_DPROC_TRAP_STRUCT)
90660 +
90661 +typedef struct elan4io_resume_tproc_trap_struct
90662 +{
90663 +    E4_ThreadRegs      rs_regs;
90664 +} ELAN4IO_RESUME_TPROC_TRAP_STRUCT;
90665 +#define ELAN4IO_RESUME_TPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 21, ELAN4IO_RESUME_TPROC_TRAP_STRUCT)
90666 +
90667 +typedef struct elan4io_resume_iproc_trap_struct
90668 +{
90669 +    unsigned int       rs_channel;
90670 +    unsigned int       rs_trans;
90671 +    E4_IprocTrapHeader  rs_header;
90672 +    E4_IprocTrapData    rs_data;
90673 +} ELAN4IO_RESUME_IPROC_TRAP_STRUCT;
90674 +#define ELAN4IO_RESUME_IPROC_TRAP      _IOW  ('e', ELAN4IO_USER_BASE + 22, ELAN4IO_RESUME_IPROC_TRAP_STRUCT)
90675 +
90676 +#define ELAN4IO_FLUSH_ICACHE           _IO   ('e', ELAN4IO_USER_BASE + 23)
90677 +#define ELAN4IO_STOP_CTXT              _IO   ('e', ELAN4IO_USER_BASE + 24)
90678 +
90679 +#define ELAN4IO_ALLOC_INTCOOKIE                _IOW  ('e', ELAN4IO_USER_BASE + 25, ELAN4_INTCOOKIE)
90680 +#define ELAN4IO_FREE_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 26, ELAN4_INTCOOKIE)
90681 +#define ELAN4IO_ARM_INTCOOKIE          _IOW  ('e', ELAN4IO_USER_BASE + 27, ELAN4_INTCOOKIE)
90682 +#define ELAN4IO_WAIT_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 28, ELAN4_INTCOOKIE)
90683 +
90684 +typedef struct elan4io_alloc_trap_queues_struct
90685 +{
90686 +    unsigned int       tq_ndproc_traps;
90687 +    unsigned int       tq_neproc_traps;
90688 +    unsigned int       tq_ntproc_traps;
90689 +    unsigned int       tq_nthreads;
90690 +    unsigned int       tq_ndmas;
90691 +} ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT;
90692 +#define ELAN4IO_ALLOC_TRAP_QUEUES      _IOW  ('e', ELAN4IO_USER_BASE + 29, ELAN4IO_ALLOC_TRAP_QUEUES_STRUCT)
90693 +
90694 +typedef struct elan4io_neterr_msg_struct
90695 +{
90696 +    unsigned int       nm_vp;
90697 +    unsigned int       nm_nctx;
90698 +    unsigned int       nm_retries;
90699 +    unsigned int        nm_pad;
90700 +    ELAN4_NETERR_MSG    nm_msg;
90701 +} ELAN4IO_NETERR_MSG_STRUCT;
90702 +#define ELAN4IO_NETERR_MSG             _IOW ('e', ELAN4IO_USER_BASE + 30, ELAN4IO_NETERR_MSG_STRUCT)
90703 +
90704 +typedef struct elan4io_neterr_timer_struct 
90705 +{
90706 +    unsigned int       nt_usecs;
90707 +} ELAN4IO_NETERR_TIMER_STUCT;
90708 +
90709 +#define ELAN4IO_NETERR_TIMER           _IO  ('e', ELAN4IO_USER_BASE + 31)
90710 +
90711 +typedef struct elan4io_neterr_fixup_struct
90712 +{
90713 +    E4_uint64          nf_cookie;
90714 +    unsigned int       nf_waitforeop;
90715 +    unsigned int       nf_sten;
90716 +    unsigned int       nf_vp;
90717 +    unsigned int       nf_pad;
90718 +} ELAN4IO_NETERR_FIXUP_STRUCT;
90719 +
90720 +#define ELAN4IO_NETERR_FIXUP           _IOW ('e', ELAN4IO_USER_BASE + 32, ELAN4IO_NETERR_FIXUP_STRUCT)
90721 +
90722 +typedef struct elan4io_firecap_struct 
90723 +{
90724 +    ELAN_CAPABILITY     fc_capability;
90725 +    ELAN4_INTCOOKIE     fc_cookie;
90726 +} ELAN4IO_FIRECAP_STRUCT;
90727 +
90728 +#define ELAN4IO_FIRE_INTCOOKIE         _IOW  ('e', ELAN4IO_USER_BASE + 33, ELAN4IO_FIRECAP_STRUCT)
90729 +
90730 +#define ELAN4IO_ALLOC_INTCOOKIE_TABLE  _IOW  ('e', ELAN4IO_USER_BASE + 34, ELAN_CAPABILITY)
90731 +#define ELAN4IO_FREE_INTCOOKIE_TABLE   _IO   ('e', ELAN4IO_USER_BASE + 35)
90732 +
90733 +typedef struct elan4io_translation
90734 +{
90735 +    E4_Addr            tr_addr;
90736 +    unsigned long      tr_len;
90737 +    unsigned int       tr_access;
90738 +} ELAN4IO_TRANSLATION_STRUCT;
90739 +
90740 +#define ELAN4IO_LOAD_TRANSLATION       _IOW  ('e', ELAN4IO_USER_BASE + 36, ELAN4IO_TRANSLATION_STRUCT)
90741 +#define ELAN4IO_UNLOAD_TRANSLATION     _IOW  ('e', ELAN4IO_USER_BASE + 37, ELAN4IO_TRANSLATION_STRUCT)
90742 +
90743 +typedef struct elan4io_dumpcq_struct32
90744 +{
90745 +    E4_uint64 cq_space;      /* output: sdram addr of q, used to decode ptrs */
90746 +    E4_uint32 cq_size;       /* output: The real size of the command queue */
90747 +    E4_uint32 bufsize;       /* input: The size of the buffer to dump to */
90748 +    E4_uint32 cq_indx;       /* input: index of cq to dump */
90749 +    unsigned int buffer;     /* input: user address of rgs->buffer to dump to */
90750 +} ELAN4IO_DUMPCQ_STRUCT32;
90751 +
90752 +typedef struct elan4io_dumpcq_struct
90753 +{
90754 +    E4_uint64 cq_space;      /* output: sdram addr of q, used to decode ptrs */
90755 +    E4_uint32 cq_size;       /* output: The real size of the command queue */
90756 +    E4_uint32 bufsize;       /* input: The size of the buffer to dump to */
90757 +    E4_uint32 cq_indx;       /* input: index of cq to dump */
90758 +    unsigned long buffer;    /* input: user address of rgs->buffer to dump to */
90759 +} ELAN4IO_DUMPCQ_STRUCT;
90760 +
90761 +#define ELAN4IO_DUMPCQ                 _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT)
90762 +#define ELAN4IO_DUMPCQ32                       _IOWR ('e', ELAN4IO_USER_BASE + 38, ELAN4IO_DUMPCQ_STRUCT32)
90763 +
90764 +/* mmap offsets - - we define the file offset space as follows:
90765 + *
90766 + * page 0 - 4095 - command queues
90767 + * page 4096    - device user registers
90768 + * page 4097    - flag page/user stats
90769 + * page 4098    - device stats
90770 + * page 4099     - tproc trampoline
90771 + */
90772 +
90773 +#define ELAN4_OFF_COMMAND_QUEUES       0
90774 +#define ELAN4_OFF_USER_REGS            4096
90775 +#define ELAN4_OFF_USER_PAGE            4097
90776 +#define ELAN4_OFF_DEVICE_STATS         4098
90777 +#define ELAN4_OFF_TPROC_TRAMPOLINE     4099
90778 +
90779 +
90780 +/*
90781 + * Local variables:
90782 + * c-file-style: "stroustrup"
90783 + * End:
90784 + */
90785 +#endif /* __ELAN4_IOCTL_H */
90786 diff -urN clean/include/elan4/mmu.h linux-2.6.9/include/elan4/mmu.h
90787 --- clean/include/elan4/mmu.h   1969-12-31 19:00:00.000000000 -0500
90788 +++ linux-2.6.9/include/elan4/mmu.h     2005-04-21 07:12:06.000000000 -0400
90789 @@ -0,0 +1,117 @@
90790 +/*
90791 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
90792 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90793 + * 
90794 + *    For licensing information please see the supplied COPYING file
90795 + *
90796 + */
90797 +
90798 +#ident "@(#)$Id: mmu.h,v 1.14 2005/04/21 11:12:06 mike Exp $"
90799 +/*      $Source: /cvs/master/quadrics/elan4mod/mmu.h,v $*/
90800 +
90801 +
90802 +#ifndef __ELAN4_MMU_H
90803 +#define __ELAN4_MMU_H
90804 +
90805 +#include <linux/pci.h>
90806 +
90807 +typedef union elan4_pte_page
90808 +{
90809 +    struct {
90810 +       struct page     *page;
90811 +       physaddr_t       dma_addr;
90812 +    } _page;
90813 +#define pg_page                _page.page
90814 +#define pg_dma_addr    _page.dma_addr
90815 +
90816 +} ELAN4_PTE_PAGE;
90817 +
90818 +typedef struct elan4_hash_entry
90819 +{
90820 +    struct elan4_hash_entry    *he_next;
90821 +    struct elan4_hash_entry    *he_prev;
90822 +
90823 +    sdramaddr_t                         he_entry;
90824 +    
90825 +    struct elan4_hash_entry    *he_chain[2];
90826 +    E4_uint64                   he_tag[2];
90827 +    E4_uint32                   he_pte[2];
90828 +
90829 +    ELAN4_PTE_PAGE              he_pg[2][4];
90830 +} ELAN4_HASH_ENTRY;
90831 +
90832 +#define ELAN4_HENT_CHUNKS      16              /* SDRAM_MIN_BLOCK_SIZE/sizeof (E4_HashTableEntry) */
90833 +
90834 +typedef struct elan4_hash_chunk
90835 +{
90836 +    struct list_head            hc_link;
90837 +    ELAN4_HASH_ENTRY           hc_hents[ELAN4_HENT_CHUNKS];
90838 +} ELAN4_HASH_CHUNK;
90839 +
90840 +typedef struct elan4_hash_cache
90841 +{
90842 +    E4_Addr           hc_start;
90843 +    E4_Addr           hc_end;
90844 +    int                      hc_tbl;
90845 +
90846 +    ELAN4_HASH_ENTRY *hc_hes[1];
90847 +} ELAN4_HASH_CACHE;
90848 +
90849 +/* 
90850 + * he_pte is really 4 bytes of pte "type" one for each pte
90851 + * entry - however we declare it as an "int" so we can
90852 + * easily determine that all 4 entries are invalid 
90853 + */
90854 +#define HE_SET_PTE(he,tagidx,pteidx,val)       (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx] = (val))
90855 +#define HE_GET_PTE(he,tagidx,pteidx)           (((E4_uint8 *) &(he->he_pte[tagidx]))[pteidx])
90856 +
90857 +#define HE_TYPE_INVALID                                0
90858 +#define HE_TYPE_SDRAM                          1
90859 +#define HE_TYPE_COMMAND                                2
90860 +#define HE_TYPE_REGS                           3
90861 +#define HE_TYPE_PAGE                           4
90862 +#define HE_TYPE_OTHER                          5
90863 +#define HE_TYPE_RESERVED                       6
90864 +
90865 +/*
90866 + * he_tag has the following form :
90867 + *     [63:27] tag
90868 + *     [20:17]  pte valid
90869 + *     [16]     locked
90870 + *     [15]     copy
90871 + *     [14]     valid
90872 + *     [13:0]  context
90873 + */
90874 +
90875 +#define HE_TAG_VALID           (1 << 14)
90876 +#define HE_TAG_COPY            (1 << 15)
90877 +#define HE_TAG_LOCKED          (1 << 16)
90878 +
90879 +#define INVALID_CONTEXT                0
90880 +
90881 +extern u_char elan4_permtable[];
90882 +#define ELAN4_INCOMPAT_ACCESS(perm,access) ((elan4_permtable[(perm)] & (1 << (access))) == 0)
90883 +extern u_char elan4_permreadonly[];
90884 +#define ELAN4_PERM_READONLY(perm)        (elan4_permreadonly[(perm)])
90885 +
90886 +extern int elan4_debug_mmu;
90887 +
90888 +extern int elan4_mmuhash_chain_reduction;
90889 +extern int elan4_mmuhash_chain_end_reduce;
90890 +extern int elan4_mmuhash_chain_middle_reduce;
90891 +extern int elan4_mmuhash_chain_middle_fail;
90892 +extern int elan4_mmuhash_shuffle_attempts;
90893 +extern int elan4_mmuhash_shuffle_done;
90894 +
90895 +#ifdef DEBUG_PRINTF
90896 +#  define MPRINTF(ctxt,lvl,args...)    (elan4_debug_mmu > (lvl) ? elan4_debugf(ctxt,DBG_MMU, ##args) : (void)0)
90897 +#else
90898 +#  define MPRINTF(ctxt,lvl,args...)    ((void) 0)
90899 +#endif
90900 +
90901 +/*
90902 + * Local variables:
90903 + * c-file-style: "stroustrup"
90904 + * End:
90905 + */
90906 +#endif /* __ELAN4_MMU_H */
90907 diff -urN clean/include/elan4/neterr.h linux-2.6.9/include/elan4/neterr.h
90908 --- clean/include/elan4/neterr.h        1969-12-31 19:00:00.000000000 -0500
90909 +++ linux-2.6.9/include/elan4/neterr.h  2004-01-19 09:38:34.000000000 -0500
90910 @@ -0,0 +1,40 @@
90911 +/*
90912 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90913 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
90914 + *
90915 + *    For licensing information please see the supplied COPYING file
90916 + *
90917 + */
90918 +
90919 +#ifndef __ELAN4_NETERR_H
90920 +#define __ELAN4_NETERR_H
90921 +
90922 +#ident "@(#)$Id: neterr.h,v 1.1 2004/01/19 14:38:34 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
90923 +/*      $Source: /cvs/master/quadrics/elan4mod/neterr.h,v $*/
90924 +
90925 +typedef struct elan4_neterr_msg
90926 +{
90927 +    E4_uint8           msg_type;
90928 +    E4_uint8           msg_waitforeop;
90929 +    E4_uint16          msg_context;                            /* network context # message sent to */
90930 +    E4_int16           msg_found;                              /* # cookie found (response) */
90931 +
90932 +    ELAN_LOCATION      msg_sender;                             /* nodeid/context # message sent from */
90933 +    E4_uint32          msg_pad;
90934 +
90935 +    E4_uint64          msg_cookies[6];                         /* 64 bit cookies from identify packets */
90936 +} ELAN4_NETERR_MSG;
90937 +
90938 +#define ELAN4_NETERR_MSG_SIZE          sizeof (ELAN4_NETERR_MSG)
90939 +#define ELAN4_NETERR_MSG_REQUEST       1
90940 +#define ELAN4_NETERR_MSG_RESPONSE      2
90941 +
90942 +#define ELAN4_NETERR_MAX_COOKIES       (sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies) / \
90943 +                                        sizeof (((ELAN4_NETERR_MSG *) 0)->msg_cookies[0]))
90944 +
90945 +/*
90946 + * Local variables:
90947 + * c-file-style: "stroustrup"
90948 + * End:
90949 + */
90950 +#endif /* __ELAN4_NETERR_H */
90951 diff -urN clean/include/elan4/pci.h linux-2.6.9/include/elan4/pci.h
90952 --- clean/include/elan4/pci.h   1969-12-31 19:00:00.000000000 -0500
90953 +++ linux-2.6.9/include/elan4/pci.h     2003-09-04 08:39:17.000000000 -0400
90954 @@ -0,0 +1,227 @@
90955 +/*
90956 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
90957 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
90958 + *
90959 + *    For licensing information please see the supplied COPYING file
90960 + *
90961 + */
90962 +
90963 +#ifndef __ELAN4_PCI_H 
90964 +#define __ELAN4_PCI_H
90965 +
90966 +#ident "$Id: pci.h,v 1.32 2003/09/04 12:39:17 david Exp $"
90967 +/*      $Source: /cvs/master/quadrics/elan4hdr/pci.h,v $*/
90968 +
90969 +/* Elan has 2 64 bit bars */
90970 +#define ELAN4_BAR_SDRAM                        0
90971 +#define ELAN4_BAR_REGISTERS            2
90972 +
90973 +#define PCI_VENDOR_ID_QUADRICS         0x14fc
90974 +#define PCI_DEVICE_ID_ELAN3            0x0000
90975 +#define   PCI_REVISION_ID_ELAN3_REVA   0x0000
90976 +#define   PCI_REVISION_ID_ELAN3_REVB   0x0001
90977 +#define PCI_DEVICE_ID_ELAN4            0x0001
90978 +#define   PCI_REVISION_ID_ELAN4_REVA   0x0000
90979 +#define   PCI_REVISION_ID_ELAN4_REVB   0x0001
90980 +
90981 +/* support standard pseudo bars */
90982 +#define ELAN4_PSEUDO_BAR_ROM           8
90983 +
90984 +/* Elan PCI control
90985 + configuration space register. ElanControlRegister */
90986 +#define PCI_ELAN_PARITY_ADDR_LO                0x40
90987 +#define PCI_ELAN_PARITY_ADDR_HI                0x44
90988 +#define PCI_ELAN_PARITY_TYPE           0x48
90989 +#define PCI_ELAN_CONTROL               0x4c
90990 +#define PCI_ELAN_PLL_CONTROL           0x50
90991 +#define PCI_ELAN_SPLIT_MESSAGE_ATTR    0x54
90992 +#define PCI_ELAN_SPLIT_MESSAGE_VALUE   0x54
90993 +#define PCI_ELAN_RAMBIST_FAILED                0x54
90994 +#define PCI_ELAN_TOPPHYSADDR(i)                (0x58 + ((i)<<1))
90995 +
90996 +/*
90997 + * [31]           PciM66EN             This is set if the bus is running in PCI2.3 - 66MHz mode.
90998 + * [30:28] InitPattern         This gives the PCI-X startup mode. See "Pci initialisation patterns" below.
90999 + * [27]           notBusIs64Bits       If set the bus is running 32 bits wide. If Clear it is a 64 bit bus.
91000 + * [26:24] RamBistCntl         Used to control the Elan4 RAM BIST. Not active if zero.
91001 + * [23]           RamBistFinished      Only used when performing the RAM BIST test.
91002 + * [22]           SelectSplitMessAttr  See ECTRL_SELECT_SPLIT_MESS_ATTR below.
91003 + * [21]           ReceivedSplitCompError See ECTRL_REC_SPLIT_COMP_MESSAGE below
91004 + * [20:16] WriteHighPriTime    Used with ReadHighPriTime to control the ratio of PCI master write to PCI master
91005 + *                             read bandwidth under heavy load. The higher the value of WriteHighPriTime the longer
91006 + *                             the PCI write bursts will be allowed without interruption from a read transfer.
91007 + * [15]    DisableCouplingTest This is only used as part of the RAM BIST test. It effects the testing of the main
91008 + *                             cache tag RAMS.
91009 + * [14:13] Not used            Will read as zero.
91010 + * [12:8]  ReadHighPriTime     Used with WriteHighPriTime to control the ratio of PCI master write to PCI master
91011 + *                             read bandwidth under heavy load. The higher the value of ReadHighPriTime the longer
91012 + *                             the PCI read bursts will be allowed without interruption from a write transfer.
91013 + * [7] EnableLatencyCountReset  This bit effect the behaviour of disconnects due to the removal of GNT# after the latency
91014 + *                             counter has expired. If set it will allow the latency counter to be reset each time the
91015 + *                             GNT# is reasserted. If asserted it should provided improved bandwidth on the PCI bus
91016 + *                             without increasing the maximum latency another device would have for access to the bus.
91017 + *                             It will increase the average latency of other devices.
91018 + * [6] ExtraMasterAddrBits     This bit used to control the physical PCI addresses generated by the MMU.
91019 + * [5] ReducedPciDecode                If set the PCI local memory BAR will decode 256Mbytes of PCI address space. If clear it
91020 + *                             will decode 2Gbyte of PCI address space.
91021 + * [4] ConfigInEBusRom         If set the constant values of the Elan4 PCI configuration space will be taken from the
91022 + *                             EEPROM. If clear the internal values will be used.
91023 + * [3] EnableRd2_2Bursts       This bit only effects the behaviour of burst reads when the PCI bus is operating in
91024 + *                             PCI-2.2 mode. It allows adjacent reads to be merged into longer bursts for higher
91025 + *                             performance.
91026 + * [2] SoftIntReset            If set this bit will cause the Elan4 to reset itself with the exception of the PCI
91027 + *                             configuration space. All internal state machines will be put into the reset state.
91028 + * [1] EnableWrBursts          This bit allows much longer PCI-X write bursts. If set it will stop the Elan4 from
91029 + *                             being completely PCI-X compliant as the Elan4 may request a long PCI-X write burst that
91030 + *                             it does not complete. However it should significantly increase the maximum PCI-X write
91031 + *                             bandwidth and is unlikely to cause problems with many PCI-X bridge chips.
91032 + * [0] InvertMSIPriority       This bit affects the way MSI interrupts are generated. It provides flexibility to generate
91033 + *                             the MSI interrupts in a different way to allow for different implementations of MSI
91034 + *                             logic and still give the correct priority of Elan4 interrupts.
91035 + *
91036 + *     {PciM66EN, InitPattern, notBusIs64Bits, RamBistCntl, RamBistFinished,
91037 + *      SelectSplitMessAttr, ReceivedSplitCompError, WriteHighPriTime,
91038 + *      DisableCouplingTest, 2'h0, ReadHighPriTime,
91039 + *      EnableLatencyCountReset, ExtraMasterAddrBits, ReducedPciDecode, ConfigInEBusRom,
91040 + *      EnableRd2_2Bursts, SoftIntReset, EnableWrBursts, InvertMSIPriority}
91041 + */
91042 +
91043 +#define ECTRL_INVERT_MSI_PRIO          (1 << 0)
91044 +#define ECTRL_ENABLE_WRITEBURSTS       (1 << 1)
91045 +#define ECTRL_SOFTWARE_INTERNAL_RESET  (1 << 2)
91046 +#define ECTRL_ENABLE_2_2READBURSTS     (1 << 3)
91047 +#define ECTRL_CONFIG_IN_EBUS_ROM       (1 << 4)
91048 +#define ECTRL_28_NOT_30_BIT_LOCAL_BAR  (1 << 5)
91049 +#define ECTRL_ExtraMasterAddrBits      (1 << 6)
91050 +#define ECTRL_ENABLE_LATENCY_RESET      (1 << 7)
91051 +#define ECTRL_DISABLE_COUPLING_TEST    (1 << 15)
91052 +
91053 +/*
91054 + * Ratio of the following two registers set the relative bandwidth given to intputer data
91055 + * versus other PCI pci traffic when scheduling new PCI master accesses.
91056 + */
91057 +#define ECTRL_OTHER_HIGH_PRI_TIME_SHIFT        (8)     /* Sets top 4 bits of 8 bit counter */
91058 +#define ECTRL_OTHER_HIGH_PRI_TIME_MASK (0x1f)
91059 +
91060 +
91061 +#define ECTRL_IPROC_HIGH_PRI_TIME_SHIFT        (16)    /* Sets top 4 bits of 8 bit counter */
91062 +#define ECTRL_IPROC_HIGH_PRI_TIME_MASK (0x1f)
91063 +
91064 +/*
91065 + * This is set if a split completion message is received.
91066 + * This will cause a PCI error interrupt.
91067 + * This error is cleared by writing a 1 to this bit.
91068 + */
91069 +#define ECTRL_REC_SPLIT_COMP_MESSAGE   (1 << 21)
91070 +/*
91071 + * This bit is used to select reading of either the Split message attribute value when
91072 + * set or the split completion message data value from 0x54 in the config space
91073 + * if the ECTRL_REC_SPLIT_COMP_MESSAGE bit is set. 0x54 returns the BistFailed flags
91074 + * if any of the BIST control bits are set (bits 26 to 24)
91075 + */
91076 +#define ECTRL_SELECT_SPLIT_MESS_ATTR   (1 << 22)
91077 +
91078 +// Internal RAM bist control bits.
91079 +// Three bits of state control the RAM BIST (Built in self test).
91080 +//
91081 +// These bits must not be set unless the ECTRL_SOFTWARE_INTERNAL_RESET bit has also been set!
91082 +//
91083 +// For a normal fast ram test assert ECTRL_BIST_FAST_TEST. 
91084 +// For a data retention test first write ECTRL_START_RETENTION_TEST then wait the retention period of
91085 +// at least 1ms and preferably much longer then write ECTRL_CONTINUE_RETENTION_TEST then wait
91086 +// again and finally write ECTRL_FINISH_RETENTION_TEST.
91087 +// 
91088 +// The read only bit ECTRL_BIST_FINISHED_TEST can be polled to check that the test has completed.
91089 +#define ECTRL_BIST_CTRL_SHIFT          (24)
91090 +#define ECTRL_BIST_CTRL_MASK           (7 << 24)
91091 +
91092 +#define ECTRL_BIST_FAST_TEST           ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)     // old scheme
91093 +#define ECTRL_START_RETENTION_TEST     ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
91094 +#define ECTRL_CONTINUE_RETENTION_TEST  ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
91095 +#define ECTRL_FINISH_RETENTION_TEST    ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
91096 +
91097 +#define ECTRL_BIST_KICK_OFF            ((1 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)     // new scheme
91098 +#define ECTRL_BIST_MOVE_ON_ODD         ((3 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
91099 +#define ECTRL_BIST_MOVE_ON_EVEN                ((5 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
91100 +#define ECTRL_BIST_SCREAM_THROUGH      ((7 << 24) | ECTRL_SOFTWARE_INTERNAL_RESET)
91101 +
91102 +#define ECTRL_CLEAR_BIST_TEST          (0 << 24)
91103 +#define ECTRL_BIST_FINISHED_TEST       (1 << 23)
91104 +
91105 +// Read only current PCI bus type.
91106 +#define ECTRL_RUNNING_32BIT_MODE       (1 << 27)
91107 +#define ECTRL_INITIALISATION_MODE      (7 << 28)
91108 +#define ECTRL_RUNNING_M66EN_MODE       (1 << 31)
91109 +
91110 +#define ECTRL_INIT_PATTERN_SHIFT       (28)
91111 +#define ECTRL_INIT_PATTERN_MASK                (0x7)
91112 +
91113 +// Pci initialisation patterns
91114 +#define Pci2_2                         (0 << 28)
91115 +#define PciX50To66MHz                  (1 << 28)
91116 +#define PciX66to100MHz                 (2 << 28)
91117 +#define PciX100to133MHz                        (3 << 28)
91118 +#define PciXReserved1                  (4 << 28)
91119 +#define PciXReserved2                  (5 << 28)
91120 +#define PciXReserved3                  (6 << 28)
91121 +#define PciXReserved4                  (7 << 28)
91122 +
91123 +/* Elan PCI pll and pad control configuration space register. ElanPllControlReg */
91124 +// This overrides the default PCI pll control settings.
91125 +#define PciPll_FeedForwardISel0                (1 << 0)        // Lsi name Z0
91126 +#define PciPll_FeedForwardISel1                (1 << 1)        // Lsi name Z1
91127 +#define PciPll_ChargePumpISel0         (1 << 2)        // Lsi name P0
91128 +#define PciPll_ChargePumpISel1         (1 << 3)        // Lsi name P1
91129 +#define PciPll_EnableAutoReset         (1 << 4)        // Lsi name ENARST
91130 +#define PciPll_RSEL200500              (1 << 5)        // Lsi name Range Select, 0: 100 - 250MHz, 1: 200 - 500MHz
91131 +#define PciPll_DivideFeedback          (1 << 6)        // Just used for test - This divides the shortcut feedback to the PCI PLL so that it can lock to the tester clock.
91132 +#define PciPll_CutFeedback             (1 << 7)        // Just used for test - This disables the shortcut feedback.
91133 +
91134 +// This overrides the default PCI BZ controller settings.
91135 +#define PciBZ_UPDI                     (0xf << 8)
91136 +#define PciBZ_WAIT_INT                 (0xf << 12)
91137 +
91138 +// This overrides the default Sys and SDRam pll control settings.
91139 +#define SysPll_FeedForwardISel0                (1 << 16)       // Lsi name P0     
91140 +#define SysPll_FeedForwardISel1                (1 << 17)       // Lsi name P1     
91141 +#define SysPll_ChargePumpISel0         (1 << 18)       // Lsi name Z0    
91142 +#define SysPll_ChargePumpISel1         (1 << 19)       // Lsi name Z1    
91143 +#define SysPll_EnableAutoReset         (1 << 20)       // Lsi name ENARST
91144 +#define SysPll_DivPhaseCompInBy2       (1 << 21)       // Lsi name NODIV (Should be DIV)
91145 +#define SysPll_PllTestClkSel           (1 << 22)       // If asserted the master clock source is not taken from the pll.
91146 +
91147 +#define Pll_ForceEBusADTristate                (1 << 23)       // Required to enable the testing of EnableAutoReset. Enables use of EBusAD[7] (rev A)
91148 +#define Pll_LinkErrDirectToSDA         (1 << 23)       // Access to link error flag for triggering (rev B)
91149 +
91150 +
91151 +#define ECTRL_SYS_CLOCK_RATIO_SHIFT    (24)
91152 +// Config: with 800MHz         Speeds are 266 200 160 133.
91153 +//         0 = 133/133 (1:1)   6:6     1
91154 +//        1 = 160/133 (6:5)    5:6     1.2
91155 +//         2 = 200/133 (3:2)   4:6     1.5
91156 +//        3 = 266/133 (2:1)    3:6     2
91157 +//        4 = 200/200 (1:1)    4:4     1
91158 +//        5 = 266/200 (4:3)    3:4     1.33
91159 +
91160 +// Config: with 600MHz         Speeds are 200 150 120 100
91161 +//         0 = 100/100 (1:1)   6:6     1
91162 +//        1 = 120/100 (6:5)    5:6     1.2
91163 +//         2 = 150/100 (3:2)   4:6     1.5
91164 +//        3 = 200/100 (2:1)    3:6     2
91165 +//        4 = 150/150 (1:1)    4:4     1
91166 +//        5 = 200/150 (4:3)    3:4     1.33
91167 +
91168 +#define ECTRL_SYS_CLOCK_RATIO_SHIFT    (24)
91169 +#define ECTRL_SYS_CLOCK_RATIO_1_1Slow  (0 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91170 +#define ECTRL_SYS_CLOCK_RATIO_6_5      (1 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91171 +#define ECTRL_SYS_CLOCK_RATIO_3_2      (2 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91172 +#define ECTRL_SYS_CLOCK_RATIO_2_1      (3 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91173 +#define ECTRL_SYS_CLOCK_RATIO_1_1Fast  (4 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91174 +#define ECTRL_SYS_CLOCK_RATIO_4_3      (5 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91175 +#define ECTRL_SYS_CLOCK_MAX_NORMAL     (6)                                     /* used to generate a valid random value */
91176 +#define GET_RANDOM_CLOCK_RATIO         (Random(ECTRL_SYS_CLOCK_MAX_NORMAL) << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91177 +#define ECTRL_SYS_CLOCK_RATIO_PLL_TEST (6 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91178 +#define ECTRL_SYS_CLOCK_RATIO_TEST     (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91179 +#define ECTRL_SYS_CLOCK_RATIO_MASK     (7 << ECTRL_SYS_CLOCK_RATIO_SHIFT)
91180 +
91181 +#endif /* __ELAN4_PCI_H */
91182 diff -urN clean/include/elan4/registers.h linux-2.6.9/include/elan4/registers.h
91183 --- clean/include/elan4/registers.h     1969-12-31 19:00:00.000000000 -0500
91184 +++ linux-2.6.9/include/elan4/registers.h       2005-03-03 11:28:50.000000000 -0500
91185 @@ -0,0 +1,1587 @@
91186 +/*
91187 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
91188 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
91189 + *
91190 + *    For licensing information please see the supplied COPYING file
91191 + *
91192 + */
91193 +
91194 +#ifndef _ELAN4_REGISTERS_H
91195 +#define _ELAN4_REGISTERS_H
91196 +
91197 +#ident "$Id: registers.h,v 1.120 2005/03/03 16:28:50 david Exp $"
91198 +/*      $Source: /cvs/master/quadrics/elan4hdr/registers.h,v $*/
91199 +
91200 +/*
91201 + * Header file for internal slave mapping of the ELAN4 registers
91202 + */
91203 +
91204 +#define E4_CACHELINE_SIZE      (64)
91205 +#define E4_STACK_ALIGN         (64)
91206 +
91207 +#ifndef _ASM
91208 +
91209 +#include <elan4/types.h>
91210 +#include <elan4/dma.h>
91211 +#include <elan4/userregs.h>
91212 +
91213 +typedef volatile struct _E4_CacheSets
91214 +{
91215 +   E4_uint64   Set0[1024];     /* 8k bytes per set */
91216 +   E4_uint64   Set1[1024];     /* 8k bytes per set */
91217 +   E4_uint64   Set2[1024];     /* 8k bytes per set */
91218 +   E4_uint64   Set3[1024];     /* 8k bytes per set */
91219 +} E4_CacheSets;
91220 +
91221 +typedef union e4_cache_tag
91222 +{
91223 +   struct {
91224 +       E4_uint32 pad0;                 /* Undefined value when read */
91225 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
91226 +       E4_uint32 :10;                                          /* 0-9   - reserved */
91227 +       E4_uint32 LineError:1;                                  /* 10    - line error */
91228 +       E4_uint32 Modified:1;                                   /* 11    - modified */
91229 +       E4_uint32 FillPending:1;                                        /* 12    - fill pending */
91230 +       E4_uint32 AddrTag30to13:18;                             /* 30-13 - tag */
91231 +       E4_uint32 :1;                                           /* 31    -  */
91232 +#else
91233 +       E4_uint32 :1;                                           /* 31    -  */
91234 +       E4_uint32 AddrTag30to13:18;                             /* 30-13 - tag */
91235 +       E4_uint32 FillPending:1;                                        /* 12    - fill pending */
91236 +       E4_uint32 Modified:1;                                   /* 11    - modified */
91237 +       E4_uint32 LineError:1;                                  /* 10    - line error */
91238 +       E4_uint32 :10;                                          /* 0-9   - reserved */
91239 +#endif
91240 +   } s;
91241 +   E4_uint64   Value;
91242 +} E4_CacheTag;
91243 +
91244 +typedef volatile struct _E4_CacheTags
91245 +{
91246 +   E4_CacheTag Tags[4][128];   /* 8k bytes per set, 64 byte cache line */
91247 +} E4_CacheTags;
91248 +
91249 +#define E4_NumCacheSets                4
91250 +#define E4_NumCacheLines       128
91251 +#define E4_CacheLineSize       64
91252 +#define E4_CacheSize           (E4_NumCacheSets * E4_NumCacheLines * E4_CacheLineSize)
91253 +#define E4_CacheSetSize        (E4_NumCacheLines * E4_CacheLineSize)
91254 +
91255 +/*
91256 + * Run Queue pointers 
91257 + *
91258 + * [62:35]     FrontPointer[30:3]
91259 + * [33:32]     Size Value
91260 + * [30:3]      BackPointer[30:3]
91261 + */
91262 +#define E4_QueuePtrMask                (0x7ffffff8ULL)
91263 +#define E4_QueueSizeMask       3
91264 +#define E4_QueueEntrySize       sizeof (E4_uint64)
91265 +
91266 +#define E4_Queue8KBytes                0
91267 +#define E4_Queue64KBytes       1
91268 +#define E4_Queue512KBytes      2
91269 +#define E4_Queue4MBytes                3
91270 +
91271 +#define E4_QueueFrontValue(val,size)   ((val) | (size))
91272 +#define E4_QueueValue(queue,size)      (((E4_uint64) E4_QueueFrontValue(queue,size)) << 32 | ((E4_uint64) (queue)))
91273 +
91274 +#define E4_QueueFrontPointer(val)      /* extract queue front pointer from register */\
91275 +       (((val) >> 32) & E4_QueuePtrMask)
91276 +#define E4_QueueBackPointer(val)       /* extract queue back pointer from register */ \
91277 +       ((val) & E4_QueuePtrMask)
91278 +#define E4_QueueSizeValue(val)         /* extract queue size value from register */ \
91279 +       (((val) >> 32) & E4_QueueSizeMask)
91280 +#define E4_QueueSize(value)            /* queue size in bytes from size value */ \
91281 +       (1 << (((value)*3) + 13))
91282 +#define E4_QueueOffsetMask(fptr)\
91283 +        ((8192 << (((fptr) & E4_QueueSizeMask) << 3)) - 1)
91284 +#define E4_QueueOffset(fptr)\
91285 +        ((fptr) & E4_QueueOffsetMask(fptr))
91286 +#define E4_QueueFrontPointerInc(fptr)   \
91287 +        ( ((fptr) & ~E4_QueueOffsetMask(fptr)) | ((E4_QueueOffset(fptr) + 8) & E4_QueueOffsetMask(fptr)) )
91288 +
91289 +typedef union _E4_QueuePtr
91290 +{
91291 +   E4_uint64   Value;
91292 +   struct {
91293 +       E4_uint32 Back;
91294 +       E4_uint32 Front;
91295 +   } s;
91296 +} E4_QueuePtr;
91297 +
91298 +/*
91299 + * DMA processor status register.
91300 + *
91301 + * [48]                FirstSendTrans          Set for the first packet of a dma.
91302 + * [47:46]     TimeSliceCount          Time left to timeslice.
91303 + * [45]                DmaLastPacket           Set for the last packet of a dma.
91304 + * [44]                CurrPrefetchDma         Dma descriptor the prefetcher is valid for.
91305 + * [43:39]     PrefetcherState         Dma prefetcher's state machines value.
91306 + * [38:33]     PacketAssemblyState     Packet assembler's state machines value.
91307 + * [32:31]     PrefetcherWakeupFnt     Dma prefetcher's wakeup function.
91308 + * [30:28]     PacketAssWakeupFnt      Packet assembler's wakeup function.
91309 + * [27]                AckBufferValid          Packet ack is valid.
91310 + * [26]                PrefetchedDataProblem   Had either a data read fault or data error. Valid if AckBufferValid.
91311 + * [25]                PrefetcherHalting       Prefetch data about to stop for halt. Valid if AckBufferValid.
91312 + * [24]                PacketTimeout           Packet timeout. Sent an EopError. Valid if AckBufferValid set.
91313 + * [23:22]     PacketAckValue          Packet ack type. Valid if AckBufferValid set.
91314 + * [21:20]     FaultUnitNo             Set if the dma prefetcher has faulted.
91315 + * [19:17]     TrapType                Packet assembler's trap type.
91316 + * [16]                PrefetcherFault         Set if the dma prefetcher has faulted for this DMA unit.
91317 + * [15]                Remote                  The Dma had been issued remotely
91318 + * [14]                Priority                Running at high priority.
91319 + * [13:0]      Context                 procs current context.
91320 + */
91321 +
91322 +#define DPROC_FirstSendTrans(s)                ((unsigned)((s) >> 48) & 1)
91323 +#define DPROC_TimeSliceCount(s)                ((unsigned)((s) >> 46) & 3)
91324 +#define DPROC_DmaLastPacket(s)         ((unsigned)((s) >> 45) & 1)
91325 +#define DPROC_CurrPrefetchDma(s)       ((unsigned)((s) >> 44) & 1)
91326 +#define DPROC_PrefetcerState(s)                ((unsigned)((s) >> 39) & 0x1f)
91327 +#define DPROC_PacketAssemblerState(s)  ((unsigned)((s) >> 33) & 0x1f)
91328 +#define DPROC_PrefetcherWakeupFn(s)    ((unsigned)((s) >> 31) & 3)
91329 +#define DPROC_PacketAssemblerWakeupFn(s)((unsigned)((s) >> 28) & 3)
91330 +#define DPROC_AckBufferValid(s)                ((unsigned)((s) >> 27) & 1)
91331 +#define DPROC_PrefetcherDataProblem(s) ((unsigned)((s) >> 26) & 1)
91332 +#define DPROC_PrefetcherHalting(s)     ((unsigned)((s) >> 25) & 1)
91333 +#define DPROC_PacketTimeout(s)         ((unsigned)((s) >> 24) & 1)
91334 +#define DPROC_PacketAckValue(s)                ((unsigned)((s) >> 22) & 3)
91335 +#define DPROC_FaultUnitNo(s)           ((unsigned)((s) >> 20) & 3)
91336 +#define DPROC_TrapType(s)              ((unsigned)((s) >> 17) & 7)
91337 +#define DPROC_PrefetcherFault(s)       ((unsigned)((s) >> 16) & 1)
91338 +#define DPROC_Remote(s)                        ((unsigned)((s) >> 15) & 1)
91339 +#define DPROC_Priority(s)              ((unsigned)((s) >> 14) & 1)
91340 +#define DPROC_Context(s)               ((unsigned)(s) & 0x3fff)
91341 +
91342 +/*
91343 + * Command processor status register.
91344 + *
91345 + * [26:21]     CPState         procs current state.
91346 + * [20]                WakeupFnt       procs wakeup function.
91347 + * [19:16]     TrapValue       procs trap value.
91348 + * [15]                Remote          Issued remotely.
91349 + * [14]                Priority        Running at high priority.
91350 + * [13:0]      Context         procs current context.
91351 + */
91352 +
91353 +#define CPROC_TrapType(s)              ((unsigned)((s) >> 16) & 0xf)
91354 +#define CPROC_Remote(s)                        ((unsigned)((s) >> 15) & 0x1)
91355 +#define CPROC_Priority(s)              ((unsigned)((s) >> 14) & 0x1)
91356 +#define CPROC_Context(s)               ((unsigned)(s) & 0x3fff)
91357 +
91358 +/*
91359 + * Event processor status register.
91360 + *
91361 + * [34:30]     CPState         event procs current state.
91362 + * [29:28]     WakeupFnt       event procs wakeup function.
91363 + * [27:20]     EventCopySize   This is the number of DWords to still be copied on a copy dword event.
91364 + * [19]                EProcPort1Fault CUN_EventProc1 has taken a translation fault.
91365 + * [18]                EProcPort0Fault CUN_EventProc0 has taken a translation fault.
91366 + * [17:16]     TrapValue       event proc's trap value.
91367 + * [15]                Remote          Issued remotely.
91368 + * [14]                Priority        Running at high priority.
91369 + * [13:0]      Context         procs current context.
91370 + */
91371 +
91372 +#define EPROC_CPState(s)               ((unsigned)((s) >> 30) & 0x1f)
91373 +#define EPROC_WakeupFunction(s)                ((unsigned)((s) >> 28) & 3)
91374 +#define EPROC_CopySize(s)              ((unsigned)((s) >> 20) & 0xFF)
91375 +#define EPROC_Port1Fault(s)            ((unsigned)((s) >> 19) & 1)
91376 +#define EPROC_Port0Fault(s)            ((unsigned)((s) >> 18) & 1)
91377 +#define EPROC_TrapType(s)              ((unsigned)((s) >> 16) & 3)
91378 +#define EPROC_Remote(s)                        ((unsigned)((s) >> 15) & 1)
91379 +#define EPROC_Priority(s)              ((unsigned)((s) >> 14) & 1)
91380 +#define EPROC_Context(s)               ((unsigned)(s) & 0x3fff)
91381 +
91382 +/*
91383 + * Thread processor status register.
91384 + *
91385 + * [39:24]     MemPortBusy             16 bits of port busy flags for all FFU memory ports.
91386 + * [23:21]     Reads as zero
91387 + * [20:18]     TQState                 State vector for thread queuing proc.
91388 + * [17]                HighRunQueueFull        High priority run queue is full
91389 + * [16]                LowRunQueueFull         Low priority run queue is full
91390 + * [15]                ReadyHigh               More runnable threads at high priority
91391 + * [14]                ReadyLow                More runnable threads at low priority
91392 + * [13:0]      Context                 procs current context.
91393 + */
91394 +#define TPROC_HighRunQueueFull(s)      ((unsigned)((s) >> 17) & 1)
91395 +#define TPROC_LowRunQueueFull(s)       ((unsigned)((s) >> 16) & 1)
91396 +#define TPROC_ReadyHigh(s)             ((unsigned)((s) >> 15) & 1)
91397 +#define TPROC_ReadyLow(s)              ((unsigned)((s) >> 14) & 1)
91398 +#define TPROC_Context(s)               ((unsigned)((s) & 0x3fff))
91399 +
91400 +/*
91401 + * Input processor status register
91402 + *
91403 + * [55]                Last Trans (~EOP)
91404 + * [54]                First Trans (~EOP)
91405 + * [53]                Channel (~EOP) 
91406 + * [52]                Bad Length (~EOP)
91407 + * [51:50]     Trans CRC Status (~EOP)
91408 + * [49:48]     EOP type
91409 + * [47]                EOP trap
91410 + * [46]                Trapping priority
91411 + * [45]                Trapping Channel
91412 + * [44:43]     Bad ack sent
91413 + * [42:41]     Good ack sent
91414 + * [40]                Queueing Packet (~EOP)
91415 + * [39:36]     Channel trapped bits
91416 + * [35:32]     IProc Trap Value
91417 + * [31:16]     Network Context (~EOP)
91418 + * [15:0]      Transaction Type (~EOP)
91419 + */
91420 +#define IPROC_LastTrans(s)             ((unsigned)((s) >> 55) & 0x1)
91421 +#define IPROC_FirstTrans(s)            ((unsigned)((s) >> 54) & 0x1)
91422 +#define IPROC_Channel(s)               ((unsigned)((s) >> 53) & 0x1)
91423 +#define IPROC_BadLength(s)             ((unsigned)((s) >> 52) & 0x1)
91424 +#define IPROC_TransCRCStatus(s)                ((unsigned)((s) >> 50) & 0x3)
91425 +#define IPROC_EOPType(s)               ((unsigned)((s) >> 48) & 0x3)
91426 +#define IPROC_EOPTrap(s)               ((unsigned)((s) >> 47) & 0x1)
91427 +#define IPROC_InputterPri(s)           ((unsigned)((s) >> 46) & 0x1)
91428 +#define IPROC_InputterChan(s)          ((unsigned)((s) >> 45) & 0x1)
91429 +#define IPROC_BadAckSent(s)            ((unsigned)((s) >> 43) & 0x3)
91430 +#define IPROC_GoodAckSent(s)           ((unsigned)((s) >> 41) & 0x3)
91431 +#define IPROC_QueueingPacket(s)                ((unsigned)((s) >> 40) & 0x1)
91432 +#define IPROC_ChannelTrapped(s)                ((unsigned)((s) >> 36) & 0xF)
91433 +#define IPROC_TrapValue(s)             ((unsigned)((s) >> 32) & 0xF)
91434 +#define IPROC_NetworkContext(s)                ((unsigned)((s) >> 16) & 0xFFFF)
91435 +#define IPROC_TransactionType(s)       ((unsigned)(s) & 0xFFFF)
91436 +
91437 +/* values for IPROC_TransCRCStatus */
91438 +#define CRC_STATUS_GOOD    (0)
91439 +#define CRC_STATUS_DISCARD (1)
91440 +#define CRC_STATUS_ERROR   (2)
91441 +#define CRC_STATUS_BAD     (3)
91442 +
91443 +/* values for IPROC_EOPType */
91444 +#define EOP_GOOD          (1)
91445 +#define EOP_BADACK        (2)
91446 +#define EOP_ERROR_RESET           (3)
91447 +
91448 +/*
91449 + * Interrupt register bits
91450 + *
91451 + * There are up to four sources of interrupt for the MSI port.
91452 + * The Elan will request 4 ports but may only get either 2 or 1 port. The Interrupts are assigned
91453 + * as shown below:
91454 + * No Of MSI ints      Low Priority                                                    High Priority
91455 + *     4               Event Ints      OtherInts               Inputer Ints            Hard Error ints.
91456 + *                i.e.                 Dproc, Tproc, Sten.     HighPri and LowPri      Link errs, ECC errs,
91457 + *
91458 + *     2               Event Ints      All other interrupts.
91459 + *     1               All together.
91460 + * 
91461 + * It is not safe to change the number of sources of interrupt while there may be outstanding,
91462 + * unserviced interrupts pending.
91463 + * There are two forms of encoding. This has been provided in case an MSI implementation assumes either
91464 + * a high value to have a high priority or a low value to have a high priority. This is controlled
91465 + * by a bit in the Elan Pci Control register.
91466 + */
91467 +#define INT_LinkPortKeyFail            (1<<18)
91468 +#define INT_PciMemErr                  (1<<17)
91469 +#define INT_SDRamInt                   (1<<16)
91470 +#define INT_LinkError                  (1<<15)
91471 +#define INT_IProcCh1HighPri            (1<<14)
91472 +#define INT_IProcCh0HighPri            (1<<13)
91473 +#define INT_IProcCh1LowPri             (1<<12)
91474 +#define INT_IProcCh0LowPri             (1<<11)
91475 +#define INT_DiscardingHighPri          (1<<10)
91476 +#define INT_DiscardingLowPri           (1<<9)
91477 +#define INT_CProcHalted                        (1<<8)
91478 +#define INT_TProcHalted                        (1<<7)
91479 +#define INT_DProcHalted                        (1<<6)
91480 +#define INT_EProc                      (1<<5)
91481 +#define INT_TProc                      (1<<4)
91482 +#define INT_CProc                      (1<<3)
91483 +#define INT_Dma1Proc                   (1<<2)
91484 +#define INT_Dma0Proc                   (1<<1)
91485 +#define INT_MainInterrupt              (1<<0)
91486 +
91487 +#define INT_Units              (INT_EProc | INT_TProc | INT_CProc | INT_Dma1Proc | INT_Dma0Proc)
91488 +#define INT_Inputters          (INT_IProcCh1HighPri | INT_IProcCh0HighPri | INT_IProcCh1LowPri | INT_IProcCh0LowPri)
91489 +#define INT_Discarding         (INT_DiscardingHighPri | INT_DiscardingLowPri)
91490 +#define INT_Halted             (INT_CProcHalted | INT_TProcHalted | INT_DProcHalted)
91491 +#define INT_ErrorInterrupts    (INT_PciMemErr | INT_SDRamInt | INT_LinkError)
91492 +
91493 +#define INT_MSI0               INT_MainInterrupt
91494 +#define INT_MSI1               (INT_Units | INT_Discarding | INT_Halted)
91495 +#define INT_MSI2               (INT_Inputters)
91496 +#define INT_MSI3               (INT_ErrorInterrupts)
91497 +
91498 +#define E4_INTERRUPT_REG_SHIFT 32
91499 +#define E4_INTERRUPT_MASK_MASK (0xffffffffULL)
91500 +
91501 +/*
91502 + * Trap type values - see trapvalues.v
91503 + */
91504 +
91505 +#define CommandProcInserterError               0x1
91506 +#define CommandProcPermissionTrap              0x2
91507 +#define CommandProcSendTransInvalid            0x3
91508 +#define CommandProcSendTransExpected           0x4
91509 +#define CommandProcDmaQueueOverflow            0x5
91510 +#define CommandProcInterruptQueueOverflow      0x6
91511 +#define CommandProcMemoryFault                 0x7
91512 +#define CommandProcRouteFetchFault             0x8
91513 +#define CommandProcFailCountZero               0x9
91514 +#define CommandProcAddressAlignment            0xa
91515 +#define CommandProcWaitTrap                    0xb
91516 +#define CommandProcMultipleGuards              0xc
91517 +#define CommandProcOpenOnGuardedChan           0xd
91518 +#define CommandProcThreadQueueOverflow         0xe
91519 +#define CommandProcBadData                      0xf
91520 +
91521 +#define DmaProcNoFault                         0x0
91522 +#define DmaProcRouteFetchFault                 0x1
91523 +#define DmaProcFailCountError                  0x2
91524 +#define DmaProcPacketAckError                  0x3
91525 +#define DmaProcRunQueueReadFault               0x4
91526 +#define DmaProcQueueOverflow                   0x5
91527 +
91528 +#define EventProcNoFault                       0x0
91529 +#define EventProcAddressAlignment              0x1
91530 +#define EventProcMemoryFault                   0x2
91531 +#define EventProcCountWrapError                        0x3
91532 +
91533 +#define InputNoFault                           0x0
91534 +#define InputAddressAlignment                  0x1
91535 +#define InputMemoryFault                       0x2
91536 +#define InputInvalidTransType                  0x3
91537 +#define InputDmaQueueOverflow                  0x4
91538 +#define InputEventEngineTrapped                        0x5
91539 +#define InputCrcErrorAfterPAckOk               0x6
91540 +#define InputEopErrorOnWaitForEop              0x7
91541 +#define InputEopErrorTrap                      0x8
91542 +#define InputDiscardAfterAckOk                 0x9
91543
91544 +typedef struct _E4_Sched_Status
91545 +{
91546 +    E4_uint32 Status;
91547 +    E4_uint32 Restart;
91548 +} E4_Sched_Status;
91549
91550 +typedef struct _E4_Input_Ptrs
91551 +{
91552 +    E4_uint32 ContextFilterTable;
91553 +    E4_uint32 TrapBasePtr;
91554 +} E4_Input_Ptrs;
91555 +
91556 +#define SCH_StopLowPriQueues           (1 << 0)
91557 +#define SCH_DProcHalt                  (1 << 1)
91558 +#define SCH_TProcHalt                  (1 << 2)
91559 +#define SCH_CProcHalt                  (1 << 3)
91560 +
91561 +#define SCH_CProcTimeout600ns          (1 << 4)
91562 +#define SCH_CProcTimeout1p4us          (2 << 4)
91563 +#define SCH_CProcTimeout3p0us          (3 << 4)
91564 +#define SCH_CProcTimeout6p2us          (4 << 4)
91565 +#define SCH_CProcTimeout12p6us         (5 << 4)
91566 +#define SCH_CProcTimeout25p4us         (6 << 4)
91567 +#define SCH_CProcTimeout51p0us         (7 << 4)
91568 +#define SCH_DiscardLowPriInput         (1 << 7)
91569 +#define SCH_DiscardHighPriInput                (1 << 8)
91570 +
91571 +#define SCH_DProcTimeslice64us         (0 << 9)
91572 +#define SCH_DProcTimeslice128us                (1 << 9)
91573 +#define SCH_DProcTimeslice256us                (2 << 9)
91574 +#define SCH_DProcTimeslice512us                (3 << 9)
91575 +
91576 +#define SCH_Halt                       (SCH_StopLowPriQueues | SCH_DProcHalt | SCH_TProcHalt | SCH_CProcHalt)
91577 +#define SCH_Discard                    (SCH_DiscardLowPriInput | SCH_DiscardHighPriInput)
91578 +
91579 +#define SCH_RestartCProc               (1 << 0)
91580 +#define SCH_RestartTProc               (1 << 1)
91581 +#define SCH_RestartEProc               (1 << 2)
91582 +#define SCH_RestartDma0Proc            (1 << 3)
91583 +#define SCH_RestartDma1Proc            (1 << 4)
91584 +#define SCH_RestartDmaPrefetchProc     (1 << 5)
91585 +#define SCH_RestartCh0LowPriInput      (1 << 6)
91586 +#define SCH_RestartCh1LowPriInput      (1 << 7)
91587 +#define SCH_RestartCh0HighPriInput     (1 << 8)
91588 +#define SCH_RestartCh1HighPriInput     (1 << 9)
91589 +#define SCH_ClearLinkErrorInt          (1 << 10)
91590 +#define SCH_ContextFilterFlush         (1 << 11)
91591 +
91592 +/*
91593 + * Link state bits.
91594 + */
91595 +#define LS_LinkNotReady                (1 << 0) /* Link is in reset or recovering from an error */
91596 +#define LS_Locked              (1 << 1) /* Linkinput PLL is locked */
91597 +#define LS_LockError           (1 << 2) /* Linkinput PLL was unable to lock onto the input clock. */
91598 +#define LS_DeskewError         (1 << 3) /* Linkinput was unable to Deskew all the inputs. (Broken wire?) */
91599 +#define LS_PhaseError          (1 << 4) /* Linkinput Phase alignment error. */
91600 +#define LS_DataError           (1 << 5) /* Received value was neither good data or a token. */
91601 +#define LS_FifoOvFlow0         (1 << 6) /* Channel 0 input fifo overflowed. */
91602 +#define LS_FifoOvFlow1         (1 << 7) /* Channel 1 input fifo overflowed. */
91603 +#define LS_Mod45Changed                (1 << 8) /* Mod45 bit has changed. Error set to force reset. */
91604 +#define LS_PAckNotSeenError    (1 << 9) /* PAck value not returned for this packet. */
91605 +
91606 +/*
91607 + * Link State Constant defines, used for writing to LinkSetValue
91608 + */
91609 +
91610 +#define LRS_DataDel0           0x0
91611 +#define LRS_DataDel1           0x1
91612 +#define LRS_DataDel2           0x2
91613 +#define LRS_DataDel3           0x3
91614 +#define LRS_DataDel4           0x4
91615 +#define LRS_DataDel5           0x5
91616 +#define LRS_DataDel6           0x6
91617 +#define LRS_DataDel7           0x7
91618 +#define LRS_DataDel8           0x8
91619 +#define LRS_LinkInValue                0x9
91620 +#define LRS_PllDelValue                0xA
91621 +#define LRS_ClockEven          0xB
91622 +#define LRS_ErrorVal8to0       0xC
91623 +#define LRS_ErrorVal17to9      0xD
91624 +#define LRS_ErrorVal26to18     0xE
91625 +#define LRS_ErrorVal35to27     0xF
91626 +#define LRS_NumLinkDels         0x10
91627 +
91628 +#define LRS_Pllfast             0x40
91629 +
91630 +typedef struct _E4_CommandControl
91631 +{
91632 +    volatile E4_uint32 CommandQueueDescsBase;
91633 +    volatile E4_uint32 CommandRequeuePtr;
91634 +} E4_CommandControl;
91635 +
91636 +#define E4_CommandRequeueBusy          0x80000000      /* Test against read value of CommandRequeuePtr */
91637 +#define E4_CommandRequeueHighPri       0x1             /* Will requeue onto the high pri queue */
91638 +#define E4_QueueDescPtrMask            0x7fffffe0
91639 +
91640 +typedef struct _E4_CommandQueueDesc
91641 +{
91642 +    E4_uint64 CQ_QueuePtrs;
91643 +    E4_uint64 CQ_HoldingValue;         /* 32 bit value for 32 bit accesses or OutOfOrderMask*/
91644 +    E4_uint64 CQ_AckBuffers;           /* Space for 32 4 bit ack buffer values. */
91645 +    E4_uint64 CQ_Control;
91646 +} E4_CommandQueueDesc;
91647 +
91648 +/*
91649 + * Rev A - CQ_QueuePtrs
91650 + * [63]                Unused          Should be set to zero.
91651 + * [62:51]     Unused          (reads as top of InsertPtr)
91652 + * [50:35]     CompletedPtr    Completed pointer. This is aligned to a byte address.
91653 + * [34]                Trapped         Will be set if the command has trapped.
91654 + * [33:32]     Size            Size of queue.
91655 + * [31]                Used            Will be set if the descriptor has been changed and written back by the elan.
91656 + * [30:3]      InsertPtr       Insert pointer. This is aligned to a byte address.
91657 + * [2]         TimedOut        Will be set if the queue timedout executing a command.
91658 + * [1]         Priority        When set the queue runs at high priority.
91659 + * [0]         Error           If this becomes set all new data written to the queue is * discarded.
91660 + *
91661 + * Rev B - CQ_QueuePtrs
91662 + * [63]                TimedOut        Will be set if the queue timedout executing a command.
91663 + * [62]                Priority        When set the queue runs at high priority.
91664 + * [61]                QueueType       1=will accept unordered 64 bit PCI writes. 0=will accept ordered 32 or 64 bit PCI writes.
91665 + * [60:51]     Unused          (reads as top of InsertPtr)
91666 + * [50:35]     CompletedPtr    Completed pointer. This is aligned to a byte address.
91667 + * [34]                Trapped         Will be set if the command has trapped.
91668 + * [33:32]     Size            Size of queue.
91669 + * [31]                Used            Will be set if the descriptor has been changed and written back by the elan.
91670 + * [30:3]      InsertPtr       Insert pointer. This is aligned to a byte address.
91671 + * [2]         OrderControl    Holds bit 8 of last PCI accesses. Used by a reordering queue.
91672 + * [1:0]       ErrorType       This field has the current error status of the queue.
91673 + */
91674 +
91675 +/* Common between revA and RevB */
91676 +#define CQ_PtrMask             (0x7ffffff8)                    /* 31 bit sdram address */
91677 +#define CQ_PtrOffsetMask       (0x7fff8)
91678 +#define CQ_PtrBaseMask         (0x7ff80000)
91679 +
91680 +#define CQ_InsertPtrShift      (3 - 3)                         /* InsertPtr is 64 bit aligned */
91681 +#define CQ_SizeShift           (32)
91682 +#  define CQ_Size1K            0
91683 +#  define CQ_Size8K            1
91684 +#  define CQ_Size64K           2
91685 +#  define CQ_Size512K          3
91686 +#  define CQ_SizeMask          3
91687 +
91688 +#define CQ_CompletedPtrShift   (35 - 3)                        /* CompletedPtr is 64 bit aligned */
91689 +
91690 +#define CQ_Used                        (1ull << 31)
91691 +#define CQ_Trapped             (1ull << 34)
91692 +
91693 +#define CQ_QueuePtrsValue(Size,Inserter,Completer) \
91694 +       (((E4_uint64) (Size) << CQ_SizeShift) | \
91695 +        ((E4_uint64) (Inserter) << CQ_InsertPtrShift) | \
91696 +        ((E4_uint64) (Completer) << CQ_CompletedPtrShift))
91697 +
91698 +#define CQ_InsertPtr(QueuePtrs) \
91699 +       (((E4_uint64) QueuePtrs) & CQ_PtrMask)
91700 +
91701 +#define CQ_CompletedPtr(QueuePtrs) \
91702 +       (((E4_uint32)((QueuePtrs) >> CQ_CompletedPtrShift) & CQ_PtrOffsetMask) | \
91703 +        (CQ_InsertPtr(QueuePtrs) & CQ_PtrBaseMask))
91704 +
91705 +#define CQ_Size(SizeVal)               (1024 * (1 << ((SizeVal)*3)))
91706 +
91707 +/* Rev A specific */
91708 +#define CQ_RevA_Error                  (1 << 0)
91709 +#define CQ_RevA_Priority               (1 << 1)
91710 +#define CQ_RevA_TimedOut               (1 << 2)
91711 +
91712 +/* Rev B specific */
91713 +#define CQ_RevB_ErrorType(QueuePtr)    ((QueuePtr) & (3 << 0))
91714 +#  define CQ_RevB_NoError              (0ull << 0)
91715 +#  define CQ_RevB_Overflowed           (1ull << 0)
91716 +#  define CQ_RevB_InvalidWriteSize     (2ull << 0)
91717 +#  define CQ_RevB_InvalidWriteOrder    (3ull << 0)
91718 +#define CQ_RevB_OrderControl           (1ull << 2)
91719 +
91720 +#define CQ_RevB_QueueType(QueuePtr)    ((QueuePtr) & (1ull << 61))
91721 +#  define CQ_RevB_ReorderingQueue      (1ull << 61)
91722 +#  define CQ_RevB_32bitWriteQueue      (0ull << 61)
91723 +
91724 +#define CQ_RevB_Priority               (1ull << 62)
91725 +#define CQ_RevB_TimedOut               (1ull << 63)
91726 +
91727 +/* 
91728 + * CQ_AckBuffers - Packet Ack Values
91729 + */
91730 +#define PackOk                 (0x0)
91731 +#define PackTestFail           (0x1)
91732 +#define PackDiscard            (0x2)
91733 +#define PackError              (0x7)
91734 +#define PackTimeout            (0x8)
91735 +#define PackWaiting            (0xF)
91736 +#define PackValue(val,chan)    (((val) >> ((chan) * 4)) & 0xf)
91737 +
91738 +/*
91739 + * CQ_Control
91740 + * [63:35]     ExtractPtr
91741 + * [34]                Unused
91742 + * [33:32]     ChannelNotCompleted
91743 + * [31:24]     Permissions
91744 + * [23:16]     RestartCount            Decremented after each restart. Will trap when zero
91745 + * [15:14]     Unused                  Should be set to zero
91746 + * [13:0]      Context
91747 + */
91748 +#define CQ_Context(Control)            ((E4_uint32) ((Control) >>  0) & 0x3fff)
91749 +#define CQ_RestartCount(Control)       ((E4_uint32) ((Control) >> 16) & 0xff)
91750 +#define CQ_ChannelNotCompleted(Control)        ((E4_uint32) ((Control) >> 32) & 3)
91751 +#define CQ_ExtractPtr(Control)         ((E4_uint32) ((Control) >> 32) & 0xFFFFFFF8)
91752 +
91753 +#define CQ_RestartCountShift           16
91754 +
91755 +#define CQ_SetEventEnableBit   (1 << 24)
91756 +#define CQ_WaitEventEnableBit  (1 << 25)
91757 +#define CQ_ModifyEnableBit     (1 << 26)
91758 +#define CQ_WriteEnableBit      (1 << 27)
91759 +#define CQ_ThreadStartEnableBit        (1 << 28)
91760 +#define CQ_DmaStartEnableBit   (1 << 29)
91761 +#define CQ_STENEnableBit       (1 << 30)
91762 +#define CQ_InterruptEnableBit  (1 << 31)
91763 +#define CQ_EnableAllBits        (0xFF000000)
91764 +#define CQ_PermissionMask      (0xFF000000)
91765 +
91766 +#define CQ_ControlValue(Cntx, RestartCount, Permissions) \
91767 +       (((Cntx) & 0x3fff) | (((RestartCount) & 0xff) << 16) | ((Permissions) & CQ_PermissionMask))
91768 +
91769 +/*
91770 + * This file describes the slave address map of Elan4.
91771 + *
91772 + * Elan4 has two PCI 64 bit base address registers. One is setup for elan
91773 + * local memory and the other is for the command port, elan registers and ebus.
91774 + *
91775 + * This file describes the command port, elan registers and ebus BAR. This is a
91776 + * 26 bit base address register and is split up as follows:
91777 + * 1 The ebus requires 21 bits of address. 26'h3e00000 to 26'h3ffffff
91778 + * 2 The control registers requires 16 bits of address. 26'h3df0000 to 26'h3dfffff
91779 + * 3 The command port has the rest. This give just under 8k command ports or about 123 per
91780 + *   processor of a 64 node SMP.
91781 + */
91782 +
91783 +/* BAR1 contains the command queues followed by the registers and the Ebus - and is 26 bits */
91784 +/* each command queue has an 8K page associated with it */
91785 +#define CQ_CommandMappingSize          (1 << 13)
91786 +#define CQ_NumCommandDescs             ((1 << (26 - 13)))
91787 +#define CQ_CommandDescsAlignment       ((1 << (26 - 13)) * sizeof (E4_CommandQueueDesc))
91788 +
91789 +/* control reg bits i.e. E4_DataBusMap.SysControlReg */
91790 +#define CONT_EN_ALL_SETS               (1ULL << 0) /* enable cache */
91791 +#define CONT_MMU_ENABLE                        (1ULL << 1) /* bit 0 enables mmu */
91792 +#define CONT_CACHE_HASH_TABLE          (1ULL << 2) /* cache up hash table entries */
91793 +#define CONT_CACHE_CHAINS              (1ULL << 3) /* cache up chain entries */
91794 +#define CONT_CACHE_ROOT_CNTX           (1ULL << 4) /* cache root context table for routes and filters. */
91795 +#define CONT_CACHE_STEN_ROUTES         (1ULL << 5) /* cache up sten packet routes */
91796 +#define CONT_CACHE_DMA_ROUTES          (1ULL << 6) /* cache up dma packet routes */
91797 +
91798 +#define CONT_CACHE_NONE                0ULL
91799 +#define CONT_CACHE_ALL         (CONT_CACHE_HASH_TABLE | CONT_CACHE_CHAINS | CONT_CACHE_ROOT_CNTX | \
91800 +                                CONT_CACHE_STEN_ROUTES | CONT_CACHE_DMA_ROUTES)
91801 +
91802 +/* This controls the format size and position of the MMU hash tables. */
91803 +#define CONT_INHIBIT_MAX_CHAIN_ITEMS   (1ULL << 7)     /* Prevents the MaxChainItems value of 1024 from forcing a translation miss */
91804 +#define CONT_TABLE0_MASK_SIZE_SHIFT    8               /* Defines the size of hash table 0 */
91805 +#define CONT_TABLE0_PAGE_SIZE_SHIFT    13              /* Set the page size for hash table 0 */
91806 +#define CONT_TABLE1_MASK_SIZE_SHIFT    16              /* Defines the size of hash table 1 */
91807 +#define CONT_TABLE1_PAGE_SIZE_SHIFT    21              /* Set the page size for hash table 1 */
91808 +#define CONT_TWO_HASH_TABLES           (1ULL << 24)    /* Sets the MMU to use two hash tables. If not set only 0 used. */
91809 +#define CONT_2K_NOT_1K_DMA_PACKETS     (1ULL << 25)    /* Used to select the default DMA packet size. */
91810 +#define CONT_ALIGN_ALL_DMA_PACKETS     (1ULL << 26)    /* Will force all dma packets to be aligned to a page.*/
91811 +#define CONT_DIRECT_MAP_PCI_WRITES     (1ULL << 27)    /* Will force pci writes to write and flush the dcache.*/
91812 +#define CONT_TLB_FLUSH                 (1ULL << 28)    /* Invalidates the TLB and indicates when flushed */
91813 +#define CONT_CLEAR_WALK_WROTE_TABLES   (1ULL << 29)    /* Used to guarantee that the elan is using new PTE values. */
91814 +#define CONT_ROUTE_FLUSH               (1ULL << 30)    /* Invalidates all route cache entries. */
91815 +#define CONT_CLEAR_LINKPORT_INT                (1ULL << 31)    /* Clears the Linkport key fail interrupt. Reads as 0. */
91816 +#define CONT_CLEAR_SDRAM_ERROR         (1ULL << 32)    /* Clears an EEC error interrupt. Reads as 0. */
91817 +
91818 +/*
91819 + * These are extra control bits used for testing the DLLs of the SDRAM interface. Most of the Sdram
91820 + * control bits are defined in xsdram.h
91821 + */
91822 +#define SDRAM_FIXED_DLL_DELAY_SHIFT    47
91823 +#define SDRAM_FIXED_DLL_DELAY_BITS     5
91824 +#define SDRAM_FIXED_DLL_DELAY_MASK     ((1ULL << SDRAM_FIXED_DLL_DELAY_BITS) - 1ULL)
91825 +#define SDRAM_FIXED_DLL_DELAY(Value)   ((SDRAM_FIXED_DLL_DELAY_MASK & (Value)) << SDRAM_FIXED_DLL_DELAY_SHIFT)
91826 +#define SDRAM_FIXED_DELAY_ENABLE       (1ULL << 52)
91827 +#define SDRAM_GET_DLL_DELAY(Value)     (((Value) >> SDRAM_FIXED_DLL_DELAY_SHIFT) & SDRAM_FIXED_DLL_DELAY_MASK)
91828 +
91829 +#define SDRAM_166_DLL_CORRECTION_FACTOR        3       /* This is to allow for SSO and ringing on the DQ lines */
91830 +#define SDRAM_150_DLL_CORRECTION_FACTOR        2       /* This is to allow for SSO and ringing on the DQ lines */
91831 +
91832 +#define PAGE_SIZE_4K   0x0
91833 +#define PAGE_SIZE_8K   0x1
91834 +#define PAGE_SIZE_64K  0x2
91835 +#define PAGE_SIZE_512K 0x3
91836 +#define PAGE_SIZE_2M   0x4
91837 +#define PAGE_SIZE_4M   0x5
91838 +#define PAGE_SIZE_64M  0x6
91839 +#define PAGE_SIZE_512M 0x7
91840 +
91841 +#define PAGE_SIZE_MASK 0x7
91842 +#define PAGE_MASK_MASK 0x1f
91843 +
91844 +/* control reg bits i.e. E4_DataBusMap.LinkControlReg */
91845 +#define LCONT_REVA_GREEN_LED           (1 << 0)
91846 +#define LCONT_REVA_YELLOW_LED          (1 << 1)
91847 +#define LCONT_REVA_RED_LED             (1 << 2)
91848 +#define LCONT_REVA_ENABLE_LED_DRIVE    (1 << 3) /* Enable manual setting of the Leds to the bits set above. */
91849 +
91850 +#define LCONT_REVB_DISABLE_TLB_PREFETCH        (1 << 0)
91851 +#define LCONT_REVB_DISABLE_CRC_ERROR_CHECKING  (1 << 1)
91852 +
91853 +
91854 +#define LCONT_EN_SYS_WRITES            (1 << 4) /* Enable linkport writes to sys registers. i.e. all of E4_DataBusMap. */
91855 +#define LCONT_EN_SYS_READS             (1 << 5) /* Enable linkport reads from sys registers. i.e. all of E4_DataBusMap. */
91856 +#define LCONT_EN_USER_WRITES           (1 << 6) /* Enable linkport writes to user registers. i.e. all of E4_User_Regs. */
91857 +#define LCONT_EN_USER_READS            (1 << 7) /* Enable linkport reads from user registers. i.e. all of E4_User_Regs. */
91858 +
91859 +#define LCONT_TEST_VALUE_MASK          0x3ff    /* Value used for test writes and link boundary scan. */
91860 +#define LCONT_TEST_VALUE_SHIFT         8
91861 +#define LCONT_TEST_VALUE(Value)                ((LCONT_LINK_STATE_MASK & (Value)) << LCONT_TEST_VALUE_SHIFT) /* NOTE(review): masks with LCONT_LINK_STATE_MASK rather than LCONT_TEST_VALUE_MASK; both are 0x3ff so the value is unchanged - confirm intent */
91862 +
91863 +/*
91864 + * State read from LINK_STATE when TEST_VALUE is set to the following values.
91865 + * TEST_VALUE    LINK_STATE read       TEST_VALUE        LINK_STATE read
91866 + *    000     -   Data delay count 0      008       -  Data delay count 8
91867 + *    001     -   Data delay count 1      009       -  Link in value
91868 + *    002     -   Data delay count 2      00a       -  PLL delay
91869 + *    003     -   Data delay count 3      00b       -  Clock Delay
91870 + *    004     -   Data delay count 4      00c       ?  ErrorVal8to0
91871 + *    005     -   Data delay count 5      00d       ?  ErrorVal17to9
91872 + *    006     -   Data delay count 6      00e       ?  ErrorVal26to18
91873 + *    007     -   Data delay count 7      00f       ?  ErrorVal35to27
91874 + */
91875 +
91876 +#define LCONT_TEST_CONTROL_MASK                0x3     /* Selects and controls the action of the LINK_STATE value. */
91877 +#define LCONT_TEST_CONTROL_SHIFT       18
91878 +
91879 +#define LCONT_READ_ERRORS              0       /* {Mod45RequestChanged, FifoOverflowError, DataError, PhaseError,
91880 +                                                *      DeskewError, LockError, Locked, LinkNotReady} */
91881 +#define LCONT_READ_STATE               1       /* Read values addressed by TEST_CONTROL value */
91882 +#define LCONT_FIX_LINK_DELAYS          2       /* Sets delays to TEST_CONTROL value */
91883 +#define LCONT_BOUNDARY_SCAN            3       /* Puts link into boundary scan. Outputs TEST_CONTROL value to link,
91884 +                                                * reads LINK_STATE from link. */ 
91885 +
91886 +#define LCONT_LINK_STATE_MASK          0x3ff   /* Read only */
91887 +#define LCONT_LINK_STATE_SHIFT         20      /* Read only */
91888 +#define LCONT_LINK_STATE(ControlRegValue)      (LCONT_LINK_STATE_MASK & ((ControlRegValue) >> LCONT_LINK_STATE_SHIFT))
91889 +
91890 +/* control reg bits i.e. E4_DataBusMap.LinkContSettings */
91891 +#define LCONT_MOD45_DISABLE            (1 << 0) /* If set the link will try to run in TNB mode. */
91892 +#define LCONT_CONFIG_PHASE_MASK                0x7     /* This sets the delay through the phase alignment buffer. */
91893 +#define LCONT_CONFIG_PHASE_SHIFT       1
91894 +
91895 +#define LCONT_PLL_REF_VAL_BITS_MASK    0x7f    /* This is the divide value on the LinkIn clock to form the comms PLL */
91896 +#define LCONT_PLL_REF_VAL_BITS_SHIFT   4       /* reference clock. Div value is (n - 2). e.g. to Divide by 7 set to 5. */
91897 +
91898 +#define LCONT_FORCE_COMMSCLK_LOCAL     (1 << 11) /* This must be set at one end of a back to back Elan configuration. */
91899 +#define LCONT_LVDS_VOLTAGE_BITS_MASK   0x3     /* This is used to set the voltage swing on the LVDS link output pads. */
91900 +#define LCONT_LVDS_VOLTAGE_BITS_SHIFT  12      /* Shift for the LVDS output voltage swing bits (original comment was copied from PLL_REF_VAL - confirm). */
91901 +
91902 +#define LCONT_VOD_170                  0       /* Approximate differential voltage swing in mV of link outputs into */
91903 +#define LCONT_VOD_360                  1       /* a 100 ohm differential load. */
91904 +#define LCONT_VOD_460                  2
91905 +#define LCONT_VOD_550                  3
91906 +
91907 +#define LCONT_LVDS_TERMINATION_MASK    0x3     /* This sets the resistor values of the internal single ended termination */
91908 +#define LCONT_LVDS_TERMINATION_SHIFT   14      /* resistors of the link input and comms input clock. */
91909 +
91910 +#define LCONT_TERM_55_OHM              0       /* Resistor values for internal termination of LVDS pads. */
91911 +#define LCONT_TERM_50_OHM              1
91912 +#define LCONT_TERM_AUTO_OHM            2       /* Should normally be set to auto. */
91913 +#define LCONT_TERM_45_OHM              3
91914 +
91915 +#define LCONT_LVDS_EN_TERM_UPDATE      (1 << 47) /* This should be asserted and deasserted if LCONT_LVDS_TERMINATION is changed. */
91916 +
91917 +/* Macros used to access and construct MMU hash table and chain entries. */
91918 +/*
91919 + * Each hash entry is made up of a 64 byte block. Each entry has two tags where each
91920 + * tag has 4 PTE's. PTE's 0 to 2 use the bottom 48 bits of a 64 bit word and PTE 3
91921 + * uses the top 16 bits of 3 64 bit words.
91922 + *
91923 + * These macros can be used to build a single PTE. PTE3 needs to be built into a 48 bit
91924 + * object before they can be used.
91925 + */
91926 +#define PTE_ENTRY_MASK         0x0000ffffffffffffULL
91927 +#define PTE_TYPE_MASK          0x000000000000000fULL   
91928 +#define PTE_PERM_MASK          0x00000000000000f0ULL
91929 +#define PTE_PERM_TYPE_MASK     0x00000000000000ffULL
91930 +#define PTE_REF_MASK           0x0000000000000100ULL
91931 +#define PTE_PPN_MASK           0x00007ffffffffe00ULL
91932 +#define PTE_MOD_MASK           0x0000800000000000ULL
91933 +#define PTE_TOPADDR_MASK       0x0000600000000000ULL
91934 +
91935 +#define PTE_MOD_SHIFT          47
91936 +#define PTE_PPN_SHIFT          9
91937 +#define PTE_REF_SHIFT          8
91938 +#define PTE_PERM_SHIFT         4
91939 +#define PTE_TYPE_SHIFT         0
91940 +
91941 +#define PTE_PADDR_SHIFT                (12 - 9)                /* Physical addresses are shifted down by this to go into the PTE */
91942 +
91943 +
91944 +/* Values required for tag 3 */
91945 +#define PTE_REF_3                      0x0100000000000000ULL
91946 +#define PTE_MOD_3                      0x8000000000000000ULL
91947 +#define PTE_ENTRY_MASK_3               0xffff000000000000ULL
91948 +#define PTE_PERM_TYPE_MASK_3           0x00ff000000000000ULL
91949 +#define PTE_ENTRY_3_FOR_0(NewPte)      ((NewPte << (48)) & PTE_ENTRY_MASK_3)
91950 +#define PTE_ENTRY_3_FOR_1(NewPte)      ((NewPte << (32)) & PTE_ENTRY_MASK_3)
91951 +#define PTE_ENTRY_3_FOR_2(NewPte)      ((NewPte << (16)) & PTE_ENTRY_MASK_3)
91952 +
91953 +/* Values required for the tags */
91954 +#define TAG_CONTEXT_MASK               0x0000000000003fffULL
91955 +#define TAG_ADDRESS_MASK               0xfffffffff8000000ULL
91956 +#define TAG_CHAINPTR_18TO6_MASK                0x0000000007ffc000ULL
91957 +#define TAG_CHAINPTR_LOW_SHIFT         (14 - 6)
91958 +#define TAG_CHAINPTR_30TO19_MASK       0x0000000003ffc000ULL
91959 +#define TAG_CHAINPTR_HIGH_SHIFT                (19 - 14)
91960 +#define TAG_COPY_BIT                   0x0000000004000000ULL
91961 +
91962 +/*
91963 + * This takes the number loaded into the control register and returns the page size as a power of two.
91964 + */
91965 +
91966 +#define E4_PAGE_SIZE_TABLE             E4_uint32 const PageSizeTable[] = {12, 13, 16, 19, 21, 22, 26, 29}
91967 +#define E4_PAGE_SIZE_TABLE_SIZE                (sizeof(PageSizeTable)/sizeof(PageSizeTable[0]))
91968 +
91969 +/*
91970 + * This macro generates a hash block index.
91971 + *
91972 + * Cntx                 This is the 14 bit context. It should not be larger than 14 bits.
91973 + * VAddr        This is the 64 bit virtual address. It does not require any masking and can be a byte address.
91974 + * PageSize     This is the value loaded into the control register for this hash table.
91975 + * HashTableMask This should be set to mask out upper bits past the end of the hash table.
91976 + */
91977 +#define E4MMU_SHIFT_ADDR(VAddr, Shift) \
91978 +    ((((E4_uint32)(VAddr)) >> (Shift)) | (((E4_uint32)((VAddr) >> 32)) << (32 - (Shift))))
91979 +
91980 +#define E4MMU_CONTEXT_SCRAMBLE(Cntx) \
91981 +             ((((Cntx) << 8) | ((Cntx) >> 6)) ^ (((Cntx) << 15) | ((Cntx) << 1)))
91982 +
91983 +#define E4MMU_HASH_INDEX(Cntx, VAddr, PageShift, HashTableMask)                \
91984 +           ((E4MMU_SHIFT_ADDR(VAddr, (PageShift) + 2) ^ E4MMU_CONTEXT_SCRAMBLE(Cntx)) & (HashTableMask))
91985 +
91986 +#define E4MMU_TAG(vaddr,ctx)   (((vaddr) & TAG_ADDRESS_MASK) | ((ctx) & TAG_CONTEXT_MASK))
91987 +
91988 +#define E4MMU_TAG2VADDR(tag,hashidx,PageShift,HashTableMask)   \
91989 +               (((tag) & TAG_ADDRESS_MASK) | ((((hashidx) ^ E4MMU_CONTEXT_SCRAMBLE((tag) & TAG_CONTEXT_MASK)) & (HashTableMask)) << ((PageShift + 2))))
91990 +
91991 +/*
91992 + * Detailed bit descriptions for the tags and PTE's are better done with the macros
91993 + * defined above.
91994 + */
91995 +typedef struct _E4_HashTableEntry
91996 +{
91997 +   E4_uint64   Tag[2];          /* two tags per 64-byte hash block */
91998 +   E4_uint64   TagPTE[2][3];    /* per tag: PTEs 0-2 in low 48 bits of each word, PTE 3 split across the top 16 bits */
91999 +} E4_HashTableEntry;
92000 +
92001 +#define E4MMU_TAG_OFFSET(tag)          ((tag) << 3)
92002 +#define E4MMU_PTE_LOW_OFFSET(tag,pte)  ((((tag)*3 + (pte) + 2) << 3))
92003 +#define E4MMU_PTE_HIGH_OFFSET(tag,pte) ((((tag)*3 + (pte) + 2) << 3) + 4)
92004 +#define E4MMU_PTE3_WORD0_OFFSET(tag)   ((((tag)*3 + 2) << 3) + 6)
92005 +#define E4MMU_PTE3_WORD1_OFFSET(tag)   ((((tag)*3 + 3) << 3) + 6)
92006 +#define E4MMU_PTE3_WORD2_OFFSET(tag)   ((((tag)*3 + 4) << 3) + 6)
92007 +
92008 +
92009 +/*
92010 + * Hash0AddrBits is the size of the hash table in bytes as a power of 2.
92011 + * e.g. 11 would give 32 hash entries where each entry is 64 bytes.
92012 + */
92013 +#define SETUP_HASH_TABLES(Hash0PageSize, Hash0AddrBits, Hash1PageSize, Hash1AddrBits)  \
92014 +                         (((Hash0PageSize) << CONT_TABLE0_PAGE_SIZE_SHIFT) |   \
92015 +                          ((Hash0AddrBits) << CONT_TABLE0_MASK_SIZE_SHIFT) |   \
92016 +                          ((Hash1PageSize) << CONT_TABLE1_PAGE_SIZE_SHIFT) |   \
92017 +                          ((Hash1AddrBits) << CONT_TABLE1_MASK_SIZE_SHIFT))
92018 +
92019 +/* ECC status register */
92020 +#define ECC_Addr(s)                    ((s) & 0x7ffffff8ULL)
92021 +#define ECC_Syndrome(s)                        (((s) >> 32) & 0xffffULL)
92022 +#define ECC_RisingDQSSyndrome(s)       (((s) >> 32) & 0xffULL)
92023 +#define ECC_FallingDQSSyndrome(s)      (((s) >> 40) & 0xffULL)
92024 +#define ECC_UncorrectableErr(s)        (((s) >> 48) & 1ULL)
92025 +#define ECC_MultUncorrectErrs(s)       (((s) >> 49) & 1ULL)
92026 +#define ECC_CorrectableErr(s)          (((s) >> 50) & 1ULL)
92027 +#define ECC_MultCorrectErrs(s)         (((s) >> 51) & 1ULL)
92028 +
92029 +/* Permission type saved in a PTE. This is a four bit field */
92030 +#define PERM_Disabled          0x0
92031 +#define PERM_Unused            0x1
92032 +#define PERM_LocDataRead       0x2
92033 +#define PERM_LocDataWrite      0x3
92034 +#define PERM_LocRead           0x4
92035 +#define PERM_LocExecute                0x5
92036 +#define PERM_ReadOnly          0x6
92037 +#define PERM_LocWrite          0x7
92038 +#define PERM_LocEventOnly      0x8
92039 +#define PERM_LocEventWrite     0x9
92040 +#define PERM_RemoteEvent       0xa
92041 +#define PERM_RemoteAll         0xb
92042 +#define PERM_RemoteReadOnly    0xc
92043 +#define PERM_RemoteWriteLocRead        0xd
92044 +#define PERM_DataReadWrite     0xe
92045 +#define PERM_NoFault           0xf
92046 +
92047 +#define PERM_Mask              0xf
92048 +
92049 +/* Permission type hints to device driver */
92050 +#define PERM_Preload           0x10
92051 +
92052 +#define PTE_SetPerm(Perm)      (((Perm) & PERM_Mask) << 4)
92053 +
92054 +/* Control info saved in the lookup field of the TLB */
92055 +#define PTE_PciNotLocal                (1ULL << 0)             /* Directs the access to the PCI interface */
92056 +#define PTE_BigEndian          (1ULL << 1)             /* Valid for PCI entries only */
92057 +#define PTE_RelaxedOrder       (1ULL << 2)             /* Valid for PCI entries only */
92058 +#define PTE_DontSnoop          (1ULL << 3)             /* Valid for PCI entries only */
92059 +
92060 +#define PTE_UseFixedSet                (1ULL << 1)             /* Value for non PCI entries only */
92061 +#define PTE_CommandQueue       (1ULL << 2)             /* Value for non PCI entries only */
92062 +#define PTE_SetFixedSetNo(Set) ((((Set) & 3) << 2) | PTE_UseFixedSet)
92063 +
92064 +#define PTE_TypeBitsMask       (0xfULL)
92065 +#define PTE_PermissionTypeMask (0xfULL << 4)
92066 +#define PTE_Referenced         (1ULL << 8)
92067 +#define PTE_PhysicalPageNoMask (0x7ffffffffe00ULL)
92068 +#define PTE_Modified           (1ULL << 47)
92069 +
92070 +#define PTE_PhysicalAddrShiftIntoPTE   (12 - 9)
92071 +
92072 +/* define page table entry bit fields */
92073 +#define TLB_PageSizeBits       (3 << 0)
92074 +#define TLB_ACCBits            (7 << 2)
92075 +#define TLB_LocalBit           (1 << 5)
92076 +#define TLB_PCI64BitTargetBit  (1 << 6)
92077 +#define TLB_PCIBigEndianBit    (1 << 7)
92078 +
92079 +#define TLB_ModifiedBit                (1 << 55)
92080 +#define TLB_ReferencedBit      (1 << 63)
92081 +
92082 +/* Used to read values from the tlb. */
92083 +#define TLB_TlbReadCntBitsSh   56
92084 +#define TLB_UseSelAddrSh       (1ULL << 60)
92085 +#define TLB_WriteTlbLine       (1ULL << 61)
92086 +
92087 +#define TLB_SEL_LINE(LineNo) (TLB_UseSelAddrSh | \
92088 +                             ((E4_uint64)((LineNo) & 0xf) << TLB_TlbReadCntBitsSh))
92089 +
92090 +#define TLB_NUM_ENTRIES                16
92091 +/*
92092 + * The following macros are used with the test access port (TlbLineValue) for the TLBs.
92093 + */
92094 +#define TLV_DoPciAccess                        (1ULL << 0)
92095 +#define TLV_CommandAccess              (1ULL << 1)
92096 +#define TLV_DoCacheAccess              (1ULL << 2)
92097 +#define TLV_notStartTLBWalk            (1ULL << 3)
92098 +#define TLV_UseFixedSet                        (1ULL << 4)
92099 +#define TLV_BigEndian                  (1ULL << 4)
92100 +#define TLV_RelaxedOrder               (1ULL << 5)
92101 +#define TLV_DontSnoop                  (1ULL << 6)
92102 +#define TLV_FixedSetNo_MASK            (3ULL << 5)
92103 +#define TLV_PciTypeBits_MASK           (7ULL << 4)
92104 +#define TLV_LookupBits_MASK            (0x7fULL)
92105 +#define TLV_MissErr                    (1ULL << 7)
92106 +#define TLV_TypeBits                   (0xffULL)
92107 +
92108 +#define TLV_PhysicalAddr_MASK          (0x3fffffffff000ULL)
92109 +
92110 +#define TLV_TlbTesting                 (1ULL << 51)
92111 +#define TLV_SelectUnitsTlbRead         (1ULL << 52)
92112 +#define TLV_SelectTProcTlbRead         (1ULL << 53)
92113 +
92114 +#define TLV_TlbLineSelect_MASK         (0xf)
92115 +#define TLV_UnitsTlbLineSelect_SHIFT   (54)
92116 +#define TLV_TProcTlbLineSelect_SHIFT   (59)
92117 +#define TLV_EnableUnitsTlbRead         (1ULL << 58)
92118 +#define TLV_EnableTProcTlbRead         (1ULL << 63)
92119 +
92120 +/*
92121 + * Use this macro to enable direct testing of the Units TLB.
92122 + * When Line is in the range 0 to 15 a TLB line is selected for reading or writing.
92123 + * When Line is set to -1 the tlb will be activated to perform a match.
92124 + */
92125 +#define TLV_UnitsTlbLineSel(Line) (((Line) == -1) ? 0ULL : \
92126 +    (TLV_EnableUnitsTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_UnitsTlbLineSelect_SHIFT)))
92127 +#define TLV_TProcTlbLineSel(Line) (((Line) == -1) ? 0ULL : \
92128 +    (TLV_EnableTProcTlbRead | ((E4_uint64)((Line) & TLV_TlbLineSelect_MASK) << TLV_TProcTlbLineSelect_SHIFT)))
92129
92130 +/* 
92131 + * Thread_Trap_State
92132 + *  see f_RegFileControl.v TProcStatus
92133 + */
92134 +#define TS_HaltThread                (1 << 0)
92135 +#define TS_TrapForTooManyInstructions (1 << 1)
92136 +#define TS_InstAccessException       (1 << 2)
92137 +#define TS_Unimplemented             (1 << 3)
92138 +#define TS_DataAccessException       (1 << 4)
92139 +#define TS_DataAlignmentError        (1 << 5)
92140 +#define TS_TrapForUsingBadData       (1 << 6)
92141 +#define TS_TrapTypeMask                      (0x7f)
92142 +#define TS_DataPortNo(ts)            (((ts) >> 7) & 7)
92143 +#define TS_TrappedFlag               (1 << 10)
92144 +#define TS_MemLock                   (1 << 11)
92145 +#define TS_XCCshift                  12 /* NOTE(review): TS_XCC() extracts at bit 16 while this shift is 12; with mask 0xff this looks like the combined ICC+XCC byte (bits 12-19) - confirm */
92146 +#define TS_XCCmask                   0xff
92147 +#define TS_ICC(ts)                   (((ts) >> 12) & 15)
92148 +#define TS_XCC(ts)                   (((ts) >> 16) & 15)
92149 +#define TS_InstValid_F               (1 << 20)
92150 +#define TS_InstValid_R               (1 << 21)
92151 +#define TS_InstValid_E               (1 << 22)
92152 +#define TS_InstValid_W               (1 << 23)
92153 +#define TS_HighPriority                      (1 << 24)
92154 +#define TS_RemoteThread                      (1 << 25)
92155 +#define TS_TProcTranslationInProgress (1 << 26)
92156 +#define TS_MemLock_E                 (1 << 27)
92157 +
92158 +/* Thread run queue entries */
92159 +typedef struct E4_ThreadRegs
92160 +{
92161 +    E4_uint64 Registers[7];     /* seven saved registers forming one thread run queue entry */
92162 +} E4_ThreadRegs;
92163 +
92164 +typedef struct E4_TProcQueueEntry
92165 +{
92166 +    E4_ThreadRegs      Regs;                   /* XXXX: jon check this */
92167 +    E4_uint64          Context;                /* XXXX: jon check this */
92168 +} E4_TProcQueueEntry;
92169 +
92170 +typedef struct E4_DProcQueueEntry
92171 +{
92172 +    E4_DMA             Desc;
92173 +    E4_uint64          Pad;
92174 +} E4_DProcQueueEntry;
92175 +
92176 +/*
92177 + * Packet acknowledge values.
92178 + */
92179 +#define E4_PAckOk      0
92180 +#define E4_PAckTestFail        1
92181 +#define E4_PAckDiscard 2
92182 +#define E4_PAckError   3
92183 +
92184 +/*
92185 + * return values from breaktest instruction.
92186 + */
92187 +#define ICC_CARRY_BIT           (0x1ULL << 0)  /* Breaktest: Load pending         */
92188 +#define ICC_ZERO_BIT            (0x1ULL << 1)  /* Breaktest: Time to break        */
92189 +#define ICC_SIGNED_BIT          (0x1ULL << 2)  /* Breaktest: Another thread ready */
92190 +#define ICC_TPROC_RDY_LOW_PRI   (0x1ULL << 3)
92191 +#define ICC_TPROC_RDY_HIGH_PRI  (0x1ULL << 4)
92192 +#define ICC_RUNNING_HIGH_PRI    (0x1ULL << 5)
92193 +#define ICC_RUNNING_AS_REMOTE   (0x1ULL << 6)
92194 +#define ICC_TIME_TO_BREAK       (0x1ULL << 7)
92195 +#define ICC_RS1LOAD_PENDING     (0x1ULL << 8)
92196 +#define ICC_TPROC_HALT          (0x1ULL << 9)
92197 +
92198 +/*
92199 + * Main Interrupt cookies
92200 + * [63:14]     user cookie
92201 + * [13:0]      context
92202 + */
92203 +#define E4_MAIN_INT_SHIFT              14
92204 +#define E4_MAIN_INT_COOKIE(cookie)     ((cookie) >> E4_MAIN_INT_SHIFT)
92205 +#define E4_MAIN_INT_CTX(cookie)                ((cookie) & 0x3FFF)
92206 +
92207 +typedef E4_uint64 E4_MainIntEntry;
92208 +
92209 +#define E4_MainIntEntrySize    sizeof (E4_MainIntEntry)
92210 +
92211 +/*
92212 + * The internal databus is 64 bits wide.
92213 + * All writes to the internal registers MUST be made with 64 bit write operations.
92214 + * These can be made up of pairs 32 bit writes on the PCI bus. The writes will be
92215 + * treated as nops if they are performed with two separate 32 bit writes.
92216 + */
92217 +typedef volatile struct _E4_DataBusMap
92218 +{
92219 +   E4_uint64           InputTrans[4][16];                                                                      /* 0x000 */
92220 +
92221 +   E4_uint64           Dma0TransAddr;                                                                          /* 0x200 */
92222 +   E4_DMA              Dma0Desc;       /* Current Dma0 registers */                                            /* 0x208 */
92223 +
92224 +   E4_uint64           Dma1TransAddr;                                                                          /* 0x240 */
92225 +   E4_DMA              Dma1Desc;       /* Current Dma1 registers */                                            /* 0x248 */
92226 +  
92227 +   E4_uint64           Dma0LastPacketSize;                                                                     /* 0x280 */
92228 +   E4_uint64           Dma0ThisPacketSize;                                                                     /* 0x288 */
92229 +   E4_uint64           Dma0DescSizeInProg;                                                                     /* 0x290 */
92230 +   E4_uint64           Dma0BytesToPrefetch;                                                                    /* 0x298 */
92231 +   E4_uint64           Dma0PrefetchAddr;                                                                       /* 0x2a0 */
92232 +   E4_uint64           EventCountAndType;                                                                      /* 0x2a8 */
92233 +   E4_uint64           EventParameters[2];                                                                     /* 0x2b0 */
92234 +  
92235 +   E4_uint64           Dma1LastPacketSize;                                                                     /* 0x2c0 */
92236 +   E4_uint64           Dma1ThisPacketSize;                                                                     /* 0x2c8 */
92237 +   E4_uint64           Dma1DescSizeInProg;                                                                     /* 0x2d0 */
92238 +   E4_uint64           Dma1BytesToPrefetch;                                                                    /* 0x2d8 */
92239 +   E4_uint64           Dma1PrefetchAddr;                                                                       /* 0x2e0 */
92240 +   E4_Input_Ptrs       InputTrapAndFilter;                                                                     /* 0x2e8 */
92241 +   E4_uint64           EventAddress;                                                                           /* 0x2f0 */
92242 +   E4_QueuePtr         MainIntQueuePtrs;                                                                       /* 0x2f8 */
92243 +   
92244 +   E4_uint64           Event_Copy[16];                                                                         /* 0x300 */
92245 +
92246 +   E4_uint64           CommandCopy[7];                                                                         /* 0x380 */
92247 +   E4_uint64           CommandHold;                                                                            /* 0x3b8 */
92248 +
92249 +   E4_uint64           InputQueueDesc[4];                                                                      /* 0x3c0 */
92250 +
92251 +   /* Run queue Pointers */
92252 +   E4_uint64           DProcLowPriPtrs;                                                                        /* 0x3e0 */
92253 +   E4_uint64           DProcHighPriPtrs;                                                                       /* 0x3e8 */
92254 +   E4_uint64           TProcLowPriPtrs;                                                                        /* 0x3f0 */
92255 +   E4_uint64           TProcHighPriPtrs;                                                                       /* 0x3f8 */
92256 +
92257 +   E4_uint64           CProcStatus;                                                                            /* 0x400 */
92258 +   E4_uint64           TProcStatus;                                                                            /* 0x408 */
92259 +   E4_uint64           IProcStatus;                                                                            /* 0x410 */
92260 +   E4_uint64           EProcStatus;                                                                            /* 0x418 */
92261 +   E4_uint64           DProc0Status;                                                                           /* 0x420 */
92262 +   E4_uint64           DProc1Status;                                                                           /* 0x428 */
92263 +   E4_Sched_Status     SchedStatus;                                                                            /* 0x430 */
92264 +
92265 +   E4_uint64           LoadIProcCntxFilter;    /* Will load one of 4 cntx filter regs. Write only */           /* 0x438 */
92266 +
92267 +   E4_CommandControl   CommandControl;                                                                         /* 0x440 */
92268 +   E4_uint64           CommandCacheTestPort;                                                                   /* 0x448 */
92269 +   E4_uint64           CommandLowPriRunPtrs;                                                                   /* 0x450 */
92270 +   E4_uint64           CommandHighPriRunPtrs;                                                                  /* 0x458 */
92271 +   E4_uint64           CommandSchedDataPort[4];                                                                /* 0x460 */
92272 +
92273 +   E4_uint64           DmaRouteBuffer[2][2];   /* Write only. Should not be written to. */                     /* 0x480 */
92274 +   E4_uint64           StenRouteBuffer[2];     /* Write only. Should not be written to. */                     /* 0x4a0 */
92275 +   E4_uint64           pad4[0x098 - 0x096];                                                                    /* 0x4b0 */
92276 +
92277 +   E4_uint64           DmaAlignmentPort[8];    /* Write only. Should only be written to clear the prev reg. */ /* 0x4c0 */
92278 +
92279 +   E4_uint64           MmuBlockEntry[8];       /* Used for hash table and chain fetches */                     /* 0x500 */
92280 +   E4_uint64           WriteUnitsTlbLine[3];                                                                   /* 0x550 */
92281 +   E4_uint64           pad5;                                                                                   /* 0x540 */
92282 +   E4_uint64           WriteTProcTlbLine[3];                                                                   /* 0x568 */
92283 +   E4_uint64           pad6;                                                                                   /* 0x540 */
92284 +
92285 +   E4_uint64           MmuTableBasePtrs;       /* Both tables packed into a single 64 bit value */             /* 0x580 */
92286 +   E4_uint64           MmuFaultAndRootCntxPtr; /* Both packed into a single 64 bit value */                    /* 0x588 */
92287 +   E4_uint64           UnitsVAddr;                                                                             /* 0x590 */
92288 +   E4_uint64           TProcVAddr;                                                                             /* 0x598 */
92289 +   E4_uint64           UnitsCntx;                                                                              /* 0x5a0 */
92290 +   E4_uint64           TProcCntx;              /* Read only. Writes access VProcCacheWritePort */              /* 0x5a8 */
92291 +   E4_uint64           FaultAddrReg;                                                                           /* 0x5b0 */
92292 +   E4_uint64           FaultTypeAndContextReg;                                                                 /* 0x5b8 */
92293 +
92294 +   E4_uint32           SysControlReg;                                                                          /* 0x5c0 */
92295 +   E4_uint32           CacheTagValue;                                                                          /* 0x5c4 */
92296 +   E4_uint64           TlbLineValue;                                                                           /* 0x5c8 */
92297 +   E4_uint64           SDRamConfigReg;                                                                         /* 0x5d0 */
92298 +   E4_uint32           InterruptMask;                                                                          /* 0x5d8 */
92299 +   E4_uint32           InterruptReg;                                                                           /* 0x5dc */
92300 +   E4_uint64           SDRamECCStatus;                                                                         /* 0x5e0 */
92301 +   E4_uint32           LinkControlReg;                                                                         /* 0x5e8 */
92302 +   E4_uint32           LinkContSettings;                                                                       /* 0x5ec */
92303 +   E4_uint64           LinkPortKey;                                                                            /* 0x5f0 */
92304 +   E4_uint64           LinkPortLock;                                                                           /* 0x5f8 */
92305 +
92306 +   E4_uint64           SDRamWriteBuffer[4][8];                                                                 /* 0x600 */
92307 +   E4_uint64           SDRamReadBuffer[4][8];                                                                  /* 0x700 */
92308 +
92309 +   E4_uint64           TProcRegs[64];                                                                          /* 0x800 */
92310 +   E4_uint64           TProcStartUp[8];        /* Not to be used except by the elan itself */                  /* 0xa00 */
92311 +
92312 +   E4_uint64           LoadPending;                                                                            /* 0xa40 */
92313 +   E4_uint64           StortPending;                                                                           /* 0xa48 */
92314 +   E4_uint64           DirtyBits;                                                                              /* 0xa50 */
92315 +   E4_uint64           BadBits;                                                                                /* 0xa58 */
92316 +
92317 +   E4_uint64           ICachePort_Cntl_Addr;                                                                   /* 0xa60 */
92318 +   E4_uint64           Thread_Trap_State;                                                                      /* 0xa68 */
92319 +
92320 +/* Instruction buffer (4 * 32 bit words) */
92321 +   E4_uint64           nPC_W;                                                                                  /* 0xa70 */
92322 +   E4_uint64           PC_W;                                                                                   /* 0xa78 */
92323 +
92324 +   E4_uint64           ICacheFillData[8];                                                                      /* 0xa80 */
92325 +   E4_uint64           ICachePort[8];                                                                          /* 0xac0 */
92326 +
92327 +   E4_uint64           PciDataBufs[4][8];                                                                      /* 0xb00 */
92328 +
92329 +   E4_uint64           CommandQueueBuffer[128];                                                                /* 0xc00 */
92330 +} E4_DataBusMap;
92331 +
92332 +/*
92333 + * These macros are used to set up the thread processor's ICache.
92334 + */
92335 +#define E4_ICacheTagAddrShift          6
92336 +#define E4_AccessICacheRams            1
92337 +#define E4_InvalidTagValue             0xffffffffffffffffULL
92338 +#define E4_ICacheSizeInBytes           (1024*16)
92339 +#define E4_ICacheLineSizeInBytes       (64)
92340 +#define E4_ICacheLines                 (E4_ICacheSizeInBytes/E4_ICacheLineSizeInBytes)
92341 +#define E4_ICachePortSize              ( (sizeof((E4_DataBusMap *) 0)->ICachePort) /   \
92342 +                                         (sizeof((E4_DataBusMap *) 0)->ICachePort[0]))
92343 +
92344 +#define E4_ICacheFixupInsn             0xc0b02f95ull           /* st1 [%r0 +  0xf95] */
92345 +#define E4_ICacheFixupAddr             0xf95ull
92346 +#define E4_ICacheFixupOffset           0xfc0
92347 +
92348 +/*
92349 + * Event interrupt
92350 + */
92351 +typedef volatile union _E4_EventInt
92352 +{
92353 +   E4_uint64    ForceAlign;
92354 +   struct {
92355 +       E4_uint32 IntCookie;
92356 +       E4_uint32 EventContext; /* Bits 16 to 28 */
92357 +    } s;
92358 +} E4_EventInt;
92359 +
92360 +/*
92361 + * The following are used to interpret a fault status register.
92362 + */
92363 +
92364 +/*
92365 + * FSR[14:0] - AccessType
92366 + *
92367 + * T = Type bit
92368 + * S = size bit. Size is in units of 64 bits or 8 bytes.
92369 + * E = Byte end pointer. Used to define the last written byte of the last 64 bits written.
92370 + * D = Data type bit. Used for endian conversion in the PCI interface.
92371 + * C = Used by the cache to decide if this access should allocate a cache line.
92372 + * d = Set if dma read or write data. This is used to guarantee order at the PCI interface.
92373 + * A = Access type used to check permissions by the MMU in a virtual access.
92374 + * P = Part Write. If set some byte enables may be used. Effects the action of a cache miss.
92375 + */
92376 +
92377 +/* FSR[7:0] */
92378 +/* bit 7 => virtual write */
92379 +#define AT_VirtualWriteAccBit          (1 << 7)                /* AAADDdC1EEESSSS = Virtual Write */
92380 +#define AT_VirtualWriteSizeMask                0xf                     /* size of write access (0 => 128 bytes) */
92381 +#define AT_VirtualWriteEndPtrShift     4                       /* end byte pointer for part write block */
92382 +#define AT_VirtualWriteEndPtrMask      0x7
92383 +
92384 +/* else bit 6 => virtual read */
92385 +#define AT_VirtualReadAccBit           (1 << 6)                /* AAADDdC01SSSSSS = Virtual Read */
92386 +#define AT_VirtualReadSizeMask         0x3f                    /* size of read access (0 => 512 bytes) */
92387 +
92388 +/* else => special access */
92389 +#define AT_SelBitsMask                 0xf                     /* Bits to select the type of access from */
92390 +#define AT_SelBitsShift                        0x4
92391 +#define AT_SpecialRd                   (0x0 << 4)              /* AAADDdC0000TTTT = Special read Access */
92392 +#define AT_SpecialWr                   (0x1 << 4)              /* AAADDdC0001TTTT = Special write Access */
92393 +#define AT_PhysicalRd                  (0x2 << 4)              /* AAADDdC00100SSS = Physical Read */
92394 +#define AT_PhysicalWr                  (0x3 << 4)              /* AAADDdC0011PSSS = Physical write */
92395 +
92396 +#define AT_OtherSizeMask               0xf                     /* Size bits used by all other accesses. 0=128 bytes */
92397 +#define AT_SpecialBitsMask             0xf                     /* Bits used to define the special access types */
92398 +#define AT_CacheSizeBitsMask           0x7                     /* Size bits used for local accesses. 0=64 */
92399 +#define AT_CachePhysPartWriteBit       0x8                     /* This bit is set if the access is a part write to the cache */
92400 +
92401 +/* Special memory access operations */
92402 +#define AT_RegAccess                   0x0
92403 +#define AT_GetCntxFilter               0xe                     /* Only used by special reads */
92404 +#define AT_RouteFetch                  0xf                     /* Only used by special reads */
92405 +
92406 +/* FSR[9:8] */
92407 +#define AT_NonAlloc                    (1 << 8)                /* 1=Do not fill cache with this data */
92408 +#define AT_DmaData                     (1 << 9)                /* This is a DMA read access. Required to guarantee dma read order. */
92409 +
92410 +/* FSR[11:10] - Data Type - defines data type for endian conversion in PCI interface*/
92411 +#define AT_BlkDataTyMask               0x3
92412 +#define AT_BlkDataTyShift              10
92413 +
92414 +#define AT_BlkDataType(FSR)            (((FSR) >> AT_BlkDataTyShift) & AT_BlkDataTyMask)
92415 +#define AT_TypeByte                    0x0
92416 +#define AT_TypeHWord                   0x1
92417 +#define AT_TypeWord                    0x2
92418 +#define AT_TypeDWord                   0x3
92419 +
92420 +/* FSR[14:12] - Access Permissions */
92421 +#define AT_PermBitsMask                        0x7
92422 +#define AT_PermBitsShift               12
92423 +
92424 +#define AT_Perm(FSR)                   (((FSR) >> AT_PermBitsShift) & AT_PermBitsMask)
92425 +#define AT_PermLocalDataRead           0x0
92426 +#define AT_PermLocalDataWrite          0x1
92427 +#define AT_PermRemoteRead              0x2
92428 +#define AT_PermRemoteWrite             0x3
92429 +#define AT_PermExecute                 0x4
92430 +#define AT_PermLocalEvent              0x5
92431 +#define AT_PermRemoteEvent             0x7
92432 +
92433 +/* FSR[22:15] - reason for fault */
92434 +
92435 +#define FSR_WalkForThread              (1 << 15) /* The thread processor caused the fault */
92436 +#define FSR_Walking                    (1 << 16) /* The fault was caused during a hash table access */
92437 +#define FSR_NoTranslationsFound                (1 << 17) /* The hash table did not contain a matching tag */
92438 +#define FSR_WalkingProtectionFault     (1 << 18) /* A protection fault was detected while walking */
92439 +#define FSR_HashTable1                 (1 << 19) /* Was accessing hash table 1 not 0 */
92440 +#define FSR_RouteVProcErr              (1 << 20) /* This is an invalid vproc for a route fetch */
92441 +#define FSR_FaultForBadData            (1 << 21) /* Bad data (double bit ECC error) while performing a walk access */
92442 +#define FSR_FaultForMaxChainCount      (1 << 22) /* The Elan4 has walked a chain of 1024 items. */
92443 +
92444 +typedef volatile struct _E4_FaultSave
92445 +{
92446 +    E4_uint64 FSRAndFaultContext;                 /* Bits 0-31 : FaultContext. Bits 32-63 : FaultStatus Register */
92447 +    E4_uint64 FaultAddress;
92448 +} E4_FaultSave;
92449 +
92450 +#define FaultSaveContext(FSRAndFaultContext)   ((E4_uint32) ((FSRAndFaultContext) & 0xFFFFFFFF))
92451 +#define FaultSaveFSR(FSRAndFaultContext)       ((E4_uint32) ((FSRAndFaultContext) >> 32))
92452 +
92453 +typedef union E4_TrTypeCntx
92454 +{
92455 +   E4_uint32 TypeContext;
92456 +   struct
92457 +   {
92458 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
92459 +      E4_uint32 Type:16;               /* Transaction type field */
92460 +      E4_uint32 Context:13;            /* Transaction context */
92461 +      E4_uint32 TypeCntxInvalid:1;     /* Bit  29 */
92462 +      E4_uint32 StatusRegValid:1;      /* Bit  30 */
92463 +      E4_uint32 LastTrappedTrans:1;    /* Bit  31 */
92464 +#else
92465 +      E4_uint32 LastTrappedTrans:1;    /* Bit  31 */
92466 +      E4_uint32 StatusRegValid:1;      /* Bit  30 */
92467 +      E4_uint32 TypeCntxInvalid:1;     /* Bit  29 */
92468 +      E4_uint32 Context:13;            /* Transaction context */
92469 +      E4_uint32 Type:16;               /* Transaction type field */
92470 +#endif
92471 +   } s;
92472 +} E4_TrTypeCntx;
92473 +
92474 +#define MAX_TRAPPED_TRANS      28
92475 +#define TRANS_DATA_DWORDS      16
92476 +#define TRANS_DATA_BYTES       128
92477 +#define NO_OF_INPUT_CHANNELS   4
92478 +
92479 +#define CH0_LOW_PRI_CHAN       0
92480 +#define CH1_LOW_PRI_CHAN       1
92481 +#define CH0_HIGH_PRI_CHAN      2
92482 +#define CH1_HIGH_PRI_CHAN      3
92483 +
92484 +/* Words have been swapped for big endian access when fetched with dword access from elan.*/
92485 +typedef struct _E4_IprocTrapHeader
92486 +{
92487 +   E4_uint64   TrAddr;
92488 +   E4_uint64   IProcStatusCntxAndTrType;
92489 +} E4_IprocTrapHeader;
92490 +
92491 +typedef struct _E4_IprocTrapData
92492 +{
92493 +   E4_uint64 Data[TRANS_DATA_DWORDS];
92494 +} E4_IprocTrapData;
92495 +
92496 +/*
92497 + * This struct defines the trap state for the inputers. It requires a contiguous 16K byte block of local memory.
92498 + * The channel bits have been grouped to the low end of the address to force all Identify cookies to use the
92499 + * same cache line.
92500 + */
92501 +typedef struct _E4_IprocTrapState
92502 +{
92503 +   E4_IprocTrapData   TrData[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS];
92504 +   E4_IprocTrapHeader TrHeader[MAX_TRAPPED_TRANS][NO_OF_INPUT_CHANNELS];
92505 +   E4_uint64         pad[8*NO_OF_INPUT_CHANNELS];
92506 +} E4_IprocTrapState;
92507 +
92508 +/*
92509 + * 64 kbytes of elan local memory. Must be aligned on a 64k boundary
92510 + */
92511 +#define E4_LowPriQueueSize     0x400
92512 +#define E4_HighPriQueueSize    0x100
92513 +
92514 +typedef struct _E4_FaultSaveArea
92515 +{
92516 +   E4_FaultSave                TProcData[8];
92517 +   E4_FaultSave                TProcInst;
92518 +   E4_FaultSave                Dummy[7];
92519 +   E4_FaultSave                SchedProc;
92520 +   E4_FaultSave                DProc;
92521 +   E4_FaultSave                EventProc;
92522 +   E4_FaultSave                IProc;
92523 +   E4_FaultSave                DProcData[4];
92524 +   E4_FaultSave                QReadData[8];
92525 +} E4_FaultSaveArea;
92526 +
92527 +/* Macros to manipulate event queue pointers */
92528 +/*     generate index in EventIntQueue */
92529 +#define E4_EVENT_INTQ_INDEX(fptr)      (((fptr) & 0x1fff) >> 3)
92530 +/*     generate next fptr */
92531 +#define E4_EVENT_INTQ_NEXT(fptr)       ((((fptr) + 8) & ~0x4000) | 0x2000)
92532 +
92533 +typedef struct _E4_CommandPort
92534 +{
92535 +   volatile E4_uint64 Command[1024];   /* a whole 8k page */
92536 +} E4_CommandPort;
92537 +
92538 +/*
92539 + * This is the allocation of unit numbers within the ELAN. It is used to extract the fault address
92540 + * and fault type after a unit has trapped on a memory fetch. Only units that can generate traps
92541 + * have been included.
92542 + */
92543 +#define CUN_TProcData0         0x00
92544 +#define CUN_TProcData1         0x01
92545 +#define CUN_TProcData2         0x02
92546 +#define CUN_TProcData3         0x03
92547 +#define CUN_TProcData4         0x04
92548 +#define CUN_TProcData5         0x05
92549 +#define CUN_TProcData6         0x06
92550 +#define CUN_TProcData7         0x07
92551 +#define CUN_TProcInst          0x08
92552 +
92553 +/* memory current unit numbers
92554 + * TProc data bus */
92555 +#define CUN_DProcPA0           0x10
92556 +#define CUN_DProcPA1           0x11
92557 +#define CUN_DProcPrefetch      0x12
92558 +#define CUN_CommandProc                0x13
92559 +#define CUN_DProcData0         0x14    /* Dma prefetch reads. */
92560 +#define CUN_DProcData1         0x15    /* Dma prefetch reads. */
92561 +#define CUN_DProcData2         0x16    /* Dma prefetch reads. */
92562 +#define CUN_DProcData3         0x17    /* Dma prefetch reads. */
92563 +
92564 +#define CUN_IProcLowPri                0x18
92565 +#define CUN_IProcHighPri       0x19
92566 +#define CUN_Spare0             0x1A
92567 +#define CUN_Spare1             0x1B
92568 +#define CUN_Spare2             0x1C
92569 +#define CUN_ThreadQueue                0x1D
92570 +#define CUN_EventProc0         0x1e
92571 +#define CUN_EventProc1         0x1f
92572 +
92573 +#define CUN_Entries            0x20
92574 +
92575 +typedef struct E4_Registers
92576 +{
92577 +   E4_CacheTags                Tags;                           /* 4k bytes  c000 -> cfff */
92578 +   E4_DataBusMap       Regs;                           /* 4k bytes  d000 -> dfff */
92579 +   E4_User_Regs                uRegs;                          /* 8k bytes  e000 -> ffff */
92580 +} E4_Registers;
92581 +
92582 +#define I2cCntl_I2cPortWrite           (0 << 0)
92583 +#define I2cCntl_I2cPortRead            (1 << 0)
92584 +#define I2cCntl_I2cPortGenStopBit      (1 << 1)
92585 +#define I2cCntl_I2cPortGenRestartBit   (1 << 2)
92586 +#define I2cCntl_I2cPortAccFailed       (1 << 3)
92587 +#define I2cCntl_I2cStopped             (1 << 4)
92588 +#define I2cCntl_I2cWakeupFailed                (1 << 5)
92589 +#define I2cCntl_I2cFastMode            (1 << 6)
92590 +#define I2cCntl_I2cPortBusy            (1 << 7)
92591 +
92592 +#define I2cCntl_LedI2cRegBase_Mask     0x7f
92593 +#define I2cCntl_I2cUpdatingLedReg      (1 << 7)
92594 +
92595 +#define I2cCntl_InvertLedValues                (1 << 0)                /* read/write */
92596 +#define I2cCntl_LedRegWriteFailed      (1 << 1)                /* read only */
92597 +#define I2cCntl_EEPromLoadFailed       (1 << 2)                /* read only */
92598 +#define I2cCntl_InhibitI2CRom          (1 << 3)                /* read only */
92599 +#define I2cCntl_BadRomCrc              (1 << 4)                /* read only */
92600 +#define I2cCntl_MapInI2cConfigData     (1 << 5)                /* read/write */
92601 +#define I2cCntl_SampleNewLedValues     (1 << 6)                /* read/write */
92602 +#define I2cCntl_ClearLinkError         (1 << 7)                /* write only */
92603 +
92604 +typedef struct E4_I2C
92605 +{
92606 +   volatile E4_uint8    I2cWrData;
92607 +   volatile E4_uint8    I2cRdData;
92608 +   volatile E4_uint8    I2cPortControl;
92609 +   volatile E4_uint8   I2cLedBase;
92610 +   volatile E4_uint8    I2cStatus;
92611 +   volatile E4_uint8    I2cLedsValue;
92612 +   volatile E4_uint16  I2cPad;
92613
92614 +   E4_uint8            pad[256 - sizeof(E4_uint64)];
92615 +
92616 +   E4_uint8            UnchangedElan4ConfigRegs[256];
92617 +   E4_uint8            I2cRomConfigShadowValues[256];
92618 +   E4_uint8            ChangedElan4ConfigRegs[256];
92619 +} E4_I2C;
92620 +
92621 +typedef struct _E4_ContextControlBlock 
92622 +{
92623 +    E4_uint32 Filter;                  /* Use a Network context to index for this value */
92624 +    E4_uint32 VirtualProcessTable;     /* Use a local context to index for this value */
92625 +} E4_ContextControlBlock;
92626 +
92627 +/*
92628 + * Filter
92629 + *   [13:0]    Context
92630 + *   [14]      DiscardAll
92631 + *   [15]      AckAll
92632 + *   [16]      HighPri
92633 + *   [17]      CountStats
92634 + *   [31:18]   Unused
92635 + */
92636 +#define E4_FILTER_STATS                (1 << 17)
92637 +#define E4_FILTER_HIGH_PRI     (1 << 16)
92638 +#define E4_FILTER_ACKOK_ALL    (1 << 15)
92639 +#define E4_FILTER_DISCARD_ALL  (1 << 14)
92640 +#define E4_FILTER_CONTEXT_MASK (0x3FFF)
92641 +
92642 +/*
92643 + * VirtualProcessTable
92644 + *   [8:0]     Unused  
92645 + *   [12:9]    Size       num vp entries = 512 << Size
92646 + *   [30:13]   Pointer
92647 + *   [31]      Valid
92648 + */
92649 +#define E4_VPT_MIN_ENTRIES      512
92650 +#define E4_VPT_VALID           ((unsigned)1 << 31)
92651 +#define E4_VPT_PTR_SHIFT       0
92652 +#define E4_VPT_SIZE_SHIFT      9
92653 +#define E4_VPT_SIZE_MASK        0xf
92654 +#define E4_VPT_NUM_VP(vpt_val)  (E4_VPT_MIN_ENTRIES << (((vpt_val) >> E4_VPT_SIZE_SHIFT) & E4_VPT_SIZE_MASK))
92655 +#define E4_VPT_VALUE(ptr,size) (((ptr) << E4_VPT_PTR_SHIFT) | ((size) << E4_VPT_SIZE_SHIFT))
92656 +
92657 +
92658 +/* Virtual Process Table */
92659 +typedef struct _E4_VirtualProcessEntry
92660 +{
92661 +    E4_uint64  Values[2];
92662 +} E4_VirtualProcessEntry;
92663 +
92664 +/*
92665 + * Entries have the following format - rtX is a packed route 
92666 + *
92667 + * |rt11|rt10|rt9 |rt8 |rt7 |rt6 |rt5 |rt4 |rt3 |rt2 |rt1 |rt0 |PAAADD       RRRRRR|
92668 + * |output context     |rt23|rt22|rt21|rt20|rt19|rt18|rt17|rt16|rt15|rt14|rt13|rt12|
92669 + */
92670 +
92671 +#define ROUTE_CTXT_SHIFT       48
92672 +#define ROUTE_CTXT_MASK                (~((1ull << ROUTE_CTXT_SHIFT)-1))
92673 +#define ROUTE_CTXT_VALUE(ctx)  (((E4_uint64) ctx) << ROUTE_CTXT_SHIFT)
92674 +
92675 +#define ROUTE_PACKED_OFFSET    16
92676 +#define ROUTE_NUM_PACKED       24
92677 +
92678 +/* defines for first flit of a route */
92679 +#define FIRST_TIMEOUT(Val)     ((Val) << 14)                   /* [15:14]  */
92680 +#define FIRST_SYSTEM_PACKET     (1 << 13)                       /* [13]     */
92681 +#define FIRST_FLOOD_PACKET      (1 << 12)                       /* [12]     */
92682 +#define FIRST_HIGH_PRI         (1 << 11)                       /* [11]    */
92683 +#define FIRST_AGE(Val)         ((Val) << 7)                    /* [10:7] */
92684 +#define FIRST_OPTIONS_MASK     (0xFF80)
92685 +
92686 +/* [6:0] unpacked 1st route value */
92687 +#define FIRST_INVALID          (0)
92688 +#define FIRST_ROUTE(Val)       (0x08 | (Val))
92689 +#define FIRST_ADAPTIVE         (0x30)
92690 +#define FIRST_BCAST_TREE       (0x20)
92691 +#define FIRST_MYLINK           (0x10)
92692 +#define FIRST_BCAST(Top, Bot)  (0x40 | ((Top) << 3) | (Bot))
92693 +
92694 +/* defines for 3 bit packed entries for subsequent flits */
92695 +#define PACKED_INVALID         (0)
92696 +#define PACKED_ROUTE(Val)      (8 | (Val))
92697 +#define PACKED_ADAPTIVE                (3)
92698 +#define PACKED_BCAST_TREE      (2)
92699 +#define PACKED_MYLINK          (1)
92700 +#define PACKED_BCAST0(Top,Bot) (4 | (Bot & 3))
92701 +#define PACKED_BCAST1(Top,Bot) ((Top << 1) | (Bot >> 2))
92702 +
92703 +#endif /* _ASM */
92704 +/* The MMU root context pointer has a mask to bounds check 
92705 + * it - this is computed as follows.
92706 + */
92707 +#define E4_CONTEXT_MASK(num)   (((num) >= 0x2000) ? 0x00 :     \
92708 +                                ((num) >= 0x1000) ? 0x80 :     \
92709 +                                ((num) >= 0x0800) ? 0xc0 :     \
92710 +                                ((num) >= 0x0400) ? 0xe0 :     \
92711 +                                ((num) >= 0x0200) ? 0xf0 :     \
92712 +                                ((num) >= 0x0100) ? 0xf8 :     \
92713 +                                ((num) >= 0x0080) ? 0xfc :     \
92714 +                                ((num) >= 0x0040) ? 0xfe : 0xff)
92715 +/*
92716 + * This generates the size field for a virtual process table.
92717 + * Size defined as 2^n no of 8K pages.
92718 + * Single cycle route fetches are possible if the minimum vproc table size is 8k.
92719 + */
92720 +#define E4_GEN_VPT_SIZE(Size)  (((Size) & E4_VPT_SIZE_MASK) << E4_VPT_SIZE_SHIFT)
92721 +
92722 +#define COMMAND_RUN_QUEUE_BITS         (13 + 2) /* 8K entries of 4 bytes. This is fixed in hardware. */
92723 +#define COMMAND_DESCS_SPACE_BITS       (13 + 5) /* 8K entries of 32 bytes. This is fixed in hardware. */
92724 +#define COMMAND_INSERTER_CACHE_ENTRIES 16
92725 +
92726 +#define COM_TEST_PORT_ADDR_MASK                0xfULL
92727 +#define COM_TEST_PORT_ADDR_SH          0
92728 +
92729 +/*
92730 + * The flush register is accessed through the CommandControl register.
92731 + * The address is naturally aligned. It also positions the command descriptors in memory.
92732 + * When no command queues need flushing it should be ORed with COM_FLUSH_INVALID. This sets
92733 + * it to the top command queue descriptor. This cannot be accessed from the PCI.
92734 + */
92735 +#define COM_ENABLE_DEQUEUE             (1 << 4)
92736 +#define COM_FLUSH_DESCRIPTOR_MASK      0x7fffffe0ULL
92737 +#define COM_FLUSH_INVALID              0x0003ffe0ULL
92738 +
92739 +
92740 +/*
92741 + * Elan4 BAR1 is split up as follows :
92742 + *
92743 + * RevA
92744 + *     0x3f00000 EBUS other
92745 + *     0x3e00000 EBUS ROM
92746 + *     0x3dfc000 registers
92747 + *     0x0000000 command ports
92748 + *
92749 + * RevB
92750 + *     0x3ffc000 registers
92751 + *     0x3ff8000 padding
92752 + *     0x3ff6000 i2c registers
92753 + *     0x0000000 command ports
92754 + */
92755 +#define ELAN4_BAR1_SIZE                        (1 << 26)       /* 64M */
92756 +#define ELAN4_REG_SIZE                 (1 << 14)       /* 16K */
92757 +
92758 +#define ELAN4_REVA_EBUS_SIZE           (1 << 21)       /* 2M */
92759 +#define ELAN4_REVA_EBUS_OFFSET         (ELAN4_BAR1_SIZE - ELAN4_REVA_EBUS_SIZE)
92760 +#define ELAN4_REVA_REG_OFFSET          (ELAN4_REVA_EBUS_OFFSET - ELAN4_REG_SIZE)
92761 +#define ELAN4_REVA_NUM_COMMAND_QUEUES  (ELAN4_REVA_REG_OFFSET >> 13)
92762 +
92763 +#define ELAN4_REVA_EBUS_ROM_SIZE       (1 << 20)       /* 1M */
92764 +#define ELAN4_REVA_EBUS_ROM_OFFSET     0
92765 +
92766 +#define ELAN4_REVB_I2C_PADDING         (1 << 14)       /* 16K */
92767 +#define ELAN4_REVB_I2C_SIZE            (1 << 13)       /* 8k */
92768 +#define ELAN4_REVB_REG_OFFSET          (ELAN4_BAR1_SIZE - ELAN4_REG_SIZE)
92769 +#define ELAN4_REVB_I2C_OFFSET          (ELAN4_REVB_REG_OFFSET - ELAN4_REVB_I2C_PADDING - ELAN4_REVB_I2C_SIZE)
92770 +#define ELAN4_REVB_NUM_COMMAND_QUEUES  (ELAN4_REVB_I2C_OFFSET >> 13)
92771 +
92772 +#endif /* notdef _ELAN4_REGISTERS_H */
92773 diff -urN clean/include/elan4/sdram.h linux-2.6.9/include/elan4/sdram.h
92774 --- clean/include/elan4/sdram.h 1969-12-31 19:00:00.000000000 -0500
92775 +++ linux-2.6.9/include/elan4/sdram.h   2003-09-24 09:55:55.000000000 -0400
92776 @@ -0,0 +1,41 @@
92777 +/*
92778 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92779 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
92780 + *
92781 + *    For licensing information please see the supplied COPYING file
92782 + *
92783 + */
92784 +
92785 +#ifndef __ELAN4_SDRAM_H
92786 +#define __ELAN4_SDRAM_H
92787 +
92788 +#ident "$Id: sdram.h,v 1.8 2003/09/24 13:55:55 david Exp $"
92789 +/*      $Source: /cvs/master/quadrics/elan4hdr/sdram.h,v $*/
92790 +
92791 +/* Include header file generated by sdram configuration program */
92792 +#include <elan4/xsdram.h> 
92793 +
92794 +/* SDRAM bank shift definitions */
92795 +#define SDRAM_0_CS_SHIFT       25
92796 +#define SDRAM_1_CS_SHIFT       27
92797 +#define SDRAM_2_CS_SHIFT       28
92798 +#define SDRAM_3_CS_SHIFT       29
92799 +
92800 +#define SDRAM_BANK_SHIFT(cfg) \
92801 +       (((cfg >> SDRAM_RamSize_SH) & 3) == 0 ? SDRAM_0_CS_SHIFT : \
92802 +        ((cfg >> SDRAM_RamSize_SH) & 3) == 1 ? SDRAM_1_CS_SHIFT : \
92803 +        ((cfg >> SDRAM_RamSize_SH) & 3) == 2 ? SDRAM_2_CS_SHIFT : SDRAM_3_CS_SHIFT)
92804 +
92805 +#define SDRAM_BANK_SIZE(cfg)           (1ULL << SDRAM_BANK_SHIFT(cfg))
92806 +#define SDRAM_BANK_OFFSET(cfg,bank)    ((unsigned long long)(bank) << SDRAM_BANK_SHIFT(cfg))
92807 +#define SDRAM_NUM_BANKS(cfg)           (4)
92808 +#define SDRAM_MAX_BANKS                        4
92809 +
92810 +/* When the elan accesses sdram it passes eaddr[12] as sdramaddr[12] when
92811 + * running with a 4k page size, however PCI accesses pass paddr[12], so
92812 + * we must ensure that sdram pages are allocated such that eaddr[12] is the
92813 + * same as paddr[12] - the easiest way is to allocate sdram in 8k chunks and
92814 + * ensure that maddr[12] == eaddr[12] == pgoff[0] */
92815 +#define SDRAM_MIN_PAGE_SIZE            (8192)
92816 +
92817 +#endif /* __ELAN4_SDRAM_H */
92818 diff -urN clean/include/elan4/stats.h linux-2.6.9/include/elan4/stats.h
92819 --- clean/include/elan4/stats.h 1969-12-31 19:00:00.000000000 -0500
92820 +++ linux-2.6.9/include/elan4/stats.h   2005-04-19 12:14:52.000000000 -0400
92821 @@ -0,0 +1,83 @@
92822 +/*
92823 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
92824 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
92825 + * 
92826 + *    For licensing information please see the supplied COPYING file
92827 + *
92828 + */
92829 +
92830 +#ident "@(#)$Id: stats.h,v 1.12 2005/04/19 16:14:52 addy Exp $"
92831 +/*      $Source: /cvs/master/quadrics/elan4mod/stats.h,v $*/
92832 +
92833 +#ifndef __ELAN4_STATS_H
92834 +#define __ELAN4_STATS_H
92835 +
92836 +#define ELAN4_DEV_STATS_BUCKETS                8
92837 +
92838 +
92839 +typedef struct elan4_dev_stats
92840 +{
92841 +    unsigned long      s_interrupts;
92842 +    
92843 +    unsigned long       s_mainints[ELAN4_DEV_STATS_BUCKETS];
92844 +    unsigned long      s_mainint_punts;
92845 +    unsigned long      s_mainint_rescheds;
92846 +
92847 +    unsigned long       s_haltints;
92848 +
92849 +    unsigned long      s_cproc_traps;
92850 +    unsigned long      s_dproc_traps;
92851 +    unsigned long      s_eproc_traps;
92852 +    unsigned long      s_iproc_traps;
92853 +    unsigned long      s_tproc_traps;
92854 +
92855 +    unsigned long       s_cproc_trap_types[0x10];
92856 +    unsigned long       s_dproc_trap_types[7];
92857 +    unsigned long       s_eproc_trap_types[4];
92858 +    unsigned long       s_iproc_trap_types[0xa];
92859 +    unsigned long       s_tproc_trap_types[7];
92860 +
92861 +    unsigned long       s_correctable_errors;
92862 +    unsigned long       s_multiple_errors;
92863 +    
92864 +    unsigned long       s_link_errors;
92865 +    unsigned long       s_lock_errors;
92866 +    unsigned long       s_deskew_errors;
92867 +    unsigned long       s_phase_errors;
92868 +    unsigned long      s_data_errors;
92869 +    unsigned long      s_fifo_overflow0;
92870 +    unsigned long      s_fifo_overflow1;
92871 +    unsigned long       s_mod45changed;
92872 +    unsigned long       s_pack_not_seen;
92873 +    unsigned long       s_linkport_keyfail;
92874 +
92875 +    unsigned long      s_eop_reset;
92876 +    unsigned long       s_bad_length;
92877 +    unsigned long       s_crc_bad;
92878 +    unsigned long       s_crc_error;
92879 +
92880 +    unsigned long      s_cproc_timeout;
92881 +    unsigned long      s_dproc_timeout;
92882 +
92883 +    unsigned long      s_sdram_bytes_free;
92884 +} ELAN4_DEV_STATS;
92885 +
92886 +#define MainIntBuckets         ((int[ELAN4_DEV_STATS_BUCKETS-1]) {1, 2, 3, 4, 8, 16, 32})
92887 +
92888 +#define BumpDevStat(dev,stat)  ((dev)->dev_stats.stat++)
92889 +#define BucketDevStat(dev,stat,n,bucket)       ((n) <= (bucket)[0] ? (dev)->dev_stats.stat[0]++ : \
92890 +                                                (n) <= (bucket)[1] ? (dev)->dev_stats.stat[1]++ : \
92891 +                                                (n) <= (bucket)[2] ? (dev)->dev_stats.stat[2]++ : \
92892 +                                                (n) <= (bucket)[3] ? (dev)->dev_stats.stat[3]++ : \
92893 +                                                (n) <= (bucket)[4] ? (dev)->dev_stats.stat[4]++ : \
92894 +                                                (n) <= (bucket)[5] ? (dev)->dev_stats.stat[5]++ : \
92895 +                                                (n) <= (bucket)[6] ? (dev)->dev_stats.stat[6]++ : \
92896 +                                                                     (dev)->dev_stats.stat[7]++)
92897 +
92898 +
92899 +/*
92900 + * Local variables:
92901 + * c-file-style: "stroustrup"
92902 + * End:
92903 + */
92904 +#endif /*__ELAN4_STATS_H */
92905 diff -urN clean/include/elan4/tprintf.h linux-2.6.9/include/elan4/tprintf.h
92906 --- clean/include/elan4/tprintf.h       1969-12-31 19:00:00.000000000 -0500
92907 +++ linux-2.6.9/include/elan4/tprintf.h 2003-09-04 08:39:17.000000000 -0400
92908 @@ -0,0 +1,24 @@
92909 +/*
92910 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
92911 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
92912 + *
92913 + *    For licensing information please see the supplied COPYING file
92914 + *
92915 + */
92916 +
92917 +#ifndef __ELAN4_TPRINTF_H
92918 +#define __ELAN4_TPRINTF_H
92919 +
92920 +#ident "$Id: tprintf.h,v 1.6 2003/09/04 12:39:17 david Exp $"
92921 +/*      $Source: /cvs/master/quadrics/elan4hdr/tprintf.h,v $*/
92922 +
92923 +
92924 +#ifdef _ASM
92925 +#define TPRINTF0(string)           add %r0, __LINE__, %r0
92926 +#define TPRINTF1(string,reg)       add reg, __LINE__, %r0
92927 +#else
92928 +#define TPRINTF0(string)           asm volatile ("add %%r0, %0, %%r0" : : "i" (__LINE__))
92929 +#define TPRINTF1(string, value)            asm volatile ("add %0,   %1, %%r0" : : "r" (value), "i" (__LINE__))
92930 +#endif /* _ASM */
92931 +
92932 +#endif /* __ELAN4_TPRINTF_H */
92933 diff -urN clean/include/elan4/trap.h linux-2.6.9/include/elan4/trap.h
92934 --- clean/include/elan4/trap.h  1969-12-31 19:00:00.000000000 -0500
92935 +++ linux-2.6.9/include/elan4/trap.h    2003-10-07 08:11:10.000000000 -0400
92936 @@ -0,0 +1,95 @@
92937 +/*
92938 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
92939 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
92940 + * 
92941 + *    For licensing information please see the supplied COPYING file
92942 + *
92943 + */
92944 +
92945 +#ident "@(#)$Id: trap.h,v 1.10 2003/10/07 12:11:10 david Exp $"
92946 +/*      $Source: /cvs/master/quadrics/elan4mod/trap.h,v $*/
92947 +
92948 +#ifndef __ELAN4_TRAP_H
92949 +#define __ELAN4_TRAP_H
92950 +
92951 +/*
92952 + * If the EProc Faults whilst performing an action (e.g. Read/Write on the data src or dest Addr)
92953 + *  the Eproc increments the Addr(s) by a block size (64 bytes):
92954 + *  1: Fault on Read: 
92955 + *                     Src EventAddr = Read Addr + block
92956 + *  2: Fault on Write:
92957 + *                     Src EventAddr = Read Addr + block
92958 + *                     Dst EventAddr = Read Addr + block
92959 + *                     Size          = Size - block ndwords
92960 + *  We must rewind the addr correctly to complete the transfer successfully
92961 + */
92962 +#define EVENT_COPY_NDWORDS     0x8
92963 +#define EVENT_COPY_BLOCK_SIZE  0x40
92964 +
92965 +typedef struct elan4_eproc_trap
92966 +{
92967 +    E4_uint64          tr_status;
92968 +    E4_FaultSave       tr_faultarea;
92969 +    E4_Event           tr_event;
92970 +    E4_Addr            tr_eventaddr;
92971 +} ELAN4_EPROC_TRAP;
92972 +
92973 +typedef struct elan4_cproc_trap
92974 +{
92975 +    E4_uint64          tr_status;                                      /* cproc status register */
92976 +    E4_uint64          tr_command;                                     /* cproc command */
92977 +    E4_CommandQueueDesc tr_qdesc;                                      /* copy of command queue descriptor */
92978 +    E4_FaultSave       tr_faultarea;                                   /* fault area for mmu traps */
92979 +    ELAN4_EPROC_TRAP   tr_eventtrap;                                   /* associated event trap (waitevent) */
92980 +} ELAN4_CPROC_TRAP;
92981 +
92982 +typedef struct elan4_dproc_trap
92983 +{
92984 +    E4_DMA             tr_desc;
92985 +    E4_FaultSave       tr_packAssemFault;
92986 +    E4_FaultSave       tr_prefetchFault;
92987 +    E4_uint64          tr_status;
92988 +} ELAN4_DPROC_TRAP;
92989 +
92990 +typedef struct elan4_tproc_trap
92991 +{
92992 +    E4_uint64          tr_regs[64];
92993 +    E4_FaultSave       tr_dataFault;
92994 +    E4_FaultSave       tr_instFault;
92995 +    E4_uint64          tr_status;
92996 +    E4_uint64          tr_state;
92997 +    E4_Addr            tr_pc;
92998 +    E4_Addr            tr_npc;
92999 +    E4_uint64          tr_dirty;
93000 +    E4_uint64          tr_bad;
93001 +} ELAN4_TPROC_TRAP;
93002 +
93003 +typedef struct elan4_iproc_trap
93004 +{
93005 +    E4_uint32            tr_numTransactions;
93006 +    E4_uint32            tr_flags;
93007 +    E4_uint32            tr_trappedTrans;
93008 +    E4_uint32            tr_waitForEopTrans;
93009 +    E4_uint32            tr_identifyTrans;
93010 +    E4_uint32            tr_pad;
93011 +
93012 +    E4_FaultSave          tr_faultarea;
93013 +    E4_IprocTrapHeader    tr_transactions[MAX_TRAPPED_TRANS];
93014 +    E4_IprocTrapData      tr_dataBuffers[MAX_TRAPPED_TRANS];
93015 +} ELAN4_IPROC_TRAP;
93016 +
93017 +#define TR_FLAG_ACK_SENT       (1 << 0)
93018 +#define TR_FLAG_EOP_ERROR      (1 << 1)
93019 +#define TR_FLAG_BAD_TRANS      (1 << 2)
93020 +#define TR_FLAG_DMA_PACKET     (1 << 3)
93021 +#define TR_FLAG_EOP_BAD                (1 << 4)
93022 +#define TR_FLAG_TOOMANY_TRANS  (1 << 5)
93023 +
93024 +#define TR_TRANS_INVALID       (0xffffffff)
93025 +
93026 +/*
93027 + * Local variables:
93028 + * c-file-style: "stroustrup"
93029 + * End:
93030 + */
93031 +#endif /* __ELAN4_TRAP_H */
93032 diff -urN clean/include/elan4/trtype.h linux-2.6.9/include/elan4/trtype.h
93033 --- clean/include/elan4/trtype.h        1969-12-31 19:00:00.000000000 -0500
93034 +++ linux-2.6.9/include/elan4/trtype.h  2004-02-06 05:38:21.000000000 -0500
93035 @@ -0,0 +1,112 @@
93036 +/*
93037 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93038 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
93039 + *
93040 + *    For licensing information please see the supplied COPYING file
93041 + *
93042 + */
93043 +
93044 +#ifndef _ELAN4_TRTYPE_H
93045 +#define _ELAN4_TRTYPE_H
93046 +
93047 +#ident "$Id: trtype.h,v 1.20 2004/02/06 10:38:21 mike Exp $"
93048 +/*      $Source: /cvs/master/quadrics/elan4hdr/trtype.h,v $*/
93049 +
93050 +/*<15:11> Size field is used to give the number of additional 64 bit data values.
93051 +         A value from 0 to 16 inclusive is valid. */
93052 +
93053 +#include <elan4/types.h>
93054 +
93055 +#define TR_SIZE_SHIFT          (11)
93056 +#define TR_SIZE_MASK           (0x1f << TR_SIZE_SHIFT)
93057 +#define SET_TR_SIZE(Size)      (((Size) << TR_SIZE_SHIFT) & TR_SIZE_MASK)
93058 +
93059 +/* <10:9> Last Transaction and AckNow bits, marks the last transaction and
93060 +          enables a PACK_OK to be sent. */
93061 +#define TR_LAST_AND_SEND_ACK   (3 << 9)
93062 +
93063 +
93064 +/* <8>  Only valid on the last transaction. Delays execution until an EOP_GOOD is received.
93065 + *      Any other EOP type will abort execution of this transaction. */
93066 +#define TR_WAIT_FOR_EOP                (1 << 8)
93067 +
93068 +/*
93069 + * Data type. This is used by transactions of variable data type. It controls any endian
93070 + * conversion required if the destination host processor has a big endian memory format.
93071 + */
93072 +/*     WriteBlock      <8:7>   Data type
93073 +                       <6:0>   Part write size */
93074 +#define TR_DATATYPE_SHIFT      (6)
93075 +#define TR_DATATYPE_MASK       ((1 << 2) - 1)
93076 +
93077 +#define TR_DATATYPE_BYTE       E4_DATATYPE_BYTE        
93078 +#define TR_DATATYPE_SHORT      E4_DATATYPE_SHORT
93079 +#define TR_DATATYPE_WORD       E4_DATATYPE_WORD        
93080 +#define TR_DATATYPE_DWORD      E4_DATATYPE_DWORD
93081 +
93082 +/* <5:0> Transaction Type
93083 + *       For Writeblock <5:3> 000 => Write, 0001 => Read
93084 + *                      <2:0> End Byte Addr */
93085 +#define TR_OPCODE_MASK         0x3F
93086 +#define TR_BLOCK_OPCODE_MASK   0x38
93087 +
93088 +#define TR_WRITEBLOCK          0x0
93089 +#define TR_ENDBYTE_MASK                0x7
93090 +#define TR_WRITE(Size, EndByte, DataType)                                              \
93091 +                       (0x0 | SET_TR_SIZE(Size) | ((EndByte) & TR_ENDBYTE_MASK) |      \
93092 +                        (((DataType) & TR_DATATYPE_MASK) << TR_DATATYPE_SHIFT))
93093 +
93094 +#define TR_NOP_TRANS           (0x10 | SET_TR_SIZE(0))
93095 +#define TR_SETEVENT            0x10
93096 +#define TR_SETEVENT_NOIDENT    (TR_SETEVENT | SET_TR_SIZE(0) | TR_LAST_AND_SEND_ACK)
93097 +#define TR_SETEVENT_IDENTIFY   (TR_SETEVENT | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
93098 +#define TR_REMOTEDMA           (0x11 | SET_TR_SIZE(7) | TR_LAST_AND_SEND_ACK)
93099 +#define TR_SENDDISCARD         (0x12 | SET_TR_SIZE(0))
93100 +
93101 +/*
93102 + * Conditional transactions that might return PAckTestFail.
93103 + * All will allow further execution of the packet if ([Address] operator DataValue) is true.
93104 + * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
93105 + * These should be used where a definite TRUE/FALSE answer is required.
93106 + */
93107 +#define TR_GTE                 (0x14 | SET_TR_SIZE(1))
93108 +#define TR_LT                  (0x15 | SET_TR_SIZE(1))
93109 +#define TR_EQ                  (0x16 | SET_TR_SIZE(1))
93110 +#define TR_NEQ                 (0x17 | SET_TR_SIZE(1))
93111 +
93112 +/*
93113 + * Conditional transactions that might return PAckDiscard.
93114 + * All will allow further execution of the packet if ([Address] operator DataValue) is true.
93115 + * e.g. for TR_GTE further execution if ([Address] >= DataValue) is true.
93116 + * These should be used where eventually a TRUE answer is expected but the node might not be ready yet.
93117 + * These can be mixed with the normal conditionals to allow a single packet to test for readiness and
93118 + * a TRUE/FALSE answer.
93119 + */
93120 +#define TR_GTE_DISCARD         (0x34 | SET_TR_SIZE(1))
93121 +#define TR_LT_DISCARD          (0x35 | SET_TR_SIZE(1))
93122 +#define TR_EQ_DISCARD          (0x36 | SET_TR_SIZE(1))
93123 +#define TR_NEQ_DISCARD         (0x37 | SET_TR_SIZE(1))
93124 +
93125 +#define TR_TRACEROUTE_TRANS    0x18
93126 +#define TR_TRACEROUTE(Size)    (TR_TRACEROUTE_TRANS | (TR_DATATYPE_WORD << TR_DATATYPE_SHIFT) |SET_TR_SIZE(Size))
93127 +#define TR_IDENTIFY            (0x19 | SET_TR_SIZE(0))
93128 +
93129 +#define TR_ADDWORD             (0x1c | SET_TR_SIZE(2) | TR_LAST_AND_SEND_ACK)
93130 +#define TR_INPUT_Q_COMMIT      (0x1d | SET_TR_SIZE(1) | TR_LAST_AND_SEND_ACK)
93131 +#define TR_TESTANDWRITE        (0x1e | SET_TR_SIZE(3) | TR_LAST_AND_SEND_ACK)
93132 +#define TR_INPUT_Q_GETINDEX    (0x1f | SET_TR_SIZE(0))
93133 +
93134 +
93135 +
93136 +/* TraceRoute format */
93137 +#define TR_TRACEROUTE0_CHANID(val)             ((val) & 1)                     /* 0     Chan Id */
93138 +#define TR_TRACEROUTE0_LINKID(val)             (((val) >> 1) & 7)              /* 1:3   Link Id */
93139 +#define TR_TRACEROUTE0_REVID(val)              (((val) >> 4) & 7)              /* 4:6   Revision Id */
93140 +#define TR_TRACEROUTE0_BCAST_PIN(val)          (((val) >> 7) & 1)              /* 7     Bcast Top Pin */
93141 +#define TR_TRACEROUTE0_LNR(val)                        (((val) >> 8) & 0xFF)           /* 8:15  Global Link Not Ready */
93142 +
93143 +#define TR_TRACEROUTE1_ROUTES_SELECTED(val)    ((val & 0xFF))                  /* 0:7   Routes Selected */
93144 +#define TR_TRACEROUTE1_BCAST_TOP(val)          (((val) >> 8) & 7)              /* 8:10  Broadcast Top */
93145 +#define TR_TRACEROUTE1_BCAST_BOTTOM(val)       (((val) >> 12) & 7)             /* 12:14 Broadcast Bottom */
93146 +
93147 +#endif /* _ELAN4_TRANSACTIONTYPE_H */
93148 diff -urN clean/include/elan4/types.h linux-2.6.9/include/elan4/types.h
93149 --- clean/include/elan4/types.h 1969-12-31 19:00:00.000000000 -0500
93150 +++ linux-2.6.9/include/elan4/types.h   2003-09-04 08:39:17.000000000 -0400
93151 @@ -0,0 +1,69 @@
93152 +/*
93153 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93154 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
93155 + *
93156 + *    For licensing information please see the supplied COPYING file
93157 + *
93158 + */
93159 +
93160 +#ifndef __ELAN4_TYPES_H
93161 +#define __ELAN4_TYPES_H
93162 +
93163 +#ident "@(#)$Id: types.h,v 1.9 2003/09/04 12:39:17 david Exp $"
93164 +/*      $Source: /cvs/master/quadrics/elan4hdr/types.h,v $*/
93165 +
93166 +#include <qsnet/config.h>
93167 +/*
93168 + * "flip" values for correctly indexing into
93169 + * block data which was copied from the Elan
93170 + * using 64 bit accesses.
93171 + */
93172 +#if defined(__LITTLE_ENDIAN__)
93173 +#  define ByteEndianFlip  0
93174 +#  define ShortEndianFlip 0
93175 +#  define WordEndianFlip  0
93176 +#else
93177 +#  define ByteEndianFlip  7
93178 +#  define ShortEndianFlip 3
93179 +#  define WordEndianFlip  1
93180 +#endif
93181 +
93182 +
93183 +#ifndef _ASM
93184 +
93185 +typedef signed int        E4_int;
93186 +typedef unsigned int              E4_uint;
93187 +
93188 +typedef signed char       E4_int8;
93189 +typedef unsigned char     E4_uint8;
93190 +
93191 +typedef signed short      E4_int16;
93192 +typedef unsigned short            E4_uint16;
93193 +
93194 +typedef signed int        E4_int32;
93195 +typedef unsigned int              E4_uint32;
93196 +
93197 +#ifdef _LP64
93198 +typedef signed long        E4_int64;
93199 +typedef unsigned long      E4_uint64;
93200 +#else
93201 +typedef signed long long   E4_int64;
93202 +typedef unsigned long long E4_uint64;
93203 +#endif
93204 +
93205 +/* 64-bit Elan4 */
93206 +typedef E4_uint64         E4_Addr;
93207 +typedef E4_uint32         E4_LocPhysAddr;      /* Really 31 bits */
93208 +
93209 +#define OneK   (1024)
93210 +#define EightK (8*OneK)
93211 +
93212 +#define E4_DATATYPE_BYTE       0
93213 +#define E4_DATATYPE_SHORT      1
93214 +#define E4_DATATYPE_WORD       2
93215 +#define E4_DATATYPE_DWORD      3
93216 +
93217 +#endif /* _ASM */
93218 +
93219 +#endif /* __ELAN4_TYPES_H */
93220 +
93221 diff -urN clean/include/elan4/user.h linux-2.6.9/include/elan4/user.h
93222 --- clean/include/elan4/user.h  1969-12-31 19:00:00.000000000 -0500
93223 +++ linux-2.6.9/include/elan4/user.h    2005-04-21 07:12:06.000000000 -0400
93224 @@ -0,0 +1,347 @@
93225 +/*
93226 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
93227 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
93228 + * 
93229 + *    For licensing information please see the supplied COPYING file
93230 + *
93231 + */
93232 +
93233 +#ident "@(#)$Id: user.h,v 1.45 2005/04/21 11:12:06 mike Exp $"
93234 +/*      $Source: /cvs/master/quadrics/elan4mod/user.h,v $*/
93235 +
93236 +#ifndef __ELAN4_USER_H
93237 +#define __ELAN4_USER_H
93238 +
93239 +#include <elan/capability.h>
93240 +#include <elan4/usertrap.h>
93241 +#include <elan4/intcookie.h>
93242 +
93243 +typedef struct trap_queue
93244 +{
93245 +    unsigned   q_back;                 /* Next free space */
93246 +    unsigned   q_front;                /* First object to remove */
93247 +    unsigned   q_size;                 /* Size of queue */
93248 +    unsigned   q_count;                /* Current number of entries */
93249 +    unsigned   q_slop;                 /* FULL <=> (count+slop) == size */
93250 +} RING_QUEUE;
93251 +
93252 +#define RING_QUEUE_INIT(q,num,slop)    ((q).q_size = (num), (q).q_slop = (slop), (q).q_front = (q).q_back = 0, (q).q_count = 0)
93253 +#define RING_QUEUE_FULL(q)             ((q).q_count >= ((q).q_size - (q).q_slop))
93254 +#define RING_QUEUE_REALLY_FULL(q)      ((q).q_count == (q).q_size)
93255 +#define RING_QUEUE_EMPTY(q)            ((q).q_count == 0)
93256 +#define RING_QUEUE_NEXT(q,indx)                ((indx) = (((indx)+1) % (q).q_size))
93257 +#define RING_QUEUE_PREV(q,indx)                ((indx) = (((indx)+(q).q_size-1) % (q).q_size))
93258 +#define RING_QUEUE_ADD(q)              (RING_QUEUE_NEXT(q ,(q).q_back),  (++(q).q_count) >= ((q).q_size - (q).q_slop))
93259 +#define RING_QUEUE_REMOVE(q)           (RING_QUEUE_NEXT(q, (q).q_front), (--(q).q_count) == 0)
93260 +#define RING_QUEUE_ADD_FRONT(q)                (RING_QUEUE_PREV(q, (q).q_front), (++(q).q_count) >= ((q).q_size - (q).q_slop))
93261 +#define RING_QUEUE_ENTRY(qArea,indx)   (&(qArea)[(indx)])
93262 +#define RING_QUEUE_FRONT(q,qArea)      RING_QUEUE_ENTRY(qArea, (q).q_front)
93263 +#define RING_QUEUE_BACK(q,qArea)       RING_QUEUE_ENTRY(qArea, (q).q_back)
93264 +#define RING_QUEUE_ITERATE(q,idx)      for (idx = (q).q_front; idx != (q).q_back; idx = (((idx) + 1) % (q).q_size))
93265 +
93266 +typedef struct user_rgn
93267 +{
93268 +    struct user_rgn *rgn_mnext;                                        /* Doubly linked list of regions */
93269 +    struct user_rgn *rgn_mprev;                                        /*   sorted on main address */ 
93270 +    virtaddr_t       rgn_mbase;                                        /* main address of base of region */
93271 +
93272 +    struct user_rgn *rgn_enext;                                        /* Doubly linked list of regions */
93273 +    struct user_rgn *rgn_eprev;                                        /*   sorted on elan address */
93274 +    E4_Addr         rgn_ebase;                                 /* elan address of base of region */
93275 +
93276 +    unsigned long    rgn_len;                                  /* length of region */
93277 +    unsigned        rgn_perm;                                  /* elan access permission */
93278 +} USER_RGN;
93279 +
93280 +typedef struct user_vpseg
93281 +{ 
93282 +    struct list_head   vps_link;
93283 +
93284 +    unsigned short     vps_process;                            /* virtual process number */
93285 +    unsigned short     vps_entries;                            /*   and # virtual processes */
93286 +
93287 +    unsigned           vps_type;
93288 +    union
93289 +    {
93290 +       struct {
93291 +           ELAN_CAPABILITY        *cap;
93292 +           E4_VirtualProcessEntry *routes;
93293 +       } p2p;
93294 +#define vps_p2p_cap    vps_u.p2p.cap
93295 +#define vps_p2p_routes  vps_u.p2p.routes
93296 +
93297 +       struct {
93298 +           unsigned short lowvp;
93299 +           unsigned short highvp;
93300 +       } bcast;
93301 +#define vps_bcast_lowvp                vps_u.bcast.lowvp
93302 +#define vps_bcast_highvp       vps_u.bcast.highvp
93303 +    } vps_u;
93304 +} USER_VPSEG;
93305 +
93306 +/* values for vps_type */
93307 +#define USER_VPSEG_P2P         0
93308 +#define USER_VPSEG_BCAST       1
93309 +
93310 +typedef struct user_cq
93311 +{
93312 +    struct list_head ucq_link;
93313 +
93314 +    ELAN4_CQ       *ucq_cq;                                    /* the real command queue */
93315 +
93316 +    unsigned char    ucq_state;                                        /* command queue state */
93317 +    unsigned char    ucq_errored;                              /* command queue has errored */
93318 +    unsigned char    ucq_flags;                                        /* flags */
93319 +    ELAN4_CPROC_TRAP ucq_trap;                                 /* trap state */
93320 +
93321 +    atomic_t        ucq_ref;                                   /* # references to this cq (mmaps) */
93322 +} USER_CQ;
93323 +
93324 +/* values for ucq_state */
93325 +#define UCQ_RUNNING                     0                      /* command queue is running */
93326 +#define UCQ_TRAPPED                     1                      /* command queue has trapped */
93327 +#define UCQ_NEEDS_RESTART                2                     /* command queue has trapped, and needs restarting */
93328 +#define UCQ_STOPPED                     3                      /* command queue has trapped, and delivered to user */
93329 +
93330 +/* values for ucq_flags */
93331 +#define UCQ_SYSTEM             (1 << 0)
93332 +#define UCQ_REORDER            (1 << 1)
93333 +
93334 +extern int num_fault_save;
93335 +extern int min_fault_pages;
93336 +extern int max_fault_pages;
93337 +
93338 +typedef struct fault_save
93339 +{
93340 +    struct fault_save           *next;
93341 +    E4_Addr                      addr;
93342 +    E4_uint32                    count;
93343 +} FAULT_SAVE;
93344 +
93345 +typedef struct user_iproc_trap
93346 +{
93347 +    unsigned char     ut_state;
93348 +    ELAN4_IPROC_TRAP  ut_trap;
93349 +} USER_IPROC_TRAP;
93350 +
93351 +/* values for ut_state */
93352 +#define UTS_IPROC_RUNNING                      0
93353 +#define UTS_IPROC_TRAPPED                      1
93354 +#define UTS_IPROC_RESOLVING                    2
93355 +#define UTS_IPROC_EXECUTE_PACKET               3
93356 +#define UTS_IPROC_EXECUTING                    4
93357 +#define UTS_IPROC_NETWORK_ERROR                        5
93358 +#define UTS_IPROC_STOPPED                      6
93359 +
93360 +typedef struct user_ctxt_entry
93361 +{
93362 +    struct list_head    cent_link;                                     /* entry chained on context */
93363 +    ELAN_CAPABILITY    *cent_cap;                                      /* capability we attached with */
93364 +} USER_CTXT_ENTRY;
93365 +
93366 +typedef struct user_ctxt
93367 +{
93368 +    ELAN4_CTXT         uctx_ctxt;                              /* is also an elan context */
93369 +
93370 +    spinlock_t        uctx_spinlock;                           /* spinlock for items used with interrupt handler */
93371 +    kcondvar_t        uctx_wait;                               /* place to sleep (traphandler/swapout/swapin/neterr fixup) */
93372 +
93373 +    unsigned          uctx_status;                             /* status                               (uctx_spinlock) */
93374 +
93375 +    pid_t             uctx_trap_pid;                           /* pid to deliver signals to on trap */
93376 +    int                       uctx_trap_signo;                         /* signal number to deliver */
93377 +    unsigned          uctx_trap_state;                         /* state of trap handling code */
93378 +    unsigned          uctx_trap_count;                         /* count of "thread" in user_trap_handler() */
93379 +
93380 +    unsigned          uctx_int_count;                          /* # interrupts since last zeroed */
93381 +    unsigned long      uctx_int_start;                         /* tick when int_count last zeroed */
93382 +    unsigned long      uctx_int_delay;                         /* # ticks to delay next wakeup */
93383 +    struct timer_list  uctx_int_timer;                         /* and timer to use to delay signal */
93384 +    struct timer_list  uctx_shuffle_timer;                     /* and timer to use to delay shuffle signal */
93385 +
93386 +    struct timer_list  uctx_neterr_timer;                      /* network error timer */
93387 +
93388 +    struct list_head   uctx_vpseg_list;                                /* list of vp segments we've got */
93389 +    kmutex_t           uctx_vpseg_lock;                                /*   and lock to protect it. */
93390 +    ELAN4_ROUTE_TABLE *uctx_routetable;                                /* our virtual process table */
93391 +    ELAN_POSITION      uctx_position;                          /* position in network */
93392 +
93393 +    struct list_head   uctx_cent_list;                                 /* list of attached network contexts */
93394 +
93395 +    USER_CQ          *uctx_ddcq;                               /* command queue for re-issuing traps */
93396 +    E4_uint64         uctx_ddcq_insertcnt;                     /* # dwords inserted into command queue */
93397 +    E4_uint64          uctx_ddcq_completed;                    /* last "completed" write was here */
93398 +    int                       uctx_ddcq_intr;                          /* count of outstanding ddcq interrupts */
93399 +
93400 +    ELAN4_HALTOP       uctx_haltop;                            /* halt operation for flushing */
93401 +    ELAN4_DMA_FLUSHOP  uctx_dma_flushop;                       /* flush operation for flushing dma runqueue */
93402 +
93403 +    INTCOOKIE_TABLE   *uctx_intcookie_table;                   /* table of interrupt cookies (shared with other uctxs for this task) */
93404 +
93405 +    kmutex_t          uctx_cqlock;                             /* lock for create/destroy cqs */
93406 +    struct list_head   uctx_cqlist;                            /* list of command queues               (uctx_cqlock,uctx_spinlock) */
93407 +
93408 +    ELAN4_DPROC_TRAP  *uctx_dprocTraps;                                /* queue of dproc traps to resolve/reissue */
93409 +    RING_QUEUE        uctx_dprocTrapQ;
93410 +
93411 +    ELAN4_TPROC_TRAP  *uctx_tprocTraps;                                /* queue of tproc traps to resolve/reissue */
93412 +    RING_QUEUE         uctx_tprocTrapQ;
93413 +
93414 +    ELAN4_EPROC_TRAP  *uctx_eprocTraps;                                /* queue of eproc traps to resolve */
93415 +    RING_QUEUE        uctx_eprocTrapQ;
93416 +
93417 +    USER_IPROC_TRAP    uctx_iprocTrap[2];                      /* input trap state, 1 per virtual channel */
93418 +
93419 +    E4_DMA           *uctx_dmas;                               /* queue of dmas to restart */
93420 +    RING_QUEUE         uctx_dmaQ;
93421 +    
93422 +    E4_ThreadRegs     *uctx_threads;                           /* queue of threads to restart */
93423 +    RING_QUEUE         uctx_threadQ;
93424 +
93425 +    ELAN4_NETERR_MSG  *uctx_msgs;                              /* queue of neterr messages */
93426 +    RING_QUEUE        uctx_msgQ;
93427 +    kmutex_t          uctx_rgnmutex;                           /* lock for create/destroy regions */
93428 +    spinlock_t        uctx_rgnlock;                            /* spinlock to protect linked lists */
93429 +    USER_RGN         *uctx_mrgns;                              /* Doubly linked list of memory regions (uctx_rgnlock) */
93430 +    USER_RGN         *uctx_mtail;                              /* Last memory region on list           (uctx_rgnlock) */
93431 +    USER_RGN         *uctx_mrgnlast;                           /* Last region 'hit'                    (uctx_rgnlock) */
93432 +
93433 +    USER_RGN         *uctx_ergns;                              /* Doubly linked list of memory regions (uctx_rgnlock) */
93434 +    USER_RGN         *uctx_etail;                              /* Last memory region on list           (uctx_rgnlock) */
93435 +    USER_RGN         *uctx_ergnlast;                           /* Last region 'hit'                    (uctx_rgnlock) */
93436 +
93437 +    ELAN4_USER_PAGE   *uctx_upage;                             /* kernel page shared with user */
93438 +    sdramaddr_t               uctx_trampoline;                         /* sdram page for tproc trampoline */
93439 +
93440 +    E4_Addr           uctx_upage_addr;                         /*   elan addr page mapped into */
93441 +    E4_Addr           uctx_trestart_addr;                      /* address of thread restart code */
93442 +    FAULT_SAVE         *uctx_faults;
93443 +    FAULT_SAVE         *uctx_fault_list;
93444 +    int                 uctx_num_fault_save;
93445 +    spinlock_t          uctx_fault_lock;
93446 +} USER_CTXT;
93447 +
93448 +/* bit values for uctx_status */
93449 +#define UCTX_EXITING                           (1 << 0)                /* context is exiting. */
93450 +#define UCTX_USER_FILTERING                    (1 << 1)                /* user requested context filter */
93451 +#define UCTX_USER_STOPPED                      (1 << 2)                /* user requested stop */
93452 +
93453 +#define UCTX_SWAPPING                          (1 << 3)                /* context is swapping out */
93454 +#define UCTX_SWAPPED                           (1 << 4)                /* context is swapped out */
93455 +
93456 +#define UCTX_STOPPING                          (1 << 5)                /* stopping elan from running this context */
93457 +#define UCTX_STOPPED                           (1 << 6)                /* elan no longer running this context */
93458 +
93459 +#define UCTX_EPROC_QUEUE_FULL                  (1 << 7)                /* reasons for stopping running */
93460 +#define UCTX_DPROC_QUEUE_FULL                  (1 << 8)
93461 +#define UCTX_TPROC_QUEUE_FULL                  (1 << 9)
93462 +#define UCTX_IPROC_CH0_TRAPPED                 (1 << 10)
93463 +#define UCTX_IPROC_CH1_TRAPPED                 (1 << 11)
93464 +
93465 +#define UCTX_NETERR_TIMER                      (1 << 12)
93466 +#define UCTX_NETERR_FIXUP                      (1 << 13)
93467 +
93468 +#define UCTX_EPROC_QUEUE_OVERFLOW              (1 << 14)
93469 +#define UCTX_DPROC_QUEUE_OVERFLOW              (1 << 15)
93470 +#define UCTX_TPROC_QUEUE_OVERFLOW              (1 << 16)
93471 +
93472 +#define UCTX_EPROC_QUEUE_ERROR                 (1 << 17)
93473 +#define UCTX_DPROC_QUEUE_ERROR                 (1 << 18)
93474 +#define UCTX_TPROC_QUEUE_ERROR                 (1 << 19)
93475 +
93476 +#define UCTX_STOPPED_REASONS                   (UCTX_EPROC_QUEUE_FULL | UCTX_DPROC_QUEUE_FULL | UCTX_TPROC_QUEUE_FULL)
93477 +#define UCTX_SWAPPED_REASONS                   (UCTX_EXITING | UCTX_USER_STOPPED | UCTX_NETERR_FIXUP)
93478 +#define UCTX_NACKING_REASONS                   (UCTX_USER_FILTERING | UCTX_IPROC_CH0_TRAPPED | UCTX_IPROC_CH1_TRAPPED)
93479 +
93480 +#define UCTX_OVERFLOW_REASONS                  (UCTX_EPROC_QUEUE_OVERFLOW | UCTX_DPROC_QUEUE_OVERFLOW | UCTX_TPROC_QUEUE_OVERFLOW)
93481 +#define UCTX_ERROR_REASONS                     (UCTX_EPROC_QUEUE_ERROR | UCTX_DPROC_QUEUE_ERROR | UCTX_TPROC_QUEUE_ERROR)
93482 +
93483 +#define UCTX_RUNNABLE(uctx)                    (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS)) == 0)
93484 +#define UCTX_NACKING(uctx)                     (((uctx)->uctx_status & (UCTX_SWAPPED_REASONS | UCTX_STOPPED_REASONS | UCTX_NACKING_REASONS)) != 0)
93485 +
93486 +/* values for uctx_trap_signalled */
93487 +#define UCTX_TRAP_IDLE                         0
93488 +#define UCTX_TRAP_SLEEPING                     1
93489 +#define UCTX_TRAP_SIGNALLED                    2
93490 +#define UCTX_TRAP_ACTIVE                       3
93491 +
93492 +extern int        user_p2p_route_options;
93493 +extern int        user_bcast_route_options;
93494 +extern int       user_dproc_retry_count;
93495 +extern int       user_cproc_retry_count;
93496 +extern int       user_ioproc_enabled;
93497 +extern int       user_pagefault_enabled;
93498 +
93499 +extern USER_CTXT *user_alloc (ELAN4_DEV *dev);
93500 +extern void       user_free (USER_CTXT *uctx);
93501 +extern void       user_swapout (USER_CTXT *uctx, unsigned reason);
93502 +extern void       user_swapin (USER_CTXT *uctx, unsigned reason);
93503 +extern int        user_attach (USER_CTXT *uctx, ELAN_CAPABILITY *cap);
93504 +extern void       user_detach (USER_CTXT *uctx, ELAN_CAPABILITY *cap);
93505 +extern void       user_block_inputter (USER_CTXT *uctx, unsigned blocked);
93506 +extern int        user_alloc_trap_queues (USER_CTXT *uctx, unsigned ndproc_traps, unsigned neproc_traps, 
93507 +                                         unsigned ntproc_traps, unsigned nthreads, unsigned ndmas);
93508 +
93509 +extern int        user_add_p2pvp (USER_CTXT *uctx, unsigned process, ELAN_CAPABILITY *cap);
93510 +extern int        user_add_bcastvp (USER_CTXT *uctx, unsigned process, unsigned lowvp, unsigned highvp);
93511 +extern int        user_removevp (USER_CTXT *uctx, unsigned process);
93512 +
93513 +extern int        user_set_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route);
93514 +extern int        user_reset_route (USER_CTXT *uctx, unsigned process);
93515 +extern int        user_get_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route);
93516 +extern int        user_check_route (USER_CTXT *uctx, unsigned process, E4_VirtualProcessEntry *route, unsigned *error);
93517 +extern int       user_send_neterr_msg (USER_CTXT *uctx, unsigned int vp, unsigned int nctx, unsigned int retries, ELAN4_NETERR_MSG *msg);
93518 +extern int        user_neterr_sten (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop);
93519 +extern int        user_neterr_dma (USER_CTXT *uctx, unsigned int vp, E4_uint64 cookie, int waitforeop);
93520 +
93521 +extern int        user_resume_eproc_trap (USER_CTXT *uctx, E4_Addr addr);
93522 +extern int        user_resume_cproc_trap (USER_CTXT *uctx, unsigned indx);
93523 +extern int        user_resume_dproc_trap (USER_CTXT *uctx, E4_DMA *dma);
93524 +extern int        user_resume_tproc_trap (USER_CTXT *uctx, E4_ThreadRegs *regs);
93525 +extern int        user_resume_iproc_trap (USER_CTXT *uctx, unsigned channel, unsigned trans,
93526 +                                         E4_IprocTrapHeader *hdrp, E4_IprocTrapData *datap);
93527 +
93528 +extern int        user_trap_handler (USER_CTXT *uctx, ELAN4_USER_TRAP *utrapp, int nticks);
93529 +extern USER_CQ   *user_findcq (USER_CTXT *uctx, unsigned num);
93530 +extern USER_CQ   *user_alloccq (USER_CTXT *uctx, unsigned size, unsigned perm, unsigned flags);
93531 +extern void       user_freecq (USER_CTXT *uctx, USER_CQ *cq);
93532 +extern void       user_dropcq (USER_CTXT *uctx, USER_CQ *cq);
93533 +
93534 +/* user_osdep.c */
93535 +extern int        user_load_range (USER_CTXT *uctx, E4_Addr addr, unsigned long nbytes, E4_uint32 fsr);
93536 +extern void       user_update_main (USER_CTXT *uctx, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long start, unsigned long len);
93537 +extern void       user_unload_main (USER_CTXT *uctx, unsigned long start, unsigned long len);
93538 +
93539 +
93540 +/* regions.c */
93541 +extern USER_RGN  *user_findrgn_elan (USER_CTXT *uctx, E4_Addr addr, int tail);
93542 +extern USER_RGN  *user_findrgn_main (USER_CTXT *uctx, virtaddr_t addr, int tail);
93543 +extern USER_RGN  *user_rgnat_elan (USER_CTXT *uctx, E4_Addr addr);
93544 +extern USER_RGN  *user_rgnat_main (USER_CTXT *uctx, virtaddr_t addr);
93545 +extern int        user_setperm (USER_CTXT *uctx, virtaddr_t maddr, E4_Addr eaddr, unsigned long len, unsigned perm);
93546 +extern void       user_clrperm (USER_CTXT *uctx, E4_Addr addr, unsigned long len);
93547 +extern int        user_checkperm (USER_CTXT *uctx, E4_Addr raddr, unsigned long rsize, unsigned access);
93548 +extern virtaddr_t user_elan2main (USER_CTXT *uctx, E4_Addr addr);
93549 +extern E4_Addr    user_main2elan (USER_CTXT *uctx, virtaddr_t addr);
93550 +extern void       user_preload_main (USER_CTXT *uctx, virtaddr_t addr, unsigned long len);
93551 +extern void       user_freergns (USER_CTXT *uctx);
93552 +
93553 +/* user_ddcq.c */
93554 +extern int        user_ddcq_check (USER_CTXT *uctx, unsigned num);
93555 +extern int        user_ddcq_flush (USER_CTXT *uctx);
93556 +extern void       user_ddcq_intr (USER_CTXT *uctx);
93557 +extern void       user_ddcq_write_dword (USER_CTXT *uctx, E4_Addr addr, E4_uint64 value);
93558 +extern void       user_ddcq_interrupt (USER_CTXT *uctx, E4_uint64 cookie);
93559 +extern void       user_ddcq_run_dma (USER_CTXT *uctx, E4_DMA *dma);
93560 +extern void       user_ddcq_run_thread (USER_CTXT *uctx, E4_ThreadRegs *regs);
93561 +extern void       user_ddcq_setevent (USER_CTXT *uctx, E4_Addr addr);
93562 +extern void       user_ddcq_seteventn (USER_CTXT *uctx, E4_Addr addr, E4_uint32 count);
93563 +extern void       user_ddcq_waitevent (USER_CTXT *uctx, E4_Addr addr, E4_uint64 CountAndType, E4_uint64 Param0, E4_uint64 Param1);
93564 +
93565 +
93566 +/*
93567 + * Local variables:
93568 + * c-file-style: "stroustrup"
93569 + * End:
93570 + */
93571 +#endif /* __ELAN4_USER_H */
93572 diff -urN clean/include/elan4/userregs.h linux-2.6.9/include/elan4/userregs.h
93573 --- clean/include/elan4/userregs.h      1969-12-31 19:00:00.000000000 -0500
93574 +++ linux-2.6.9/include/elan4/userregs.h        2004-10-06 10:50:38.000000000 -0400
93575 @@ -0,0 +1,383 @@
93576 +/*
93577 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
93578 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
93579 + *
93580 + *    For licensing information please see the supplied COPYING file
93581 + *
93582 + */
93583 +
93584 +#ifndef __ELAN4_USERREGS_H
93585 +#define __ELAN4_USERREGS_H
93586 +
93587 +#ident "$Id: userregs.h,v 1.15 2004/10/06 14:50:38 addy Exp $"
93588 +/*      $Source: /cvs/master/quadrics/elan4hdr/userregs.h,v $*/
93589 +
93590 +#ifdef __cplusplus
93591 +extern "C" {
93592 +#endif
93593 +
93594 +/*
93595 + * Statistic control reg values
93596 + * Each 4-bit nibble of the control word specifies what statistic
93597 + * is to be recorded in each of the 8 statistic counters
93598 + */
93599 +#define COUNT_REG0_SHIFT   32ull
93600 +#define COUNT_REG1_SHIFT   36ull
93601 +#define COUNT_REG2_SHIFT   40ull
93602 +#define COUNT_REG3_SHIFT   44ull
93603 +#define COUNT_REG4_SHIFT   48ull
93604 +#define COUNT_REG5_SHIFT   52ull
93605 +#define COUNT_REG6_SHIFT   56ull
93606 +#define COUNT_REG7_SHIFT   60ull
93607 +
93608 +
93609 +/* Count reg 0 */
93610 +#define STC_INPUT_NON_WRITE_BLOCKS     (0x0ull << COUNT_REG0_SHIFT)
93611 +#define STP_DMA_EOP_WAIT_ACK           (0x1ull << COUNT_REG0_SHIFT)
93612 +#define STP_TPROC_RUNNING              (0x2ull << COUNT_REG0_SHIFT)
93613 +#define STC_STEN_PKTS_OPEN              (0x3ull << COUNT_REG0_SHIFT)
93614 +#define STP_CPROC_HOLDS_FFU_DP         (0x4ull << COUNT_REG0_SHIFT)
93615 +#define STC_TLB_TABLE_WALKS             (0x5ull << COUNT_REG0_SHIFT)
93616 +#define STC_CACHE_HITS                  (0x6ull << COUNT_REG0_SHIFT)
93617 +#define STC_PCI_SLAVE_READS             (0x7ull << COUNT_REG0_SHIFT)
93618 +#define STP_PCI_WAITING_FOR_GNT         (0x8ull << COUNT_REG0_SHIFT)
93619 +#define STP_SYS_CLOCK_RATE0            (0xfull << COUNT_REG0_SHIFT)
93620 +
93621 +#define STATS_REG0_NAMES {                     \
93622 +          "STC_INPUT_NON_WRITE_BLOCKS",        \
93623 +          "STP_DMA_EOP_WAIT_ACK",              \
93624 +          "STP_TPROC_RUNNING",                 \
93625 +          "STC_STEN_PKTS_OPEN",                \
93626 +          "STP_CPROC_HOLDS_FFU_DP",            \
93627 +          "STC_TLB_TABLE_WALKS",               \
93628 +          "STC_CACHE_HITS",                    \
93629 +          "STC_PCI_SLAVE_READS",               \
93630 +          "STP_PCI_WAITING_FOR_GNT",           \
93631 +          "STP_SYS_CLOCK_RATE0"                \
93632 +}
93633 +
93634 +/* Count reg 1 */
93635 +#define STC_INPUT_WRITE_BLOCKS         (0x0ull << COUNT_REG1_SHIFT)
93636 +#define STP_DMA_DATA_TRANSMITTING      (0x1ull << COUNT_REG1_SHIFT)
93637 +#define STC_CPROC_VALUES_EXE           (0x2ull << COUNT_REG1_SHIFT)
93638 +#define STC_STEN_TRANS_SENT            (0x3ull << COUNT_REG1_SHIFT)
93639 +#define STP_TPROC_DQ_HOLDS_FFU_DP      (0x4ull << COUNT_REG1_SHIFT)
93640 +#define STC_TPROC_TLB_HITS             (0x5ull << COUNT_REG1_SHIFT)
93641 +#define STC_CACHE_ALLOC_MISSES         (0x6ull << COUNT_REG1_SHIFT)
93642 +#define STP_PCI_MASTER_READ_WAITING    (0x7ull << COUNT_REG1_SHIFT)
93643 +#define STP_PCI_WAITING_FOR_DEVSEL      (0x8ull << COUNT_REG1_SHIFT)
93644 +#define STP_SYS_CLOCK_RATE1            (0xfull << COUNT_REG1_SHIFT)
93645 +
93646 +#define STATS_REG1_NAMES {                    \
93647 +          "STC_INPUT_WRITE_BLOCKS",            \
93648 +          "STP_DMA_DATA_TRANSMITTING",         \
93649 +          "STC_CPROC_VALUES_EXE",              \
93650 +          "STC_STEN_TRANS_SENT",               \
93651 +          "STP_TPROC_DQ_HOLDS_FFU_DP",         \
93652 +          "STC_TPROC_TLB_HITS",                \
93653 +          "STC_CACHE_ALLOC_MISSES",            \
93654 +          "STP_PCI_MASTER_READ_WAITING",       \
93655 +          "STP_PCI_WAITING_FOR_DEVSEL",        \
93656 +          "STP_SYS_CLOCK_RATE1"                \
93657 +}
93658 +
93659 +/* Count reg 2 */
93660 +#define STC_INPUT_PKTS                 (0x0ull << COUNT_REG2_SHIFT)
93661 +#define STP_DMA_WAITING_MEM            (0x1ull << COUNT_REG2_SHIFT)
93662 +#define STC_CPROC_TRANSFERS             (0x2ull << COUNT_REG2_SHIFT)
93663 +#define STP_STEN_WAIT_NETWORK_BUSY     (0x3ull << COUNT_REG2_SHIFT)
93664 +#define STP_IPROC_HOLDS_FFU_DP         (0x4ull << COUNT_REG2_SHIFT)
93665 +#define STC_UNITS_TLB_HITS             (0x5ull << COUNT_REG2_SHIFT)
93666 +#define STC_CACHE_NON_ALLOC_MISSES      (0x6ull << COUNT_REG2_SHIFT)
93667 +#define STP_PCI_MASTER_WRITE_WAITING   (0x7ull << COUNT_REG2_SHIFT)
93668 +#define STC_PCI_OUT_OF_ORDER_SPLIT_COMP (0x8ull << COUNT_REG2_SHIFT)
93669 +#define STP_SYS_CLOCK_RATE2            (0xfull << COUNT_REG2_SHIFT)
93670 +
93671 +#define STATS_REG2_NAMES {                    \
93672 +          "STC_INPUT_PKTS",                    \
93673 +          "STP_DMA_WAITING_MEM",               \
93674 +          "STC_CPROC_TRANSFERS",               \
93675 +          "STP_STEN_WAIT_NETWORK_BUSY",        \
93676 +          "STP_IPROC_HOLDS_FFU_DP",            \
93677 +          "STC_UNITS_TLB_HITS",                \
93678 +          "STC_CACHE_NON_ALLOC_MISSES",        \
93679 +          "STP_PCI_MASTER_WRITE_WAITING",      \
93680 +          "STC_PCI_OUT_OF_ORDER_SPLIT_COMP",   \
93681 +          "STP_SYS_CLOCK_RATE2"                \
93682 +}
93683 +
93684 +/* Count reg 3 */
93685 +#define STC_INPUT_PKTS_REJECTED         (0x0ull << COUNT_REG3_SHIFT)
93686 +#define STP_DMA_WAIT_NETWORK_BUSY       (0x1ull << COUNT_REG3_SHIFT)
93687 +#define STC_CPROC_PREFETCH_SDRAM        (0x2ull << COUNT_REG3_SHIFT)
93688 +#define STP_STEN_BLOCKED_ACKS_OR_VC     (0x3ull << COUNT_REG3_SHIFT)
93689 +#define STP_EPROC_HOLDS_FFU_DP          (0x4ull << COUNT_REG3_SHIFT)
93690 +#define STP_TPROC_BLOCKED_MEMSYS        (0x5ull << COUNT_REG3_SHIFT)
93691 +#define STC_CACHE_WRITE_BACKS           (0x6ull << COUNT_REG3_SHIFT)
93692 +#define STP_PCI_SLAVE_READ_WAITING      (0x7ull << COUNT_REG3_SHIFT)
93693 +#define STP_PCI_IDLE_CYCLES            (0x8ull << COUNT_REG3_SHIFT)
93694 +#define STP_SYS_CLOCK_RATE3            (0xfull << COUNT_REG3_SHIFT)
93695 +
93696 +#define STATS_REG3_NAMES {                    \
93697 +          "STC_INPUT_PKTS_REJECTED",           \
93698 +          "STP_DMA_WAIT_NETWORK_BUSY",         \
93699 +          "STC_CPROC_PREFETCH_SDRAM",          \
93700 +          "STP_STEN_BLOCKED_ACKS_OR_VC",       \
93701 +          "STP_EPROC_HOLDS_FFU_DP",            \
93702 +          "STP_TPROC_BLOCKED_MEMSYS",          \
93703 +          "STC_CACHE_WRITE_BACKS",             \
93704 +          "STP_PCI_SLAVE_READ_WAITING",        \
93705 +          "STP_PCI_IDLE_CYCLES",               \
93706 +          "STP_SYS_CLOCK_RATE3"                \
93707 +}
93708 +
93709 +/* Count reg 4 */
93710 +#define STP_INPUT_DATA_TRANSMITTING    (0x0ull << COUNT_REG4_SHIFT)
93711 +#define STC_DMA_PKTS_ACCEPTED          (0x1ull << COUNT_REG4_SHIFT)
93712 +#define STC_CPROC_FLUSH_REQ_SDRAM      (0x2ull << COUNT_REG4_SHIFT)
93713 +#define STP_STEN_EOP_WAIT_ACK          (0x3ull << COUNT_REG4_SHIFT)
93714 +#define STP_DMA_HOLDS_FFU_DP           (0x4ull << COUNT_REG4_SHIFT)
93715 +#define STP_UNIT_BLOCKED_MEMSYS        (0x5ull << COUNT_REG4_SHIFT)
93716 +#define STC_PCI_MASTER_READS           (0x6ull << COUNT_REG4_SHIFT)
93717 +#define STP_PCI_SLAVE_WRITE_WAITING    (0x7ull << COUNT_REG4_SHIFT)
93718 +#define STC_INPUT_PACKETS_DISCARDED    (0x8ull << COUNT_REG4_SHIFT)
93719 +#define STP_SYS_CLOCK_RATE4            (0xfull << COUNT_REG4_SHIFT)
93720 +
93721 +#define STATS_REG4_NAMES {                    \
93722 +          "STP_INPUT_DATA_TRANSMITTING",       \
93723 +          "STC_DMA_PKTS_ACCEPTED",             \
93724 +          "STC_CPROC_FLUSH_REQ_SDRAM",         \
93725 +          "STP_STEN_EOP_WAIT_ACK",             \
93726 +          "STP_DMA_HOLDS_FFU_DP",              \
93727 +          "STP_UNIT_BLOCKED_MEMSYS",           \
93728 +          "STC_PCI_MASTER_READS",              \
93729 +          "STP_PCI_SLAVE_WRITE_WAITING",       \
93730 +          "STC_INPUT_PACKETS_DISCARDED",       \
93731 +          "STP_SYS_CLOCK_RATE4"                \
93732 +}
93733 +
93734 +/* Count reg 5 */
93735 +#define STP_INPUT_WAITING_NETWORK_DATA  (0x0ull << COUNT_REG5_SHIFT)
93736 +#define STC_DMA_PKTS_REJECTED           (0x1ull << COUNT_REG5_SHIFT)
93737 +#define STC_CPROC_INSERT_CACHE_MISSES   (0x2ull << COUNT_REG5_SHIFT)
93738 +#define STP_STEN_TRANSMITTING_DATA      (0x3ull << COUNT_REG5_SHIFT)
93739 +#define FFU_BLOCKED_DIFF_FFU_PROC       (0x4ull << COUNT_REG5_SHIFT)
93740 +#define STP_TABLE_WALKS_BLOCKED_MEMSYS  (0x5ull << COUNT_REG5_SHIFT)
93741 +#define STC_PCI_MASTER_WRITES           (0x6ull << COUNT_REG5_SHIFT)
93742 +#define STP_PCI_MASTER_HOLDS_BUS        (0x7ull << COUNT_REG5_SHIFT)
93743 +#define STC_PCI_NO_SPLIT_COMPS         (0x8ull << COUNT_REG5_SHIFT)
93744 +#define STP_SYS_CLOCK_RATE5            (0xfull << COUNT_REG5_SHIFT)
93745 +
93746 +#define STATS_REG5_NAMES {                    \
93747 +          "STP_INPUT_WAITING_NETWORK_DATA",    \
93748 +          "STC_DMA_PKTS_REJECTED",             \
93749 +          "STC_CPROC_INSERT_CACHE_MISSES",     \
93750 +          "STP_STEN_TRANSMITTING_DATA",        \
93751 +          "FFU_BLOCKED_DIFF_FFU_PROC",         \
93752 +          "STP_TABLE_WALKS_BLOCKED_MEMSYS",    \
93753 +          "STC_PCI_MASTER_WRITES",             \
93754 +          "STP_PCI_MASTER_HOLDS_BUS",          \
93755 +          "STC_PCI_NO_SPLIT_COMPS",            \
93756 +          "STP_SYS_CLOCK_RATE5"                \
93757 +}
93758 +
93759 +/* Count reg 6 */
93760 +#define STP_INPUT_BLOCKED_WAITING_TRANS (0x0ull << COUNT_REG6_SHIFT)
93761 +#define STP_TPROC_INST_STALL           (0x1ull << COUNT_REG6_SHIFT)
93762 +#define STP_CPROC_WAITING_DESCHED      (0x2ull << COUNT_REG6_SHIFT)
93763 +#define STP_STEN_PKT_OPEN_WAITING_DATA (0x3ull << COUNT_REG6_SHIFT)
93764 +#define STP_TLB_HASH_TABLE_ACCESSES    (0x4ull << COUNT_REG6_SHIFT)
93765 +#define STP_PCI_SLAVE_BLOCKED_MEMSYS   (0x5ull << COUNT_REG6_SHIFT)
93766 +#define STP_PCI_TRANSFERRING_DATA       (0x6ull << COUNT_REG6_SHIFT)
93767 +#define STP_PCI_MASTER_WAITING_BUS      (0x7ull << COUNT_REG6_SHIFT)
93768 +#define STP_PCI_READ_LATENCY           (0x8ull << COUNT_REG6_SHIFT)
93769 +#define STP_SYS_CLOCK_RATE6            (0xfull << COUNT_REG6_SHIFT)
93770 +
93771 +#define STATS_REG6_NAMES {                    \
93772 +          "STP_INPUT_BLOCKED_WAITING_TRANS",   \
93773 +          "STP_TPROC_INST_STALL",              \
93774 +          "STP_CPROC_WAITING_DESCHED",         \
93775 +          "STP_STEN_PKT_OPEN_WAITING_DATA",    \
93776 +          "STP_TLB_HASH_TABLE_ACCESSES",       \
93777 +          "STP_PCI_SLAVE_BLOCKED_MEMSYS",      \
93778 +          "STP_PCI_TRANSFERRING_DATA",         \
93779 +          "STP_PCI_MASTER_WAITING_BUS",        \
93780 +          "STP_PCI_READ_LATENCY",              \
93781 +          "STP_SYS_CLOCK_RATE6"                \
93782 +}
93783 +
93784 +/* Count reg 7 */
93785 +#define STC_INPUT_CTX_FILTER_FILL       (0x0ull << COUNT_REG7_SHIFT)   
93786 +#define STP_TPROC_LOAD_STORE_STALL      (0x1ull << COUNT_REG7_SHIFT)
93787 +#define STC_CPROC_TIMEOUTS              (0x2ull << COUNT_REG7_SHIFT)
93788 +#define STP_STEN_BLOCKED_NETWORK        (0x3ull << COUNT_REG7_SHIFT)
93789 +#define STP_TLB_CHAIN_ACCESSES          (0x4ull << COUNT_REG7_SHIFT)
93790 +#define STP_CPROC_SCHED_BLOCKED_MEMSYS  (0x5ull << COUNT_REG7_SHIFT)
93791 +#define STC_PCI_SLAVE_WRITES            (0x6ull << COUNT_REG7_SHIFT)
93792 +#define STC_PCI_DISCONNECTS_RETRIES     (0x7ull << COUNT_REG7_SHIFT)
93793 +#define STC_RING_OSCILLATOR            (0x8ull << COUNT_REG7_SHIFT)
93794 +#define STP_SYS_CLOCK_RATE7            (0xfull << COUNT_REG7_SHIFT)
93795 +
93796 +#define STATS_REG7_NAMES {                    \
93797 +          "STC_INPUT_CTX_FILTER_FILL",         \
93798 +          "STP_TPROC_LOAD_STORE_STALL",        \
93799 +          "STC_CPROC_TIMEOUTS",                \
93800 +          "STP_STEN_BLOCKED_NETWORK",          \
93801 +          "STP_TLB_CHAIN_ACCESSES",            \
93802 +          "STP_CPROC_SCHED_BLOCKED_MEMSYS",    \
93803 +          "STC_PCI_SLAVE_WRITES",              \
93804 +          "STC_PCI_DISCONNECTS_RETRIES",       \
93805 +          "STC_RING_OSCILLATOR",               \
93806 +          "STP_SYS_CLOCK_RATE7"                \
93807 +}
93808 +
93809 +#define STATS_REG_NAMES { \
93810 +    STATS_REG0_NAMES, \
93811 +    STATS_REG1_NAMES, \
93812 +    STATS_REG2_NAMES, \
93813 +    STATS_REG3_NAMES, \
93814 +    STATS_REG4_NAMES, \
93815 +    STATS_REG5_NAMES, \
93816 +    STATS_REG6_NAMES, \
93817 +    STATS_REG7_NAMES, \
93818 +}
93819 +
93820 +
93821 +#define INPUT_PERF_STATS        (STC_INPUT_NON_WRITE_BLOCKS | STC_INPUT_WRITE_BLOCKS |              \
93822 +                                STC_INPUT_PKTS | STC_INPUT_PKTS_REJECTED |                         \
93823 +                                 STC_INPUT_CTX_FILTER_FILL | STP_INPUT_DATA_TRANSMITTING |           \
93824 +                                STP_INPUT_WAITING_NETWORK_DATA | STP_INPUT_BLOCKED_WAITING_TRANS | STC_INPUT_PACKETS_DISCARDED) 
93825 +
93826 +#define DMA_PERF_STATS          (STC_DMA_PKTS_ACCEPTED | STC_DMA_PKTS_REJECTED |                    \
93827 +                                 STP_DMA_EOP_WAIT_ACK | STP_DMA_DATA_TRANSMITTING |                 \
93828 +                                STP_DMA_WAITING_MEM | STP_DMA_WAIT_NETWORK_BUSY)                 
93829 +
93830 +
93831 +#define TPROC_PERF_STATS        (STP_TPROC_RUNNING | STP_TPROC_INST_STALL |                         \
93832 +                                 STP_TPROC_LOAD_STORE_STALL)
93833 +
93834 +#define CPROC_PERF_STATS        (STC_CPROC_VALUES_EXE | STC_CPROC_TRANSFERS |                       \
93835 +                                STC_CPROC_PREFETCH_SDRAM | STC_CPROC_FLUSH_REQ_SDRAM |             \
93836 +                                STC_CPROC_INSERT_CACHE_MISSES | STP_CPROC_WAITING_DESCHED |        \
93837 +                                STC_CPROC_TIMEOUTS)
93838 +
93839 +#define STEN_PERF_STATS         (STC_STEN_PKTS_OPEN | STC_STEN_TRANS_SENT |                         \
93840 +                                STP_STEN_WAIT_NETWORK_BUSY | STP_STEN_BLOCKED_ACKS_OR_VC |         \
93841 +                                STP_STEN_EOP_WAIT_ACK | STP_STEN_TRANSMITTING_DATA |               \
93842 +                                STP_STEN_PKT_OPEN_WAITING_DATA | STP_STEN_BLOCKED_NETWORK)
93843 +
93844 +#define FFU_PREF_STATS          (STP_CPROC_HOLDS_FFU_DP | STP_TPROC_DQ_HOLDS_FFU_DP |               \
93845 +                                STP_IPROC_HOLDS_FFU_DP | STP_EPROC_HOLDS_FFU_DP |                  \
93846 +                                STP_DMA_HOLDS_FFU_DP | FFU_BLOCKED_DIFF_FFU_PROC)
93847 +
93848 +#define TABLE_WALK_PERF_STATS   (STC_TPROC_TLB_HITS | STC_UNITS_TLB_HITS |                          \
93849 +                                STP_TLB_HASH_TABLE_ACCESSES | STP_TLB_CHAIN_ACCESSES |             \
93850 +                                STC_TLB_TABLE_WALKS)
93851 +
93852 +#define ADDRESS_ARB_PERF_STATS  (STP_UNIT_BLOCKED_MEMSYS | STP_TPROC_BLOCKED_MEMSYS |               \
93853 +                                STP_TABLE_WALKS_BLOCKED_MEMSYS | STP_CPROC_SCHED_BLOCKED_MEMSYS |  \
93854 +                                STP_PCI_SLAVE_BLOCKED_MEMSYS)
93855 +
93856 +#define CACHE_PERF_STATS        (STC_CACHE_HITS | STC_CACHE_ALLOC_MISSES |                          \
93857 +                                STC_CACHE_NON_ALLOC_MISSES | STC_CACHE_WRITE_BACKS)
93858 +
93859 +
93860 +#define PCI_PERF_STATS          (STC_PCI_SLAVE_READS | STP_PCI_MASTER_READ_WAITING |                \
93861 +                                 STP_PCI_MASTER_WRITE_WAITING | STP_PCI_SLAVE_READ_WAITING |        \
93862 +                                 STP_PCI_SLAVE_WRITE_WAITING | STC_PCI_MASTER_WRITES |              \
93863 +                                 STP_PCI_TRANSFERRING_DATA | STC_PCI_SLAVE_WRITES)
93864 +
93865 +#define PCIBUS_PERF_STATS       (STP_PCI_WAITING_FOR_GNT | STP_PCI_WAITING_FOR_DEVSEL |                    \
93866 +                                STC_PCI_OUT_OF_ORDER_SPLIT_COMP | STP_PCI_IDLE_CYCLES |            \
93867 +                                STC_PCI_MASTER_READS | STP_PCI_MASTER_HOLDS_BUS |                  \
93868 +                                STP_PCI_MASTER_WAITING_BUS | STC_PCI_DISCONNECTS_RETRIES)
93869 +
93870 +                                
93871 +    extern const char *elan_stats_names[8][10];
93872 +
93873 +#define ELAN_STATS_NAME(COUNT, CONTROL) (elan_stats_names[(COUNT)][(CONTROL) & 7])
93874 +
93875 +    typedef volatile union e4_StatsControl
93876 +    {
93877 +       E4_uint64 StatsControl;
93878 +       struct
93879 +       {
93880 +#if (BYTE_ORDER == LITTLE_ENDIAN) || defined(__LITTLE_ENDIAN__)
93881 +           E4_uint32 StatCont0:4;
93882 +           E4_uint32 StatCont1:4;
93883 +           E4_uint32 StatCont2:4;
93884 +           E4_uint32 StatCont3:4;
93885 +           E4_uint32 StatCont4:4;
93886 +           E4_uint32 StatCont5:4;
93887 +           E4_uint32 StatCont6:4;
93888 +           E4_uint32 StatCont7:4;
93889 +#else
93890 +           E4_uint32 StatCont7:4;
93891 +           E4_uint32 StatCont6:4;
93892 +           E4_uint32 StatCont5:4;
93893 +
93894 +           E4_uint32 StatCont4:4;
93895 +           E4_uint32 StatCont3:4;
93896 +           E4_uint32 StatCont2:4;
93897 +           E4_uint32 StatCont1:4;
93898 +           E4_uint32 StatCont0:4;
93899 +#endif
93900 +           E4_uint32 pad;
93901 +       } s;
93902 +    } E4_StatsControl;
93903 +
93904 +typedef volatile union e4_StatsCount
93905 +{
93906 +   E4_uint64    ClockStat; 
93907 +   struct
93908 +   {
93909 +       E4_uint32 ClockLSW;     /* read only */
93910 +       E4_uint32 StatsCount;
93911 +   } s;
93912 +} E4_StatsCount;
93913 +
93914 +typedef volatile union e4_clock
93915 +{
93916 +   E4_uint64 NanoSecClock;
93917 +   struct
93918 +   {
93919 +      E4_uint32 ClockLSW;
93920 +      E4_uint32 ClockMSW;
93921 +   } s;
93922 +} E4_Clock;
93923 +#define E4_TIME( X ) ((X).NanoSecClock)
93924 +
93925 +#define ELAN4_COMMS_CLOCK_FREQUENCY    660             /* In Mhz. This is half the bit rate. */
93926 +#define ELAN4_CLOCK_ADD_VALUE          200             /* For 200ns increment rate */
93927 +#define ELAN4_CLOCK_COMMS_DIV_VALUE    (((ELAN4_COMMS_CLOCK_FREQUENCY * ELAN4_CLOCK_ADD_VALUE) / (1000 * 4)) - 1)
93928 +#define ELAN4_CLOCK_TICK_RATE          ((ELAN4_CLOCK_ADD_VALUE << 8) + ELAN4_CLOCK_COMMS_DIV_VALUE)
93929 +
93930 +typedef volatile union e4_clocktickrate
93931 +{
93932 +   E4_uint64 NanoSecClock;
93933 +   struct
93934 +   {
93935 +      E4_uint32 pad1;
93936 +      E4_uint32 TickRates;
93937 +   } s;
93938 +} E4_ClockTickRate;
93939 +
93940 +/*
93941 + * This is made into an 8k byte object.
93942 + */
93943 +typedef volatile struct _E4_User_Regs
93944 +{
93945 +   E4_StatsCount       StatCounts[8];
93946 +   E4_StatsCount       InstCount;
93947 +   E4_Clock            Clock;
93948 +   E4_StatsControl     StatCont;
93949 +   E4_ClockTickRate    ClockTickRate;
93950 +   E4_uint8            pad1[EightK - ((sizeof(E4_StatsCount)*9)+sizeof(E4_StatsControl)+
93951 +                                       sizeof(E4_Clock)+sizeof(E4_ClockTickRate))];
93952 +} E4_User_Regs;
93953 +
93954 +#ifdef __cplusplus
93955 +}
93956 +#endif
93957 +
93958 +#endif /* __ELAN4_USERREGS_H */
93959 diff -urN clean/include/elan4/usertrap.h linux-2.6.9/include/elan4/usertrap.h
93960 --- clean/include/elan4/usertrap.h      1969-12-31 19:00:00.000000000 -0500
93961 +++ linux-2.6.9/include/elan4/usertrap.h        2004-05-05 05:08:35.000000000 -0400
93962 @@ -0,0 +1,114 @@
93963 +/*
93964 + *    Copyright (c) 2001-2002 by Quadrics Supercomputers World Ltd.
93965 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
93966 + * 
93967 + *    For licensing information please see the supplied COPYING file
93968 + *
93969 + */
93970 +
93971 +#ident "@(#)$Id: usertrap.h,v 1.17 2004/05/05 09:08:35 david Exp $"
93972 +/*      $Source: /cvs/master/quadrics/elan4mod/usertrap.h,v $*/
93973 +
93974 +#ifndef __ELAN4_USERTRAP_H
93975 +#define __ELAN4_USERTRAP_H
93976 +
93977 +#ifndef _ASM
93978 +typedef struct elan4_user_page
93979 +{
93980 +    E4_uint64          upage_ddcq_completed;
93981 +} ELAN4_USER_PAGE;
93982 +
93983 +typedef struct elan4_user_trap
93984 +{
93985 +    int                                ut_type;
93986 +    unsigned                   ut_proc;
93987 +    unsigned                   ut_args[4];
93988 +
93989 +    union {
93990 +       ELAN4_EPROC_TRAP        eproc;
93991 +       ELAN4_CPROC_TRAP        cproc;
93992 +       ELAN4_DPROC_TRAP        dproc;
93993 +       ELAN4_IPROC_TRAP        iproc;
93994 +       ELAN4_TPROC_TRAP        tproc;
93995 +       ELAN4_NETERR_MSG        msg;
93996 +    }                  ut_trap;
93997 +} ELAN4_USER_TRAP;
93998 +
93999 +#endif /* _ASM */
94000 +
94001 +
94002 +/* value for ut_type */
94003 +#define UTS_FINISHED           0                               /* all pending traps have been handled */
94004 +#define UTS_RESCHEDULE         1                               /* must return to user mode and re-enter */
94005 +#define UTS_UNIMP_INSTR                2                               /* unimplemented thread instruction */
94006 +#define UTS_EXECUTE_PACKET     3                               /* iproc trap needs packet executing */
94007 +#define UTS_NETWORK_ERROR_TRAP 4                               /* network error on this trap */
94008 +#define UTS_NETWORK_ERROR_MSG  5                               /* network error message  */
94009 +#define UTS_NETWORK_ERROR_TIMER        6                               /* network error timer expired */
94010 +
94011 +#define UTS_EFAULT             -1                              /* failed to copyout trap */
94012 +#define UTS_INVALID_ADDR       -2                              /* all -ve codes mean trap could not be resolved. */
94013 +#define UTS_INVALID_VPROC      -3
94014 +#define UTS_INVALID_COMMAND    -4
94015 +#define UTS_BAD_TRAP           -5
94016 +#define UTS_ALIGNMENT_ERROR    -6
94017 +#define UTS_QUEUE_OVERFLOW     -7
94018 +#define UTS_QUEUE_ERROR                -8
94019 +#define UTS_INVALID_TRANS      -9
94020 +#define UTS_PERMISSION_DENIED  -10
94021 +#define UTS_CPROC_ERROR                -11
94022 +#define UTS_INVALID_COOKIE     -12
94023 +#define UTS_NETERR_ERROR       -13
94024 +
94025 +/* "special" values for registering handlers */
94026 +#define UTS_ALL_TRAPS          -9999
94027 +
94028 +/* value for ut_proc */
94029 +#define UTS_NOPROC             0
94030 +#define UTS_EPROC              1
94031 +#define UTS_CPROC              2
94032 +#define UTS_DPROC              3
94033 +#define UTS_TPROC              4
94034 +#define UTS_IPROC              5
94035 +#define UTS_NETERR_MSG         6
94036 +
94037 +/* unimplemented trap numbers for thread processor */
94038 +#define ELAN4_T_TRAP_INSTR(t)  (0x80202000 | ((t) & 0xFF))
94039 +
94040 +#define ELAN4_T_SYSCALL_TRAP   1
94041 +#  define ELAN4_T_OPEN         0
94042 +#  define ELAN4_T_WRITE                1
94043 +#  define ELAN4_T_READ         2
94044 +#  define ELAN4_T_IOCTL                3
94045 +#  define ELAN4_T_LSEEK                4
94046 +#  define ELAN4_T_POLL         5
94047 +#  define ELAN4_T_CLOSE                6
94048 +#  define ELAN4_T_KILL         7
94049 +#  define ELAN4_T_MMAP         8
94050 +#  define ELAN4_T_MUNMAP       9
94051 +#  define ELAN4_T_ABORT                100
94052 +#  define ELAN4_T_DEBUG                101
94053 +#  define ELAN4_T_REGDUMP      102
94054 +
94055 +#define ELAN4_T_REGDUMP_TRAP   2
94056 +
94057 +#define ELAN4_T_LIBELAN_TRAP   3
94058 +#  define ELAN4_T_TPORT_NEWBUF 0
94059 +#  define ELAN4_T_TPORT_GC     1
94060 +#  define ELAN4_T_TPORT_DEBUG  2
94061 +
94062 +#define ELAN4_T_ALLOC_TRAP     4
94063 +#  define ELAN4_T_ALLOC_ELAN   0
94064 +#  define ELAN4_T_ALLOC_MAIN   1
94065 +#  define ELAN4_T_FREE_ELAN    2
94066 +#  define ELAN4_T_FREE_MAIN    3
94067 +
94068 +/* reserved main interrupt cookies */
94069 +#define ELAN4_INT_COOKIE_DDCQ  0
94070 +
94071 +/*
94072 + * Local variables:
94073 + * c-file-style: "stroustrup"
94074 + * End:
94075 + */
94076 +#endif /* __ELAN4_USERTRAP_H */
94077 diff -urN clean/include/elan4/xsdram.h linux-2.6.9/include/elan4/xsdram.h
94078 --- clean/include/elan4/xsdram.h        1969-12-31 19:00:00.000000000 -0500
94079 +++ linux-2.6.9/include/elan4/xsdram.h  2004-03-05 07:32:04.000000000 -0500
94080 @@ -0,0 +1,59 @@
94081 +/*
94082 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
94083 + *    Copyright (c) 2002-2003 by Quadrics Ltd.
94084 + *
94085 + *    For licensing information please see the supplied COPYING file
94086 + *
94087 + */
94088 +
94089 +#ifndef __ELAN4_XSDRAM_H
94090 +#define __ELAN4_XSDRAM_H
94091 +
94092 +#ident "@(#)$Id: xsdram.h,v 1.13 2004/03/05 12:32:04 jon Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
94093 +/*      $Source: /cvs/master/quadrics/elan4hdr/xsdram.h,v $*/
94094 +
94095 +/* SAMSUNG K4H281638D-TCB3 */
94096 +
94097 +#define SDRAM_tRCF_1_SH         0
94098 +#define SDRAM_tRP_1_SH          4
94099 +#define SDRAM_tRCD_SH           8
94100 +#define SDRAM_tRRD_SH           12
94101 +#define SDRAM_tEndWr_SH         16
94102 +#define SDRAM_tEndRd_SH         20
94103 +#define SDRAM_Burst_SH          24
94104 +#define SDRAM_CL_SH             28
94105 +#define SDRAM_DsblBypass       (1ULL << 31)
94106 +#define SDRAM_RefreshRate_SH    32
94107 +#define SDRAM_RamSize_SH        34
94108 +#define SDRAM_ReadLtncy_1_SH    36
94109 +#define SDRAM_RdOffset_SH       40
94110 +#define SDRAM_FlightDelay_SH    42
94111 +
94112 +#define SDRAM_ENABLE_ECC       (1ULL << 44) // Enables error detecting on the ECC.
94113 +#define SDRAM_SDRAM_TESTING    (1ULL << 45) // Switches to test mode for checking ECC data bits
94114 +#define SDRAM_SETUP            (1ULL << 46) // Writes SDram control reg when set. Also starts
94115 +
94116 +#define SDRAM_CS_MODE0          0ULL         // 64Mbit, 128Mbit, 256Mbit, 512Mbit or 1Gbit (16-bit output)
94117 +#define SDRAM_CS_MODE1          1ULL         // 64Mbit, 128Mbit, 256Mbit or 512Mbit (8-bit output)
94118 +#define SDRAM_CS_MODE2          2ULL         // 2Gbit (16-bit output) or 1Gbit (8-bit output)
94119 +#define SDRAM_CS_MODE3          3ULL         // 4Gbit (16-bit output) or 2Gbit (8-bit output)
94120 +
94121 +#if defined(LINUX) && !defined(CONFIG_MPSAS)
94122 +#define SDRAM_STARTUP_VALUE   ((0xbULL << SDRAM_tRCF_1_SH)      | (0x2ULL << SDRAM_tRP_1_SH)       | \
94123 +                               (0x3ULL << SDRAM_tRCD_SH)        | (0x2ULL << SDRAM_tRRD_SH)        | \
94124 +                               (0xaULL << SDRAM_tEndWr_SH)      | (0x6ULL << SDRAM_tEndRd_SH)      | \
94125 +                               (0x8ULL << SDRAM_Burst_SH)       | (0x6ULL << SDRAM_CL_SH)          | \
94126 +                               (0x2ULL << SDRAM_RefreshRate_SH) | (0x3ULL << SDRAM_RamSize_SH)     | \
94127 +                               (0x1ULL << SDRAM_RdOffset_SH)    | (0x1ULL << SDRAM_FlightDelay_SH) | \
94128 +                               (0x4ULL << SDRAM_ReadLtncy_1_SH))
94129 +#else
94130 +#define SDRAM_STARTUP_VALUE   ((0xbULL << SDRAM_tRCF_1_SH)      | (0x2ULL << SDRAM_tRP_1_SH)       | \
94131 +                               (0x3ULL << SDRAM_tRCD_SH)        | (0x2ULL << SDRAM_tRRD_SH)        | \
94132 +                               (0xaULL << SDRAM_tEndWr_SH)      | (0x6ULL << SDRAM_tEndRd_SH)      | \
94133 +                               (0x8ULL << SDRAM_Burst_SH)       | (0x6ULL << SDRAM_CL_SH)          | \
94134 +                               (0x0ULL << SDRAM_RefreshRate_SH) | (0x0ULL << SDRAM_RamSize_SH)     | \
94135 +                               (0x1ULL << SDRAM_RdOffset_SH)    | (0x1ULL << SDRAM_FlightDelay_SH) | \
94136 +                               (0x4ULL << SDRAM_ReadLtncy_1_SH) | SDRAM_ENABLE_ECC | SDRAM_SETUP)
94137 +#endif
94138 +
94139 +#endif /* __ELAN4_XSDRAM_H */
94140 diff -urN clean/include/jtag/jtagio.h linux-2.6.9/include/jtag/jtagio.h
94141 --- clean/include/jtag/jtagio.h 1969-12-31 19:00:00.000000000 -0500
94142 +++ linux-2.6.9/include/jtag/jtagio.h   2004-12-16 05:39:27.000000000 -0500
94143 @@ -0,0 +1,106 @@
94144 +/*
94145 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
94146 + *
94147 + *    For licensing information please see the supplied COPYING file
94148 + *
94149 + */
94150 +
94151 +#ident "$Id: jtagio.h,v 1.8 2004/12/16 10:39:27 lee Exp $"
94152 +/*             $Source: /cvs/master/quadrics/jtagmod/jtagio.h,v $*/
94153 +
94154 +
94155 +#ifndef __SYS_JTAGMOD_H
94156 +#define __SYS_JTAGMOD_H
94157 +
94158 +#ifdef __cplusplus
94159 +extern "C" {
94160 +#endif
94161 +
94162 +#define JTAG_MAX_CHIPS         8
94163 +#define JTAG_MAX_INSTR_LEN     8
94164 +#define JTAG_MAX_BITS           (JTAG_MAX_CHIPS * JTAG_MAX_INSTR_LEN)
94165 +#define JTAG_MAX_DATA_LEN      1024
94166 +
94167 +#define JTAG_BYPASS            0xFF
94168 +
94169 +#define I2C_ADDR_LEN           7                               /* 7 bits of address */
94170 +#define I2C_DATA_LEN           8                               /* 8 bits of data */
94171 +#define I2C_MAX_DATA_LEN       9                               /* and up to 9 bytes worth */
94172 +
94173 +#define BITS_PER_BYTE          8
94174 +#define JTAG_NBYTES(nbits)     (((nbits)+BITS_PER_BYTE-1)/BITS_PER_BYTE)
94175 +#define JTAG_BIT(v, num)       (((v)[(num) / BITS_PER_BYTE] >> ((num) % BITS_PER_BYTE)) & 1)
94176 +#define JTAG_SET_BIT(v, num)   ((v)[(num) / BITS_PER_BYTE] |= (1 << ((num) % BITS_PER_BYTE)))
94177 +#define JTAG_CLR_BIT(v, num)   ((v)[(num) / BITS_PER_BYTE] &= ~(1 << ((num) % BITS_PER_BYTE)))
94178 +
94179 +#define RING_CLOCK_CARD                (0x3D)
94180 +#define RING_CLOCK_SHIFT       (0x3E)
94181 +#define RING_JTAG_LOOPBACK     (0x3F)
94182 +#define RING_MAX               (0x40)
94183 +
94184 +#define RING_QUAD_BIT          (0x40)
94185 +#define RING_I2C_BIT           (0x80)
94186 +
94187 +#define VALID_JTAG_RING(ring)  ((ring) < 0x20 || (ring) == RING_JTAG_LOOPBACK)
94188 +#define VALID_I2C_RING(ring)   ((ring) < 0x20 || (ring) == RING_CLOCK_CARD)
94189 +
94190 +
94191 +typedef struct jtag_value
94192 +{
94193 +    u_char     bytes[JTAG_NBYTES(JTAG_MAX_DATA_LEN)];
94194 +} JTAG_VALUE;
94195 +
94196 +/* arguments to JTAG_SHIFT_IR/JTAG_SHIFT_DR */
94197 +typedef struct jtag_reset_args
94198 +{
94199 +    u_int      ring;
94200 +} JTAG_RESET_ARGS;
94201 +
94202 +typedef struct jtag_shift_args
94203 +{
94204 +    u_int      ring;
94205 +    u_int      nbits;
94206 +    u_char     *value;
94207 +} JTAG_SHIFT_ARGS;
94208 +
94209 +typedef struct i2c_args
94210 +{
94211 +    u_int      ring;
94212 +    u_int      device;
94213 +    u_int      reg;
94214 +    u_int      count;
94215 +    u_int      ok;
94216 +    u_char     data[I2C_MAX_DATA_LEN];
94217 +} I2C_ARGS;
94218 +
94219 +/* values for 'ok' - the return value from i2c_xx functions */
94220 +#define I2C_OP_SUCCESS         0
94221 +#define I2C_OP_ERROR           1
94222 +#define I2C_OP_NOT_IDLE                2
94223 +#define I2C_OP_NO_DEVICE       3
94224 +#define I2C_OP_WRITE_TO_BIG    4
94225 +#define I2C_OP_BAD_RESOURCE    5
94226 +
94227 +typedef struct i2c_clock_shift_args
94228 +{
94229 +    u_int      t;
94230 +    u_int      n;
94231 +    u_int      m;
94232 +} I2C_CLOCK_SHIFT_ARGS;
94233 +
94234 +#define JTAG_RESET             _IOWR('j', '0', JTAG_RESET_ARGS)
94235 +#define JTAG_SHIFT_IR          _IOWR('j', '1', JTAG_SHIFT_ARGS)
94236 +#define JTAG_SHIFT_DR          _IOWR('j', '2', JTAG_SHIFT_ARGS)
94237 +
94238 +#define I2C_CLOCK_SHIFT                _IOWR('j', '4', I2C_CLOCK_SHIFT_ARGS)
94239 +#define I2C_WRITE              _IOWR('j', '5', I2C_ARGS)
94240 +#define I2C_READ               _IOWR('j', '6', I2C_ARGS)
94241 +#define I2C_WRITEREG           _IOWR('j', '7', I2C_ARGS)
94242 +#define I2C_READREG            _IOWR('j', '8', I2C_ARGS)
94243 +
94244 +
94245 +#ifdef __cplusplus
94246 +}
94247 +#endif
94248 +
94249 +#endif /* __SYS_JTAGMOD_H */
94250 diff -urN clean/include/linux/init_task.h linux-2.6.9/include/linux/init_task.h
94251 --- clean/include/linux/init_task.h     2004-10-18 17:53:13.000000000 -0400
94252 +++ linux-2.6.9/include/linux/init_task.h       2005-10-10 17:47:17.000000000 -0400
94253 @@ -2,6 +2,7 @@
94254  #define _LINUX__INIT_TASK_H
94255  
94256  #include <linux/file.h>
94257 +#include <linux/ptrack.h>
94258  
94259  #define INIT_FILES \
94260  {                                                      \
94261 @@ -112,6 +113,7 @@
94262         .proc_lock      = SPIN_LOCK_UNLOCKED,                           \
94263         .switch_lock    = SPIN_LOCK_UNLOCKED,                           \
94264         .journal_info   = NULL,                                         \
94265 +       INIT_TASK_PTRACK(tsk)                                           \
94266  }
94267  
94268  
94269 diff -urN clean/include/linux/ioproc.h linux-2.6.9/include/linux/ioproc.h
94270 --- clean/include/linux/ioproc.h        1969-12-31 19:00:00.000000000 -0500
94271 +++ linux-2.6.9/include/linux/ioproc.h  2005-10-10 17:47:17.000000000 -0400
94272 @@ -0,0 +1,271 @@
94273 +/* -*- linux-c -*-
94274 + *
94275 + *    Copyright (C) 2002-2004 Quadrics Ltd.
94276 + *
94277 + *    This program is free software; you can redistribute it and/or modify
94278 + *    it under the terms of the GNU General Public License as published by
94279 + *    the Free Software Foundation; either version 2 of the License, or
94280 + *    (at your option) any later version.
94281 + *
94282 + *    This program is distributed in the hope that it will be useful,
94283 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
94284 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
94285 + *    GNU General Public License for more details.
94286 + *
94287 + *    You should have received a copy of the GNU General Public License
94288 + *    along with this program; if not, write to the Free Software
94289 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
94290 + *
94291 + *
94292 + */
94293 +
94294 +/*
94295 + * Callbacks for IO processor page table updates.
94296 + */
94297 +
94298 +#ifndef __LINUX_IOPROC_H__
94299 +#define __LINUX_IOPROC_H__
94300 +
94301 +#include <linux/sched.h>
94302 +#include <linux/mm.h>
94303 +
94304 +typedef struct ioproc_ops {
94305 +       struct ioproc_ops *next;
94306 +       void *arg;
94307 +
94308 +       void (*release)(void *arg, struct mm_struct *mm);
94309 +       void (*sync_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
94310 +       void (*invalidate_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
94311 +       void (*update_range)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end);
94312 +
94313 +       void (*change_protection)(void *arg, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot);
94314 +
94315 +       void (*sync_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
94316 +       void (*invalidate_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
94317 +       void (*update_page)(void *arg, struct vm_area_struct *vma, unsigned long address);
94318 +
94319 +} ioproc_ops_t;
94320 +
94321 +/* IOPROC Registration
94322 + * 
94323 + * Called by the IOPROC device driver to register its interest in page table
94324 + * changes for the process associated with the supplied mm_struct
94325 + *
94326 + * The caller should first allocate and fill out an ioproc_ops structure with 
94327 + * the function pointers initialised to the device driver specific code for
94328 + * each callback. If the device driver doesn't have code for a particular 
94329 + * callback then it should set the function pointer to be NULL.
94330 + * The ioproc_ops arg parameter will be passed unchanged as the first argument
94331 + * to each callback function invocation.
94332 + *
94333 + * The ioproc registration is not inherited across fork() and should be called
94334 + * once for each process that the IOPROC device driver is interested in.
94335 + *
94336 + * Must be called holding the mm->page_table_lock
94337 + */
94338 +extern int ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip);
94339 +
94340 +
94341 +/* IOPROC De-registration
94342 + * 
94343 + * Called by the IOPROC device driver when it is no longer interested in page 
94344 + * table changes for the process associated with the supplied mm_struct
94345 + *
94346 + * Normally this is not needed to be called as the ioproc_release() code will
94347 + * automatically unlink the ioproc_ops struct from the mm_struct as the
94348 + * process exits
94349 + *
94350 + * Must be called holding the mm->page_table_lock
94351 + */
94352 +extern int ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip);
94353 +
94354 +#ifdef CONFIG_IOPROC
94355 +
94356 +/* IOPROC Release
94357 + *
94358 + * Called during exit_mmap() as all vmas are torn down and unmapped.
94359 + *
94360 + * Also unlinks the ioproc_ops structure from the mm list as it goes.
94361 + *
94362 + * No need for locks as the mm can no longer be accessed at this point
94363 + *
94364 + */
94365 +static inline void 
94366 +ioproc_release(struct mm_struct *mm)
94367 +{
94368 +       struct ioproc_ops *cp;
94369 +
94370 +       while ((cp = mm->ioproc_ops) != NULL) {
94371 +               mm->ioproc_ops = cp->next;
94372 +        
94373 +               if (cp->release)
94374 +                       cp->release(cp->arg, mm);
94375 +       }
94376 +}
94377 +
94378 +/* IOPROC SYNC RANGE
94379 + *
94380 + * Called when a memory map is synchronised with its disk image i.e. when the 
94381 + * msync() syscall is invoked. Any future read or write to the associated 
94382 + * pages by the IOPROC should cause the page to be marked as referenced or 
94383 + * modified.
94384 + *
94385 + * Called holding the mm->page_table_lock
94386 + */
94387 +static inline void 
94388 +ioproc_sync_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
94389 +{
94390 +       struct ioproc_ops *cp;
94391 +
94392 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
94393 +               if (cp->sync_range)
94394 +                       cp->sync_range(cp->arg, vma, start, end);
94395 +}
94396 +
94397 +/* IOPROC INVALIDATE RANGE
94398 + *
94399 + * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the
94400 + * user or paged out by the kernel. 
94401 + *
94402 + * After this call the IOPROC must not access the physical memory again unless
94403 + * a new translation is loaded.
94404 + *
94405 + * Called holding the mm->page_table_lock
94406 + */
94407 +static inline void 
94408 +ioproc_invalidate_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
94409 +{
94410 +       struct ioproc_ops *cp;
94411 +       
94412 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
94413 +               if (cp->invalidate_range)
94414 +                       cp->invalidate_range(cp->arg, vma, start, end);
94415 +}
94416 +
94417 +/* IOPROC UPDATE RANGE
94418 + *
94419 + * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk 
94420 + * up, when breaking COW or faulting in an anonymous page of memory.
94421 + *
94422 + * These give the IOPROC device driver the opportunity to load translations 
94423 + * speculatively, which can improve performance by avoiding device translation
94424 + * faults.
94425 + *
94426 + * Called holding the mm->page_table_lock
94427 + */
94428 +static inline void 
94429 +ioproc_update_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
94430 +{
94431 +       struct ioproc_ops *cp;
94432 +
94433 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
94434 +               if (cp->update_range)
94435 +                       cp->update_range(cp->arg, vma, start, end);
94436 +}
94437 +
94438 +
94439 +/* IOPROC CHANGE PROTECTION
94440 + *
94441 + * Called when the protection on a region of memory is changed i.e. when the 
94442 + * mprotect() syscall is invoked.
94443 + *
94444 + * The IOPROC must not be able to write to a read-only page, so if the 
94445 + * permissions are downgraded then it must honour them. If they are upgraded 
94446 + * it can treat this in the same way as the ioproc_update_[range|sync]() calls
94447 + *
94448 + * Called holding the mm->page_table_lock
94449 + */
94450 +static inline void 
94451 +ioproc_change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot)
94452 +{
94453 +       struct ioproc_ops *cp;
94454 +
94455 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
94456 +               if (cp->change_protection)
94457 +                       cp->change_protection(cp->arg, vma, start, end, newprot);
94458 +}
94459 +
94460 +/* IOPROC SYNC PAGE
94461 + *
94462 + * Called when a memory map is synchronised with its disk image i.e. when the 
94463 + * msync() syscall is invoked. Any future read or write to the associated page
94464 + * by the IOPROC should cause the page to be marked as referenced or modified.
94465 + *
94466 + * Not currently called as msync() calls ioproc_sync_range() instead
94467 + *
94468 + * Called holding the mm->page_table_lock
94469 + */
94470 +static inline void 
94471 +ioproc_sync_page(struct vm_area_struct *vma, unsigned long addr)
94472 +{
94473 +       struct ioproc_ops *cp;
94474 +
94475 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
94476 +               if (cp->sync_page)
94477 +                       cp->sync_page(cp->arg, vma, addr);
94478 +}
94479 +
94480 +/* IOPROC INVALIDATE PAGE
94481 + *
94482 + * Called whenever a valid PTE is unloaded e.g. when a page is unmapped by the
94483 + * user or paged out by the kernel. 
94484 + *
94485 + * After this call the IOPROC must not access the physical memory again unless
94486 + * a new translation is loaded.
94487 + *
94488 + * Called holding the mm->page_table_lock
94489 + */
94490 +static inline void 
94491 +ioproc_invalidate_page(struct vm_area_struct *vma, unsigned long addr)
94492 +{
94493 +       struct ioproc_ops *cp;
94494 +
94495 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
94496 +               if (cp->invalidate_page)
94497 +                       cp->invalidate_page(cp->arg, vma, addr);
94498 +}
94499 +
94500 +/* IOPROC UPDATE PAGE
94501 + *
94502 + * Called whenever a valid PTE is loaded e.g. mmaping memory, moving the brk 
94503 + * up, when breaking COW or faulting in an anonymous page of memory.
94504 + *
94505 + * These give the IOPROC device the opportunity to load translations 
94506 + * speculatively, which can improve performance by avoiding device translation
94507 + * faults.
94508 + *
94509 + * Called holding the mm->page_table_lock
94510 + */
94511 +static inline void 
94512 +ioproc_update_page(struct vm_area_struct *vma, unsigned long addr)
94513 +{
94514 +       struct ioproc_ops *cp;
94515 +
94516 +       for (cp = vma->vm_mm->ioproc_ops; cp; cp = cp->next)
94517 +               if (cp->update_page)
94518 +                       cp->update_page(cp->arg, vma, addr);
94519 +}
94520 +
94521 +#else
94522 +
94523 +/* ! CONFIG_IOPROC so make all hooks empty */
94524 +
94525 +#define ioproc_release(mm)                     do { } while (0)
94526 +
94527 +#define ioproc_sync_range(vma,start,end)               do { } while (0)
94528 +
94529 +#define ioproc_invalidate_range(vma, start,end)        do { } while (0)
94530 +
94531 +#define ioproc_update_range(vma, start, end)   do { } while (0)
94532 +
94533 +#define ioproc_change_protection(vma, start, end, prot)        do { } while (0)
94534 +
94535 +#define ioproc_sync_page(vma, addr)            do { } while (0)
94536 +
94537 +#define ioproc_invalidate_page(vma, addr)      do { } while (0)
94538 +
94539 +#define ioproc_update_page(vma, addr)          do { } while (0)
94540 +
94541 +#endif /* CONFIG_IOPROC */
94542 +
94543 +#endif /* __LINUX_IOPROC_H__ */
94544 diff -urN clean/include/linux/ptrack.h linux-2.6.9/include/linux/ptrack.h
94545 --- clean/include/linux/ptrack.h        1969-12-31 19:00:00.000000000 -0500
94546 +++ linux-2.6.9/include/linux/ptrack.h  2005-10-10 17:47:17.000000000 -0400
94547 @@ -0,0 +1,65 @@
94548 +/*
94549 + *    Copyright (C) 2000  Regents of the University of California
94550 + *
94551 + *    This program is free software; you can redistribute it and/or modify
94552 + *    it under the terms of the GNU General Public License as published by
94553 + *    the Free Software Foundation; either version 2 of the License, or
94554 + *    (at your option) any later version.
94555 + *
94556 + *    This program is distributed in the hope that it will be useful,
94557 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
94558 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
94559 + *    GNU General Public License for more details.
94560 + *
94561 + *    You should have received a copy of the GNU General Public License
94562 + *    along with this program; if not, write to the Free Software
94563 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
94564 + *
94565 + * Derived from exit_actn.c by
94566 + *    Copyright (C) 2003 Quadrics Ltd.
94567 + *
94568 + */
94569 +#ifndef __LINUX_PTRACK_H
94570 +#define __LINUX_PTRACK_H
94571 +
94572 +/* 
94573 + * Process tracking - this allows a module to keep track of processes
94574 + * in order that it can manage all tasks derived from a single process.
94575 + */
94576 +
94577 +#define PTRACK_PHASE_CLONE     1
94578 +#define PTRACK_PHASE_CLONE_FAIL        2
94579 +#define PTRACK_PHASE_EXEC      3
94580 +#define PTRACK_PHASE_EXIT              4
94581 +
94582 +#define PTRACK_FINISHED                0
94583 +#define PTRACK_INNHERIT                1
94584 +#define PTRACK_DENIED          2
94585 +
94586 +#ifdef CONFIG_PTRACK
94587 +
94588 +typedef int (*ptrack_callback_t)(void *arg, int phase, struct task_struct *child);
94589 +
94590 +struct ptrack_desc {
94591 +       struct list_head        link;
94592 +       ptrack_callback_t       callback;
94593 +       void                   *arg;
94594 +};
94595 +
94596 +extern int     ptrack_register (ptrack_callback_t callback, void *arg);
94597 +extern void    ptrack_deregister (ptrack_callback_t callback, void *arg);
94598 +extern int     ptrack_registered (ptrack_callback_t callback, void *arg);
94599 +
94600 +extern int     ptrack_call_callbacks (int phase, struct task_struct *child);
94601 +
94602 +#define INIT_TASK_PTRACK(tsk) \
94603 +       .ptrack_list = LIST_HEAD_INIT(tsk.ptrack_list)
94604 +
94605 +#else
94606 +#define ptrack_call_callbacks(phase, child) (0)
94607 +
94608 +#define INIT_TASK_PTRACK(tsk)
94609 +
94610 +#endif
94611 +
94612 +#endif /* __LINUX_PTRACK_H */
94613 diff -urN clean/include/linux/sched.h linux-2.6.9/include/linux/sched.h
94614 --- clean/include/linux/sched.h 2005-05-13 13:39:11.000000000 -0400
94615 +++ linux-2.6.9/include/linux/sched.h   2005-10-10 17:47:17.000000000 -0400
94616 @@ -184,6 +184,9 @@
94617  asmlinkage void schedule(void);
94618  
94619  struct namespace;
94620 +#ifdef CONFIG_IOPROC
94621 +struct ioproc_ops;
94622 +#endif
94623  
94624  /* Maximum number of active map areas.. This is a random (large) number */
94625  #define DEFAULT_MAX_MAP_COUNT  65536
94626 @@ -259,6 +262,11 @@
94627         struct kioctx           *ioctx_list;
94628  
94629         struct kioctx           default_kioctx;
94630
94631 +#ifdef CONFIG_IOPROC
94632 +       /* hooks for io devices with advanced RDMA capabilities */
94633 +       struct ioproc_ops       *ioproc_ops;
94634 +#endif
94635  };
94636  
94637  extern int mmlist_nr;
94638 @@ -600,6 +608,10 @@
94639         struct mempolicy *mempolicy;
94640         short il_next;          /* could be shared with used_math */
94641  #endif
94642 +#ifdef CONFIG_PTRACK
94643 +/* process tracking callback */
94644 +       struct list_head ptrack_list;
94645 +#endif
94646  };
94647  
94648  static inline pid_t process_group(struct task_struct *tsk)
94649 diff -urN clean/include/qsnet/autoconf.h linux-2.6.9/include/qsnet/autoconf.h
94650 --- clean/include/qsnet/autoconf.h      1969-12-31 19:00:00.000000000 -0500
94651 +++ linux-2.6.9/include/qsnet/autoconf.h        2005-10-10 17:47:30.000000000 -0400
94652 @@ -0,0 +1,44 @@
94653 +/*
94654 + *    Copyright (c) 2005 by Quadrics Ltd.
94655 + *
94656 + *    For licensing information please see the supplied COPYING file
94657 + *
94658 + * NOTE: This file has been automatically generated:
94659 + *       node   : lester0.hp.com
94660 + *       kernel : ../linux-2.6.9
94661 + *       date   : Mon Oct 10 17:47:29 EDT 2005
94662 + *
94663 + */
94664 +
94665 +#ifndef __QSNET_AUTOCONF_H
94666 +#define __QSNET_AUTOCONF_H
94667 +
94668 +#include <linux/version.h>
94669 +#undef NO_RMAP
94670 +#undef AC
94671 +#undef NO_O1_SCHED
94672 +#undef NO_NPTL
94673 +#undef NO_ABI
94674 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
94675 +#define        PROCESS_ACCT
94676 +#endif
94677 +#undef RSS_ATOMIC
94678 +#define        NO_COPROC
94679 +#undef NO_IOPROC
94680 +#undef NO_PTRACK
94681 +#define        NO_PANIC_NOTIFIER
94682 +#undef NO_SHM_CLEANUP
94683 +#undef NO_PDE
94684 +
94685 +
94686 +#define        CONFIG_EIP
94687 +#define        CONFIG_ELAN
94688 +#define        CONFIG_ELAN3
94689 +#define        CONFIG_ELAN4
94690 +#define        CONFIG_EP
94691 +#define        CONFIG_JTAG
94692 +#define        CONFIG_QSNET
94693 +#define        CONFIG_RMS
94694 +
94695 +#endif /* __QSNET_AUTOCONF_H */
94696 +
94697 diff -urN clean/include/qsnet/condvar.h linux-2.6.9/include/qsnet/condvar.h
94698 --- clean/include/qsnet/condvar.h       1969-12-31 19:00:00.000000000 -0500
94699 +++ linux-2.6.9/include/qsnet/condvar.h 2003-06-07 11:43:33.000000000 -0400
94700 @@ -0,0 +1,140 @@
94701 +/*
94702 + *    Copyright (C) 2000  Regents of the University of California
94703 + *
94704 + *    This program is free software; you can redistribute it and/or modify
94705 + *    it under the terms of the GNU General Public License as published by
94706 + *    the Free Software Foundation; either version 2 of the License, or
94707 + *    (at your option) any later version.
94708 + *
94709 + *    This program is distributed in the hope that it will be useful,
94710 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
94711 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
94712 + *    GNU General Public License for more details.
94713 + *
94714 + *    You should have received a copy of the GNU General Public License
94715 + *    along with this program; if not, write to the Free Software
94716 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
94717 + *
94718 + */
94719 +
94720 +#if    !defined(_LINUX_CONDVAR_H)
94721 +#define        _LINUX_CONDVAR_H
94722 +
94723 +#if    defined(__KERNEL__)
94724 +
94725 +#include <linux/list.h>
94726 +#include <qsnet/debug.h>
94727 +
94728 +#define CV_RET_SIGPENDING      0
94729 +#define CV_RET_TIMEOUT         (-1)
94730 +#define CV_RET_NORMAL          1
94731 +
94732 +struct kcondvar_task {
94733 +       struct task_struct      *task;          /* need to wrap task in this */
94734 +       struct list_head        list;           /*   to thread as a list */
94735 +       int                     blocked;
94736 +};
94737 +
94738 +typedef struct {
94739 +       struct list_head        task_list;      /* list of kcondvar_task's */
94740 +} kcondvar_t;
94741 +
94742 +#define kcondvar_wait(c,l,fl)                  debug_kcondvar_wait(c, l, fl, 0,  TASK_UNINTERRUPTIBLE)
94743 +#define kcondvar_waitsig(c,l,fl)               debug_kcondvar_wait(c, l, fl, 0,  TASK_INTERRUPTIBLE)
94744 +#define kcondvar_timedwait(c,l,fl,to)          debug_kcondvar_wait(c, l, fl, to, TASK_UNINTERRUPTIBLE)
94745 +#define kcondvar_timedwaitsig(c,l,fl,to)       debug_kcondvar_wait(c, l, fl, to, TASK_INTERRUPTIBLE)
94746 +#define kcondvar_wakeupone(c,l)                        kcondvar_wakeup(c, l, 0)
94747 +#define kcondvar_wakeupall(c,l)                        kcondvar_wakeup(c, l, 1)
94748
94749 +extern __inline__ void
94750 +kcondvar_init(kcondvar_t *c)
94751 +{
94752 +       INIT_LIST_HEAD(&c->task_list);
94753 +}
94754 +
94755 +extern __inline__ void
94756 +kcondvar_destroy(kcondvar_t *c)
94757 +{
94758 +       ASSERT(list_empty(&c->task_list));
94759 +}
94760 +
94761 +/*
94762 + * We thread a struct kcondvar_task, allocated on the stack, onto the kcondvar_t's
94763 + * task_list, and take it off again when we wake up.
94764 + */
94765 +extern __inline__ int
94766 +debug_kcondvar_wait(kcondvar_t *c, spinlock_t *l, unsigned long *fl, long tmo, int state)
94767 +{
94768 +       struct kcondvar_task cvt;
94769 +       int ret = CV_RET_NORMAL;
94770 +
94771 +       ASSERT(!in_interrupt());                /* we can block */
94772 +       ASSERT(SPINLOCK_HELD(l));               /* enter holding lock */
94773 +
94774 +       cvt.task = current;
94775 +       cvt.blocked = 1;
94776 +       list_add(&cvt.list, &c->task_list);
94777 +       do {
94778 +              /* Note: we avoid using TASK_UNINTERRUPTIBLE here because avenrun()
94779 +               * (linux/kernel/timer.c:calc_load())
94780 +               * computation treats it like TASK_RUNNABLE hence creates false high
94781 +               * load averages when we create kernel threads.
94782 +               * The cvt.blocked flag distinguishes a signal wakeup from a kcondvar_wakeup.
94783 +               *
94784 +               * However, if we do take a signal we could end up busily spinning here, if
94785 +               * we ignore it (state == TASK_UNINTERRUPTIBLE) so once we see a signal
94786 +               * pending we do sleep TASK_UNINTERRUPTIBLE to stop a busy spin.
94787 +               * I have now blocked all signals for kernel threads to prevent this
94788 +               * happening but other users of kcondvar_wait may still hit this spin.
94789 +               */
94790 +               set_current_state (signal_pending(current) ? state : TASK_INTERRUPTIBLE);
94791 +
94792 +               if (fl)
94793 +                   spin_unlock_irqrestore(l, *fl);
94794 +               else
94795 +                   spin_unlock(l);
94796 +               if (tmo) {
94797 +                       if (tmo <= jiffies || !schedule_timeout(tmo - jiffies))
94798 +                               ret = CV_RET_TIMEOUT;
94799 +               } else
94800 +                       schedule();
94801 +               if (fl)
94802 +                   spin_lock_irqsave (l, *fl);
94803 +               else
94804 +                   spin_lock(l);
94805 +               
94806 +               /* signal_pending - Only exit the loop if the user was waiting TASK_INTERRUPTIBLE */
94807 +               if ((state == TASK_INTERRUPTIBLE) && signal_pending(current))
94808 +                       ret = CV_RET_SIGPENDING;
94809 +
94810 +       } while (cvt.blocked && ret == CV_RET_NORMAL);
94811 +       list_del(&cvt.list);
94812 +
94813 +       /* Reset task state in case we didn't sleep above */
94814 +       set_current_state (TASK_RUNNING);
94815 +
94816 +       return ret;                             /* return holding lock */
94817 +}
94818 +
94819 +extern __inline__ void
94820 +kcondvar_wakeup(kcondvar_t *c, spinlock_t *l, int wakeall)
94821 +{
94822 +       struct list_head *lp;
94823 +       struct kcondvar_task *cvtp;
94824 +
94825 +       ASSERT(SPINLOCK_HELD(l));                       /* already holding lock */
94826 +       for (lp = c->task_list.next; lp != &c->task_list; lp = lp->next) {
94827 +               cvtp = list_entry(lp, struct kcondvar_task, list);
94828 +               if (cvtp->blocked) {
94829 +                       cvtp->blocked = 0;
94830 +                       /* wake_up_process added to kernel/ksyms.c */
94831 +                       wake_up_process(cvtp->task); 
94832 +                       if (!wakeall)
94833 +                               break;
94834 +               }
94835 +       }
94836 +}                                              /* return still holding lock */
94837 +
94838 +
94839 +#endif /* __KERNEL__ */
94840 +#endif /* _LINUX_CONDVAR_H */
94841 diff -urN clean/include/qsnet/config.h linux-2.6.9/include/qsnet/config.h
94842 --- clean/include/qsnet/config.h        1969-12-31 19:00:00.000000000 -0500
94843 +++ linux-2.6.9/include/qsnet/config.h  2005-04-28 18:59:31.000000000 -0400
94844 @@ -0,0 +1,195 @@
94845 +/*
94846 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
94847 + *
94848 + *    For licensing information please see the supplied COPYING file
94849 + *
94850 + */
94851 +
94852 +#ifndef _QSNET_CONFIG_H
94853 +#define _QSNET_CONFIG_H
94854 +
94855 +#ident "$Id: config.h,v 1.24 2005/04/28 22:59:31 robin Exp $"
94856 +/*      $Source: /cvs/master/quadrics/qsnet/config.h,v $*/
94857 +
94858 +
94859 +/*
94860 + * QSNET standard defines :
94861 + *
94862 + *   Target operating system defines
94863 + *             SOLARIS
94864 + *             TRU64UNIX/DIGITAL_UNIX
94865 + *             LINUX
94866 + *
94867 + *   Target processor defines
94868 + *             SPARC
94869 + *             ALPHA
94870 + *             I386
94871 + *             IA64
94872 + *             X86_64
94873 + *
94874 + *   Byte order defines
94875 + *             __LITTLE_ENDIAN__
94876 + *             __BIG_ENDIAN__
94877 + *
94878 + *   Data size defines
94879 + *             _LP64                   - LP64 - long/pointer is 64 bits
94880 + *             _ILP32                  - LP32 - long/pointer is 32 bits
94881 + *
94882 + *   Elan defines for main processor
94883 + *             __MAIN_LITTLE_ENDIAN__  - main byte order (for thread code)
94884 + *             __MAIN_BIG_ENDIAN__
94885 + *             _MAIN_LP64              - main long size (for thread code)
94886 + *             _MAIN_ILP32
94887 + *
94888 + *   Compiling for kernel (defined in makefile)
94889 + *             _KERNEL
94890 + *
94891 + */
94892 +
94893 +#if defined(__LP64__) && !defined(_LP64)
94894 +#  define _LP64
94895 +#endif
94896 +
94897 +#if defined(__arch64__) && !defined(_LP64) && !defined(_ILP32)
94898 +#  define _LP64
94899 +#endif
94900 +
94901 +#if defined(__alpha__) && !defined(_LP64) && !defined(_ILP32)
94902 +#  define _LP64
94903 +#endif
94904 +
94905 +#if !defined(__arch64__) && !defined(_ILP32) && !defined(_LP64)
94906 +#  define _ILP32
94907 +#endif
94908 +
94909 +#if defined(__ELAN__) || defined(__ELAN3__)
94910 +
94911 +#define __LITTLE_ENDIAN__
94912 +
94913 +#if defined(__host_solaris) && defined(__host_sparc)
94914 +#define SOLARIS
94915 +#define SPARC
94916 +#define SOLARIS_SPARC
94917 +#define _MAIN_ILP32
94918 +#define __MAIN_BIG_ENDIAN__
94919 +
94920 +#elif defined(__host_osf)
94921 +#define TRU64UNIX
94922 +#define DIGITAL_UNIX
94923 +#define ALPHA
94924 +#define _MAIN_LP64
94925 +#define __MAIN_LITTLE_ENDIAN__
94926 +
94927 +#elif defined(__host_linux) && defined(__host_alpha)
94928 +#define LINUX
94929 +#define ALPHA
94930 +#define LINUX_ALPHA
94931 +#define _MAIN_LP64
94932 +#define __MAIN_LITTLE_ENDIAN__
94933 +
94934 +#elif defined(__host_linux) && defined(__host_sparc)
94935 +#define LINUX
94936 +#define SPARC
94937 +#define LINUX_SPARC
94938 +#define __MAIN_BIG_ENDIAN__
94939 +#ifdef __KERNEL__
94940 +#  define _MAIN_LP64
94941 +#else
94942 +#  define _MAIN_ILP32
94943 +#endif
94944 +
94945 +#elif defined(__host_linux) && defined(__host_i386)
94946 +#define LINUX
94947 +#define I386
94948 +#define LINUX_I386
94949 +#define _MAIN_ILP32
94950 +#define __MAIN_LITTLE_ENDIAN__
94951 +
94952 +#elif defined(__host_linux) && defined(__host_ia64)
94953 +#define LINUX
94954 +#define IA64
94955 +#define LINUX_IA64
94956 +#define _MAIN_LP64
94957 +#define __MAIN_LITTLE_ENDIAN__
94958 +
94959 +#elif defined(__host_linux) && defined(__host_x86_64)
94960 +#define LINUX
94961 +#define X86_64
94962 +#define LINUX_X86_64
94963 +#define _MAIN_LP64
94964 +#define __MAIN_LITTLE_ENDIAN__
94965 +
94966 +#else
94967 +#error Cannot determine operating system/processor architecture.
94968 +#endif
94969 +
94970 +#else /* !defined(__ELAN3__) */
94971 +
94972 +#if (defined(sun) || defined(__sun)) && defined(sparc) && !defined(__sparcv9)  /* Sun Solaris 5.6 */
94973 +#define SOLARIS
94974 +#define SPARC
94975 +#define SOLARIS_SPARC
94976 +#ifndef __BIG_ENDIAN__
94977 +#define __BIG_ENDIAN__
94978 +#endif 
94979 +
94980 +#elif (defined(sun) || defined(__sun)) && defined(sparc) && defined(__sparcv9) /* Sun Solaris 5.7 */
94981 +#define SOLARIS
94982 +#define SPARC
94983 +#define SOLARIS_SPARC
94984 +#define __BIG_ENDIAN__
94985 +
94986 +#elif defined(__osf__) && defined(__alpha)                                     /* Digital Unix */
94987 +#define TRU64UNIX
94988 +#define DIGITAL_UNIX
94989 +#define ALPHA
94990 +#define __LITTLE_ENDIAN__
94991 +
94992 +#elif (defined(linux) || defined(__linux__)) && defined(__alpha)               /* Linux Alpha */
94993 +
94994 +#define LINUX
94995 +#define ALPHA
94996 +#define LINUX_ALPHA
94997 +#define __LITTLE_ENDIAN__
94998 +
94999 +#elif (defined(linux) || defined(__linux__)) && defined(__sparc)               /* Linux Sparc */
95000 +
95001 +#define LINUX
95002 +#define SPARC
95003 +#define LINUX_SPARC
95004 +#define __BIG_ENDIAN__
95005 +
95006 +#elif (defined(linux) || defined(__linux__)) && defined(__i386)                        /* Linux i386 */
95007 +
95008 +#define LINUX
95009 +#define I386
95010 +#define LINUX_I386
95011 +#define __LITTLE_ENDIAN__
95012 +
95013 +#elif (defined(linux) || defined(__linux__)) && defined(__ia64)                        /* Linux ia64 */
95014 +
95015 +#define LINUX
95016 +#define IA64
95017 +#define LINUX_IA64
95018 +#define __LITTLE_ENDIAN__
95019 +
95020 +#elif (defined(linux) || defined(__linux__)) && (defined(__x86_64) || defined(__x86_64__))     /* Linux x86_64 */
95021 +
95022 +#define LINUX
95023 +#define X86_64
95024 +#define LINUX_X86_64
95025 +#define __LITTLE_ENDIAN__
95026 +
95027 +#elif defined(__QNXNTO__)
95028 +#define QNX
95029 +#define I386
95030 +#define __LITTLE_ENDIAN__
95031 +#else
95032 +#error Cannot determine operating system/processor architecture.
95033 +#endif
95034 +
95035 +#endif
95036 +
95037 +#include <qsnet/workarounds.h>
95038 +
95039 +#endif /* _QSNET_CONFIG_H */
95040 diff -urN clean/include/qsnet/crwlock.h linux-2.6.9/include/qsnet/crwlock.h
95041 --- clean/include/qsnet/crwlock.h       1969-12-31 19:00:00.000000000 -0500
95042 +++ linux-2.6.9/include/qsnet/crwlock.h 2003-09-24 10:07:02.000000000 -0400
95043 @@ -0,0 +1,207 @@
95044 +/* 
95045 + *    Copyright (C) 2000  Regents of the University of California
95046 + *
95047 + *    This program is free software; you can redistribute it and/or modify
95048 + *    it under the terms of the GNU General Public License as published by
95049 + *    the Free Software Foundation; either version 2 of the License, or
95050 + *    (at your option) any later version.
95051 + *
95052 + *    This program is distributed in the hope that it will be useful,
95053 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
95054 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
95055 + *    GNU General Public License for more details.
95056 + *
95057 + *    You should have received a copy of the GNU General Public License
95058 + *    along with this program; if not, write to the Free Software
95059 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
95060 + *
95061 + */
95062 +
95063 +/*
95064 + *    Complex - Reader/Writer locks
95065 + *    Ref: "UNIX Systems for Modern Architectures", by Curt Schimmel, 
95066 + *    sec 11.6.3. 
95067 + *
95068 + *    This implementation is based on semaphores and may not be called from 
95069 + *    interrupt handlers.
95070 + *
95071 + */
95072 +
95073 +#if    !defined(_LINUX_RWLOCK_H)
95074 +#define        _LINUX_RWLOCK_H
95075 +
95076 +#if    defined(__KERNEL__)
95077 +
95078 +typedef enum { RD, WRT, ANY } crwlock_type_t;
95079 +
95080 +#define crwlock_write_held(l) debug_crwlock_held(l, WRT, __BASE_FILE__,__LINE__)
95081 +#define crwlock_read_held(l) debug_crwlock_held(l, RD, __BASE_FILE__, __LINE__)
95082 +#define crwlock_held(l)      debug_crwlock_held(l, ANY, __BASE_FILE__, __LINE__)
95083 +
95084 +#define crwlock_read(l)             debug_crwlock_read(l, __BASE_FILE__, __LINE__)
95085 +#define crwlock_write(l)     debug_crwlock_write(l, __BASE_FILE__, __LINE__)
95086 +#define crwlock_done(l)      debug_crwlock_done(l, __BASE_FILE__, __LINE__)
95087 +
95088 +#if     defined(DEBUG_RWLOCK) && defined(__alpha__) && !defined(DEBUG_SPINLOCK)
95089 +#define DEBUG_SPINLOCK
95090 +#endif
95091 +
95092 +#include <linux/spinlock.h>
95093 +#include <asm/semaphore.h>
95094 +#include <qsnet/debug.h>
95095 +#include <qsnet/mutex.h>
95096 +#include <linux/version.h>
95097 +
95098 +#if    !defined(DEBUG_SPINLOCK)
95099 +#define debug_spin_lock(lock, file, line)       spin_lock(lock)
95100 +#endif
95101 +
95102 +typedef struct {
95103 +        spinlock_t             m_lock;         /* protects cnt fields below */
95104 +        int                     m_rdcnt;        /* # of rdrs in crit section */
95105 +        int                     m_wrcnt;        /* # of wrtrs in crit section */
95106 +        int                     m_rdwcnt;       /* # of waiting readers */
95107 +        int                     m_wrwcnt;       /* # of waiting writers */
95108 +        struct semaphore        m_rdwait;       /* sema where readers wait */
95109 +        struct semaphore        m_wrwait;       /* sema where writers wait */
95110 +        pid_t                  m_wrholder;     /* task holding write lock */
95111 +} crwlock_t;
95112
95113 +extern __inline__ void 
95114 +crwlock_init(crwlock_t *l)
95115 +{
95116 +       l->m_lock = SPIN_LOCK_UNLOCKED;
95117 +#if    LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
95118 +       l->m_rdwait = MUTEX_LOCKED;
95119 +       l->m_wrwait = MUTEX_LOCKED;
95120 +#else
95121 +       sema_init(&l->m_rdwait,0);
95122 +       sema_init(&l->m_wrwait,0);
95123 +#endif
95124 +       l->m_rdcnt = l->m_wrcnt = l->m_rdwcnt = l->m_wrwcnt = 0;
95125 +       l->m_wrholder = PID_NONE;
95126 +}
95127 +
95128 +extern __inline__ void 
95129 +crwlock_destroy(crwlock_t *l)
95130 +{
95131 +       ASSERT(l->m_rdcnt == 0 && l->m_wrcnt == 0);
95132 +}
95133 +
95134 +/*
95135 + * If a writer has the lock presently or there are writers waiting,
95136 + * then we have to wait.
95137 + */
95138 +extern __inline__ void 
95139 +debug_crwlock_read(crwlock_t *l, char *file, int line)
95140 +{
95141 +       ASSERT(!in_interrupt());
95142 +       spin_lock(&l->m_lock);
95143 +       if (l->m_wrcnt || l->m_wrwcnt) {
95144 +               l->m_rdwcnt++;
95145 +               spin_unlock(&l->m_lock);
95146 +               down(&l->m_rdwait); /* P */
95147 +       } else {
95148 +               l->m_rdcnt++;
95149 +               spin_unlock(&l->m_lock);
95150 +       }
95151 +}
95152 +
95153 +/*
95154 + * If we're the last reader, and a writer is waiting,
95155 + * then let the writer go now.
95156 + */
95157 +/* private */
95158 +extern __inline__ void 
95159 +debug_crwlock_read_done(crwlock_t *l, char *file, int line)
95160 +{
95161 +       spin_lock(&l->m_lock);
95162 +       l->m_rdcnt--;
95163 +       if (l->m_wrwcnt && l->m_rdcnt == 0) {
95164 +               l->m_wrcnt = 1;
95165 +               l->m_wrwcnt--;
95166 +               spin_unlock(&l->m_lock);
95167 +               up(&l->m_wrwait); /* V */       
95168 +               return;
95169 +       }
95170 +       spin_unlock(&l->m_lock);
95171 +}
95172 +
95173 +extern __inline__ void 
95174 +debug_crwlock_write(crwlock_t *l, char *file, int line)
95175 +{
95176 +       ASSERT(!in_interrupt());
95177 +       spin_lock(&l->m_lock);
95178 +       if (l->m_wrcnt || l->m_rdcnt) {         /* block if lock is in use */
95179 +               l->m_wrwcnt++;
95180 +               spin_unlock(&l->m_lock);
95181 +               down(&l->m_wrwait); /* P */
95182 +       } else {                                /* lock is not in use */
95183 +               l->m_wrcnt = 1;
95184 +               spin_unlock(&l->m_lock);
95185 +       }
95186 +       l->m_wrholder = current->pid;
95187 +}
95188 +
95189 +/* private */
95190 +extern __inline__ void
95191 +debug_crwlock_write_done(crwlock_t *l, char *file, int line)
95192 +{
95193 +       int rdrs;
95194 +
95195 +       spin_lock(&l->m_lock);
95196 +       l->m_wrholder = PID_NONE;
95197 +       if (l->m_rdwcnt) {                      /* let any readers go first */
95198 +               l->m_wrcnt = 0;
95199 +               rdrs = l->m_rdwcnt;
95200 +               l->m_rdcnt = rdrs;
95201 +               l->m_rdwcnt = 0;
95202 +               spin_unlock(&l->m_lock);
95203 +               while (rdrs--)
95204 +                       up(&l->m_rdwait); /* V */
95205 +       } else if (l->m_wrwcnt) {               /* or let any writer go */
95206 +               l->m_wrwcnt--;
95207 +               spin_unlock(&l->m_lock);
95208 +               up(&l->m_wrwait); /* V */
95209 +       } else {                                /* nobody waiting, unlock */
95210 +               l->m_wrcnt = 0;
95211 +               spin_unlock(&l->m_lock);
95212 +       }
95213 +}
95214 +
95215 +extern __inline__ void
95216 +debug_crwlock_done(crwlock_t *l, char *file, int line)
95217 +{
95218 +       if (l->m_wrholder == current->pid)
95219 +               debug_crwlock_write_done(l, file, line);
95220 +       else
95221 +               debug_crwlock_read_done(l, file, line);
95222 +}
95223 +
95224 +/*
95225 + * Return nonzero if lock is held
95226 + */
95227 +extern __inline__ int  
95228 +debug_crwlock_held(crwlock_t *l, crwlock_type_t t, char *file, int line)
95229 +{
95230 +       int res;
95231 +
95232 +       spin_lock(&l->m_lock);
95233 +       switch(t) {
95234 +               case RD:
95235 +                       res = l->m_rdcnt;
95236 +                       break;
95237 +               case WRT:
95238 +                       res = l->m_wrcnt;
95239 +                       break;
95240 +               case ANY:
95241 +                       res = l->m_wrcnt + l->m_rdcnt;
95242 +                       break;
95243 +       }
95244 +       spin_unlock(&l->m_lock);
95245 +
95246 +       return res;
95247 +}
95248 +
95249 +#endif /* __KERNEL__ */
95250 +#endif /* _LINUX_RWLOCK_H */
95251 diff -urN clean/include/qsnet/ctrl_linux.h linux-2.6.9/include/qsnet/ctrl_linux.h
95252 --- clean/include/qsnet/ctrl_linux.h    1969-12-31 19:00:00.000000000 -0500
95253 +++ linux-2.6.9/include/qsnet/ctrl_linux.h      2003-03-26 04:32:03.000000000 -0500
95254 @@ -0,0 +1,37 @@
95255 +/*
95256 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
95257 + *
95258 + *    For licensing information please see the supplied COPYING file
95259 + *
95260 + */
95261 +
95262 +#ifndef __QSNET_CTRL_LINUX_H
95263 +#define __QSNET_CTRL_LINUX_H
95264 +
95265 +#ident "$Id: ctrl_linux.h,v 1.3 2003/03/26 09:32:03 mike Exp $"
95266 +/*      $Source: /cvs/master/quadrics/qsnet/ctrl_linux.h,v $*/
95267 +
95268 +#define QSNETIO_USER_BASE              0x40
95269 +
95270 +#define QSNETIO_DEBUG_DUMP             _IO   ('e', QSNETIO_USER_BASE + 0)
95271 +
95272 +typedef struct qsnetio_debug_buffer_struct
95273 +{
95274 +       caddr_t addr; 
95275 +       size_t  len;
95276 +} QSNETIO_DEBUG_BUFFER_STRUCT;
95277 +#define QSNETIO_DEBUG_BUFFER           _IOWR ('e', QSNETIO_USER_BASE + 1, QSNETIO_DEBUG_BUFFER_STRUCT)
95278 +
95279 +typedef struct qsnetio_debug_kmem_struct
95280 +{
95281 +       void *handle;
95282 +} QSNETIO_DEBUG_KMEM_STRUCT;
95283 +#define QSNETIO_DEBUG_KMEM             _IOWR   ('e', QSNETIO_USER_BASE + 2, QSNETIO_DEBUG_KMEM_STRUCT)
95284 +
95285 +#endif /* __QSNET_CTRL_LINUX_H */
95286 +
95287 +/*
95288 + * Local variables:
95289 + * c-file-style: "linux"
95290 + * End:
95291 + */
95292 diff -urN clean/include/qsnet/debug.h linux-2.6.9/include/qsnet/debug.h
95293 --- clean/include/qsnet/debug.h 1969-12-31 19:00:00.000000000 -0500
95294 +++ linux-2.6.9/include/qsnet/debug.h   2005-03-23 06:04:54.000000000 -0500
95295 @@ -0,0 +1,68 @@
95296 +/*
95297 + *    Copyright (C) 2000  Regents of the University of California
95298 + *
95299 + *    This program is free software; you can redistribute it and/or modify
95300 + *    it under the terms of the GNU General Public License as published by
95301 + *    the Free Software Foundation; either version 2 of the License, or
95302 + *    (at your option) any later version.
95303 + *
95304 + *    This program is distributed in the hope that it will be useful,
95305 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
95306 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
95307 + *    GNU General Public License for more details.
95308 + *
95309 + *    You should have received a copy of the GNU General Public License
95310 + *    along with this program; if not, write to the Free Software
95311 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
95312 + *
95313 + */
95314 +#ifndef _QSNET_DEBUG_H
95315 +#define _QSNET_DEBUG_H
95316 +
95317 +#if defined(DIGITAL_UNIX) 
95318 +#include <kern/assert.h>
95319 +#elif defined(LINUX)
95320 +extern int qsnet_assfail (char *ex, const char *func, char *file, int line);
95321 +
95322 +#define ASSERT(EX)     do { \
95323 +       if (!(EX) && qsnet_assfail (#EX, __FUNCTION__, __BASE_FILE__, __LINE__)) { \
95324 +               BUG(); \
95325 +       } \
95326 +} while (0)
95327 +#endif /* DIGITAL_UNIX */
95328 +
95329 +/* debug.c */
95330 +extern void qsnet_debug_init(void);
95331 +extern void qsnet_debug_fini(void);
95332 +extern void qsnet_debug_disable(int);
95333 +extern void qsnet_debug_alloc(void);
95334 +
95335 +#define QSNET_DEBUG_BUFFER  ((unsigned int)(0x01))
95336 +#define QSNET_DEBUG_CONSOLE ((unsigned int)(0x02))
95337 +#define QSNET_DEBUG_BUF_CON ( QSNET_DEBUG_BUFFER | QSNET_DEBUG_CONSOLE )
95338 +
95339 +#ifdef __GNUC__
95340 +extern void qsnet_debugf      (unsigned int mode, const char *fmt, ...)
95341 +       __attribute__ ((format (printf,2,3)));
95342 +extern void kqsnet_debugf      (char *fmt, ...)
95343 +       __attribute__ ((format (printf,1,2)));
95344 +#else
95345 +extern void qsnet_debugf      (unsigned int mode, const char *fmt, ...);
95346 +extern void kqsnet_debugf     (char *fmt, ...);
95347 +#endif
95348 +extern void qsnet_vdebugf     (unsigned int mode, const char *prefix, const char *fmt,  va_list ap);
95349 +extern int  qsnet_debug_buffer(caddr_t ubuffer, int len);
95350 +extern int  qsnet_debug_dump  (void);
95351 +extern int  qsnet_debug_kmem  (void *handle);
95352 +
95353 +extern void qsnet_debug_buffer_on(void);
95354 +extern void qsnet_debug_buffer_clear(void);
95355 +extern void qsnet_debug_buffer_mark(char *str);
95356 +
95357 +#endif /* _QSNET_DEBUG_H */
95358 +
95359 +/*
95360 + * Local variables:
95361 + * c-file-style: "linux"
95362 + * End:
95363 + */
95364 diff -urN clean/include/qsnet/kcompat.h linux-2.6.9/include/qsnet/kcompat.h
95365 --- clean/include/qsnet/kcompat.h       1969-12-31 19:00:00.000000000 -0500
95366 +++ linux-2.6.9/include/qsnet/kcompat.h 2005-07-05 11:09:03.000000000 -0400
95367 @@ -0,0 +1,27 @@
95368 +/*
95369 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
95370 + *
95371 + *    For licensing information please see the supplied COPYING file
95372 + *
95373 + */
95374 +
95375 +#ifndef __QSNET_KCOMPAT_H
95376 +#define __QSNET_KCOMPAT_H
95377 +
95378 +#ident "$Id: kcompat.h,v 1.1.2.1 2005/07/05 15:09:03 mike Exp $"
95379 +/*      $Source: /cvs/master/quadrics/qsnet/kcompat.h,v $*/
95380 +
95381 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
95382 +
95383 +#define module_param(VAR,TYPE,PERM) MODULE_PARM(VAR,"i")
95384 +
95385 +#endif /*  KERNEL_VERSION(2,6,0) */
95386 +
95387 +#endif /* __QSNET_KCOMPAT_H */
95388 +
95389 +
95390 +
95391 +
95392 +
95393 +
95394 +
95395 diff -urN clean/include/qsnet/kernel.h linux-2.6.9/include/qsnet/kernel.h
95396 --- clean/include/qsnet/kernel.h        1969-12-31 19:00:00.000000000 -0500
95397 +++ linux-2.6.9/include/qsnet/kernel.h  2005-07-20 07:35:37.000000000 -0400
95398 @@ -0,0 +1,39 @@
95399 +/*
95400 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
95401 + *
95402 + *    For licensing information please see the supplied COPYING file
95403 + *
95404 + */
95405 +
95406 +#ifndef __QSNET_KERNEL_H
95407 +#define __QSNET_KERNEL_H
95408 +
95409 +#ident "$Id: kernel.h,v 1.8.18.1 2005/07/20 11:35:37 mike Exp $"
95410 +/*      $Source: /cvs/master/quadrics/qsnet/kernel.h,v $*/
95411 +
95412 +#include <qsnet/config.h>
95413 +#include <qsnet/types.h>
95414 +
95415 +#if defined(SOLARIS)
95416 +#include <qsnet/kernel_solaris.h>
95417 +#endif
95418 +
95419 +#if defined(DIGITAL_UNIX)
95420 +#include <qsnet/kernel_dunix.h>
95421 +#endif
95422 +
95423 +#if defined(LINUX)
95424 +#include <qsnet/kernel_linux.h>
95425 +#endif
95426 +
95427 +#include <qsnet/debug.h>
95428 +#include <qsnet/kcompat.h>
95429 +
95430 +#endif /* __QSNET_KERNEL_H */
95431 +
95432 +
95433 +
95434 +
95435 +
95436 +
95437 +
95438 diff -urN clean/include/qsnet/kernel_linux.h linux-2.6.9/include/qsnet/kernel_linux.h
95439 --- clean/include/qsnet/kernel_linux.h  1969-12-31 19:00:00.000000000 -0500
95440 +++ linux-2.6.9/include/qsnet/kernel_linux.h    2005-09-07 10:35:03.000000000 -0400
95441 @@ -0,0 +1,374 @@
95442 +/*
95443 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
95444 + *
95445 + *    For licensing information please see the supplied COPYING file
95446 + *
95447 + */
95448 +
95449 +#ifndef __QSNET_KERNEL_LINUX_H
95450 +#define __QSNET_KERNEL_LINUX_H
95451 +
95452 +#ident "$Id: kernel_linux.h,v 1.69.2.3 2005/09/07 14:35:03 mike Exp $"
95453 +/*      $Source: /cvs/master/quadrics/qsnet/kernel_linux.h,v $*/
95454 +
95455 +#include <linux/version.h>
95456 +#if defined(MODVERSIONS)
95457 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
95458 +#include <config/modversions.h>
95459 +#else
95460 +#include <linux/modversions.h>
95461 +#endif
95462 +#endif
95463 +
95464 +#include <linux/autoconf.h>
95465 +#include <qsnet/module.h>
95466 +
95467 +/* ASSERT(spin_is_locked(l)) would always fail on UP kernels */
95468 +#if defined(CONFIG_SMP)
95469 +#define SPINLOCK_HELD(l)       spin_is_locked(l)
95470 +#else
95471 +#define SPINLOCK_HELD(l)       (1) 
95472 +#endif
95473 +
95474 +#include <asm/io.h>
95475 +#include <asm/uaccess.h>
95476 +
95477 +#include <linux/types.h>
95478 +#include <linux/time.h>
95479 +
95480 +#include <linux/delay.h>
95481 +#include <linux/smp_lock.h>
95482 +#include <linux/spinlock.h>
95483 +#include <qsnet/module.h>
95484 +
95485 +#include <linux/highmem.h>
95486 +
95487 +#include <qsnet/mutex.h>
95488 +#include <qsnet/condvar.h>
95489 +#include <qsnet/crwlock.h>
95490 +
95491 +#if defined(LINUX_ALPHA)
95492 +#  include <asm/core_tsunami.h>        /* for TSUNAMI_MEM */
95493 +#endif
95494 +
95495 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
95496 +#      undef   MOD_INC_USE_COUNT
95497 +#      undef   MOD_DEC_USE_COUNT
95498 +#      define  MOD_INC_USE_COUNT
95499 +#      define  MOD_DEC_USE_COUNT
95500 +#endif
95501 +
95502 +#define MIN(a,b)       ((a) > (b) ? (b) : (a))
95503 +#define MAX(a,b)       ((a) > (b) ? (a) : (b))
95504 +
95505 +/* stray types */
95506 +typedef u64               u_longlong_t;
95507 +typedef unsigned long     uintptr_t;
95508 +typedef int               bool_t;
95509 +
95510 +typedef unsigned long     virtaddr_t;                          /* virtual address */
95511 +typedef unsigned long      ioaddr_t;                           /* io address */
95512 +typedef unsigned long      sdramaddr_t;                                /* elan sdram offset */
95513 +
95514 +/* 386 kernel can be compiled with PAE enabled to use a 44 bit physical address */
95515 +#if defined(CONFIG_X86_PAE)
95516 +typedef unsigned long long physaddr_t;
95517 +#else
95518 +typedef unsigned long     physaddr_t;
95519 +#endif
95520 +
95521 +/* ticks since reboot, and tick freq */
95522 +#define lbolt          jiffies 
95523 +#define hz             HZ
95524 +
95525 +/* System page size and friends */
95526 +#define PAGESIZE       PAGE_SIZE
95527 +#define PAGESHIFT      PAGE_SHIFT
95528 +#define PAGEOFFSET     (PAGE_SIZE - 1)
95529 +#define PAGEMASK       PAGE_MASK
95530 +
95531 +#define PAGE_ALIGNED(a)        (((a) & PAGE_MASK) == a)
95532 +
95533 +/* convert between bytes and pages */
95534 +#define btop(b)         ((unsigned long)(b) >> PAGE_SHIFT)     /* rnd down */ 
95535 +#define btopr(b)        btop(PAGE_ALIGN((unsigned long) b))    /* rnd up */
95536 +#define ptob(p)                ((unsigned long)(p) << PAGE_SHIFT)
95537 +
95538 +/* round up sz to the nearest multiple of blk */
95539 +#define roundup(sz,blk) ((blk) * ((sz) / (blk) + ((sz) % (blk) ? 1 : 0)))      
95540 +
95541 +/* send a signal to a process */
95542 +#define psignal(pr,sig)        send_sig(sig,pr,0)
95543 +
95544 +/* microsecond delay */
95545 +#define DELAY(us)      udelay(us)
95546 +
95547 +/* macro macros */
95548 +#define MACRO_BEGIN     do {
95549 +#define MACRO_END       } while (0)
95550 +
95551 +/* D-Unix compatable errno values */
95552 +#define ESUCCESS        0
95553 +#define EFAIL           255
95554 +
95555 +/* ASSERT(NO_LOCKS_HELD) will be a no-op */
95556 +#define NO_LOCKS_HELD  1
95557 +
95558 +/* misc */
95559 +typedef int            label_t;
95560 +#define on_fault(ljp)  ((ljp) == NULL)
95561 +#define _NOTE(X)
95562 +#define no_fault()     ((void) 0)
95563 +#define panicstr       0
95564 +
95565 +/* return from system call is -EXXX on linux */
95566 +#define set_errno(e)   (-(e))
95567 +
95568 +/* 
95569 + * BSD-style byte ops 
95570 + */
95571 +
95572 +#define bcmp(src1,src2,len)            memcmp(src1,src2,len)
95573 +#define bzero(dst,len)                 memset(dst,0,len)
95574 +#define bcopy(src,dst,len)             memcpy(dst,src,len)
95575 +
95576 +#define preemptable_start              do { long must_yield_at = lbolt + (hz/10);
95577 +#define preemptable_end                        } while (0)
95578 +#define preemptable_check()            do {\
95579 +                                            if ((lbolt - must_yield_at) > 0)\
95580 +                                           {\
95581 +                                               preemptable_yield() ; \
95582 +                                               must_yield_at = lbolt + (hz/10);\
95583 +                                           }\
95584 +                                       } while (0)
95585 +
95586 +#define preemptable_yield()            schedule()
95587 +
95588 +#define CURPROC()                       current
95589 +#define CURTHREAD()                     current
95590 +#define SUSER()                                suser()
95591 +
95592 +/* 64 bit IO operations on 32 bit intel cpus using MMX */
95593 +#if defined(LINUX_I386)
95594 +extern u64         qsnet_readq (volatile u64 *ptr);
95595 +extern void        qsnet_writeq (u64 value, volatile u64 *ptr);
95596 +
95597 +#define readq(ptr)             qsnet_readq((void *) ptr)
95598 +#define writeq(val,ptr)                qsnet_writeq(val, (void *)ptr)
95599 +#endif
95600 +
95601 +/*
95602 + * Memory barriers
95603 + */
95604 +#ifndef mmiob
95605 +#  define mmiob()                      mb()
95606 +#endif
95607 +
95608 +/* 
95609 + * Exit handlers
95610 + */
95611 +#define HANDLER_REGISTER(func,arg,flags)   xa_handler_register(func,arg,flags)
95612 +#define HANDLER_UNREGISTER(func,arg,flags) xa_handler_unregister(func,arg,flags)
95613 +
95614 +/* 
95615 + * KMEM_GETPAGES and KMEM_ALLOC both call kmem_alloc, which 
95616 + * translates the call to kmalloc if < PAGE_SIZE, or vmalloc 
95617 + * if >= PAGE_SIZE.  vmalloc will always return a page-aligned 
95618 + * region rounded up to the nearest page, while kmalloc will 
95619 + * return bits and pieces of a page.
95620 + */
95621 +
95622 +#ifdef KMEM_DEBUG
95623 +extern void          *qsnet_kmem_alloc_debug(int len, int sleep, int zerofill, char *file, int line);
95624 +extern void           qsnet_kmem_free_debug(void *ptr, int len, char *file, int line);
95625 +#define KMEM_ALLOC(ptr,type,len,sleep) \
95626 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,0,__FILE__,__LINE__); }
95627 +#define KMEM_ZALLOC(ptr,type,len,sleep) \
95628 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc_debug(len,sleep,1,__FILE__,__LINE__); }
95629 +
95630 +#define KMEM_FREE(ptr,len)               qsnet_kmem_free_debug((void *)ptr,len,__FILE__,__LINE__)
95631 +
95632 +#else
95633 +
95634 +extern void          *qsnet_kmem_alloc(int len, int sleep, int zerofill);
95635 +extern void           qsnet_kmem_free(void *ptr, int len);
95636 +
95637 +#define KMEM_ALLOC(ptr,type,len,sleep) \
95638 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,0); }
95639 +#define KMEM_ZALLOC(ptr,type,len,sleep) \
95640 +       { KMEM_ASSERT(sleep); (ptr)=(type)qsnet_kmem_alloc(len,sleep,1); }
95641 +
95642 +#define KMEM_FREE(ptr,len)               qsnet_kmem_free((void *)ptr,len)
95643 +
95644 +#endif
95645 +extern void       qsnet_kmem_display(void *handle);
95646 +extern physaddr_t kmem_to_phys(void *ptr);
95647 +
95648 +#define KMEM_ASSERT(sleep)              ASSERT(!(in_interrupt() && sleep))
95649 +
95650 +
95651 +#define KMEM_GETPAGES(ptr,type,pgs,sleep) KMEM_ZALLOC(ptr,type,ptob(pgs),sleep)
95652 +#define KMEM_FREEPAGES(ptr,pgs)          KMEM_FREE(ptr,ptob(pgs));
95653 +
95654 +/*
95655 + * Copying from user space -> kernel space (perms checked)
95656 + */
95657 +#define copyin(up,kp,size)             copy_from_user(kp,up,size)
95658 +#define copyin_noerr(up,kp,size)       copy_from_user(kp,up,size)
95659 +
95660 +/* get_user() gets xfer width right */
95661 +#define fulinux(ret, up)               (get_user(ret, (up)) == 0 ? ret : -1)
95662 +#define fulinuxp(ret, up)              (get_user(ret, (up)) == 0 ? ret : NULL)
95663 +
95664 +extern __inline__ int fubyte    (u8  *up) { u8  ret;   return fulinux(ret, up);}
95665 +extern __inline__ int fusword   (u16 *up) { u16 ret;   return fulinux(ret, up);}
95666 +extern __inline__ int fuword    (u32 *up) { u32 ret;   return fulinux(ret, up);}
95667 +#if BITS_PER_LONG > 32
95668 +extern __inline__ u64 fulonglong(u64 *up) { u64 ret;   return fulinux(ret, up);}
95669 +#else
95670 +extern __inline__ u64 fulonglong(u64 *up) { return ((u64) fuword((u32 *)up) | (((u64) fuword(((u32 *)up)+1))<<32)); }
95671 +#endif
95672 +extern __inline__ void *fuptr (void **up) { void *ret; return fulinuxp(ret,up);}
95673 +
95674 +#define fubyte_noerr(up)               fubyte(up)
95675 +#define fusword_noerr(up)              fusword(up)
95676 +#define fuword_noerr(up)               fuword(up)
95677 +#define fulonglong_noerr(up)           fulonglong(up)
95678 +#define fuptr_noerr(up)                        fuptr(up)
95679 +
95680 +extern __inline__ int copyinstr(char *up, char *kp, int max, int *size)
95681 +{ 
95682 +       for (*size = 1; *size <= max; (*size)++) {
95683 +               if (get_user(*kp, up++) != 0)
95684 +                       return EFAULT;  /* bad user space addr */
95685 +               if (*kp++ == '\0')
95686 +                       return 0;       /* success */
95687 +       }
95688 +       *size = max;
95689 +       return ENAMETOOLONG;            /* runaway string */
95690 +}
95691
95692 +/*
95693 + * Copying from kernel space -> user space (perms checked)
95694 + */
95695 +
95696 +#define copyout(kp,up,size)            copy_to_user(up,kp,size)
95697 +#define copyout_noerr(kp,up,size)      copy_to_user(up,kp,size)
95698 +
95699 +/* put_user() gets xfer width right */
95700 +#define sulinux(val, up)               (put_user(val, (up)) == 0 ? 0 : -1)
95701 +
95702 +extern __inline__ int subyte    (u8  *up, u8  val) { return sulinux(val, up); }
95703 +extern __inline__ int susword   (u16 *up, u16 val) { return sulinux(val, up); }
95704 +extern __inline__ int suword    (u32 *up, u32 val) { return sulinux(val, up); }
95705 +#if BITS_PER_LONG > 32
95706 +extern __inline__ int sulonglong(u64 *up, u64 val) { return sulinux(val, up); }
95707 +#else
95708 +extern __inline__ int sulonglong(u64 *up, u64 val) { return (suword((u32 *) up, (u32) val) == 0 ? 
95709 +                                                            suword(((u32 *) up)+1, (u32) (val >> 32)) : -1); }
95710 +#endif
95711 +extern __inline__ int suptr   (void **up,void *val){ return sulinux(val, up); }
95712 +
95713 +#define subyte_noerr(up,val)           subyte(up,val)  
95714 +#define susword_noerr(up,val)          susword(up,val) 
95715 +#define suword_noerr(up,val)           suword(up,val)  
95716 +#define sulonglong_noerr(up,val)       sulonglong(up,val)      
95717 +#define suptr_noerr(up,val)            suptr(up,val)   
95718 +
95719 +/*
95720 + * /proc/qsnet interface
95721 + */
95722 +extern inline int
95723 +str_append(char *buf, char *add, int size)
95724 +{
95725 +#define TRUNC_MSG       "[Output truncated]\n"
95726 +       int full = 0;
95727 +       int max = size - strlen(TRUNC_MSG) - strlen(add) - 1;
95728 +
95729 +       if (strlen(buf) > max) {
95730 +               strcat(buf, TRUNC_MSG);
95731 +               full = 1;
95732 +       } else
95733 +               strcat(buf, add);
95734 +       return full;
95735 +}
95736 +
95737 +/* Spinlocks */
95738 +#define spin_lock_destroy(l)           ((void) 0)
95739 +
95740 +/* Complex - Reader/Writer locks - we added <linux/crwlock.h> */
95741 +typedef crwlock_t                      krwlock_t;
95742 +#define krwlock_init(l)                        crwlock_init(l)
95743 +#define krwlock_destroy(l)             crwlock_destroy(l)
95744 +#define krwlock_write(l)               crwlock_write(l)
95745 +#define krwlock_read(l)                        crwlock_read(l)
95746 +#define krwlock_done(l)                        crwlock_done(l)
95747 +#define krwlock_is_locked(l)           crwlock_held(l)
95748 +#define krwlock_is_write_locked(l)     crwlock_write_held(l)
95749 +#define krwlock_is_read_locked(l)      crwlock_read_held(l)
95750 +
95751 +/*
95752 + * Timeouts - Solaris style.
95753 + */
95754 +typedef struct timer_list timer_fn_t;
95755 +
95756 +extern inline void
95757 +schedule_timer_fn(timer_fn_t *timer, void (*fun)(void *), void *arg, long hz_delay)
95758 +{
95759 +       init_timer(timer);
95760 +
95761 +       timer->function = (void (*)(unsigned long)) fun;
95762 +       timer->data     = (unsigned long) arg;
95763 +       timer->expires  = jiffies + hz_delay;
95764 +
95765 +       add_timer(timer);
95766 +}
95767 +
95768 +/* returns 1 if timer_fn was cancelled */
95769 +extern inline int
95770 +cancel_timer_fn(timer_fn_t *timer)
95771 +{
95772 +    return (del_timer_sync(timer));
95773 +}
95774 +
95775 +extern inline int
95776 +timer_fn_queued(timer_fn_t *timer)
95777 +{
95778 +    return (timer_pending (timer));
95779 +}
95780 +/*
95781 + * Hold/release CPU's.
95782 + */
95783 +
95784 +extern void    cpu_hold_all(void);
95785 +extern void    cpu_release_all(void);
95786 +#define CAPTURE_CPUS()         cpu_hold_all()
95787 +#define RELEASE_CPUS()         cpu_release_all()
95788 +
95789 +#define IASSERT ASSERT
95790 +
95791 +/* code to support multipage procfs entries */
95792 +
95793 +typedef struct display_info {
95794 +    void (*func)(long, char *, ...);
95795 +    long arg;
95796 +} DisplayInfo;
95797 +
95798 +typedef struct qsnet_proc_private 
95799 +{
95800 +       struct nodeset_private *pr_next;
95801 +       void                   *pr_user_data;
95802 +       char                   *pr_data;
95803 +       int                     pr_data_len;
95804 +       unsigned                pr_off;
95805 +       unsigned                pr_len;
95806 +       DisplayInfo             pr_di;
95807 +} QSNET_PROC_PRIVATE;
95808 +
95809 +#endif /* __QSNET_KERNEL_LINUX_H */
95810 +
95811 +/*
95812 + * Local variables:
95813 + * c-file-style: "linux"
95814 + * End:
95815 + */
95816 diff -urN clean/include/qsnet/kpte.h linux-2.6.9/include/qsnet/kpte.h
95817 --- clean/include/qsnet/kpte.h  1969-12-31 19:00:00.000000000 -0500
95818 +++ linux-2.6.9/include/qsnet/kpte.h    2005-03-18 08:56:40.000000000 -0500
95819 @@ -0,0 +1,132 @@
95820 +/*
95821 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
95822 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
95823 + *
95824 + *    For licensing information please see the supplied COPYING file
95825 + *
95826 + */
95827 +
95828 +#ifndef __QSNET_KPTE_H
95829 +#define __QSNET_KPTE_H
95830 +
95831 +#ident "@(#)$Id: kpte.h,v 1.5 2005/03/18 13:56:40 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
95832 +/*      $Source: /cvs/master/quadrics/qsnet/kpte.h,v $*/
95833 +
95834 +#include <qsnet/autoconf.h>
95835 +
95836 +#ifdef NO_RMAP
95837 +#      define pte_offset_kernel pte_offset
95838 +#      define pte_offset_map    pte_offset
95839 +#       define pte_unmap(A)      do { ; } while (0)
95840 +#endif
95841 +
95842 +/* 
95843 + * Pte stuff
95844 + */
95845 +static __inline__ struct mm_struct *
95846 +get_kern_mm(void)
95847 +{
95848 +        return &init_mm;
95849 +}
95850 +
95851 +static __inline__ pte_t *
95852 +find_pte_map(struct mm_struct *mm, unsigned long vaddr)
95853 +{
95854 +        pgd_t *pgd;
95855 +        pmd_t *pmd;
95856 +       pte_t *ptep;
95857 +
95858 +/* XXXX - need to handle huge tlb code */
95859 +       pgd = pgd_offset(mm, vaddr);
95860 +       if (pgd_none(*pgd) || pgd_bad(*pgd))
95861 +               goto out;
95862 +       
95863 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
95864 +       {
95865 +           pud_t *pud = pud_offset(pgd, vaddr);
95866 +           if (pud_none(*pud) || pud_bad(*pud))
95867 +               goto out;
95868 +
95869 +           pmd = pmd_offset(pud, vaddr);
95870 +       }
95871 +#else
95872 +       pmd = pmd_offset(pgd, vaddr);
95873 +#endif
95874 +       if (pmd_none(*pmd) || pmd_bad (*pmd))
95875 +               goto out;
95876 +
95877 +       ptep = pte_offset_map (pmd, vaddr);
95878 +       if (! ptep)
95879 +               goto out;
95880 +       
95881 +       if (pte_present (*ptep))
95882 +               return ptep;
95883 +
95884 +       pte_unmap (ptep);
95885 +out:
95886 +       return NULL;
95887 +}
95888 +
95889 +static __inline__ pte_t *
95890 +find_pte_kernel(unsigned long vaddr)
95891 +{
95892 +        pgd_t *pgd;
95893 +        pmd_t *pmd;
95894 +       pte_t *pte;
95895 +
95896 +       pgd = pgd_offset_k(vaddr);
95897 +       if (pgd && !pgd_none(*pgd)) {
95898 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
95899 +           pud_t *pud = pud_offset(pgd, vaddr);
95900 +           if (pud && !pud_none(*pud)) {
95901 +               pmd = pmd_offset(pud, vaddr);
95902 +#else
95903 +               pmd = pmd_offset(pgd, vaddr);
95904 +#endif
95905 +               if (pmd && pmd_present(*pmd)) {
95906 +                       pte = pte_offset_kernel(pmd, vaddr);
95907 +                       if (pte && pte_present(*pte))
95908 +                           return (pte);
95909 +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
95910 +               }
95911 +#endif
95912 +           }
95913 +       }
95914 +       return (NULL);
95915 +}
95916 +
95917 +static __inline__ physaddr_t
95918 +pte_phys(pte_t pte)
95919 +{
95920 +#if defined(LINUX_ALPHA)
95921 +       /* RedHat 7.1 2.4.3-12 
95922 +        * They have now enabled Monster windows on Tsunami
95923 +        * and so can use the Main's phys pte value 
95924 +        */
95925 +       return (pte_val(pte) >> (32-PAGE_SHIFT));
95926 +#elif defined(LINUX_I386) || defined(LINUX_X86_64)
95927 +#if defined(_PAGE_NX)
95928 +       return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1) & ~_PAGE_NX);
95929 +#else
95930 +       return (pte_val(pte) & ~((1 << PAGE_SHIFT)-1));
95931 +#endif
95932 +#elif defined(LINUX_SPARC)
95933 +       return (pte_val(pte) & _PAGE_PADDR);
95934 +#elif defined(LINUX_IA64)
95935 +       return (pte_val(pte) & _PFN_MASK);
95936 +#else
95937 +#error Unknown architecture
95938 +#endif
95939 +}
95940 +
95941 +#ifndef page_to_pfn
95942 +#define page_to_pfn(page)      (pte_phys(mk_pte(page, __pgprot(0))) >> PAGE_SHIFT)
95943 +#endif
95944 +
95945 +#endif /* __QSNET_KPTE_H */
95946 +
95947 +/*
95948 + * Local variables:
95949 + * c-file-style: "stroustrup"
95950 + * End:
95951 + */
95952 diff -urN clean/include/qsnet/kthread.h linux-2.6.9/include/qsnet/kthread.h
95953 --- clean/include/qsnet/kthread.h       1969-12-31 19:00:00.000000000 -0500
95954 +++ linux-2.6.9/include/qsnet/kthread.h 2004-10-28 07:50:29.000000000 -0400
95955 @@ -0,0 +1,71 @@
95956 +/*
95957 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
95958 + *    Copyright (c) 2002-2004 by Quadrics Ltd.
95959 + *
95960 + *    For licensing information please see the supplied COPYING file
95961 + *
95962 + */
95963 +
95964 +#ifndef __QSNET_KTHREAD_H
95965 +#define __QSNET_KTHREAD_H
95966 +
95967 +#ident "@(#)$Id: kthread.h,v 1.1 2004/10/28 11:50:29 david Exp $ $Name: QSNETMODULES-5-11-3_20050907 $"
95968 +/*      $Source: /cvs/master/quadrics/qsnet/kthread.h,v $*/
95969 +
95970 +#include <qsnet/autoconf.h>
95971 +
95972 +/* 
95973 + * kernel threads 
95974 + */
95975 +extern __inline__ void
95976 +kernel_thread_init(char *comm)
95977 +{
95978 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
95979 +#ifndef NO_NPTL
95980 +#      define sigmask_lock                     sighand->siglock
95981 +#endif
95982 +       lock_kernel();
95983 +       daemonize();
95984 +        reparent_to_init();
95985 +
95986 +        /* avoid getting signals */
95987 +        spin_lock_irq(&current->sigmask_lock);
95988 +        flush_signals(current);
95989 +        sigfillset(&current->blocked);
95990 +       
95991 +#ifdef NO_NPTL
95992 +        recalc_sigpending(current);
95993 +#else
95994 +        recalc_sigpending();
95995 +#endif
95996 +
95997 +        spin_unlock_irq(&current->sigmask_lock);
95998 +
95999 +       /* set our name for identification purposes */
96000 +       strncpy(current->comm, comm, sizeof(current->comm));
96001 +
96002 +       unlock_kernel();
96003 +#else
96004 +       daemonize(comm);
96005 +#endif
96006 +}
96007 +
96008 +extern __inline__ void *
96009 +kernel_thread_wrap(caddr_t stk, int stksize, void (*proc)(void *), void *arg)
96010 +{
96011 +        ASSERT(stk == NULL && stksize == 0);
96012 +        kernel_thread((int (*)(void *))proc, arg, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
96013 +        return (void *)1; /* non-null value */
96014 +}
96015 +
96016 +#define kernel_thread_create(proc,arg)  kernel_thread_wrap(NULL,0,(void (*)(void *))proc,arg)
96017 +#define kernel_thread_exit()           ((void) 0)
96018 +#define kernel_thread_become_highpri() ((void) 0)
96019 +
96020 +#endif /* __QSNET_KTHREAD_H */
96021 +
96022 +/*
96023 + * Local variables:
96024 + * c-file-style: "linux"
96025 + * End:
96026 + */
96027 diff -urN clean/include/qsnet/list.h linux-2.6.9/include/qsnet/list.h
96028 --- clean/include/qsnet/list.h  1969-12-31 19:00:00.000000000 -0500
96029 +++ linux-2.6.9/include/qsnet/list.h    2003-10-27 08:55:33.000000000 -0500
96030 @@ -0,0 +1,80 @@
96031 +/*
96032 + *    Copyright (c) 2003 by Quadrics Limited.
96033 + * 
96034 + *    For licensing information please see the supplied COPYING file
96035 + *
96036 + */
96037 +
96038 +#ident "@(#)$Id: list.h,v 1.5 2003/10/27 13:55:33 david Exp $"
96039 +/*      $Source: /cvs/master/quadrics/qsnet/list.h,v $*/
96040 +
96041 +#ifndef __QSNET_LIST_H
96042 +#define __QSNET_LIST_H
96043 +
96044 +/* Implementation of doubly linked lists - compatible with linux */
96045 +struct list_head 
96046 +{
96047 +    struct list_head *next;
96048 +    struct list_head *prev;
96049 +};
96050 +
96051 +#if !defined(LINUX)
96052 +#if ! defined( offsetof ) 
96053 +#define offsetof(T,F) ((int )&(((T *)0)->F))
96054 +#endif
96055 +
96056 +#define LIST_HEAD_INIT(name) { &(name), &(name) }
96057 +
96058 +#define LIST_HEAD(name) \
96059 +       struct list_head name = LIST_HEAD_INIT(name)
96060 +#endif
96061 +
96062 +#define list_entry(ptr, type, off) \
96063 +       ((type *) ((unsigned long)(ptr) - offsetof (type,off)))
96064 +
96065 +#define INIT_LIST_HEAD(list) \
96066 +MACRO_BEGIN \
96067 +       (list)->next = (list)->prev = (list); \
96068 +MACRO_END
96069 +
96070 +#define list_add(new, list) \
96071 +MACRO_BEGIN \
96072 +       (list)->next->prev = (new); \
96073 +       (new)->next = (list)->next; \
96074 +       (new)->prev = (list); \
96075 +       (list)->next = (new); \
96076 +MACRO_END
96077 +
96078 +#define list_add_tail(new, list) \
96079 +MACRO_BEGIN \
96080 +       (list)->prev->next = new; \
96081 +       (new)->prev = (list)->prev; \
96082 +       (new)->next = (list); \
96083 +       (list)->prev = (new); \
96084 +MACRO_END
96085 +
96086 +#define list_del(entry) \
96087 +MACRO_BEGIN \
96088 +       (entry)->prev->next = (entry)->next; \
96089 +       (entry)->next->prev = (entry)->prev; \
96090 +MACRO_END
96091 +
96092 +#define list_del_init(entry) \
96093 +MACRO_BEGIN \
96094 +       (entry)->prev->next = (entry)->next; \
96095 +       (entry)->next->prev = (entry)->prev; \
96096 +       (entry)->next = (entry)->prev = (entry); \
96097 +MACRO_END
96098 +
96099 +#define list_empty(list) \
96100 +       ((list)->next == (list))
96101 +
96102 +#define list_for_each(pos,list) \
96103 +       for (pos = (list)->next; pos != (list); \
96104 +            pos = (pos)->next)
96105 +
96106 +#define list_for_each_safe(pos,n,list) \
96107 +       for (pos = (list)->next, n = (pos)->next; pos != (list); \
96108 +            pos = n, n = (pos)->next)
96109 +
96110 +#endif /* __QSNET_LIST_H */
96111 diff -urN clean/include/qsnet/module.h linux-2.6.9/include/qsnet/module.h
96112 --- clean/include/qsnet/module.h        1969-12-31 19:00:00.000000000 -0500
96113 +++ linux-2.6.9/include/qsnet/module.h  2005-09-07 10:35:04.000000000 -0400
96114 @@ -0,0 +1,27 @@
96115 +/*
96116 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
96117 + *
96118 + *    For licensing information please see the supplied COPYING file
96119 + *
96120 + */
96121 +
96122 +#ifndef __QSNET_MODULE_H
96123 +#define __QSNET_MODULE_H
96124 +
96125 +#ident "$Id: module.h,v 1.1.2.1 2005/09/07 14:35:04 mike Exp $"
96126 +/*      $Source: /cvs/master/quadrics/qsnet/module.h,v $*/
96127 +
96128 +#include <linux/module.h>
96129 +
96130 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
96131 +#include <linux/moduleparam.h>
96132 +#endif
96133 +
96134 +#endif /* __QSNET_MODULE_H */
96135 +
96136 +
96137 +
96138 +
96139 +
96140 +
96141 +
96142 diff -urN clean/include/qsnet/mutex.h linux-2.6.9/include/qsnet/mutex.h
96143 --- clean/include/qsnet/mutex.h 1969-12-31 19:00:00.000000000 -0500
96144 +++ linux-2.6.9/include/qsnet/mutex.h   2003-06-26 12:05:45.000000000 -0400
96145 @@ -0,0 +1,91 @@
96146 +/*
96147 + *    Copyright (C) 2000  Regents of the University of California
96148 + *
96149 + *    This program is free software; you can redistribute it and/or modify
96150 + *    it under the terms of the GNU General Public License as published by
96151 + *    the Free Software Foundation; either version 2 of the License, or
96152 + *    (at your option) any later version.
96153 + *
96154 + *    This program is distributed in the hope that it will be useful,
96155 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
96156 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
96157 + *    GNU General Public License for more details.
96158 + *
96159 + *    You should have received a copy of the GNU General Public License
96160 + *    along with this program; if not, write to the Free Software
96161 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
96162 + *
96163 + */
96164 +
96165 +#if    !defined(_LINUX_MUTEX_H)
96166 +#define        _LINUX_MUTEX_H
96167 +#if    defined(__KERNEL__)
96168 +
96169 +#include <asm/smp.h>
96170 +#include <linux/spinlock.h>
96171 +#include <asm/semaphore.h>
96172 +#include <qsnet/debug.h>
96173 +#include <linux/interrupt.h>
96174 +#include <linux/version.h>
96175 +
96176 +#define PID_NONE       0
96177 +
96178 +typedef struct
96179 +{
96180 +    struct semaphore sem;
96181 +    pid_t           holder;
96182 +} kmutex_t;
96183 +
96184 +extern __inline__ void
96185 +kmutex_init (kmutex_t *l)
96186 +{
96187 +#if    LINUX_VERSION_CODE < KERNEL_VERSION(2,3,0)
96188 +    l->sem = MUTEX;
96189 +#else
96190 +    init_MUTEX(&l->sem);
96191 +#endif
96192 +    l->holder = PID_NONE;
96193 +}
96194 +
96195 +extern __inline__ void
96196 +kmutex_destroy (kmutex_t *l) 
96197 +{
96198 +    ASSERT (l->holder == PID_NONE); 
96199 +}
96200 +
96201 +extern __inline__ void
96202 +kmutex_lock (kmutex_t *l) 
96203 +{ 
96204 +    ASSERT(l->holder != current->pid);
96205 +    down (&l->sem);
96206 +    l->holder = current->pid; 
96207 +}
96208 +
96209 +extern __inline__ void
96210 +kmutex_unlock (kmutex_t *l) 
96211 +{
96212 +    ASSERT(l->holder == current->pid);
96213 +
96214 +    l->holder = PID_NONE;
96215 +    up (&l->sem);
96216 +}
96217 +
96218 +extern __inline__ int
96219 +kmutex_trylock (kmutex_t *l) 
96220 +{
96221 +    if (down_trylock (&l->sem) == 0) 
96222 +    {
96223 +       l->holder = current->pid;
96224 +       return (1);
96225 +    }
96226 +    return (0);
96227 +}
96228 +
96229 +extern __inline__ int
96230 +kmutex_is_locked (kmutex_t *l) 
96231 +{
96232 +    return (l->holder == current->pid);
96233 +}
96234 +
96235 +#endif /* __KERNEL__ */
96236 +#endif /* _LINUX_MUTEX_H */
96237 diff -urN clean/include/qsnet/procfs_linux.h linux-2.6.9/include/qsnet/procfs_linux.h
96238 --- clean/include/qsnet/procfs_linux.h  1969-12-31 19:00:00.000000000 -0500
96239 +++ linux-2.6.9/include/qsnet/procfs_linux.h    2005-07-20 07:35:37.000000000 -0400
96240 @@ -0,0 +1,263 @@
96241 +/*
96242 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
96243 + *
96244 + *    For licensing information please see the supplied COPYING file
96245 + *
96246 + */
96247 +
96248 +#ifndef __PROCFS_LINUX_H
96249 +#define __PROCFS_LINUX_H
96250 +
96251 +#ident "$Id: procfs_linux.h,v 1.13.2.2 2005/07/20 11:35:37 mike Exp $"
96252 +/*      $Source: /cvs/master/quadrics/qsnet/procfs_linux.h,v $ */
96253 +
96254 +#if defined(__KERNEL__)
96255 +
96256 +#include <qsnet/autoconf.h>
96257 +#include <qsnet/kernel_linux.h>
96258 +#include <linux/proc_fs.h>
96259 +
96260 +extern gid_t qsnet_procfs_gid;
96261 +
96262 +/* borrowed from fs/proc/proc_misc - helper for proc_read_int */
96263 +static inline int 
96264 +qsnet_proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof, int len)
96265 +{
96266 +       if (len <= off+count) *eof = 1;
96267 +       *start = page + off;
96268 +       len -= off;
96269 +       if (len>count) len = count;
96270 +       if (len<0) len = 0;
96271 +       return len;
96272 +}
96273 +
96274 +static inline int
96275 +qsnet_proc_write_int(struct file *file, const char *buf, unsigned long count, void *data)
96276 +{
96277 +       char tmpbuf[16];
96278 +       int  res = count;
96279 +       
96280 +       if (count > sizeof(tmpbuf) - 1)
96281 +               return (-EINVAL);
96282 +       
96283 +       MOD_INC_USE_COUNT;
96284 +       if (copy_from_user(tmpbuf, buf, count))
96285 +               res = -EFAULT;
96286 +       else
96287 +       {
96288 +               tmpbuf[count] = '\0'; 
96289 +               *(int *)data = simple_strtoul(tmpbuf, NULL, 0);
96290 +       }
96291 +       MOD_DEC_USE_COUNT;
96292 +       
96293 +       return (res);
96294 +}
96295 +
96296 +static inline int
96297 +qsnet_proc_read_int(char *page, char **start, off_t off, int count, int *eof, void *data)
96298 +{
96299 +       int len, res;
96300 +       
96301 +       MOD_INC_USE_COUNT;
96302 +       
96303 +       len = sprintf(page, "%d\n", *(int *)data);
96304 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
96305 +       
96306 +       MOD_DEC_USE_COUNT;
96307 +       return (res);
96308 +}
96309 +
96310 +static inline struct proc_dir_entry *
96311 +qsnet_proc_register_int(struct proc_dir_entry *dir, char *path, int *var, int read_only)
96312 +{
96313 +       struct proc_dir_entry *p;
96314 +       
96315 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
96316 +       if (p) {
96317 +               if (! read_only) 
96318 +                       p->write_proc = qsnet_proc_write_int;
96319 +               p->read_proc  = qsnet_proc_read_int;
96320 +               p->data       = var;
96321 +               p->owner      = THIS_MODULE;
96322 +               p->gid        = qsnet_procfs_gid;
96323 +       }
96324 +       return p;
96325 +}
96326 +
96327 +static inline int
96328 +qsnet_proc_write_hex(struct file *file, const char *buf, unsigned long count, void *data)
96329 +{
96330 +       char tmpbuf[16];
96331 +       int  res = count;
96332 +       
96333 +       if (count > sizeof(tmpbuf) - 1)
96334 +               return (-EINVAL);
96335 +       
96336 +       MOD_INC_USE_COUNT;
96337 +       if (copy_from_user(tmpbuf, buf, count))
96338 +               res = -EFAULT;
96339 +       else
96340 +       {
96341 +               tmpbuf[count] = '\0'; 
96342 +               *(int *)data = simple_strtoul(tmpbuf, NULL, 0);
96343 +       }
96344 +       MOD_DEC_USE_COUNT;
96345 +       
96346 +       return (res);
96347 +}
96348 +
96349 +static inline int
96350 +qsnet_proc_read_hex(char *page, char **start, off_t off, int count, int *eof, void *data)
96351 +{
96352 +       int len, res;
96353 +       
96354 +       MOD_INC_USE_COUNT;
96355 +       
96356 +       len = sprintf(page, "0x%x\n", *(int *)data);
96357 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
96358 +       
96359 +       MOD_DEC_USE_COUNT;
96360 +       return (res);
96361 +}
96362 +
96363 +static inline struct proc_dir_entry *
96364 +qsnet_proc_register_hex(struct proc_dir_entry *dir, char *path, int *var, int read_only)
96365 +{
96366 +       struct proc_dir_entry *p;
96367 +       
96368 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
96369 +       if (p) {
96370 +               if (! read_only) 
96371 +                       p->write_proc = qsnet_proc_write_hex;
96372 +               p->read_proc  = qsnet_proc_read_hex;
96373 +               p->data       = var;
96374 +               p->owner      = THIS_MODULE;
96375 +               p->gid        = qsnet_procfs_gid;
96376 +       }
96377 +       return p;
96378 +}
96379 +
96380 +#define QSNET_PROC_STR_LEN_MAX ((int)256)
96381 +
96382 +static inline int
96383 +qsnet_proc_write_str(struct file *file, const char *buf, unsigned long count, void *data)
96384 +{
96385 +       int  res = count;
96386 +       
96387 +       if (count > (QSNET_PROC_STR_LEN_MAX - 1))
96388 +               return (-EINVAL);
96389 +       
96390 +       MOD_INC_USE_COUNT;
96391 +       if (copy_from_user((char *)data, buf, count))
96392 +               res = -EFAULT;
96393 +       else
96394 +       {
96395 +               ((char *)data)[count] = '\0'; 
96396 +               /* remove linefeed */
96397 +               if ( (count) && (((char *)data)[count -1] == '\n'))
96398 +                       ((char *)data)[count -1] = '\0';
96399 +       }
96400 +       MOD_DEC_USE_COUNT;
96401 +       
96402 +       return (res);
96403 +}
96404 +
96405 +static inline int
96406 +qsnet_proc_read_str(char *page, char **start, off_t off, int count, int *eof, void *data)
96407 +{
96408 +       int len, res;
96409 +       
96410 +       if ( strlen(data) > (count + 1))
96411 +               return (-EINVAL);       
96412 +
96413 +       MOD_INC_USE_COUNT;
96414 +       
96415 +       /* can't output more than the supplied buffer */
96416 +       if ( strlen(data) > (count + 1))
96417 +       {
96418 +               MOD_DEC_USE_COUNT;
96419 +               return (-EINVAL);       
96420 +       }
96421 +
96422 +
96423 +       len = sprintf(page, "%s\n", (char *)data);
96424 +       if (len > count)
96425 +       {
96426 +               MOD_DEC_USE_COUNT;
96427 +               return (-EINVAL);       
96428 +       }
96429 +
96430 +       res = qsnet_proc_calc_metrics(page, start, off, count, eof, len);
96431 +       
96432 +       MOD_DEC_USE_COUNT;
96433 +       return (res);
96434 +}
96435 +
96436 +static inline struct proc_dir_entry *
96437 +qsnet_proc_register_str(struct proc_dir_entry *dir, char *path, char *var, int read_only)
96438 +{
96439 +       struct proc_dir_entry *p;
96440 +       
96441 +       p = create_proc_entry(path, read_only ? S_IRUGO : S_IRUGO|S_IWUSR|S_IWGRP, dir);
96442 +       if (p) {
96443 +               if (! read_only) 
96444 +                       p->write_proc = qsnet_proc_write_str;
96445 +               p->read_proc  = qsnet_proc_read_str;
96446 +               p->data       = var;
96447 +               p->owner      = THIS_MODULE;
96448 +               p->gid        = qsnet_procfs_gid;
96449 +       }
96450 +       return p;
96451 +}
96452 +
96453 +extern struct proc_dir_entry *qsnet_procfs_root; 
96454 +extern struct proc_dir_entry *qsnet_procfs_config;
96455 +
96456 +/* code for procfs handling multipage requests */
96457 +
96458 +void qsnet_proc_character_fill (long mode, char *fmt, ...);
96459 +int  qsnet_proc_release (struct inode *inode, struct file *file);
96460 +
96461 +static inline ssize_t
96462 +qsnet_proc_read (struct file *file, char *buf, size_t count, loff_t *ppos)
96463 +{
96464 +    QSNET_PROC_PRIVATE *pr  = (QSNET_PROC_PRIVATE *) file->private_data;
96465 +    int                 error;
96466 +
96467 +    if (pr->pr_off >= pr->pr_len)
96468 +       return (0);
96469 +
96470 +    if ((error = verify_area (VERIFY_WRITE, buf, count)) != 0)
96471 +       return (error);
96472 +
96473 +    if (count >= (pr->pr_len - pr->pr_off))
96474 +       count = pr->pr_len - pr->pr_off;
96475 +
96476 +    copy_to_user (buf, pr->pr_data + pr->pr_off, count);
96477 +
96478 +    pr->pr_off += count;
96479 +    *ppos      += count;
96480 +
96481 +    return (count);
96482 +}
96483 +
96484 +
96485 +#ifdef NO_PDE
96486 +static inline struct proc_dir_entry *PDE(const struct inode *inode)
96487 +{
96488 +    return inode->u.generic_ip;
96489 +}
96490 +#endif
96491 +#endif /* __KERNEL__ */
96492 +
96493 +#define QSNET_PROCFS_IOCTL      "/proc/qsnet/ioctl"
96494 +#define QSNET_PROCFS_KMEM_DEBUG "/proc/qsnet/kmem_debug"
96495 +#define QSNET_PROCFS_VERSION    "/proc/qsnet/version"
96496 +
96497 +#endif /* __PROCFS_LINUX_H */
96498 +
96499 +/*
96500 + * Local variables:
96501 + * c-file-style: "linux"
96502 + * End:
96503 + */
96504 diff -urN clean/include/qsnet/types.h linux-2.6.9/include/qsnet/types.h
96505 --- clean/include/qsnet/types.h 1969-12-31 19:00:00.000000000 -0500
96506 +++ linux-2.6.9/include/qsnet/types.h   2003-08-01 12:21:38.000000000 -0400
96507 @@ -0,0 +1,90 @@
96508 +/*
96509 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
96510 + *
96511 + *    For licensing information please see the supplied COPYING file
96512 + *
96513 + */
96514 +
96515 +#ifndef __QSNET_TYPES_H
96516 +#define __QSNET_TYPES_H
96517 +
96518 +#ident "$Id: types.h,v 1.16 2003/08/01 16:21:38 addy Exp $"
96519 +/*      $Source: /cvs/master/quadrics/qsnet/types.h,v $*/
96520 +
96521 +/*
96522 + * Include typedefs for ISO/IEC 9899:1990 standard types
96523 + *
96524 + *
96525 + *    The following integer typedefs are used:
96526 + *
96527 + *     int8_t, int16_t, int32_t, int64_t, intptr_t
96528 + *     uint8_t, uint16_t, uint32_t, uint64_t, uintptr_t
96529 + *     uchar_t, ushort_t, uint_t, ulong_t
96530 + *
96531 + *    <sys/types.h> also defines the following:
96532 + *     u_char, u_short, u_int, u_long, caddr_t
96533 + */
96534 +
96535 +#include <qsnet/config.h>
96536 +
96537 +#if defined(SOLARIS) && defined(__KERNEL__)
96538 +#  include <sys/inttypes.h>
96539 +#endif
96540 +
96541 +#if defined(SOLARIS) && !defined(__KERNEL__)
96542 +#  include <inttypes.h>
96543 +#  include <sys/types.h>
96544 +#endif
96545 +
96546 +#if defined(DIGITAL_UNIX) && defined(__KERNEL__)
96547 +#  include <sys/bitypes.h>
96548 +#endif
96549 +
96550 +#if defined(DIGITAL_UNIX) && !defined(__KERNEL__)
96551 +#  include <inttypes.h>
96552 +#  include <sys/types.h>
96553 +#endif
96554 +
96555 +#if defined(LINUX) && defined(__KERNEL__)
96556 +#  include <linux/types.h>
96557 +#endif
96558 +
96559 +#if defined(LINUX) && !defined(__KERNEL__)
96560 +#  include <stdint.h>
96561 +#  include <inttypes.h>
96562 +#  include <sys/types.h>
96563 +
96564 +typedef unsigned char  uchar_t;
96565 +typedef unsigned short ushort_t;
96566 +typedef unsigned int   uint_t;
96567 +typedef unsigned long  ulong_t;
96568 +#endif
96569 +
96570 +#if defined(QNX)
96571 +#  include <inttypes.h>
96572 +#  include <sys/types.h>
96573 +#endif
96574 +
96575 +/* Define a type that will represent a Main CPU pointer
96576 + * on both the Main and the Elan
96577 + */
96578 +#ifdef __ELAN__
96579 +
96580 +#if defined(_MAIN_LP64)
96581 +#define QSNET_MAIN_PTR uint64_t
96582 +#else
96583 +#define QSNET_MAIN_PTR uint32_t
96584 +#endif
96585 +
96586 +#else
96587 +
96588 +#ifdef _LP64
96589 +#define QSNET_MAIN_PTR uint64_t
96590 +#else
96591 +#define QSNET_MAIN_PTR uint32_t
96592 +#endif
96593 +
96594 +#endif
96595 +
96596 +
96597 +#endif /* __QSNET_TYPES_H */
96598 diff -urN clean/include/qsnet/workarounds.h linux-2.6.9/include/qsnet/workarounds.h
96599 --- clean/include/qsnet/workarounds.h   1969-12-31 19:00:00.000000000 -0500
96600 +++ linux-2.6.9/include/qsnet/workarounds.h     2002-08-09 07:15:55.000000000 -0400
96601 @@ -0,0 +1,24 @@
96602 +/*
96603 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
96604 + *
96605 + *    For licensing information please see the supplied COPYING file
96606 + *
96607 + */
96608 +
96609 +#ifndef _QSNET_WORKAROUNDS_H
96610 +#define _QSNET_WORKAROUNDS_H
96611 +
96612 +#ident "$Id: workarounds.h,v 1.11 2002/08/09 11:15:55 addy Exp $"
96613 +/*      $Source: /cvs/master/quadrics/qsnet/workarounds.h,v $ */
96614 +
96615 +/* Elan workarounds */
96616 +#undef  ELAN_REVA_SUPPORTED    /* rev a elans no longer supported. */
96617 +#undef  ELITE_REVA_SUPPORTED   /* removed since RMS disables broadcast on rev A elites. */
96618 +#define ELAN_REVB_BUG_1
96619 +/* WORKAROUND for GNAT hw-elan3/3263 */
96620 +#define ELAN_REVB_BUG_2
96621 +
96622 +/* WORKAROUND for GNATs ic-elan3/3637 & ic-elan3/3550 */
96623 +#define ELAN_REVB_BUG_3
96624 +
96625 +#endif /* _QSNET_WORKAROUNDS_H */
96626 diff -urN clean/include/rms/rmscall.h linux-2.6.9/include/rms/rmscall.h
96627 --- clean/include/rms/rmscall.h 1969-12-31 19:00:00.000000000 -0500
96628 +++ linux-2.6.9/include/rms/rmscall.h   2005-07-28 06:49:09.000000000 -0400
96629 @@ -0,0 +1,149 @@
96630 +/*
96631 + * Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
96632 + *
96633 + * For licensing information please see the supplied COPYING file
96634 + *
96635 + * rmscall.h:  user interface to rms kernel module
96636 + *
96637 + * $Id: rmscall.h,v 1.27.2.1 2005/07/28 10:49:09 robin Exp $
96638 + * $Source: /cvs/master/quadrics/rmsmod/rmscall.h,v $
96639 + *
96640 + */
96641 +
96642 +#ifndef RMSCALL_H_INCLUDED
96643 +#define RMSCALL_H_INCLUDED 1
96644 +
96645 +#ident "$Id: rmscall.h,v 1.27.2.1 2005/07/28 10:49:09 robin Exp $"
96646 +
96647 +#ifdef __cplusplus
96648 +extern "C" {
96649 +#endif
96650 +
96651 +/*
96652 + * flags for rms_fork_register
96653 + *
96654 + * RMS_IOF is not in a public header file 
96655 + */
96656 +#define RMS_IOF 1              /* inherit on fork */
96657 +
96658 +#ifndef __KERNEL__
96659 +#include <sys/types.h>
96660 +#endif
96661 +
96662 +#include <qsnet/types.h>
96663 +#include <elan/capability.h>
96664 +
96665 +#define MAXCOREPATHLEN 32
96666 +
96667 +#if defined(SOLARIS)
96668 +typedef long long rmstime_t;
96669 +#else  /* DIGITAL_UNIX */
96670 +typedef long rmstime_t;
96671 +#endif
96672 +
96673 +typedef enum {
96674 +    
96675 +    PRG_RUNNING  = 0x01,       /* program is running                  */
96676 +    PRG_ZOMBIE   = 0x02,       /* last process on a node has exited   */
96677 +    PRG_NODE     = 0x04,       /* stats are complete for this node    */
96678 +    PRG_KILLED   = 0x08,       /* program was killed                  */
96679 +    PRG_SUSPEND  = 0x10,        /* program is suspended                */
96680 +    PRG_ERROR    = 0x80                /* error collecting stats              */
96681 +
96682 +} PRGSTATUS_FLAGS;
96683 +
96684 +/*
96685 + * program time statistics extended in version 5 of the kernel module
96686 + */
96687 +typedef struct {
96688 +    rmstime_t etime;           /* elapsed cpu time (milli-secs)       */
96689 +    rmstime_t atime;           /* allocated cpu time (cpu milli-secs) */
96690 +    rmstime_t utime;           /* user cpu time (cpu milli-secs)      */
96691 +    rmstime_t stime;           /* system cpu time (cpu milli-secs)    */
96692 +    int ncpus;                 /* number of cpus allocated            */
96693 +    int flags;                 /* program status flags                */
96694 +    int mem;                   /* max memory size in MBytes           */
96695 +    int pageflts;              /* number of page faults               */
96696 +    rmstime_t memint;          /* memory integral                     */
96697 +} prgstats_old_t;
96698 +
96699 +typedef struct {
96700 +    uint64_t etime;            /* elapsed cpu time (milli-secs)       */
96701 +    uint64_t atime;            /* allocated cpu time (cpu milli-secs) */
96702 +    uint64_t utime;            /* user cpu time (cpu milli-secs)      */
96703 +    uint64_t stime;            /* system cpu time (cpu milli-secs)    */
96704 +    uint64_t pageflts;         /* number of page faults               */
96705 +    uint64_t memint;           /* memory integral                     */
96706 +    uint64_t ebytes;           /* data transferred by the Elan(s)     */
96707 +    uint64_t exfers;           /* number of Elan data transfers       */
96708 +    uint64_t spare64[4];       /* expansion space                     */
96709 +    int ncpus;                 /* number of cpus allocated            */
96710 +    int flags;                 /* program status flags                */
96711 +    int mem;                   /* max memory size in MBytes           */
96712 +    int spare32[5];             /* expansion space                     */
96713 +} prgstats_t;
96714 +
96715 +int  rmsmod_init(void);
96716 +void rmsmod_fini(void);
96717 +
96718 +int rms_setcorepath(caddr_t path);
96719 +int rms_getcorepath(pid_t pid, caddr_t path, int maxlen);
96720 +int rms_prgcreate(int id, uid_t uid, int cpus);
96721 +int rms_prgdestroy(int id);
96722 +int rms_prgids(int maxids, int *prgids, int *nprgs);
96723 +int rms_prginfo(int id, int maxpids, pid_t *pids, int *nprocs);
96724 +int rms_prgaddcap(int id, int index, ELAN_CAPABILITY *cap);
96725 +
96726 +int rms_prgsuspend(int id);
96727 +int rms_prgresume(int id);
96728 +int rms_prgsignal(int id, int signo);
96729 +
96730 +int rms_getprgid(pid_t pid, int *id);
96731 +int rms_ncaps(int *ncaps);
96732 +int rms_getcap(int index, ELAN_CAPABILITY *cap);
96733 +int rms_mycap(int *index);
96734 +int rms_setcap(int index, int ctx);
96735 +int rms_prefcap(int nprocess, int *index);
96736 +
96737 +int   rms_prggetstats(int id, prgstats_t *stats);
96738 +void  rms_accumulatestats(prgstats_t *total, prgstats_t *stats);
96739 +char *rms_statsreport(prgstats_t *stats, char *buf);
96740 +
96741 +int rms_elaninitdone(int vp);
96742 +int rms_prgelanpids(int id, int maxpids, int *vps, pid_t *pids, int *npids);
96743 +int rms_setelanstats(int id, uint64_t ebytes, uint64_t exfers);
96744 +
96745 +int rms_setpset(int psid);
96746 +int rms_getpset(int id, int *psid);
96747 +int rms_modversion(void);
96748 +
96749 +int rms_addproc(int id, pid_t pid);
96750 +int rms_removeproc(int id, pid_t pid);
96751 +int rms_ptrack_enabled(void);
96752 +
96753 +#ifdef __cplusplus
96754 +}
96755 +#endif
96756 +
96757 +
96758 +#if defined(__KERNEL__)
96759 +
96760 +int rms_init(void);
96761 +int rms_fini(void);
96762 +int rms_reconfigure(void);
96763 +
96764 +extern int rms_debug;
96765 +
96766 +#if 1
96767 +#define DBG(x) do if (rms_debug) x ; while (0)
96768 +#else
96769 +#define DBG(x)
96770 +#endif
96771 +
96772 +#endif
96773 +
96774 +#endif /* RMSCALL_H_INCLUDED */
96775 +
96776 +
96777 +
96778 +
96779 diff -urN clean/include/rms/rmsio.h linux-2.6.9/include/rms/rmsio.h
96780 --- clean/include/rms/rmsio.h   1969-12-31 19:00:00.000000000 -0500
96781 +++ linux-2.6.9/include/rms/rmsio.h     2004-08-26 07:49:30.000000000 -0400
96782 @@ -0,0 +1,194 @@
96783 +/*
96784 + *    Copyright (c) 1996-2002 by Quadrics Supercomputers World Ltd.
96785 + *
96786 + *    For licensing information please see the supplied COPYING file
96787 + *
96788 + */
96789 +
96790 +#ident "@(#)$Id: rmsio.h,v 1.7 2004/08/26 11:49:30 duncan Exp $"
96791 +/*      $Source: /cvs/master/quadrics/rmsmod/rmsio.h,v $*/
96792 +
96793 +
96794 +#ifndef __RMSMOD_RMSIO_H
96795 +#define __RMSMOD_RMSIO_H
96796 +
96797 +/* arg is corepath string */
96798 +#define RMSIO_SETCOREPATH      _IOW ('r', 1, char)
96799 +
96800 +typedef struct rmsio_getcorepath_struct
96801 +{
96802 +    pid_t              pid;
96803 +    char               *corepath;
96804 +    int                        maxlen;
96805 +} RMSIO_GETCOREPATH_STRUCT;
96806 +#define RMSIO_GETCOREPATH      _IOW ('r', 2, RMSIO_GETCOREPATH_STRUCT)
96807 +
96808 +typedef struct rmsio_prgcreate_struct
96809 +{
96810 +    int                        id;
96811 +    uid_t              uid;
96812 +    int                        cpus;
96813 +} RMSIO_PRGCREATE_STRUCT;
96814 +#define RMSIO_PRGCREATE                _IOW ('r', 3, RMSIO_PRGCREATE_STRUCT)
96815 +
96816 +typedef struct rmsio_prginfo_struct
96817 +{
96818 +    int                        id;
96819 +    int                        maxpids;
96820 +    pid_t              *pids;
96821 +    int                        *nprocs;
96822 +} RMSIO_PRGINFO_STRUCT;
96823 +#define RMSIO_PRGINFO          _IOW ('r', 4, RMSIO_PRGINFO_STRUCT)
96824 +
96825 +typedef struct rmsio_prgsignal_struct
96826 +{
96827 +    int                        id;
96828 +    int                        signo;
96829 +} RMSIO_PRGSIGNAL_STRUCT;
96830 +#define RMSIO_PRGSIGNAL                _IOW ('r', 5, RMSIO_PRGSIGNAL_STRUCT)
96831 +
96832 +typedef struct rmsio_prgaddcap_struct
96833 +{
96834 +    int                        id;
96835 +    int                        index;
96836 +    ELAN_CAPABILITY    *cap;
96837 +} RMSIO_PRGADDCAP_STRUCT;
96838 +#define RMSIO_PRGADDCAP                _IOW ('r', 6, RMSIO_PRGADDCAP_STRUCT)
96839 +typedef struct rmsio_setcap_struct
96840 +{
96841 +    int                        index;
96842 +    int                        ctx;
96843 +} RMSIO_SETCAP_STRUCT;
96844 +#define RMSIO_SETCAP           _IOW ('r', 7, RMSIO_SETCAP_STRUCT)
96845 +
96846 +typedef struct rmsio_getcap_struct
96847 +{
96848 +    int                        index;
96849 +    ELAN_CAPABILITY     *cap;
96850 +} RMSIO_GETCAP_STRUCT;
96851 +#define RMSIO_GETCAP           _IOW ('r', 8, RMSIO_GETCAP_STRUCT)
96852 +
96853 +typedef struct rmsio_getcap_struct32
96854 +{
96855 +    int                        index;
96856 +    unsigned int        capptr;
96857 +} RMSIO_GETCAP_STRUCT32;
96858 +#define RMSIO_GETCAP32         _IOW ('r', 8, RMSIO_GETCAP_STRUCT32)
96859 +
96860 +/* arg is pointer to ncaps */
96861 +#define RMSIO_NCAPS            _IOW ('r', 9, int)
96862 +
96863 +typedef struct rmsio_prggetstats_struct
96864 +{
96865 +    int                        id;
96866 +    prgstats_old_t     *stats;
96867 +} RMSIO_PRGGETSTATS_STRUCT;
96868 +#define RMSIO_PRGGETSTATS      _IOW ('r', 10, RMSIO_PRGGETSTATS_STRUCT)
96869 +
96870 +/* arg is program id */
96871 +#define RMSIO_PRGSUSPEND       _IOW ('r', 11, int)
96872 +#define RMSIO_PRGRESUME                _IOW ('r', 12, int)
96873 +#define RMSIO_PRGDESTROY       _IOW ('r', 13, int)
96874 +
96875 +typedef struct rmsio_getprgid_struct
96876 +{
96877 +    pid_t              pid;
96878 +    int                        *id;
96879 +} RMSIO_GETPRGID_STRUCT;
96880 +#define RMSIO_GETPRGID         _IOW ('r', 14, RMSIO_GETPRGID_STRUCT)
96881 +
96882 +typedef struct rmsio_getprgid_struct32
96883 +{
96884 +    pid_t              pid;
96885 +    unsigned int       idptr;
96886 +} RMSIO_GETPRGID_STRUCT32;
96887 +#define RMSIO_GETPRGID32       _IOW ('r', 14, RMSIO_GETPRGID_STRUCT32)
96888 +
96889 +/* arg is pointer to index */
96890 +#define RMSIO_GETMYCAP         _IOW ('r', 15, int)
96891 +
96892 +typedef struct rmsio_prgids_struct
96893 +{
96894 +    int                        maxids;
96895 +    int                        *prgids;
96896 +    int                        *nprgs;
96897 +} RMSIO_PRGIDS_STRUCT;
96898 +#define RMSIO_PRGIDS           _IOW ('r', 16, RMSIO_PRGIDS_STRUCT)
96899 +
96900 +/* arg is pointer to vp */
96901 +#define RMSIO_ELANINITDONE     _IOW ('r', 17, int)
96902 +
96903 +typedef struct rmsio_prgelanpids_struct
96904 +{
96905 +    int    id;
96906 +    int    maxpids;
96907 +    int   *vps;
96908 +    int   *pids;
96909 +    int   *npids;
96910 +} RMSIO_PRGELANPIDS_STRUCT;
96911 +#define RMSIO_PRGELANPIDS      _IOW ('r', 18, RMSIO_PRGELANPIDS_STRUCT)
96912 +
96913 +typedef struct rmsio_setpset_struct
96914 +{
96915 +    int    id;
96916 +    int    psid;
96917 +} RMSIO_SETPSET_STRUCT;
96918 +#define RMSIO_SETPSET          _IOW ('r', 19, RMSIO_SETPSET_STRUCT)
96919 +
96920 +typedef struct rmsio_getpset_struct
96921 +{
96922 +    int    id;
96923 +    int   *psid;
96924 +} RMSIO_GETPSET_STRUCT;
96925 +#define RMSIO_GETPSET          _IOW ('r', 20, RMSIO_GETPSET_STRUCT)
96926 +
96927 +/*
96928 + * we have to pass a pointer to the stats; the switch
96929 + * statement goes wrong in the module if the size
96930 + * is too large
96931 + */
96932 +typedef struct {
96933 +    uint64_t ebytes;
96934 +    uint64_t exfers;
96935 +} elanstats_t;
96936 +
96937 +typedef struct rmsio_setelanstats_struct
96938 +{
96939 +    int    id;
96940 +    elanstats_t *estats;
96941 +} RMSIO_SETELANSTATS_STRUCT;
96942 +#define RMSIO_SETELANSTATS      _IOW ('r', 21, RMSIO_SETELANSTATS_STRUCT)
96943 +
96944 +typedef struct rmsio_prggetstats2_struct
96945 +{
96946 +    int                        id;
96947 +    prgstats_t         *stats;
96948 +} RMSIO_PRGGETSTATS2_STRUCT;
96949 +#define RMSIO_PRGGETSTATS2     _IOW ('r', 22, RMSIO_PRGGETSTATS2_STRUCT)
96950 +
96951 +typedef struct rmsio_modversion_struct
96952 +{
96953 +    int *version;
96954 +} RMSIO_MODVERSION_STRUCT;
96955 +#define RMSIO_MODVERSION       _IOW ('r', 23, RMSIO_MODVERSION_STRUCT)
96956 +
96957 +typedef struct rmsio_proc_struct
96958 +{
96959 +    int    id;
96960 +    pid_t  pid;
96961 +} RMSIO_PROC_STRUCT;
96962 +#define RMSIO_ADDPROC           _IOW ('r', 24, RMSIO_PROC_STRUCT)
96963 +#define RMSIO_REMOVEPROC        _IOW ('r', 25, RMSIO_PROC_STRUCT)
96964 +
96965 +
96966 +
96967 +#endif /* __RMSMOD_RMSIO_H */
96968 +
96969 +
96970 +
96971 +
96972 +
96973 +
96974 +
96975 +
96976 +
96977 diff -urN clean/ipc/shm.c linux-2.6.9/ipc/shm.c
96978 --- clean/ipc/shm.c     2005-05-13 13:39:10.000000000 -0400
96979 +++ linux-2.6.9/ipc/shm.c       2005-10-10 17:47:17.000000000 -0400
96980 @@ -26,6 +26,7 @@
96981  #include <linux/proc_fs.h>
96982  #include <linux/shmem_fs.h>
96983  #include <linux/security.h>
96984 +#include <linux/module.h>
96985  #include <linux/audit.h>
96986  #include <asm/uaccess.h>
96987  
96988 @@ -850,6 +851,44 @@
96989         return retval;
96990  }
96991  
96992 +/*
96993 + * Mark all segments created by this process for destruction
96994 + */
96995 +int shm_cleanup (void)
96996 +{
96997 +       int i;
96998 +
96999 +       down(&shm_ids.sem);
97000 +
97001 +       for (i = 0; i <= shm_ids.max_id; i++) {
97002 +               struct shmid_kernel *shp;
97003 +
97004 +               shp = shm_lock(i);
97005 +               if (shp != NULL) {
97006 +                       /* mark this segment for destruction if we created it */
97007 +                       if (current->pid == shp->shm_cprid)
97008 +                       {
97009 +                               /* copy of IPC_RMID code */
97010 +                               if (shp->shm_nattch) {
97011 +                                       shp->shm_flags |= SHM_DEST;
97012 +                                       /* do not find it any more */
97013 +                                       shp->shm_perm.key = IPC_PRIVATE;
97014 +                               } else {
97015 +                                       shm_destroy(shp);
97016 +                                       continue;
97017 +                               }
97018 +                       }
97019 +
97020 +                       shm_unlock(shp);
97021 +               }
97022 +       }
97023 +
97024 +       up(&shm_ids.sem);
97025 +
97026 +       return 0;
97027 +}
97028 +EXPORT_SYMBOL_GPL(shm_cleanup);
97029 +
97030  #ifdef CONFIG_PROC_FS
97031  static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
97032  {
97033 diff -urN clean/kernel/exit.c linux-2.6.9/kernel/exit.c
97034 --- clean/kernel/exit.c 2005-10-10 17:43:57.000000000 -0400
97035 +++ linux-2.6.9/kernel/exit.c   2005-10-10 17:47:17.000000000 -0400
97036 @@ -30,6 +30,8 @@
97037  #include <asm/pgtable.h>
97038  #include <asm/mmu_context.h>
97039  
97040 +#include <linux/ptrack.h>
97041 +
97042  extern void sem_exit (void);
97043  extern struct task_struct *child_reaper;
97044  
97045 @@ -822,6 +824,8 @@
97046  #endif
97047                 current->tux_exit();
97048         }
97049 +       /* Notify any ptrack callbacks of the process exit */
97050 +       ptrack_call_callbacks (PTRACK_PHASE_EXIT, NULL);
97051         __exit_mm(tsk);
97052  
97053         exit_sem(tsk);
97054 diff -urN clean/kernel/fork.c linux-2.6.9/kernel/fork.c
97055 --- clean/kernel/fork.c 2005-05-13 13:39:08.000000000 -0400
97056 +++ linux-2.6.9/kernel/fork.c   2005-10-10 17:47:17.000000000 -0400
97057 @@ -14,6 +14,7 @@
97058  #include <linux/config.h>
97059  #include <linux/slab.h>
97060  #include <linux/init.h>
97061 +#include <linux/ptrack.h>
97062  #include <linux/unistd.h>
97063  #include <linux/smp_lock.h>
97064  #include <linux/module.h>
97065 @@ -430,6 +431,9 @@
97066         mm->page_table_lock = SPIN_LOCK_UNLOCKED;
97067         mm->ioctx_list_lock = RW_LOCK_UNLOCKED;
97068         mm->ioctx_list = NULL;
97069 +#ifdef CONFIG_IOPROC
97070 +       mm->ioproc_ops = NULL;
97071 +#endif
97072         mm->default_kioctx = (struct kioctx)INIT_KIOCTX(mm->default_kioctx, *mm);
97073         mm->free_area_cache = TASK_UNMAPPED_BASE;
97074  
97075 @@ -1264,6 +1268,11 @@
97076                         set_tsk_thread_flag(p, TIF_SIGPENDING);
97077                 }
97078  
97079 +               if (ptrack_call_callbacks(PTRACK_PHASE_CLONE, p)) {
97080 +                       sigaddset(&p->pending.signal, SIGKILL);
97081 +                       set_tsk_thread_flag(p, TIF_SIGPENDING);
97082 +               }
97083 +
97084                 if (!(clone_flags & CLONE_STOPPED))
97085                         wake_up_new_task(p, clone_flags);
97086                 else
97087 diff -urN clean/kernel/Kconfig linux-2.6.9/kernel/Kconfig
97088 --- clean/kernel/Kconfig        1969-12-31 19:00:00.000000000 -0500
97089 +++ linux-2.6.9/kernel/Kconfig  2005-10-10 17:47:17.000000000 -0400
97090 @@ -0,0 +1,14 @@
97091 +#
97092 +# Kernel subsystem specific config
97093 +# 
97094 +
97095 +# Support for Process Tracking callbacks
97096 +#
97097 +config PTRACK
97098 +       bool "Enable PTRACK process tracking hooks"
97099 +       default y
97100 +       help
97101 +       This option enables hooks to be called when processes are
97102 +       created and destroyed in order for a resource management 
97103 +       system to know which processes are a member of a "job" and 
97104 +       to be able to clean up when the job is terminated.
97105 diff -urN clean/kernel/Makefile linux-2.6.9/kernel/Makefile
97106 --- clean/kernel/Makefile       2005-05-13 13:39:07.000000000 -0400
97107 +++ linux-2.6.9/kernel/Makefile 2005-10-10 17:47:17.000000000 -0400
97108 @@ -26,6 +26,7 @@
97109  obj-$(CONFIG_AUDIT) += audit.o
97110  obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
97111  obj-$(CONFIG_KPROBES) += kprobes.o
97112 +obj-$(CONFIG_PTRACK) += ptrack.o
97113  
97114  ifneq ($(CONFIG_IA64),y)
97115  # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
97116 diff -urN clean/kernel/ptrack.c linux-2.6.9/kernel/ptrack.c
97117 --- clean/kernel/ptrack.c       1969-12-31 19:00:00.000000000 -0500
97118 +++ linux-2.6.9/kernel/ptrack.c 2005-10-10 17:47:17.000000000 -0400
97119 @@ -0,0 +1,145 @@
97120 +/*
97121 + *    Copyright (C) 2000  Regents of the University of California
97122 + *
97123 + *    This program is free software; you can redistribute it and/or modify
97124 + *    it under the terms of the GNU General Public License as published by
97125 + *    the Free Software Foundation; either version 2 of the License, or
97126 + *    (at your option) any later version.
97127 + *
97128 + *    This program is distributed in the hope that it will be useful,
97129 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
97130 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
97131 + *    GNU General Public License for more details.
97132 + *
97133 + *    You should have received a copy of the GNU General Public License
97134 + *    along with this program; if not, write to the Free Software
97135 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
97136 + *
97137 + * Derived from exit_actn.c by
97138 + *    Copyright (C) 2003 Quadrics Ltd.
97139 + */
97140 +
97141 +
97142 +#include <linux/module.h>
97143 +#include <linux/spinlock.h>
97144 +#include <linux/sched.h>
97145 +#include <linux/ptrack.h>
97146 +#include <linux/slab.h>
97147 +#include <linux/list.h>
97148 +
97149 +#include <asm/errno.h>
97150 +
97151 +int
97152 +ptrack_register (ptrack_callback_t callback, void *arg)
97153 +{
97154 +       struct ptrack_desc *desc = kmalloc (sizeof (struct ptrack_desc), GFP_KERNEL);
97155 +       
97156 +       if (desc == NULL)
97157 +               return -ENOMEM;
97158 +
97159 +       desc->callback = callback;
97160 +       desc->arg      = arg;
97161 +       
97162 +       list_add_tail (&desc->link, &current->ptrack_list);
97163 +       
97164 +       return 0;
97165 +}
97166 +
97167 +void
97168 +ptrack_deregister (ptrack_callback_t callback, void *arg)
97169 +{      
97170 +       struct list_head *el, *nel;
97171 +       
97172 +       list_for_each_safe (el, nel, &current->ptrack_list) {
97173 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
97174 +               
97175 +               if (desc->callback == callback && desc->arg == arg) {
97176 +                       list_del (&desc->link);
97177 +                       kfree (desc);
97178 +               }
97179 +       }
97180 +}
97181 +
97182 +int
97183 +ptrack_registered (ptrack_callback_t callback, void *arg)
97184 +{
97185 +       struct list_head *el;
97186 +       
97187 +       list_for_each (el, &current->ptrack_list) {
97188 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
97189 +               
97190 +               if (desc->callback == callback && desc->arg == arg)
97191 +                       return 1;
97192 +       }
97193 +       return 0;
97194 +}      
97195 +        
97196 +int
97197 +ptrack_call_callbacks (int phase, struct task_struct *child)
97198 +{
97199 +       struct list_head *el, *nel;
97200 +       struct ptrack_desc *new;
97201 +       int res;
97202 +
97203 +       if (phase == PTRACK_PHASE_CLONE)
97204 +               INIT_LIST_HEAD (&child->ptrack_list);
97205 +
97206 +       list_for_each_safe (el, nel, &current->ptrack_list) {
97207 +               struct ptrack_desc *desc = list_entry (el, struct ptrack_desc, link);
97208 +               
97209 +              res = desc->callback (desc->arg, phase, child);
97210 +               
97211 +               switch (phase)
97212 +               {
97213 +               case PTRACK_PHASE_EXIT:
97214 +                       list_del (&desc->link);
97215 +                       kfree (desc);
97216 +                       break;
97217 +                       
97218 +               case PTRACK_PHASE_CLONE:
97219 +                      switch (res)
97220 +                      {
97221 +                      case PTRACK_FINISHED:
97222 +                              break;
97223 +
97224 +                      case PTRACK_INNHERIT:
97225 +                              if ((new = kmalloc (sizeof (struct ptrack_desc), GFP_ATOMIC)) == NULL)
97226 +                              {
97227 +                                      /* allocation failed - notify that this process is not going
97228 +                                       * to be started by signalling clone failure.
97229 +                                       */
97230 +                                      desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
97231 +                                      
97232 +                                      goto failed;
97233 +                              }
97234 +
97235 +                               new->callback = desc->callback;
97236 +                               new->arg      = desc->arg;
97237 +                               
97238 +                               list_add_tail (&new->link, &child->ptrack_list);
97239 +                              break;
97240 +
97241 +                      case PTRACK_DENIED:
97242 +                              goto failed;
97243 +                       }
97244 +                      break;
97245 +               }
97246 +       }
97247 +
97248 +       return 0;
97249 +
97250 + failed:
97251 +       while (! list_empty (&child->ptrack_list))
97252 +       {
97253 +              struct ptrack_desc *desc = list_entry (child->ptrack_list.next, struct ptrack_desc, link);
97254 +              
97255 +              desc->callback (desc->arg, PTRACK_PHASE_CLONE_FAIL, child);
97256 +
97257 +              list_del (&desc->link);
97258 +              kfree (desc);
97259 +       }
97260 +       return 1;
97261 +}
97262 +EXPORT_SYMBOL(ptrack_register);
97263 +EXPORT_SYMBOL(ptrack_deregister);
97264 +EXPORT_SYMBOL(ptrack_registered);
97265 diff -urN clean/kernel/signal.c linux-2.6.9/kernel/signal.c
97266 --- clean/kernel/signal.c       2005-05-13 13:39:11.000000000 -0400
97267 +++ linux-2.6.9/kernel/signal.c 2005-10-10 17:47:17.000000000 -0400
97268 @@ -2266,6 +2266,7 @@
97269  
97270         return kill_something_info(sig, &info, pid);
97271  }
97272 +EXPORT_SYMBOL_GPL(sys_kill);
97273  
97274  /**
97275   *  sys_tgkill - send signal to one specific thread
97276 diff -urN clean/Makefile linux-2.6.9/Makefile
97277 --- clean/Makefile      2005-05-13 13:39:19.000000000 -0400
97278 +++ linux-2.6.9/Makefile        2005-10-10 17:47:31.000000000 -0400
97279 @@ -1,7 +1,7 @@
97280  VERSION = 2
97281  PATCHLEVEL = 6
97282  SUBLEVEL = 9
97283 -EXTRAVERSION = -prep
97284 +EXTRAVERSION = -prep.qp2.2.5.11.3qsnet
97285  NAME=AC 1
97286  
97287  # *DOCUMENTATION*
97288 diff -urN clean/mm/fremap.c linux-2.6.9/mm/fremap.c
97289 --- clean/mm/fremap.c   2004-10-18 17:53:06.000000000 -0400
97290 +++ linux-2.6.9/mm/fremap.c     2005-10-10 17:47:17.000000000 -0400
97291 @@ -12,6 +12,7 @@
97292  #include <linux/mman.h>
97293  #include <linux/pagemap.h>
97294  #include <linux/swapops.h>
97295 +#include <linux/ioproc.h>
97296  #include <linux/rmap.h>
97297  #include <linux/module.h>
97298  
97299 @@ -29,6 +30,7 @@
97300         if (pte_present(pte)) {
97301                 unsigned long pfn = pte_pfn(pte);
97302  
97303 +               ioproc_invalidate_page(vma, addr);
97304                 flush_cache_page(vma, addr);
97305                 pte = ptep_clear_flush(vma, addr, ptep);
97306                 if (pfn_valid(pfn)) {
97307 @@ -93,6 +95,7 @@
97308         pte_val = *pte;
97309         pte_unmap(pte);
97310         update_mmu_cache(vma, addr, pte_val);
97311 +       ioproc_update_page(vma, addr);
97312  
97313         err = 0;
97314  err_unlock:
97315 @@ -132,6 +135,7 @@
97316         pte_val = *pte;
97317         pte_unmap(pte);
97318         update_mmu_cache(vma, addr, pte_val);
97319 +       ioproc_update_page(vma, addr);
97320         spin_unlock(&mm->page_table_lock);
97321         return 0;
97322  
97323 diff -urN clean/mm/hugetlb.c linux-2.6.9/mm/hugetlb.c
97324 --- clean/mm/hugetlb.c  2004-10-18 17:54:37.000000000 -0400
97325 +++ linux-2.6.9/mm/hugetlb.c    2005-10-10 17:47:17.000000000 -0400
97326 @@ -10,6 +10,7 @@
97327  #include <linux/hugetlb.h>
97328  #include <linux/sysctl.h>
97329  #include <linux/highmem.h>
97330 +#include <linux/ioproc.h>
97331  
97332  const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
97333  static unsigned long nr_huge_pages, free_huge_pages;
97334 @@ -254,6 +255,7 @@
97335         struct mm_struct *mm = vma->vm_mm;
97336  
97337         spin_lock(&mm->page_table_lock);
97338 +       ioproc_invalidate_range(vma, start, start + length);
97339         unmap_hugepage_range(vma, start, start + length);
97340         spin_unlock(&mm->page_table_lock);
97341  }
97342 diff -urN clean/mm/ioproc.c linux-2.6.9/mm/ioproc.c
97343 --- clean/mm/ioproc.c   1969-12-31 19:00:00.000000000 -0500
97344 +++ linux-2.6.9/mm/ioproc.c     2005-10-10 17:47:17.000000000 -0400
97345 @@ -0,0 +1,58 @@
97346 +/* -*- linux-c -*-
97347 + *
97348 + *    Copyright (C) 2002-2004 Quadrics Ltd.
97349 + *
97350 + *    This program is free software; you can redistribute it and/or modify
97351 + *    it under the terms of the GNU General Public License as published by
97352 + *    the Free Software Foundation; either version 2 of the License, or
97353 + *    (at your option) any later version.
97354 + *
97355 + *    This program is distributed in the hope that it will be useful,
97356 + *    but WITHOUT ANY WARRANTY; without even the implied warranty of
97357 + *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
97358 + *    GNU General Public License for more details.
97359 + *
97360 + *    You should have received a copy of the GNU General Public License
97361 + *    along with this program; if not, write to the Free Software
97362 + *    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
97363 + *
97364 + *
97365 + */
97366 +
97367 +/*
97368 + * Registration for IO processor page table updates.
97369 + */
97370 +
97371 +#include <linux/kernel.h>
97372 +#include <linux/module.h>
97373 +
97374 +#include <linux/mm.h>
97375 +#include <linux/ioproc.h>
97376 +
97377 +int
97378 +ioproc_register_ops(struct mm_struct *mm, struct ioproc_ops *ip)
97379 +{
97380 +       ip->next = mm->ioproc_ops;
97381 +       mm->ioproc_ops = ip;
97382 +
97383 +       return 0;
97384 +}
97385 +
97386 +EXPORT_SYMBOL_GPL(ioproc_register_ops);
97387 +
97388 +int
97389 +ioproc_unregister_ops(struct mm_struct *mm, struct ioproc_ops *ip)
97390 +{
97391 +       struct ioproc_ops **tmp;
97392 +
97393 +       for (tmp = &mm->ioproc_ops; *tmp && *tmp != ip; tmp= &(*tmp)->next)
97394 +               ;
97395 +       if (*tmp) {
97396 +               *tmp = ip->next;
97397 +               return 0;
97398 +       }
97399 +
97400 +       return -EINVAL;
97401 +}
97402 +
97403 +EXPORT_SYMBOL_GPL(ioproc_unregister_ops);
97404 diff -urN clean/mm/Kconfig linux-2.6.9/mm/Kconfig
97405 --- clean/mm/Kconfig    1969-12-31 19:00:00.000000000 -0500
97406 +++ linux-2.6.9/mm/Kconfig      2005-10-10 17:47:17.000000000 -0400
97407 @@ -0,0 +1,15 @@
97408 +#
97409 +# VM subsystem specific config
97410 +# 
97411 +
97412 +# Support for IO processors which have advanced RDMA capabilities
97413 +#
97414 +config IOPROC
97415 +       bool "Enable IOPROC VM hooks"
97416 +       depends on MMU
97417 +       default y
97418 +       help
97419 +       This option enables hooks in the VM subsystem so that IO devices which
97420 +       incorporate advanced RDMA capabilities can be kept in sync with CPU 
97421 +       page table changes.
97422 +       See Documentation/vm/ioproc.txt for more details.
97423 diff -urN clean/mm/Makefile linux-2.6.9/mm/Makefile
97424 --- clean/mm/Makefile   2005-05-13 13:39:02.000000000 -0400
97425 +++ linux-2.6.9/mm/Makefile     2005-10-10 17:47:17.000000000 -0400
97426 @@ -16,6 +16,7 @@
97427  obj-$(CONFIG_X86_4G)   += usercopy.o
97428  obj-$(CONFIG_HUGETLBFS)        += hugetlb.o
97429  obj-$(CONFIG_NUMA)     += mempolicy.o
97430 +obj-$(CONFIG_IOPROC)    += ioproc.o
97431  obj-$(CONFIG_SHMEM) += shmem.o
97432  obj-$(CONFIG_TINY_SHMEM) += tiny-shmem.o
97433  
97434 diff -urN clean/mm/memory.c linux-2.6.9/mm/memory.c
97435 --- clean/mm/memory.c   2005-05-13 13:39:08.000000000 -0400
97436 +++ linux-2.6.9/mm/memory.c     2005-10-10 17:47:17.000000000 -0400
97437 @@ -43,6 +43,7 @@
97438  #include <linux/swap.h>
97439  #include <linux/highmem.h>
97440  #include <linux/pagemap.h>
97441 +#include <linux/ioproc.h>
97442  #include <linux/rmap.h>
97443  #include <linux/module.h>
97444  #include <linux/init.h>
97445 @@ -619,6 +620,7 @@
97446  
97447         lru_add_drain();
97448         spin_lock(&mm->page_table_lock);
97449 +       ioproc_invalidate_range(vma, address, end);
97450         tlb = tlb_gather_mmu(mm, 0);
97451         unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
97452         tlb_finish_mmu(tlb, address, end);
97453 @@ -968,6 +970,7 @@
97454                 BUG();
97455  
97456         spin_lock(&mm->page_table_lock);
97457 +       ioproc_invalidate_range(vma, beg, end);
97458         do {
97459                 pmd_t *pmd = pmd_alloc(mm, dir, address);
97460                 error = -ENOMEM;
97461 @@ -982,6 +985,7 @@
97462         /*
97463          * Why flush? zeromap_pte_range has a BUG_ON for !pte_none()
97464          */
97465 +       ioproc_update_range(vma, beg, end);
97466         flush_tlb_range(vma, beg, end);
97467         spin_unlock(&mm->page_table_lock);
97468         return error;
97469 @@ -1062,6 +1066,7 @@
97470         vma->vm_flags |= VM_IO | VM_RESERVED;
97471  
97472         spin_lock(&mm->page_table_lock);
97473 +       ioproc_invalidate_range(vma, beg, end);
97474         do {
97475                 pmd_t *pmd = pmd_alloc(mm, dir, from);
97476                 error = -ENOMEM;
97477 @@ -1076,6 +1081,7 @@
97478         /*
97479          * Why flush? remap_pte_range has a BUG_ON for !pte_none()
97480          */
97481 +       ioproc_update_range(vma, beg, end);
97482         flush_tlb_range(vma, beg, end);
97483         spin_unlock(&mm->page_table_lock);
97484         return error;
97485 @@ -1162,6 +1168,7 @@
97486                         ptep_set_access_flags(vma, address, page_table, entry, 1);
97487                         update_mmu_cache(vma, address, entry);
97488                         pte_unmap(page_table);
97489 +                       ioproc_update_page(vma, address);
97490                         spin_unlock(&mm->page_table_lock);
97491                         return VM_FAULT_MINOR;
97492                 }
97493 @@ -1194,6 +1201,7 @@
97494                         ++mm->rss;
97495                 else
97496                         page_remove_rmap(old_page);
97497 +               ioproc_invalidate_page(vma, address);
97498                 break_cow(vma, new_page, address, page_table);
97499                 lru_cache_add_active(new_page);
97500                 page_add_anon_rmap(new_page, vma, address);
97501 @@ -1202,6 +1210,7 @@
97502                 new_page = old_page;
97503         }
97504         pte_unmap(page_table);
97505 +       ioproc_update_page(vma, address);
97506         page_cache_release(new_page);
97507         page_cache_release(old_page);
97508         spin_unlock(&mm->page_table_lock);
97509 @@ -1495,6 +1504,7 @@
97510         /* No need to invalidate - it was non-present before */
97511         update_mmu_cache(vma, address, pte);
97512         pte_unmap(page_table);
97513 +       ioproc_update_page(vma, address);
97514         spin_unlock(&mm->page_table_lock);
97515  out:
97516         return ret;
97517 @@ -1555,6 +1565,7 @@
97518  
97519         /* No need to invalidate - it was non-present before */
97520         update_mmu_cache(vma, addr, entry);
97521 +       ioproc_update_page(vma, addr);
97522         lazy_mmu_prot_update(entry);
97523         spin_unlock(&mm->page_table_lock);
97524  out:
97525 @@ -1673,6 +1684,7 @@
97526  
97527         /* no need to invalidate: a not-present page shouldn't be cached */
97528         update_mmu_cache(vma, address, entry);
97529 +       ioproc_update_page(vma, address);
97530         lazy_mmu_prot_update(entry);
97531         spin_unlock(&mm->page_table_lock);
97532  out:
97533 @@ -1853,6 +1865,7 @@
97534                 return ret;
97535         return ret == len ? 0 : -1;
97536  }
97537 +EXPORT_SYMBOL(make_pages_present);
97538  
97539  /* 
97540   * Map a vmalloc()-space virtual address to the physical page.
97541 diff -urN clean/mm/mmap.c linux-2.6.9/mm/mmap.c
97542 --- clean/mm/mmap.c     2005-05-13 13:39:10.000000000 -0400
97543 +++ linux-2.6.9/mm/mmap.c       2005-10-10 17:47:17.000000000 -0400
97544 @@ -15,6 +15,7 @@
97545  #include <linux/init.h>
97546  #include <linux/file.h>
97547  #include <linux/fs.h>
97548 +#include <linux/ioproc.h>
97549  #include <linux/personality.h>
97550  #include <linux/security.h>
97551  #include <linux/hugetlb.h>
97552 @@ -1680,6 +1681,7 @@
97553         unsigned long nr_accounted = 0;
97554  
97555         lru_add_drain();
97556 +       ioproc_invalidate_range(vma, start, end);
97557         tlb = tlb_gather_mmu(mm, 0);
97558         unmap_vmas(&tlb, mm, vma, start, end, &nr_accounted, NULL);
97559         vm_unacct_memory(nr_accounted);
97560 @@ -1965,6 +1967,7 @@
97561  
97562         spin_lock(&mm->page_table_lock);
97563  
97564 +       ioproc_release(mm);
97565         tlb = tlb_gather_mmu(mm, 1);
97566         flush_cache_mm(mm);
97567         /* Use ~0UL here to ensure all VMAs in the mm are unmapped */
97568 diff -urN clean/mm/mprotect.c linux-2.6.9/mm/mprotect.c
97569 --- clean/mm/mprotect.c 2005-05-13 13:39:02.000000000 -0400
97570 +++ linux-2.6.9/mm/mprotect.c   2005-10-10 17:47:17.000000000 -0400
97571 @@ -10,6 +10,7 @@
97572  
97573  #include <linux/mm.h>
97574  #include <linux/hugetlb.h>
97575 +#include <linux/ioproc.h>
97576  #include <linux/slab.h>
97577  #include <linux/shm.h>
97578  #include <linux/mman.h>
97579 @@ -99,6 +100,7 @@
97580         if (start >= end)
97581                 BUG();
97582         spin_lock(&current->mm->page_table_lock);
97583 +       ioproc_change_protection(vma, start, end, newprot);
97584         do {
97585                 change_pmd_range(dir, start, end - start, newprot);
97586                 start = (start + PGDIR_SIZE) & PGDIR_MASK;
97587 diff -urN clean/mm/mremap.c linux-2.6.9/mm/mremap.c
97588 --- clean/mm/mremap.c   2005-05-13 13:39:02.000000000 -0400
97589 +++ linux-2.6.9/mm/mremap.c     2005-10-10 17:47:17.000000000 -0400
97590 @@ -9,6 +9,7 @@
97591  
97592  #include <linux/mm.h>
97593  #include <linux/hugetlb.h>
97594 +#include <linux/ioproc.h>
97595  #include <linux/slab.h>
97596  #include <linux/shm.h>
97597  #include <linux/mman.h>
97598 @@ -148,6 +149,8 @@
97599  {
97600         unsigned long offset;
97601  
97602 +       ioproc_invalidate_range(vma, old_addr, old_addr + len);
97603 +       ioproc_invalidate_range(vma, new_addr, new_addr + len);
97604         flush_cache_range(vma, old_addr, old_addr + len);
97605  
97606         /*
97607 diff -urN clean/mm/msync.c linux-2.6.9/mm/msync.c
97608 --- clean/mm/msync.c    2004-10-18 17:53:51.000000000 -0400
97609 +++ linux-2.6.9/mm/msync.c      2005-10-10 17:47:17.000000000 -0400
97610 @@ -12,6 +12,7 @@
97611  #include <linux/mm.h>
97612  #include <linux/mman.h>
97613  #include <linux/hugetlb.h>
97614 +#include <linux/ioproc.h>
97615  
97616  #include <asm/pgtable.h>
97617  #include <asm/tlbflush.h>
97618 @@ -115,6 +116,7 @@
97619  
97620         if (address >= end)
97621                 BUG();
97622 +       ioproc_sync_range(vma, address, end);
97623         do {
97624                 error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
97625                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
97626 diff -urN clean/mm/rmap.c linux-2.6.9/mm/rmap.c
97627 --- clean/mm/rmap.c     2005-05-13 13:39:08.000000000 -0400
97628 +++ linux-2.6.9/mm/rmap.c       2005-10-10 17:47:17.000000000 -0400
97629 @@ -51,6 +51,7 @@
97630  #include <linux/slab.h>
97631  #include <linux/init.h>
97632  #include <linux/rmap.h>
97633 +#include <linux/ioproc.h>
97634  #include <linux/rcupdate.h>
97635  
97636  #include <asm/tlbflush.h>
97637 @@ -566,6 +567,7 @@
97638         }
97639  
97640         /* Nuke the page table entry. */
97641 +       ioproc_invalidate_page(vma, address);
97642         flush_cache_page(vma, address);
97643         pteval = ptep_clear_flush(vma, address, pte);
97644  
97645 @@ -673,6 +675,7 @@
97646                         continue;
97647  
97648                 /* Nuke the page table entry. */
97649 +               ioproc_invalidate_page(vma, address);
97650                 flush_cache_page(vma, address);
97651                 pteval = ptep_clear_flush(vma, address, pte);
97652